Dataset schema (column, type, value range):

    repo_name           string   lengths 7 to 71
    file_path           string   lengths 5 to 118
    context             list
    import_statement    string   lengths 45 to 12.5k
    token_num           int64    641 to 99.4k
    cropped_code        string   lengths 44 to 17k
    all_code            string   lengths 43 to 754k
    next_line           string   lengths 2 to 330
    gold_snippet_index  int64    0 to 68
    created_at          string   length 25 (fixed)
    level               string   9 classes
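Each record pairs a target file with retrieved cross-file context. A minimal loading sketch follows; the JSON Lines storage, the file name `data.jsonl`, and the reading of `gold_snippet_index` as an index into `context` are assumptions made for illustration, not part of the schema above.

```python
import json

def iter_rows(path="data.jsonl"):
    # Hypothetical loader: assumes one JSON object per line,
    # with exactly the columns listed in the schema above.
    with open(path, encoding="utf-8") as f:
        for line in f:
            yield json.loads(line)

for row in iter_rows():
    print(row["repo_name"], row["file_path"], row["token_num"], row["level"])
    # 'context' is a list of {"identifier", "path", "snippet"} dicts;
    # 'gold_snippet_index' presumably selects the snippet needed to predict
    # 'next_line' given 'cropped_code' (an inference from the field names).
    gold = row["context"][row["gold_snippet_index"]]
    print(gold["path"], "->", row["next_line"])
    break
```

The example rows below are shown field by field, in the column order of the schema above.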
mbreuss/consistency_trajectory_models_toy_task
ctm_train.py
[ { "identifier": "ConsistencyTrajectoryModel", "path": "ctm/ctm.py", "snippet": "class ConsistencyTrajectoryModel(nn.Module):\n\n def __init__(\n self, \n data_dim: int,\n cond_dim: int,\n sampler_type: str,\n sigma_data: float,\n sigma_min: float,\n sigma_max: float,\n conditioned: bool,\n device: str,\n use_teacher: bool = False,\n solver_type: str = 'heun',\n n_discrete_t: int = 20,\n lr: float = 1e-4,\n rho: int = 7,\n diffusion_lambda: float = 1.0,\n gan_lambda: float = 0.0,\n ema_rate: float = 0.999,\n n_sampling_steps: int = 10,\n sigma_sample_density_type: str = 'loglogistic',\n ) -> None:\n super().__init__()\n self.use_gan = False\n self.ema_rate = ema_rate\n self.diffusion_lambda = diffusion_lambda\n self.gan_lambda = gan_lambda\n self.n_discrete_t = n_discrete_t\n self.model = ConsistencyTrajectoryNetwork(\n x_dim=data_dim,\n hidden_dim=256,\n time_embed_dim=4,\n cond_dim=cond_dim,\n cond_mask_prob=0.0,\n num_hidden_layers=4,\n output_dim=data_dim,\n dropout_rate=0.1,\n cond_conditional=conditioned\n ).to(device)\n # we need an ema version of the model for the consistency loss\n self.target_model = copy.deepcopy(self.model)\n for param in self.target_model.parameters():\n param.requires_grad = False\n # we further can use a teacher model for the solver\n self.use_teacher = use_teacher\n if self.use_teacher:\n self.teacher_model = copy.deepcopy(self.model)\n self.device = device\n self.sampler_type = sampler_type\n # use the score wrapper \n self.sigma_data = sigma_data\n self.sigma_min = sigma_min\n self.sigma_max = sigma_max\n self.rho = rho\n self.n_sampling_steps = n_sampling_steps\n self.solver_type = solver_type\n self.sigma_sample_density_type = sigma_sample_density_type\n self.optimizer = torch.optim.Adam(self.model.parameters(), lr=lr)\n self.epochs = 0\n \n def diffusion_wrapper(self, model, x, cond, t, s):\n \"\"\"\n Performs the diffusion wrapper for the given model, x, cond, and t.\n Based on the conditioning from EDM Karras et al. 
2022.\n\n Args:\n model (torch.nn.Module): The neural network model to be used for the diffusion process.\n x (torch.Tensor): The input tensor to the model.\n cond (torch.Tensor): The conditioning tensor to be used during the diffusion process.\n t (float): The time step for the diffusion process.\n\n Returns:\n torch.Tensor: The scaled output tensor after applying the diffusion wrapper to the model.\n \"\"\"\n c_skip = self.sigma_data**2 / (\n t ** 2 + self.sigma_data**2\n )\n c_out = (\n t * self.sigma_data / (t**2 + self.sigma_data**2) ** 0.5\n )\n # these two are not mentioned in the paper but they use it in their code\n c_in = 1 / (t**2 + self.sigma_data**2) ** 0.5\n \n t = 0.25 * torch.log(t + 1e-40)\n c_in = append_dims(c_in, x.ndim)\n c_out = append_dims(c_out, x.ndim)\n c_skip = append_dims(c_skip, x.ndim)\n\n diffusion_output = model(c_in * x, cond, t, s)\n scaled_output = c_out * diffusion_output + c_skip * x\n \n return scaled_output\n \n def cmt_wrapper(self, model, x, cond, t, s):\n \"\"\"\n Applies the new cmt wrapper from page 4 of https://openreview.net/attachment?id=ymjI8feDTD&name=pdf\n\n Args:\n model (torch.nn.Module): The neural network model to be used for the diffusion process.\n x (torch.Tensor): The input tensor to the model.\n cond (torch.Tensor): The conditioning tensor to be used during the diffusion process.\n t (float): The time step for the diffusion process.\n s: (float): the target noise level for the diffusion process.\n\n Returns:\n torch.Tensor: The scaled output tensor after applying the diffusion wrapper to the model.\n \"\"\"\n if len(t.shape) == 1:\n t = t.unsqueeze(1)\n if len(s.shape) == 1:\n s = s.unsqueeze(1)\n G_0 = (s / t) * x + (1 - s /t) * self.diffusion_wrapper(model, x, cond, t, s)\n \n return G_0\n \n def _update_ema_weights(self):\n \"\"\"\n Updates the exponential moving average (EMA) weights of the target model.\n\n The method performs the following steps:\n 1. Gets the state dictionary of the self.model (source model).\n 2. Updates the EMA weights for each parameter in the target model by computing the weighted average between \n the corresponding parameter in the target model and the parameter in the source model, using the EMA rate parameter.\n \"\"\"\n # Get the state dictionary of the current/source model\n state_dict = self.model.state_dict()\n # Get the state dictionary of the target model\n target_state_dict = self.target_model.state_dict()\n\n # Iterate over the parameters in the target model state dictionary\n for key in state_dict:\n if key in target_state_dict:\n # Update the EMA weights for each parameter\n target_param_data = target_state_dict[key].data\n model_param_data = state_dict[key].data\n target_state_dict[key].data.copy_((1 - self.ema_rate) * target_param_data + self.ema_rate * model_param_data)\n\n # You can optionally load the updated state dict into the target model, if necessary\n # self.target_model.load_state_dict(target_state_dict)\n\n def train_step(self, x, cond):\n \"\"\"\n Main training step method to compute the loss for the Consistency Trajectory Model.\n The loss consists of three parts: the consistency loss, the diffusion loss, and the GAN loss (optional).\n The first part is similar to Song et al. (2023) and the second part is similar to Karras et al. 
(2022).\n The GAN Part is not implemented right now, since its not attractive for Imitation Learning applications.\n \"\"\"\n self.model.train()\n t_ctm, s, u = self.sample_noise_levels(shape=(len(x),), N=self.n_discrete_t, device=self.device)\n noise = torch.randn_like(x)\n # get the noise samples\n x_t = x + noise * append_dims(t_ctm, x.ndim)\n # use the solver if we have a teacher model otherwise use the euler method\n solver_target = self.solver(x_t, cond, t_ctm, u)\n\n # compute the cmt consistency loss\n cmt_loss = self.ctm_loss(x_t, cond, t_ctm, s, u, solver_target)\n \n # compute the diffusion loss\n # sample noise for the diffusion loss from the continuous noise distribution\n if self.diffusion_lambda > 0:\n t_sm = self.make_sample_density()(shape=(len(x),), device=self.device)\n x_t_sm = x + noise * append_dims(t_sm, x.ndim)\n diffusion_loss = self.diffusion_loss(x, x_t_sm, cond, t_sm)\n else:\n diffusion_loss = 0\n # compute the GAN loss if chosen\n # not implemented yet\n if self.use_gan:\n gan_loss = self.gan_loss(x_t, cond, x_t_sm)\n else:\n gan_loss = 0\n\n # compute the total loss\n \n loss = cmt_loss + self.diffusion_lambda * diffusion_loss + self.gan_lambda * gan_loss\n \n # perform the backward pass\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n # update the ema weights\n self._update_ema_weights()\n \n return loss, cmt_loss, diffusion_loss, gan_loss\n \n def sample_noise_levels(self, shape, N, device='cpu'):\n \"\"\"\n Samples a tensor of the specified shape with noise levels \n from `N` discretized levels of the noise scheduler.\n\n Args:\n shape (tuple): Shape of the tensor to sample.\n N (int): Number of discrete noise levels to discretize the scheduler.\n device (str): Device on which to create the noise levels, 'cpu' or 'cuda'.\n\n Returns:\n torch.Tensor: Tensor containing sampled noise levels.\n \"\"\"\n # Get the N discretized noise levels\n discretized_sigmas = get_sigmas_exponential(N, self.sigma_min, self.sigma_max, self.device)\n \n # Sample indices from this discretized range\n t = torch.randint(1, N, size=shape, device=device)\n s = torch.round(torch.rand_like(t.to(torch.float32)) * t.to(torch.float32)).to(torch.int32)\n u = torch.round(torch.rand_like(t.to(torch.float32)) * (t.to(torch.float32) -1 - s.to(torch.float32))+ s).to(torch.int32)\n # Use these indices to gather the noise levels from the discretized sigmas\n sigma_t = discretized_sigmas[t]\n sigma_s = discretized_sigmas[s]\n sigma_u = discretized_sigmas[u]\n return sigma_t, sigma_s, sigma_u\n\n def solver(self, x, cond, t, s):\n \"\"\"\n Eq. (3) in the paper\n \"\"\"\n if self.use_teacher:\n solver = self.teacher_model\n else:\n solver = self.model\n\n if self.solver_type == 'euler':\n solver_pred = self.euler_update_step(solver, x, cond, t, s)\n elif self.solver_type == 'heun':\n solver_pred = self.heun_update_step(solver, x, cond, t, s)\n elif self.solver_type == 'ddim':\n solver_pred = self.ddim_update_step(solver, x, cond, t, s)\n\n return solver_pred\n\n \n def eval_step(self, x, cond):\n \"\"\"\n Eval step method to compute the loss for the action prediction.\n \"\"\"\n self.model.eval()\n self.target_model.eval()\n x = x.to(self.device)\n cond = cond.to(self.device)\n # next generate the discrete timesteps\n t = [self.sample_discrete_timesteps(i) for i in range(self.t_steps)]\n # compute the loss\n x_T = torch.randn_like(x) * self.sigma_max\n pred_x = self. 
sample(x_T, cond, t)\n loss = torch.nn.functional.mse_loss(pred_x, x)\n return loss\n \n def ctm_loss(self, x_t, cond, t, s, u, solver_target):\n \"\"\"\n # TODO add description\n\n Args:\n x (torch.Tensor): Input tensor of shape [batch_size, dim].\n cond (torch.Tensor): Conditioning tensor of shape [batch_size, cond_dim].\n t1 (torch.Tensor): First discrete timestep tensor of shape [batch_size, 1].\n t2 (torch.Tensor): Second discrete timestep tensor of shape [batch_size, 1].\n\n Returns:\n torch.Tensor: Consistency loss tensor of shape [].\n \"\"\"\n jump_target = einops.repeat(torch.tensor([0]), '1 -> (b 1)', b=len(x_t))\n # compute the cmt prediction: jump from t to s\n ctm_pred = self.cmt_wrapper(self.model, x_t, cond, t, s)\n\n # compute the cmt target prediction with ema parameters inside self.target_model: jump from u to s\n # with torch.no_grad():\n ctm_target = self.cmt_wrapper(self.target_model, solver_target, cond, u, s)\n ctm_target_clean = self.cmt_wrapper(self.target_model, ctm_target, cond, s, jump_target)\n\n # transform them into the clean data space by jumping without gradient from s to 0\n # for both predictions and comparing them in the clean data space\n # with torch.no_grad():\n ctm_pred_clean = self.cmt_wrapper(self.target_model, ctm_pred, cond, s, jump_target)\n \n # compute the cmt loss\n cmt_loss = torch.nn.functional.mse_loss(ctm_pred_clean, ctm_target_clean)\n\n return cmt_loss\n\n\n @torch.no_grad() \n def heun_update_step(self, model, x, cond, t1, t2):\n \"\"\"\n Computes a single Heun update step from the Euler sampler with the teacher model\n\n Parameters:\n x (torch.Tensor): The input tensor.\n t1 (torch.Tensor): The initial timestep.\n t2 (torch.Tensor): The final timestep.\n x0 (torch.Tensor): The ground truth value used to compute the Euler update step.\n\n Returns:\n torch.Tensor: The output tensor after taking the Euler update step.\n \"\"\"\n denoised = self.cmt_wrapper(model, x, cond, t1, t1)\n d = (x - denoised) / append_dims(t1, x.ndim)\n \n \n sample_temp = x + d * append_dims(t2 - t1, x.ndim)\n denoised_2 = self.cmt_wrapper(model, sample_temp, cond, t2, t2)\n d_2 = (sample_temp - denoised_2) / append_dims(t2, x.ndim)\n d_prime = (d + d_2) / 2\n samples = x + d_prime * append_dims(t2 - t1, x.ndim)\n \n return samples\n \n @torch.no_grad() \n def ddim_update_step(self, model, x, cond, t1, t2):\n \"\"\"\n Computes a single Heun update step from the DDIM sampler with the teacher model\n\n Parameters:\n x (torch.Tensor): The input tensor.\n t1 (torch.Tensor): The initial timestep.\n t2 (torch.Tensor): The final timestep.\n x0 (torch.Tensor): The ground truth value used to compute the Euler update step.\n\n Returns:\n torch.Tensor: The output tensor after taking the Euler update step.\n \"\"\"\n sigma_fn = lambda t: t.neg().exp()\n t_fn = lambda sigma: sigma.log().neg()\n denoised = self.cmt_wrapper(model, x, cond, t1, t1)\n \n t, t_next = t_fn(t1), t_fn(t2)\n h = append_dims(t_next - t, x.ndim)\n samples = append_dims((sigma_fn(t_next) / sigma_fn(t)), x.ndim) * x - (-h).expm1() * denoised\n \n return samples\n\n def get_diffusion_scalings(self, sigma):\n \"\"\"\n Computes the scaling factors for diffusion training at a given time step sigma.\n\n Args:\n - self: the object instance of the model\n - sigma (float or torch.Tensor): the time step at which to compute the scaling factors\n \n , where self.sigma_data: the data noise level of the diffusion process, set during initialization of the model\n\n Returns:\n - c_skip (torch.Tensor): the scaling 
factor for skipping the diffusion model for the given time step sigma\n - c_out (torch.Tensor): the scaling factor for the output of the diffusion model for the given time step sigma\n - c_in (torch.Tensor): the scaling factor for the input of the diffusion model for the given time step sigma\n\n \"\"\"\n c_skip = self.sigma_data ** 2 / (sigma ** 2 + self.sigma_data ** 2)\n c_out = sigma * self.sigma_data / (sigma ** 2 + self.sigma_data ** 2) ** 0.5\n c_in = 1 / (sigma ** 2 + self.sigma_data ** 2) ** 0.5\n return c_skip, c_out, c_in\n \n @staticmethod\n def mean_flat(tensor):\n \"\"\"\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))\n\n def diffusion_train_step(self, x, cond, train_step, max_steps):\n \"\"\"\n Computes the training loss and performs a single update step for the score-based model.\n\n Args:\n - self: the object instance of the model\n - x (torch.Tensor): the input tensor of shape (batch_size, dim)\n - cond (torch.Tensor): the conditional input tensor of shape (batch_size, cond_dim)\n\n Returns:\n - loss.item() (float): the scalar value of the training loss for this batch\n\n \"\"\"\n self.model.train()\n x = x.to(self.device)\n cond = cond.to(self.device)\n self.optimizer.zero_grad()\n t = self.make_sample_density()(shape=(len(x),), device=self.device)\n x_t = x + torch.randn_like(x) * append_dims(t, x.ndim)\n loss = self.diffusion_loss(x, x_t, cond, t)\n loss.backward()\n self.optimizer.step()\n return loss.item()\n\n \n def diffusion_loss(self, x, x_t, cond, t):\n \"\"\"\n Computes the diffusion training loss for the given model, input, condition, and time.\n\n Args:\n - self: the object instance of the model\n - x (torch.Tensor): the input tensor of shape (batch_size, channels, height, width)\n - cond (torch.Tensor): the conditional input tensor of shape (batch_size, cond_dim)\n - t (torch.Tensor): the time step tensor of shape (batch_size,)\n\n Returns:\n - loss (torch.Tensor): the diffusion training loss tensor of shape ()\n\n The diffusion training loss is computed based on the following equation from Karras et al. 
2022:\n loss = (model_output - target)^2.mean()\n where,\n - noise: a tensor of the same shape as x, containing randomly sampled noise\n - x_t: a tensor of the same shape as x, obtained by adding the noise tensor to x\n - c_skip, c_out, c_in: scaling tensors obtained from the diffusion scalings for the given time step\n - t: a tensor of the same shape as t, obtained by taking the natural logarithm of t and dividing it by 4\n - model_output: the output tensor of the model for the input x_1, condition cond, and time t\n - target: the target tensor for the given input x, scaling tensors c_skip, c_out, c_in, and time t\n \"\"\"\n c_skip, c_out, c_in = [append_dims(x, 2) for x in self.get_diffusion_scalings(t)]\n t = torch.log(t) / 4\n model_output = self.model(x_t * c_in, cond, t, t)\n target = (x - c_skip * x_t) / c_out\n return (model_output - target).pow(2).mean()\n \n def update_teacher_model(self):\n self.teacher_model.load_state_dict(self.target_model.state_dict())\n for param in self.teacher_model.parameters():\n param.requires_grad = False\n \n # next we init the model and target model with the same weights from the teacher\n self.model.load_state_dict(self.teacher_model.state_dict())\n for param in self.model.parameters():\n param.requires_grad = True\n self.target_model.load_state_dict(self.teacher_model.state_dict())\n for param in self.target_model.parameters():\n param.requires_grad = False\n print('Updated Teacher Model and froze all parameters!')\n \n def euler_update_step(self, x, t1, t2, denoised):\n \"\"\"\n Computes a single update step from the Euler sampler with a ground truth value.\n\n Parameters:\n x (torch.Tensor): The input tensor.\n t1 (torch.Tensor): The initial timestep.\n t2 (torch.Tensor): The final timestep.\n x0 (torch.Tensor): The ground truth value used to compute the Euler update step.\n\n Returns:\n torch.Tensor: The output tensor after taking the Euler update step.\n \"\"\"\n d = (x - denoised) / append_dims(t1, x.ndim)\n samples = x + d * append_dims(t2 - t1, x.ndim)\n return samples\n \n def euler_single_step(self, model, x, cond, t1, t2):\n \"\"\"\n \n \"\"\"\n denoised = self.diffusion_wrapper(model, x, cond, t1, t1)\n d = (x - denoised) / append_dims(t1, x.ndim)\n samples = x + d * append_dims(t2 - t1, x.ndim)\n return samples\n\n @torch.no_grad()\n @ema_eval_wrapper\n def sample_singlestep(self, x_shape, cond, return_seq=False):\n \"\"\"\n Samples a single step from the trained consistency trajectory model. \n If return_seq is True, returns a list of sampled tensors, \n otherwise returns a single tensor. \n \n Args:\n - x_shape (tuple): the shape of the tensor to be sampled.\n - cond (torch.Tensor or None): the conditional tensor.\n - return_seq (bool, optional): whether to return a list of sampled tensors (default False).\n \n Returns:\n - (torch.Tensor or list): the sampled tensor(s).\n \"\"\"\n sampled_x = []\n self.model.eval()\n if cond is not None:\n cond = cond.to(self.device)\n\n x = torch.randn_like(x_shape).to(self.device) * self.sigma_max\n sampled_x.append(x)\n x = self.cmt_wrapper(self.model, x, cond, torch.tensor([self.sigma_max]), torch.tensor([0]))\n sampled_x.append(x)\n if return_seq:\n return sampled_x\n else:\n return x\n \n @torch.no_grad()\n @ema_eval_wrapper\n def sample_diffusion_euler(self, x_shape, cond, n_sampling_steps=None, return_seq=False):\n \"\"\"\n Sample from the pre-trained diffusion model using the Euler method. This method is used for sanity checking \n the learned diffusion model. 
It generates a sequence of samples by taking small steps from one sample to the next. \n At each step, it generates a new noise from a normal distribution and combines it with the previous sample \n to get the next sample.\n \n Parameters:\n - x_shape (torch.Tensor): Shape of the input tensor to the model.\n - cond (torch.Tensor): Conditional information for the model.\n - n_sampling_steps (int, optional): Number of sampling steps to take. Defaults to None.\n - return_seq (bool, optional): Whether to return the full sequence of samples or just the final one. \n Defaults to False.\n \n Returns:\n - x (torch.Tensor or List[torch.Tensor]): Sampled tensor from the model. If `return_seq=True`, it returns\n a list of tensors, otherwise it returns a single tensor.\n \"\"\"\n self.model.eval()\n if cond is not None:\n cond = cond.to(self.device)\n x = torch.randn_like(x_shape).to(self.device) * self.sigma_max \n # x = torch.linspace(-4, 4, len(x_shape)).view(len(x_shape), 1).to(self.device)\n\n sampled_x = []\n if n_sampling_steps is None:\n n_sampling_steps = self.n_sampling_steps\n \n # sample the sequence of timesteps\n sigmas = self.sample_seq_timesteps(N=n_sampling_steps, type='exponential')\n sampled_x.append(x)\n # iterate over the remaining timesteps\n for i in trange(len(sigmas) - 1, disable=True):\n denoised = self.diffusion_wrapper(self.model, x, cond, sigmas[i], sigmas[i])\n x = self.euler_update_step(x, sigmas[i], sigmas[i+1], denoised)\n sampled_x.append(x)\n if return_seq:\n return sampled_x\n else:\n return x\n \n @torch.no_grad()\n @ema_eval_wrapper\n def ctm_gamma_sampler(self, x_shape, cond, gamma, n_sampling_steps=None, return_seq=False):\n \"\"\"\n Alg. 3 in the paper of CTM (page 22)\n \"\"\"\n self.model.eval()\n if cond is not None:\n cond = cond.to(self.device)\n x = torch.randn_like(x_shape).to(self.device) * self.sigma_max\n # x = torch.linspace(-4, 4, len(x_shape)).view(len(x_shape), 1).to(self.device)\n\n sampled_x = []\n if n_sampling_steps is None:\n n_sampling_steps = self.n_sampling_steps\n \n # sample the sequence of timesteps\n sigmas = self.sample_seq_timesteps(N=n_sampling_steps, type='exponential')\n sampled_x.append(x)\n # iterate over the remaining timesteps\n for i in trange(len(sigmas) - 1, disable=True):\n # get thenew sigma value \n sigma_hat = sigmas[i+1] * torch.sqrt(1 - gamma ** 2)\n # get the denoised value\n x_t_gamma = self.cmt_wrapper(self.model, x, cond, sigmas[i], sigma_hat)\n \n if sigmas[i + 1] > 0:\n x = x_t_gamma + gamma * sigmas[i+1] * torch.randn_like(x_shape).to(self.device)\n \n sampled_x.append(x)\n if return_seq:\n return sampled_x\n else:\n return x\n\n def sample_seq_timesteps(self, N=100, type='karras'):\n \"\"\"\n Generates a sequence of N timesteps for the given type.\n\n Args:\n - self: the object instance of the model\n - N (int): the number of timesteps to generate\n - type (str): the type of sequence to generate, either 'karras', 'linear', or 'exponential'\n\n Returns:\n - t (torch.Tensor): the generated sequence of timesteps of shape (N,)\n\n The method generates a sequence of timesteps for the given type using one of the following functions:\n - get_sigmas_karras: a function that generates a sequence of timesteps using the Karras et al. 
schedule\n - get_sigmas_linear: a function that generates a sequence of timesteps linearly spaced between sigma_min and sigma_max\n - get_sigmas_exponential: a function that generates a sequence of timesteps exponentially spaced between sigma_min and sigma_max\n where,\n - self.sigma_min, self.sigma_max: the minimum and maximum timesteps, set during initialization of the model\n - self.rho: the decay rate for the Karras et al. schedule, set during initialization of the model\n - self.device: the device on which to generate the timesteps, set during initialization of the model\n\n \"\"\"\n if type == 'karras':\n t = get_sigmas_karras(N, self.sigma_min, self.sigma_max, self.rho, self.device)\n elif type == 'linear':\n t = get_sigmas_linear(N, self.sigma_min, self.sigma_max, self.device)\n elif type == 'exponential':\n t = get_sigmas_exponential(N, self.sigma_min, self.sigma_max, self.device)\n else:\n raise NotImplementedError('Chosen Scheduler is implemented!')\n return t\n \n def make_sample_density(self):\n \"\"\"\n Returns a function that generates random timesteps based on the chosen sample density.\n\n Args:\n - self: the object instance of the model\n\n Returns:\n - sample_density_fn (callable): a function that generates random timesteps\n\n The method returns a callable function that generates random timesteps based on the chosen sample density.\n The available sample densities are:\n - 'lognormal': generates random timesteps from a log-normal distribution with mean and standard deviation set\n during initialization of the model also used in Karras et al. (2022)\n - 'loglogistic': generates random timesteps from a log-logistic distribution with location parameter set to the\n natural logarithm of the sigma_data parameter and scale and range parameters set during initialization\n of the model\n - 'loguniform': generates random timesteps from a log-uniform distribution with range parameters set during\n initialization of the model\n - 'uniform': generates random timesteps from a uniform distribution with range parameters set during initialization\n of the model\n - 'v-diffusion': generates random timesteps using the Variational Diffusion sampler with range parameters set during\n initialization of the model\n - 'discrete': generates random timesteps from the noise schedule using the exponential density\n - 'split-lognormal': generates random timesteps from a split log-normal distribution with mean and standard deviation\n set during initialization of the model\n \"\"\"\n sd_config = []\n \n if self.sigma_sample_density_type == 'lognormal':\n loc = self.sigma_sample_density_mean # if 'mean' in sd_config else sd_config['loc']\n scale = self.sigma_sample_density_std # if 'std' in sd_config else sd_config['scale']\n return partial(rand_log_normal, loc=loc, scale=scale)\n \n if self.sigma_sample_density_type == 'loglogistic':\n loc = sd_config['loc'] if 'loc' in sd_config else math.log(self.sigma_data)\n scale = sd_config['scale'] if 'scale' in sd_config else 0.5\n min_value = sd_config['min_value'] if 'min_value' in sd_config else self.sigma_min\n max_value = sd_config['max_value'] if 'max_value' in sd_config else self.sigma_max\n return partial(rand_log_logistic, loc=loc, scale=scale, min_value=min_value, max_value=max_value)\n \n if self.sigma_sample_density_type == 'loguniform':\n min_value = sd_config['min_value'] if 'min_value' in sd_config else self.sigma_min\n max_value = sd_config['max_value'] if 'max_value' in sd_config else self.sigma_max\n return partial(rand_log_uniform, 
min_value=min_value, max_value=max_value)\n if self.sigma_sample_density_type == 'uniform':\n return partial(rand_uniform, min_value=self.sigma_min, max_value=self.sigma_max)\n\n if self.sigma_sample_density_type == 'v-diffusion':\n min_value = self.min_value if 'min_value' in sd_config else self.sigma_min\n max_value = sd_config['max_value'] if 'max_value' in sd_config else self.sigma_max\n return partial(rand_v_diffusion, sigma_data=self.sigma_data, min_value=min_value, max_value=max_value)\n if self.sigma_sample_density_type == 'discrete':\n sigmas = self.get_noise_schedule(self.n_sampling_steps, 'exponential')\n return partial(rand_discrete, values=sigmas)\n else:\n raise ValueError('Unknown sample density type')" }, { "identifier": "DataGenerator", "path": "ctm/toy_tasks/data_generator.py", "snippet": "class DataGenerator:\n def __init__(self, dist_type: str):\n self.dist_type = dist_type\n self.func_mapping = {\n \"two_gmm_1D\": (self.two_gmm_1D, self.two_gmm_1D_log_prob),\n \"uneven_two_gmm_1D\": (self.uneven_two_gmm_1D, self.uneven_two_gmm_1D_log_prob),\n \"three_gmm_1D\": (self.three_gmm_1D, self.three_gmm_1D_log_prob),\n \"single_gaussian_1D\": (self.single_gaussian_1D, self.single_gaussian_1D_log_prob),\n }\n if self.dist_type not in self.func_mapping:\n raise ValueError(\"Invalid distribution type\")\n self.sample_func, self.log_prob_func = self.func_mapping[self.dist_type]\n\n def generate_samples(self, num_samples: int):\n \"\"\"\n Generate `num_samples` samples and labels using the `sample_func`.\n \n Args:\n num_samples (int): Number of samples to generate.\n \n Returns:\n Tuple[np.ndarray, np.ndarray]: A tuple of two numpy arrays containing the generated samples and labels.\n \"\"\"\n samples, labels = self.sample_func(num_samples)\n return samples, labels\n \n def compute_log_prob(self, samples, exp: bool = False):\n \"\"\"\n Compute the logarithm of probability density function (pdf) of the given `samples`\n using the `log_prob_func`. If `exp` is True, return exponentiated log probability.\n \n Args:\n samples (np.ndarray): Samples for which pdf is to be computed.\n exp (bool, optional): If True, return exponentiated log probability.\n Default is False.\n \n Returns:\n np.ndarray: Logarithm of probability density function (pdf) of the given `samples`.\n If `exp` is True, exponentiated log probability is returned.\n \"\"\"\n return self.log_prob_func(samples, exp=exp)\n\n @staticmethod\n def two_gmm_1D(num_samples,):\n \"\"\"\n Generates `num_samples` samples from a 1D mixture of two Gaussians with equal weights.\n \n Args:\n num_samples (int): Number of samples to generate.\n\n Returns:\n Tuple[torch.Tensor, torch.Tensor]: A tuple of two torch tensors containing the generated\n samples and binary labels indicating which Gaussian component the sample is from.\n \"\"\"\n g1 = Normal(loc=-1.5, scale=0.3)\n g2 = Normal(loc=1.5, scale=0.3)\n mixture_probs = torch.ones(num_samples) * 0.5\n is_from_g1 = torch.bernoulli(mixture_probs).bool()\n samples = torch.where(is_from_g1, g1.sample((num_samples,)), g2.sample((num_samples,)))\n return samples, is_from_g1.int()\n\n @staticmethod\n def uneven_two_gmm_1D(num_samples, w1=0.7):\n \"\"\"\n Generates `num_samples` samples from a 1D mixture of two Gaussians with weights `w1` and `w2`.\n \n Args:\n num_samples (int): Number of samples to generate.\n w1 (float, optional): Weight of first Gaussian component. 
Default is 0.7.\n\n Returns:\n Tuple[torch.Tensor, torch.Tensor]: A tuple of two torch tensors containing the generated\n samples and binary labels indicating which Gaussian component the sample is from.\n \"\"\"\n g1 = Normal(loc=-1.5, scale=0.3)\n g2 = Normal(loc=1.5, scale=0.2)\n mixture_probs = torch.tensor([w1, 1-w1])\n is_from_g1 = torch.bernoulli(mixture_probs.repeat(num_samples, 1)).view(num_samples, -1).bool().squeeze()\n \n samples_g1 = g1.sample((num_samples, 1))\n samples_g2 = g2.sample((num_samples, 1))\n samples = torch.where(is_from_g1, samples_g1, samples_g2).squeeze()\n\n return samples, is_from_g1.int()\n \n @staticmethod\n def single_gaussian_1D(num_samples):\n \"\"\"\n Generates `num_samples` samples from a 1D Gaussian distribution.\n \n Args:\n num_samples (int): Number of samples to generate.\n\n Returns:\n Tuple[torch.Tensor, torch.Tensor]: A tuple of two torch tensors containing the generated\n samples and binary labels indicating which Gaussian component the sample is from.\n Since there is only one Gaussian component, all labels will be zero.\n \"\"\"\n g1 = Normal(loc=1, scale=0.2)\n samples = g1.sample((num_samples, 1))\n return samples, torch.zeros(num_samples).int()\n\n @staticmethod\n def three_gmm_1D(num_samples):\n \"\"\"\n Generates `num_samples` samples from a 1D mixture of three Gaussians with equal weights.\n \n Args:\n num_samples (int): Number of samples to generate.\n exp (bool, optional): If True, return exponentiated log probability. Default is False.\n \n Returns:\n Tuple[torch.Tensor, torch.Tensor]: A tuple of two torch tensors containing the generated\n samples and integer labels indicating which Gaussian component the sample is from.\n \"\"\"\n g1 = Normal(loc=-1.5, scale=0.2)\n g2 = Normal(loc=0, scale=0.2)\n g3 = Normal(loc=1.5, scale=0.2)\n mixture_probs = torch.ones(3) / 3\n component_assignments = torch.multinomial(mixture_probs, num_samples, replacement=True)\n samples = torch.zeros(num_samples, 1)\n \n g1_mask = (component_assignments == 0)\n g2_mask = (component_assignments == 1)\n g3_mask = (component_assignments == 2)\n \n samples[g1_mask] = g1.sample((g1_mask.sum(), )).view(-1, 1)\n samples[g2_mask] = g2.sample((g2_mask.sum(), )).view(-1, 1)\n samples[g3_mask] = g3.sample((g3_mask.sum(), )).view(-1, 1)\n \n return samples, component_assignments.int()\n\n @staticmethod\n def two_gmm_1D_log_prob(z, exp=False):\n \"\"\"\n Computes the logarithm of the probability density function (pdf) of a 1D mixture of two Gaussians\n with equal weights at the given points `z`.\n \n Args:\n z (torch.Tensor): Points at which to compute the pdf.\n exp (bool, optional): If True, return exponentiated log probability. Default is False.\n \n Returns:\n torch.Tensor: Logarithm of probability density function (pdf) of a 1D mixture of two Gaussians\n with equal weights at the given points `z`. If `exp` is True, exponentiated log probability\n is returned.\n \"\"\"\n g1 = Normal(loc=-1.5, scale=0.3)\n g2 = Normal(loc=1.5, scale=0.3)\n f = torch.log(0.5 * (g1.log_prob(z).exp() + g2.log_prob(z).exp()))\n if exp:\n return torch.exp(f)\n else:\n return f\n \n @staticmethod\n def uneven_two_gmm_1D_log_prob(z, w1=0.7, exp=False):\n \"\"\"\n Computes the logarithm of the probability density function (pdf) of a 1D mixture of two Gaussians\n with weights `w1` and `w2` at the given points `z`.\n \n Args:\n z (torch.Tensor): Points at which to compute the pdf.\n w1 (float, optional): Weight of first Gaussian component. 
Default is 0.7.\n exp (bool, optional): If True, return exponentiated log probability. Default is False.\n \n Returns:\n torch.Tensor: Logarithm of probability density function (pdf) of a 1D mixture of two Gaussians\n with weights `w1` and `w2` at the given points `z`. If `exp` is True, exponentiated log probability\n is returned.\n \"\"\"\n g1 = Normal(loc=-1.5, scale=0.3)\n g2 = Normal(loc=1.5, scale=0.2)\n f = torch.log(w1 * g1.log_prob(z).exp() + (1 - w1) * g2.log_prob(z).exp())\n if exp:\n return torch.exp(f)\n else:\n return f\n\n @staticmethod\n def three_gmm_1D_log_prob(z, exp=False):\n \"\"\"\n Computes the logarithm of the probability density function (pdf) of a 1D mixture of three Gaussians\n with equal weights at the given points `z`.\n \n Args:\n z (torch.Tensor): Points at which to compute the pdf.\n exp (bool, optional): If True, return exponentiated log probability. Default is False.\n \n Returns:\n torch.Tensor: Logarithm of probability density function (pdf) of a 1D mixture of three Gaussians\n with equal weights at the given points `z`. If `exp` is True, exponentiated log probability\n is returned.\n \"\"\"\n g1 = Normal(loc=-1.5, scale=0.2)\n g2 = Normal(loc=0, scale=0.2)\n g3 = Normal(loc=1.5, scale=0.2)\n f = torch.log(1/3 * (g1.log_prob(z).exp() + g2.log_prob(z).exp() + g3.log_prob(z).exp()))\n if exp:\n return torch.exp(f)\n else:\n return f\n\n @staticmethod\n def single_gaussian_1D_log_prob(z, exp=False):\n \"\"\"\n Computes the logarithm of the probability density function (pdf) of a 1D Gaussian\n distribution at the given points `z`.\n \n Args:\n z (torch.Tensor): Points at which to compute the pdf.\n exp (bool, optional): If True, return exponentiated log probability. Default is False.\n \n Returns:\n torch.Tensor: Logarithm of probability density function (pdf) of a 1D Gaussian\n distribution at the given points `z`. If `exp` is True, exponentiated log probability\n is returned.\n \"\"\"\n g = Normal(loc=1, scale=0.2)\n f = g.log_prob(z)\n if exp:\n return torch.exp(f)\n else:\n return f" }, { "identifier": "plot_main_figure", "path": "ctm/visualization/vis_utils.py", "snippet": "def plot_main_figure(\n fn, \n model, \n n_samples, \n train_epochs, \n sampling_method='euler',\n x_range=[-4, 4], \n n_sampling_steps = 10,\n save_path='/home/moritz/code/cm_1D_Toy_Task/plots'\n): \n \"\"\"\n Plot the main figure for the given model and sampling method.\n Args:\n fn (callable): Target function to be plotted.\n model (object): Model to be used for sampling (ConsistencyModel or Beso).\n n_samples (int): Number of samples to be taken.\n train_epochs (int): Number of training epochs.\n sampling_method (str, optional): Method to be used for sampling ('multistep', 'onestep', or 'euler'). Defaults to False.\n x_range (list, optional): Range of x values to be plotted. Defaults to [-5, 5].\n n_sampling_steps (int, optional): Number of sampling steps. Defaults to 10.\n save_path (str, optional): Directory to save the plot. 
Defaults to '/home/moritz/code/cm_1D_Toy_Task/plots'.\n\n Raises ValueError: If the sampling_method is not one of the specified options ('multistep', 'onestep', or 'euler').\n \"\"\"\n test_samples = get_test_samples(model, n_samples, sampling_method, n_sampling_steps)\n test_samples = [x.detach().cpu().numpy() for x in test_samples]\n test_samples = np.stack(test_samples, axis=1)\n\n x_test = np.linspace(x_range[0], x_range[1], n_samples)\n target_fn = fn(torch.tensor(x_test), exp=True)\n\n fig, (ax1, ax2, ax3) = plt.subplots(3, 1, figsize=(10, 10), sharex=True)\n ax1.set_xlim(*x_range)\n ax2.set_xlim(*x_range)\n ax3.set_xlim(*x_range)\n\n # Plot target distribution\n ax1.plot(x_test, target_fn, color='black', label='Target Distribution')\n\n # Plot predicted distribution\n kde = gaussian_kde(test_samples[:, -1, 0], bw_method=0.1)\n predicted_distribution = kde(x_test)\n ax1.plot(x_test, predicted_distribution, label='Predicted Distribution')\n\n # Create a LineCollection to show colors on the predicted distribution line\n points = np.array([x_test, predicted_distribution]).T.reshape(-1, 1, 2)\n segments = np.concatenate([points[:-1], points[1:]], axis=1)\n lc = LineCollection(segments, cmap='viridis', norm=plt.Normalize(predicted_distribution.min(), predicted_distribution.max()))\n lc.set_array(predicted_distribution)\n lc.set_linewidth(2)\n\n ax1.add_collection(lc)\n stepsize = np.linspace(0, 1, model.n_sampling_steps)\n # stepsize = cm.get_noise_schedule(model.n_sampling_steps, noise_schedule_type='exponential').flip(0)\n # ax2.set_ylim(-0.1, 1.1)\n if sampling_method == 'onestep':\n n_sampling_steps = 1\n stepsize = np.linspace(0, 1, 2)\n ax2.quiver(test_samples[:, 0].reshape(-1),\n stepsize[0] * np.ones(n_samples),\n test_samples[:, 1].reshape(-1) - test_samples[:, 0].reshape(-1),\n stepsize[1] * np.ones(n_samples) - stepsize[0] * np.ones(n_samples),\n angles='xy', scale_units='xy', scale=1,\n width=0.001\n )\n else:\n n_sampling_steps = n_sampling_steps\n for i in range(1, n_sampling_steps):\n ax2.quiver(test_samples[:, i - 1].reshape(-1),\n stepsize[i - 1] * np.ones(n_samples),\n test_samples[:, i].reshape(-1) - test_samples[:, i-1].reshape(-1),\n stepsize[i] * np.ones(n_samples) - stepsize[i - 1] * np.ones(n_samples),\n angles='xy', scale_units='xy', scale=1,\n width=0.001\n )\n ax2.set_yticks([stepsize.min(), stepsize.max()])\n ax2.set_ylim(stepsize.min(), stepsize.max())\n \n mu = 0 # mean\n sigma = model.sigma_max # standard deviation\n\n # Compute the PDF values for x_test\n prob_samples = norm.pdf(x_test, loc=mu, scale=sigma)\n # Create a LineCollection to show colors on the normal distribution line\n points = np.array([x_test, prob_samples]).T.reshape(-1, 1, 2)\n segments = np.concatenate([points[:-1], points[1:]], axis=1)\n lc = LineCollection(segments, cmap='viridis', norm=plt.Normalize(prob_samples.min(), prob_samples.max()))\n lc.set_array(prob_samples)\n lc.set_linewidth(2)\n\n ax3.add_collection(lc)\n ax3.set_ylim(0, 0.5)\n\n # ... (previous code remains unchanged)\n ax2.set_xticks([])\n ax1.set_xticks([])\n ax1.set_yticks([])\n ax3.set_yticks([])\n ax2.set_yticklabels(['T', '0'])\n ax2.tick_params(axis='y', labelsize=16)\n # ax2.set_yticks('log')\n plt.subplots_adjust(hspace=0)\n plt.savefig(save_path + '/cm_' + sampling_method + f'_epochs_{train_epochs}.png', bbox_inches='tight', pad_inches=0.1) \n \n print('Plot saved!')" } ]
from tqdm import tqdm
from ctm.ctm import ConsistencyTrajectoryModel
from ctm.toy_tasks.data_generator import DataGenerator
from ctm.visualization.vis_utils import plot_main_figure
11,613
""" Discrete consistency distillation training of the consistency model on a toy task. We train a diffusion model and the consistency model at the same time and iteratively update the weights of the consistency model and the diffusion model. """ if __name__ == "__main__": device = 'cpu' n_sampling_steps = 10 use_pretraining = True cm = ConsistencyTrajectoryModel( data_dim=1, cond_dim=1, sampler_type='euler', lr=4e-4, sigma_data=0.5, sigma_min=0.05, solver_type='heun', sigma_max=2, n_discrete_t=18, conditioned=False, diffusion_lambda= 1, device=device, rho=7, ema_rate=0.999, use_teacher=use_pretraining, ) train_epochs = 2002 # chose one of the following toy tasks: 'three_gmm_1D' 'uneven_two_gmm_1D' 'two_gmm_1D' 'single_gaussian_1D'
""" Discrete consistency distillation training of the consistency model on a toy task. We train a diffusion model and the consistency model at the same time and iteratively update the weights of the consistency model and the diffusion model. """ if __name__ == "__main__": device = 'cpu' n_sampling_steps = 10 use_pretraining = True cm = ConsistencyTrajectoryModel( data_dim=1, cond_dim=1, sampler_type='euler', lr=4e-4, sigma_data=0.5, sigma_min=0.05, solver_type='heun', sigma_max=2, n_discrete_t=18, conditioned=False, diffusion_lambda= 1, device=device, rho=7, ema_rate=0.999, use_teacher=use_pretraining, ) train_epochs = 2002 # chose one of the following toy tasks: 'three_gmm_1D' 'uneven_two_gmm_1D' 'two_gmm_1D' 'single_gaussian_1D'
data_manager = DataGenerator('three_gmm_1D')
1
2023-11-07 15:30:11+00:00
16k
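The row above, taken from mbreuss/consistency_trajectory_models_toy_task, ends with its level bucket (16k). The field names suggest a retrieval-augmented completion setup, so a hedged sketch of turning one such row into a prompt is given below; the comment separators and the ordering of retrieved snippets before the in-file prefix are assumptions for illustration, not a prescribed benchmark format.

```python
def build_prompt(row: dict) -> str:
    # Hypothetical prompt layout: retrieved context snippets first, then the
    # in-file prefix from 'cropped_code' (prepend 'import_statement' as well
    # if the prefix does not already contain the imports).
    retrieved = "\n".join(
        f"# From {c['path']} ({c['identifier']})\n{c['snippet']}"
        for c in row["context"]
    )
    # A model would be asked to continue after the cropped prefix; its first
    # generated line can then be compared against row["next_line"].
    return f"{retrieved}\n\n{row['cropped_code']}\n"
```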
awslabs/optimizing-multitask-training-through-dynamic-pipelines
dynapipe/schedule_opt/execution_planner.py
[ { "identifier": "ProfileBasedCostModelWithRC", "path": "dynapipe/data_opt/cost_models.py", "snippet": "class ProfileBasedCostModelWithRC(object):\n \"\"\"\n Wrapper class for multiple ProfileBasedCostModel objects, one for each\n tensor parallel degree and recomputation method.\n \"\"\"\n\n def __init__(\n self,\n profile_paths=None,\n _serialized_cms: Optional[Dict[Tuple[int, str], bytes]] = None,\n ) -> None:\n self.cost_models: dict[str, ProfileBasedCostModel] = {}\n if _serialized_cms is not None:\n for cm_key, serialized_cm in _serialized_cms.items():\n self.cost_models[cm_key] = ProfileBasedCostModel.deserialize(\n serialized_cm\n )\n return\n if not isinstance(profile_paths, list):\n # profile_paths is a dir\n assert os.path.isdir(profile_paths), (\n f\"Profile path {profile_paths} is not a directory \"\n \"or list of paths\"\n )\n profile_paths = [\n os.path.join(profile_paths, x)\n for x in os.listdir(profile_paths)\n if x.startswith(\"microbench\") and x.endswith(\"txt\")\n ]\n # separate paths by cost model key (tp_size, rc_type)\n self.per_key_profile_paths = defaultdict(list)\n for path in profile_paths:\n cm_key = self._parse_cm_key(path)\n self.per_key_profile_paths[cm_key].append(path)\n for cm_key, paths in self.per_key_profile_paths.items():\n self.cost_models[cm_key] = ProfileBasedCostModel(paths)\n\n def _parse_cm_key(self, filename):\n basename = os.path.basename(filename)\n if \"rc_full_uniform\" in basename:\n rc_type = \"full\"\n elif \"rc_selective\" in basename:\n rc_type = \"selective\"\n else:\n rc_type = \"none\"\n tp_size = int(basename.split(\"_\")[1][2:])\n return tp_size, rc_type\n\n def _check_valid_cm_key(self, cm_key):\n assert (\n cm_key in self.cost_models\n ), f\"Key {cm_key} not recorded in profile.\"\n\n def is_valid_stage(self, tp_size, rc_type, stage):\n self._check_valid_cm_key((tp_size, rc_type))\n return self.cost_models[(tp_size, rc_type)].is_valid_stage(stage)\n\n def valid_stages(self, tp_size, rc_type):\n self._check_valid_cm_key((tp_size, rc_type))\n return self.cost_models[(tp_size, rc_type)].valid_stages()\n\n def supported_sequence_lengths(self, tp_size, rc_type, stage):\n self._check_valid_cm_key((tp_size, rc_type))\n return self.cost_models[(tp_size, rc_type)].supported_sequence_lengths(\n stage\n )\n\n def get_cost(\n self,\n tp_size,\n rc_type,\n stage,\n seq_len,\n mbs,\n ):\n \"\"\"Select the corresponding cost model based on TP degree and\n recomputation type and get the computation cost.\n \"\"\"\n self._check_valid_cm_key((tp_size, rc_type))\n return self.cost_models[(tp_size, rc_type)].get_cost(\n stage, seq_len, mbs\n )\n\n def get_stored_activation(self, tp_size, rc_type, stage, seq_len, mbs):\n \"\"\"Select the corresponding cost model based on TP degree and\n recomputation type and get the stored activation.\n \"\"\"\n self._check_valid_cm_key((tp_size, rc_type))\n return self.cost_models[(tp_size, rc_type)].get_stored_activation(\n stage, seq_len, mbs\n )\n\n def get_peak_activation(self, tp_size, rc_type, stage, seq_len, mbs):\n \"\"\"Select the corresponding cost model based on TP degree and\n recomputation type and get the peak activation.\n \"\"\"\n self._check_valid_cm_key((tp_size, rc_type))\n return self.cost_models[(tp_size, rc_type)].get_peak_activation(\n stage, seq_len, mbs\n )\n\n def get_model_state(\n self,\n tp_size,\n rc_type,\n stage,\n n_shards=1,\n zero_stage=0,\n param_factor=None,\n ):\n \"\"\"Select the corresponding cost model based on TP degree and\n recomputation type and get the model state.\n 
\"\"\"\n self._check_valid_cm_key((tp_size, rc_type))\n return self.cost_models[(tp_size, rc_type)].get_model_state(\n stage,\n n_shards=n_shards,\n zero_stage=zero_stage,\n param_factor=param_factor,\n )\n\n def get_raw_cost_model(self, tp_size, rc_type):\n \"\"\"Get the raw cost model for the given TP degree and recomputation\n type.\n \"\"\"\n self._check_valid_cm_key((tp_size, rc_type))\n return self.cost_models[(tp_size, rc_type)]\n\n def save(self, path):\n serialized_dict = {}\n for cm_key, cost_model in self.cost_models.items():\n serialized_dict[cm_key] = cost_model.serialize()\n with open(path, \"wb\") as f:\n pickle.dump(serialized_dict, f)\n\n @classmethod\n def load(cls, path):\n with open(path, \"rb\") as f:\n serialized_dict = pickle.load(f)\n return cls(_serialized_cms=serialized_dict)" }, { "identifier": "DynaPipeCluster", "path": "dynapipe/model.py", "snippet": "class DynaPipeCluster:\n def __init__(\n self,\n device2node: Dict[int, int],\n memory_limits: List[int],\n intra_node_bw_gbps: float,\n inter_node_bw_gbps: float,\n intra_node_lat_us: float,\n inter_node_lat_us: float,\n ) -> None:\n # memory_limits is in MB (megabytes)\n # bw is in Gbps (gigabits per second)\n # lat is in us (microseconds)\n devices = set()\n nodes = set()\n for device, node in device2node.items():\n devices.add(device)\n nodes.add(node)\n self.n_devices = len(devices)\n self.n_nodes = len(nodes)\n self.device2node = device2node\n flattened_devices = [device for device in device2node.keys()]\n assert list(sorted(list(set(flattened_devices)))) == list(\n range(self.n_devices)\n ), \"Device ids must be contiguous and start at 0\"\n assert len(memory_limits) == self.n_devices, (\n \"Expected memory limits for each of the \"\n f\"{self.n_devices} devices, but got \"\n f\"{len(memory_limits)} numbers.\"\n )\n self.memory_limits = memory_limits\n self.intra_node_bw = intra_node_bw_gbps\n self.inter_node_bw = inter_node_bw_gbps\n self.intra_node_lat = intra_node_lat_us\n self.inter_node_lat = inter_node_lat_us\n\n def _get_bw(self, dev0, dev1):\n if self.device2node[dev0] == self.device2node[dev1]:\n return self.intra_node_bw\n else:\n return self.inter_node_bw\n\n def _get_lat(self, dev0, dev1):\n if self.device2node[dev0] == self.device2node[dev1]:\n return self.intra_node_lat\n else:\n return self.inter_node_lat\n\n def get_comm_time(self, megabytes, dev0, dev1):\n if dev0 == dev1:\n return 0\n return self._get_lat(dev0, dev1) + 1e6 * (\n megabytes * 8 / 1e3\n ) / self._get_bw(dev0, dev1)\n\n def get_memory_limit(self, dev):\n return self.memory_limits[dev]\n\n def to_json(self) -> dict:\n return {\n \"n_devices\": self.n_devices,\n \"n_nodes\": self.n_nodes,\n \"device2node\": self.device2node,\n \"memory_limits\": self.memory_limits,\n \"intra_node_bw\": self.intra_node_bw,\n \"inter_node_bw\": self.inter_node_bw,\n \"intra_node_lat\": self.intra_node_lat,\n \"inter_node_lat\": self.inter_node_lat,\n }\n\n def dumps(self) -> str:\n return json.dumps(self.to_json())\n\n @staticmethod\n def loads(json_str: str) -> \"DynaPipeCluster\":\n return DynaPipeCluster.from_json(json.loads(json_str))\n\n @staticmethod\n def from_json(json_dict):\n converted_device2node = {\n int(k): int(v) for k, v in json_dict[\"device2node\"].items()\n }\n json_dict[\"device2node\"] = converted_device2node\n cluster = DynaPipeCluster(\n json_dict[\"device2node\"],\n json_dict[\"memory_limits\"],\n json_dict[\"intra_node_bw\"],\n json_dict[\"inter_node_bw\"],\n json_dict[\"intra_node_lat\"],\n 
json_dict[\"inter_node_lat\"],\n )\n return cluster" }, { "identifier": "DynaPipeMicrobatch", "path": "dynapipe/model.py", "snippet": "class DynaPipeMicrobatch:\n # This class is used to represent a microbatch for DynaPipe, which can be\n # converted to/from a model spec json file. It is used to supply\n # arguments to the micro-batch generator and scheduler.\n def __init__(self, name) -> None:\n self.name = name\n # in DynaPipeModel, \"layer\" refers to an actual layer in the model\n self.n_layers = None\n self.fw_exec_times = []\n self.bw_exec_times = []\n self.fw_comm_size = []\n self.bw_comm_size = []\n self.model_state_memory = []\n self.model_stored_activation_memory = []\n self.model_peak_activation_memory = []\n self.activation_shapes = []\n\n def _check_or_set_nlayers(self, n_layers, debug_name, minus_one=False):\n expected_value = self.n_layers if not minus_one else self.n_layers - 1\n if self.n_layers is not None:\n assert (\n n_layers == expected_value\n ), \"\"\"{} must have length n_layers {} ({}),\n but got length {}\"\"\".format(\n debug_name,\n \"- 1\" if minus_one else \"\",\n expected_value,\n n_layers,\n )\n else:\n self.n_layers = n_layers\n\n def set_fw_exec_times(self, fw_exec_times: List[float]) -> None:\n # time is in us (microseconds)\n self._check_or_set_nlayers(len(fw_exec_times), \"fw_exec_times\")\n self.fw_exec_times = fw_exec_times\n\n def set_bw_exec_times(self, bw_exec_times: List[float]) -> None:\n # time is in us (microseconds)\n self._check_or_set_nlayers(len(bw_exec_times), \"bw_exec_times\")\n self.bw_exec_times = bw_exec_times\n\n def set_fw_comm_size(self, fw_comm_size: List[float]) -> None:\n # size is in mega bytes (MB)\n self._check_or_set_nlayers(\n len(fw_comm_size), \"fw_comm_size\", minus_one=True\n )\n self.fw_comm_size = fw_comm_size\n\n def set_bw_comm_size(self, bw_comm_size: List[float]) -> None:\n # size is in mega bytes (MB)\n self._check_or_set_nlayers(\n len(bw_comm_size), \"bw_comm_size\", minus_one=True\n )\n self.bw_comm_size = bw_comm_size\n\n def set_model_state_memory(self, model_state_memory: List[float]) -> None:\n # size is in MB (megabytes)\n self._check_or_set_nlayers(\n len(model_state_memory), \"model_state_memory\"\n )\n self.model_state_memory = model_state_memory\n\n def set_model_stored_activation_memory(\n self, model_stored_activation_memory: List[float]\n ) -> None:\n # size is in MB (megabytes)\n self._check_or_set_nlayers(\n len(model_stored_activation_memory),\n \"model_stored_activation_memory\",\n )\n self.model_stored_activation_memory = model_stored_activation_memory\n\n def set_model_peak_activation_memory(\n self, model_peak_activation_memory: List[float]\n ) -> None:\n # size is in MB (megabytes)\n self._check_or_set_nlayers(\n len(model_peak_activation_memory), \"model_peak_activation_memory\"\n )\n self.model_peak_activation_memory = model_peak_activation_memory\n\n def set_activation_shapes(\n self, activation_shapes: List[List[Tuple[int, int, int]]]\n ) -> None:\n # activation_shapes: outer list: layer, inner list: output activations\n # Note that for decoders, the activation should be the\n # output of encoder + decoder, since encoder output is needed for\n # all decoder layers.\n self._check_or_set_nlayers(len(activation_shapes), \"activation_shapes\")\n # make shapes immutable\n activation_shapes = [tuple(x) for x in activation_shapes]\n self.activation_shapes = activation_shapes\n\n def check_all_set(self):\n assert self.n_layers is not None\n assert len(self.fw_exec_times) == self.n_layers\n 
assert len(self.bw_exec_times) == self.n_layers\n assert len(self.fw_comm_size) == self.n_layers - 1\n assert len(self.bw_comm_size) == self.n_layers - 1\n assert len(self.model_state_memory) == self.n_layers\n assert len(self.model_stored_activation_memory) == self.n_layers\n assert len(self.model_peak_activation_memory) == self.n_layers\n assert len(self.activation_shapes) == self.n_layers\n\n def to_json(self) -> dict:\n return {\n \"name\": self.name,\n \"n_layers\": self.n_layers,\n \"fw_exec_times\": self.fw_exec_times,\n \"bw_exec_times\": self.bw_exec_times,\n \"fw_comm_size\": self.fw_comm_size,\n \"bw_comm_size\": self.bw_comm_size,\n \"model_state_memory\": self.model_state_memory,\n \"model_stored_activation_memory\": self.model_stored_activation_memory, # noqa: E501\n \"model_peak_activation_memory\": self.model_peak_activation_memory,\n \"activation_shapes\": self.activation_shapes,\n }\n\n @staticmethod\n def from_json(json_dict):\n microbatch = DynaPipeMicrobatch(json_dict[\"name\"])\n microbatch.set_fw_exec_times(json_dict[\"fw_exec_times\"])\n microbatch.set_bw_exec_times(json_dict[\"bw_exec_times\"])\n microbatch.set_fw_comm_size(json_dict[\"fw_comm_size\"])\n microbatch.set_bw_comm_size(json_dict[\"bw_comm_size\"])\n microbatch.set_model_state_memory(json_dict[\"model_state_memory\"])\n microbatch.set_model_stored_activation_memory(\n json_dict[\"model_stored_activation_memory\"]\n )\n microbatch.set_model_peak_activation_memory(\n json_dict[\"model_peak_activation_memory\"]\n )\n microbatch.set_activation_shapes(json_dict[\"activation_shapes\"])\n return microbatch" }, { "identifier": "DynaPipeMinibatch", "path": "dynapipe/model.py", "snippet": "class DynaPipeMinibatch:\n # This class represents a list of microbatches (a minibatch)\n def __init__(\n self, name: str, microbatches: List[DynaPipeMicrobatch] = None\n ) -> None:\n self.name = name\n self.microbatches = microbatches if microbatches else []\n self.n_layers = None if not microbatches else microbatches[0].n_layers\n\n def add_microbatch(self, microbatch: DynaPipeMicrobatch) -> None:\n if self.n_layers is None:\n self.n_layers = microbatch.n_layers\n else:\n assert (\n self.n_layers == microbatch.n_layers\n ), \"All microbatches must have the same number of layers\"\n self.microbatches.append(microbatch)\n\n def __str__(self):\n return (\n \"(\"\n + self.name\n + \", \"\n + str(len(self.microbatches))\n + \" microbatches)\"\n )\n\n @staticmethod\n def from_json(json_dict):\n minibatch = DynaPipeMinibatch(json_dict[\"name\"])\n json_list = json_dict[\"microbatches\"]\n for json_dict in json_list:\n microbatch = DynaPipeMicrobatch.from_json(json_dict)\n minibatch.add_microbatch(microbatch)\n return minibatch\n\n def to_json(self) -> dict:\n return {\n \"name\": self.name,\n \"microbatches\": [\n microbatch.to_json() for microbatch in self.microbatches\n ],\n }\n\n def permute_microbatches(self, permutation: List[int]) -> None:\n assert len(permutation) == len(self.microbatches)\n permuted_microbatches = [self.microbatches[i] for i in permutation]\n return DynaPipeMinibatch(self.name, permuted_microbatches)" }, { "identifier": "TransformerModelSpec", "path": "dynapipe/model.py", "snippet": "class TransformerModelSpec:\n # Default setting:\n # * mlp_hidden_size = 4x hidden_dim\n # * kv_channels = hidden_dim // num_attn_heads\n # * use FP16 mixed precision training with Adam optimizer.\n n_encoder_layers: int\n n_decoder_layers: int\n hidden_dim: int\n num_attn_heads: int\n mlp_hidden_dim: Union[None, int] = None\n 
kv_channels: Union[None, int] = None\n bytes_per_element: int = 2\n optimizer_state_multiplier: int = 12\n\n def __post_init__(self):\n if self.mlp_hidden_dim is None:\n # if not specified, use the 4x hidden dim as it is the norm\n self.mlp_hidden_dim = self.hidden_dim * 4\n if self.kv_channels is None:\n # if not specified, use the hidden_dim // num_attn_heads\n assert self.hidden_dim % self.num_attn_heads == 0\n self.kv_channels = self.hidden_dim // self.num_attn_heads\n\n def serialize(self) -> bytes:\n def _serialize_int(x: int):\n return x.to_bytes(4, \"little\")\n\n return b\"\".join(\n [\n _serialize_int(x)\n for x in [\n self.n_encoder_layers,\n self.n_decoder_layers,\n self.hidden_dim,\n self.num_attn_heads,\n self.mlp_hidden_dim,\n self.kv_channels,\n self.bytes_per_element,\n self.optimizer_state_multiplier,\n ]\n ]\n )\n\n @classmethod\n def deserialize(cls, data: bytes):\n def _deserialize_int(data: bytes):\n return int.from_bytes(data, \"little\")\n\n return cls(\n *[_deserialize_int(data[i * 4 : (i + 1) * 4]) for i in range(8)]\n )" }, { "identifier": "get_simulator", "path": "dynapipe/model.py", "snippet": "def get_simulator(\n scheduler_type: str,\n dpp_minibatch: DynaPipeMinibatch,\n dpp_cluster: DynaPipeCluster,\n device_assignment: List[int],\n include_memory_stats: bool = True,\n memory_limit: float = float(\"inf\"),\n max_otf_microbatches: Union[None, int] = None,\n logger=None,\n) -> Scheduler:\n assert len(device_assignment) == dpp_minibatch.n_layers, (\n \"Device assignment must be specified for each layer. \"\n \"Expected {} layers, but got {}.\".format(\n dpp_minibatch.n_layers, len(device_assignment)\n )\n )\n bw_device_assignment = list(reversed(device_assignment))\n microbatch_specs = []\n for microbatch in dpp_minibatch.microbatches:\n fw_times = microbatch.fw_exec_times\n fw_comm_times = [\n dpp_cluster.get_comm_time(\n microbatch.fw_comm_size[i],\n device_assignment[i],\n device_assignment[i + 1],\n )\n for i in range(microbatch.n_layers - 1)\n ]\n bw_times = microbatch.bw_exec_times\n bw_comm_times = [\n dpp_cluster.get_comm_time(\n microbatch.bw_comm_size[i],\n bw_device_assignment[i],\n bw_device_assignment[i + 1],\n )\n for i in range(microbatch.n_layers - 1)\n ]\n fw_model_state = microbatch.model_state_memory\n fw_stored_activation = microbatch.model_stored_activation_memory\n fw_peak_activation = microbatch.model_peak_activation_memory\n microbatch_spec = SchedulerMicrobatchSpec(\n name=microbatch.name,\n fw_times=fw_times,\n fw_comm_times=fw_comm_times,\n fw_stored_activation_size=fw_stored_activation,\n fw_peak_activation_size=fw_peak_activation,\n bw_times=bw_times,\n bw_comm_times=bw_comm_times,\n activation_shapes=microbatch.activation_shapes,\n )\n microbatch_specs.append(microbatch_spec)\n minibatch_spec = SchedulerMinibatchSpec(\n dpp_minibatch.name,\n microbatch_specs,\n device_assignment,\n fw_model_state,\n )\n scheduler_class = get_scheduler_class(scheduler_type)\n if max_otf_microbatches is not None:\n assert scheduler_type in [\"cyclic\", \"wait-free-cyclic\"], (\n \"max_otf_microbatches is only supported for cyclic \"\n \"and wait-free-cyclic schedulers. 
\"\n \"Got scheduler_type={}\".format(scheduler_type)\n )\n simulator = scheduler_class(\n minibatch_spec,\n include_memory_stats=include_memory_stats,\n memory_limit=memory_limit,\n max_otf_microbatches=max_otf_microbatches,\n logger=logger,\n )\n else:\n simulator = scheduler_class(\n minibatch_spec,\n include_memory_stats=include_memory_stats,\n memory_limit=memory_limit,\n logger=logger,\n )\n return simulator" }, { "identifier": "InstructionOptimizer", "path": "dynapipe/pipe/instruction_optimizer.py", "snippet": "class InstructionOptimizer:\n \"\"\"\n Inject buffer allocation/free and communication finish\n ops into the pipeline instructions.\n \"\"\"\n\n def __init__(\n self,\n per_worker_instructions: List[List[PipeInstruction]],\n n_stages: int,\n ):\n self.per_worker_instructions = per_worker_instructions\n self.n_stages = n_stages\n\n def _inject_comm_finish_instrs(self, instrs: List[PipeInstruction]):\n # We assume that each rank has two communication streams,\n # one for communication with the previous rank and one for\n # the next rank. This gives better communication overlap\n # without the possibility to deadlock.\n #\n # For each RecvXXXStart, we need a RecvXXXFinish instr before\n # the instruction that uses the data, which is identified by\n # the corresponding microbatch and stage id.\n #\n # For each SendXXXStart, there is a trade-off between freeing the\n # memory early and unnecessary waiting if using static location for\n # SendXXXFinish. Therefore we dynamically query if the send is complete\n # during execution, and SendXXXFinish is added as late as possible,\n # only serving as constraints for correctness (in case dynamic query\n # fails).\n # We add SendActivationFinish only before the corresponding backward\n # pass, at which point the send must have completed. 
All SendGradFinish\n # are added at the end of the iteration.\n\n instr_map: Dict[\n Type[CommunicationStartInstruction],\n Type[CommunicationFinishInsturction],\n ] = {\n SendActivationStart: SendActivationFinish,\n RecvActivationStart: RecvActivationFinish,\n SendGradStart: SendGradFinish,\n RecvGradStart: RecvGradFinish,\n }\n _prepend_map = {}\n accumulated_send_activation_finish_instrs = defaultdict(list)\n accumulated_send_grad_finish_instrs = []\n new_instrs = []\n for instr in instrs:\n if _is_recv_instr(instr):\n key = _get_key(instr)\n assert key not in _prepend_map\n _prepend_map[key] = instr\n elif _is_send_instr(instr):\n instr: CommunicationStartInstruction\n # get the corresponding finish instr\n finish_instr = instr_map[type(instr)](\n instr.microbatch, instr.stage, instr.peer\n )\n # append existing send finish instrs\n # new_instrs.extend(accumulated_send_finish_instrs[instr.peer].copy())\n # accumulated_send_finish_instrs[instr.peer].clear()\n if isinstance(instr, SendActivationStart):\n accumulated_send_activation_finish_instrs[\n (\n instr.microbatch,\n _fw_stage_to_bw_stage(instr.stage, self.n_stages),\n )\n ].append(finish_instr)\n elif isinstance(instr, SendGradStart):\n accumulated_send_grad_finish_instrs.append(finish_instr)\n else:\n raise RuntimeError(f\"Unknown send instr: {instr}\")\n elif _is_compute_instr(instr):\n key = _get_key(instr)\n if key in _prepend_map:\n start_instr: CommunicationStartInstruction = _prepend_map[\n key\n ]\n new_instrs.append(\n instr_map[type(start_instr)](\n start_instr.microbatch,\n start_instr.stage,\n start_instr.peer,\n )\n )\n if not _is_forward(instr):\n # append existing send activation finish instrs\n new_instrs.extend(\n accumulated_send_activation_finish_instrs[\n (instr.microbatch, instr.stage)\n ].copy()\n )\n accumulated_send_activation_finish_instrs[\n (instr.microbatch, instr.stage)\n ].clear()\n new_instrs.append(instr)\n # append any remaining send finish instrs\n for (\n accumulated_send_finish_instrs\n ) in accumulated_send_activation_finish_instrs.values():\n assert len(accumulated_send_finish_instrs) == 0\n new_instrs.extend(accumulated_send_grad_finish_instrs)\n return new_instrs\n\n def _allocate_buffers(self, instrs: List[PipeInstruction]):\n # allcate: create new tensors (e.g. torch.zeros)\n # assign: assign a tensor to a buffer slot\n # Current assumptions:\n # 1. RecvXXXStart allocates its own buffers and writes to buffer_ids,\n # so we are only assigning buffer slots here. This can be optimized\n # by allocating buffers in advance if memory allocation issues\n # arise.\n # 2. ForwardPass and BackwardPass reads and writes the same buffer_ids.\n # SendXXXStart only reads but do not write to buffer_ids.\n # RecvXXXStart creates new buffers. 
SendXXXFinish and RecvXXXFinish\n # do not read or write to buffer_ids.\n buffer_slots: List[_Buffer] = []\n key_to_buffers: Dict[Any, List[_Buffer]] = defaultdict(list)\n\n def _allocate_buffer_slot(\n instr: BufferInstruction, shape, current_idx\n ) -> _Buffer:\n # find the first available buffer slot\n slot = len(buffer_slots)\n buffer = _Buffer(\n slot, instr.microbatch, instr.stage, shape, current_idx, None\n )\n buffer_slots.append(buffer)\n return buffer\n\n for instr_idx, instr in enumerate(instrs):\n if isinstance(\n instr,\n (\n ForwardPass,\n BackwardPass,\n SendActivationStart,\n SendGradStart,\n ),\n ):\n key = _get_key(instr)\n if isinstance(instr, BackwardPass) and instr.first_bw_layer:\n # first backward layer directly uses forward pass buffers\n assert key not in key_to_buffers\n fw_key = (instr.microbatch, instr.stage - 1, True)\n key_to_buffers[key] = key_to_buffers[fw_key].copy()\n assert (\n key in key_to_buffers\n ), f\"buffer not allocated for {instr}\"\n buffers = key_to_buffers[key]\n # we only allow dropping buffers\n # allocation needs explicit instrs\n assert len(buffers) >= len(instr.buffer_shapes), (\n f\"buffer allocation mismatch for {instr}, \"\n f\"expected less than {len(instr.buffer_shapes)}, \"\n f\"got {len(buffers)}\"\n )\n for buffer in buffers:\n instr.buffer_ids.append(buffer.slot)\n buffer.life_end = instr_idx\n elif isinstance(\n instr, (RecvActivationStart, RecvGradStart, LoadInput)\n ):\n # allocate new buffers\n key = _get_key(instr)\n for shape in instr.buffer_shapes:\n buffer = _allocate_buffer_slot(instr, shape, instr_idx)\n instr.buffer_ids.append(buffer.slot)\n key_to_buffers[key].append(buffer)\n\n # now insert buffer free instructions\n new_instrs = []\n buffers_freed_at_idx = defaultdict(list)\n for buffer in buffer_slots:\n assert buffer.life_end is not None, f\"buffer {buffer} not used. \"\n buffers_freed_at_idx[buffer.life_end].append(buffer.slot)\n for instr_idx, instr in enumerate(instrs):\n new_instrs.append(instr)\n if instr_idx in buffers_freed_at_idx:\n new_instrs.append(\n FreeBuffer(buffer_ids=buffers_freed_at_idx[instr_idx])\n )\n return new_instrs, len(buffer_slots)\n\n def optimize(self):\n result_instrs = []\n result_num_buffers = []\n for instrs in self.per_worker_instructions:\n instrs = self._inject_comm_finish_instrs(instrs)\n instrs, num_buffers = self._allocate_buffers(instrs)\n # check all needed buffers are allocated\n for instr in instrs:\n if isinstance(\n instr,\n (\n ForwardPass,\n BackwardPass,\n SendActivationStart,\n SendGradStart,\n RecvActivationStart,\n RecvGradStart,\n LoadInput,\n ),\n ):\n assert len(instr.buffer_ids) >= len(\n instr.buffer_shapes\n ), f\"buffer allocation mismatch for {instr}, \"\n f\"expected {len(instr.buffer_shapes)}, \"\n f\"got {len(instr.buffer_ids)}\"\n result_instrs.append(instrs)\n result_num_buffers.append(num_buffers)\n return result_instrs, result_num_buffers" }, { "identifier": "ExecutionPlan", "path": "dynapipe/pipe/instructions.py", "snippet": "class ExecutionPlan:\n \"\"\"\n Sequences of PipeInstructions to be executed by the PipeEngine, which\n defines the buffer allocation, the shape of the tensors and the pipeline\n schedule.\n\n The sequences of instructions must be executed in the exact order they are\n defined in the plan. 
No synchronization should be performed between\n instructions to avoid deadlock.\n\n Args:\n stages (int): The number of pipeline stages.\n stage_id (int): The stage that will execute the generated schedule.\n \"\"\"\n\n def __init__(\n self,\n instructions: List[PipeInstruction],\n micro_batches: int,\n nranks: int,\n nstages: int,\n rank: int,\n assigned_stages: List[int],\n recompute_method: RecomputeMethod = RecomputeMethod.NONE,\n num_pipe_buffers: Optional[int] = 0,\n ):\n self.instructions = instructions\n self.micro_batches = micro_batches\n self.nranks = nranks\n self.nstages = nstages\n self.rank = rank\n self.assigned_stages = assigned_stages\n self.recompute_method = recompute_method\n self._valid_rank(rank)\n self.num_pipe_buffers = num_pipe_buffers\n\n def _valid_rank(self, rank):\n return 0 <= rank < self.nranks\n\n @property\n def num_micro_batches(self):\n \"\"\"The number of total micro_batches in this schedule.\"\"\"\n return self.micro_batches\n\n def __iter__(self):\n self.it = None\n return self\n\n def __next__(self):\n if self.it is None:\n self.it = self.steps()\n return next(self.it)\n\n def __repr__(self) -> str:\n return (\n \"ExecutionPlan(micro_batches={}, nranks={}, nstages={}, rank={}, \"\n \"assigned_stages={}, recompute_method={}, \"\n \"num_pipe_buffers={}, instructions={})\".format(\n self.micro_batches,\n self.nranks,\n self.nstages,\n self.rank,\n self.assigned_stages,\n _RECOMPUTE_METHOD_NAMES[self.recompute_method],\n self.num_pipe_buffers,\n self.instructions,\n )\n )\n\n def __str__(self):\n \"\"\"Print the execution plan in a human readable format.\"\"\"\n return (\n \"ExecutionPlan(micro_batches={}, nranks={}, nstages={}, rank={}, \"\n \"assigned_stages={}, recompute_method={}, \"\n \"num_pipe_buffers={}, instructions=[\\n\\t\".format(\n self.micro_batches,\n self.nranks,\n self.nstages,\n self.rank,\n self.assigned_stages,\n _RECOMPUTE_METHOD_NAMES[self.recompute_method],\n self.num_pipe_buffers,\n )\n + \"\\n\\t\".join([str(x) for x in self.instructions])\n + \"\\n])\"\n )\n\n def __eq__(self, other: \"ExecutionPlan\"):\n if not isinstance(other, ExecutionPlan):\n return False\n return (\n self.micro_batches == other.micro_batches\n and self.nranks == other.nranks\n and self.nstages == other.nstages\n and self.rank == other.rank\n and self.assigned_stages == other.assigned_stages\n and self.recompute_method == other.recompute_method\n and self.num_pipe_buffers == other.num_pipe_buffers\n and self.instructions == other.instructions\n )\n\n def serialize(self, config=SerializationConfig()) -> bytes:\n \"\"\"Serialize the execution plan to a byte array.\"\"\"\n\n def _serialize_plan_meta(x: int):\n return x.to_bytes(\n config.EXECUTION_PLAN_META_BYTES, config.BYTES_ENDIANNESS\n )\n\n return (\n _serialize_plan_meta(self.micro_batches)\n + _serialize_plan_meta(self.nranks)\n + _serialize_plan_meta(self.nstages)\n + _serialize_plan_meta(self.rank)\n + _serialize_plan_meta(len(self.assigned_stages))\n + b\"\".join([_serialize_plan_meta(x) for x in self.assigned_stages])\n + _serialize_plan_meta(self.recompute_method)\n + _serialize_plan_meta(self.num_pipe_buffers)\n + len(self.instructions).to_bytes(\n config.EXECUTION_PLAN_META_BYTES, config.BYTES_ENDIANNESS\n )\n + b\"\".join(\n [instr.serialize(config) for instr in self.instructions]\n )\n )\n\n @classmethod\n def deserialize(\n cls, bytes, config=SerializationConfig()\n ) -> \"ExecutionPlan\":\n \"\"\"Deserialize the execution plan from a byte array.\"\"\"\n\n def _deserialize_plan_meta(bytes):\n 
return (\n int.from_bytes(\n bytes[: config.EXECUTION_PLAN_META_BYTES],\n config.BYTES_ENDIANNESS,\n ),\n bytes[config.EXECUTION_PLAN_META_BYTES :],\n )\n\n micro_batches, bytes = _deserialize_plan_meta(bytes)\n nranks, bytes = _deserialize_plan_meta(bytes)\n nstages, bytes = _deserialize_plan_meta(bytes)\n rank, bytes = _deserialize_plan_meta(bytes)\n n_assigned_stages, bytes = _deserialize_plan_meta(bytes)\n assigned_stages = []\n for _ in range(n_assigned_stages):\n assigned_stage, bytes = _deserialize_plan_meta(bytes)\n assigned_stages.append(assigned_stage)\n recompute_method, bytes = _deserialize_plan_meta(bytes)\n num_pipe_buffers, bytes = _deserialize_plan_meta(bytes)\n n_instructions = int.from_bytes(\n bytes[: config.EXECUTION_PLAN_META_BYTES], config.BYTES_ENDIANNESS\n )\n bytes = bytes[config.EXECUTION_PLAN_META_BYTES :]\n instructions = []\n for _ in range(n_instructions):\n instr, bytes = PipeInstruction.deserialize(bytes, config=config)\n instructions.append(instr)\n assert len(bytes) == 0\n return cls(\n instructions,\n micro_batches,\n nranks,\n nstages,\n rank,\n assigned_stages,\n recompute_method,\n num_pipe_buffers,\n )" }, { "identifier": "PipeInstruction", "path": "dynapipe/pipe/instructions.py", "snippet": "class PipeInstruction:\n \"\"\"Base class for all instructions to be executed by the pipeline engine.\n All keyword arguments are stored as members similar to a ``namedtuple``.\n These are then accessible to the PipeEngine during execution.\n Args:\n kwargs (optional): keyword arguments to store as members\n \"\"\"\n\n # used to generate a unique index for each instruction class\n # for serialization\n _instr_index_to_cls: Dict[int, Type[\"PipeInstruction\"]] = {}\n\n def __init__(self, microbatch, stage, **kwargs):\n self.name = self.__class__.__name__\n self.microbatch = microbatch\n self.stage = stage\n self.kwargs = kwargs\n for key, val in kwargs.items():\n setattr(self, key, val)\n\n def __repr__(self):\n name = f\"{self.name}(microbatch={self.microbatch}, stage={self.stage}\"\n if self.kwargs:\n name += \", \"\n name += \", \".join(\n f\"{key}={repr(arg)}\" for key, arg in self.kwargs.items()\n )\n name += \")\"\n return name\n\n def __init_subclass__(cls) -> None:\n cls._instr_index = len(PipeInstruction._instr_index_to_cls)\n PipeInstruction._instr_index_to_cls[cls._instr_index] = cls\n\n def serialize(self, config=SerializationConfig()) -> bytes:\n \"\"\"Serialize the instruction to a byte array.\"\"\"\n return (\n self._instr_index.to_bytes(\n config.INSTRUCTION_INDEX_BYTES, config.BYTES_ENDIANNESS\n )\n + self.microbatch.to_bytes(\n config.EXECUTION_PLAN_META_BYTES, config.BYTES_ENDIANNESS\n )\n + self.stage.to_bytes(\n config.EXECUTION_PLAN_META_BYTES, config.BYTES_ENDIANNESS\n )\n )\n\n def _deserialize(\n bytes: bytes, config=SerializationConfig()\n ) -> Tuple[Dict[str, Any], bytes]:\n return {}, bytes\n\n @classmethod\n def deserialize(\n cls, bytes: bytes, config=SerializationConfig()\n ) -> Tuple[\"PipeInstruction\", bytes]:\n \"\"\"Deserialize the instruction from a byte array.\"\"\"\n instr_index = int.from_bytes(\n bytes[: config.INSTRUCTION_INDEX_BYTES], config.BYTES_ENDIANNESS\n )\n bytes = bytes[config.INSTRUCTION_INDEX_BYTES :]\n microbatch = int.from_bytes(\n bytes[: config.EXECUTION_PLAN_META_BYTES], config.BYTES_ENDIANNESS\n )\n bytes = bytes[config.EXECUTION_PLAN_META_BYTES :]\n stage = int.from_bytes(\n bytes[: config.EXECUTION_PLAN_META_BYTES], config.BYTES_ENDIANNESS\n )\n bytes = bytes[config.EXECUTION_PLAN_META_BYTES :]\n kwargs, 
bytes = cls._instr_index_to_cls[instr_index]._deserialize(\n bytes, config=config\n )\n return (\n cls._instr_index_to_cls[instr_index](\n microbatch=microbatch, stage=stage, **kwargs\n ),\n bytes,\n )\n\n def __eq__(self, other: \"PipeInstruction\"):\n return (\n self.__class__ == other.__class__\n and self.microbatch == other.microbatch\n and self.stage == other.stage\n and self.kwargs == other.kwargs\n )" }, { "identifier": "name_to_recompute_method", "path": "dynapipe/pipe/instructions.py", "snippet": "def name_to_recompute_method(name: str) -> RecomputeMethod:\n if name in _NAME_TO_RECOMPUTE_METHOD:\n return _NAME_TO_RECOMPUTE_METHOD[name]\n raise ValueError(f\"Unknown recompute method: {name}\")" }, { "identifier": "validate_device_assignment", "path": "dynapipe/pipe/utils.py", "snippet": "def validate_device_assignment(\n model_spec: TransformerModelSpec,\n cluster_spec: DynaPipeCluster,\n device_assignment: List[int],\n):\n \"\"\"\n Validate device assignment and detect device assignment type.\n Args:\n device_assignment: List of device ids for each layer.\n \"\"\"\n appeared_devices = set()\n for device in device_assignment:\n if device not in appeared_devices:\n # new device\n assert device == len(appeared_devices), (\n \"Devices must appear in indexed order. \"\n \"e.g. [0, 1, 2, 3] is valid, \"\n \"[0, 1, 3, 2] is not valid.\"\n )\n appeared_devices.add(device)\n n_devices = len(appeared_devices)\n assert n_devices == cluster_spec.n_devices, (\n \"Number of devices used in device assignment \"\n \"must be equal to number of devices in cluster spec.\"\n )\n virtual_layer_to_actual_layers = [[]]\n virtual_layer_devices = [0]\n last_device = 0\n for device in device_assignment:\n if device == last_device:\n virtual_layer_to_actual_layers[-1].append(device)\n else:\n virtual_layer_to_actual_layers.append([device])\n virtual_layer_devices.append(device)\n last_device = device\n n_actual_layers_per_virtual_layer = len(virtual_layer_to_actual_layers[0])\n for virtual_layer in virtual_layer_to_actual_layers:\n n_encoder_layers_in_virtual_layer = len(\n [\n layer\n for layer in virtual_layer\n if layer < model_spec.n_encoder_layers\n ]\n )\n n_decoder_layers_in_virtual_layer = (\n len(virtual_layer) - n_encoder_layers_in_virtual_layer\n )\n if n_encoder_layers_in_virtual_layer > 0:\n assert (\n len(virtual_layer) == n_encoder_layers_in_virtual_layer\n ), \"Number of layers on each virtual layer must be the same.\"\n if n_decoder_layers_in_virtual_layer > 0:\n assert (\n len(virtual_layer) == n_decoder_layers_in_virtual_layer\n ), \"Number of layers on each virtual layer must be the same.\"\n if len(device_assignment) != n_actual_layers_per_virtual_layer:\n # only check if we are actually using pipeline parallelism\n assert (\n model_spec.n_encoder_layers % n_actual_layers_per_virtual_layer\n == 0\n ), (\n f\"Number of encoder layers ({model_spec.n_encoder_layers}) \"\n f\"must be divisible by number of layers on each virtual layer \"\n f\"({n_actual_layers_per_virtual_layer}).\"\n )\n assert (\n model_spec.n_decoder_layers % n_actual_layers_per_virtual_layer\n == 0\n ), (\n f\"Number of decoder layers ({model_spec.n_decoder_layers}) \"\n f\"must be divisible by number of layers on each virtual layer \"\n f\"({n_actual_layers_per_virtual_layer}).\"\n )\n # classify device assignment into linear, interleaved and other\n device_assignment_type = \"other\"\n if len(virtual_layer_devices) == n_devices:\n if virtual_layer_devices == list(range(n_devices)):\n device_assignment_type = 
\"linear\"\n else:\n n_chunks = len(virtual_layer_devices) // n_devices\n interleaved_assignment = list(range(n_devices)) * n_chunks\n if interleaved_assignment == virtual_layer_devices:\n device_assignment_type = \"interleaved\"\n if (\n device_assignment_type == \"interleaved\"\n and model_spec.n_decoder_layers == 0\n ):\n # interleaved device assignment is not supported for decoder only\n # models\n raise NotImplementedError(\n \"Interleaved device assignment is not supported \"\n \"for decoder only models.\"\n )\n valid_schedule_methods = [\"wait-free-cyclic\"]\n if device_assignment_type == \"linear\" and n_devices > 1:\n valid_schedule_methods.append(\"1F1B\")\n elif device_assignment_type == \"interleaved\":\n valid_schedule_methods.append(\"interleaved-1F1B\")\n n_chunks_per_device = len(virtual_layer_devices) // n_devices\n return (\n device_assignment_type,\n valid_schedule_methods,\n n_actual_layers_per_virtual_layer,\n n_chunks_per_device,\n )" }, { "identifier": "get_transformer_output_memory", "path": "dynapipe/utils/memory_utils.py", "snippet": "def get_transformer_output_memory(\n sequence_length, batch_size, hidden_dim, bytes_per_element\n):\n # size is in MB (megabytes)\n return sequence_length * batch_size * hidden_dim * bytes_per_element / 1e6" } ]
import itertools
import logging
import math
import numpy as np
from typing import List, Optional, Tuple
from sklearn.cluster import AgglomerativeClustering, KMeans
from dynapipe.data_opt.cost_models import ProfileBasedCostModelWithRC
from dynapipe.model import (
    DynaPipeCluster,
    DynaPipeMicrobatch,
    DynaPipeMinibatch,
    TransformerModelSpec,
    get_simulator,
)
from dynapipe.pipe.instruction_optimizer import InstructionOptimizer
from dynapipe.pipe.instructions import (
    ExecutionPlan,
    PipeInstruction,
    name_to_recompute_method,
)
from dynapipe.pipe.utils import validate_device_assignment
from dynapipe.utils.memory_utils import get_transformer_output_memory
from tqdm import tqdm
12,258
sch_type, permuted_minibatch, opt_cluster, device_assignment, include_memory_stats=include_memory_stats, memory_limit=scheduler_memory_limit, max_otf_microbatches=max_otf_microbatches, logger=logger, ) timeline_json = simulator.schedule() instructions = simulator.get_instructions() peak_memory = simulator.get_executor_peak_memory() max_memory_device = -1 max_device_memory = -1 for device, memory in peak_memory.items(): if memory > max_device_memory: max_memory_device = device max_device_memory = memory makespan = simulator.get_makespan() if makespan is None: continue makespan = makespan / 1000.0 debug_json = timeline_json mem_for_perms.append(max_device_memory) if max_device_memory > memory_limit: continue if makespan > max_makespan: max_makespan = makespan max_stats = ( perm, max_device_memory, max_memory_device, timeline_json, ) max_instructions = instructions if makespan < min_makespan: min_makespan = makespan min_stats = ( perm, max_device_memory, max_memory_device, timeline_json, ) min_instructions = instructions if logger is not None and max_makespan > 0.0: logger.debug( "Sched mem limit: {}, RC type: {}, Schedule type: {}, " "min peak memory: {} MB, makespan: {}.".format( scheduler_memory_limit, rc_type, sch_type, min(mem_for_perms), min_makespan, ) ) return ( max_makespan, max_stats, max_instructions, min_makespan, min_stats, min_instructions, debug_json, mem_for_perms, ) # first try without setting memory limit on scheduler # (i.e. see if there exist a feasible permutation) ( max_makespan, max_stats, max_instructions, min_makespan, min_stats, min_instructions, debug_json, mem_for_perms, ) = _run_schedules(float("inf")) if ( max_makespan == 0.0 and sch_type == "wait-free-cyclic" and not disable_scheduler_memory_limit ): # try with scheduler memory limit if logger is not None: logger.debug("Trying with scheduler memory limit.") ( max_makespan, max_stats, max_instructions, min_makespan, min_stats, min_instructions, debug_json, mem_for_perms, ) = _run_schedules(memory_limit) if max_makespan == 0.0 and raise_on_oom: # with open("./test_memory.json", "w") as f: # json.dump(debug_json, f) raise RuntimeError( "No feasible schedule within memory limit found. " "Memory consumption for different permutations: " "min: {}, max: {}.".format( [] if not mem_for_perms else min(mem_for_perms), [] if not mem_for_perms else max(mem_for_perms), ) ) return ( max_makespan, max_stats, max_instructions, min_makespan, min_stats, min_instructions, ) def construct_minibatch_spec( model_spec: TransformerModelSpec,
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 def optimize_schedule( sch_type: str, opt_minibatch: DynaPipeMinibatch, opt_cluster: DynaPipeCluster, device_assignment: List[int], try_permutations=True, perm_clusters=None, perm_cluster_algo="kmeans", include_memory_stats=False, progress_bar=False, memory_limit=float("inf"), disable_scheduler_memory_limit=False, max_otf_microbatches=None, raise_on_oom=True, rc_type: Optional[str] = None, logger: Optional[logging.Logger] = None, ): if try_permutations: if perm_clusters is None: if len(opt_minibatch.microbatches) > 20: perm_clusters = 3 else: perm_clusters = 4 if len(opt_minibatch.microbatches) > perm_clusters: mb_vectors = [] for mb in opt_minibatch.microbatches: # use fw and bw time as features mb_vectors.append( [ mb.fw_exec_times[0], mb.fw_exec_times[-1], mb.bw_exec_times[0], mb.bw_exec_times[-1], ] ) mb_vectors = np.array(mb_vectors) if perm_cluster_algo == "kmeans": cluster = KMeans( perm_clusters, random_state=0, n_init="auto", ).fit(mb_vectors) elif perm_cluster_algo == "agglomerative": cluster = AgglomerativeClustering( perm_clusters, linkage="complete", ).fit(mb_vectors) mb_labels = list(cluster.labels_) n_clusters = max(mb_labels) + 1 assert n_clusters <= perm_clusters mb_groups = [[] for _ in range(n_clusters)] mb_idx2group = {} for i, label in enumerate(mb_labels): mb_groups[label].append(i) mb_idx2group[i] = label result_premutations = [] for perm in itertools.permutations(range(len(mb_groups))): # generate a random permutation for each group mb_random_perm_per_label = {} for label, mb_indices in enumerate(mb_groups): shuffled_indices = np.random.permutation(mb_indices) mb_random_perm_per_label[label] = list(shuffled_indices) reconstructed_perm = [] for label in perm: reconstructed_perm.extend(mb_random_perm_per_label[label]) result_premutations.append(reconstructed_perm) permutations = result_premutations else: permutations = list( itertools.permutations(range(len(opt_minibatch.microbatches))) ) else: permutations = [] # always try the original order permutations.append(list(range(len(opt_minibatch.microbatches)))) def _run_schedules(scheduler_memory_limit): max_makespan = 0.0 max_stats = None max_instructions = [] min_makespan = float("inf") min_stats = None min_instructions = [] if progress_bar: iterator = tqdm(permutations) else: iterator = permutations debug_json = None mem_for_perms = [] for perm in iterator: permuted_minibatch = opt_minibatch.permute_microbatches(perm) # get simulator simulator = get_simulator( sch_type, permuted_minibatch, opt_cluster, device_assignment, include_memory_stats=include_memory_stats, memory_limit=scheduler_memory_limit, max_otf_microbatches=max_otf_microbatches, logger=logger, ) timeline_json = simulator.schedule() instructions = simulator.get_instructions() peak_memory = simulator.get_executor_peak_memory() max_memory_device = -1 max_device_memory = -1 for device, memory in peak_memory.items(): if memory > max_device_memory: max_memory_device = device max_device_memory = memory makespan = simulator.get_makespan() if makespan is None: continue makespan = makespan / 1000.0 debug_json = timeline_json mem_for_perms.append(max_device_memory) if max_device_memory > memory_limit: continue if makespan > max_makespan: max_makespan = makespan max_stats = ( perm, max_device_memory, max_memory_device, timeline_json, ) max_instructions = instructions if makespan < min_makespan: min_makespan = makespan min_stats = ( perm, max_device_memory, 
max_memory_device, timeline_json, ) min_instructions = instructions if logger is not None and max_makespan > 0.0: logger.debug( "Sched mem limit: {}, RC type: {}, Schedule type: {}, " "min peak memory: {} MB, makespan: {}.".format( scheduler_memory_limit, rc_type, sch_type, min(mem_for_perms), min_makespan, ) ) return ( max_makespan, max_stats, max_instructions, min_makespan, min_stats, min_instructions, debug_json, mem_for_perms, ) # first try without setting memory limit on scheduler # (i.e. see if there exist a feasible permutation) ( max_makespan, max_stats, max_instructions, min_makespan, min_stats, min_instructions, debug_json, mem_for_perms, ) = _run_schedules(float("inf")) if ( max_makespan == 0.0 and sch_type == "wait-free-cyclic" and not disable_scheduler_memory_limit ): # try with scheduler memory limit if logger is not None: logger.debug("Trying with scheduler memory limit.") ( max_makespan, max_stats, max_instructions, min_makespan, min_stats, min_instructions, debug_json, mem_for_perms, ) = _run_schedules(memory_limit) if max_makespan == 0.0 and raise_on_oom: # with open("./test_memory.json", "w") as f: # json.dump(debug_json, f) raise RuntimeError( "No feasible schedule within memory limit found. " "Memory consumption for different permutations: " "min: {}, max: {}.".format( [] if not mem_for_perms else min(mem_for_perms), [] if not mem_for_perms else max(mem_for_perms), ) ) return ( max_makespan, max_stats, max_instructions, min_makespan, min_stats, min_instructions, ) def construct_minibatch_spec( model_spec: TransformerModelSpec,
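The permutation search in optimize_schedule above avoids enumerating all microbatch orderings: it clusters microbatches on their first/last forward and backward execution times with KMeans (or agglomerative clustering), permutes only the clusters while shuffling the order inside each cluster at random, and always appends the original order as one extra candidate. A condensed, standalone sketch of that heuristic with toy numbers (not taken from the sample):

import itertools

import numpy as np
from sklearn.cluster import KMeans


def cluster_permutations(mb_vectors, n_clusters, seed=0):
    # One row per microbatch: [fw_time_first, fw_time_last, bw_time_first, bw_time_last].
    labels = KMeans(n_clusters, random_state=seed, n_init="auto").fit(mb_vectors).labels_
    groups = [
        [i for i, lbl in enumerate(labels) if lbl == label]
        for label in range(max(labels) + 1)
    ]
    rng = np.random.default_rng(seed)
    perms = []
    for group_order in itertools.permutations(range(len(groups))):
        perm = []
        for label in group_order:
            # random order within a cluster, clusters visited in the permuted order
            perm.extend(int(i) for i in rng.permutation(groups[label]))
        perms.append(perm)
    # optimize_schedule also always tries the original (identity) order
    perms.append(list(range(len(mb_vectors))))
    return perms


# Toy usage: 6 microbatches falling into 3 obvious timing groups.
vectors = np.array([
    [1.0, 1.1, 2.0, 2.1],
    [1.0, 1.0, 2.1, 2.0],
    [3.0, 3.2, 6.1, 6.0],
    [3.1, 3.0, 6.0, 6.2],
    [5.0, 5.1, 9.9, 10.0],
    [5.1, 5.0, 10.1, 9.8],
])
candidates = cluster_permutations(vectors, n_clusters=3)
print(len(candidates))  # 3! + 1 = 7 candidates instead of 6! = 720 full permutations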
cost_model: ProfileBasedCostModelWithRC,
0
2023-11-08 07:58:20+00:00
16k
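One more note on this dynapipe sample before the next record: the TransformerModelSpec snippet in its context list packs eight integer fields as 4-byte little-endian values, and __post_init__ fills mlp_hidden_dim and kv_channels when they are omitted. A minimal round-trip sketch, assuming the class is a plain dataclass (implied by its field annotations, __post_init__, and the positional construction in deserialize, but not shown in the sample):

from dynapipe.model import TransformerModelSpec

spec = TransformerModelSpec(
    n_encoder_layers=12,
    n_decoder_layers=12,
    hidden_dim=768,
    num_attn_heads=12,
)
# Defaults filled by __post_init__: mlp_hidden_dim = 4 * 768, kv_channels = 768 // 12.
data = spec.serialize()
assert len(data) == 8 * 4  # eight fields, 4 bytes each, little-endian
restored = TransformerModelSpec.deserialize(data)
assert restored.mlp_hidden_dim == 3072 and restored.kv_channels == 64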
SqueezeAILab/LLMCompiler
src/llm_compiler/llm_compiler.py
[ { "identifier": "AsyncStatsCallbackHandler", "path": "src/callbacks/callbacks.py", "snippet": "class AsyncStatsCallbackHandler(AsyncCallbackHandler):\n \"\"\"Collect useful stats about the run.\n Add more stats as needed.\"\"\"\n\n def __init__(self, stream: bool = False) -> None:\n super().__init__()\n self.cnt = 0\n self.input_tokens = 0\n self.output_tokens = 0\n # same for gpt-3.5\n self.encoder = tiktoken.encoding_for_model(\"gpt-4\")\n self.stream = stream\n self.all_times = []\n self.start_time = 0\n\n async def on_chat_model_start(self, serialized, prompts, **kwargs):\n self.start_time = time.time()\n if self.stream:\n # if streaming mode, on_llm_end response is not collected\n # therefore, we need to count input token based on the\n # prompt length at the beginning\n self.cnt += 1\n self.input_tokens += len(self.encoder.encode(prompts[0][0].content))\n\n async def on_llm_new_token(self, token, *args, **kwargs):\n if self.stream:\n # if streaming mode, on_llm_end response is not collected\n # therefore, we need to manually count output token based on the\n # number of streamed out tokens\n self.output_tokens += 1\n\n async def on_llm_end(self, response, *args, **kwargs):\n self.all_times.append(round(time.time() - self.start_time, 2))\n if not self.stream:\n # if not streaming mode, on_llm_end response is collected\n # so we can use this stats directly\n token_usage = response.llm_output[\"token_usage\"]\n self.input_tokens += token_usage[\"prompt_tokens\"]\n self.output_tokens += token_usage[\"completion_tokens\"]\n self.cnt += 1\n\n def reset(self) -> None:\n self.cnt = 0\n self.input_tokens = 0\n self.output_tokens = 0\n self.all_times = []\n\n def get_stats(self) -> dict[str, int]:\n return {\n \"calls\": self.cnt,\n \"input_tokens\": self.input_tokens,\n \"output_tokens\": self.output_tokens,\n \"all_times\": self.all_times,\n }" }, { "identifier": "Chain", "path": "src/chains/chain.py", "snippet": "class Chain(Serializable, Runnable[Dict[str, Any], Dict[str, Any]], ABC):\n \"\"\"Abstract base class for creating structured sequences of calls to components.\n\n Chains should be used to encode a sequence of calls to components like\n models, document retrievers, other chains, etc., and provide a simple interface\n to this sequence.\n\n Copied from langchain v0.0.283.\n\n The Chain interface makes it easy to create apps that are:\n - Stateful: add Memory to any Chain to give it state,\n - Observable: pass Callbacks to a Chain to execute additional functionality,\n like logging, outside the main sequence of component calls,\n - Composable: the Chain API is flexible enough that it is easy to combine\n Chains with other components, including other Chains.\n\n The main methods exposed by chains are:\n - `__call__`: Chains are callable. The `__call__` method is the primary way to\n execute a Chain. This takes inputs as a dictionary and returns a\n dictionary output.\n - `run`: A convenience method that takes inputs as args/kwargs and returns the\n output as a string or object. 
This method can only be used for a subset of\n chains and cannot return as rich of an output as `__call__`.\n \"\"\"\n\n def invoke(\n self,\n input: Dict[str, Any],\n config: Optional[RunnableConfig] = None,\n **kwargs: Any,\n ) -> Dict[str, Any]:\n config = config or {}\n return self(\n input,\n callbacks=config.get(\"callbacks\"),\n tags=config.get(\"tags\"),\n metadata=config.get(\"metadata\"),\n run_name=config.get(\"run_name\"),\n **kwargs,\n )\n\n async def ainvoke(\n self,\n input: Dict[str, Any],\n config: Optional[RunnableConfig] = None,\n **kwargs: Any,\n ) -> Dict[str, Any]:\n if type(self)._acall == Chain._acall:\n # If the chain does not implement async, fall back to default implementation\n return await asyncio.get_running_loop().run_in_executor(\n None, partial(self.invoke, input, config, **kwargs)\n )\n\n config = config or {}\n return await self.acall(\n input,\n callbacks=config.get(\"callbacks\"),\n tags=config.get(\"tags\"),\n metadata=config.get(\"metadata\"),\n run_name=config.get(\"run_name\"),\n **kwargs,\n )\n\n memory: Optional[BaseMemory] = None\n \"\"\"Optional memory object. Defaults to None.\n Memory is a class that gets called at the start\n and at the end of every chain. At the start, memory loads variables and passes\n them along in the chain. At the end, it saves any returned variables.\n There are many different types of memory - please see memory docs\n for the full catalog.\"\"\"\n callbacks: Callbacks = Field(default=None, exclude=True)\n \"\"\"Optional list of callback handlers (or callback manager). Defaults to None.\n Callback handlers are called throughout the lifecycle of a call to a chain,\n starting with on_chain_start, ending with on_chain_end or on_chain_error.\n Each custom chain can optionally call additional callback methods, see Callback docs\n for full details.\"\"\"\n callback_manager: Optional[BaseCallbackManager] = Field(default=None, exclude=True)\n \"\"\"Deprecated, use `callbacks` instead.\"\"\"\n verbose: bool = Field(default_factory=_get_verbosity)\n \"\"\"Whether or not run in verbose mode. In verbose mode, some intermediate logs\n will be printed to the console. Defaults to `langchain.verbose` value.\"\"\"\n tags: Optional[List[str]] = None\n \"\"\"Optional list of tags associated with the chain. Defaults to None.\n These tags will be associated with each call to this chain,\n and passed as arguments to the handlers defined in `callbacks`.\n You can use these to eg identify a specific instance of a chain with its use case.\n \"\"\"\n metadata: Optional[Dict[str, Any]] = None\n \"\"\"Optional metadata associated with the chain. Defaults to None.\n This metadata will be associated with each call to this chain,\n and passed as arguments to the handlers defined in `callbacks`.\n You can use these to eg identify a specific instance of a chain with its use case.\n \"\"\"\n\n class Config:\n \"\"\"Configuration for this pydantic object.\"\"\"\n\n arbitrary_types_allowed = True\n\n @property\n def _chain_type(self) -> str:\n raise NotImplementedError(\"Saving not supported for this chain type.\")\n\n @root_validator()\n def raise_callback_manager_deprecation(cls, values: Dict) -> Dict:\n \"\"\"Raise deprecation warning if callback_manager is used.\"\"\"\n if values.get(\"callback_manager\") is not None:\n if values.get(\"callbacks\") is not None:\n raise ValueError(\n \"Cannot specify both callback_manager and callbacks. 
\"\n \"callback_manager is deprecated, callbacks is the preferred \"\n \"parameter to pass in.\"\n )\n warnings.warn(\n \"callback_manager is deprecated. Please use callbacks instead.\",\n DeprecationWarning,\n )\n values[\"callbacks\"] = values.pop(\"callback_manager\", None)\n return values\n\n @validator(\"verbose\", pre=True, always=True)\n def set_verbose(cls, verbose: Optional[bool]) -> bool:\n \"\"\"Set the chain verbosity.\n\n Defaults to the global setting if not specified by the user.\n \"\"\"\n if verbose is None:\n return _get_verbosity()\n else:\n return verbose\n\n @property\n @abstractmethod\n def input_keys(self) -> List[str]:\n \"\"\"Keys expected to be in the chain input.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def output_keys(self) -> List[str]:\n \"\"\"Keys expected to be in the chain output.\"\"\"\n raise NotImplementedError\n\n def _validate_inputs(self, inputs: Dict[str, Any]) -> None:\n \"\"\"Check that all inputs are present.\"\"\"\n missing_keys = set(self.input_keys).difference(inputs)\n if missing_keys:\n raise ValueError(f\"Missing some input keys: {missing_keys}\")\n\n def _validate_outputs(self, outputs: Dict[str, Any]) -> None:\n missing_keys = set(self.output_keys).difference(outputs)\n if missing_keys:\n raise ValueError(f\"Missing some output keys: {missing_keys}\")\n\n @abstractmethod\n def _call(\n self,\n inputs: Dict[str, Any],\n run_manager: Optional[CallbackManagerForChainRun] = None,\n ) -> Dict[str, Any]:\n \"\"\"Execute the chain.\n\n This is a private method that is not user-facing. It is only called within\n `Chain.__call__`, which is the user-facing wrapper method that handles\n callbacks configuration and some input/output processing.\n\n Args:\n inputs: A dict of named inputs to the chain. Assumed to contain all inputs\n specified in `Chain.input_keys`, including any inputs added by memory.\n run_manager: The callbacks manager that contains the callback handlers for\n this run of the chain.\n\n Returns:\n A dict of named outputs. Should contain all outputs specified in\n `Chain.output_keys`.\n \"\"\"\n raise NotImplementedError\n\n async def _acall(\n self,\n inputs: Dict[str, Any],\n run_manager: Optional[AsyncCallbackManagerForChainRun] = None,\n ) -> Dict[str, Any]:\n \"\"\"Asynchronously execute the chain.\n\n This is a private method that is not user-facing. It is only called within\n `Chain.acall`, which is the user-facing wrapper method that handles\n callbacks configuration and some input/output processing.\n\n Args:\n inputs: A dict of named inputs to the chain. Assumed to contain all inputs\n specified in `Chain.input_keys`, including any inputs added by memory.\n run_manager: The callbacks manager that contains the callback handlers for\n this run of the chain.\n\n Returns:\n A dict of named outputs. Should contain all outputs specified in\n `Chain.output_keys`.\n \"\"\"\n raise NotImplementedError(\"Async call not supported for this chain type.\")\n\n def __call__(\n self,\n inputs: Union[Dict[str, Any], Any],\n return_only_outputs: bool = False,\n callbacks: Callbacks = None,\n *,\n tags: Optional[List[str]] = None,\n metadata: Optional[Dict[str, Any]] = None,\n run_name: Optional[str] = None,\n include_run_info: bool = False,\n ) -> Dict[str, Any]:\n \"\"\"Execute the chain.\n\n Args:\n inputs: Dictionary of inputs, or single input if chain expects\n only one param. 
Should contain all inputs specified in\n `Chain.input_keys` except for inputs that will be set by the chain's\n memory.\n return_only_outputs: Whether to return only outputs in the\n response. If True, only new keys generated by this chain will be\n returned. If False, both input keys and new keys generated by this\n chain will be returned. Defaults to False.\n callbacks: Callbacks to use for this chain run. These will be called in\n addition to callbacks passed to the chain during construction, but only\n these runtime callbacks will propagate to calls to other objects.\n tags: List of string tags to pass to all callbacks. These will be passed in\n addition to tags passed to the chain during construction, but only\n these runtime tags will propagate to calls to other objects.\n metadata: Optional metadata associated with the chain. Defaults to None\n include_run_info: Whether to include run info in the response. Defaults\n to False.\n\n Returns:\n A dict of named outputs. Should contain all outputs specified in\n `Chain.output_keys`.\n \"\"\"\n inputs = self.prep_inputs(inputs)\n callback_manager = CallbackManager.configure(\n callbacks,\n self.callbacks,\n self.verbose,\n tags,\n self.tags,\n metadata,\n self.metadata,\n )\n new_arg_supported = inspect.signature(self._call).parameters.get(\"run_manager\")\n run_manager = callback_manager.on_chain_start(\n dumpd(self),\n inputs,\n name=run_name,\n )\n try:\n outputs = (\n self._call(inputs, run_manager=run_manager)\n if new_arg_supported\n else self._call(inputs)\n )\n except (KeyboardInterrupt, Exception) as e:\n run_manager.on_chain_error(e)\n raise e\n run_manager.on_chain_end(outputs)\n final_outputs: Dict[str, Any] = self.prep_outputs(\n inputs, outputs, return_only_outputs\n )\n if include_run_info:\n final_outputs[RUN_KEY] = RunInfo(run_id=run_manager.run_id)\n return final_outputs\n\n async def acall(\n self,\n inputs: Union[Dict[str, Any], Any],\n return_only_outputs: bool = False,\n callbacks: Callbacks = None,\n *,\n tags: Optional[List[str]] = None,\n metadata: Optional[Dict[str, Any]] = None,\n run_name: Optional[str] = None,\n include_run_info: bool = False,\n ) -> Dict[str, Any]:\n \"\"\"Asynchronously execute the chain.\n\n Args:\n inputs: Dictionary of inputs, or single input if chain expects\n only one param. Should contain all inputs specified in\n `Chain.input_keys` except for inputs that will be set by the chain's\n memory.\n return_only_outputs: Whether to return only outputs in the\n response. If True, only new keys generated by this chain will be\n returned. If False, both input keys and new keys generated by this\n chain will be returned. Defaults to False.\n callbacks: Callbacks to use for this chain run. These will be called in\n addition to callbacks passed to the chain during construction, but only\n these runtime callbacks will propagate to calls to other objects.\n tags: List of string tags to pass to all callbacks. These will be passed in\n addition to tags passed to the chain during construction, but only\n these runtime tags will propagate to calls to other objects.\n metadata: Optional metadata associated with the chain. Defaults to None\n include_run_info: Whether to include run info in the response. Defaults\n to False.\n\n Returns:\n A dict of named outputs. 
Should contain all outputs specified in\n `Chain.output_keys`.\n \"\"\"\n inputs = self.prep_inputs(inputs)\n callback_manager = AsyncCallbackManager.configure(\n callbacks,\n self.callbacks,\n self.verbose,\n tags,\n self.tags,\n metadata,\n self.metadata,\n )\n new_arg_supported = inspect.signature(self._acall).parameters.get(\"run_manager\")\n run_manager = await callback_manager.on_chain_start(\n dumpd(self),\n inputs,\n name=run_name,\n )\n try:\n outputs = (\n await self._acall(inputs, run_manager=run_manager)\n if new_arg_supported\n else await self._acall(inputs)\n )\n except (KeyboardInterrupt, Exception) as e:\n await run_manager.on_chain_error(e)\n raise e\n await run_manager.on_chain_end(outputs)\n final_outputs: Dict[str, Any] = self.prep_outputs(\n inputs, outputs, return_only_outputs\n )\n if include_run_info:\n final_outputs[RUN_KEY] = RunInfo(run_id=run_manager.run_id)\n return final_outputs\n\n def prep_outputs(\n self,\n inputs: Dict[str, str],\n outputs: Dict[str, str],\n return_only_outputs: bool = False,\n ) -> Dict[str, str]:\n \"\"\"Validate and prepare chain outputs, and save info about this run to memory.\n\n Args:\n inputs: Dictionary of chain inputs, including any inputs added by chain\n memory.\n outputs: Dictionary of initial chain outputs.\n return_only_outputs: Whether to only return the chain outputs. If False,\n inputs are also added to the final outputs.\n\n Returns:\n A dict of the final chain outputs.\n \"\"\"\n self._validate_outputs(outputs)\n if self.memory is not None:\n self.memory.save_context(inputs, outputs)\n if return_only_outputs:\n return outputs\n else:\n return {**inputs, **outputs}\n\n def prep_inputs(self, inputs: Union[Dict[str, Any], Any]) -> Dict[str, str]:\n \"\"\"Validate and prepare chain inputs, including adding inputs from memory.\n\n Args:\n inputs: Dictionary of raw inputs, or single input if chain expects\n only one param. Should contain all inputs specified in\n `Chain.input_keys` except for inputs that will be set by the chain's\n memory.\n\n Returns:\n A dictionary of all inputs, including those added by the chain's memory.\n \"\"\"\n if not isinstance(inputs, dict):\n _input_keys = set(self.input_keys)\n if self.memory is not None:\n # If there are multiple input keys, but some get set by memory so that\n # only one is not set, we can still figure out which key it is.\n _input_keys = _input_keys.difference(self.memory.memory_variables)\n if len(_input_keys) != 1:\n raise ValueError(\n f\"A single string input was passed in, but this chain expects \"\n f\"multiple inputs ({_input_keys}). When a chain expects \"\n f\"multiple inputs, please call it by passing in a dictionary, \"\n \"eg `chain({'foo': 1, 'bar': 2})`\"\n )\n inputs = {list(_input_keys)[0]: inputs}\n if self.memory is not None:\n external_context = self.memory.load_memory_variables(inputs)\n inputs = dict(inputs, **external_context)\n self._validate_inputs(inputs)\n return inputs\n\n @property\n def _run_output_key(self) -> str:\n if len(self.output_keys) != 1:\n raise ValueError(\n f\"`run` not supported when there is not exactly \"\n f\"one output key. 
Got {self.output_keys}.\"\n )\n return self.output_keys[0]\n\n def run(\n self,\n *args: Any,\n callbacks: Callbacks = None,\n tags: Optional[List[str]] = None,\n metadata: Optional[Dict[str, Any]] = None,\n **kwargs: Any,\n ) -> Any:\n \"\"\"Convenience method for executing chain.\n\n The main difference between this method and `Chain.__call__` is that this\n method expects inputs to be passed directly in as positional arguments or\n keyword arguments, whereas `Chain.__call__` expects a single input dictionary\n with all the inputs\n\n Args:\n *args: If the chain expects a single input, it can be passed in as the\n sole positional argument.\n callbacks: Callbacks to use for this chain run. These will be called in\n addition to callbacks passed to the chain during construction, but only\n these runtime callbacks will propagate to calls to other objects.\n tags: List of string tags to pass to all callbacks. These will be passed in\n addition to tags passed to the chain during construction, but only\n these runtime tags will propagate to calls to other objects.\n **kwargs: If the chain expects multiple inputs, they can be passed in\n directly as keyword arguments.\n\n Returns:\n The chain output.\n\n Example:\n .. code-block:: python\n\n # Suppose we have a single-input chain that takes a 'question' string:\n chain.run(\"What's the temperature in Boise, Idaho?\")\n # -> \"The temperature in Boise is...\"\n\n # Suppose we have a multi-input chain that takes a 'question' string\n # and 'context' string:\n question = \"What's the temperature in Boise, Idaho?\"\n context = \"Weather report for Boise, Idaho on 07/03/23...\"\n chain.run(question=question, context=context)\n # -> \"The temperature in Boise is...\"\n \"\"\"\n # Run at start to make sure this is possible/defined\n _output_key = self._run_output_key\n\n if args and not kwargs:\n if len(args) != 1:\n raise ValueError(\"`run` supports only one positional argument.\")\n return self(args[0], callbacks=callbacks, tags=tags, metadata=metadata)[\n _output_key\n ]\n\n if kwargs and not args:\n return self(kwargs, callbacks=callbacks, tags=tags, metadata=metadata)[\n _output_key\n ]\n\n if not kwargs and not args:\n raise ValueError(\n \"`run` supported with either positional arguments or keyword arguments,\"\n \" but none were provided.\"\n )\n else:\n raise ValueError(\n f\"`run` supported with either positional arguments or keyword arguments\"\n f\" but not both. Got args: {args} and kwargs: {kwargs}.\"\n )\n\n async def arun(\n self,\n *args: Any,\n callbacks: Callbacks = None,\n tags: Optional[List[str]] = None,\n metadata: Optional[Dict[str, Any]] = None,\n **kwargs: Any,\n ) -> Any:\n \"\"\"Convenience method for executing chain.\n\n The main difference between this method and `Chain.__call__` is that this\n method expects inputs to be passed directly in as positional arguments or\n keyword arguments, whereas `Chain.__call__` expects a single input dictionary\n with all the inputs\n\n\n Args:\n *args: If the chain expects a single input, it can be passed in as the\n sole positional argument.\n callbacks: Callbacks to use for this chain run. These will be called in\n addition to callbacks passed to the chain during construction, but only\n these runtime callbacks will propagate to calls to other objects.\n tags: List of string tags to pass to all callbacks. 
These will be passed in\n addition to tags passed to the chain during construction, but only\n these runtime tags will propagate to calls to other objects.\n **kwargs: If the chain expects multiple inputs, they can be passed in\n directly as keyword arguments.\n\n Returns:\n The chain output.\n\n Example:\n .. code-block:: python\n\n # Suppose we have a single-input chain that takes a 'question' string:\n await chain.arun(\"What's the temperature in Boise, Idaho?\")\n # -> \"The temperature in Boise is...\"\n\n # Suppose we have a multi-input chain that takes a 'question' string\n # and 'context' string:\n question = \"What's the temperature in Boise, Idaho?\"\n context = \"Weather report for Boise, Idaho on 07/03/23...\"\n await chain.arun(question=question, context=context)\n # -> \"The temperature in Boise is...\"\n \"\"\"\n if len(self.output_keys) != 1:\n raise ValueError(\n f\"`run` not supported when there is not exactly \"\n f\"one output key. Got {self.output_keys}.\"\n )\n elif args and not kwargs:\n if len(args) != 1:\n raise ValueError(\"`run` supports only one positional argument.\")\n return (\n await self.acall(\n args[0], callbacks=callbacks, tags=tags, metadata=metadata\n )\n )[self.output_keys[0]]\n\n if kwargs and not args:\n return (\n await self.acall(\n kwargs, callbacks=callbacks, tags=tags, metadata=metadata\n )\n )[self.output_keys[0]]\n\n raise ValueError(\n f\"`run` supported with either positional arguments or keyword arguments\"\n f\" but not both. Got args: {args} and kwargs: {kwargs}.\"\n )\n\n def dict(self, **kwargs: Any) -> Dict:\n \"\"\"Dictionary representation of chain.\n\n Expects `Chain._chain_type` property to be implemented and for memory to be\n null.\n\n Args:\n **kwargs: Keyword arguments passed to default `pydantic.BaseModel.dict`\n method.\n\n Returns:\n A dictionary representation of the chain.\n\n Example:\n .. code-block:: python\n\n chain.dict(exclude_unset=True)\n # -> {\"_type\": \"foo\", \"verbose\": False, ...}\n \"\"\"\n if self.memory is not None:\n raise ValueError(\"Saving of memory is not yet supported.\")\n _dict = super().dict(**kwargs)\n _dict[\"_type\"] = self._chain_type\n return _dict\n\n def save(self, file_path: Union[Path, str]) -> None:\n \"\"\"Save the chain.\n\n Expects `Chain._chain_type` property to be implemented and for memory to be\n null.\n\n Args:\n file_path: Path to file to save the chain to.\n\n Example:\n .. 
code-block:: python\n\n chain.save(file_path=\"path/chain.yaml\")\n \"\"\"\n # Convert file to Path object.\n if isinstance(file_path, str):\n save_path = Path(file_path)\n else:\n save_path = file_path\n\n directory_path = save_path.parent\n directory_path.mkdir(parents=True, exist_ok=True)\n\n # Fetch dictionary to save\n chain_dict = self.dict()\n\n if save_path.suffix == \".json\":\n with open(file_path, \"w\") as f:\n json.dump(chain_dict, f, indent=4)\n elif save_path.suffix == \".yaml\":\n with open(file_path, \"w\") as f:\n yaml.dump(chain_dict, f, default_flow_style=False)\n else:\n raise ValueError(f\"{save_path} must be json or yaml\")\n\n def apply(\n self, input_list: List[Dict[str, Any]], callbacks: Callbacks = None\n ) -> List[Dict[str, str]]:\n \"\"\"Call the chain on all inputs in the list.\"\"\"\n return [self(inputs, callbacks=callbacks) for inputs in input_list]" }, { "identifier": "JOINNER_REPLAN", "path": "src/llm_compiler/constants.py", "snippet": "JOINNER_REPLAN = \"Replan\"" }, { "identifier": "Planner", "path": "src/llm_compiler/planner.py", "snippet": "class Planner:\n def __init__(\n self,\n llm: BaseChatModel,\n example_prompt: str,\n example_prompt_replan: str,\n tools: Sequence[Union[Tool, StructuredTool]],\n stop: Optional[list[str]],\n ):\n self.llm = llm\n # different system prompt is needed when replanning\n # since they have different guidelines, and also examples provided by the user\n self.system_prompt = generate_llm_compiler_prompt(\n tools=tools,\n example_prompt=example_prompt,\n is_replan=False,\n )\n self.system_prompt_replan = generate_llm_compiler_prompt(\n tools=tools,\n example_prompt=example_prompt_replan,\n is_replan=True,\n )\n self.tools = tools\n self.output_parser = LLMCompilerPlanParser(tools=tools)\n self.stop = stop\n\n async def run_llm(\n self,\n inputs: dict[str, Any],\n is_replan: bool = False,\n callbacks: Callbacks = None,\n ) -> str:\n \"\"\"Run the LLM.\"\"\"\n if is_replan:\n system_prompt = self.system_prompt_replan\n assert \"context\" in inputs, \"If replanning, context must be provided\"\n human_prompt = f\"Question: {inputs['input']}\\n{inputs['context']}\\n\"\n else:\n system_prompt = self.system_prompt\n human_prompt = f\"Question: {inputs['input']}\"\n\n messages = [\n SystemMessage(content=system_prompt),\n HumanMessage(content=human_prompt),\n ]\n\n llm_response = await self.llm._call_async(\n messages,\n callbacks=callbacks,\n stop=self.stop,\n )\n log(\"LLMCompiler planner response: \\n\", llm_response.content, block=True)\n\n return llm_response.content\n\n async def plan(\n self, inputs: dict, is_replan: bool, callbacks: Callbacks = None, **kwargs: Any\n ):\n llm_response = await self.run_llm(\n inputs=inputs, is_replan=is_replan, callbacks=callbacks\n )\n llm_response = llm_response + \"\\n\"\n return self.output_parser.parse(llm_response)\n\n async def aplan(\n self,\n inputs: dict,\n task_queue: asyncio.Queue[Optional[str]],\n is_replan: bool,\n callbacks: Callbacks = None,\n **kwargs: Any,\n ) -> Plan:\n \"\"\"Given input, asynchronously decide what to do.\"\"\"\n all_callbacks = [\n LLMCompilerCallback(\n queue=task_queue,\n tools=self.tools,\n )\n ]\n if callbacks:\n all_callbacks.extend(callbacks)\n await self.run_llm(inputs=inputs, is_replan=is_replan, callbacks=all_callbacks)" }, { "identifier": "Task", "path": "src/llm_compiler/task_fetching_unit.py", "snippet": "class Task:\n idx: int\n name: str\n tool: Callable\n args: Collection[Any]\n dependencies: Collection[int]\n stringify_rule: 
Optional[Callable] = None\n thought: Optional[str] = None\n observation: Optional[str] = None\n is_join: bool = False\n\n async def __call__(self) -> Any:\n log(\"running task\")\n x = await self.tool(*self.args)\n log(\"done task\")\n return x\n\n def get_though_action_observation(\n self, include_action=True, include_thought=True, include_action_idx=False\n ) -> str:\n thought_action_observation = \"\"\n if self.thought and include_thought:\n thought_action_observation = f\"Thought: {self.thought}\\n\"\n if include_action:\n idx = f\"{self.idx}. \" if include_action_idx else \"\"\n if self.stringify_rule:\n # If the user has specified a custom stringify rule for the\n # function argument, use it\n thought_action_observation += f\"{idx}{self.stringify_rule(self.args)}\\n\"\n else:\n # Otherwise, we have a default stringify rule\n thought_action_observation += (\n f\"{idx}{self.name}\"\n f\"{_default_stringify_rule_for_arguments(self.args)}\\n\"\n )\n if self.observation is not None:\n thought_action_observation += f\"Observation: {self.observation}\\n\"\n return thought_action_observation" }, { "identifier": "TaskFetchingUnit", "path": "src/llm_compiler/task_fetching_unit.py", "snippet": "class TaskFetchingUnit:\n tasks: Dict[str, Task]\n tasks_done: Dict[str, asyncio.Event]\n remaining_tasks: set[str]\n\n def __init__(self):\n self.tasks = {}\n self.tasks_done = {}\n self.remaining_tasks = set()\n\n def set_tasks(self, tasks: dict[str, Any]):\n self.tasks.update(tasks)\n self.tasks_done.update({task_idx: asyncio.Event() for task_idx in tasks})\n self.remaining_tasks.update(set(tasks.keys()))\n\n def _all_tasks_done(self):\n return all(self.tasks_done[d].is_set() for d in self.tasks_done)\n\n def _get_all_executable_tasks(self):\n return [\n task_name\n for task_name in self.remaining_tasks\n if all(\n self.tasks_done[d].is_set() for d in self.tasks[task_name].dependencies\n )\n ]\n\n def _preprocess_args(self, task: Task):\n \"\"\"Replace dependency placeholders, i.e. 
${1}, in task.args with the actual observation.\"\"\"\n args = []\n for arg in task.args:\n arg = _replace_arg_mask_with_real_value(arg, task.dependencies, self.tasks)\n args.append(arg)\n task.args = args\n\n async def _run_task(self, task: Task):\n self._preprocess_args(task)\n if not task.is_join:\n observation = await task()\n task.observation = observation\n self.tasks_done[task.idx].set()\n\n async def schedule(self):\n \"\"\"Run all tasks in self.tasks in parallel, respecting dependencies.\"\"\"\n # run until all tasks are done\n while not self._all_tasks_done():\n # Find tasks with no dependencies or with all dependencies met\n executable_tasks = self._get_all_executable_tasks()\n\n for task_name in executable_tasks:\n asyncio.create_task(self._run_task(self.tasks[task_name]))\n self.remaining_tasks.remove(task_name)\n\n await asyncio.sleep(SCHEDULING_INTERVAL)\n\n async def aschedule(self, task_queue: asyncio.Queue[Optional[Task]], func):\n \"\"\"Asynchronously listen to task_queue and schedule tasks as they arrive.\"\"\"\n no_more_tasks = False # Flag to check if all tasks are received\n\n while True:\n if not no_more_tasks:\n # Wait for a new task to be added to the queue\n task = await task_queue.get()\n\n # Check for sentinel value indicating end of tasks\n if task is None:\n no_more_tasks = True\n else:\n # Parse and set the new tasks\n self.set_tasks({task.idx: task})\n\n # Schedule and run executable tasks\n executable_tasks = self._get_all_executable_tasks()\n\n if executable_tasks:\n for task_name in executable_tasks:\n asyncio.create_task(self._run_task(self.tasks[task_name]))\n self.remaining_tasks.remove(task_name)\n elif no_more_tasks and self._all_tasks_done():\n # Exit the loop if no more tasks are expected and all tasks are done\n break\n else:\n # If no executable tasks are found, sleep for the SCHEDULING_INTERVAL\n await asyncio.sleep(SCHEDULING_INTERVAL)" }, { "identifier": "StructuredTool", "path": "src/tools/base.py", "snippet": "class StructuredTool(BaseTool):\n \"\"\"Tool that can operate on any number of inputs.\"\"\"\n\n description: str = \"\"\n args_schema: Type[BaseModel] = Field(..., description=\"The tool schema.\")\n \"\"\"The input arguments' schema.\"\"\"\n func: Optional[Callable[..., Any]]\n \"\"\"The function to run when the tool is called.\"\"\"\n coroutine: Optional[Callable[..., Awaitable[Any]]] = None\n \"\"\"The asynchronous version of the function.\"\"\"\n stringify_rule: Optional[Callable[..., str]] = None\n\n # --- Runnable ---\n\n async def ainvoke(\n self,\n input: Union[str, Dict],\n config: Optional[RunnableConfig] = None,\n **kwargs: Any,\n ) -> Any:\n if not self.coroutine:\n # If the tool does not implement async, fall back to default implementation\n return await asyncio.get_running_loop().run_in_executor(\n None, partial(self.invoke, input, config, **kwargs)\n )\n\n return super().ainvoke(input, config, **kwargs)\n\n # --- Tool ---\n\n @property\n def args(self) -> dict:\n \"\"\"The tool's input arguments.\"\"\"\n return self.args_schema.schema()[\"properties\"]\n\n def _run(\n self,\n *args: Any,\n run_manager: Optional[CallbackManagerForToolRun] = None,\n **kwargs: Any,\n ) -> Any:\n \"\"\"Use the tool.\"\"\"\n if self.func:\n new_argument_supported = signature(self.func).parameters.get(\"callbacks\")\n return (\n self.func(\n *args,\n callbacks=run_manager.get_child() if run_manager else None,\n **kwargs,\n )\n if new_argument_supported\n else self.func(*args, **kwargs)\n )\n raise NotImplementedError(\"Tool does not support 
sync\")\n\n async def _arun(\n self,\n *args: Any,\n run_manager: Optional[AsyncCallbackManagerForToolRun] = None,\n **kwargs: Any,\n ) -> str:\n \"\"\"Use the tool asynchronously.\"\"\"\n if self.coroutine:\n new_argument_supported = signature(self.coroutine).parameters.get(\n \"callbacks\"\n )\n return (\n await self.coroutine(\n *args,\n callbacks=run_manager.get_child() if run_manager else None,\n **kwargs,\n )\n if new_argument_supported\n else await self.coroutine(*args, **kwargs)\n )\n return await asyncio.get_running_loop().run_in_executor(\n None,\n self._run,\n partial(self._run, run_manager=run_manager, **kwargs),\n *args,\n )\n\n @classmethod\n def from_function(\n cls,\n func: Optional[Callable] = None,\n coroutine: Optional[Callable[..., Awaitable[Any]]] = None,\n name: Optional[str] = None,\n description: Optional[str] = None,\n return_direct: bool = False,\n args_schema: Optional[Type[BaseModel]] = None,\n infer_schema: bool = True,\n **kwargs: Any,\n ) -> StructuredTool:\n \"\"\"Create tool from a given function.\n\n A classmethod that helps to create a tool from a function.\n\n Args:\n func: The function from which to create a tool\n coroutine: The async function from which to create a tool\n name: The name of the tool. Defaults to the function name\n description: The description of the tool. Defaults to the function docstring\n return_direct: Whether to return the result directly or as a callback\n args_schema: The schema of the tool's input arguments\n infer_schema: Whether to infer the schema from the function's signature\n **kwargs: Additional arguments to pass to the tool\n\n Returns:\n The tool\n\n Examples:\n\n .. code-block:: python\n\n def add(a: int, b: int) -> int:\n \\\"\\\"\\\"Add two numbers\\\"\\\"\\\"\n return a + b\n tool = StructuredTool.from_function(add)\n tool.run(1, 2) # 3\n \"\"\"\n\n if func is not None:\n source_function = func\n elif coroutine is not None:\n source_function = coroutine\n else:\n raise ValueError(\"Function and/or coroutine must be provided\")\n name = name or source_function.__name__\n description = description or source_function.__doc__\n if description is None:\n raise ValueError(\n \"Function must have a docstring if description not provided.\"\n )\n\n # Description example:\n # search_api(query: str) - Searches the API for the query.\n sig = signature(source_function)\n description = f\"{name}{sig} - {description.strip()}\"\n _args_schema = args_schema\n if _args_schema is None and infer_schema:\n _args_schema = create_schema_from_function(f\"{name}Schema\", source_function)\n return cls(\n name=name,\n func=func,\n coroutine=coroutine,\n args_schema=_args_schema,\n description=description,\n return_direct=return_direct,\n **kwargs,\n )" }, { "identifier": "Tool", "path": "src/tools/base.py", "snippet": "class Tool(BaseTool):\n \"\"\"Tool that takes in function or coroutine directly.\"\"\"\n\n description: str = \"\"\n func: Optional[Callable[..., str]]\n \"\"\"The function to run when the tool is called.\"\"\"\n coroutine: Optional[Callable[..., Awaitable[str]]] = None\n \"\"\"The asynchronous version of the function.\"\"\"\n stringify_rule: Optional[Callable[..., str]] = None\n\n # --- Runnable ---\n\n async def ainvoke(\n self,\n input: Union[str, Dict],\n config: Optional[RunnableConfig] = None,\n **kwargs: Any,\n ) -> Any:\n if not self.coroutine:\n # If the tool does not implement async, fall back to default implementation\n return await asyncio.get_running_loop().run_in_executor(\n None, partial(self.invoke, input, 
config, **kwargs)\n )\n\n return super().ainvoke(input, config, **kwargs)\n\n # --- Tool ---\n\n @property\n def args(self) -> dict:\n \"\"\"The tool's input arguments.\"\"\"\n if self.args_schema is not None:\n return self.args_schema.schema()[\"properties\"]\n # For backwards compatibility, if the function signature is ambiguous,\n # assume it takes a single string input.\n return {\"tool_input\": {\"type\": \"string\"}}\n\n def _to_args_and_kwargs(self, tool_input: Union[str, Dict]) -> Tuple[Tuple, Dict]:\n \"\"\"Convert tool input to pydantic model.\"\"\"\n args, kwargs = super()._to_args_and_kwargs(tool_input)\n # For backwards compatibility. The tool must be run with a single input\n all_args = list(args) + list(kwargs.values())\n if len(all_args) != 1:\n raise ToolException(\n f\"Too many arguments to single-input tool {self.name}.\"\n f\" Args: {all_args}\"\n )\n return tuple(all_args), {}\n\n def _run(\n self,\n *args: Any,\n run_manager: Optional[CallbackManagerForToolRun] = None,\n **kwargs: Any,\n ) -> Any:\n \"\"\"Use the tool.\"\"\"\n if self.func:\n new_argument_supported = signature(self.func).parameters.get(\"callbacks\")\n return (\n self.func(\n *args,\n callbacks=run_manager.get_child() if run_manager else None,\n **kwargs,\n )\n if new_argument_supported\n else self.func(*args, **kwargs)\n )\n raise NotImplementedError(\"Tool does not support sync\")\n\n async def _arun(\n self,\n *args: Any,\n run_manager: Optional[AsyncCallbackManagerForToolRun] = None,\n **kwargs: Any,\n ) -> Any:\n \"\"\"Use the tool asynchronously.\"\"\"\n if self.coroutine:\n new_argument_supported = signature(self.coroutine).parameters.get(\n \"callbacks\"\n )\n return (\n await self.coroutine(\n *args,\n callbacks=run_manager.get_child() if run_manager else None,\n **kwargs,\n )\n if new_argument_supported\n else await self.coroutine(*args, **kwargs)\n )\n else:\n return await asyncio.get_running_loop().run_in_executor(\n None, partial(self._run, run_manager=run_manager, **kwargs), *args\n )\n\n # TODO: this is for backwards compatibility, remove in future\n def __init__(\n self, name: str, func: Optional[Callable], description: str, **kwargs: Any\n ) -> None:\n \"\"\"Initialize tool.\"\"\"\n super(Tool, self).__init__(\n name=name, func=func, description=description, **kwargs\n )\n\n @classmethod\n def from_function(\n cls,\n func: Optional[Callable],\n name: str, # We keep these required to support backwards compatibility\n description: str,\n return_direct: bool = False,\n args_schema: Optional[Type[BaseModel]] = None,\n coroutine: Optional[\n Callable[..., Awaitable[Any]]\n ] = None, # This is last for compatibility, but should be after func\n **kwargs: Any,\n ) -> Tool:\n \"\"\"Initialize tool from a function.\"\"\"\n if func is None and coroutine is None:\n raise ValueError(\"Function and/or coroutine must be provided\")\n return cls(\n name=name,\n func=func,\n coroutine=coroutine,\n description=description,\n return_direct=return_direct,\n args_schema=args_schema,\n **kwargs,\n )" }, { "identifier": "log", "path": "src/utils/logger_utils.py", "snippet": "def log(self, latency: float, answer: str, label: str, key: str) -> None:\n self._latency_dict[key].append(latency)\n self._answer_dict[key].append(answer)\n self._label_dict[key].append(label)" } ]
import asyncio from typing import Any, Dict, List, Mapping, Optional, Sequence, Union, cast from langchain.callbacks.manager import ( AsyncCallbackManagerForChainRun, CallbackManagerForChainRun, ) from langchain.llms import BaseLLM from langchain.prompts.base import StringPromptValue from src.callbacks.callbacks import AsyncStatsCallbackHandler from src.chains.chain import Chain from src.llm_compiler.constants import JOINNER_REPLAN from src.llm_compiler.planner import Planner from src.llm_compiler.task_fetching_unit import Task, TaskFetchingUnit from src.tools.base import StructuredTool, Tool from src.utils.logger_utils import log
11,590
) class LLMCompiler(Chain, extra="allow"): """LLMCompuler Engine.""" """The step container to use.""" input_key: str = "input" output_key: str = "output" def __init__( self, tools: Sequence[Union[Tool, StructuredTool]], planner_llm: BaseLLM, planner_example_prompt: str, planner_example_prompt_replan: Optional[str], planner_stop: Optional[list[str]], planner_stream: bool, agent_llm: BaseLLM, joinner_prompt: str, joinner_prompt_final: Optional[str], max_replans: int, benchmark: bool, **kwargs, ) -> None: """ Args: tools: List of tools to use. max_replans: Maximum number of replans to do. benchmark: Whether to collect benchmark stats. Planner Args: planner_llm: LLM to use for planning. planner_example_prompt: Example prompt for planning. planner_example_prompt_replan: Example prompt for replanning. Assign this if you want to use different example prompt for replanning. If not assigned, default to `planner_example_prompt`. planner_stop: Stop tokens for planning. planner_stream: Whether to stream the planning. Agent Args: agent_llm: LLM to use for agent. joinner_prompt: Prompt to use for joinner. joinner_prompt_final: Prompt to use for joinner at the final replanning iter. If not assigned, default to `joinner_prompt`. """ super().__init__(**kwargs) if not planner_example_prompt_replan: log( "Replan example prompt not specified, using the same prompt as the planner." ) planner_example_prompt_replan = planner_example_prompt self.planner = Planner( llm=planner_llm, example_prompt=planner_example_prompt, example_prompt_replan=planner_example_prompt_replan, tools=tools, stop=planner_stop, ) self.agent = LLMCompilerAgent(agent_llm) self.joinner_prompt = joinner_prompt self.joinner_prompt_final = joinner_prompt_final or joinner_prompt self.planner_stream = planner_stream self.max_replans = max_replans # callbacks self.benchmark = benchmark if benchmark: self.planner_callback = AsyncStatsCallbackHandler(stream=planner_stream) self.executor_callback = AsyncStatsCallbackHandler(stream=False) else: self.planner_callback = None self.executor_callback = None def get_all_stats(self): stats = {} if self.benchmark: stats["planner"] = self.planner_callback.get_stats() stats["executor"] = self.executor_callback.get_stats() stats["total"] = { k: v + stats["executor"][k] for k, v in stats["planner"].items() } return stats def reset_all_stats(self): if self.planner_callback: self.planner_callback.reset() if self.executor_callback: self.executor_callback.reset() @property def input_keys(self) -> List[str]: return [self.input_key] @property def output_keys(self) -> List[str]: return [self.output_key] # TODO(sk): move all join related functions to a separate class def _parse_joinner_output(self, raw_answer: str) -> str: """We expect the joinner output format to be: ``` Thought: xxx Action: Finish/Replan(yyy) ``` Returns: thought (xxx) answer (yyy) is_replan (True/False) """ thought, answer, is_replan = "", "", False # default values raw_answers = raw_answer.split("\n") for ans in raw_answers: if ans.startswith("Action:"): answer = ans[ans.find("(") + 1 : ans.find(")")]
class LLMCompilerAgent: """Self defined agent for LLM Compiler.""" def __init__(self, llm: BaseLLM) -> None: self.llm = llm async def arun(self, prompt: str, callbacks=None) -> str: return await self.llm.agenerate_prompt( prompts=[StringPromptValue(text=prompt)], stop=None, callbacks=callbacks, ) class LLMCompiler(Chain, extra="allow"): """LLMCompuler Engine.""" """The step container to use.""" input_key: str = "input" output_key: str = "output" def __init__( self, tools: Sequence[Union[Tool, StructuredTool]], planner_llm: BaseLLM, planner_example_prompt: str, planner_example_prompt_replan: Optional[str], planner_stop: Optional[list[str]], planner_stream: bool, agent_llm: BaseLLM, joinner_prompt: str, joinner_prompt_final: Optional[str], max_replans: int, benchmark: bool, **kwargs, ) -> None: """ Args: tools: List of tools to use. max_replans: Maximum number of replans to do. benchmark: Whether to collect benchmark stats. Planner Args: planner_llm: LLM to use for planning. planner_example_prompt: Example prompt for planning. planner_example_prompt_replan: Example prompt for replanning. Assign this if you want to use different example prompt for replanning. If not assigned, default to `planner_example_prompt`. planner_stop: Stop tokens for planning. planner_stream: Whether to stream the planning. Agent Args: agent_llm: LLM to use for agent. joinner_prompt: Prompt to use for joinner. joinner_prompt_final: Prompt to use for joinner at the final replanning iter. If not assigned, default to `joinner_prompt`. """ super().__init__(**kwargs) if not planner_example_prompt_replan: log( "Replan example prompt not specified, using the same prompt as the planner." ) planner_example_prompt_replan = planner_example_prompt self.planner = Planner( llm=planner_llm, example_prompt=planner_example_prompt, example_prompt_replan=planner_example_prompt_replan, tools=tools, stop=planner_stop, ) self.agent = LLMCompilerAgent(agent_llm) self.joinner_prompt = joinner_prompt self.joinner_prompt_final = joinner_prompt_final or joinner_prompt self.planner_stream = planner_stream self.max_replans = max_replans # callbacks self.benchmark = benchmark if benchmark: self.planner_callback = AsyncStatsCallbackHandler(stream=planner_stream) self.executor_callback = AsyncStatsCallbackHandler(stream=False) else: self.planner_callback = None self.executor_callback = None def get_all_stats(self): stats = {} if self.benchmark: stats["planner"] = self.planner_callback.get_stats() stats["executor"] = self.executor_callback.get_stats() stats["total"] = { k: v + stats["executor"][k] for k, v in stats["planner"].items() } return stats def reset_all_stats(self): if self.planner_callback: self.planner_callback.reset() if self.executor_callback: self.executor_callback.reset() @property def input_keys(self) -> List[str]: return [self.input_key] @property def output_keys(self) -> List[str]: return [self.output_key] # TODO(sk): move all join related functions to a separate class def _parse_joinner_output(self, raw_answer: str) -> str: """We expect the joinner output format to be: ``` Thought: xxx Action: Finish/Replan(yyy) ``` Returns: thought (xxx) answer (yyy) is_replan (True/False) """ thought, answer, is_replan = "", "", False # default values raw_answers = raw_answer.split("\n") for ans in raw_answers: if ans.startswith("Action:"): answer = ans[ans.find("(") + 1 : ans.find(")")]
is_replan = JOINNER_REPLAN in ans
2
2023-12-06 21:12:54+00:00
16k
bytedance/ImageDream
extern/ldm_zero123/models/diffusion/ddpm.py
[ { "identifier": "AutoencoderKL", "path": "extern/ldm_zero123/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(\n self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ):\n super().__init__()\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2 * ddconfig[\"z_channels\"], 2 * embed_dim, 1)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig[\"z_channels\"], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels) == int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n\n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(\n inputs,\n reconstructions,\n posterior,\n optimizer_idx,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"train\",\n )\n self.log(\n \"aeloss\",\n aeloss,\n prog_bar=True,\n logger=True,\n on_step=True,\n on_epoch=True,\n )\n self.log_dict(\n log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False\n )\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(\n inputs,\n reconstructions,\n posterior,\n optimizer_idx,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"train\",\n )\n\n self.log(\n \"discloss\",\n discloss,\n prog_bar=True,\n logger=True,\n on_step=True,\n on_epoch=True,\n )\n self.log_dict(\n log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False\n )\n return discloss\n\n def validation_step(self, batch, batch_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(\n inputs,\n reconstructions,\n posterior,\n 0,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"val\",\n )\n\n discloss, log_dict_disc = self.loss(\n inputs,\n reconstructions,\n posterior,\n 1,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"val\",\n )\n\n self.log(\"val/rec_loss\", 
log_dict_ae[\"val/rec_loss\"])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n def configure_optimizers(self):\n lr = self.learning_rate\n opt_ae = torch.optim.Adam(\n list(self.encoder.parameters())\n + list(self.decoder.parameters())\n + list(self.quant_conv.parameters())\n + list(self.post_quant_conv.parameters()),\n lr=lr,\n betas=(0.5, 0.9),\n )\n opt_disc = torch.optim.Adam(\n self.loss.discriminator.parameters(), lr=lr, betas=(0.5, 0.9)\n )\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.0 * (x - x.min()) / (x.max() - x.min()) - 1.0\n return x" }, { "identifier": "IdentityFirstStage", "path": "extern/ldm_zero123/models/autoencoder.py", "snippet": "class IdentityFirstStage(torch.nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface # TODO: Should be true by default but check to not break older stuff\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x" }, { "identifier": "VQModelInterface", "path": "extern/ldm_zero123/models/autoencoder.py", "snippet": "class VQModelInterface(VQModel):\n def __init__(self, embed_dim, *args, **kwargs):\n super().__init__(embed_dim=embed_dim, *args, **kwargs)\n self.embed_dim = embed_dim\n\n def encode(self, x):\n h = self.encoder(x)\n h = self.quant_conv(h)\n return h\n\n def decode(self, h, force_not_quantize=False):\n # also go through quantization layer\n if not force_not_quantize:\n quant, emb_loss, info = self.quantize(h)\n else:\n quant = h\n quant = self.post_quant_conv(quant)\n dec = self.decoder(quant)\n return dec" }, { "identifier": "DDIMSampler", "path": "extern/ldm_zero123/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def to(self, device):\n \"\"\"Same as to in torch module\n Don't really underestand why this isn't a module in the first place\"\"\"\n for k, v in self.__dict__.items():\n if isinstance(v, torch.Tensor):\n new_v = getattr(self, k).to(device)\n setattr(self, k, new_v)\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(\n self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0.0, verbose=True\n ):\n self.ddim_timesteps = make_ddim_timesteps(\n ddim_discr_method=ddim_discretize,\n 
num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,\n verbose=verbose,\n )\n alphas_cumprod = self.model.alphas_cumprod\n assert (\n alphas_cumprod.shape[0] == self.ddpm_num_timesteps\n ), \"alphas have to be defined for each timestep\"\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer(\"betas\", to_torch(self.model.betas))\n self.register_buffer(\"alphas_cumprod\", to_torch(alphas_cumprod))\n self.register_buffer(\n \"alphas_cumprod_prev\", to_torch(self.model.alphas_cumprod_prev)\n )\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer(\n \"sqrt_alphas_cumprod\", to_torch(np.sqrt(alphas_cumprod.cpu()))\n )\n self.register_buffer(\n \"sqrt_one_minus_alphas_cumprod\",\n to_torch(np.sqrt(1.0 - alphas_cumprod.cpu())),\n )\n self.register_buffer(\n \"log_one_minus_alphas_cumprod\", to_torch(np.log(1.0 - alphas_cumprod.cpu()))\n )\n self.register_buffer(\n \"sqrt_recip_alphas_cumprod\", to_torch(np.sqrt(1.0 / alphas_cumprod.cpu()))\n )\n self.register_buffer(\n \"sqrt_recipm1_alphas_cumprod\",\n to_torch(np.sqrt(1.0 / alphas_cumprod.cpu() - 1)),\n )\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(\n alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,\n verbose=verbose,\n )\n self.register_buffer(\"ddim_sigmas\", ddim_sigmas)\n self.register_buffer(\"ddim_alphas\", ddim_alphas)\n self.register_buffer(\"ddim_alphas_prev\", ddim_alphas_prev)\n self.register_buffer(\"ddim_sqrt_one_minus_alphas\", np.sqrt(1.0 - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev)\n / (1 - self.alphas_cumprod)\n * (1 - self.alphas_cumprod / self.alphas_cumprod_prev)\n )\n self.register_buffer(\n \"ddim_sigmas_for_original_num_steps\", sigmas_for_original_sampling_steps\n )\n\n @torch.no_grad()\n def sample(\n self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.0,\n mask=None,\n x0=None,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n dynamic_threshold=None,\n **kwargs,\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n ctmp = conditioning[list(conditioning.keys())[0]]\n while isinstance(ctmp, list):\n ctmp = ctmp[0]\n cbs = ctmp.shape[0]\n if cbs != batch_size:\n print(\n f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\"\n )\n\n else:\n if conditioning.shape[0] != batch_size:\n print(\n f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\"\n )\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n # print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(\n conditioning,\n size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask,\n x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(\n self,\n cond,\n shape,\n x_T=None,\n ddim_use_original_steps=False,\n callback=None,\n timesteps=None,\n quantize_denoised=False,\n mask=None,\n x0=None,\n img_callback=None,\n log_every_t=100,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n dynamic_threshold=None,\n t_start=-1,\n ):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = (\n self.ddpm_num_timesteps\n if ddim_use_original_steps\n else self.ddim_timesteps\n )\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = (\n int(\n min(timesteps / self.ddim_timesteps.shape[0], 1)\n * self.ddim_timesteps.shape[0]\n )\n - 1\n )\n timesteps = self.ddim_timesteps[:subset_end]\n\n timesteps = timesteps[:t_start]\n\n intermediates = {\"x_inter\": [img], \"pred_x0\": [img]}\n time_range = (\n reversed(range(0, timesteps))\n if ddim_use_original_steps\n else np.flip(timesteps)\n )\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n # print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc=\"DDIM Sampler\", total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(\n x0, ts\n ) # TODO: deterministic forward pass?\n img = img_orig * mask + (1.0 - mask) * img\n\n outs = self.p_sample_ddim(\n img,\n cond,\n ts,\n index=index,\n use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised,\n temperature=temperature,\n noise_dropout=noise_dropout,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n )\n img, pred_x0 = outs\n if callback:\n img = callback(i, img, pred_x0)\n if img_callback:\n img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index 
== total_steps - 1:\n intermediates[\"x_inter\"].append(img)\n intermediates[\"pred_x0\"].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(\n self,\n x,\n c,\n t,\n index,\n repeat_noise=False,\n use_original_steps=False,\n quantize_denoised=False,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n dynamic_threshold=None,\n ):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.0:\n e_t = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n if isinstance(c, dict):\n assert isinstance(unconditional_conditioning, dict)\n c_in = dict()\n for k in c:\n if isinstance(c[k], list):\n c_in[k] = [\n torch.cat([unconditional_conditioning[k][i], c[k][i]])\n for i in range(len(c[k]))\n ]\n else:\n c_in[k] = torch.cat([unconditional_conditioning[k], c[k]])\n else:\n c_in = torch.cat([unconditional_conditioning, c])\n e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(\n self.model, e_t, x, t, c, **corrector_kwargs\n )\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = (\n self.model.alphas_cumprod_prev\n if use_original_steps\n else self.ddim_alphas_prev\n )\n sqrt_one_minus_alphas = (\n self.model.sqrt_one_minus_alphas_cumprod\n if use_original_steps\n else self.ddim_sqrt_one_minus_alphas\n )\n sigmas = (\n self.model.ddim_sigmas_for_original_num_steps\n if use_original_steps\n else self.ddim_sigmas\n )\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full(\n (b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device\n )\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n\n print(t, sqrt_one_minus_at, a_t)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n pred_x0 = norm_thresholding(pred_x0, dynamic_threshold)\n\n # direction pointing to x_t\n dir_xt = (1.0 - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.0:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def encode(\n self,\n x0,\n c,\n t_enc,\n use_original_steps=False,\n return_intermediates=None,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n ):\n num_reference_steps = (\n self.ddpm_num_timesteps\n if use_original_steps\n else self.ddim_timesteps.shape[0]\n )\n\n assert t_enc <= num_reference_steps\n num_steps = t_enc\n\n if use_original_steps:\n alphas_next = self.alphas_cumprod[:num_steps]\n alphas = self.alphas_cumprod_prev[:num_steps]\n else:\n alphas_next = self.ddim_alphas[:num_steps]\n alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])\n\n x_next = x0\n intermediates = []\n inter_steps = []\n for i in tqdm(range(num_steps), desc=\"Encoding 
Image\"):\n t = torch.full(\n (x0.shape[0],), i, device=self.model.device, dtype=torch.long\n )\n if unconditional_guidance_scale == 1.0:\n noise_pred = self.model.apply_model(x_next, t, c)\n else:\n assert unconditional_conditioning is not None\n e_t_uncond, noise_pred = torch.chunk(\n self.model.apply_model(\n torch.cat((x_next, x_next)),\n torch.cat((t, t)),\n torch.cat((unconditional_conditioning, c)),\n ),\n 2,\n )\n noise_pred = e_t_uncond + unconditional_guidance_scale * (\n noise_pred - e_t_uncond\n )\n\n xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next\n weighted_noise_pred = (\n alphas_next[i].sqrt()\n * ((1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt())\n * noise_pred\n )\n x_next = xt_weighted + weighted_noise_pred\n if (\n return_intermediates\n and i % (num_steps // return_intermediates) == 0\n and i < num_steps - 1\n ):\n intermediates.append(x_next)\n inter_steps.append(i)\n elif return_intermediates and i >= num_steps - 2:\n intermediates.append(x_next)\n inter_steps.append(i)\n\n out = {\"x_encoded\": x_next, \"intermediate_steps\": inter_steps}\n if return_intermediates:\n out.update({\"intermediates\": intermediates})\n return x_next, out\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (\n extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0\n + extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise\n )\n\n @torch.no_grad()\n def decode(\n self,\n x_latent,\n cond,\n t_start,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n use_original_steps=False,\n ):\n timesteps = (\n np.arange(self.ddpm_num_timesteps)\n if use_original_steps\n else self.ddim_timesteps\n )\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n # print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc=\"Decoding image\", total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full(\n (x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long\n )\n x_dec, _ = self.p_sample_ddim(\n x_dec,\n cond,\n ts,\n index=index,\n use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n )\n return x_dec" }, { "identifier": "CrossAttention", "path": "extern/ldm_zero123/modules/attention.py", "snippet": "class CrossAttention(nn.Module):\n def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.0):\n super().__init__()\n inner_dim = dim_head * heads\n context_dim = default(context_dim, query_dim)\n\n self.scale = dim_head**-0.5\n self.heads = heads\n\n self.to_q = nn.Linear(query_dim, inner_dim, bias=False)\n self.to_k = nn.Linear(context_dim, inner_dim, bias=False)\n self.to_v = nn.Linear(context_dim, inner_dim, bias=False)\n\n self.to_out = nn.Sequential(\n nn.Linear(inner_dim, query_dim), nn.Dropout(dropout)\n )\n\n def forward(self, x, context=None, mask=None):\n h = 
self.heads\n\n q = self.to_q(x)\n context = default(context, x)\n k = self.to_k(context)\n v = self.to_v(context)\n\n q, k, v = map(lambda t: rearrange(t, \"b n (h d) -> (b h) n d\", h=h), (q, k, v))\n\n sim = einsum(\"b i d, b j d -> b i j\", q, k) * self.scale\n\n if exists(mask):\n mask = rearrange(mask, \"b ... -> b (...)\")\n max_neg_value = -torch.finfo(sim.dtype).max\n mask = repeat(mask, \"b j -> (b h) () j\", h=h)\n sim.masked_fill_(~mask, max_neg_value)\n\n # attention, what we cannot get enough of\n attn = sim.softmax(dim=-1)\n\n out = einsum(\"b i j, b j d -> b i d\", attn, v)\n out = rearrange(out, \"(b h) n d -> b n (h d)\", h=h)\n return self.to_out(out)" }, { "identifier": "extract_into_tensor", "path": "extern/ldm_zero123/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" }, { "identifier": "make_beta_schedule", "path": "extern/ldm_zero123/modules/diffusionmodules/util.py", "snippet": "def make_beta_schedule(\n schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3\n):\n if schedule == \"linear\":\n betas = (\n torch.linspace(\n linear_start**0.5, linear_end**0.5, n_timestep, dtype=torch.float64\n )\n ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(\n linear_start, linear_end, n_timestep, dtype=torch.float64\n )\n elif schedule == \"sqrt\":\n betas = (\n torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n ** 0.5\n )\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()" }, { "identifier": "noise_like", "path": "extern/ldm_zero123/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(\n shape[0], *((1,) * (len(shape) - 1))\n )\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "DiagonalGaussianDistribution", "path": "extern/ldm_zero123/modules/distributions/distributions.py", "snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(\n device=self.parameters.device\n )\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(\n device=self.parameters.device\n )\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.0])\n else:\n if other is None:\n return 0.5 * torch.sum(\n torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3],\n )\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var\n - 1.0\n - self.logvar\n + other.logvar,\n dim=[1, 2, 3],\n )\n\n def nll(self, sample, dims=[1, 2, 3]):\n if self.deterministic:\n return 
torch.Tensor([0.0])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims,\n )\n\n def mode(self):\n return self.mean" }, { "identifier": "normal_kl", "path": "extern/ldm_zero123/modules/distributions/distributions.py", "snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )" }, { "identifier": "LitEma", "path": "extern/ldm_zero123/modules/ema.py", "snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError(\"Decay must be between 0 and 1\")\n\n self.m_name2s_name = {}\n self.register_buffer(\"decay\", torch.tensor(decay, dtype=torch.float32))\n self.register_buffer(\n \"num_updates\",\n torch.tensor(0, dtype=torch.int)\n if use_num_upates\n else torch.tensor(-1, dtype=torch.int),\n )\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n # remove as '.'-character is not allowed in buffers\n s_name = name.replace(\".\", \"\")\n self.m_name2s_name.update({name: s_name})\n self.register_buffer(s_name, p.clone().detach().data)\n\n self.collected_params = []\n\n def forward(self, model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(\n one_minus_decay * (shadow_params[sname] - m_param[key])\n )\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. 
After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)" }, { "identifier": "count_params", "path": "extern/ldm_zero123/util.py", "snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.\")\n return total_params" }, { "identifier": "default", "path": "extern/ldm_zero123/util.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" }, { "identifier": "exists", "path": "extern/ldm_zero123/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "instantiate_from_config", "path": "extern/ldm_zero123/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == \"__is_first_stage__\":\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "isimage", "path": "extern/ldm_zero123/util.py", "snippet": "def isimage(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)" }, { "identifier": "ismap", "path": "extern/ldm_zero123/util.py", "snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)" }, { "identifier": "log_txt_as_img", "path": "extern/ldm_zero123/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype(\"data/DejaVuSans.ttf\", size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(\n xc[bi][start : start + nc] for start in range(0, len(xc[bi]), nc)\n )\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "mean_flat", "path": "extern/ldm_zero123/util.py", "snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))" } ]
import itertools import numpy as np import pytorch_lightning as pl import torch import torch.nn as nn from contextlib import contextmanager, nullcontext from functools import partial from einops import rearrange, repeat from omegaconf import ListConfig from pytorch_lightning.utilities.rank_zero import rank_zero_only from torch.optim.lr_scheduler import LambdaLR from torchvision.utils import make_grid from tqdm import tqdm from extern.ldm_zero123.models.autoencoder import ( AutoencoderKL, IdentityFirstStage, VQModelInterface, ) from extern.ldm_zero123.models.diffusion.ddim import DDIMSampler from extern.ldm_zero123.modules.attention import CrossAttention from extern.ldm_zero123.modules.diffusionmodules.util import ( extract_into_tensor, make_beta_schedule, noise_like, ) from extern.ldm_zero123.modules.distributions.distributions import ( DiagonalGaussianDistribution, normal_kl, ) from extern.ldm_zero123.modules.ema import LitEma from extern.ldm_zero123.util import ( count_params, default, exists, instantiate_from_config, isimage, ismap, log_txt_as_img, mean_flat, )
12,434
) self.register_buffer( "posterior_mean_coef2", to_torch( (1.0 - alphas_cumprod_prev) * np.sqrt(alphas) / (1.0 - alphas_cumprod) ), ) if self.parameterization == "eps": lvlb_weights = self.betas**2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod) ) elif self.parameterization == "x0": lvlb_weights = ( 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2.0 * 1 - torch.Tensor(alphas_cumprod)) ) else: raise NotImplementedError("mu not supported") # TODO how to choose this term lvlb_weights[0] = lvlb_weights[1] self.register_buffer("lvlb_weights", lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) if self.make_it_fit: n_params = len( [ name for name, _ in itertools.chain( self.named_parameters(), self.named_buffers() ) ] ) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params, ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[ i % old_shape[0], j % old_shape[1] ] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = ( self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(sd, strict=False) ) print( f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys" ) if len(missing) > 0: print(f"Missing Keys: {missing}") if len(unexpected) > 0: print(f"Unexpected Keys: {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. """
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {"concat": "c_concat", "crossattn": "c_crossattn", "adm": "y"} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__( self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0.0, v_posterior=0.0, # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1.0, conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0.0, make_it_fit=False, ucg_training=None, ): super().__init__() assert parameterization in [ "eps", "x0", ], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print( f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode" ) self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? 
self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if ckpt_path is not None: self.init_from_ckpt( ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet ) self.register_schedule( given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s, ) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule( self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, ): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule( beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s, ) alphas = 1.0 - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1.0, alphas_cumprod[:-1]) (timesteps,) = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert ( alphas_cumprod.shape[0] == self.num_timesteps ), "alphas have to be defined for each timestep" to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer("betas", to_torch(betas)) self.register_buffer("alphas_cumprod", to_torch(alphas_cumprod)) self.register_buffer("alphas_cumprod_prev", to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer("sqrt_alphas_cumprod", to_torch(np.sqrt(alphas_cumprod))) self.register_buffer( "sqrt_one_minus_alphas_cumprod", to_torch(np.sqrt(1.0 - alphas_cumprod)) ) self.register_buffer( "log_one_minus_alphas_cumprod", to_torch(np.log(1.0 - alphas_cumprod)) ) self.register_buffer( "sqrt_recip_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod)) ) self.register_buffer( "sqrt_recipm1_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod - 1)) ) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * ( 1.0 - alphas_cumprod_prev ) / (1.0 - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. 
- alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer("posterior_variance", to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer( "posterior_log_variance_clipped", to_torch(np.log(np.maximum(posterior_variance, 1e-20))), ) self.register_buffer( "posterior_mean_coef1", to_torch(betas * np.sqrt(alphas_cumprod_prev) / (1.0 - alphas_cumprod)), ) self.register_buffer( "posterior_mean_coef2", to_torch( (1.0 - alphas_cumprod_prev) * np.sqrt(alphas) / (1.0 - alphas_cumprod) ), ) if self.parameterization == "eps": lvlb_weights = self.betas**2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod) ) elif self.parameterization == "x0": lvlb_weights = ( 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2.0 * 1 - torch.Tensor(alphas_cumprod)) ) else: raise NotImplementedError("mu not supported") # TODO how to choose this term lvlb_weights[0] = lvlb_weights[1] self.register_buffer("lvlb_weights", lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) if self.make_it_fit: n_params = len( [ name for name, _ in itertools.chain( self.named_parameters(), self.named_buffers() ) ] ) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params, ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[ i % old_shape[0], j % old_shape[1] ] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = ( self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(sd, strict=False) ) print( f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys" ) if len(missing) > 0: print(f"Missing Keys: {missing}") if len(unexpected) > 0: print(f"Unexpected Keys: {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. 
:return: A tuple (mean, variance, log_variance), all of x_start's shape. """
mean = extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
5
2023-12-13 21:09:37+00:00
16k
allenai/unified-io-2
t5x/train.py
[ { "identifier": "PackingStrategy", "path": "t5x/examples/unified_io/packing.py", "snippet": "class PackingStrategy:\n \"\"\"Defines how to pack data during training and handles batch-level constraints\n from the input/target encoders\"\"\"\n\n pack_max_len: Optional[Tuple[int, int]] = None\n \"\"\"If packing, max input/target length to to\"\"\"\n\n pack_pool_size: int = 10\n \"\"\"Pool to use when packing examples\"\"\"\n\n constraint_pool_size: int = 10\n \"\"\"Pool to use when matching batch constraints\"\"\"\n\n max_to_pack: int = 2\n \"\"\"Max examples to pack together\"\"\"\n\n @property\n def pack(self):\n return self.pack_max_len is not None\n\n def batch(self, ds, batch_size, drop_remainder=True, batch_constraints=None):\n if batch_constraints is None:\n batch_constraints = []\n for k, v in get_input_modalities().items():\n bound = v.get_constraints()\n if bound is not None:\n def _fn(ex):\n mask = tf.cast(ex[f\"inputs/{k}/mask\"], tf.bool)\n return tf.reduce_sum(tf.cast(tf.reduce_any(mask, -1), tf.int32))\n batch_bound = int(round(bound * batch_size))\n if self.pack_max_len is None:\n logging.info(f\"Adding batch constraint {k}/{bound} => \"\n f\"({batch_bound} per batch of {batch_size})\")\n else:\n bound *= self.max_to_pack\n logging.info(f\"Adding batch constraint {k}/{bound} => \"\n f\"({batch_bound} per batch of {batch_size} groups of {self.max_to_pack})\")\n batch_constraints.append((_fn, batch_bound))\n if self.pack:\n enc, dec = self.pack_max_len\n if self.max_to_pack == 2:\n ds = pair_examples(ds, enc, dec, self.pack_pool_size)\n else:\n raise NotImplementedError()\n if batch_constraints:\n ds = batch_with_constraints(ds, batch_size, self.constraint_pool_size, batch_constraints)\n else:\n ds = ds.batch(batch_size, drop_remainder=drop_remainder)\n return unfold(ds, batch_size, n=self.max_to_pack)\n else:\n if batch_constraints:\n return batch_with_constraints(ds, batch_size, self.constraint_pool_size, batch_constraints)\n else:\n return ds.batch(batch_size, drop_remainder=drop_remainder)" }, { "identifier": "checkpoints", "path": "t5x/checkpoints.py", "snippet": "VERSION = 3\n_DESIRED_CHUNK_SIZE_BYTES = 64 * 1024 * 1024\n_TRAIN_DS_PREFIX = 'train_ds'\n_OPTIMIZER_KEY = 'optimizer'\n_VERSION_KEY = 'version'\n_CHECKPOINTS_SUBDIR = 'checkpoints'\n_STATE_KEY = 'state'\n_DATASET_KEY = 'dataset'\n_FLAX_CHECKPOINT_FILE = 'checkpoint'\ndef _choose_chunk_shape(write_shape: Sequence[int],\n target_elements: int) -> List[int]:\n def get_total_elements():\ndef _run_future_tree(future_tree):\ndef all_steps(checkpoints_dir: str) -> Sequence[int]:\ndef all_dataset_checkpoint_steps(checkpoints_dir: str) -> Sequence[int]:\ndef latest_step(checkpoints_dir: str) -> Optional[int]:\ndef _get_local_data(x):\ndef _sync_global_devices(name: str) -> None:\ndef get_checkpoint_dir(checkpoints_dir: str, step: int) -> str:\ndef _cast(target: PyTreeDef, dtype: jnp.dtype):\n def maybe_cast(x):\ndef _update_ts_path_from_relative_to_absolute(\n ckpt_dir: str, ts_spec_dict: MutableMapping[str, Any]):\ndef _maybe_update_ts_from_file_to_gcs(ckpt_contents):\n def _gfile_to_gcs_driver(arr_or_ts_spec_dict):\n def _is_leaf(value):\ndef _maybe_update_ts_from_gcs_to_file(ckpt_contents):\n def _gcs_to_file_driver(arr_or_ts_spec_dict):\n def _is_leaf(value):\n def __init__(self, num_bytes):\n async def wait_for_bytes(self, n_bytes):\n async def return_bytes(self, n_bytes):\n def __call__(self, state_dict: PyTreeDef,\n parameter_infos: PyTreeDef) -> Tuple[PyTreeDef, PyTreeDef]:\n def __call__(self,\n state_dict: 
PyTreeDef,\n target_state_dict: PyTreeDef,\n *,\n is_resuming: bool = False) -> PyTreeDef:\n def __init__(self, dataset_iterator: tf.data.Iterator):\n def save(self, filename: str):\n def load(self, filename: str):\n def __init__(self,\n train_state: train_state_lib.TrainState,\n partitioner: partitioning.BasePartitioner,\n checkpoints_dir: str,\n dataset_iterator: Optional[\n Union[tf.data.Iterator,\n clu.data.dataset_iterator.DatasetIterator]] = None,\n *,\n keep: Optional[int] = None,\n save_dtype: jnp.dtype = np.float32,\n restore_dtype: Optional[jnp.dtype] = None,\n use_gda: Optional[bool] = True,\n keep_dataset_checkpoints: Optional[int] = None):\n def _get_state_dict_for_save(\n self,\n state_dict: Dict[str, Any],\n lazy_load: bool = True) -> MutableMapping[str, Any]:\n def _lazy_load_device_array(arr):\n def _get_parameter_infos(self):\n def _get_param_info(name: str, arr: Any, axes: partitioning.PartitionSpec):\n def _get_checkpoint_dir(self, step: int) -> str:\n def all_steps(self) -> Sequence[int]:\n def all_dataset_checkpoint_steps(self) -> Sequence[int]:\n def latest_step(self) -> Optional[int]:\n def _remove_old_dataset_checkpoints(self):\n def _remove_old_checkpoints(self):\n def save(self,\n train_state: train_state_lib.TrainState,\n state_transformation_fns: Sequence[SaveStateTransformationFn] = (),\n *,\n concurrent_gb: int = 128):\n def _write_state_to_tensorstore(\n self,\n ckpt_dir: str,\n train_state: train_state_lib.TrainState,\n concurrent_gb: int,\n state_transformation_fns: Sequence[SaveStateTransformationFn],\n ) -> Mapping[str, Any]:\n async def _write_array(maybe_arr: Any,\n param_info: Optional[_ParameterInfo],\n cast: bool = False):\n def _cast_arr_if_not_partitioned(maybe_arr, param_info):\n def _transform_state_and_infos(\n self,\n state_dict: PyTreeDef,\n parameter_infos: PyTreeDef,\n state_transformation_fns: Sequence[SaveStateTransformationFn],\n ) -> Tuple[PyTreeDef, PyTreeDef]:\n def restore(\n self,\n step: Optional[int] = None,\n path: Optional[str] = None,\n state_transformation_fns: Sequence[RestoreStateTransformationFn] = (),\n fallback_state: Optional[Mapping[str, Any]] = None,\n lazy_parameters: bool = False) -> train_state_lib.TrainState:\n def _restore_train_state(\n self,\n state_dict: optimizers.OptimizerStateType) -> train_state_lib.TrainState:\n def _create_lazy_awaitable_array(\n self, param_info: _ParameterInfo, maybe_ts_spec: Any, ckpt_path: str,\n restore_dtype: Optional[jnp.dtype]) -> LazyAwaitableArray:\n async def get_fn():\n def _read_state_from_tensorstore(\n self,\n ckpt_path: str,\n written_state_dict: Mapping[str, Any],\n restore_parameter_infos: Optional[Mapping[str, Any]] = None,\n lazy_parameters: bool = False,\n ) -> Mapping[str, Any]:\n def restore_from_tf_checkpoint(\n self,\n path_or_dir: str,\n strict: bool = True,\n translator: Optional[checkpoint_importer.CheckpointTranslator] = None\n ) -> train_state_lib.TrainState:\n def _partition_parameter(maybe_arr: Any, param_info: _ParameterInfo):\n def convert_from_tf_checkpoint(\n self,\n path_or_dir: str,\n *,\n state_transformation_fns: Sequence[SaveStateTransformationFn] = (),\n concurrent_gb: int = 16,\n translator: Optional[checkpoint_importer.CheckpointTranslator] = None):\n def _get_optimizer_state_dict(\n self, ckpt_contents: PyTreeDef,\n state_transformation_fns: Sequence[RestoreStateTransformationFn]):\n def __call__(self,\n train_state: train_state_lib.TrainState,\n partitioner: partitioning.BasePartitioner,\n checkpoints_dir: str,\n dataset_iterator: 
Optional[tf.data.Iterator] = None,\n *,\n keep: Optional[int] = None,\n save_dtype: jnp.dtype = np.float32,\n restore_dtype: Optional[jnp.dtype] = None,\n use_gda: Optional[bool] = False,\n keep_dataset_checkpoints: Optional[int] = None) -> Checkpointer:\n def __init__(self,\n train_state: train_state_lib.TrainState,\n partitioner: partitioning.BasePartitioner,\n checkpoints_dir: str,\n dataset_iterator: Optional[tf.data.Iterator] = None,\n *,\n keep: Optional[int] = None,\n save_dtype: jnp.dtype = np.float32,\n restore_dtype: Optional[jnp.dtype] = None,\n metric_name_to_monitor: str = 'train/accuracy',\n metric_mode: str = 'max',\n keep_checkpoints_without_metrics: bool = True,\n force_keep_period: Optional[int] = None,\n use_gda: bool = False,\n keep_dataset_checkpoints: Optional[int] = None):\n def _populate_metrics_for_steps(self,\n steps: Iterable[int]) -> Mapping[int, float]:\n def _try_fill_metric_run_and_tag_names(self, run_keys: Iterable[str]) -> bool:\n def _filter_out_force_keep_period_steps(self, existing_steps):\n def _remove_old_checkpoints(self):\ndef _get_optimizer_state_dict(\n ckpt_contents: PyTreeDef, optimizer_state: Mapping[str, Any],\n state_transformation_fns: Sequence[RestoreStateTransformationFn]):\ndef _transform_state_and_infos(\n state_dict: PyTreeDef,\n parameter_infos: PyTreeDef,\n state_transformation_fns: Sequence[SaveStateTransformationFn],\n) -> Tuple[PyTreeDef, PyTreeDef]:\nasync def _read_ts(param_info: _ParameterInfo,\n maybe_tspec: Any,\n ckpt_path: str,\n restore_dtype: Optional[jnp.dtype] = None,\n mesh: Optional[gda_lib.Shape] = None,\n axes: Optional[gda_lib.MeshAxes] = None):\ndef fake_param_info(maybe_tspec: Any) -> Optional[_ParameterInfo]:\ndef find_checkpoint(path: str, step: Optional[int] = None) -> str:\ndef load_t5x_checkpoint(\n path: str,\n step: Optional[int] = None,\n state_transformation_fns: Sequence[RestoreStateTransformationFn] = (),\n remap: bool = True,\n restore_dtype: Optional[jnp.dtype] = None,\n lazy_parameters: bool = False) -> PyTreeDef:\n def _create_lazy_awaitable_array(\n param_info: _ParameterInfo, maybe_ts_spec: Any, ckpt_path: str,\n restore_dtype: Optional[jnp.dtype]) -> LazyAwaitableArray:\ndef _transforms_from_state_transformation_fns(\n state_transformation_fns: Sequence[SaveStateTransformationFn]):\n def __init__(self, checkpoint_filename: str):\n def save(self, directory: epath.Path, item: tf.data.Iterator):\n def restore(self,\n directory: epath.Path,\n item: Optional[tf.data.Iterator] = None) -> tf.data.Iterator:\n def structure(self, directory: epath.Path) -> Any:\n def structure(self, directory: epath.Path) -> PyTreeDef:\n def tensorstore_spec_to_name(leaf):\n def save(self,\n directory: Union[str, epath.Path],\n item: Any,\n *args,\n force: bool = False,\n tmp_directory: Optional[Union[str, epath.Path]] = None,\n **kwargs):\n def __init__(self,\n directory: str,\n train_state_shape: train_state_lib.TrainState,\n partitioner: partitioning.BasePartitioner,\n dataset_iterator: Optional[tf.data.Iterator] = None,\n save_dtype: Optional[jnp.dtype] = None,\n restore_dtype: Optional[jnp.dtype] = None,\n keep: Optional[int] = None,\n keep_dataset_checkpoints: Optional[int] = None):\n def all_steps(self) -> Sequence[int]:\n def _parameter_infos(self,\n train_state: train_state_lib.TrainState) -> PyTreeDef:\n def _get_save_directory(self,\n step: int,\n directory: epath.Path,\n key_name: Optional[str] = None) -> epath.Path:\n def save(self,\n train_state: train_state_lib.TrainState,\n state_transformation_fns: 
Sequence[SaveStateTransformationFn] = ()):\n def _save_args(param_info):\n def restore(\n self,\n step: int,\n fallback_state: Optional[Mapping[str, Any]] = None,\n state_transformation_fns: Sequence[SaveStateTransformationFn] = (),\n lazy_parameters: Optional[bool] = False) -> train_state_lib.TrainState:\n def _restore_args(param_info):\n def _add_checkpoint_info(self, step, metrics):\nclass _ParameterInfo:\nclass _BytesConditionVariable(object):\nclass SaveStateTransformationFn(typing_extensions.Protocol):\nclass RestoreStateTransformationFn(typing_extensions.Protocol):\nclass _TfDataCheckpointer:\nclass Checkpointer(object):\nclass CheckpointerConstructor(typing_extensions.Protocol):\nclass SaveBestCheckpointer(Checkpointer):\nclass _OrbaxParamInfo:\nclass DatasetCheckpointHandler(orbax.checkpoint.CheckpointHandler):\nclass TrainStateCheckpointHandler(orbax.checkpoint.PyTreeCheckpointHandler):\nclass NonAtomicCheckpointer(orbax.checkpoint.Checkpointer):\nclass CheckpointManager(orbax.checkpoint.CheckpointManager):" }, { "identifier": "eval", "path": "t5x/eval.py", "snippet": "_DEFAULT_GIN_SEARCH_PATHS = [\n os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n]\n FLAGS = flags.FLAGS\nclass SummarizeConfigFn(Protocol):\nclass InferenceEvaluator:\n def __call__(self, model_dir: str,\n summary_writer: Optional[metric_writers.SummaryWriter],\n step: int) -> None:\n def __init__(\n self,\n infer_eval_dataset_cfg: utils.DatasetConfig,\n inference_evaluator_cls: utils.EvaluatorConstructor,\n model: models.BaseModel,\n partitioner: partitioning.BasePartitioner,\n log_dir: Optional[str] = None,\n verify_matching_vocabs_fn: Optional[\n Callable[[utils.DatasetConfig, models.BaseModel], None]]=None,\n ):\n def model_feature_shapes(self) -> Mapping[str, Tuple[int, ...]]:\n def eval_tasks(self) -> Sequence[seqio.Task]:\n def close(self):\n def evaluate(\n self,\n train_state: train_state_lib.TrainState,\n train_state_axes: train_state_lib.TrainState,\n ) -> seqio.evaluation.AllMetricsFuture:\ndef evaluate(\n *,\n model: models.BaseTransformerModel,\n dataset_cfg: utils.DatasetConfig,\n restore_checkpoint_cfg: utils.RestoreCheckpointConfig,\n partitioner: partitioning.BasePartitioner,\n output_dir: str,\n inference_evaluator_cls: utils.EvaluatorConstructor = UnifiedIOEvaluator,\n use_wandb = True,\n summarize_config_fn: SummarizeConfigFn = gin_utils.summarize_gin_config,\n train_state_initializer_cls: Type[\n utils.TrainStateInitializer] = utils.TrainStateInitializer,\n fallback_init_rng: Optional[int] = None,\n log_only=False\n):\n def main(argv: Sequence[str]):\n def _main(argv: Sequence[str]):" }, { "identifier": "models", "path": "t5x/models.py", "snippet": "class TokensIdsToLogitsCallable(typing_extensions.Protocol):\nclass DecodeFnCallable(typing_extensions.Protocol):\nclass BaseModel(abc.ABC):\nclass BaseTransformerModel(BaseModel):\nclass EncoderDecoderModel(BaseTransformerModel):\nclass DecoderOnlyModel(BaseTransformerModel):\n def __call__(\n self, decoding_state: decoding.DecodingState\n ) -> Tuple[jnp.ndarray, Mapping[str, jnp.ndarray]]:\n def __call__(self, *, inputs: jnp.ndarray, cache: Mapping[str, jnp.ndarray],\n tokens_to_logits: TokensIdsToLogitsCallable, eos_id: int,\n num_decodes: int, decode_rng: Optional[jax.random.KeyArray],\n cache_offset: int, **kwargs) -> Tuple[jnp.ndarray, jnp.ndarray]:\n def __init__(self, optimizer_def: optimizers.OptimizerDefType):\n def loss_fn(\n self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n dropout_rng: 
Optional[jax.random.KeyArray],\n ) -> Tuple[jnp.ndarray, MetricsMap]:\n def eval_fn(\n self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n ) -> Tuple[jnp.ndarray, MetricsMap]:\n def predict_batch(self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n rng: Optional[jax.random.KeyArray] = None) -> jnp.ndarray:\n def predict_batch_with_aux(\n self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n rng: Optional[jax.random.KeyArray] = None,\n ) -> Tuple[jnp.ndarray, Mapping[str, jnp.ndarray]]:\n def score_batch(self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n return_intermediates: bool = False) -> jnp.ndarray:\n def get_initial_variables(\n self,\n rng: jax.random.KeyArray,\n input_shapes: Mapping[str, Array],\n input_types: Optional[Mapping[str, jnp.dtype]] = None\n ) -> flax_scope.FrozenVariableDict:\n def __init__(\n self,\n module: nn.Module,\n input_vocabulary: seqio.Vocabulary,\n output_vocabulary: seqio.Vocabulary,\n optimizer_def: optimizers.OptimizerDefType,\n decode_fn: Optional[DecodeFnCallable] = None,\n label_smoothing: float = 0.0,\n z_loss: float = 0.0,\n loss_normalizing_factor: Optional[Union[\n float, int, str, losses.SpecialLossNormalizingFactor]] = None,\n ):\n def input_vocabulary(self):\n def output_vocabulary(self):\n def decode_fn(self):\n def _compute_logits(\n self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n dropout_rng: Optional[jax.random.KeyArray] = None) -> jnp.ndarray:\n def loss_fn(\n self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n dropout_rng: Optional[jax.random.KeyArray],\n ) -> Tuple[jnp.ndarray, MetricsMap]:\n def _compute_metrics(\n self,\n logits: jnp.ndarray,\n targets: jnp.ndarray,\n mask: jnp.ndarray,\n loss: jnp.ndarray,\n z_loss: Optional[jnp.ndarray] = None,\n segment_ids: Optional[Mapping[str, jnp.ndarray]] = None,\n ) -> MetricsMap:\n def __init__(\n self,\n module: nn.Module,\n input_vocabulary: seqio.Vocabulary,\n output_vocabulary: seqio.Vocabulary,\n optimizer_def: optimizers.OptimizerDefType,\n decode_fn: DecodeFnCallable = decoding.beam_search,\n feature_converter_cls: Optional[Callable[...,\n seqio.FeatureConverter]] = None,\n label_smoothing: float = 0.0,\n z_loss: float = 0.0,\n loss_normalizing_factor: Optional[float] = None,\n ):\n def get_initial_variables(\n self,\n rng: jax.random.KeyArray,\n input_shapes: Mapping[str, Array],\n input_types: Optional[Mapping[str, jnp.dtype]] = None\n ) -> flax_scope.FrozenVariableDict:\n def _compute_logits(\n self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n dropout_rng: Optional[jax.random.KeyArray] = None,\n mutable: flax_scope.CollectionFilter = False,\n other_variables: Optional[PyTreeDef] = None,\n ) -> Union[jnp.ndarray, Tuple[jnp.ndarray, flax_scope.FrozenVariableDict]]:\n def _compute_logits_from_slice(\n self, decoding_state: decoding.DecodingState, params: PyTreeDef,\n encoded_inputs: jnp.ndarray, raw_inputs: jnp.ndarray,\n max_decode_length: int) -> Tuple[jnp.ndarray, Mapping[str, jnp.ndarray]]:\n def predict_batch_with_aux(\n self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n rng: Optional[jax.random.KeyArray] = None,\n decoder_params: Optional[MutableMapping[str, Any]] = None,\n return_all_decodes: bool = False,\n num_decodes: int = 1,\n prompt_with_targets: bool = False\n ) -> Tuple[jnp.ndarray, Mapping[str, jnp.ndarray]]:\n def score_batch(\n self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n return_intermediates: bool = False,\n ) -> Union[jnp.ndarray, 
Tuple[jnp.ndarray, Mapping[str, Any]]]:\n def __init__(\n self,\n module: nn.Module,\n vocabulary: seqio.Vocabulary,\n optimizer_def: optimizers.OptimizerDefType,\n decode_fn: DecodeFnCallable = decoding.temperature_sample,\n inputs_bidirectional_attention: bool = False,\n feature_converter_cls: Optional[Callable[...,\n seqio.FeatureConverter]] = None,\n label_smoothing: float = 0.0,\n z_loss: float = 0.0,\n loss_normalizing_factor: Optional[float] = None,\n ):\n def get_initial_variables(\n self,\n rng: jax.random.KeyArray,\n input_shapes: Mapping[str, Array],\n input_types: Optional[Mapping[str, jnp.dtype]] = None\n ) -> flax_scope.FrozenVariableDict:\n def _get_decoder_causal_attention(self, batch):\n def _compute_logits(\n self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n dropout_rng: Optional[jax.random.KeyArray] = None,\n mutable: flax_scope.CollectionFilter = False) -> jnp.ndarray:\n def _compute_logits_from_slice(\n self,\n decoding_state: decoding.DecodingState,\n params: PyTreeDef,\n max_decode_length: int,\n ) -> Tuple[jnp.ndarray, Mapping[str, jnp.ndarray]]:\n def score_batch(self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n return_intermediates: bool = False) -> jnp.ndarray:\n def _compute_kv_cache(\n self,\n params: PyTreeDef,\n inputs: jnp.ndarray,\n inputs_lengths: jnp.ndarray,\n decoder_causal_attention: jnp.ndarray,\n ) -> PyTreeDef:\n def predict_batch_with_aux(\n self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n rng: Optional[jax.random.KeyArray] = None,\n *,\n return_all_decodes: bool = False,\n num_decodes: int = 1,\n decoder_params: Optional[MutableMapping[str, Any]] = None,\n ) -> Tuple[jnp.ndarray, Mapping[str, jnp.ndarray]]:\ndef remove_prefix(sequence: jnp.ndarray,\n prefix_length: jnp.ndarray) -> jnp.ndarray:\ndef compute_weighted_accuracy(\n logits: jnp.ndarray,\n targets: jnp.ndarray,\n weights: Optional[jnp.ndarray] = None) -> Tuple[jnp.ndarray, jnp.ndarray]:\ndef compute_metrics(logits: jnp.ndarray, targets: jnp.ndarray,\n weights: jnp.ndarray, loss: jnp.ndarray,\n weight_sum: jnp.ndarray,\n additional_metrics: MetricsMap) -> MetricsMap:\ndef compute_base_metrics(\n logits: jnp.ndarray,\n targets: jnp.ndarray,\n mask: jnp.ndarray,\n loss: jnp.ndarray,\n z_loss: Optional[jnp.ndarray] = None,\n segment_ids: Optional[Mapping[str, jnp.ndarray]] = None,\n) -> MetricsMap:\ndef get_input_vocabulary(model: BaseTransformerModel) -> seqio.Vocabulary:\ndef get_output_vocabulary(model: BaseTransformerModel) -> seqio.Vocabulary:\n FEATURE_CONVERTER_CLS: Callable[..., seqio.FeatureConverter]\n FEATURE_CONVERTER_CLS = seqio.EncDecFeatureConverter\n FEATURE_CONVERTER_CLS = seqio.DecoderFeatureConverter" }, { "identifier": "evaluator", "path": "t5x/examples/unified_io/evaluator.py", "snippet": "class UnifiedIOOutput:\nclass UnifiedIOEvaluator(seqio.Evaluator):\n def text(self):\n def text_tokens(self):\n def image_tokens(self):\n def image(self):\n def audio(self):\n def scores(self):\ndef build_uio_outputs(aux_values, vocab) -> List[UnifiedIOOutput]:\n def __init__(\n self,\n mixture_or_task_name: str,\n feature_converter,\n eval_split: str = \"validation\",\n use_cached: bool = False,\n seed: Optional[int] = 42,\n sequence_length: Optional[Mapping[str, int]] = None,\n num_examples: Optional[int] = None,\n shuffle: bool = False,\n logger_cls: Sequence = (),\n log_dir: Optional[str] = None,\n use_memory_cache: bool = True,\n target_field_name: str = \"targets\",\n ):\n def _compute_metrics(self,\n predicted_tokens: 
AllOutputTokensType,\n scores: AllOutputScoresType,\n all_aux_values: AllOutputAuxValuesType,\n step: Optional[int] = None) -> AllMetricsType:" }, { "identifier": "partitioning", "path": "t5x/partitioning.py", "snippet": "class AxisNames(tuple):\nclass LocalChunkInfo:\nclass LocalChunker:\nclass DataLayout:\nclass BasePartitioner(metaclass=abc.ABCMeta):\nclass PjittedFnWithContext(PartitionedCallable):\nclass BasePjitPartitioner(BasePartitioner):\nclass PjitPartitioner(BasePjitPartitioner):\n def __new__(cls, *names):\n def __repr__(self):\ndef pjit(\n fun: Callable, # pylint: disable=g-bare-generic\n in_axis_resources,\n out_axis_resources,\n static_argnums: Union[int, Sequence[int]] = (),\n donate_argnums: Union[int, Sequence[int]] = (),\n backend: Optional[str] = None):\ndef pjit_with_cpu_fallback(\n fun: Callable, # pylint: disable=g-bare-generic\n in_axis_resources,\n out_axis_resources,\n static_argnums: Union[int, Sequence[int]] = (),\n donate_argnums: Union[int, Sequence[int]] = (),\n backend: Optional[str] = None):\ndef with_sharding_constraint(x, axis_resources):\ndef bounds_from_last_device(\n last_device: jax.lib.xla_client.Device) -> HardwareMesh:\ndef get_coords(device: jax.lib.xla_client.Device) -> HardwareMesh:\ndef global_mesh_defined():\ndef get_mesh(model_parallel_submesh: HardwareMesh,\n input_devices: Sequence[JaxDevice] = (),\n input_local_devices: Sequence[JaxDevice] = (),\n tile_by_host_if_needed: bool = True,\n backend: Optional[str] = None) -> Mesh:\n def dh_dd_mh_md(g: int, m: int, l: int) -> Tuple[int, int, int, int]:\ndef get_cpu_mesh() -> Mesh:\ndef get_gpu_mesh(num_partitions: int) -> Mesh:\ndef default_mesh(num_partitions: int,\n model_parallel_submesh: Optional[HardwareMesh] = None,\n backend: Optional[str] = None) -> Mesh:\n def __init__(self, global_mesh: Mesh):\n def get_local_chunk_info(\n self, global_shape: Tuple[int, ...],\n mesh_axes: Sequence[Optional[str]]) -> LocalChunkInfo:\ndef standard_logical_axis_rules(\n activation_partitioning_dims: int = 1,\n parameter_partitioning_dims: int = 1,\n additional_rules: Optional[LogicalAxisRules] = None) -> LogicalAxisRules:\ndef _id_fn(x, ix):\n def __init__(self,\n num_partitions: Optional[int] = None,\n model_parallel_submesh: Optional[HardwareMesh] = None,\n params_on_devices: bool = True,\n backend: Optional[str] = None):\n def mesh(self) -> Mesh:\n def data_partition_spec(self) -> PartitionSpec:\n def get_data_layout(self,\n batch_size: Optional[int] = None,\n host_index: Optional[int] = None) -> DataLayout:\n def get_local_chunk_info(\n self, global_shape: Tuple[int, ...],\n mesh_axes: Sequence[Optional[str]]) -> LocalChunkInfo:\n def params_on_devices(self):\n def move_params_to_devices(self, train_state: TrainState,\n train_state_axes: TrainState) -> TrainState:\n def _local_chunker(self):\n def get_logical_axes(self, train_state: TrainState) -> TrainState:\n def get_mesh_axes(self, train_state: TrainState) -> TrainState:\n def partition(\n self,\n fn: Callable, # pylint: disable=g-bare-generic\n in_axis_resources,\n out_axis_resources,\n static_argnums: Union[int, Sequence[int]] = (),\n donate_argnums: Union[int, Sequence[int]] = ()\n ) -> PartitionedCallable:\n def compile(self, partitioned_fn: PartitionedCallable,\n *args) -> CompiledPartitionedCallable:\n def __init__(self,\n pjitted_fn,\n partition_mesh: Mesh,\n logical_axis_rules: flax_partitioning.LogicalRules = ()):\n def __call__(self, *args):\n def lower(self, *args):\n def _local_chunker(self) -> LocalChunker:\n def mesh(self) -> Mesh:\n 
def partition(\n self,\n fn: Callable, # pylint: disable=g-bare-generic\n in_axis_resources,\n out_axis_resources,\n static_argnums: Union[int, Sequence[int]] = (),\n donate_argnums: Union[int, Sequence[int]] = ()\n ) -> PjittedFnWithContext:\n def compile(self, partitioned_fn: PjittedFnWithContext,\n *args) -> CompiledPartitionedCallable:\n def __init__(self,\n num_partitions: Optional[int] = None,\n model_parallel_submesh: Optional[HardwareMesh] = None,\n params_on_devices: bool = True,\n backend: Optional[str] = None,\n logical_axis_rules: Optional[LogicalAxisRules] = None,\n use_cpu_pjit: Optional[bool] = False):\n def partition(\n self,\n fn: Callable, # pylint: disable=g-bare-generic\n in_axis_resources,\n out_axis_resources,\n static_argnums: Union[int, Sequence[int]] = (),\n donate_argnums: Union[int, Sequence[int]] = ()\n ) -> PjittedFnWithContext:\n def logical_axis_rules(self):\n def get_logical_axes(self, train_state: TrainState) -> TrainState:\n def get_mesh_axes(self, train_state: TrainState) -> TrainState:\n def _logical_to_mesh_axes(param_name, logical_axes):" }, { "identifier": "train_state", "path": "t5x/train_state.py", "snippet": "EMPTY_DICT = flax.core.freeze({})\nclass TrainState(typing_extensions.Protocol):\nclass FlaxOptimTrainState(flax.struct.PyTreeNode):\nclass GanOptimTrainState(FlaxOptimTrainState):\nclass InferenceState(flax.struct.PyTreeNode):\n def step(self) -> jnp.ndarray:\n def params(self) -> FrozenVariableDict:\n def param_states(self) -> FrozenVariableDict:\n def flax_mutables(self) -> FrozenVariableDict:\n def state_dict(self) -> MutableVariableDict:\n def restore_state(self, state_dict: Mapping[str, Any]) -> 'TrainState':\n def replace_params(self, params: VariableDict) -> 'TrainState':\n def replace_flax_mutables(self, flax_mutables: FrozenDict) -> 'TrainState':\n def replace_step(self, step: jnp.ndarray) -> 'TrainState':\n def apply_gradient(self,\n grads,\n learning_rate,\n flax_mutables=EMPTY_DICT) -> 'TrainState':\n def as_logical_axes(self) -> 'TrainState':\ndef _validate_params_axes(params_axes, params):\ndef _split_variables_and_axes(\n variables_and_axes: FrozenVariableDict\n) -> Tuple[FrozenVariableDict, FrozenVariableDict]:\n def create(cls, optimizer_def: optimizers.OptimizerDefType,\n model_variables: FrozenVariableDict) -> 'FlaxOptimTrainState':\n def step(self) -> jnp.ndarray:\n def params(self) -> FrozenVariableDict:\n def param_states(self) -> FrozenVariableDict:\n def state_dict(self) -> MutableVariableDict:\n def apply_gradient(self,\n grads,\n learning_rate,\n flax_mutables=EMPTY_DICT) -> 'FlaxOptimTrainState':\n def replace_params(self, params: VariableDict) -> 'FlaxOptimTrainState':\n def replace_flax_mutables(self,\n flax_mutables: FrozenDict) -> 'FlaxOptimTrainState':\n def replace_step(self, step: jnp.ndarray) -> 'FlaxOptimTrainState':\n def restore_state(self, state_dict: VariableDict) -> 'FlaxOptimTrainState':\n def as_logical_axes(self) -> 'FlaxOptimTrainState':\n def apply_gradient(self,\n grads,\n learning_rate,\n flax_mutables=EMPTY_DICT) -> 'FlaxOptimTrainState':\n def create(cls, model_variables: FrozenVariableDict) -> 'InferenceState':\n def param_states(self) -> FrozenVariableDict:\n def apply_gradient(self, *args, **kwargs) -> 'InferenceState':\n def state_dict(self) -> MutableMapping[str, Any]:\n def replace_step(self, step: jnp.ndarray) -> 'InferenceState':\n def replace_params(self, params: FrozenVariableDict) -> 'InferenceState':\n def replace_flax_mutables(self,\n flax_mutables: FrozenDict) -> 
'InferenceState':\n def restore_state(self, state_dict: Mapping[str, Any]) -> 'InferenceState':\n def as_logical_axes(self) -> 'InferenceState':" }, { "identifier": "trainer", "path": "t5x/trainer.py", "snippet": "def _merge_metrics(a, b):\ndef merge_metrics(a, b):\n def result(self) -> Mapping[str, Array]:\n def result(self) -> Mapping[str, clu.values.Value]:\n def result(self) -> float:\n def __call__(\n self,\n step: jnp.ndarray,\n ) -> jnp.ndarray:\n def __call__(self, metrics: MetricMapType, duration: float,\n num_steps: int) -> Mapping[str, jnp.ndarray]:\n def __call__(\n self, train_state: train_state_lib.TrainState,\n batch: BatchType) -> Tuple[train_state_lib.TrainState, MetricMapType]:\n def __call__(self, train_state: train_state_lib.TrainState,\n batch: jnp.ndarray) -> MetricMapType:\n def compute_metrics(\n self, gradients: ModelWeights,\n old_train_state: train_state_lib.TrainState,\n new_train_state: train_state_lib.TrainState) -> MutableMetricMapType:\n def _make_rms_metrics(name, tree):\n def _make_max_metrics(name, tree):\n def compute_metrics(\n self, gradients: ModelWeights,\n old_train_state: train_state_lib.TrainState,\n new_train_state: train_state_lib.TrainState) -> MutableMetricMapType:\n def __init__(self):\n def close(self):\n def __del__(self):\n def _get_completion_future(self, block_on: PyTreeDef = ()) -> TimeFuture:\n def _get_completion_time():\n def start(self, block_on: PyTreeDef = ()):\n def stop(self, block_on: PyTreeDef = ()) -> TimeFuture:\n def __init__(self, name: str, summary_dir: Optional[str] = None, log_to_wandb=False):\n def __del__(self):\n def close(self):\n def summary_writer(self) -> metric_writers.MetricWriter:\n def write_scalar(self, key: str, val: metric_writers.interface.Scalar,\n step: int):\n def write_scalars(self, step: int,\n scalars: Mapping[str, metric_writers.interface.Scalar]):\n def start_duration_timer(self, block_on: PyTreeDef = ()):\n def write_metrics_summary(self, metrics: MetricMapType, step: int,\n num_steps: int) -> MetricValueMapFuture:\n def _summarize_and_write():\n def _ensure_not_on_device(x):\n def flush(self):\n def __init__(self, model: models.BaseModel,\n train_state: train_state_lib.TrainState,\n partitioner: partitioning.BasePartitioner,\n eval_names: Sequence[str], summary_dir: Optional[str],\n train_state_axes: Any, rng: Rng,\n use_wandb=False, packing_strategy=None, log_weights=None):\n def __enter__(self):\n def __exit__(self, exc_type, exc_value, traceback):\n def close(self):\n def _get_step_rng(self, step: int) -> Rng:\n def train_state(self):\n def train_state(self, train_state: PyTreeDef):\n def _weight_metric_fn(self):\n def _get_weight_metrics_fn(_params):\n def train(self,\n batch_iter: Union[Iterator[BatchType],\n clu.data.dataset_iterator.DatasetIterator],\n num_steps: int,\n start_step: Optional[int] = None) -> ArrayMapFuture:\n def compile_train(self, batch: ElementSpec) -> None:\n def eval(\n self, batch_iters: Mapping[str,\n Iterator[BatchType]], pbar_nsteps=None) -> Mapping[str, Array]:\n def compile_eval(self, batches: Mapping[str, BatchType]) -> None:\n def _partitioned_train_step(self) -> PartitionedTrainCallable:\n def _partitioned_eval_step(self) -> PartitionedEvalCallable:\ndef accumulate_grads_microbatched(\n model: models.BaseModel,\n train_state: train_state_lib.TrainState,\n batch: BatchType,\n dropout_rng: Rng,\n num_microbatches: Optional[int],\n data_partition_spec: PartitionSpec = PartitionSpec(\"data\"),\n loss_fn_args=None\n) -> Tuple[train_state_lib.TrainState, 
MutableMetricMapType,\n def get_microbatch(batch: BatchType, idx: int) -> Mapping[str, jnp.ndarray]:\n def metrics_and_grad(loop_cnt, dropout_rng, flax_mutables=None):\n def per_microbatch_train_step(\n loop_cnt: int, state: Tuple[jnp.ndarray, jnp.ndarray,\n Mapping[str, jnp.ndarray],\n Optional[FlaxMutables]]\n ) -> Tuple[jnp.ndarray, jnp.ndarray, Mapping[str, jnp.ndarray],\ndef apply_grads(\n train_state: train_state_lib.TrainState,\n grad_accum: ModelWeights,\n metrics: MutableMetricMapType,\n learning_rate: jnp.ndarray,\n weight_metrics_computer: Optional[WeightMetricsComputer],\n other_state_variables: Optional[Mapping[str, Any]] = None\n) -> Tuple[train_state_lib.TrainState, MetricMapType]:\ndef eval_step(model: models.BaseModel, train_state: train_state_lib.TrainState,\n batch: jnp.ndarray) -> MetricMapType:\ndef train_with_lr(\n train_state: train_state_lib.TrainState,\n batch: BatchType,\n learning_rate: jnp.ndarray,\n dropout_rng: Rng,\n model: models.BaseModel,\n num_microbatches: Optional[int],\n weight_metrics_computer: Optional[WeightMetricsComputer] = None,\n data_partition_spec: PartitionSpec = PartitionSpec(\"data\"),\n loss_fn_args=None\n):\n def __call__(self, model: models.BaseModel,\n train_state: train_state_lib.TrainState,\n partitioner: partitioning.BasePartitioner,\n eval_names: Sequence[str], summary_dir: Optional[str],\n train_state_axes: Any, rng: Rng) -> BaseTrainer:\n def __init__(self,\n model: models.BaseModel,\n train_state: train_state_lib.TrainState,\n partitioner: partitioning.BasePartitioner,\n eval_names: Sequence[str],\n summary_dir: Optional[str],\n train_state_axes: Any,\n rng: Rng,\n learning_rate_fn: LearningRateCallable,\n num_microbatches: Optional[int],\n weight_metrics_computer: Optional[WeightMetricsComputer] = None,\n use_wandb=True,\n packing_strategy=None,\n log_weights=False\n ):\n def _partitioned_train_step(self) -> PartitionedTrainCallable:\n def train_step(train_state: train_state_lib.TrainState, batch: BatchType, static_args=None):\n def _partitioned_eval_step(self) -> PartitionedEvalCallable:\ndef _warn_action_not_run(action, task, metric):\n def run(self, train_state: train_state_lib.TrainState,\n metrics_by_task: Mapping[str, MetricValueMapType]) -> bool:\n def __init__(self,\n metric: Tuple[str, str],\n mode: str,\n patience: int = 3,\n atol: float = 0.,\n rtol: float = 0.):\n def _compare_fn(self, current, previous):\n def run(self, train_state: train_state_lib.TrainState,\n metrics_by_task: Mapping[str, MetricValueMapType]) -> bool:\n def __init__(self, task: str, metric: str = \"loss\"):\n def run(self, train_state: train_state_lib.TrainState,\n metrics_by_task: Mapping[str, MetricValueMapType]) -> bool:\nclass ArrayMapFuture(typing_extensions.Protocol):\nclass MetricValueMapFuture(typing_extensions.Protocol):\nclass TimeFuture(typing_extensions.Protocol):\nclass LearningRateCallable(typing_extensions.Protocol):\nclass SummarizeMetricsCallable(typing_extensions.Protocol):\nclass PartitionedTrainCallable(typing_extensions.Protocol):\nclass PartitionedEvalCallable(typing_extensions.Protocol):\nclass GradNormComputer(object):\nclass WeightMetricsComputer(object):\nclass _AsyncTimer(object):\nclass MetricsManager(object):\nclass PreemptionError(Exception):\nclass BaseTrainer(abc.ABC):\nclass BaseTrainerConstructor(Protocol):\nclass Trainer(BaseTrainer):\nclass ActionMode(enum.Enum):\nclass BaseAction(abc.ABC):\nclass EarlyStoppingAction(BaseAction):\nclass TerminateOnNanAction(BaseAction):\n _WEIGHT_METRICS = [\n \"weight_rms\", 
\"weight_gradient_rms\", \"weight_update_rms\", \"weight_max\"\n ]\n TRAIN = 1\n TRAIN_EVAL = 2\n INFER_EVAL = 3" }, { "identifier": "utils", "path": "t5x/utils.py", "snippet": "class EvaluatorConstructor(typing_extensions.Protocol):\nclass SaveCheckpointConfig:\nclass RestoreCheckpointConfig:\nclass CheckpointConfig:\nclass LegacyCheckpointer(orbax.checkpoint.Checkpointer):\nclass LegacyCheckpointManager(orbax.checkpoint.CheckpointManager):\nclass DatasetConfig:\nclass GDADatasetIterator(clu.data.dataset_iterator.DatasetIterator):\nclass InitFnCallable(typing_extensions.Protocol):\nclass LearningRateCallable(typing_extensions.Protocol):\nclass TrainStateInitializer:\nclass InferStepWithRngCallable(typing_extensions.Protocol):\nclass InferStepWithoutRngCallable(typing_extensions.Protocol):\nclass InferFnCallable(typing_extensions.Protocol):\nclass GetDatasetCallable(typing_extensions.Protocol):\nclass GetEvalDatasetCallable(typing_extensions.Protocol):\nclass _RegexMap(collections.abc.Mapping):\n def __call__(\n self,\n mixture_or_task_name: str,\n feature_converter: seqio.FeatureConverter,\n eval_split: str,\n use_cached: bool,\n seed: Optional[int],\n sequence_length: Optional[Mapping[str, int]],\n log_dir: Optional[str],\n use_memory_cache: bool,\n ) -> seqio.Evaluator:\n def __post_init__(self):\n def __post_init__(self):\n def __init__(self,\n *,\n save_checkpointer: Optional[checkpoints.Checkpointer] = None,\n restore_checkpointer: checkpoints.Checkpointer,\n strict: Optional[bool] = False):\n async def async_save(self, path: str, item: Any):\n async def async_restore(self, path: str, item: Optional[Any] = None) -> Any:\n def save(self,\n path: str,\n item: train_state_lib.TrainState,\n state_transformation_fns: Sequence[\n checkpoints.SaveStateTransformationFn] = (),\n *,\n concurrent_gb: int = 128):\n def restore(self,\n path: str,\n item: Optional[train_state_lib.TrainState],\n state_transformation_fns: Sequence[\n checkpoints.RestoreStateTransformationFn] = (),\n fallback_state: Optional[Mapping[str, Any]] = None,\n lazy_parameters: bool = False) -> train_state_lib.TrainState:\n def __init__(self,\n *,\n save_cfg: Optional[SaveCheckpointConfig] = None,\n restore_cfg: RestoreCheckpointConfig,\n train_state_shape: train_state_lib.TrainState,\n partitioner: partitioning.BasePartitioner,\n ds_iter: Optional[\n Union[tf.data.Iterator,\n clu.data.dataset_iterator.DatasetIterator]] = None,\n model_dir: Optional[str] = None,\n use_gda: Optional[bool] = True):\n def save(self,\n train_state: train_state_lib.TrainState,\n state_transformation_fns: Sequence[\n checkpoints.SaveStateTransformationFn] = ()):\n def restore(\n self,\n paths: Sequence[str],\n restore_cfg: RestoreCheckpointConfig,\n fallback_state: Optional[Mapping[str, Any]] = None\n ) -> Union[train_state_lib.TrainState, Sequence[train_state_lib.TrainState]]:\ndef _get_index_mappings(device_to_idxs):\ndef _create_gda(partitioner: partitioning.BasePartitioner,\n global_shapes: PyTreeDef, host_arrays: PyTreeDef) -> PyTreeDef:\n def _put_to_devices(x, global_shape):\n def _gda(dbs, global_shape):\n def __init__(self, iterator: clu.data.dataset_iterator.DatasetIterator,\n partitioner: partitioning.BasePartitioner,\n global_shapes: PyTreeDef):\n def __next__(self):\n def reset(self):\n def element_spec(self):\n def save(self, filename):\n def restore(self, filename):\n def iterator(self):\ndef sync_global_devices(name: str) -> None:\ndef multihost_assert_equal(input_tree, fail_message: str = ''):\ndef _hardware_uniform(\n rng_key: 
Array,\n shape: Shape,\n dtype: jnp.dtype = np.float32,\n minval: Array = np.float32(0),\n maxval: Array = np.float32(1)\n) -> Array:\ndef _hardware_bernoulli(\n rng_key: Array, p: np.ndarray = np.float32(0.5),\n shape: Shape = ()) -> Array:\ndef set_hardware_rng_ops():\ndef get_zeros_batch_like_spec(\n batch_spec: Mapping[str,\n jax.ShapeDtypeStruct]) -> Mapping[str, jnp.ndarray]:\ndef get_zeros_batch_like_dataset(dataset: tf.data.Dataset,\n batch_size=None) -> Mapping[str, jnp.ndarray]:\n def __call__(\n self, rng: Array, input_shapes: Mapping[str, Array],\n input_types: Optional[Mapping[str,\n DType]]) -> flax_scope.FrozenVariableDict:\n def __call__(self, step: jnp.ndarray) -> jnp.ndarray:\ndef create_learning_rate_scheduler(\n factors: str = 'constant * linear_warmup * rsqrt_decay',\n base_learning_rate: float = 0.5,\n warmup_steps: int = 1000,\n decay_factor: float = 0.5,\n steps_per_decay: int = 20000,\n steps_per_cycle: int = 100000,\n step_offset: int = 0,\n min_learning_rate: float = 1e-8) -> LearningRateCallable:\n def step_fn(step: jnp.ndarray) -> jnp.ndarray:\ndef steps(prefix, config, data_size=None, batch_size=None, default=ValueError):\ndef create_vision_learning_rate_scheduler(\n total_steps, batch_size=None, data_size=None,\n base=1.0, decay_type=\"stair\",\n scale_with_batchsize=False, **kw):\n def step_fn(step):\ndef get_first_valid_restore_config_and_paths(\n restore_cfgs: Sequence[RestoreCheckpointConfig]\n) -> Tuple[Optional[RestoreCheckpointConfig], Sequence[str]]:\ndef get_fallback_state(restore_cfg: RestoreCheckpointConfig,\n init_fn: Callable[[jnp.ndarray], Mapping[str, Any]],\n init_rng: jnp.ndarray) -> Optional[Mapping[str, Any]]:\n def __init__(self,\n optimizer_def: Optional[optimizers.OptimizerDefType],\n init_fn: InitFnCallable,\n input_shapes: Mapping[str, Array],\n partitioner: partitioning.BasePartitioner,\n model=None,\n input_types: Optional[Mapping[str, DType]] = None):\n def initialize_train_state(rng: Array):\n def from_scratch(self, init_rng: Array) -> train_state_lib.TrainState:\n def from_checkpoints(\n self,\n restore_cfgs: Sequence[RestoreCheckpointConfig],\n ds_iter: Optional[tf.data.Iterator] = None,\n init_rng: Optional[jnp.ndarray] = None,\n ) -> Iterable[train_state_lib.TrainState]:\n def _restore_path(path, cfg):\n def from_checkpoint(\n self,\n ckpt_cfgs: Sequence[RestoreCheckpointConfig],\n *,\n ds_iter: Optional[tf.data.Iterator] = None,\n init_rng: Optional[jnp.ndarray] = None\n ) -> Optional[train_state_lib.TrainState]:\n def from_checkpoint_or_scratch(\n self,\n ckpt_cfgs: Sequence[RestoreCheckpointConfig],\n *,\n init_rng: Array,\n ds_iter: Optional[tf.data.Iterator] = None) -> train_state_lib.TrainState:\ndef log_model_info(log_file: Optional[str],\n full_train_state: train_state_lib.TrainState,\n partitioner: partitioning.BasePartitioner):\n def _log_info_and_write_to_file(writer, format_str, *args):\n def _log_variable(name: str, arr: Optional[np.ndarray],\n logical_axes: Optional[partitioning.AxisNames],\n mesh_axes: Optional[partitioning.PartitionSpec]):\n def __call__(self,\n params: Mapping[str, Any],\n batch: Mapping[str, jnp.ndarray],\n rng: jnp.ndarray = None) -> PyTreeDef:\n def __call__(self, params: Mapping[str, Any],\n batch: Mapping[str, jnp.ndarray]) -> PyTreeDef:\n def __call__(\n self,\n ds: tf.data.Dataset,\n train_state: train_state_lib.TrainState,\n rng: Optional[jnp.ndarray] = None\n ) -> Union[_InferFnResult, _InferFnWithAuxResult]:\ndef _remove_padding(all_inferences, all_indices):\ndef 
get_infer_fn(infer_step: InferStepCallable, batch_size: int,\n train_state_axes: train_state_lib.TrainState,\n partitioner: partitioning.BasePartitioner, \n pbar=False) -> InferFnCallable:\n def infer_step_with_indices(params, batch, rng, indices):\n def infer_fn(ds: tf.data.Dataset,\n train_state: train_state_lib.TrainState,\n rng: Optional[jnp.ndarray] = None):\n def _copy_to_host_async(x):\ndef import_module(module: str):\ndef get_vocabulary(\n cfg: DatasetConfig) -> Tuple[seqio.Vocabulary, seqio.Vocabulary]:\ndef verify_matching_vocabs(cfg: DatasetConfig, model: Any):\ndef get_dataset(cfg: DatasetConfig,\n shard_id: int,\n num_shards: int,\n feature_converter_cls: Callable[..., seqio.FeatureConverter],\n num_epochs: Optional[int] = None,\n continue_from_last_checkpoint: bool = False,\n batching_fn=None) -> tf.data.Dataset:\ndef get_dataset_inner(cfg: DatasetConfig,\n shard_info: seqio.ShardInfo,\n feature_converter_cls: Callable[...,\n seqio.FeatureConverter],\n seed: Optional[int] = None,\n num_epochs: Optional[int] = None,\n batching_fn=None\n ):\n def __call__(\n self,\n cfg: DatasetConfig,\n shard_id: int,\n num_shards: int,\n feature_converter_cls: Callable[..., seqio.FeatureConverter],\n num_epochs: Optional[int] = None,\n continue_from_last_checkpoint: bool = True\n ) -> Union[clu.data.dataset_iterator.DatasetIterator, tf.data.Dataset]:\n def __call__(\n self, cfg: DatasetConfig, shard_id: int, num_shards: int, eval_steps: int,\n feature_converter_cls: Callable[..., seqio.FeatureConverter]\n ) -> Mapping[str, tf.data.Dataset]:\ndef get_training_eval_datasets(\n cfg: DatasetConfig,\n shard_id: int,\n num_shards: int,\n eval_steps: int,\n feature_converter_cls: Callable[..., seqio.FeatureConverter],\n deterministic: bool = False,\n model_dir: Optional[str] = None,\n start_step: int = 0,\n) -> Mapping[str, tf.data.Dataset]:\n def _repeat_shard_batch_take_cache(ds: tf.data.Dataset):\ndef round_vocab_size_to_multiple(vocabulary: seqio.Vocabulary,\n divisor: int = 128):\ndef flatten_dict_string_keys(x):\ndef flatten_lists(lsts: Iterable[Iterable]) -> Sequence:\n def __init__(self, kvs: Sequence[Tuple[str, Any]]):\n def __getitem__(self, key: str) -> Any:\n def __len__(self) -> int:\n def __iter__(self) -> Iterable[Tuple[re.Pattern, Any]]:\ndef override_params_axes_names(\n model_variables: flax_scope.FrozenVariableDict,\n params_axes_names_override: Sequence[Tuple[str, Tuple[str, ...]]] = ()\n) -> flax_scope.FrozenVariableDict:\ndef get_local_data(x):" }, { "identifier": "init_wandb", "path": "t5x/examples/unified_io/utils.py", "snippet": "@gin.configurable()\ndef init_wandb(name=None, group=None, entity=None, project=None):\n utils.create_learning_rate_scheduler() # Makes sure this is registered in `operative_config`\n config_str = gin.operative_config_str()\n logging.info(f\"Init wandb with group={group} name={name}\")\n wandb.init(\n group=group,\n name=name,\n entity=entity,\n project=project,\n force=True,\n notes=config_str\n )" } ]
import functools
import math
import os
import time
import warnings
import clu.data
import jax
import jax.numpy as jnp
import numpy as np
import seqio
import tensorflow as tf
import jax.profiler
import gin
from typing import Callable, Sequence, Mapping, Tuple, Type, Optional
from t5x.examples.unified_io.packing import PackingStrategy
from absl import logging
from clu import metric_writers
from jax import random
from jax.experimental import multihost_utils
from jax.experimental.global_device_array import GlobalDeviceArray
from t5x import checkpoints
from t5x import eval as eval_lib
from t5x import models
from t5x.examples.unified_io import evaluator
from t5x import partitioning
from t5x import train_state as train_state_lib
from t5x import trainer as trainer_lib
from t5x import utils
from os.path import expanduser
from t5x.examples.unified_io.utils import init_wandb
from t5x.examples.unified_io.metrics.metrics import null_metric
from t5x.examples.unified_io.data.postprocessing import return_example
from absl import app
from absl import flags
from t5x import gin_utils
13,875
# Copyright 2022 The T5X Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

r"""Script to pretrain or finetune in JAX using a SeqIO pipeline.
"""

# Set Linen to add profiling information when constructing Modules.
# Must be set before flax imports.
# pylint:disable=g-import-not-at-top
os.environ['FLAX_PROFILE'] = 'true'
# TODO(adarob): Re-enable once users are notified and tests are updated.
os.environ['FLAX_LAZY_RNG'] = 'no'
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = os.path.join(
    expanduser("~"), ".config/gcloud/application_default_credentials.json")

# Automatically search for gin files relative to the T5X package.
_DEFAULT_GIN_SEARCH_PATHS = [
    os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
]

PyTreeDef = type(jax.tree_util.tree_structure(None))
P = partitioning.PartitionSpec
# Special key that used to distinguish train metrics.
TRAIN_METRIC_KEY = 'train'
# String keys that is acceptable from config.
_ACTION_KEYS = frozenset(trainer_lib.ActionMode.__members__.keys())


def run_actions(
    mode: trainer_lib.ActionMode,
    actions: trainer_lib.ActionMapType,
    train_state: train_state_lib.TrainState,
    metrics_by_task: Mapping[str, trainer_lib.MetricValueMapType]) -> bool:
  """Invokes all actions on the given mode on host 0, then broadcasts to all.

  Args:
    mode: The mode to run the actions. e.g., if mode is `train`, only actions
      configured to run with `train` mode will be invoked.
    actions: A mapping of actions that runs after train, eval or infer_eval,
      to inspect the model and perform useful operations, e.g., early stopping.
    train_state: The current train_state of the trainer.
    metrics_by_task: A map of metrics keyed by task name.

  Returns:
    A bool indicating whether training should be halted.

  Raises:
    RuntimeError: When the metrics processed on host 0 is None.
  """
  stop_training = False
  if jax.process_index() == 0:
    if not metrics_by_task:
      raise RuntimeError('Metric is unexpectedly empty on process 0')
    for action in actions.get(mode, []):
      stop_training |= action.run(train_state, metrics_by_task=metrics_by_task)
  # Broadcast result from host 0 to others.
  return bool(multihost_utils.broadcast_one_to_all(jnp.array(stop_training)))


def train(
    *,
    model: models.BaseTransformerModel,
# Copyright 2022 The T5X Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

r"""Script to pretrain or finetune in JAX using a SeqIO pipeline.
"""

# Set Linen to add profiling information when constructing Modules.
# Must be set before flax imports.
# pylint:disable=g-import-not-at-top
os.environ['FLAX_PROFILE'] = 'true'
# TODO(adarob): Re-enable once users are notified and tests are updated.
os.environ['FLAX_LAZY_RNG'] = 'no'
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = os.path.join(
    expanduser("~"), ".config/gcloud/application_default_credentials.json")

# Automatically search for gin files relative to the T5X package.
_DEFAULT_GIN_SEARCH_PATHS = [
    os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
]

PyTreeDef = type(jax.tree_util.tree_structure(None))
P = partitioning.PartitionSpec
# Special key that used to distinguish train metrics.
TRAIN_METRIC_KEY = 'train'
# String keys that is acceptable from config.
_ACTION_KEYS = frozenset(trainer_lib.ActionMode.__members__.keys())


def run_actions(
    mode: trainer_lib.ActionMode,
    actions: trainer_lib.ActionMapType,
    train_state: train_state_lib.TrainState,
    metrics_by_task: Mapping[str, trainer_lib.MetricValueMapType]) -> bool:
  """Invokes all actions on the given mode on host 0, then broadcasts to all.

  Args:
    mode: The mode to run the actions. e.g., if mode is `train`, only actions
      configured to run with `train` mode will be invoked.
    actions: A mapping of actions that runs after train, eval or infer_eval,
      to inspect the model and perform useful operations, e.g., early stopping.
    train_state: The current train_state of the trainer.
    metrics_by_task: A map of metrics keyed by task name.

  Returns:
    A bool indicating whether training should be halted.

  Raises:
    RuntimeError: When the metrics processed on host 0 is None.
  """
  stop_training = False
  if jax.process_index() == 0:
    if not metrics_by_task:
      raise RuntimeError('Metric is unexpectedly empty on process 0')
    for action in actions.get(mode, []):
      stop_training |= action.run(train_state, metrics_by_task=metrics_by_task)
  # Broadcast result from host 0 to others.
  return bool(multihost_utils.broadcast_one_to_all(jnp.array(stop_training)))


def train(
    *,
    model: models.BaseTransformerModel,
train_dataset_cfg: utils.DatasetConfig,
8
2023-12-12 20:23:33+00:00
16k
zju3dv/EasyVolcap
tests/hardware_splatting_tests.py
[ { "identifier": "eglContextManager", "path": "easyvolcap/utils/egl_utils.py", "snippet": "class eglContextManager:\n # Manages the creation and destruction of an EGL context\n # Will resize if the size of the window changes\n # Will also manage gl.Viewport to render different parts of the screen\n # Only resize the underlying egl ctx when exceeding current size\n def __init__(self, W=1920, H=1080) -> None:\n self.H, self.W = H, W\n self.max_H, self.max_W = H, W # always create at first\n self.eglctx = create_opengl_context()\n self.create_fbo_with_rbos(W, H)\n self.resize(W, H) # maybe create new framebuffer\n\n def create_fbo_with_rbos(self, W: int, H: int):\n if hasattr(self, 'fbo'):\n gl.glDeleteFramebuffers(1, [self.fbo])\n gl.glDeleteRenderbuffers(6, [self.rbo0, self.rbo1, self.rbo2, self.rbo3, self.rbo4, self.rbo_dpt])\n\n # Add new buffer\n self.fbo = gl.glGenFramebuffers(1)\n self.rbo0, self.rbo1, self.rbo2, self.rbo3, self.rbo4, self.rbo_dpt = gl.glGenRenderbuffers(6)\n gl.glBindRenderbuffer(gl.GL_RENDERBUFFER, self.rbo0)\n gl.glRenderbufferStorage(gl.GL_RENDERBUFFER, gl.GL_RGBA8, W, H)\n gl.glBindRenderbuffer(gl.GL_RENDERBUFFER, self.rbo1)\n gl.glRenderbufferStorage(gl.GL_RENDERBUFFER, gl.GL_RGBA8, W, H)\n gl.glBindRenderbuffer(gl.GL_RENDERBUFFER, self.rbo2)\n gl.glRenderbufferStorage(gl.GL_RENDERBUFFER, gl.GL_RGBA8, W, H)\n gl.glBindRenderbuffer(gl.GL_RENDERBUFFER, self.rbo3)\n gl.glRenderbufferStorage(gl.GL_RENDERBUFFER, gl.GL_RGBA8, W, H)\n gl.glBindRenderbuffer(gl.GL_RENDERBUFFER, self.rbo4)\n gl.glRenderbufferStorage(gl.GL_RENDERBUFFER, gl.GL_RGBA8, W, H)\n gl.glBindRenderbuffer(gl.GL_RENDERBUFFER, self.rbo_dpt)\n gl.glRenderbufferStorage(gl.GL_RENDERBUFFER, gl.GL_DEPTH_COMPONENT, W, H)\n\n gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, self.fbo)\n gl.glFramebufferRenderbuffer(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT0, gl.GL_RENDERBUFFER, self.rbo0)\n gl.glFramebufferRenderbuffer(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT1, gl.GL_RENDERBUFFER, self.rbo1)\n gl.glFramebufferRenderbuffer(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT1, gl.GL_RENDERBUFFER, self.rbo2)\n gl.glFramebufferRenderbuffer(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT1, gl.GL_RENDERBUFFER, self.rbo3)\n gl.glFramebufferRenderbuffer(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT1, gl.GL_RENDERBUFFER, self.rbo4)\n gl.glFramebufferRenderbuffer(gl.GL_FRAMEBUFFER, gl.GL_DEPTH_ATTACHMENT, gl.GL_RENDERBUFFER, self.rbo_dpt)\n gl.glDrawBuffers(5, [gl.GL_COLOR_ATTACHMENT0, gl.GL_COLOR_ATTACHMENT1, gl.GL_COLOR_ATTACHMENT2, gl.GL_COLOR_ATTACHMENT3, gl.GL_COLOR_ATTACHMENT4])\n\n gl.glViewport(0, 0, W, H) # wtf\n gl.glScissor(0, 0, W, H) # wtf # NOTE: Need to redefine scissor size\n\n def resize(self, W=1920, H=1080):\n self.H, self.W = H, W\n if self.H > self.max_H or self.W > self.max_W:\n self.max_H, self.max_W = max(int(self.H * 1.0), self.max_H), max(int(self.W * 1.0), self.max_W)\n self.create_fbo_with_rbos(self.max_W, self.max_H)\n gl.glViewport(0, 0, self.W, self.H)" }, { "identifier": "Quad", "path": "easyvolcap/utils/gl_utils.py", "snippet": "class Quad(Mesh):\n # A shared texture for CUDA (pytorch) and OpenGL\n # Could be rendererd to screen using blitting or just drawing a quad\n def __init__(self, H: int = 256, W: int = 256, use_cudagl: bool = True, compose: bool = False, compose_power: float = 1.0): # the texture to blip\n self.use_cudagl = use_cudagl\n self.vert_sizes = [3] # only position\n self.vert_gl_types = [gl.GL_FLOAT] # only position\n self.render_type = Mesh.RenderType.STRIPS # remove side effects of settings 
_type\n self.max_verts, self.max_faces = 0, 0\n self.verts = torch.as_tensor([[-1., -1., 0.5],\n [1., -1., 0.5],\n [-1., 1., 0.5],\n [1., 1., 0.5],])\n self.update_gl_buffers()\n self.compile_shaders()\n\n self.max_H, self.max_W = H, W\n self.H, self.W = H, W\n self.compose = compose\n self.compose_power = compose_power\n self.init_texture()\n\n @property\n def n_faces_bytes(self): return 0\n\n def use_gl_program(self, program: shaders.ShaderProgram):\n super().use_gl_program(program)\n self.uniforms.tex = gl.glGetUniformLocation(program, 'tex')\n gl.glUseProgram(self.quad_program) # use a different program\n gl.glUniform1i(self.uniforms.tex, 0)\n\n def compile_shaders(self):\n try:\n self.quad_program = shaders.compileProgram(\n shaders.compileShader(load_shader_source('quad.vert'), gl.GL_VERTEX_SHADER),\n shaders.compileShader(load_shader_source('quad.frag'), gl.GL_FRAGMENT_SHADER)\n )\n except Exception as e:\n print(str(e).encode('utf-8').decode('unicode_escape'))\n raise e\n\n def resize_textures(self, H: int, W: int): # analogy to update_gl_buffers\n self.H, self.W = H, W\n if self.H > self.max_H or self.W > self.max_W: # max got updated\n self.max_H, self.max_W = max(int(self.H * 1.05), self.max_H), max(int(self.W * 1.05), self.max_W)\n self.init_texture()\n\n def init_texture(self):\n if hasattr(self, 'cu_tex'):\n from cuda import cudart\n CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_tex))\n\n if hasattr(self, 'fbo'):\n gl.glDeleteFramebuffers(1, [self.fbo])\n gl.glDeleteTextures(1, [self.tex])\n\n # Init the texture to be blit onto the screen\n self.tex = gl.glGenTextures(1)\n gl.glBindTexture(gl.GL_TEXTURE_2D, self.tex)\n gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_RGBA8, self.max_W, self.max_H, 0, gl.GL_RGBA, gl.GL_UNSIGNED_BYTE, ctypes.c_void_p(0))\n gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)\n gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST)\n\n # Init the framebuffer object if explicit blitting is used (slower than drawing quad)\n self.fbo = gl.glGenFramebuffers(1)\n old_fbo = gl.glGetIntegerv(gl.GL_FRAMEBUFFER_BINDING)\n gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, self.fbo)\n gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT0, gl.GL_TEXTURE_2D, self.tex, 0)\n gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, old_fbo)\n\n if self.use_cudagl:\n from cuda import cudart\n if self.compose:\n # Both reading and writing of this resource is required\n flags = cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsNone\n else:\n flags = cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsWriteDiscard\n self.cu_tex = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.tex, gl.GL_TEXTURE_2D, flags))\n\n def copy_to_texture(self, image: torch.Tensor, x: int = 0, y: int = 0, w: int = 0, h: int = 0):\n assert self.use_cudagl, \"Need to enable cuda-opengl interop to copy from device to device, check creation of this Quad\"\n w = w or self.W\n h = h or self.H\n if image.shape[-1] == 3:\n image = torch.cat([image, image.new_ones(image.shape[:-1] + (1,)) * 255], dim=-1) # add alpha channel\n\n from cuda import cudart\n kind = cudart.cudaMemcpyKind.cudaMemcpyDeviceToDevice\n CHECK_CUDART_ERROR(cudart.cudaGraphicsMapResources(1, self.cu_tex, torch.cuda.current_stream().cuda_stream))\n cu_tex_arr = CHECK_CUDART_ERROR(cudart.cudaGraphicsSubResourceGetMappedArray(self.cu_tex, 0, 0))\n\n if self.compose:\n \"\"\"\n Blit current framebuffer to this texture (self.tex)\n Read content of this texture 
into a cuda buffer\n Perform alpha blending based on the frame's alpha channel\n Copy the blended image back into the texture (self.tex)\n \"\"\"\n old = gl.glGetInteger(gl.GL_DRAW_FRAMEBUFFER_BINDING)\n gl.glBindFramebuffer(gl.GL_DRAW_FRAMEBUFFER, self.fbo) # read buffer defaults to 0\n gl.glBlitFramebuffer(x, y, w, h,\n x, y, w, h,\n gl.GL_COLOR_BUFFER_BIT, gl.GL_NEAREST) # now self.tex contains the content of the already rendered frame\n gl.glBindFramebuffer(gl.GL_DRAW_FRAMEBUFFER, old)\n\n buffer = torch.empty_like(image)\n CHECK_CUDART_ERROR(cudart.cudaMemcpy2DFromArrayAsync(buffer.data_ptr(), # dst\n w * 4 * buffer.element_size(), # dpitch\n cu_tex_arr, # src\n x * 4 * image.element_size(), # wOffset\n y, # hOffset\n w * 4 * buffer.element_size(), # width Width of matrix transfer (columns in bytes)\n h, # height\n kind, # kind\n torch.cuda.current_stream().cuda_stream)) # stream\n\n # cv2.imwrite('image.png', image.flip(0).detach().cpu().numpy()[..., [2,1,0,3]])\n alpha = image[..., -1:] / 255\n image[..., :-1] = buffer[..., :-1] * (1 - alpha ** self.compose_power) + image[..., :-1] * alpha # storing float into int\n image[..., -1:] = buffer[..., -1:] + image[..., -1:]\n image = image.clip(0, 255)\n\n CHECK_CUDART_ERROR(cudart.cudaMemcpy2DToArrayAsync(cu_tex_arr,\n x * 4 * image.element_size(),\n y,\n image.data_ptr(),\n w * 4 * image.element_size(), # differently sized\n w * 4 * image.element_size(), # rgba, should do a composition first\n h,\n kind,\n torch.cuda.current_stream().cuda_stream))\n CHECK_CUDART_ERROR(cudart.cudaGraphicsUnmapResources(1, self.cu_tex, torch.cuda.current_stream().cuda_stream))\n\n def upload_to_texture(self, ptr: np.ndarray):\n H, W = ptr.shape[:2]\n H, W = min(self.H, H), min(self.W, W)\n gl.glBindTexture(gl.GL_TEXTURE_2D, self.tex)\n gl.glTexSubImage2D(gl.GL_TEXTURE_2D, 0, 0, 0, W, H, gl.GL_RGBA, gl.GL_UNSIGNED_BYTE, ptr[:H, :W]) # to gpu, might slow down?\n\n @property\n def verts_data(self): # a heavy copy operation\n verts = self.verts.ravel().detach().cpu().numpy() # MARK: Maybe sync\n verts = np.asarray(verts, dtype=np.float32, order='C')\n return verts\n\n def render(self, camera: Camera = None):\n self.draw() # no uploading needed\n\n def draw(self, x: int = 0, y: int = 0, w: int = 0, h: int = 0):\n \"\"\"\n Upload the texture instead of the camera\n This respects the OpenGL convension of lower left corners\n \"\"\"\n w = w or self.W\n h = h or self.H\n _, _, W, H = gl.glGetIntegerv(gl.GL_VIEWPORT)\n gl.glViewport(x, y, w, h)\n gl.glScissor(x, y, w, h) # only render in this small region of the viewport\n\n gl.glUseProgram(self.quad_program) # use a different program\n gl.glActiveTexture(gl.GL_TEXTURE0)\n gl.glBindTexture(gl.GL_TEXTURE_2D, self.tex)\n\n gl.glBindVertexArray(self.vao)\n gl.glDrawArrays(gl.GL_TRIANGLE_STRIP, 0, len(self.verts))\n gl.glBindVertexArray(0)\n\n # Some house keepings\n gl.glViewport(0, 0, W, H)\n gl.glScissor(0, 0, W, H)\n\n def blit(self, x: int = 0, y: int = 0, w: int = 0, h: int = 0):\n \"\"\"\n This respects the OpenGL convension of lower left corners\n \"\"\"\n w = w or self.W\n h = h or self.H\n old = gl.glGetInteger(gl.GL_READ_FRAMEBUFFER_BINDING)\n gl.glBindFramebuffer(gl.GL_READ_FRAMEBUFFER, self.fbo) # write buffer defaults to 0\n gl.glBlitFramebuffer(x, y, x + w, y + h, # the height is flipped\n x, y, x + w, y + h, # the height is flipped\n gl.GL_COLOR_BUFFER_BIT, gl.GL_NEAREST)\n gl.glBindFramebuffer(gl.GL_READ_FRAMEBUFFER, old)" }, { "identifier": "Mesh", "path": "easyvolcap/utils/gl_utils.py", "snippet": 
"class Mesh:\n class RenderType(Enum):\n POINTS = 1\n LINES = 2\n TRIS = 3\n QUADS = 4 # TODO: Support quad loading\n STRIPS = 5\n\n # Helper class to render a mesh on opengl\n # This implementation should only be used for debug visualization\n # Since no differentiable mechanism will be added\n # We recommend using nvdiffrast and pytorch3d's point renderer directly if you will to optimize these structures directly\n\n def __init__(self,\n verts: torch.Tensor = torch.tensor([[0, 0, 0], [0, 1, 0], [0, 0, 1]]), # need to call update after update\n faces: torch.Tensor = torch.tensor([[0, 1, 2]]), # need to call update after update\n colors: torch.Tensor = None,\n normals: torch.Tensor = None,\n scalars: dotdict[str, torch.Tensor] = dotdict(),\n render_type: RenderType = RenderType.TRIS,\n\n # Misc info\n name: str = 'mesh',\n filename: str = '',\n visible: bool = True,\n\n # Render options\n shade_flat: bool = False, # smooth shading\n point_radius: float = 0.015,\n render_normal: bool = False,\n\n # Storage options\n store_device: str = 'cpu',\n compute_device: str = 'cuda',\n vert_sizes=[3, 3, 3], # pos + color + norm\n\n # Init options\n est_normal_thresh: int = 100000,\n\n # Ignore unused input\n **kwargs,\n ) -> None:\n super().__init__()\n self.name = name\n self.visible = visible\n self.render_type = render_type\n\n self.shade_flat = shade_flat\n self.point_radius = point_radius\n self.render_normal = render_normal\n\n self.store_device = store_device\n self.compute_device = compute_device\n self.vert_sizes = vert_sizes\n\n self.est_normal_thresh = est_normal_thresh\n\n # Uniform and program\n self.compile_shaders()\n self.uniforms = dotdict() # uniform values\n\n # Before initialization\n self.max_verts = 0\n self.max_faces = 0\n\n # OpenGL data\n if filename: self.load_from_file(filename)\n else: self.load_from_data(verts, faces, colors, normals, scalars)\n\n def compile_shaders(self):\n try:\n self.mesh_program = shaders.compileProgram(\n shaders.compileShader(load_shader_source('mesh.vert'), gl.GL_VERTEX_SHADER),\n shaders.compileShader(load_shader_source('mesh.frag'), gl.GL_FRAGMENT_SHADER)\n )\n self.point_program = shaders.compileProgram(\n shaders.compileShader(load_shader_source('point.vert'), gl.GL_VERTEX_SHADER),\n shaders.compileShader(load_shader_source('point.frag'), gl.GL_FRAGMENT_SHADER)\n )\n except Exception as e:\n print(str(e).encode('utf-8').decode('unicode_escape'))\n raise e\n\n @property\n def n_verts_bytes(self):\n return len(self.verts) * self.vert_size * self.verts.element_size()\n\n @property\n def n_faces_bytes(self):\n return len(self.faces) * self.face_size * self.faces.element_size()\n\n @property\n def verts_data(self): # a heavy copy operation\n verts = torch.cat([self.verts, self.colors, self.normals], dim=-1).ravel().numpy() # MARK: Maybe sync\n verts = np.asarray(verts, dtype=np.float32, order='C')\n return verts\n\n @property\n def faces_data(self): # a heavy copy operation\n faces = self.faces.ravel().numpy() # N, 3\n faces = np.asarray(faces, dtype=np.uint32, order='C')\n return faces\n\n @property\n def face_size(self):\n return self.render_type.value\n\n @property\n def vert_size(self):\n return sum(self.vert_sizes)\n\n def load_from_file(self, filename: str = 'assets/meshes/bunny.ply'):\n verts, faces, colors, normals, scalars = self.load_data_from_file(filename)\n self.load_from_data(verts, faces, colors, normals, scalars)\n\n def load_data_from_file(self, filename: str = 'assets/meshes/bunny.ply'):\n self.name = os.path.split(filename)[-1]\n 
verts, faces, colors, normals, scalars = None, None, None, None, None\n verts, faces = load_mesh(filename, device=self.store_device)\n if not len(faces):\n verts, colors, normals, scalars = load_pts(filename)\n self.render_type = Mesh.RenderType.POINTS\n else:\n self.render_type = Mesh.RenderType(faces.shape[-1]) # use value\n return verts, faces, colors, normals, scalars\n\n def load_from_data(self, verts: torch.Tensor, faces: torch.Tensor, colors: torch.Tensor = None, normals: torch.Tensor = None, scalars: dotdict[str, torch.Tensor] = dotdict()):\n # Data type conversion\n verts = torch.as_tensor(verts) # convert to tensor if input is of other types\n if verts.dtype == torch.float32:\n pass # supports this for now\n elif verts.dtype == torch.float16:\n pass # supports this for now\n else:\n verts = verts.type(torch.float) # convert to float32 if input is of higher precision\n gl_dtype = gl.GL_FLOAT if verts.dtype == torch.float else gl.GL_HALF_FLOAT\n self.vert_gl_types = [gl_dtype] * len(self.vert_sizes)\n\n # Prepare main mesh data: vertices and faces\n self.verts = torch.as_tensor(verts, device=self.store_device)\n self.faces = torch.as_tensor(faces, device=self.store_device, dtype=torch.int32) # NOTE: No uint32 support\n\n # Prepare colors and normals\n if colors is not None:\n self.colors = torch.as_tensor(colors, device=self.store_device, dtype=self.verts.dtype)\n else:\n bounds = get_bounds(self.verts[None])[0]\n self.colors = (self.verts - bounds[0]) / (bounds[1] - bounds[0])\n if normals is not None:\n self.normals = torch.as_tensor(normals, device=self.store_device, dtype=self.verts.dtype)\n else:\n self.estimate_vertex_normals()\n\n # Prepare other scalars\n if scalars is not None:\n for k, v in scalars.items():\n setattr(self, k, torch.as_tensor(v, device=self.store_device, dtype=self.verts.dtype)) # is this ok?\n\n # Prepare OpenGL related buffer\n self.update_gl_buffers()\n\n def estimate_vertex_normals(self):\n def est_pcd_norms():\n if self.verts.dtype == torch.half:\n self.normals = self.verts\n else:\n from pytorch3d.structures import Pointclouds, Meshes\n pcd = Pointclouds([self.verts]).to(self.compute_device)\n self.normals = pcd.estimate_normals()[0].cpu().to(self.verts.dtype) # no batch dim\n\n def est_tri_norms():\n if self.verts.dtype == torch.half:\n self.normals = self.verts\n else:\n from pytorch3d.structures import Pointclouds, Meshes\n mesh = Meshes([self.verts], [self.faces]).to(self.compute_device)\n self.normals = mesh.verts_normals_packed().cpu().to(self.verts.dtype) # no batch dim\n\n if not len(self.verts) > self.est_normal_thresh:\n if self.render_type == Mesh.RenderType.TRIS: est_tri_norms()\n elif self.render_type == Mesh.RenderType.POINTS: est_pcd_norms()\n else:\n # log(yellow(f'Unsupported mesh type: {self.render_type} for normal estimation, skipping'))\n self.normals = self.verts\n else:\n # log(yellow(f'Number of points for mesh too large: {len(self.verts)} > {self.est_normal_thresh}, skipping normal estimation'))\n self.normals = self.verts\n\n def offscreen_render(self, eglctx: \"eglContextManager\", camera: Camera):\n eglctx.resize(camera.W, camera.H)\n self.render(camera)\n\n def render(self, camera: Camera):\n if not self.visible: return\n\n # For point rendering\n if self.render_type == Mesh.RenderType.POINTS:\n gl.glUseProgram(self.point_program)\n self.use_gl_program(self.point_program)\n else:\n gl.glUseProgram(self.mesh_program)\n self.use_gl_program(self.mesh_program)\n\n self.upload_gl_uniforms(camera)\n 
gl.glBindVertexArray(self.vao)\n\n if self.render_type == Mesh.RenderType.POINTS:\n gl.glDrawArrays(gl.GL_POINTS, 0, len(self.verts)) # number of vertices\n elif self.render_type == Mesh.RenderType.LINES:\n gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo)\n gl.glDrawElements(gl.GL_LINES, len(self.faces) * self.face_size, gl.GL_UNSIGNED_INT, ctypes.c_void_p(0)) # number of indices\n elif self.render_type == Mesh.RenderType.TRIS:\n gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo)\n gl.glDrawElements(gl.GL_TRIANGLES, len(self.faces) * self.face_size, gl.GL_UNSIGNED_INT, ctypes.c_void_p(0)) # number of indices\n elif self.render_type == Mesh.RenderType.QUADS:\n gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo)\n gl.glDrawElements(gl.GL_QUADS, len(self.faces) * self.face_size, gl.GL_UNSIGNED_INT, ctypes.c_void_p(0)) # number of indices\n elif self.render_type == Mesh.RenderType.STRIPS:\n gl.glDrawArrays(gl.GL_TRIANGLE_STRIP, 0, len(self.verts))\n else:\n raise NotImplementedError\n\n gl.glBindVertexArray(0)\n\n def use_gl_program(self, program: shaders.ShaderProgram):\n use_gl_program(program)\n self.uniforms.shade_flat = gl.glGetUniformLocation(program, \"shade_flat\")\n self.uniforms.point_radius = gl.glGetUniformLocation(program, \"point_radius\")\n self.uniforms.render_normal = gl.glGetUniformLocation(program, \"render_normal\")\n self.uniforms.H = gl.glGetUniformLocation(program, \"H\")\n self.uniforms.W = gl.glGetUniformLocation(program, \"W\")\n self.uniforms.n = gl.glGetUniformLocation(program, \"n\")\n self.uniforms.f = gl.glGetUniformLocation(program, \"f\")\n self.uniforms.P = gl.glGetUniformLocation(program, \"P\")\n self.uniforms.K = gl.glGetUniformLocation(program, \"K\")\n self.uniforms.V = gl.glGetUniformLocation(program, \"V\")\n self.uniforms.M = gl.glGetUniformLocation(program, \"M\")\n\n def upload_gl_uniforms(self, camera: Camera):\n K = camera.gl_ixt # hold the reference\n V = camera.gl_ext # hold the reference\n M = glm.identity(mat4)\n P = K * V * M\n\n gl.glUniform1i(self.uniforms.shade_flat, self.shade_flat)\n gl.glUniform1f(self.uniforms.point_radius, self.point_radius)\n gl.glUniform1i(self.uniforms.render_normal, self.render_normal)\n gl.glUniform1i(self.uniforms.H, camera.H) # o2w\n gl.glUniform1i(self.uniforms.W, camera.W) # o2w\n gl.glUniform1f(self.uniforms.n, camera.n) # o2w\n gl.glUniform1f(self.uniforms.f, camera.f) # o2w\n gl.glUniformMatrix4fv(self.uniforms.P, 1, gl.GL_FALSE, glm.value_ptr(P)) # o2clip\n gl.glUniformMatrix4fv(self.uniforms.K, 1, gl.GL_FALSE, glm.value_ptr(K)) # c2clip\n gl.glUniformMatrix4fv(self.uniforms.V, 1, gl.GL_FALSE, glm.value_ptr(V)) # w2c\n gl.glUniformMatrix4fv(self.uniforms.M, 1, gl.GL_FALSE, glm.value_ptr(M)) # o2w\n\n def update_gl_buffers(self):\n # Might be overwritten\n self.resize_buffers(len(self.verts) if hasattr(self, 'verts') else 0,\n len(self.faces) if hasattr(self, 'faces') else 0) # maybe repeated\n\n if hasattr(self, 'verts'):\n gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vbo)\n gl.glBufferSubData(gl.GL_ARRAY_BUFFER, 0, self.n_verts_bytes, self.verts_data) # hold the reference\n if hasattr(self, 'faces'):\n gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo)\n gl.glBufferSubData(gl.GL_ELEMENT_ARRAY_BUFFER, 0, self.n_faces_bytes, self.faces_data)\n\n def resize_buffers(self, v: int = 0, f: int = 0):\n if v > self.max_verts or f > self.max_faces:\n if v > self.max_verts: self.max_verts = v\n if f > self.max_faces: self.max_faces = f\n self.init_gl_buffers(v, f)\n\n def init_gl_buffers(self, v: int = 0, 
f: int = 0):\n # This will only init the corresponding buffer object\n n_verts_bytes = v * self.vert_size * self.verts.element_size() if v > 0 else self.n_verts_bytes\n n_faces_bytes = f * self.face_size * self.faces.element_size() if f > 0 else self.n_faces_bytes\n\n # Housekeeping\n if hasattr(self, 'vao'):\n gl.glDeleteVertexArrays(1, [self.vao])\n gl.glDeleteBuffers(2, [self.vbo, self.ebo])\n\n self.vao = gl.glGenVertexArrays(1)\n self.vbo = gl.glGenBuffers(1)\n self.ebo = gl.glGenBuffers(1)\n\n gl.glBindVertexArray(self.vao)\n gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vbo)\n gl.glBufferData(gl.GL_ARRAY_BUFFER, n_verts_bytes, ctypes.c_void_p(0), gl.GL_DYNAMIC_DRAW) # NOTE: Using pointers here won't work\n\n # https://stackoverflow.com/questions/67195932/pyopengl-cannot-render-any-vao\n cumsum = 0\n for i, (s, t) in enumerate(zip(self.vert_sizes, self.vert_gl_types)):\n gl.glVertexAttribPointer(i, s, t, gl.GL_FALSE, self.vert_size * self.verts.element_size(), ctypes.c_void_p(cumsum * self.verts.element_size())) # we use 32 bit float\n gl.glEnableVertexAttribArray(i)\n cumsum += s\n\n if n_faces_bytes > 0:\n # Some implementation has no faces, we dangerously ignore ebo here, assuming they will never be used\n gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo)\n gl.glBufferData(gl.GL_ELEMENT_ARRAY_BUFFER, n_faces_bytes, ctypes.c_void_p(0), gl.GL_DYNAMIC_DRAW)\n gl.glBindVertexArray(0)\n\n def render_imgui(self):\n pass" }, { "identifier": "Camera", "path": "easyvolcap/utils/viewer_utils.py", "snippet": "class Camera:\n # Helper class to manage camera parameters\n def __init__(self,\n H: int = 512,\n W: int = 512,\n K: torch.Tensor = torch.tensor([[512.0, 0.0, 256], [0.0, 512.0, 256.0], [0.0, 0.0, 1.0]]), # intrinsics\n R: torch.Tensor = torch.tensor([[-1.0, 0.0, 0.0,], [0.0, 0.0, -1.0,], [0.0, -1.0, 0.0,]]), # extrinsics\n T: torch.Tensor = torch.tensor([[0.0], [0.0], [-3.0],]), # extrinsics\n n: float = 0.002, # bounds limit\n f: float = 100, # bounds limit\n t: float = 0.0, # temporal dimension (implemented as a float instead of int)\n v: float = 0.0, # view dimension (implemented as a float instead of int)\n bounds: torch.Tensor = torch.tensor([[-1.0, -1.0, -1.0], [1.0, 1.0, 1.0]]), # bounding box\n\n # camera update hyperparameters\n origin: torch.Tensor = torch.tensor([0.0, 0.0, 0.0]),\n world_up: torch.Tensor = torch.tensor([0.0, 0.0, 1.0]),\n movement_speed: float = 1.0, # gui movement speed\n\n batch: dotdict = None, # will ignore all other inputs\n string: str = None, # will ignore all other inputs\n **kwargs,\n ) -> None:\n\n # Batch (network input parameters)\n if string is None:\n if batch is None:\n batch = dotdict()\n batch.H, batch.W, batch.K, batch.R, batch.T, batch.n, batch.f, batch.t, batch.v, batch.bounds = H, W, K, R, T, n, f, t, v, bounds\n self.from_batch(batch)\n \n # Other configurables\n self.origin = vec3(*origin)\n self.world_up = vec3(*world_up)\n self.movement_speed = movement_speed\n # self.front = self.front # will trigger an update\n else:\n self.from_string(string)\n\n # Internal states to facilitate camera position change\n self.is_dragging = False # rotation\n self.about_origin = False # about origin rotation\n self.is_panning = False # translation\n self.lock_fx_fy = True\n\n @property\n def w2p(self):\n ixt = mat4(self.ixt)\n ixt[3, 3] = 0\n ixt[2, 3] = 1\n return ixt @ self.ext # w2c -> c2p = w2p\n\n @property\n def V(self): return self.c2w\n\n @property\n def ixt(self): return self.K\n\n @property\n def gl_ext(self):\n gl_c2w = self.c2w\n gl_c2w[0] 
*= 1 # flip x\n gl_c2w[1] *= -1 # flip y\n gl_c2w[2] *= -1 # flip z\n gl_ext = glm.affineInverse(gl_c2w)\n return gl_ext # use original opencv ext since we've taken care of the intrinsics in gl_ixt\n\n @property\n def gl_ixt(self):\n # Construct opengl camera matrix with projection & clipping\n # https://fruty.io/2019/08/29/augmented-reality-with-opencv-and-opengl-the-tricky-projection-matrix/\n # https://gist.github.com/davegreenwood/3a32d779f81f08dce32f3bb423672191\n # fmt: off\n gl_ixt = mat4(\n 2 * self.fx / self.W, 0, 0, 0,\n 2 * self.s / self.W, 2 * self.fy / self.H, 0, 0,\n 1 - 2 * (self.cx / self.W), 2 * (self.cy / self.H) - 1, (self.f + self.n) / (self.n - self.f), -1,\n 0, 0, 2 * self.f * self.n / (self.n - self.f), 0,\n )\n # fmt: on\n\n return gl_ixt\n\n @property\n def ext(self): return self.w2c\n\n @property\n def w2c(self):\n w2c = mat4(self.R)\n w2c[3] = vec4(*self.T, 1.0)\n return w2c\n\n @property\n def c2w(self):\n return glm.affineInverse(self.w2c)\n\n @property\n def right(self) -> vec3: return vec3(self.R[0, 0], self.R[1, 0], self.R[2, 0]) # c2w R, 0 -> 3,\n\n @property\n def down(self) -> vec3: return vec3(self.R[0, 1], self.R[1, 1], self.R[2, 1]) # c2w R, 1 -> 3,\n\n @property\n def front(self) -> vec3: return vec3(self.R[0, 2], self.R[1, 2], self.R[2, 2]) # c2w R, 2 -> 3,\n\n @front.setter\n def front(self, v: vec3):\n front = v # the last row of R\n self.R[0, 2], self.R[1, 2], self.R[2, 2] = front.x, front.y, front.z\n right = glm.normalize(glm.cross(self.front, self.world_up)) # right\n self.R[0, 0], self.R[1, 0], self.R[2, 0] = right.x, right.y, right.z\n down = glm.cross(self.front, self.right) # down\n self.R[0, 1], self.R[1, 1], self.R[2, 1] = down.x, down.y, down.z\n\n @property\n def center(self): return -glm.transpose(self.R) @ self.T # 3,\n\n @center.setter\n def center(self, v: vec3):\n self.T = -self.R @ v # 3, 1\n\n @property\n def s(self): return self.K[1, 0]\n\n @s.setter\n def s(self, s): self.K[1, 0] = s\n\n @property\n def fx(self): return self.K[0, 0]\n\n @fx.setter\n def fx(self, v: float):\n v = min(v, 1e5)\n v = max(v, 1e-3)\n if self.lock_fx_fy:\n self.K[1, 1] = v / self.K[0, 0] * self.K[1, 1]\n self.K[0, 0] = v\n\n @property\n def fy(self): return self.K[1, 1]\n\n @fy.setter\n def fy(self, v: float):\n if self.lock_fx_fy:\n self.K[0, 0] = v / self.K[1, 1] * self.K[0, 0]\n self.K[1, 1] = v\n\n @property\n def cx(self): return self.K[2, 0]\n\n @cx.setter\n def cx(self, v: float):\n self.K[2, 0] = v\n\n @property\n def cy(self): return self.K[2, 1]\n\n @cy.setter\n def cy(self, v: float):\n self.K[2, 1] = v\n\n def begin_dragging(self,\n x: float, y: float,\n is_panning: bool,\n about_origin: bool,\n ):\n self.is_dragging = True\n self.is_panning = is_panning\n self.about_origin = about_origin\n self.drag_start = vec2([x, y])\n\n # Record internal states # ? 
Will this make a copy?\n self.drag_start_front = self.front # a recording\n self.drag_start_down = self.down\n self.drag_start_right = self.right\n self.drag_start_center = self.center\n self.drag_start_origin = self.origin\n self.drag_start_world_up = self.world_up\n\n # Need to find the max or min delta y to align with world_up\n dot = glm.dot(self.world_up, self.drag_start_front)\n self.drag_ymin = -np.arccos(-dot) + 0.01 # drag up, look down\n self.drag_ymax = np.pi + self.drag_ymin - 0.02 # remove the 0.01 of drag_ymin\n\n def end_dragging(self):\n self.is_dragging = False\n\n def update_dragging(self, x: float, y: float):\n if not self.is_dragging:\n return\n\n current = vec2(x, y)\n delta = current - self.drag_start\n delta /= max(self.H, self.W)\n delta *= -1\n\n if self.is_panning:\n delta *= self.movement_speed\n center_delta = delta[0] * self.drag_start_right + delta[1] * self.drag_start_down\n self.center = self.drag_start_center + center_delta\n if self.about_origin:\n self.origin = self.drag_start_origin + center_delta\n else:\n m = mat4(1.0)\n m = glm.rotate(m, delta.x % 2 * np.pi, self.world_up)\n m = glm.rotate(m, np.clip(delta.y, self.drag_ymin, self.drag_ymax), self.drag_start_right)\n self.front = m @ self.drag_start_front # might overshoot\n\n if self.about_origin:\n self.center = -m @ (self.origin - self.drag_start_center) + self.origin\n\n def move(self, x_offset: float, y_offset: float):\n speed_factor = 1e-1\n movement = y_offset * speed_factor\n movement = movement * self.front * self.movement_speed\n self.center += movement\n\n if self.is_dragging:\n self.drag_start_center += movement\n\n def to_batch(self):\n meta = dotdict()\n meta.H = torch.as_tensor(self.H)\n meta.W = torch.as_tensor(self.W)\n meta.K = torch.as_tensor(self.K.to_list()).mT\n meta.R = torch.as_tensor(self.R.to_list()).mT\n meta.T = torch.as_tensor(self.T.to_list())[..., None]\n meta.n = torch.as_tensor(self.n)\n meta.f = torch.as_tensor(self.f)\n meta.t = torch.as_tensor(self.t)\n meta.v = torch.as_tensor(self.v)\n meta.bounds = torch.as_tensor(self.bounds.to_list()) # no transpose for bounds\n\n # GUI related elements\n meta.movement_speed = torch.as_tensor(self.movement_speed)\n meta.origin = torch.as_tensor(self.origin.to_list())\n meta.world_up = torch.as_tensor(self.world_up.to_list())\n\n batch = dotdict()\n batch.update(meta)\n batch.meta.update(meta)\n return batch\n\n def to_easymocap(self):\n batch = self.to_batch()\n camera = to_numpy(batch)\n return camera\n\n def from_easymocap(self, camera: dict):\n batch = to_tensor(camera)\n self.from_batch(batch)\n return self\n\n def to_string(self) -> str:\n batch = to_list(self.to_batch().meta)\n return json.dumps(batch)\n\n def from_string(self, string: str):\n batch = to_tensor(dotdict(json.loads(string)), ignore_list=True)\n self.from_batch(batch)\n\n def from_batch(self, batch: dotdict):\n H, W, K, R, T, n, f, t, v, bounds = batch.H, batch.W, batch.K, batch.R, batch.T, batch.n, batch.f, batch.t, batch.v, batch.bounds\n\n # Batch (network input parameters)\n self.H = int(H)\n self.W = int(W)\n self.K = mat3(*K.mT.ravel())\n self.R = mat3(*R.mT.ravel())\n self.T = vec3(*T.ravel()) # 3,\n self.n = float(n)\n self.f = float(f)\n self.t = float(t)\n self.v = float(v)\n self.bounds = mat2x3(*bounds.ravel()) # 2, 3\n\n if 'movement_speed' in batch: self.movement_speed = float(batch.movement_speed)\n if 'origin' in batch: self.origin = vec3(*batch.origin.ravel()) # 3,\n if 'world_up' in batch: self.world_up = vec3(*batch.world_up.ravel()) # 3,\n 
return self\n\n def custom_pose(self, R: torch.Tensor, T: torch.Tensor, K: torch.Tensor):\n # self.K = mat3(*K.mT.ravel())\n self.R = mat3(*R.mT.ravel())\n self.T = vec3(*T.ravel())" }, { "identifier": "save_image", "path": "easyvolcap/utils/data_utils.py", "snippet": "def save_image(img_path: str, img: np.ndarray, jpeg_quality=75, png_compression=9, save_dtype=np.uint8):\n if isinstance(img, torch.Tensor): img = img.detach().cpu().numpy()\n if img.ndim == 4: img = np.concatenate(img, axis=0)\n if img.shape[0] < img.shape[-1] and (img.shape[0] == 3 or img.shape[0] == 4): img = np.transpose(img, (1, 2, 0))\n if np.issubdtype(img.dtype, np.integer):\n img = img / np.iinfo(img.dtype).max # to float\n if img.shape[-1] >= 3:\n if not img.flags['WRITEABLE']:\n img = img.copy() # avoid assignment only inputs\n img[..., :3] = img[..., [2, 1, 0]]\n if os.path.dirname(img_path):\n os.makedirs(os.path.dirname(img_path), exist_ok=True)\n if img_path.endswith('.png'):\n max = np.iinfo(save_dtype).max\n img = (img * max).clip(0, max).astype(save_dtype)\n elif img_path.endswith('.jpg'):\n img = img[..., :3] # only color\n img = (img * 255).clip(0, 255).astype(np.uint8)\n elif img_path.endswith('.hdr'):\n img = img[..., :3] # only color\n elif img_path.endswith('.exr'):\n # ... https://github.com/opencv/opencv/issues/21326\n os.environ[\"OPENCV_IO_ENABLE_OPENEXR\"] = \"1\"\n else:\n # should we try to discard alpha channel here?\n # exr could store alpha channel\n pass # no transformation for other unspecified file formats\n return cv2.imwrite(img_path, img, [cv2.IMWRITE_JPEG_QUALITY, jpeg_quality, cv2.IMWRITE_PNG_COMPRESSION, png_compression])" }, { "identifier": "common_opengl_options", "path": "easyvolcap/utils/gl_utils.py", "snippet": "def common_opengl_options():\n # Use program point size\n gl.glEnable(gl.GL_PROGRAM_POINT_SIZE)\n\n # Performs face culling\n gl.glEnable(gl.GL_CULL_FACE)\n gl.glCullFace(gl.GL_BACK)\n\n # Performs alpha trans testing\n gl.glEnable(gl.GL_ALPHA_TEST)\n\n # Performs z-buffer testing\n gl.glEnable(gl.GL_DEPTH_TEST)\n # gl.glDepthMask(gl.GL_TRUE)\n gl.glDepthFunc(gl.GL_LEQUAL)\n # gl.glDepthRange(-1.0, 1.0)\n gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)\n\n # Enable some masking tests\n gl.glEnable(gl.GL_SCISSOR_TEST)\n\n # Enable this to correctly render points\n # https://community.khronos.org/t/gl-point-sprite-gone-in-3-2/59310\n gl.glEnable(gl.GL_POINT_SPRITE) # MARK: ONLY SPRITE IS WORKING FOR NOW\n # gl.glEnable(gl.GL_POINT_SMOOTH) # MARK: ONLY SPRITE IS WORKING FOR NOW\n\n # # Configure how we store the pixels in memory for our subsequent reading of the FBO to store the rendering into memory.\n # # The second argument specifies that our pixels will be in bytes.\n # gl.glPixelStorei(gl.GL_PACK_ALIGNMENT, 1)" }, { "identifier": "linearize_depth", "path": "easyvolcap/utils/gl_utils.py", "snippet": "def linearize_depth(d, n: float, f: float):\n # 0-1 -> -1,1\n # ndc -> view\n return (2.0 * n * f) / (f + n - (d * 2 - 1) * (f - n))" }, { "identifier": "my_tests", "path": "easyvolcap/utils/test_utils.py", "snippet": "@catch_throw\ndef my_tests(globals: dict = globals(), prefix: str = 'test'):\n # extract testing functions\n tests = {name: func for name, func in globals.items() if name.startswith(prefix)}\n # run tests\n pbar = tqdm(total=len(tests))\n for name, func in tests.items():\n pbar.desc = name\n pbar.refresh()\n\n func()\n log(f'{name}: {green(\"OK\")}')\n\n pbar.update(n=1)\n pbar.refresh()" } ]
from easyvolcap.utils.egl_utils import eglContextManager  # must be imported before OpenGL.GL
from os.path import join, dirname
from easyvolcap.utils.console_utils import *
from easyvolcap.utils.gl_utils import Quad, Mesh
from easyvolcap.utils.viewer_utils import Camera
from easyvolcap.utils.data_utils import save_image
from easyvolcap.utils.gl_utils import common_opengl_options, linearize_depth
from easyvolcap.utils.test_utils import my_tests
import OpenGL.GL as gl
import os
import cv2
import torch
import numpy as np
12132
# This file tries to render a point cloud with large radius in multiple passes
# And blend them accordingly with the chosen blend function
# This will simulate a manual depth sorting and blending
# I guess hardware are always faster than pure software implementations
from __future__ import absolute_import, division, print_function

# fmt: off
# fmt: on

WIDTH, HEIGHT = 512, 512
eglctx = eglContextManager(HEIGHT, WIDTH)  # will create a new context
common_opengl_options()  # common init


def test_point_splatting_single_pass():
    render_w = 1024
    render_h = 1024
    gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)

    mesh_path = 'assets/meshes/bunny.ply'
    img_path = 'test_point_splatting_single_pass_rgb.png'
    dpt_path = 'test_point_splatting_single_pass_dpt.png'

    camera = Camera(H=render_h, W=render_w,
                    K=torch.tensor([[render_w * 592 / 512, 0., render_w / 2], [0., render_h * 592 / 512, render_h / 2], [0., 0., 1.]]),
                    R=torch.tensor([[0.9908, -0.1353, 0.0000], [-0.1341, -0.9815, -0.1365], [0.0185, 0.1353, -0.9906]]),
                    T=torch.tensor([[0.0178], [0.0953], [0.3137]]),
                    n=0.02,
                    f=1,
                    )

    mesh = Mesh(filename=mesh_path, shade_flat=True, point_radius=0.015)
    mesh.render_type = Mesh.RenderType.POINTS
    mesh.offscreen_render(eglctx, camera)  # TODO: offscreen rendering of points not working, don't know why

    # Read result
    gl.glReadBuffer(gl.GL_COLOR_ATTACHMENT0)
    img_buf = gl.glReadPixels(0, 0, render_w, render_h, gl.GL_RGBA, gl.GL_UNSIGNED_BYTE)  # MARK: SYNC
    img = np.frombuffer(img_buf, np.uint8).reshape(render_h, render_w, 4)[::-1]
# This file tries to render a point cloud with large radius in multiple passes
# And blend them accordingly with the chosen blend function
# This will simulate a manual depth sorting and blending
# I guess hardware are always faster than pure software implementations
from __future__ import absolute_import, division, print_function

# fmt: off
# fmt: on

WIDTH, HEIGHT = 512, 512
eglctx = eglContextManager(HEIGHT, WIDTH)  # will create a new context
common_opengl_options()  # common init


def test_point_splatting_single_pass():
    render_w = 1024
    render_h = 1024
    gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)

    mesh_path = 'assets/meshes/bunny.ply'
    img_path = 'test_point_splatting_single_pass_rgb.png'
    dpt_path = 'test_point_splatting_single_pass_dpt.png'

    camera = Camera(H=render_h, W=render_w,
                    K=torch.tensor([[render_w * 592 / 512, 0., render_w / 2], [0., render_h * 592 / 512, render_h / 2], [0., 0., 1.]]),
                    R=torch.tensor([[0.9908, -0.1353, 0.0000], [-0.1341, -0.9815, -0.1365], [0.0185, 0.1353, -0.9906]]),
                    T=torch.tensor([[0.0178], [0.0953], [0.3137]]),
                    n=0.02,
                    f=1,
                    )

    mesh = Mesh(filename=mesh_path, shade_flat=True, point_radius=0.015)
    mesh.render_type = Mesh.RenderType.POINTS
    mesh.offscreen_render(eglctx, camera)  # TODO: offscreen rendering of points not working, don't know why

    # Read result
    gl.glReadBuffer(gl.GL_COLOR_ATTACHMENT0)
    img_buf = gl.glReadPixels(0, 0, render_w, render_h, gl.GL_RGBA, gl.GL_UNSIGNED_BYTE)  # MARK: SYNC
    img = np.frombuffer(img_buf, np.uint8).reshape(render_h, render_w, 4)[::-1]
save_image(img_path, img)
4
2023-12-07 08:53:42+00:00
16k
alibaba/animate-anything
utils/lora_handler.py
[ { "identifier": "UNet3DConditionModel", "path": "models/unet_3d_condition_mask.py", "snippet": "class UNet3DConditionModel(ModelMixin, ConfigMixin):\n r\"\"\"\n UNet3DConditionModel is a conditional 2D UNet model that takes in a noisy sample, conditional state, and a timestep\n and returns sample shaped output.\n\n This model inherits from [`ModelMixin`]. Check the superclass documentation for the generic methods the library\n implements for all the models (such as downloading or saving, etc.)\n\n Parameters:\n sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`):\n Height and width of input/output sample.\n in_channels (`int`, *optional*, defaults to 4): The number of channels in the input sample.\n out_channels (`int`, *optional*, defaults to 4): The number of channels in the output.\n down_block_types (`Tuple[str]`, *optional*, defaults to `(\"CrossAttnDownBlock2D\", \"CrossAttnDownBlock2D\", \"CrossAttnDownBlock2D\", \"DownBlock2D\")`):\n The tuple of downsample blocks to use.\n up_block_types (`Tuple[str]`, *optional*, defaults to `(\"UpBlock2D\", \"CrossAttnUpBlock2D\", \"CrossAttnUpBlock2D\", \"CrossAttnUpBlock2D\",)`):\n The tuple of upsample blocks to use.\n block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`):\n The tuple of output channels for each block.\n layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block.\n downsample_padding (`int`, *optional*, defaults to 1): The padding to use for the downsampling convolution.\n mid_block_scale_factor (`float`, *optional*, defaults to 1.0): The scale factor to use for the mid block.\n act_fn (`str`, *optional*, defaults to `\"silu\"`): The activation function to use.\n norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization.\n If `None`, it will skip the normalization and activation layers in post-processing\n norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization.\n cross_attention_dim (`int`, *optional*, defaults to 1280): The dimension of the cross attention features.\n attention_head_dim (`int`, *optional*, defaults to 8): The dimension of the attention heads.\n \"\"\"\n\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optional[int] = None,\n in_channels: int = 4,\n out_channels: int = 4,\n down_block_types: Tuple[str] = (\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"DownBlock3D\",\n ),\n up_block_types: Tuple[str] = (\"UpBlock3D\", \"CrossAttnUpBlock3D\", \"CrossAttnUpBlock3D\", \"CrossAttnUpBlock3D\"),\n block_out_channels: Tuple[int] = (320, 640, 1280, 1280),\n layers_per_block: int = 2,\n downsample_padding: int = 1,\n mid_block_scale_factor: float = 1,\n act_fn: str = \"silu\",\n norm_num_groups: Optional[int] = 32,\n norm_eps: float = 1e-5,\n cross_attention_dim: int = 1024,\n attention_head_dim: Union[int, Tuple[int]] = 64,\n motion_mask = False,\n motion_strength = False,\n ):\n super().__init__()\n self.motion_mask = motion_mask\n self.motion_strength = motion_strength\n print(f\"motion mask {self.motion_mask}, motion_strength {self.motion_strength}\")\n self.sample_size = sample_size\n self.gradient_checkpointing = False\n # Check inputs\n if len(down_block_types) != len(up_block_types):\n raise ValueError(\n f\"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. 
`up_block_types`: {up_block_types}.\"\n )\n\n if len(block_out_channels) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}.\"\n )\n\n if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}.\"\n )\n\n # input\n conv_in_kernel = 3\n conv_out_kernel = 3\n conv_in_padding = (conv_in_kernel - 1) // 2\n self.conv_in = nn.Conv2d(\n in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding\n )\n self.conv_in2 = nn.Conv2d(\n 5, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding\n )\n\n # time\n time_embed_dim = block_out_channels[0] * 4\n self.time_proj = Timesteps(block_out_channels[0], True, 0)\n timestep_input_dim = block_out_channels[0]\n\n self.time_embedding = TimestepEmbedding(\n timestep_input_dim,\n time_embed_dim,\n act_fn=act_fn,\n cond_proj_dim=block_out_channels[0],\n )\n\n self.motion_proj = Timesteps(block_out_channels[0], True, 0)\n self.motion_embedding = nn.Sequential(\n nn.Linear(timestep_input_dim, time_embed_dim), nn.SiLU(),\n nn.Linear(time_embed_dim, time_embed_dim))\n nn.init.zeros_(self.motion_embedding[-1].weight)\n nn.init.zeros_(self.motion_embedding[-1].bias)\n\n self.transformer_in = TransformerTemporalModel(\n num_attention_heads=8,\n attention_head_dim=attention_head_dim,\n in_channels=block_out_channels[0],\n num_layers=1,\n )\n\n # class embedding\n self.down_blocks = nn.ModuleList([])\n self.up_blocks = nn.ModuleList([])\n\n if isinstance(attention_head_dim, int):\n attention_head_dim = (attention_head_dim,) * len(down_block_types)\n\n # down\n output_channel = block_out_channels[0]\n for i, down_block_type in enumerate(down_block_types):\n input_channel = output_channel\n output_channel = block_out_channels[i]\n is_final_block = i == len(block_out_channels) - 1\n\n down_block = get_down_block(\n down_block_type,\n num_layers=layers_per_block,\n in_channels=input_channel,\n out_channels=output_channel,\n temb_channels=time_embed_dim,\n add_downsample=not is_final_block,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[i],\n downsample_padding=downsample_padding,\n dual_cross_attention=False,\n )\n self.down_blocks.append(down_block)\n\n # mid\n self.mid_block = UNetMidBlock3DCrossAttn(\n in_channels=block_out_channels[-1],\n temb_channels=time_embed_dim,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n output_scale_factor=mid_block_scale_factor,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[-1],\n resnet_groups=norm_num_groups,\n dual_cross_attention=False,\n )\n # count how many layers upsample the images\n self.num_upsamplers = 0\n\n # up\n reversed_block_out_channels = list(reversed(block_out_channels))\n reversed_attention_head_dim = list(reversed(attention_head_dim))\n\n output_channel = reversed_block_out_channels[0]\n for i, up_block_type in enumerate(up_block_types):\n is_final_block = i == len(block_out_channels) - 1\n\n prev_output_channel = output_channel\n output_channel = reversed_block_out_channels[i]\n input_channel = reversed_block_out_channels[min(i + 1, 
len(block_out_channels) - 1)]\n\n # add upsample block for all BUT final layer\n if not is_final_block:\n add_upsample = True\n self.num_upsamplers += 1\n else:\n add_upsample = False\n\n up_block = get_up_block(\n up_block_type,\n num_layers=layers_per_block + 1,\n in_channels=input_channel,\n out_channels=output_channel,\n prev_output_channel=prev_output_channel,\n temb_channels=time_embed_dim,\n add_upsample=add_upsample,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=reversed_attention_head_dim[i],\n dual_cross_attention=False,\n )\n self.up_blocks.append(up_block)\n prev_output_channel = output_channel\n\n # out\n if norm_num_groups is not None:\n self.conv_norm_out = nn.GroupNorm(\n num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps\n )\n self.conv_act = nn.SiLU()\n else:\n self.conv_norm_out = None\n self.conv_act = None\n\n conv_out_padding = (conv_out_kernel - 1) // 2\n self.conv_out = nn.Conv2d(\n block_out_channels[0], out_channels, kernel_size=conv_out_kernel, padding=conv_out_padding\n )\n\n def set_attention_slice(self, slice_size):\n r\"\"\"\n Enable sliced attention computation.\n\n When this option is enabled, the attention module will split the input tensor in slices, to compute attention\n in several steps. This is useful to save some memory in exchange for a small speed decrease.\n\n Args:\n slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `\"auto\"`):\n When `\"auto\"`, halves the input to the attention heads, so attention will be computed in two steps. If\n `\"max\"`, maxium amount of memory will be saved by running only one slice at a time. If a number is\n provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`\n must be a multiple of `slice_size`.\n \"\"\"\n sliceable_head_dims = []\n\n def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):\n if hasattr(module, \"set_attention_slice\"):\n sliceable_head_dims.append(module.sliceable_head_dim)\n\n for child in module.children():\n fn_recursive_retrieve_slicable_dims(child)\n\n # retrieve number of attention layers\n for module in self.children():\n fn_recursive_retrieve_slicable_dims(module)\n\n num_slicable_layers = len(sliceable_head_dims)\n\n if slice_size == \"auto\":\n # half the attention head size is usually a good trade-off between\n # speed and memory\n slice_size = [dim // 2 for dim in sliceable_head_dims]\n elif slice_size == \"max\":\n # make smallest slice possible\n slice_size = num_slicable_layers * [1]\n\n slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size\n\n if len(slice_size) != len(sliceable_head_dims):\n raise ValueError(\n f\"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different\"\n f\" attention layers. 
Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.\"\n )\n\n for i in range(len(slice_size)):\n size = slice_size[i]\n dim = sliceable_head_dims[i]\n if size is not None and size > dim:\n raise ValueError(f\"size {size} has to be smaller or equal to {dim}.\")\n\n # Recursively walk through all the children.\n # Any children which exposes the set_attention_slice method\n # gets the message\n def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):\n if hasattr(module, \"set_attention_slice\"):\n module.set_attention_slice(slice_size.pop())\n\n for child in module.children():\n fn_recursive_set_attention_slice(child, slice_size)\n\n reversed_slice_size = list(reversed(slice_size))\n for module in self.children():\n fn_recursive_set_attention_slice(module, reversed_slice_size)\n\n def _set_gradient_checkpointing(self, value=False):\n self.gradient_checkpointing = value\n self.mid_block.gradient_checkpointing = value\n for module in self.down_blocks + self.up_blocks:\n if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D, CrossAttnUpBlock3D, UpBlock3D)):\n module.gradient_checkpointing = value \n \n def forward(\n self,\n sample: torch.FloatTensor,\n timestep: Union[torch.Tensor, float, int],\n encoder_hidden_states: torch.Tensor,\n condition_latent: torch.Tensor,\n mask: torch.Tensor,\n class_labels: Optional[torch.Tensor] = None,\n timestep_cond: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,\n mid_block_additional_residual: Optional[torch.Tensor] = None,\n motion = None,\n return_dict: bool = True,\n ) -> Union[UNet3DConditionOutput, Tuple]:\n r\"\"\"\n Args:\n sample (`torch.FloatTensor`): (batch, num_frames, channel, height, width) noisy inputs tensor\n timestep (`torch.FloatTensor` or `float` or `int`): (batch) timesteps\n encoder_hidden_states (`torch.FloatTensor`): (batch, sequence_length, feature_dim) encoder hidden states\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`models.unet_2d_condition.UNet3DConditionOutput`] instead of a plain tuple.\n cross_attention_kwargs (`dict`, *optional*):\n A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under\n `self.processor` in\n [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).\n\n Returns:\n [`~models.unet_2d_condition.UNet3DConditionOutput`] or `tuple`:\n [`~models.unet_2d_condition.UNet3DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. 
When\n returning a tuple, the first element is the sample tensor.\n \"\"\"\n # By default samples have to be AT least a multiple of the overall upsampling factor.\n # The overall upsampling factor is equal to 2 ** (# num of upsampling layears).\n # However, the upsampling interpolation output size can be forced to fit any upsampling size\n # on the fly if necessary.\n default_overall_up_factor = 2**self.num_upsamplers\n sample = torch.cat([condition_latent, sample], dim=2)\n # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`\n forward_upsample_size = False\n upsample_size = None\n\n if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):\n logger.info(\"Forward upsample size to force interpolation output size.\")\n forward_upsample_size = True\n\n # prepare attention_mask\n if attention_mask is not None:\n attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0\n attention_mask = attention_mask.unsqueeze(1)\n\n # 1. time\n timesteps = timestep\n if not torch.is_tensor(timesteps):\n # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can\n # This would be a good case for the `match` statement (Python 3.10+)\n is_mps = sample.device.type == \"mps\"\n if isinstance(timestep, float):\n dtype = torch.float32 if is_mps else torch.float64\n else:\n dtype = torch.int32 if is_mps else torch.int64\n timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)\n elif len(timesteps.shape) == 0:\n timesteps = timesteps[None].to(sample.device)\n\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\n num_frames = sample.shape[2]\n timesteps = timesteps.expand(sample.shape[0])\n\n t_emb = self.time_proj(timesteps)\n\n # timesteps does not contain any weights and will always return f32 tensors\n # but time_embedding might actually be running in fp16. so we need to cast here.\n # there might be better ways to encapsulate this.\n t_emb = t_emb.to(dtype=self.dtype)\n if self.motion_strength and motion is not None:\n timestep_cond = self.motion_proj(motion).to(dtype=self.dtype)\n emb = self.time_embedding(t_emb, timestep_cond)\n #emb += self.motion_embedding(m_emb)\n else:\n emb = self.time_embedding(t_emb, timestep_cond)\n emb = emb.repeat_interleave(repeats=num_frames, dim=0)\n encoder_hidden_states = encoder_hidden_states.repeat_interleave(repeats=num_frames, dim=0)\n\n # 2. pre-process\n if self.motion_mask and mask is not None:\n mask = repeat(mask , 'b 1 1 h w -> (t b) 1 f h w', t=sample.shape[0]//mask.shape[0], f=sample.shape[2])\n sample = torch.cat([mask, sample], dim=1)\n sample = sample.permute(0, 2, 1, 3, 4).reshape((sample.shape[0] * num_frames, -1) + sample.shape[3:])\n sample = self.conv_in2(sample)\n else:\n sample = sample.permute(0, 2, 1, 3, 4).reshape((sample.shape[0] * num_frames, -1) + sample.shape[3:])\n sample = self.conv_in(sample)\n\n if num_frames > 1:\n if self.gradient_checkpointing:\n sample = transformer_g_c(self.transformer_in, sample, num_frames)\n else:\n sample = self.transformer_in(sample, num_frames=num_frames).sample\n\n # 3. 
down\n down_block_res_samples = (sample,)\n for i, downsample_block in enumerate(self.down_blocks):\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\n sample, res_samples = downsample_block(\n hidden_states=sample,\n temb=emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n num_frames=num_frames,\n cross_attention_kwargs=cross_attention_kwargs,\n )\n else:\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb, num_frames=num_frames)\n \n down_block_res_samples += res_samples\n\n if down_block_additional_residuals is not None:\n new_down_block_res_samples = ()\n\n for down_block_res_sample, down_block_additional_residual in zip(\n down_block_res_samples, down_block_additional_residuals\n ):\n down_block_res_sample = down_block_res_sample + down_block_additional_residual\n new_down_block_res_samples += (down_block_res_sample,)\n\n down_block_res_samples = new_down_block_res_samples\n\n # 4. mid\n if self.mid_block is not None:\n sample = self.mid_block(\n sample,\n emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n num_frames=num_frames,\n cross_attention_kwargs=cross_attention_kwargs,\n )\n\n if mid_block_additional_residual is not None:\n sample = sample + mid_block_additional_residual\n\n # 5. up\n for i, upsample_block in enumerate(self.up_blocks):\n is_final_block = i == len(self.up_blocks) - 1\n\n res_samples = down_block_res_samples[-len(upsample_block.resnets) :]\n down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]\n\n # if we have not reached the final block and need to forward the\n # upsample size, we do it here\n if not is_final_block and forward_upsample_size:\n upsample_size = down_block_res_samples[-1].shape[2:]\n\n if hasattr(upsample_block, \"has_cross_attention\") and upsample_block.has_cross_attention:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n encoder_hidden_states=encoder_hidden_states,\n upsample_size=upsample_size,\n attention_mask=attention_mask,\n num_frames=num_frames,\n cross_attention_kwargs=cross_attention_kwargs,\n )\n else:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n upsample_size=upsample_size,\n num_frames=num_frames,\n )\n\n # 6. 
post-process\n if self.conv_norm_out:\n sample = self.conv_norm_out(sample)\n sample = self.conv_act(sample)\n\n sample = self.conv_out(sample)\n\n # reshape to (batch, channel, framerate, width, height)\n sample = sample[None, :].reshape((-1, num_frames) + sample.shape[1:]).permute(0, 2, 1, 3, 4)\n sample = sample[:,:,1:]\n if not return_dict:\n return (sample,)\n\n return UNet3DConditionOutput(sample=sample)" }, { "identifier": "convert_unet_state_dict", "path": "utils/convert_diffusers_to_original_ms_text_to_video.py", "snippet": "def convert_unet_state_dict(unet_state_dict, strict_mapping=False):\n print ('Converting the UNET')\n # buyer beware: this is a *brittle* function,\n # and correct output requires that all of these pieces interact in\n # the exact order in which I have arranged them.\n mapping = {k: k for k in unet_state_dict.keys()}\n\n for sd_name, hf_name in unet_conversion_map:\n if strict_mapping:\n if hf_name in mapping:\n mapping[hf_name] = sd_name\n else:\n mapping[hf_name] = sd_name\n for k, v in mapping.items():\n if \"resnets\" in k:\n for sd_part, hf_part in unet_conversion_map_resnet:\n v = v.replace(hf_part, sd_part)\n mapping[k] = v\n # elif \"temp_convs\" in k:\n # for sd_part, hf_part in unet_conversion_map_resnet:\n # v = v.replace(hf_part, sd_part)\n # mapping[k] = v\n for k, v in mapping.items():\n for sd_part, hf_part in unet_conversion_map_layer:\n v = v.replace(hf_part, sd_part)\n mapping[k] = v\n \n\n # there must be a pattern, but I don't want to bother atm\n do_not_unsqueeze = [f'output_blocks.{i}.1.proj_out.weight' for i in range(3, 12)] + [f'output_blocks.{i}.1.proj_in.weight' for i in range(3, 12)] + ['middle_block.1.proj_in.weight', 'middle_block.1.proj_out.weight'] + [f'input_blocks.{i}.1.proj_out.weight' for i in [1, 2, 4, 5, 7, 8]] + [f'input_blocks.{i}.1.proj_in.weight' for i in [1, 2, 4, 5, 7, 8]]\n print (do_not_unsqueeze)\n\n new_state_dict = {v: (unet_state_dict[k].unsqueeze(-1) if ('proj_' in k and ('bias' not in k) and (k not in do_not_unsqueeze)) else unet_state_dict[k]) for k, v in mapping.items()}\n # HACK: idk why the hell it does not work with list comprehension\n for k, v in new_state_dict.items():\n has_k = False\n for n in do_not_unsqueeze:\n if k == n:\n has_k = True\n\n if has_k:\n v = v.squeeze(-1)\n new_state_dict[k] = v\n\n return new_state_dict" }, { "identifier": "convert_text_enc_state_dict_v20", "path": "utils/convert_diffusers_to_original_ms_text_to_video.py", "snippet": "def convert_text_enc_state_dict_v20(text_enc_dict):\n #print ('Converting the text encoder')\n new_state_dict = {}\n capture_qkv_weight = {}\n capture_qkv_bias = {}\n for k, v in text_enc_dict.items():\n if (\n k.endswith(\".self_attn.q_proj.weight\")\n or k.endswith(\".self_attn.k_proj.weight\")\n or k.endswith(\".self_attn.v_proj.weight\")\n ):\n k_pre = k[: -len(\".q_proj.weight\")]\n k_code = k[-len(\"q_proj.weight\")]\n if k_pre not in capture_qkv_weight:\n capture_qkv_weight[k_pre] = [None, None, None]\n capture_qkv_weight[k_pre][code2idx[k_code]] = v\n continue\n\n if (\n k.endswith(\".self_attn.q_proj.bias\")\n or k.endswith(\".self_attn.k_proj.bias\")\n or k.endswith(\".self_attn.v_proj.bias\")\n ):\n k_pre = k[: -len(\".q_proj.bias\")]\n k_code = k[-len(\"q_proj.bias\")]\n if k_pre not in capture_qkv_bias:\n capture_qkv_bias[k_pre] = [None, None, None]\n capture_qkv_bias[k_pre][code2idx[k_code]] = v\n continue\n\n relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)\n new_state_dict[relabelled_key] = v\n\n for 
k_pre, tensors in capture_qkv_weight.items():\n if None in tensors:\n raise Exception(\"CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing\")\n relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)\n new_state_dict[relabelled_key + \".in_proj_weight\"] = torch.cat(tensors)\n\n for k_pre, tensors in capture_qkv_bias.items():\n if None in tensors:\n raise Exception(\"CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing\")\n relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)\n new_state_dict[relabelled_key + \".in_proj_bias\"] = torch.cat(tensors)\n\n return new_state_dict" }, { "identifier": "extract_lora_ups_down", "path": "utils/lora.py", "snippet": "def extract_lora_ups_down(model, target_replace_module=DEFAULT_TARGET_REPLACE):\n\n loras = []\n\n for _m, _n, _child_module in _find_modules(\n model,\n target_replace_module,\n search_class=[LoraInjectedLinear, LoraInjectedConv2d, LoraInjectedConv3d],\n ):\n loras.append((_child_module.lora_up, _child_module.lora_down))\n\n if len(loras) == 0:\n raise ValueError(\"No lora injected.\")\n\n return loras" }, { "identifier": "inject_trainable_lora_extended", "path": "utils/lora.py", "snippet": "def inject_trainable_lora_extended(\n model: nn.Module,\n target_replace_module: Set[str] = UNET_EXTENDED_TARGET_REPLACE,\n r: int = 4,\n loras=None, # path to lora .pt\n):\n \"\"\"\n inject lora into model, and returns lora parameter groups.\n \"\"\"\n\n require_grad_params = []\n names = []\n\n if loras != None:\n loras = torch.load(loras)\n\n for _module, name, _child_module in _find_modules(\n model, target_replace_module, search_class=[nn.Linear, nn.Conv2d, nn.Conv3d]\n ):\n if _child_module.__class__ == nn.Linear:\n weight = _child_module.weight\n bias = _child_module.bias\n _tmp = LoraInjectedLinear(\n _child_module.in_features,\n _child_module.out_features,\n _child_module.bias is not None,\n r=r,\n )\n _tmp.linear.weight = weight\n if bias is not None:\n _tmp.linear.bias = bias\n elif _child_module.__class__ == nn.Conv2d:\n weight = _child_module.weight\n bias = _child_module.bias\n _tmp = LoraInjectedConv2d(\n _child_module.in_channels,\n _child_module.out_channels,\n _child_module.kernel_size,\n _child_module.stride,\n _child_module.padding,\n _child_module.dilation,\n _child_module.groups,\n _child_module.bias is not None,\n r=r,\n )\n\n _tmp.conv.weight = weight\n if bias is not None:\n _tmp.conv.bias = bias\n\n elif _child_module.__class__ == nn.Conv3d:\n weight = _child_module.weight\n bias = _child_module.bias\n _tmp = LoraInjectedConv3d(\n _child_module.in_channels,\n _child_module.out_channels,\n bias=_child_module.bias is not None,\n kernel_size=_child_module.kernel_size,\n padding=_child_module.padding,\n r=r,\n )\n\n _tmp.conv.weight = weight\n if bias is not None:\n _tmp.conv.bias = bias \n # switch the module\n _tmp.to(_child_module.weight.device).to(_child_module.weight.dtype)\n if bias is not None:\n _tmp.to(_child_module.bias.device).to(_child_module.bias.dtype)\n \n _module._modules[name] = _tmp\n require_grad_params.append(_module._modules[name].lora_up.parameters())\n require_grad_params.append(_module._modules[name].lora_down.parameters())\n\n if loras != None:\n _module._modules[name].lora_up.weight = loras.pop(0)\n _module._modules[name].lora_down.weight = loras.pop(0)\n\n _module._modules[name].lora_up.weight.requires_grad = True\n _module._modules[name].lora_down.weight.requires_grad = True\n 
names.append(name)\n\n return require_grad_params, names" }, { "identifier": "save_lora_weight", "path": "utils/lora.py", "snippet": "def save_lora_weight(\n model,\n path=\"./lora.pt\",\n target_replace_module=DEFAULT_TARGET_REPLACE,\n): \n weights = []\n for _up, _down in extract_lora_ups_down(\n model, target_replace_module=target_replace_module\n ):\n weights.append(_up.weight.to(\"cpu\").to(torch.float32))\n weights.append(_down.weight.to(\"cpu\").to(torch.float32))\n\n torch.save(weights, path)" }, { "identifier": "train_patch_pipe", "path": "utils/lora.py", "snippet": "def train_patch_pipe(pipe, patch_unet, patch_text):\n if patch_unet:\n print(\"LoRA : Patching Unet\")\n collapse_lora(pipe.unet)\n monkeypatch_remove_lora(pipe.unet)\n\n if patch_text:\n print(\"LoRA : Patching text encoder\")\n\n collapse_lora(pipe.text_encoder)\n monkeypatch_remove_lora(pipe.text_encoder)" }, { "identifier": "monkeypatch_or_replace_lora", "path": "utils/lora.py", "snippet": "def monkeypatch_or_replace_lora(\n model,\n loras,\n target_replace_module=DEFAULT_TARGET_REPLACE,\n r: Union[int, List[int]] = 4,\n):\n for _module, name, _child_module in _find_modules(\n model, target_replace_module, search_class=[nn.Linear, LoraInjectedLinear]\n ):\n _source = (\n _child_module.linear\n if isinstance(_child_module, LoraInjectedLinear)\n else _child_module\n )\n\n weight = _source.weight\n bias = _source.bias\n _tmp = LoraInjectedLinear(\n _source.in_features,\n _source.out_features,\n _source.bias is not None,\n r=r.pop(0) if isinstance(r, list) else r,\n )\n _tmp.linear.weight = weight\n\n if bias is not None:\n _tmp.linear.bias = bias\n\n # switch the module\n _module._modules[name] = _tmp\n\n up_weight = loras.pop(0)\n down_weight = loras.pop(0)\n\n _module._modules[name].lora_up.weight = nn.Parameter(\n up_weight.type(weight.dtype)\n )\n _module._modules[name].lora_down.weight = nn.Parameter(\n down_weight.type(weight.dtype)\n )\n\n _module._modules[name].to(weight.device)" }, { "identifier": "monkeypatch_or_replace_lora_extended", "path": "utils/lora.py", "snippet": "def monkeypatch_or_replace_lora_extended(\n model,\n loras,\n target_replace_module=DEFAULT_TARGET_REPLACE,\n r: Union[int, List[int]] = 4,\n):\n for _module, name, _child_module in _find_modules(\n model,\n target_replace_module,\n search_class=[\n nn.Linear, \n nn.Conv2d, \n nn.Conv3d,\n LoraInjectedLinear, \n LoraInjectedConv2d, \n LoraInjectedConv3d,\n ],\n ):\n\n if (_child_module.__class__ == nn.Linear) or (\n _child_module.__class__ == LoraInjectedLinear\n ):\n if len(loras[0].shape) != 2:\n continue\n\n _source = (\n _child_module.linear\n if isinstance(_child_module, LoraInjectedLinear)\n else _child_module\n )\n\n weight = _source.weight\n bias = _source.bias\n _tmp = LoraInjectedLinear(\n _source.in_features,\n _source.out_features,\n _source.bias is not None,\n r=r.pop(0) if isinstance(r, list) else r,\n )\n _tmp.linear.weight = weight\n\n if bias is not None:\n _tmp.linear.bias = bias\n\n elif (_child_module.__class__ == nn.Conv2d) or (\n _child_module.__class__ == LoraInjectedConv2d\n ):\n if len(loras[0].shape) != 4:\n continue\n _source = (\n _child_module.conv\n if isinstance(_child_module, LoraInjectedConv2d)\n else _child_module\n )\n\n weight = _source.weight\n bias = _source.bias\n _tmp = LoraInjectedConv2d(\n _source.in_channels,\n _source.out_channels,\n _source.kernel_size,\n _source.stride,\n _source.padding,\n _source.dilation,\n _source.groups,\n _source.bias is not None,\n r=r.pop(0) if isinstance(r, list) else 
r,\n )\n\n _tmp.conv.weight = weight\n\n if bias is not None:\n _tmp.conv.bias = bias\n\n elif _child_module.__class__ == nn.Conv3d or(\n _child_module.__class__ == LoraInjectedConv3d\n ):\n\n if len(loras[0].shape) != 5:\n continue\n\n _source = (\n _child_module.conv\n if isinstance(_child_module, LoraInjectedConv3d)\n else _child_module\n )\n\n weight = _source.weight\n bias = _source.bias\n _tmp = LoraInjectedConv3d(\n _source.in_channels,\n _source.out_channels,\n bias=_source.bias is not None,\n kernel_size=_source.kernel_size,\n padding=_source.padding,\n r=r.pop(0) if isinstance(r, list) else r,\n )\n\n _tmp.conv.weight = weight\n\n if bias is not None:\n _tmp.conv.bias = bias\n\n # switch the module\n _module._modules[name] = _tmp\n\n up_weight = loras.pop(0)\n down_weight = loras.pop(0)\n\n _module._modules[name].lora_up.weight = nn.Parameter(\n up_weight.type(weight.dtype)\n )\n _module._modules[name].lora_down.weight = nn.Parameter(\n down_weight.type(weight.dtype)\n )\n\n _module._modules[name].to(weight.device)" }, { "identifier": "activate_lora_train", "path": "stable_lora/lora.py", "snippet": "def activate_lora_train(model, bias):\n def unfreeze():\n print(model.__class__.__name__ + \" LoRA set for training.\")\n return loralb.mark_only_lora_as_trainable(model, bias=bias)\n\n return unfreeze" }, { "identifier": "add_lora_to", "path": "stable_lora/lora.py", "snippet": "def add_lora_to(\n model, \n target_module=UNET_REPLACE, \n search_class=[torch.nn.Linear], \n r=32, \n dropout=0,\n lora_bias='none'\n):\n for module, name, child_module in find_modules(\n model, \n ancestor_class=target_module, \n search_class=search_class\n ):\n bias = hasattr(child_module, \"bias\")\n \n # Check if child module of the model has bias.\n if bias:\n if child_module.bias is None:\n bias = False\n\n # Check if the child module of the model is type Linear or Conv2d.\n if isinstance(child_module, torch.nn.Linear):\n l = create_lora_linear(child_module, r, dropout, bias=bias)\n\n if isinstance(child_module, torch.nn.Conv2d):\n l = create_lora_conv(child_module, r, dropout, bias=bias)\n\n if isinstance(child_module, torch.nn.Conv3d):\n l = create_lora_conv3d(child_module, r, dropout, bias=bias)\n\n if isinstance(child_module, torch.nn.Embedding):\n l = create_lora_emb(child_module, r)\n \n # If the model has bias and we wish to add it, use the child_modules in place\n if bias:\n l.bias = child_module.bias\n \n # Assign the frozen weight of model's Linear or Conv2d to the LoRA model.\n l.weight = child_module.weight\n\n # Replace the new LoRA model with the model's Linear or Conv2d module.\n module._modules[name] = l\n \n\n # Unfreeze only the newly added LoRA weights, but keep the model frozen.\n return activate_lora_train(model, lora_bias)" }, { "identifier": "save_lora", "path": "stable_lora/lora.py", "snippet": "def save_lora(\n unet=None, \n text_encoder=None, \n save_text_weights=False,\n output_dir=\"output\",\n lora_filename=\"lora.safetensors\",\n lora_bias='none', \n save_for_webui=True,\n only_webui=False,\n metadata=None,\n unet_dict_converter=None,\n text_dict_converter=None\n ):\n\n if not only_webui:\n # Create directory for the full LoRA weights.\n trainable_weights_dir = f\"{output_dir}/full_weights\"\n lora_out_file_full_weight = f\"{trainable_weights_dir}/{lora_filename}\"\n os.makedirs(trainable_weights_dir, exist_ok=True)\n\n ext = '.safetensors'\n # Create LoRA out filename.\n lora_out_file = f\"{output_dir}/webui_{lora_filename}{ext}\"\n\n if not only_webui:\n 
save_path_full_weights = lora_out_file_full_weight + ext\n\n save_path = lora_out_file\n\n if not only_webui:\n for i, model in enumerate([unet, text_encoder]):\n if save_text_weights and i == 1:\n non_webui_weights = save_path_full_weights.replace(ext, f\"_text_encoder{ext}\")\n\n else:\n non_webui_weights = save_path_full_weights.replace(ext, f\"_unet{ext}\")\n\n # Load only the LoRAs from the state dict.\n lora_dict = loralb.lora_state_dict(model, bias=lora_bias)\n \n # Save the models as fp32. This ensures we can finetune again without having to upcast. \n save_file(lora_dict, non_webui_weights)\n \n if save_for_webui:\n # Convert the keys to compvis model and webui\n unet_lora_dict = loralb.lora_state_dict(unet, bias=lora_bias) \n lora_dict_fp16 = unet_dict_converter(unet_lora_dict, strict_mapping=True)\n \n if save_text_weights:\n text_encoder_dict = loralb.lora_state_dict(text_encoder, bias=lora_bias)\n lora_dict_text_fp16 = text_dict_converter(text_encoder_dict)\n \n # Update the Unet dict to include text keys.\n lora_dict_fp16.update(lora_dict_text_fp16)\n\n # Cast tensors to fp16. It's assumed we won't be finetuning these.\n for k, v in lora_dict_fp16.items():\n lora_dict_fp16[k] = v.to(dtype=torch.float16)\n\n save_file(\n lora_dict_fp16, \n save_path, \n metadata=metadata\n )" }, { "identifier": "load_lora", "path": "stable_lora/lora.py", "snippet": "def load_lora(model, lora_path: str):\n try:\n if os.path.exists(lora_path):\n lora_dict = load_file(lora_path)\n model.load_state_dict(lora_dict, strict=False)\n\n except Exception as e:\n print(f\"Could not load your lora file: {e}\")" }, { "identifier": "set_mode_group", "path": "stable_lora/lora.py", "snippet": "def set_mode_group(models, train):\n for model in models: \n set_mode(model, train)\n model.train(train)" } ]
import os import torch import uuid from logging import warnings from typing import Union from types import SimpleNamespace from models.unet_3d_condition_mask import UNet3DConditionModel from transformers import CLIPTextModel from utils.convert_diffusers_to_original_ms_text_to_video import convert_unet_state_dict, convert_text_enc_state_dict_v20 from .lora import ( extract_lora_ups_down, inject_trainable_lora_extended, save_lora_weight, train_patch_pipe, monkeypatch_or_replace_lora, monkeypatch_or_replace_lora_extended ) from stable_lora.lora import ( activate_lora_train, add_lora_to, save_lora, load_lora, set_mode_group )
12349
print(f"Could not load LoRAs for {model.__class__.__name__}. Injecting new ones instead...") except Exception as e: print(f"An error occured while loading a LoRA file: {e}") def get_lora_func_args(self, lora_path, use_lora, model, replace_modules, r, dropout, lora_bias): return_dict = lora_args.copy() if self.is_cloneofsimo_lora(): return_dict = filter_dict(return_dict, keys=CLONE_OF_SIMO_KEYS) return_dict.update({ "model": model, "loras": self.get_lora_file_path(lora_path, model), "target_replace_module": replace_modules, "r": r }) if self.is_stable_lora(): KEYS = ['model', 'lora_path'] return_dict = filter_dict(return_dict, KEYS) return_dict.update({'model': model, 'lora_path': lora_path}) return return_dict def do_lora_injection( self, model, replace_modules, bias='none', dropout=0, r=4, lora_loader_args=None, ): REPLACE_MODULES = replace_modules params = None negation = None is_injection_hybrid = False if self.is_cloneofsimo_lora(): is_injection_hybrid = True injector_args = lora_loader_args params, negation = self.lora_injector(**injector_args) for _up, _down in extract_lora_ups_down( model, target_replace_module=REPLACE_MODULES): if all(x is not None for x in [_up, _down]): print(f"Lora successfully injected into {model.__class__.__name__}.") break return params, negation, is_injection_hybrid if self.is_stable_lora(): injector_args = lora_args.copy() injector_args = filter_dict(injector_args, keys=STABLE_LORA_KEYS) SEARCH_CLASS = [torch.nn.Linear, torch.nn.Conv2d, torch.nn.Conv3d, torch.nn.Embedding] injector_args.update({ "model": model, "target_module": REPLACE_MODULES, "search_class": SEARCH_CLASS, "r": r, "dropout": dropout, "lora_bias": self.lora_bias }) activator = self.lora_injector(**injector_args) activator() return params, negation, is_injection_hybrid def add_lora_to_model(self, use_lora, model, replace_modules, dropout=0.0, lora_path='', r=16): params = None negation = None lora_loader_args = self.get_lora_func_args( lora_path, use_lora, model, replace_modules, r, dropout, self.lora_bias ) if use_lora: params, negation, is_injection_hybrid = self.do_lora_injection( model, replace_modules, bias=self.lora_bias, lora_loader_args=lora_loader_args, dropout=dropout, r=r ) if not is_injection_hybrid: self.load_lora(model, lora_path=lora_path, lora_loader_args=lora_loader_args) params = model if params is None else params return params, negation def deactivate_lora_train(self, models, deactivate=True): """ Usage: Use before and after sampling previews. Currently only available for Stable LoRA. """ if self.is_stable_lora(): set_mode_group(models, not deactivate) def save_cloneofsimo_lora(self, model, save_path, step): def save_lora(model, name, condition, replace_modules, step, save_path): if condition and replace_modules is not None: save_path = f"{save_path}/{step}_{name}.pt"
FILE_BASENAMES = ['unet', 'text_encoder'] LORA_FILE_TYPES = ['.pt', '.safetensors'] CLONE_OF_SIMO_KEYS = ['model', 'loras', 'target_replace_module', 'r'] STABLE_LORA_KEYS = ['model', 'target_module', 'search_class', 'r', 'dropout', 'lora_bias'] lora_versions = dict( stable_lora = "stable_lora", cloneofsimo = "cloneofsimo" ) lora_func_types = dict( loader = "loader", injector = "injector" ) lora_args = dict( model = None, loras = None, target_replace_module = [], target_module = [], r = 4, search_class = [torch.nn.Linear], dropout = 0, lora_bias = 'none' ) LoraVersions = SimpleNamespace(**lora_versions) LoraFuncTypes = SimpleNamespace(**lora_func_types) LORA_VERSIONS = [LoraVersions.stable_lora, LoraVersions.cloneofsimo] LORA_FUNC_TYPES = [LoraFuncTypes.loader, LoraFuncTypes.injector] def filter_dict(_dict, keys=[]): if len(keys) == 0: assert "Keys cannot empty for filtering return dict." for k in keys: if k not in lora_args.keys(): assert f"{k} does not exist in available LoRA arguments" return {k: v for k, v in _dict.items() if k in keys} class LoraHandler(object): def __init__( self, version: LORA_VERSIONS = LoraVersions.cloneofsimo, use_unet_lora: bool = False, use_text_lora: bool = False, save_for_webui: bool = False, only_for_webui: bool = False, lora_bias: str = 'none', unet_replace_modules: list = ['UNet3DConditionModel'], text_encoder_replace_modules: list = ['CLIPEncoderLayer'] ): self.version = version self.lora_loader = self.get_lora_func(func_type=LoraFuncTypes.loader) self.lora_injector = self.get_lora_func(func_type=LoraFuncTypes.injector) self.lora_bias = lora_bias self.use_unet_lora = use_unet_lora self.use_text_lora = use_text_lora self.save_for_webui = save_for_webui self.only_for_webui = only_for_webui self.unet_replace_modules = unet_replace_modules self.text_encoder_replace_modules = text_encoder_replace_modules self.use_lora = any([use_text_lora, use_unet_lora]) if self.use_lora: print(f"Using LoRA Version: {self.version}") def is_cloneofsimo_lora(self): return self.version == LoraVersions.cloneofsimo def is_stable_lora(self): return self.version == LoraVersions.stable_lora def get_lora_func(self, func_type: LORA_FUNC_TYPES = LoraFuncTypes.loader): if self.is_cloneofsimo_lora(): if func_type == LoraFuncTypes.loader: return monkeypatch_or_replace_lora_extended if func_type == LoraFuncTypes.injector: return inject_trainable_lora_extended if self.is_stable_lora(): if func_type == LoraFuncTypes.loader: return load_lora if func_type == LoraFuncTypes.injector: return add_lora_to assert "LoRA Version does not exist." 
def check_lora_ext(self, lora_file: str): return lora_file.endswith(tuple(LORA_FILE_TYPES)) def get_lora_file_path( self, lora_path: str, model: Union[UNet3DConditionModel, CLIPTextModel] ): if os.path.exists(lora_path): lora_filenames = [fns for fns in os.listdir(lora_path)] is_lora = self.check_lora_ext(lora_path) is_unet = isinstance(model, UNet3DConditionModel) is_text = isinstance(model, CLIPTextModel) idx = 0 if is_unet else 1 base_name = FILE_BASENAMES[idx] for lora_filename in lora_filenames: is_lora = self.check_lora_ext(lora_filename) if not is_lora: continue if base_name in lora_filename: return os.path.join(lora_path, lora_filename) return None def handle_lora_load(self, file_name:str, lora_loader_args: dict = None): self.lora_loader(**lora_loader_args) print(f"Successfully loaded LoRA from: {file_name}") def load_lora(self, model, lora_path: str = '', lora_loader_args: dict = None,): try: lora_file = self.get_lora_file_path(lora_path, model) if lora_file is not None: lora_loader_args.update({"lora_path": lora_file}) self.handle_lora_load(lora_file, lora_loader_args) else: print(f"Could not load LoRAs for {model.__class__.__name__}. Injecting new ones instead...") except Exception as e: print(f"An error occured while loading a LoRA file: {e}") def get_lora_func_args(self, lora_path, use_lora, model, replace_modules, r, dropout, lora_bias): return_dict = lora_args.copy() if self.is_cloneofsimo_lora(): return_dict = filter_dict(return_dict, keys=CLONE_OF_SIMO_KEYS) return_dict.update({ "model": model, "loras": self.get_lora_file_path(lora_path, model), "target_replace_module": replace_modules, "r": r }) if self.is_stable_lora(): KEYS = ['model', 'lora_path'] return_dict = filter_dict(return_dict, KEYS) return_dict.update({'model': model, 'lora_path': lora_path}) return return_dict def do_lora_injection( self, model, replace_modules, bias='none', dropout=0, r=4, lora_loader_args=None, ): REPLACE_MODULES = replace_modules params = None negation = None is_injection_hybrid = False if self.is_cloneofsimo_lora(): is_injection_hybrid = True injector_args = lora_loader_args params, negation = self.lora_injector(**injector_args) for _up, _down in extract_lora_ups_down( model, target_replace_module=REPLACE_MODULES): if all(x is not None for x in [_up, _down]): print(f"Lora successfully injected into {model.__class__.__name__}.") break return params, negation, is_injection_hybrid if self.is_stable_lora(): injector_args = lora_args.copy() injector_args = filter_dict(injector_args, keys=STABLE_LORA_KEYS) SEARCH_CLASS = [torch.nn.Linear, torch.nn.Conv2d, torch.nn.Conv3d, torch.nn.Embedding] injector_args.update({ "model": model, "target_module": REPLACE_MODULES, "search_class": SEARCH_CLASS, "r": r, "dropout": dropout, "lora_bias": self.lora_bias }) activator = self.lora_injector(**injector_args) activator() return params, negation, is_injection_hybrid def add_lora_to_model(self, use_lora, model, replace_modules, dropout=0.0, lora_path='', r=16): params = None negation = None lora_loader_args = self.get_lora_func_args( lora_path, use_lora, model, replace_modules, r, dropout, self.lora_bias ) if use_lora: params, negation, is_injection_hybrid = self.do_lora_injection( model, replace_modules, bias=self.lora_bias, lora_loader_args=lora_loader_args, dropout=dropout, r=r ) if not is_injection_hybrid: self.load_lora(model, lora_path=lora_path, lora_loader_args=lora_loader_args) params = model if params is None else params return params, negation def deactivate_lora_train(self, models, 
deactivate=True): """ Usage: Use before and after sampling previews. Currently only available for Stable LoRA. """ if self.is_stable_lora(): set_mode_group(models, not deactivate) def save_cloneofsimo_lora(self, model, save_path, step): def save_lora(model, name, condition, replace_modules, step, save_path): if condition and replace_modules is not None: save_path = f"{save_path}/{step}_{name}.pt"
save_lora_weight(model, save_path, replace_modules)
5
2023-12-07 08:26:29+00:00
16k
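To make the first record easier to follow, here is a minimal sketch of how its gold next line completes the save_cloneofsimo_lora method shown in cropped_code/all_code. The import path is an assumption based on the context's file path (utils/lora.py), and everything after the inner helper lies outside the record, so it is omitted:

# Sketch only; assumes save_lora_weight is importable from utils/lora.py as the context suggests.
from utils.lora import save_lora_weight

class LoraHandler:  # trimmed to the single method this record completes
    def save_cloneofsimo_lora(self, model, save_path, step):
        def save_lora(model, name, condition, replace_modules, step, save_path):
            if condition and replace_modules is not None:
                save_path = f"{save_path}/{step}_{name}.pt"
                # Gold next line of the record; the positional arguments line up with
                # save_lora_weight(model, path, target_replace_module) from the context.
                save_lora_weight(model, save_path, replace_modules)
        # The calls that apply save_lora to the UNet and text encoder come after the
        # record's next_line and are therefore not reproduced here.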
octo-models/octo
scripts/finetune.py
[ { "identifier": "make_single_dataset", "path": "octo/data/dataset.py", "snippet": "def make_single_dataset(\n dataset_kwargs: dict,\n *,\n train: bool,\n traj_transform_kwargs: dict = {},\n frame_transform_kwargs: dict = {},\n) -> dl.DLataset:\n \"\"\"Creates a single dataset from kwargs. Returns a dataset of trajectories.\n\n Args:\n dataset_kwargs: kwargs passed to `make_dataset_from_rlds` that are dataset-specific.\n train: whether this is a training or validation dataset.\n traj_transform_kwargs: kwargs passed to 'apply_trajectory_transforms'.\n frame_transform_kwargs: kwargs passed to 'get_frame_transforms'.\n \"\"\"\n dataset, dataset_statistics = make_dataset_from_rlds(\n **dataset_kwargs,\n train=train,\n )\n dataset = apply_trajectory_transforms(dataset, **traj_transform_kwargs, train=train)\n dataset = apply_frame_transforms(dataset, **frame_transform_kwargs, train=train)\n\n # this seems to reduce memory usage without affecting speed\n dataset = dataset.with_ram_budget(1)\n\n # save for later\n dataset.dataset_statistics = dataset_statistics\n return dataset" }, { "identifier": "OctoModel", "path": "octo/model/octo_model.py", "snippet": "class OctoModel:\n \"\"\"Recommended way of interacting with Octo models.\n\n Usage for inference:\n\n >>> model = OctoModel.load_pretrained(checkpoint_dir)\n >>> tasks = model.create_tasks(texts=[\"go to the red room\"])\n >>> # or tasks = model.create_tasks(goals={\"image_primary\": goal_images})\n >>> actions = model.sample_actions(observations, tasks, rng=jax.random.PRNGKey(0))\n >>> # Note: these are normalized actions (processed to mean 0 and std 1). To get the raw actions,\n # un-normalize them using model.dataset_statistics\n\n Usage for finetuning:\n\n >>> model = OctoModel.load_pretrained(checkpoint_dir)\n >>> train_state = octo.utils.train_utils.TrainState.create(\n rng=jax.random.PRNGKey(0),\n model=model,\n tx=optax.adamw(...)\n )\n >>> # access params through train_state.model.params\n >>> train_state, metrics = your_update_function(train_state, batch)\n >>> # when it's time to save (note that this only saves the model parameters,\n >>> # not the full optimizer state)\n >>> train_state.model.save_pretrained(step, save_dir)\n\n Usage for pretraining:\n\n >>> model = OctoModel.from_config(\n config,\n example_batch,\n text_processor\n ) # initializes params\n >>> # Continue as in finetuning example\n\n See full usage examples in train.py and finetune.py.\n\n \"\"\"\n\n module: OctoModule = struct.field(pytree_node=False)\n text_processor: TextProcessor = struct.field(pytree_node=False)\n config: Config = struct.field(pytree_node=False)\n params: Params\n example_batch: Data\n dataset_statistics: Optional[Data]\n\n def create_tasks(\n self, goals: Optional[Data] = None, texts: Optional[Sequence[str]] = None\n ):\n \"\"\"Creates tasks dict from goals and texts.\n\n Args:\n goals: if not None, dict of arrays with shape (batch_size, *)\n texts: if not None, list of texts of length batch_size\n\n Omit images to run the language-conditioned model, and omit texts to run the\n goal-conditioned model.\n \"\"\"\n assert goals is not None or texts is not None\n tasks = {\"pad_mask_dict\": {}}\n if goals is not None:\n tasks.update(goals)\n tasks[\"pad_mask_dict\"].update(\n {k: np.ones(v.shape[:1], dtype=bool) for k, v in goals.items()}\n )\n else:\n batch_size = len(texts)\n tasks.update(\n {\n k: np.zeros((batch_size, *v.shape[1:]), dtype=v.dtype)\n for k, v in self.example_batch[\"task\"].items()\n if k not in (\"pad_mask_dict\", 
\"language_instruction\")\n }\n )\n tasks[\"pad_mask_dict\"].update(\n {\n k: np.zeros(batch_size, dtype=bool)\n for k in tasks.keys()\n if k != \"pad_mask_dict\"\n }\n )\n\n if texts is not None:\n assert self.text_processor is not None\n tasks[\"language_instruction\"] = texts\n tasks[\"pad_mask_dict\"][\"language_instruction\"] = np.ones(\n len(texts), dtype=bool\n )\n else:\n batch_size = jax.tree_leaves(goals)[0].shape[0]\n tasks[\"language_instruction\"] = [\"\"] * batch_size\n tasks[\"pad_mask_dict\"][\"language_instruction\"] = np.zeros(\n batch_size, dtype=bool\n )\n\n if self.text_processor is not None:\n tasks[\"language_instruction\"] = self.text_processor.encode(\n tasks[\"language_instruction\"]\n )\n else:\n del tasks[\"language_instruction\"]\n\n _verify_shapes(tasks, \"tasks\", self.example_batch[\"task\"], starting_dim=1)\n return tasks\n\n @partial(jax.jit, static_argnames=(\"train\",))\n def run_transformer(\n self, observations: Data, tasks: Data, pad_mask: ArrayLike, train: bool = False\n ):\n \"\"\"Runs the transformer, but does shape checking on the inputs.\n\n Args:\n observations: dictionary of arrays of shape (batch_size, window_size, *shape).\n Shape must be consistent with self.example_batch[\"observation\"]\n tasks: dict of tasks of shape (batch_size, *shape)\n Shape must be consistent with self.example_batch[\"task\"]\n pad_mask: (batch_size, window_size) Boolean mask that is False when the timestep corresponds to padding\n train: whether to run in train mode\n \"\"\"\n _verify_shapes(\n observations,\n \"observations\",\n self.example_batch[\"observation\"],\n starting_dim=2,\n )\n _verify_shapes(tasks, \"tasks\", self.example_batch[\"task\"], starting_dim=1)\n\n return self.module.apply(\n {\"params\": self.params},\n observations,\n tasks,\n pad_mask,\n train=train,\n method=\"octo_transformer\",\n )\n\n @partial(jax.jit, static_argnames=(\"train\", \"sample_shape\", \"argmax\"))\n def sample_actions(\n self,\n observations: Data,\n tasks: Data,\n pad_mask: Optional[ArrayLike] = None,\n train: bool = False,\n argmax: bool = False,\n sample_shape: Tuple[int, ...] = (),\n rng: Optional[PRNGKey] = None,\n temperature: float = 1.0,\n ):\n \"\"\"Samples actions from the model. See `action_heads.py` for more info.\n\n Args:\n observations: dictionary of arrays of shape (batch_size, window_size, *)\n tasks: dict of tasks of shape (batch_size, *)\n pad_mask: (batch_size, window_size) Boolean mask that is False when the timestep corresponds to padding\n train: whether to run in train mode\n ...see `action_heads.py` for the rest of the kwargs.\n Returns:\n actions: (*sample_shape, batch_size, pred_horizon, action_dim)\n \"\"\"\n if pad_mask is None:\n pad_mask = observations[\"pad_mask\"]\n\n transformer_outputs = self.run_transformer(\n observations, tasks, pad_mask, train=train\n )\n action_head: ActionHead = self.module.bind({\"params\": self.params}).heads[\n \"action\"\n ]\n return action_head.predict_action(\n transformer_outputs,\n train=train,\n argmax=argmax,\n sample_shape=sample_shape,\n rng=rng,\n temperature=temperature,\n )\n\n @classmethod\n def load_pretrained(\n cls,\n checkpoint_path: str,\n step: Optional[int] = None,\n ) -> \"OctoModel\":\n \"\"\"Loads a model from a checkpoint that was saved via `save_pretrained`.\n\n Args:\n checkpoint_path (str): A path to either a directory of checkpoints or a single checkpoint.\n step (int, optional): If multiple checkpoints are present, which one to load. 
Defaults to the latest.\n \"\"\"\n if checkpoint_path.startswith(\"hf://\"):\n if step:\n raise ValueError(\n \"You can't set config['pretrained_step'] when loading from HuggingFace.\"\n )\n checkpoint_path = _download_from_huggingface(\n checkpoint_path.removeprefix(\"hf://\")\n )\n\n # load config\n with tf.io.gfile.GFile(\n tf.io.gfile.join(checkpoint_path, \"config.json\"), \"r\"\n ) as f:\n config = json.load(f)\n\n # load example batch\n with tf.io.gfile.GFile(\n tf.io.gfile.join(checkpoint_path, \"example_batch.msgpack\"), \"rb\"\n ) as f:\n example_batch = flax.serialization.msgpack_restore(f.read())\n # shim for migrating from \"tasks\" to \"task\"\n if \"tasks\" in example_batch:\n example_batch[\"task\"] = example_batch.pop(\"tasks\")\n\n logging.debug(\n \"Model was trained with observations: %s\",\n flax.core.pretty_repr(\n jax.tree_map(jnp.shape, example_batch[\"observation\"])\n ),\n )\n logging.debug(\n \"Model was trained with tasks: %s\",\n flax.core.pretty_repr(jax.tree_map(jnp.shape, example_batch[\"task\"])),\n )\n\n # load dataset statistics\n with tf.io.gfile.GFile(\n tf.io.gfile.join(checkpoint_path, \"dataset_statistics.json\"), \"r\"\n ) as f:\n dataset_statistics = json.load(f)\n dataset_statistics = jax.tree_map(\n np.array, dataset_statistics, is_leaf=lambda x: not isinstance(x, dict)\n )\n\n # create model def (an OctoModule)\n module = OctoModule.create(**config[\"model\"])\n # infer params shape without actually doing any computation\n params_shape = jax.eval_shape(\n partial(module.init, train=False),\n jax.random.PRNGKey(0),\n example_batch[\"observation\"],\n example_batch[\"task\"],\n example_batch[\"observation\"][\"pad_mask\"],\n )[\"params\"]\n # restore params, checking to make sure the shape matches\n checkpointer = orbax.checkpoint.CheckpointManager(\n checkpoint_path, orbax.checkpoint.PyTreeCheckpointer()\n )\n step = step if step is not None else checkpointer.latest_step()\n params = checkpointer.restore(step, params_shape)\n\n if config[\"text_processor\"] is not None:\n text_processor = ModuleSpec.instantiate(config[\"text_processor\"])()\n else:\n text_processor = None\n\n return cls(\n module=module,\n params=params,\n text_processor=text_processor,\n example_batch=example_batch,\n config=config,\n dataset_statistics=dataset_statistics,\n )\n\n def save_pretrained(\n self,\n step: int,\n checkpoint_path: Optional[str] = None,\n checkpoint_manager: Optional[orbax.checkpoint.CheckpointManager] = None,\n ):\n \"\"\"Saves a model, as well as corresponding metadata needed for `load_pretrained`. Takes either a\n pre-existing checkpoint manager (which already knows where to save the checkpoint) or a path to a\n directory to save the checkpoint to.\n\n Args:\n step (int): Step number.\n checkpoint_path (str, optional): Path to save the checkpoint.\n checkpoint_manager (optional): Checkpoint manager to save the checkpoint.\n params (optional): Params to save. 
If None, uses self.params.\n \"\"\"\n if (checkpoint_path is None) == (checkpoint_manager is None):\n raise ValueError(\n \"Must provide exactly one of checkpoint_path or checkpoint_manager.\"\n )\n if checkpoint_manager is None:\n checkpoint_manager = orbax.checkpoint.CheckpointManager(\n checkpoint_path, orbax.checkpoint.PyTreeCheckpointer()\n )\n if checkpoint_path is None:\n checkpoint_path = str(checkpoint_manager._directory)\n\n # save params\n checkpoint_manager.save(\n step,\n self.params,\n {\"save_args\": orbax_utils.save_args_from_target(self.params)},\n )\n\n if jax.process_index() == 0:\n # save config\n config_path = tf.io.gfile.join(checkpoint_path, \"config.json\")\n if not tf.io.gfile.exists(config_path):\n with tf.io.gfile.GFile(config_path, \"w\") as f:\n json.dump(self.config, f)\n\n # save example batch\n example_batch_path = tf.io.gfile.join(\n checkpoint_path, \"example_batch.msgpack\"\n )\n if not tf.io.gfile.exists(example_batch_path):\n with tf.io.gfile.GFile(example_batch_path, \"wb\") as f:\n f.write(flax.serialization.msgpack_serialize(self.example_batch))\n\n # save dataset statistics\n dataset_statistics_path = tf.io.gfile.join(\n checkpoint_path, \"dataset_statistics.json\"\n )\n if not tf.io.gfile.exists(dataset_statistics_path):\n with tf.io.gfile.GFile(dataset_statistics_path, \"w\") as f:\n json.dump(\n jax.tree_map(lambda x: x.tolist(), self.dataset_statistics),\n f,\n )\n\n @classmethod\n def from_config(\n cls,\n config: Config,\n example_batch: Data,\n text_processor: Optional[Any] = None,\n verbose: bool = False,\n rng: Optional[PRNGKey] = None,\n dataset_statistics: Optional[Data] = None,\n ):\n \"\"\"Initializes a model with a fresh set of weights from a given config + example_batch.\n\n Args:\n config (Dict[str, Any]): Config dict. 
The only required key is \"model\", but other configuration\n may be saved for posterity.\n example_batch (Dict[str, Any]): Example batch.\n text_processor (Any, optional): Preprocessor for text inputs.\n verbose (bool, optional): Whether to print out a summary of the model.\n rng (Optional[PRNGKey], optional): RNG key for initializing the model.\n dataset_statistics (Optional[Dict[str, Any]], optional): Dataset statistics.\n \"\"\"\n module = OctoModule.create(**config[\"model\"])\n rng = rng if rng is not None else jax.random.PRNGKey(0)\n example_batch = multihost_utils.process_allgather(example_batch)\n example_batch = jax.tree_map(lambda x: x[:1], example_batch)\n\n init_args = (\n example_batch[\"observation\"],\n example_batch[\"task\"],\n example_batch[\"observation\"][\"pad_mask\"],\n )\n\n if verbose:\n print(\n module.tabulate(rng, *init_args, train=False, verbose=True, depth=2)\n ) # Prints out the parameter count of our model, and tokenizer details\n\n @jax.jit\n def _init(rng):\n return module.init(rng, *init_args, train=False)\n\n params = _init(rng)[\"params\"]\n\n return cls(\n module=module,\n params=params,\n text_processor=text_processor,\n example_batch=example_batch,\n config=config,\n dataset_statistics=dataset_statistics,\n )\n\n def get_pretty_spec(self):\n \"\"\"Brief summary of the model's expected inputs and outputs.\"\"\"\n # TODO: generalize this to print out proprio when it is being tokenized\n window_size = self.example_batch[\"observation\"][\"pad_mask\"].shape[1]\n\n observation_space = {\n k: (\"batch\", \"history_window\", *v.shape[2:])\n for k, v in self.example_batch[\"observation\"].items()\n if k.startswith(\"image\")\n }\n task_space = {\n k: (\"batch\", *v.shape[1:])\n for k, v in self.example_batch[\"task\"].items()\n if k.startswith(\"image\")\n }\n if self.text_processor is not None:\n task_space[\"language_instruction\"] = jax.tree_map(\n lambda arr: (\"batch\", *arr.shape[1:]),\n self.example_batch[\"task\"][\"language_instruction\"],\n )\n\n try:\n action_head = self.module.heads[\"action\"]\n action_head_repr = str(action_head.__class__)\n action_dim, pred_horizon = action_head.action_dim, action_head.pred_horizon\n except:\n action_head_repr, action_dim, pred_horizon = \"\", None, None\n\n return SPEC_TEMPLATE.format(\n window_size=window_size,\n observation_space=flax.core.pretty_repr(observation_space),\n task_space=flax.core.pretty_repr(task_space),\n action_head_repr=action_head_repr,\n action_dim=action_dim,\n pred_horizon=pred_horizon,\n )" }, { "identifier": "initialize_compilation_cache", "path": "octo/utils/jax_utils.py", "snippet": "def initialize_compilation_cache(\n cache_dir=os.path.expanduser(\"~/.jax_compilation_cache\"),\n):\n \"\"\"Initializes the Jax persistent compilation cache.\"\"\"\n compilation_cache.initialize_cache(cache_dir)\n for logger in [logging.getLogger(name) for name in logging.root.manager.loggerDict]:\n logger.addFilter(\n lambda record: \"Not writing persistent cache entry for\"\n not in record.getMessage()\n )" }, { "identifier": "ModuleSpec", "path": "octo/utils/spec.py", "snippet": "class ModuleSpec(TypedDict):\n \"\"\"A JSON-serializable representation of a function or class with some default args and kwargs to pass to\n it. 
Useful for specifying a particular class or function in a config file, while keeping it serializable\n and overridable from the command line using ml_collections.\n\n Usage:\n\n # Preferred way to create a spec:\n >>> from octo.model.components.transformer import Transformer\n >>> spec = ModuleSpec.create(Transformer, num_layers=3)\n # Same as above using the fully qualified import string:\n >>> spec = ModuleSpec.create(\"octo.model.components.transformer:Transformer\", num_layers=3)\n\n # Usage:\n >>> ModuleSpec.instantiate(spec) == partial(Transformer, num_layers=3)\n # can pass additional kwargs at instantiation time\n >>> transformer = ModuleSpec.instantiate(spec, num_heads=8)\n\n Note: ModuleSpec is just an alias for a dictionary (that is strongly typed), not a real class. So from\n your code's perspective, it is just a dictionary.\n\n module (str): The module the callable is located in\n name (str): The name of the callable in the module\n args (tuple): The args to pass to the callable\n kwargs (dict): The kwargs to pass to the callable\n \"\"\"\n\n module: str\n name: str\n args: Tuple[Any, ...]\n kwargs: Dict[str, Any]\n\n @staticmethod\n def create(callable_or_full_name: Union[str, callable], *args, **kwargs) -> \"ModuleSpec\": # type: ignore\n \"\"\"Create a module spec from a callable or import string.\n\n Args:\n callable_or_full_name (str or object): Either the object itself or a fully qualified import string\n (e.g. \"octo.model.components.transformer:Transformer\")\n args (tuple, optional): Passed into callable upon instantiation.\n kwargs (dict, optional): Passed into callable upon instantiation.\n \"\"\"\n if isinstance(callable_or_full_name, str):\n assert callable_or_full_name.count(\":\") == 1, (\n \"If passing in a string, it must be a fully qualified import string \"\n \"(e.g. 'octo.model.components.transformer:Transformer')\"\n )\n module, name = callable_or_full_name.split(\":\")\n else:\n module, name = _infer_full_name(callable_or_full_name)\n\n return ModuleSpec(module=module, name=name, args=args, kwargs=kwargs)\n\n @staticmethod\n def instantiate(spec: \"ModuleSpec\"): # type: ignore\n if set(spec.keys()) != {\"module\", \"name\", \"args\", \"kwargs\"}:\n raise ValueError(\n f\"Expected ModuleSpec, but got {spec}. 
\"\n \"ModuleSpec must have keys 'module', 'name', 'args', and 'kwargs'.\"\n )\n cls = _import_from_string(spec[\"module\"], spec[\"name\"])\n return partial(cls, *spec[\"args\"], **spec[\"kwargs\"])" }, { "identifier": "RolloutVisualizationCallback", "path": "octo/utils/train_callbacks.py", "snippet": "class RolloutVisualizationCallback(Callback):\n visualizer_kwargs_list: Sequence[Mapping[str, Any]]\n text_processor: TextProcessor\n trajs_for_rollouts: int\n model_pred_horizon: int\n history_length: int\n modes_to_evaluate: str = (\"text_conditioned\", \"image_conditioned\")\n\n def __post_init__(self):\n self.zero_text = jax.tree_map(lambda x: x[0], self.text_processor.encode(\"\"))\n\n self.rollout_visualizers = [\n RolloutVisualizer(\n text_processor=self.text_processor,\n history_length=self.history_length,\n action_chunk=self.model_pred_horizon\n if \"pred_horizon\" not in kwargs\n else kwargs[\"pred_horizon\"],\n **kwargs,\n )\n for kwargs in self.visualizer_kwargs_list\n ]\n\n def __call__(self, train_state: TrainState, step: int):\n wandb_metrics = {}\n modal_policy_fns = {\n mode: partial(\n get_policy_sampled_actions,\n train_state,\n zero_text=self.zero_text,\n samples_per_state=1,\n policy_mode=mode,\n )\n for mode in self.modes_to_evaluate\n }\n for rollout_visualizer in self.rollout_visualizers:\n for mode, policy_fn in modal_policy_fns.items():\n logging.info(f\"Running rollouts for {rollout_visualizer.env_name}\")\n rollout_infos = rollout_visualizer.run_rollouts(\n policy_fn, n_rollouts=self.trajs_for_rollouts\n )\n wandb_metrics[\n f\"rollouts_{rollout_visualizer.env_name}_chunk{rollout_visualizer.action_chunk}/{mode}\"\n ] = rollout_infos\n\n return wandb_metrics" }, { "identifier": "SaveCallback", "path": "octo/utils/train_callbacks.py", "snippet": "class SaveCallback(Callback):\n \"\"\"Callback that saves checkpoints to `save_dir`. 
If `save_dir` is None, does nothing.\"\"\"\n\n save_dir: Optional[str]\n\n def __post_init__(self):\n if self.save_dir is not None:\n if not self.save_dir.startswith(\"gs://\"):\n self.save_dir = os.path.abspath(self.save_dir)\n if jax.process_index() == 0:\n tf.io.gfile.makedirs(self.save_dir)\n logging.info(f\"Created {self.save_dir}\")\n # make checkpointers\n # only keep latest full TrainState\n self.state_checkpointer = orbax.checkpoint.CheckpointManager(\n tf.io.gfile.join(self.save_dir, \"state\"),\n orbax.checkpoint.PyTreeCheckpointer(),\n options=orbax.checkpoint.CheckpointManagerOptions(\n max_to_keep=1,\n ),\n )\n # keep every params checkpoint\n self.params_checkpointer = orbax.checkpoint.CheckpointManager(\n self.save_dir,\n orbax.checkpoint.PyTreeCheckpointer(),\n )\n\n def __call__(self, train_state: TrainState, step: int):\n if self.save_dir is not None:\n train_state.model.save_pretrained(\n step, checkpoint_manager=self.params_checkpointer\n )\n self.state_checkpointer.save(\n step,\n train_state,\n {\"save_args\": orbax_utils.save_args_from_target(train_state)},\n )" }, { "identifier": "ValidationCallback", "path": "octo/utils/train_callbacks.py", "snippet": "class ValidationCallback(Callback):\n loss_fn: Callable\n process_batch_fn: Callable[[Data], Data]\n text_processor: Optional[TextProcessor]\n val_dataset_kwargs_list: Sequence[Mapping[str, Any]]\n dataset_kwargs: Mapping[str, Any]\n val_shuffle_buffer_size: int\n num_val_batches: int\n modes_to_evaluate: Sequence[str] = (\"text_conditioned\", \"image_conditioned\")\n train: bool = False\n\n def __post_init__(self):\n if self.text_processor is not None:\n self.zero_text = jax.tree_map(\n lambda x: x[0], self.text_processor.encode(\"\")\n )\n self.val_iterators = {}\n for single_dataset_kwargs in self.val_dataset_kwargs_list:\n val_dataset = create_validation_dataset(\n single_dataset_kwargs,\n self.dataset_kwargs[\"traj_transform_kwargs\"],\n self.dataset_kwargs[\"frame_transform_kwargs\"],\n train=self.train,\n )\n val_iterator = (\n val_dataset.unbatch()\n .shuffle(self.val_shuffle_buffer_size)\n .repeat()\n .batch(self.dataset_kwargs[\"batch_size\"])\n .iterator(prefetch=0)\n )\n val_iterator = map(self.process_batch_fn, val_iterator)\n self.val_iterators[single_dataset_kwargs[\"name\"]] = val_iterator\n\n @partial(\n jax.jit,\n out_shardings=jax.sharding.PositionalSharding(jax.devices()).replicate(),\n )\n def eval_step(state: TrainState, batch: Data):\n loss_fn_partial = partial(\n self.loss_fn,\n params=state.model.params,\n rng=state.rng,\n train=False,\n )\n all_tasks = {}\n\n if \"base\" in self.modes_to_evaluate:\n all_tasks[\"base\"] = batch[\"task\"]\n if \"image_conditioned\" in self.modes_to_evaluate:\n all_tasks[\"image_conditioned\"] = remove_text(\n batch[\"task\"], self.zero_text\n )\n if \"text_conditioned\" in self.modes_to_evaluate:\n all_tasks[\"text_conditioned\"] = remove_images(batch[\"task\"])\n\n if \"unconditioned\" in self.modes_to_evaluate:\n all_tasks[\"unconditioned\"] = remove_text(\n remove_images(batch[\"task\"]), self.zero_text\n )\n return {\n k: loss_fn_partial(batch=flax.core.copy(batch, {\"task\": tasks}))[1]\n for k, tasks in all_tasks.items()\n }\n\n self.eval_step = eval_step\n\n def __call__(self, train_state: TrainState, step: int):\n wandb_metrics = {}\n for name, val_data_iter in self.val_iterators.items():\n metrics = []\n for _, batch in tqdm.tqdm(\n zip(range(self.num_val_batches), val_data_iter),\n total=self.num_val_batches,\n desc=name,\n ):\n 
metrics.append(self.eval_step(train_state, batch))\n metrics = jax.tree_map(lambda *xs: np.mean(xs), *metrics)\n wandb_metrics[f\"validation_{name}\"] = metrics\n return wandb_metrics" }, { "identifier": "VisualizationCallback", "path": "octo/utils/train_callbacks.py", "snippet": "class VisualizationCallback(Callback):\n text_processor: TextProcessor\n val_dataset_kwargs_list: Sequence[Mapping[str, Any]]\n dataset_kwargs: Mapping[str, Any]\n eval_batch_size: int\n trajs_for_metrics: int\n trajs_for_viz: int\n samples_per_state: int\n modes_to_evaluate: str = (\"text_conditioned\", \"image_conditioned\")\n train: bool = False\n\n def __post_init__(self):\n self.zero_text = jax.tree_map(lambda x: x[0], self.text_processor.encode(\"\"))\n\n self.visualizers = {}\n for single_dataset_kwargs in self.val_dataset_kwargs_list:\n val_dataset = create_validation_dataset(\n single_dataset_kwargs,\n self.dataset_kwargs[\"traj_transform_kwargs\"],\n self.dataset_kwargs[\"frame_transform_kwargs\"],\n train=self.train,\n )\n self.visualizers[single_dataset_kwargs[\"name\"]] = Visualizer(\n val_dataset, text_processor=self.text_processor, freeze_trajs=False\n )\n\n def __call__(self, train_state: TrainState, step: int):\n wandb_metrics = {}\n modal_policy_fns = {\n mode: batched_apply(\n partial(\n get_policy_sampled_actions,\n train_state,\n zero_text=self.zero_text,\n samples_per_state=self.samples_per_state,\n policy_mode=mode,\n ),\n self.eval_batch_size,\n )\n for mode in self.modes_to_evaluate\n }\n\n for name, visualizer in self.visualizers.items():\n for mode, policy_fn in modal_policy_fns.items():\n if self.trajs_for_metrics > 0:\n raw_infos = visualizer.raw_evaluations(\n policy_fn, max_trajs=self.trajs_for_metrics\n )\n metrics = visualizer.metrics_for_wandb(raw_infos)\n wandb_metrics[f\"offline_metrics_{name}/{mode}\"] = metrics\n if self.trajs_for_viz > 0:\n images = visualizer.visualize_for_wandb(\n policy_fn, max_trajs=self.trajs_for_viz\n )\n wandb_metrics[f\"visualizations_{name}/{mode}\"] = images\n return wandb_metrics" }, { "identifier": "check_config_diff", "path": "octo/utils/train_utils.py", "snippet": "def check_config_diff(new_conf: Config, old_conf: Config, silent: bool = False):\n \"\"\"Checks for differences between new config and old config dicts.\"\"\"\n new_conf_flat = flax.traverse_util.flatten_dict(\n new_conf.to_dict() if isinstance(new_conf, ConfigDict) else new_conf\n )\n old_conf_flat = flax.traverse_util.flatten_dict(\n old_conf.to_dict() if isinstance(old_conf, ConfigDict) else old_conf\n )\n\n # check for missing / new keys\n if set(new_conf_flat.keys()) != set(old_conf_flat.keys()) and not silent:\n logging.info(\n \"New config contains extra items: %s\",\n set(new_conf_flat.keys()) - set(old_conf_flat.keys()),\n )\n logging.info(\n \"New config doesn't contain items: %s\",\n set(old_conf_flat.keys()) - set(new_conf_flat.keys()),\n )\n\n # print differing key values\n mismatched_keys = {\n k: (new_conf_flat[k], old_conf_flat[k])\n for k in new_conf_flat\n if k in old_conf_flat and new_conf_flat[k] != old_conf_flat[k]\n }\n if mismatched_keys and not silent:\n logging.info(\n \"New config contains keys with new values: %s\",\n flax.core.pretty_repr(mismatched_keys),\n )\n return mismatched_keys or (set(new_conf_flat.keys()) != set(old_conf_flat.keys()))" }, { "identifier": "create_optimizer", "path": "octo/utils/train_utils.py", "snippet": "def create_optimizer(\n params_or_params_shape: Params, **kwargs: dict\n) -> optax.GradientTransformation:\n \"\"\"Creates 
optimizer for Octo.\n\n kwargs are the kwargs for optax.adamw; if the \"learning_rate\" key is a dict, it is interpreted\n as the kwargs for create_lr_schedule (see above), otherwise it is interpreted as a constant\n learning rate.\n\n If clip_gradient is specified, then gradient clipping is applied. If frozen_keys is specified,\n then those parameters are frozen (i.e. not updated) during training.\n\n Returns:\n tx: an Optax optimizer\n lr_callable: Function that takes the current step and returns the learning rate\n \"\"\"\n if isinstance(kwargs[\"learning_rate\"], dict):\n lr_callable = create_lr_schedule(**kwargs[\"learning_rate\"])\n else:\n lr_callable = lambda _: kwargs[\"learning_rate\"]\n kwargs[\"learning_rate\"] = lr_callable\n\n # Following ViT, timm, MAE: this mask skips weight decay on biases and LayerNorm parameters\n wd_mask = jax.tree_util.tree_map_with_path(\n lambda path, x: \"kernel\" in jax.tree_util.keystr(path), params_or_params_shape\n )\n\n clip_gradient = kwargs.pop(\"clip_gradient\", None)\n frozen_keys = kwargs.pop(\"frozen_keys\", None)\n grad_accumulation_steps = kwargs.pop(\"grad_accumulation_steps\", None)\n\n tx = optax.adamw(mu_dtype=jnp.bfloat16, **kwargs, mask=wd_mask)\n if grad_accumulation_steps:\n tx = optax.MultiSteps(tx, grad_accumulation_steps)\n if clip_gradient is not None:\n tx = optax.chain(\n optax.clip_by_global_norm(clip_gradient),\n tx,\n )\n\n if frozen_keys:\n tx, param_partitions = freeze_weights(\n tx, params_or_params_shape, frozen_keys, return_partitions=True\n )\n zero_frozen_params = lambda params: jax.tree_map(\n lambda x, y: x if y == \"trainable\" else jnp.zeros(()),\n params,\n param_partitions,\n )\n param_norm_callable = lambda params: optax.global_norm(\n zero_frozen_params(params)\n )\n else:\n param_norm_callable = optax.global_norm\n\n return tx, lr_callable, param_norm_callable" }, { "identifier": "format_name_with_config", "path": "octo/utils/train_utils.py", "snippet": "def format_name_with_config(name, config):\n \"\"\"Formats a name string with a config dict.\n\n Formatting keys may be specified as {key} or {full_path_to_key_with_underscores}.\n\n Example:\n name = \"model_{model_type}_{model_size}\"\n config = {\"model_type\": \"transformer\", \"model_size\": \"small\"}\n format_name_with_config(name, config) -> \"model_transformer_small\"\n \"\"\"\n config_flat = flax.traverse_util.flatten_dict(config, sep=\"_\")\n config_final = {k.split(\"_\")[-1]: v for k, v in config_flat.items()}\n format_dict = {**config_final, **config_flat}\n return name.format(**format_dict)" }, { "identifier": "merge_params", "path": "octo/utils/train_utils.py", "snippet": "def merge_params(target_params: Params, pretrained_params: Params) -> Params:\n \"\"\"Copies pre-trained params into target_params for every param that has corresponding key + shape.\"\"\"\n flat_target_params = flax.traverse_util.flatten_dict(target_params)\n flat_pretrained_params = flax.traverse_util.flatten_dict(pretrained_params)\n keys_to_update = [\n k\n for k in flat_target_params\n if k in flat_pretrained_params\n and flat_target_params[k].shape == flat_pretrained_params[k].shape\n ]\n missing_keys = [k for k in flat_target_params if k not in flat_pretrained_params]\n shape_mismatch_keys = [\n k\n for k in flat_target_params\n if k in flat_pretrained_params\n and flat_target_params[k].shape != flat_pretrained_params[k].shape\n ]\n\n for key in keys_to_update:\n logging.debug(f\"Param copied from pre-trained: {'.'.join(key)}\")\n if missing_keys or 
shape_mismatch_keys:\n logging.info(\"########## Parameters skipped during model loading: ##########\")\n for key in missing_keys:\n logging.info(\n f\"Param missing in pre-trained model, skipping: {'.'.join(key)}\"\n )\n for key in shape_mismatch_keys:\n logging.info(\n f\"Param with differing shape in pre-trained model, skipping: {'.'.join(key)}\"\n )\n\n flat_target_params = flax.core.copy(\n flat_target_params, {k: flat_pretrained_params[k] for k in keys_to_update}\n )\n target_params = flax.traverse_util.unflatten_dict(flat_target_params)\n return target_params" }, { "identifier": "process_text", "path": "octo/utils/train_utils.py", "snippet": "def process_text(batch: Data, text_processor: Optional[TextProcessor]) -> Data:\n \"\"\"Encodes the language instruction inside the tasks for a batch.\n\n If the text processor is None, removes language entirely from the tasks.\n Expects batch to be a nested dictionary, where\n batch[\"task\"][\"language_instruction\"] is a sequence of byte strings\n \"\"\"\n if text_processor is None:\n batch[\"task\"].pop(\"language_instruction\")\n else:\n batch[\"task\"][\"language_instruction\"] = text_processor.encode(\n [s.decode(\"utf-8\") for s in batch[\"task\"][\"language_instruction\"]]\n )\n return batch" }, { "identifier": "Timer", "path": "octo/utils/train_utils.py", "snippet": "class Timer:\n \"\"\"\n Timer utility. Usage:\n\n timer = Timer()\n with timer(\"foo\"):\n do_something()\n\n timer.tick(\"bar\")\n do_something_else()\n timer.tock(\"bar\")\n\n timer.get_average_times() -> {\"foo\": 0.1, \"bar\": 0.2}\n \"\"\"\n\n def __init__(self):\n self.reset()\n\n @contextmanager\n def __call__(self, key):\n self.tick(key)\n try:\n yield None\n finally:\n self.tock(key)\n\n def reset(self):\n self.counts = defaultdict(int)\n self.times = defaultdict(float)\n self.start_times = {}\n\n def tick(self, key):\n if key in self.start_times:\n raise ValueError(f\"Timer is already ticking for key: {key}\")\n self.start_times[key] = time.time()\n\n def tock(self, key):\n if key not in self.start_times:\n raise ValueError(f\"Timer is not ticking for key: {key}\")\n self.counts[key] += 1\n self.times[key] += time.time() - self.start_times[key]\n del self.start_times[key]\n\n def get_average_times(self, reset=True):\n ret = {key: self.times[key] / self.counts[key] for key in self.counts}\n if reset:\n self.reset()\n return ret" }, { "identifier": "TrainState", "path": "octo/utils/train_utils.py", "snippet": "class TrainState:\n rng: PRNGKey\n model: OctoModel\n step: int\n opt_state: optax.OptState\n tx: optax.GradientTransformation = struct.field(pytree_node=False)\n\n @classmethod\n def create(\n cls,\n rng: PRNGKey,\n model: OctoModel,\n tx: optax.GradientTransformation,\n ):\n opt_state = tx.init(model.params)\n return cls(\n rng=rng,\n model=model,\n step=0,\n opt_state=opt_state,\n tx=tx,\n )\n\n def apply_gradients(self, *, grads, rng):\n updates, new_opt_state = self.tx.update(\n grads, self.opt_state, self.model.params\n )\n new_params = optax.apply_updates(self.model.params, updates)\n\n return self.replace(\n step=self.step + 1,\n model=self.model.replace(params=new_params),\n opt_state=new_opt_state,\n rng=rng,\n )" } ]
import datetime import imp import os import flax import jax import optax import tensorflow as tf import tqdm import wandb from functools import partial from absl import app, flags, logging from flax.traverse_util import flatten_dict from jax.sharding import Mesh, NamedSharding, PartitionSpec from ml_collections import config_flags, ConfigDict from octo.data.dataset import make_single_dataset from octo.model.octo_model import OctoModel from octo.utils.jax_utils import initialize_compilation_cache from octo.utils.spec import ModuleSpec from octo.utils.train_callbacks import ( RolloutVisualizationCallback, SaveCallback, ValidationCallback, VisualizationCallback, ) from octo.utils.train_utils import ( check_config_diff, create_optimizer, format_name_with_config, merge_params, process_text, Timer, TrainState, ) from jax_smi import initialise_tracking # type: ignore
11508
tx=tx, rng=rng, ) ######### # # Save all metadata # ######### if FLAGS.config.save_dir is not None: save_dir = tf.io.gfile.join( FLAGS.config.save_dir, FLAGS.config.wandb.project, FLAGS.config.wandb.group or "", wandb_id, ) wandb.config.update(dict(save_dir=save_dir), allow_val_change=True) logging.info("Saving to %s", save_dir) save_callback = SaveCallback(save_dir) # Add window_size to top of config, to make eval easier new_config = ConfigDict(model.config) new_config["window_size"] = example_batch["observation"]["pad_mask"].shape[1] model = model.replace(config=new_config) # Save finetuning config since it's not saved by SaveCallback, i.e. as part of model.save_pretrained() with open( tf.io.gfile.join(save_dir, "finetune_config.json"), "w" ) as config_file: config_file.write(FLAGS.config.to_json_best_effort()) else: save_dir = None save_callback = SaveCallback(None) logging.warning("save_dir not passed in, not saving checkpoints") example_batch_spec = jax.tree_map( lambda arr: (arr.shape, str(arr.dtype)), example_batch ) wandb.config.update( dict(example_batch_spec=example_batch_spec), allow_val_change=True ) ######### # # Define loss, train_step, and eval_step # ######### def loss_fn(params, batch, rng, train=True): bound_module = model.module.bind({"params": params}, rngs={"dropout": rng}) transformer_embeddings = bound_module.octo_transformer( batch["observation"], batch["task"], batch["observation"]["pad_mask"], train=train, ) action_loss, action_metrics = bound_module.heads["action"].loss( transformer_embeddings, # Action head knows to pull out the action readout_key batch["action"], pad_mask=batch["observation"]["pad_mask"], train=train, ) return action_loss, action_metrics # Data parallelism # Model is replicated across devices, data is split across devices @partial( jax.jit, in_shardings=[replicated_sharding, dp_sharding], ) def train_step(state, batch): rng, dropout_rng = jax.random.split(state.rng) (loss, info), grads = jax.value_and_grad(loss_fn, has_aux=True)( state.model.params, batch, dropout_rng, train=True ) # Gradient Metrics (TODO: Does the finetuner need these?) ### grad_norm = optax.global_norm(grads) updates, _ = state.tx.update(grads, state.opt_state, state.model.params) update_norm = optax.global_norm(updates) info.update( { "grad_norm": grad_norm, "update_norm": update_norm, "param_norm": param_norm_callable(state.model.params), "learning_rate": lr_callable(state.step), } ) # End Debug Metrics # new_state = state.apply_gradients(grads=grads, rng=rng) return new_state, info ######### # # Build validation & visualization callbacks # ######### if FLAGS.config.modality == "image_conditioned": modes_to_evaluate = ["image_conditioned"] elif FLAGS.config.modality == "text_conditioned": modes_to_evaluate = ["text_conditioned"] elif FLAGS.config.modality == "multimodal": modes_to_evaluate = ["image_conditioned", "text_conditioned"] else: modes_to_evaluate = ["base"] dataset_kwargs_list = [FLAGS.config.dataset_kwargs] val_callback = ValidationCallback( loss_fn=loss_fn, process_batch_fn=process_batch, text_processor=text_processor, val_dataset_kwargs_list=dataset_kwargs_list, dataset_kwargs=FLAGS.config, modes_to_evaluate=modes_to_evaluate, **FLAGS.config.val_kwargs, )
try: initialise_tracking() except ImportError: pass FLAGS = flags.FLAGS flags.DEFINE_string("name", "experiment", "Experiment name.") flags.DEFINE_bool("debug", False, "Debug config (no wandb logging)") default_config_file = os.path.join( os.path.dirname(__file__), "configs/finetune_config.py" ) config_flags.DEFINE_config_file( "config", default_config_file, "File path to the training hyperparameter configuration.", lock_config=False, ) def main(_): initialize_compilation_cache() devices = jax.devices() logging.info( f""" Octo Finetuning Script ====================== Pretrained model: {FLAGS.config.pretrained_path} Finetuning Dataset: {FLAGS.config.dataset_kwargs.name} Data dir: {FLAGS.config.dataset_kwargs.data_dir} Task Modality: {FLAGS.config.modality} Finetuning Mode: {FLAGS.config.finetuning_mode} # Devices: {jax.device_count()} Batch size: {FLAGS.config.batch_size} ({FLAGS.config.batch_size // len(devices) } per device) # Steps: {FLAGS.config.num_steps} """ ) ######### # # Setup Jax Data Parallelism # ######### assert ( FLAGS.config.batch_size % len(devices) == 0 ), f"Batch size ({FLAGS.config.batch_size}) must be divisible by the number of devices ({len(devices)})" assert ( FLAGS.config.viz_kwargs.eval_batch_size % len(devices) == 0 ), f"Eval batch size ({FLAGS.config.viz_kwargs.eval_batch_size}) must be divisible by the number of devices ({len(devices)})" # create a 1D mesh with a single axis named "batch" mesh = Mesh(jax.devices(), axis_names="batch") # Our batches will be data-parallel sharded -- each device will get a slice of the batch dp_sharding = NamedSharding(mesh, PartitionSpec("batch")) # Our model will be replicated across devices (we are only doing data parallelism, not model parallelism) replicated_sharding = NamedSharding(mesh, PartitionSpec()) # prevent tensorflow from using GPU memory since it's only used for data loading tf.config.set_visible_devices([], "GPU") ######### # # Setup WandB # ######### name = format_name_with_config( FLAGS.name, FLAGS.config.to_dict(), ) wandb_id = "{name}_{time}".format( name=name, time=datetime.datetime.now().strftime("%Y%m%d_%H%M%S"), ) wandb.init( config=FLAGS.config.to_dict(), id=wandb_id, name=name, mode="disabled" if FLAGS.debug else None, **FLAGS.config.wandb, ) ######### # # Load Pretrained model + optionally modify config # ######### pretrained_model = OctoModel.load_pretrained( FLAGS.config.pretrained_path, step=FLAGS.config.pretrained_step, ) flat_config = flax.traverse_util.flatten_dict( pretrained_model.config, keep_empty_nodes=True ) for d_key in flax.traverse_util.flatten_dict( FLAGS.config.get("config_delete_keys", ConfigDict()).to_dict() ): for c_key in list(flat_config.keys()): if ".".join(c_key).startswith(".".join(d_key)): del flat_config[c_key] config = ConfigDict(flax.traverse_util.unflatten_dict(flat_config)) config.update(FLAGS.config.get("update_config", ConfigDict())) config = config.to_dict() check_config_diff(config, pretrained_model.config) ######### # # Setup Data Loader # ######### # create text processor if config["text_processor"] is None: text_processor = None else: text_processor = ModuleSpec.instantiate(config["text_processor"])() def process_batch(batch): batch = process_text(batch, text_processor) del batch["dataset_name"] return batch # load standardize_fn from `path/to/file.py:fn_name` format if ( standardize_fn := FLAGS.config["dataset_kwargs"].get("standardize_fn", None) ) is not None: path, name = standardize_fn.split(":") # imp is deprecated, but it's also what ml_collections uses 
standardize_fn = getattr(imp.load_source("standardize_fn", path), name) del FLAGS.config["dataset_kwargs"]["standardize_fn"] FLAGS.config["dataset_kwargs"]["standardize_fn"] = standardize_fn dataset = make_single_dataset( FLAGS.config.dataset_kwargs, traj_transform_kwargs=FLAGS.config.traj_transform_kwargs, frame_transform_kwargs=FLAGS.config.frame_transform_kwargs, train=True, ) train_data_iter = ( dataset.repeat() .unbatch() .shuffle(FLAGS.config.shuffle_buffer_size) .batch(FLAGS.config.batch_size) .iterator() ) train_data_iter = map(process_batch, train_data_iter) example_batch = next(train_data_iter) ######### # # Load Pretrained Model # ######### rng = jax.random.PRNGKey(FLAGS.config.seed) rng, init_rng = jax.random.split(rng) model = OctoModel.from_config( config, example_batch, text_processor, rng=init_rng, dataset_statistics=dataset.dataset_statistics, ) merged_params = merge_params(model.params, pretrained_model.params) model = model.replace(params=merged_params) del pretrained_model ######### # # Setup Optimizer and Train State # ######### params = model.params if FLAGS.config.optimizer.frozen_keys is None: FLAGS.config.optimizer.frozen_keys = model.config["optimizer"]["frozen_keys"] tx, lr_callable, param_norm_callable = create_optimizer( params, **FLAGS.config.optimizer.to_dict(), ) train_state = TrainState.create( model=model, tx=tx, rng=rng, ) ######### # # Save all metadata # ######### if FLAGS.config.save_dir is not None: save_dir = tf.io.gfile.join( FLAGS.config.save_dir, FLAGS.config.wandb.project, FLAGS.config.wandb.group or "", wandb_id, ) wandb.config.update(dict(save_dir=save_dir), allow_val_change=True) logging.info("Saving to %s", save_dir) save_callback = SaveCallback(save_dir) # Add window_size to top of config, to make eval easier new_config = ConfigDict(model.config) new_config["window_size"] = example_batch["observation"]["pad_mask"].shape[1] model = model.replace(config=new_config) # Save finetuning config since it's not saved by SaveCallback, i.e. as part of model.save_pretrained() with open( tf.io.gfile.join(save_dir, "finetune_config.json"), "w" ) as config_file: config_file.write(FLAGS.config.to_json_best_effort()) else: save_dir = None save_callback = SaveCallback(None) logging.warning("save_dir not passed in, not saving checkpoints") example_batch_spec = jax.tree_map( lambda arr: (arr.shape, str(arr.dtype)), example_batch ) wandb.config.update( dict(example_batch_spec=example_batch_spec), allow_val_change=True ) ######### # # Define loss, train_step, and eval_step # ######### def loss_fn(params, batch, rng, train=True): bound_module = model.module.bind({"params": params}, rngs={"dropout": rng}) transformer_embeddings = bound_module.octo_transformer( batch["observation"], batch["task"], batch["observation"]["pad_mask"], train=train, ) action_loss, action_metrics = bound_module.heads["action"].loss( transformer_embeddings, # Action head knows to pull out the action readout_key batch["action"], pad_mask=batch["observation"]["pad_mask"], train=train, ) return action_loss, action_metrics # Data parallelism # Model is replicated across devices, data is split across devices @partial( jax.jit, in_shardings=[replicated_sharding, dp_sharding], ) def train_step(state, batch): rng, dropout_rng = jax.random.split(state.rng) (loss, info), grads = jax.value_and_grad(loss_fn, has_aux=True)( state.model.params, batch, dropout_rng, train=True ) # Gradient Metrics (TODO: Does the finetuner need these?) 
### grad_norm = optax.global_norm(grads) updates, _ = state.tx.update(grads, state.opt_state, state.model.params) update_norm = optax.global_norm(updates) info.update( { "grad_norm": grad_norm, "update_norm": update_norm, "param_norm": param_norm_callable(state.model.params), "learning_rate": lr_callable(state.step), } ) # End Debug Metrics # new_state = state.apply_gradients(grads=grads, rng=rng) return new_state, info ######### # # Build validation & visualization callbacks # ######### if FLAGS.config.modality == "image_conditioned": modes_to_evaluate = ["image_conditioned"] elif FLAGS.config.modality == "text_conditioned": modes_to_evaluate = ["text_conditioned"] elif FLAGS.config.modality == "multimodal": modes_to_evaluate = ["image_conditioned", "text_conditioned"] else: modes_to_evaluate = ["base"] dataset_kwargs_list = [FLAGS.config.dataset_kwargs] val_callback = ValidationCallback( loss_fn=loss_fn, process_batch_fn=process_batch, text_processor=text_processor, val_dataset_kwargs_list=dataset_kwargs_list, dataset_kwargs=FLAGS.config, modes_to_evaluate=modes_to_evaluate, **FLAGS.config.val_kwargs, )
viz_callback = VisualizationCallback(
7
2023-12-13 09:58:56+00:00
16k
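The fine-tuning row above shards each batch along a 1D "batch" mesh axis while keeping the model parameters replicated, then jits the train step with explicit input shardings. The following is a minimal sketch of that JAX data-parallel pattern, not taken from the row itself: the toy parameter matrix, batch shape, quadratic loss, and learning rate are illustrative assumptions; only the Mesh / NamedSharding / PartitionSpec / jax.jit(in_shardings=...) usage mirrors the snippet, and it assumes a JAX version recent enough to accept the in_shardings keyword.

# Minimal data-parallel sketch (illustrative, not part of the dataset row above).
import jax
import jax.numpy as jnp
from functools import partial
from jax.sharding import Mesh, NamedSharding, PartitionSpec

devices = jax.devices()
mesh = Mesh(devices, axis_names=("batch",))
dp_sharding = NamedSharding(mesh, PartitionSpec("batch"))   # batch dim split across devices
replicated = NamedSharding(mesh, PartitionSpec())           # full copy on every device

params = jax.device_put(jnp.ones((4, 4)), replicated)                  # toy "model"
batch = jax.device_put(jnp.ones((8 * len(devices), 4)), dp_sharding)   # divisible by #devices

def loss_fn(params, batch):
    # toy quadratic loss standing in for the action-head loss
    return jnp.mean((batch @ params) ** 2)

@partial(jax.jit, in_shardings=(replicated, dp_sharding))
def train_step(params, batch):
    # grads are taken w.r.t. the replicated params; data stays sharded
    loss, grads = jax.value_and_grad(loss_fn)(params, batch)
    return params - 1e-3 * grads, loss

params, loss = train_step(params, batch)

On a single device this degenerates to ordinary jit; on multiple devices the same code splits the leading batch dimension without any further changes, which is the design choice the row's assertions about batch-size divisibility are guarding.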
modelscope/richdreamer
extern/ldm_zero123/models/diffusion/ddpm.py
[ { "identifier": "AutoencoderKL", "path": "extern/ldm_zero123/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(\n self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ):\n super().__init__()\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2 * ddconfig[\"z_channels\"], 2 * embed_dim, 1)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig[\"z_channels\"], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels) == int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n\n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(\n inputs,\n reconstructions,\n posterior,\n optimizer_idx,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"train\",\n )\n self.log(\n \"aeloss\",\n aeloss,\n prog_bar=True,\n logger=True,\n on_step=True,\n on_epoch=True,\n )\n self.log_dict(\n log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False\n )\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(\n inputs,\n reconstructions,\n posterior,\n optimizer_idx,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"train\",\n )\n\n self.log(\n \"discloss\",\n discloss,\n prog_bar=True,\n logger=True,\n on_step=True,\n on_epoch=True,\n )\n self.log_dict(\n log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False\n )\n return discloss\n\n def validation_step(self, batch, batch_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(\n inputs,\n reconstructions,\n posterior,\n 0,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"val\",\n )\n\n discloss, log_dict_disc = self.loss(\n inputs,\n reconstructions,\n posterior,\n 1,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"val\",\n )\n\n self.log(\"val/rec_loss\", 
log_dict_ae[\"val/rec_loss\"])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n def configure_optimizers(self):\n lr = self.learning_rate\n opt_ae = torch.optim.Adam(\n list(self.encoder.parameters())\n + list(self.decoder.parameters())\n + list(self.quant_conv.parameters())\n + list(self.post_quant_conv.parameters()),\n lr=lr,\n betas=(0.5, 0.9),\n )\n opt_disc = torch.optim.Adam(\n self.loss.discriminator.parameters(), lr=lr, betas=(0.5, 0.9)\n )\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.0 * (x - x.min()) / (x.max() - x.min()) - 1.0\n return x" }, { "identifier": "IdentityFirstStage", "path": "extern/ldm_zero123/models/autoencoder.py", "snippet": "class IdentityFirstStage(torch.nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface # TODO: Should be true by default but check to not break older stuff\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x" }, { "identifier": "VQModelInterface", "path": "extern/ldm_zero123/models/autoencoder.py", "snippet": "class VQModelInterface(VQModel):\n def __init__(self, embed_dim, *args, **kwargs):\n super().__init__(embed_dim=embed_dim, *args, **kwargs)\n self.embed_dim = embed_dim\n\n def encode(self, x):\n h = self.encoder(x)\n h = self.quant_conv(h)\n return h\n\n def decode(self, h, force_not_quantize=False):\n # also go through quantization layer\n if not force_not_quantize:\n quant, emb_loss, info = self.quantize(h)\n else:\n quant = h\n quant = self.post_quant_conv(quant)\n dec = self.decoder(quant)\n return dec" }, { "identifier": "DDIMSampler", "path": "extern/ldm_zero123/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def to(self, device):\n \"\"\"Same as to in torch module\n Don't really underestand why this isn't a module in the first place\"\"\"\n for k, v in self.__dict__.items():\n if isinstance(v, torch.Tensor):\n new_v = getattr(self, k).to(device)\n setattr(self, k, new_v)\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(\n self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0.0, verbose=True\n ):\n self.ddim_timesteps = make_ddim_timesteps(\n ddim_discr_method=ddim_discretize,\n 
num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,\n verbose=verbose,\n )\n alphas_cumprod = self.model.alphas_cumprod\n assert (\n alphas_cumprod.shape[0] == self.ddpm_num_timesteps\n ), \"alphas have to be defined for each timestep\"\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer(\"betas\", to_torch(self.model.betas))\n self.register_buffer(\"alphas_cumprod\", to_torch(alphas_cumprod))\n self.register_buffer(\n \"alphas_cumprod_prev\", to_torch(self.model.alphas_cumprod_prev)\n )\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer(\n \"sqrt_alphas_cumprod\", to_torch(np.sqrt(alphas_cumprod.cpu()))\n )\n self.register_buffer(\n \"sqrt_one_minus_alphas_cumprod\",\n to_torch(np.sqrt(1.0 - alphas_cumprod.cpu())),\n )\n self.register_buffer(\n \"log_one_minus_alphas_cumprod\", to_torch(np.log(1.0 - alphas_cumprod.cpu()))\n )\n self.register_buffer(\n \"sqrt_recip_alphas_cumprod\", to_torch(np.sqrt(1.0 / alphas_cumprod.cpu()))\n )\n self.register_buffer(\n \"sqrt_recipm1_alphas_cumprod\",\n to_torch(np.sqrt(1.0 / alphas_cumprod.cpu() - 1)),\n )\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(\n alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,\n verbose=verbose,\n )\n self.register_buffer(\"ddim_sigmas\", ddim_sigmas)\n self.register_buffer(\"ddim_alphas\", ddim_alphas)\n self.register_buffer(\"ddim_alphas_prev\", ddim_alphas_prev)\n self.register_buffer(\"ddim_sqrt_one_minus_alphas\", np.sqrt(1.0 - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev)\n / (1 - self.alphas_cumprod)\n * (1 - self.alphas_cumprod / self.alphas_cumprod_prev)\n )\n self.register_buffer(\n \"ddim_sigmas_for_original_num_steps\", sigmas_for_original_sampling_steps\n )\n\n @torch.no_grad()\n def sample(\n self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.0,\n mask=None,\n x0=None,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n dynamic_threshold=None,\n **kwargs,\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n ctmp = conditioning[list(conditioning.keys())[0]]\n while isinstance(ctmp, list):\n ctmp = ctmp[0]\n cbs = ctmp.shape[0]\n if cbs != batch_size:\n print(\n f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\"\n )\n\n else:\n if conditioning.shape[0] != batch_size:\n print(\n f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\"\n )\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n # print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(\n conditioning,\n size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask,\n x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(\n self,\n cond,\n shape,\n x_T=None,\n ddim_use_original_steps=False,\n callback=None,\n timesteps=None,\n quantize_denoised=False,\n mask=None,\n x0=None,\n img_callback=None,\n log_every_t=100,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n dynamic_threshold=None,\n t_start=-1,\n ):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = (\n self.ddpm_num_timesteps\n if ddim_use_original_steps\n else self.ddim_timesteps\n )\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = (\n int(\n min(timesteps / self.ddim_timesteps.shape[0], 1)\n * self.ddim_timesteps.shape[0]\n )\n - 1\n )\n timesteps = self.ddim_timesteps[:subset_end]\n\n timesteps = timesteps[:t_start]\n\n intermediates = {\"x_inter\": [img], \"pred_x0\": [img]}\n time_range = (\n reversed(range(0, timesteps))\n if ddim_use_original_steps\n else np.flip(timesteps)\n )\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n # print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc=\"DDIM Sampler\", total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(\n x0, ts\n ) # TODO: deterministic forward pass?\n img = img_orig * mask + (1.0 - mask) * img\n\n outs = self.p_sample_ddim(\n img,\n cond,\n ts,\n index=index,\n use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised,\n temperature=temperature,\n noise_dropout=noise_dropout,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n )\n img, pred_x0 = outs\n if callback:\n img = callback(i, img, pred_x0)\n if img_callback:\n img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index 
== total_steps - 1:\n intermediates[\"x_inter\"].append(img)\n intermediates[\"pred_x0\"].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(\n self,\n x,\n c,\n t,\n index,\n repeat_noise=False,\n use_original_steps=False,\n quantize_denoised=False,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n dynamic_threshold=None,\n ):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.0:\n e_t = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n if isinstance(c, dict):\n assert isinstance(unconditional_conditioning, dict)\n c_in = dict()\n for k in c:\n if isinstance(c[k], list):\n c_in[k] = [\n torch.cat([unconditional_conditioning[k][i], c[k][i]])\n for i in range(len(c[k]))\n ]\n else:\n c_in[k] = torch.cat([unconditional_conditioning[k], c[k]])\n else:\n c_in = torch.cat([unconditional_conditioning, c])\n e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(\n self.model, e_t, x, t, c, **corrector_kwargs\n )\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = (\n self.model.alphas_cumprod_prev\n if use_original_steps\n else self.ddim_alphas_prev\n )\n sqrt_one_minus_alphas = (\n self.model.sqrt_one_minus_alphas_cumprod\n if use_original_steps\n else self.ddim_sqrt_one_minus_alphas\n )\n sigmas = (\n self.model.ddim_sigmas_for_original_num_steps\n if use_original_steps\n else self.ddim_sigmas\n )\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full(\n (b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device\n )\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n\n print(t, sqrt_one_minus_at, a_t)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n pred_x0 = norm_thresholding(pred_x0, dynamic_threshold)\n\n # direction pointing to x_t\n dir_xt = (1.0 - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.0:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def encode(\n self,\n x0,\n c,\n t_enc,\n use_original_steps=False,\n return_intermediates=None,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n ):\n num_reference_steps = (\n self.ddpm_num_timesteps\n if use_original_steps\n else self.ddim_timesteps.shape[0]\n )\n\n assert t_enc <= num_reference_steps\n num_steps = t_enc\n\n if use_original_steps:\n alphas_next = self.alphas_cumprod[:num_steps]\n alphas = self.alphas_cumprod_prev[:num_steps]\n else:\n alphas_next = self.ddim_alphas[:num_steps]\n alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])\n\n x_next = x0\n intermediates = []\n inter_steps = []\n for i in tqdm(range(num_steps), desc=\"Encoding 
Image\"):\n t = torch.full(\n (x0.shape[0],), i, device=self.model.device, dtype=torch.long\n )\n if unconditional_guidance_scale == 1.0:\n noise_pred = self.model.apply_model(x_next, t, c)\n else:\n assert unconditional_conditioning is not None\n e_t_uncond, noise_pred = torch.chunk(\n self.model.apply_model(\n torch.cat((x_next, x_next)),\n torch.cat((t, t)),\n torch.cat((unconditional_conditioning, c)),\n ),\n 2,\n )\n noise_pred = e_t_uncond + unconditional_guidance_scale * (\n noise_pred - e_t_uncond\n )\n\n xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next\n weighted_noise_pred = (\n alphas_next[i].sqrt()\n * ((1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt())\n * noise_pred\n )\n x_next = xt_weighted + weighted_noise_pred\n if (\n return_intermediates\n and i % (num_steps // return_intermediates) == 0\n and i < num_steps - 1\n ):\n intermediates.append(x_next)\n inter_steps.append(i)\n elif return_intermediates and i >= num_steps - 2:\n intermediates.append(x_next)\n inter_steps.append(i)\n\n out = {\"x_encoded\": x_next, \"intermediate_steps\": inter_steps}\n if return_intermediates:\n out.update({\"intermediates\": intermediates})\n return x_next, out\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (\n extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0\n + extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise\n )\n\n @torch.no_grad()\n def decode(\n self,\n x_latent,\n cond,\n t_start,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n use_original_steps=False,\n ):\n timesteps = (\n np.arange(self.ddpm_num_timesteps)\n if use_original_steps\n else self.ddim_timesteps\n )\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n # print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc=\"Decoding image\", total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full(\n (x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long\n )\n x_dec, _ = self.p_sample_ddim(\n x_dec,\n cond,\n ts,\n index=index,\n use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n )\n return x_dec" }, { "identifier": "CrossAttention", "path": "extern/ldm_zero123/modules/attention.py", "snippet": "class CrossAttention(nn.Module):\n def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.0):\n super().__init__()\n inner_dim = dim_head * heads\n context_dim = default(context_dim, query_dim)\n\n self.scale = dim_head**-0.5\n self.heads = heads\n\n self.to_q = nn.Linear(query_dim, inner_dim, bias=False)\n self.to_k = nn.Linear(context_dim, inner_dim, bias=False)\n self.to_v = nn.Linear(context_dim, inner_dim, bias=False)\n\n self.to_out = nn.Sequential(\n nn.Linear(inner_dim, query_dim), nn.Dropout(dropout)\n )\n\n def forward(self, x, context=None, mask=None):\n h = 
self.heads\n\n q = self.to_q(x)\n context = default(context, x)\n k = self.to_k(context)\n v = self.to_v(context)\n\n q, k, v = map(lambda t: rearrange(t, \"b n (h d) -> (b h) n d\", h=h), (q, k, v))\n\n sim = einsum(\"b i d, b j d -> b i j\", q, k) * self.scale\n\n if exists(mask):\n mask = rearrange(mask, \"b ... -> b (...)\")\n max_neg_value = -torch.finfo(sim.dtype).max\n mask = repeat(mask, \"b j -> (b h) () j\", h=h)\n sim.masked_fill_(~mask, max_neg_value)\n\n # attention, what we cannot get enough of\n attn = sim.softmax(dim=-1)\n\n out = einsum(\"b i j, b j d -> b i d\", attn, v)\n out = rearrange(out, \"(b h) n d -> b n (h d)\", h=h)\n return self.to_out(out)" }, { "identifier": "extract_into_tensor", "path": "extern/ldm_zero123/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" }, { "identifier": "make_beta_schedule", "path": "extern/ldm_zero123/modules/diffusionmodules/util.py", "snippet": "def make_beta_schedule(\n schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3\n):\n if schedule == \"linear\":\n betas = (\n torch.linspace(\n linear_start**0.5, linear_end**0.5, n_timestep, dtype=torch.float64\n )\n ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(\n linear_start, linear_end, n_timestep, dtype=torch.float64\n )\n elif schedule == \"sqrt\":\n betas = (\n torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n ** 0.5\n )\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()" }, { "identifier": "noise_like", "path": "extern/ldm_zero123/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(\n shape[0], *((1,) * (len(shape) - 1))\n )\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "DiagonalGaussianDistribution", "path": "extern/ldm_zero123/modules/distributions/distributions.py", "snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(\n device=self.parameters.device\n )\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(\n device=self.parameters.device\n )\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.0])\n else:\n if other is None:\n return 0.5 * torch.sum(\n torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3],\n )\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var\n - 1.0\n - self.logvar\n + other.logvar,\n dim=[1, 2, 3],\n )\n\n def nll(self, sample, dims=[1, 2, 3]):\n if self.deterministic:\n return 
torch.Tensor([0.0])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims,\n )\n\n def mode(self):\n return self.mean" }, { "identifier": "normal_kl", "path": "extern/ldm_zero123/modules/distributions/distributions.py", "snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )" }, { "identifier": "LitEma", "path": "extern/ldm_zero123/modules/ema.py", "snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError(\"Decay must be between 0 and 1\")\n\n self.m_name2s_name = {}\n self.register_buffer(\"decay\", torch.tensor(decay, dtype=torch.float32))\n self.register_buffer(\n \"num_updates\",\n torch.tensor(0, dtype=torch.int)\n if use_num_upates\n else torch.tensor(-1, dtype=torch.int),\n )\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n # remove as '.'-character is not allowed in buffers\n s_name = name.replace(\".\", \"\")\n self.m_name2s_name.update({name: s_name})\n self.register_buffer(s_name, p.clone().detach().data)\n\n self.collected_params = []\n\n def forward(self, model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(\n one_minus_decay * (shadow_params[sname] - m_param[key])\n )\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. 
After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)" }, { "identifier": "count_params", "path": "extern/ldm_zero123/util.py", "snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.\")\n return total_params" }, { "identifier": "default", "path": "extern/ldm_zero123/util.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" }, { "identifier": "exists", "path": "extern/ldm_zero123/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "instantiate_from_config", "path": "extern/ldm_zero123/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == \"__is_first_stage__\":\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "isimage", "path": "extern/ldm_zero123/util.py", "snippet": "def isimage(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)" }, { "identifier": "ismap", "path": "extern/ldm_zero123/util.py", "snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)" }, { "identifier": "log_txt_as_img", "path": "extern/ldm_zero123/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype(\"data/DejaVuSans.ttf\", size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(\n xc[bi][start : start + nc] for start in range(0, len(xc[bi]), nc)\n )\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "mean_flat", "path": "extern/ldm_zero123/util.py", "snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))" } ]
import itertools import numpy as np import pytorch_lightning as pl import torch import torch.nn as nn from contextlib import contextmanager, nullcontext from einops import rearrange, repeat from functools import partial from omegaconf import ListConfig from pytorch_lightning.utilities.rank_zero import rank_zero_only from torch.optim.lr_scheduler import LambdaLR from torchvision.utils import make_grid from tqdm import tqdm from extern.ldm_zero123.models.autoencoder import (AutoencoderKL, IdentityFirstStage, VQModelInterface,) from extern.ldm_zero123.models.diffusion.ddim import DDIMSampler from extern.ldm_zero123.modules.attention import CrossAttention from extern.ldm_zero123.modules.diffusionmodules.util import ( extract_into_tensor, make_beta_schedule, noise_like,) from extern.ldm_zero123.modules.distributions.distributions import ( DiagonalGaussianDistribution, normal_kl,) from extern.ldm_zero123.modules.ema import LitEma from extern.ldm_zero123.util import (count_params, default, exists, instantiate_from_config, isimage, ismap, log_txt_as_img, mean_flat,)
12,558
sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) if self.make_it_fit: n_params = len( [ name for name, _ in itertools.chain( self.named_parameters(), self.named_buffers() ) ] ) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params, ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[ i % old_shape[0], j % old_shape[1] ] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = ( self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(sd, strict=False) ) print( f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys" ) if len(missing) > 0: print(f"Missing Keys: {missing}") if len(unexpected) > 0: print(f"Unexpected Keys: {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. 
""" mean = extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor( self.log_one_minus_alphas_cumprod, t, x_start.shape ) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor( self.posterior_log_variance_clipped, t, x_t.shape ) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1.0, 1.0) model_mean, posterior_variance, posterior_log_variance = self.q_posterior( x_start=x_recon, x_t=x, t=t ) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance( x=x, t=t, clip_denoised=clip_denoised )
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {"concat": "c_concat", "crossattn": "c_crossattn", "adm": "y"} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__( self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0.0, v_posterior=0.0, # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1.0, conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0.0, make_it_fit=False, ucg_training=None, ): super().__init__() assert parameterization in [ "eps", "x0", ], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print( f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode" ) self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? 
self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if ckpt_path is not None: self.init_from_ckpt( ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet ) self.register_schedule( given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s, ) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule( self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, ): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule( beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s, ) alphas = 1.0 - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1.0, alphas_cumprod[:-1]) (timesteps,) = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert ( alphas_cumprod.shape[0] == self.num_timesteps ), "alphas have to be defined for each timestep" to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer("betas", to_torch(betas)) self.register_buffer("alphas_cumprod", to_torch(alphas_cumprod)) self.register_buffer("alphas_cumprod_prev", to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer("sqrt_alphas_cumprod", to_torch(np.sqrt(alphas_cumprod))) self.register_buffer( "sqrt_one_minus_alphas_cumprod", to_torch(np.sqrt(1.0 - alphas_cumprod)) ) self.register_buffer( "log_one_minus_alphas_cumprod", to_torch(np.log(1.0 - alphas_cumprod)) ) self.register_buffer( "sqrt_recip_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod)) ) self.register_buffer( "sqrt_recipm1_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod - 1)) ) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * ( 1.0 - alphas_cumprod_prev ) / (1.0 - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. 
- alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer("posterior_variance", to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer( "posterior_log_variance_clipped", to_torch(np.log(np.maximum(posterior_variance, 1e-20))), ) self.register_buffer( "posterior_mean_coef1", to_torch(betas * np.sqrt(alphas_cumprod_prev) / (1.0 - alphas_cumprod)), ) self.register_buffer( "posterior_mean_coef2", to_torch( (1.0 - alphas_cumprod_prev) * np.sqrt(alphas) / (1.0 - alphas_cumprod) ), ) if self.parameterization == "eps": lvlb_weights = self.betas**2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod) ) elif self.parameterization == "x0": lvlb_weights = ( 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2.0 * 1 - torch.Tensor(alphas_cumprod)) ) else: raise NotImplementedError("mu not supported") # TODO how to choose this term lvlb_weights[0] = lvlb_weights[1] self.register_buffer("lvlb_weights", lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) if self.make_it_fit: n_params = len( [ name for name, _ in itertools.chain( self.named_parameters(), self.named_buffers() ) ] ) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params, ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[ i % old_shape[0], j % old_shape[1] ] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = ( self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(sd, strict=False) ) print( f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys" ) if len(missing) > 0: print(f"Missing Keys: {missing}") if len(unexpected) > 0: print(f"Unexpected Keys: {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. 
:return: A tuple (mean, variance, log_variance), all of x_start's shape. """ mean = extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor( self.log_one_minus_alphas_cumprod, t, x_start.shape ) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor( self.posterior_log_variance_clipped, t, x_t.shape ) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1.0, 1.0) model_mean, posterior_variance, posterior_log_variance = self.q_posterior( x_start=x_recon, x_t=x, t=t ) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance( x=x, t=t, clip_denoised=clip_denoised )
noise = noise_like(x.shape, device, repeat_noise)
7
2023-12-06 07:53:11+00:00
16k
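The DDPM row above precomputes schedule buffers (alphas_cumprod, posterior_mean_coef1/2, sqrt_alphas_cumprod, ...) that encode the closed-form forward marginal q(x_t | x_0) and the posterior q(x_{t-1} | x_t, x_0). Below is a minimal PyTorch sketch of those two quantities, not taken from the row: the linear schedule endpoints, tensor sizes, and the chosen timestep are illustrative assumptions; the coefficient formulas follow the buffers registered in register_schedule.

# Minimal sketch of the closed-form DDPM quantities (illustrative, not part of the row above).
import torch

T = 1000
betas = torch.linspace(1e-4, 2e-2, T, dtype=torch.float64)  # toy "linear" schedule
alphas = 1.0 - betas
alphas_cumprod = torch.cumprod(alphas, dim=0)
alphas_cumprod_prev = torch.cat([torch.ones(1, dtype=torch.float64), alphas_cumprod[:-1]])

def q_sample(x0, t, noise):
    # x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps
    a_bar = alphas_cumprod[t]
    return a_bar.sqrt() * x0 + (1.0 - a_bar).sqrt() * noise

def q_posterior_mean(x0, xt, t):
    # mean of q(x_{t-1} | x_t, x_0), matching posterior_mean_coef1/2 above
    coef1 = betas[t] * alphas_cumprod_prev[t].sqrt() / (1.0 - alphas_cumprod[t])
    coef2 = (1.0 - alphas_cumprod_prev[t]) * alphas[t].sqrt() / (1.0 - alphas_cumprod[t])
    return coef1 * x0 + coef2 * xt

x0 = torch.randn(4, dtype=torch.float64)
eps = torch.randn_like(x0)
t = 500
xt = q_sample(x0, t, eps)
print(q_posterior_mean(x0, xt, t))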
rehg-lab/RAVE
annotator/oneformer/detectron2/export/caffe2_patch.py
[ { "identifier": "poolers", "path": "annotator/oneformer/detectron2/modeling/poolers.py", "snippet": "def assign_boxes_to_levels(\r\n box_lists: List[Boxes],\r\n min_level: int,\r\n max_level: int,\r\n canonical_box_size: int,\r\n canonical_level: int,\r\n):\r\ndef _convert_boxes_to_pooler_format(boxes: torch.Tensor, sizes: torch.Tensor) -> torch.Tensor:\r\ndef convert_boxes_to_pooler_format(box_lists: List[Boxes]):\r\ndef _create_zeros(\r\n batch_target: Optional[torch.Tensor],\r\n channels: int,\r\n height: int,\r\n width: int,\r\n like_tensor: torch.Tensor,\r\n) -> torch.Tensor:\r\n def __init__(\r\n self,\r\n output_size,\r\n scales,\r\n sampling_ratio,\r\n pooler_type,\r\n canonical_box_size=224,\r\n canonical_level=4,\r\n ):\r\n def forward(self, x: List[torch.Tensor], box_lists: List[Boxes]):\r\nclass ROIPooler(nn.Module):\r" }, { "identifier": "rpn", "path": "annotator/oneformer/detectron2/modeling/proposal_generator/rpn.py", "snippet": "RPN_HEAD_REGISTRY = Registry(\"RPN_HEAD\")\r\n N = pred_anchor_deltas[0].shape[0]\r\n B = anchors_i.tensor.size(1)\r\ndef build_rpn_head(cfg, input_shape):\r\n def __init__(\r\n self, *, in_channels: int, num_anchors: int, box_dim: int = 4, conv_dims: List[int] = (-1,)\r\n ):\r\n def _get_rpn_conv(self, in_channels, out_channels):\r\n def from_config(cls, cfg, input_shape):\r\n def forward(self, features: List[torch.Tensor]):\r\n def __init__(\r\n self,\r\n *,\r\n in_features: List[str],\r\n head: nn.Module,\r\n anchor_generator: nn.Module,\r\n anchor_matcher: Matcher,\r\n box2box_transform: Box2BoxTransform,\r\n batch_size_per_image: int,\r\n positive_fraction: float,\r\n pre_nms_topk: Tuple[float, float],\r\n post_nms_topk: Tuple[float, float],\r\n nms_thresh: float = 0.7,\r\n min_box_size: float = 0.0,\r\n anchor_boundary_thresh: float = -1.0,\r\n loss_weight: Union[float, Dict[str, float]] = 1.0,\r\n box_reg_loss_type: str = \"smooth_l1\",\r\n smooth_l1_beta: float = 0.0,\r\n ):\r\n def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]):\r\n def _subsample_labels(self, label):\r\n def label_and_sample_anchors(\r\n self, anchors: List[Boxes], gt_instances: List[Instances]\r\n ) -> Tuple[List[torch.Tensor], List[torch.Tensor]]:\r\n def losses(\r\n self,\r\n anchors: List[Boxes],\r\n pred_objectness_logits: List[torch.Tensor],\r\n gt_labels: List[torch.Tensor],\r\n pred_anchor_deltas: List[torch.Tensor],\r\n gt_boxes: List[torch.Tensor],\r\n ) -> Dict[str, torch.Tensor]:\r\n def forward(\r\n self,\r\n images: ImageList,\r\n features: Dict[str, torch.Tensor],\r\n gt_instances: Optional[List[Instances]] = None,\r\n ):\r\n def predict_proposals(\r\n self,\r\n anchors: List[Boxes],\r\n pred_objectness_logits: List[torch.Tensor],\r\n pred_anchor_deltas: List[torch.Tensor],\r\n image_sizes: List[Tuple[int, int]],\r\n ):\r\n def _decode_proposals(self, anchors: List[Boxes], pred_anchor_deltas: List[torch.Tensor]):\r\nclass StandardRPNHead(nn.Module):\r\nclass RPN(nn.Module):\r" }, { "identifier": "keypoint_head", "path": "annotator/oneformer/detectron2/modeling/roi_heads/keypoint_head.py", "snippet": "_TOTAL_SKIPPED = 0\r\nROI_KEYPOINT_HEAD_REGISTRY = Registry(\"ROI_KEYPOINT_HEAD\")\r\n N, K, H, W = pred_keypoint_logits.shape\r\ndef build_keypoint_head(cfg, input_shape):\r\ndef keypoint_rcnn_loss(pred_keypoint_logits, instances, normalizer):\r\ndef keypoint_rcnn_inference(pred_keypoint_logits: torch.Tensor, pred_instances: List[Instances]):\r\n def __init__(self, *, num_keypoints, loss_weight=1.0, loss_normalizer=1.0):\r\n def from_config(cls, 
cfg, input_shape):\r\n def forward(self, x, instances: List[Instances]):\r\n def layers(self, x):\r\n def __init__(self, input_shape, *, num_keypoints, conv_dims, **kwargs):\r\n def from_config(cls, cfg, input_shape):\r\n def layers(self, x):\r\nclass BaseKeypointRCNNHead(nn.Module):\r\nclass KRCNNConvDeconvUpsampleHead(BaseKeypointRCNNHead, nn.Sequential):\r" }, { "identifier": "mask_head", "path": "annotator/oneformer/detectron2/modeling/roi_heads/mask_head.py", "snippet": "ROI_MASK_HEAD_REGISTRY = Registry(\"ROI_MASK_HEAD\")\r\ndef mask_rcnn_loss(pred_mask_logits: torch.Tensor, instances: List[Instances], vis_period: int = 0):\r\ndef mask_rcnn_inference(pred_mask_logits: torch.Tensor, pred_instances: List[Instances]):\r\n def __init__(self, *, loss_weight: float = 1.0, vis_period: int = 0):\r\n def from_config(cls, cfg, input_shape):\r\n def forward(self, x, instances: List[Instances]):\r\n def layers(self, x):\r\n def __init__(self, input_shape: ShapeSpec, *, num_classes, conv_dims, conv_norm=\"\", **kwargs):\r\n def from_config(cls, cfg, input_shape):\r\n def layers(self, x):\r\ndef build_mask_head(cfg, input_shape):\r\nclass BaseMaskRCNNHead(nn.Module):\r\nclass MaskRCNNConvUpsampleHead(BaseMaskRCNNHead, nn.Sequential):\r" }, { "identifier": "FastRCNNOutputLayers", "path": "annotator/oneformer/detectron2/modeling/roi_heads/fast_rcnn.py", "snippet": "class FastRCNNOutputLayers(nn.Module):\r\n \"\"\"\r\n Two linear layers for predicting Fast R-CNN outputs:\r\n\r\n 1. proposal-to-detection box regression deltas\r\n 2. classification scores\r\n \"\"\"\r\n\r\n @configurable\r\n def __init__(\r\n self,\r\n input_shape: ShapeSpec,\r\n *,\r\n box2box_transform,\r\n num_classes: int,\r\n test_score_thresh: float = 0.0,\r\n test_nms_thresh: float = 0.5,\r\n test_topk_per_image: int = 100,\r\n cls_agnostic_bbox_reg: bool = False,\r\n smooth_l1_beta: float = 0.0,\r\n box_reg_loss_type: str = \"smooth_l1\",\r\n loss_weight: Union[float, Dict[str, float]] = 1.0,\r\n use_fed_loss: bool = False,\r\n use_sigmoid_ce: bool = False,\r\n get_fed_loss_cls_weights: Optional[Callable] = None,\r\n fed_loss_num_classes: int = 50,\r\n ):\r\n \"\"\"\r\n NOTE: this interface is experimental.\r\n\r\n Args:\r\n input_shape (ShapeSpec): shape of the input feature to this module\r\n box2box_transform (Box2BoxTransform or Box2BoxTransformRotated):\r\n num_classes (int): number of foreground classes\r\n test_score_thresh (float): threshold to filter predictions results.\r\n test_nms_thresh (float): NMS threshold for prediction results.\r\n test_topk_per_image (int): number of top predictions to produce per image.\r\n cls_agnostic_bbox_reg (bool): whether to use class agnostic for bbox regression\r\n smooth_l1_beta (float): transition point from L1 to L2 loss. Only used if\r\n `box_reg_loss_type` is \"smooth_l1\"\r\n box_reg_loss_type (str): Box regression loss type. One of: \"smooth_l1\", \"giou\",\r\n \"diou\", \"ciou\"\r\n loss_weight (float|dict): weights to use for losses. Can be single float for weighting\r\n all losses, or a dict of individual weightings. Valid dict keys are:\r\n * \"loss_cls\": applied to classification loss\r\n * \"loss_box_reg\": applied to box regression loss\r\n use_fed_loss (bool): whether to use federated loss which samples additional negative\r\n classes to calculate the loss\r\n use_sigmoid_ce (bool): whether to calculate the loss using weighted average of binary\r\n cross entropy with logits. 
This could be used together with federated loss\r\n get_fed_loss_cls_weights (Callable): a callable which takes dataset name and frequency\r\n weight power, and returns the probabilities to sample negative classes for\r\n federated loss. The implementation can be found in\r\n detectron2/data/detection_utils.py\r\n fed_loss_num_classes (int): number of federated classes to keep in total\r\n \"\"\"\r\n super().__init__()\r\n if isinstance(input_shape, int): # some backward compatibility\r\n input_shape = ShapeSpec(channels=input_shape)\r\n self.num_classes = num_classes\r\n input_size = input_shape.channels * (input_shape.width or 1) * (input_shape.height or 1)\r\n # prediction layer for num_classes foreground classes and one background class (hence + 1)\r\n self.cls_score = nn.Linear(input_size, num_classes + 1)\r\n num_bbox_reg_classes = 1 if cls_agnostic_bbox_reg else num_classes\r\n box_dim = len(box2box_transform.weights)\r\n self.bbox_pred = nn.Linear(input_size, num_bbox_reg_classes * box_dim)\r\n\r\n nn.init.normal_(self.cls_score.weight, std=0.01)\r\n nn.init.normal_(self.bbox_pred.weight, std=0.001)\r\n for l in [self.cls_score, self.bbox_pred]:\r\n nn.init.constant_(l.bias, 0)\r\n\r\n self.box2box_transform = box2box_transform\r\n self.smooth_l1_beta = smooth_l1_beta\r\n self.test_score_thresh = test_score_thresh\r\n self.test_nms_thresh = test_nms_thresh\r\n self.test_topk_per_image = test_topk_per_image\r\n self.box_reg_loss_type = box_reg_loss_type\r\n if isinstance(loss_weight, float):\r\n loss_weight = {\"loss_cls\": loss_weight, \"loss_box_reg\": loss_weight}\r\n self.loss_weight = loss_weight\r\n self.use_fed_loss = use_fed_loss\r\n self.use_sigmoid_ce = use_sigmoid_ce\r\n self.fed_loss_num_classes = fed_loss_num_classes\r\n\r\n if self.use_fed_loss:\r\n assert self.use_sigmoid_ce, \"Please use sigmoid cross entropy loss with federated loss\"\r\n fed_loss_cls_weights = get_fed_loss_cls_weights()\r\n assert (\r\n len(fed_loss_cls_weights) == self.num_classes\r\n ), \"Please check the provided fed_loss_cls_weights. Their size should match num_classes\"\r\n self.register_buffer(\"fed_loss_cls_weights\", fed_loss_cls_weights)\r\n\r\n @classmethod\r\n def from_config(cls, cfg, input_shape):\r\n return {\r\n \"input_shape\": input_shape,\r\n \"box2box_transform\": Box2BoxTransform(weights=cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS),\r\n # fmt: off\r\n \"num_classes\" : cfg.MODEL.ROI_HEADS.NUM_CLASSES,\r\n \"cls_agnostic_bbox_reg\" : cfg.MODEL.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG,\r\n \"smooth_l1_beta\" : cfg.MODEL.ROI_BOX_HEAD.SMOOTH_L1_BETA,\r\n \"test_score_thresh\" : cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST,\r\n \"test_nms_thresh\" : cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST,\r\n \"test_topk_per_image\" : cfg.TEST.DETECTIONS_PER_IMAGE,\r\n \"box_reg_loss_type\" : cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_LOSS_TYPE,\r\n \"loss_weight\" : {\"loss_box_reg\": cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_LOSS_WEIGHT}, # noqa\r\n \"use_fed_loss\" : cfg.MODEL.ROI_BOX_HEAD.USE_FED_LOSS,\r\n \"use_sigmoid_ce\" : cfg.MODEL.ROI_BOX_HEAD.USE_SIGMOID_CE,\r\n \"get_fed_loss_cls_weights\" : lambda: get_fed_loss_cls_weights(dataset_names=cfg.DATASETS.TRAIN, freq_weight_power=cfg.MODEL.ROI_BOX_HEAD.FED_LOSS_FREQ_WEIGHT_POWER), # noqa\r\n \"fed_loss_num_classes\" : cfg.MODEL.ROI_BOX_HEAD.FED_LOSS_NUM_CLASSES,\r\n # fmt: on\r\n }\r\n\r\n def forward(self, x):\r\n \"\"\"\r\n Args:\r\n x: per-region features of shape (N, ...) 
for N bounding boxes to predict.\r\n\r\n Returns:\r\n (Tensor, Tensor):\r\n First tensor: shape (N,K+1), scores for each of the N box. Each row contains the\r\n scores for K object categories and 1 background class.\r\n\r\n Second tensor: bounding box regression deltas for each box. Shape is shape (N,Kx4),\r\n or (N,4) for class-agnostic regression.\r\n \"\"\"\r\n if x.dim() > 2:\r\n x = torch.flatten(x, start_dim=1)\r\n scores = self.cls_score(x)\r\n proposal_deltas = self.bbox_pred(x)\r\n return scores, proposal_deltas\r\n\r\n def losses(self, predictions, proposals):\r\n \"\"\"\r\n Args:\r\n predictions: return values of :meth:`forward()`.\r\n proposals (list[Instances]): proposals that match the features that were used\r\n to compute predictions. The fields ``proposal_boxes``, ``gt_boxes``,\r\n ``gt_classes`` are expected.\r\n\r\n Returns:\r\n Dict[str, Tensor]: dict of losses\r\n \"\"\"\r\n scores, proposal_deltas = predictions\r\n\r\n # parse classification outputs\r\n gt_classes = (\r\n cat([p.gt_classes for p in proposals], dim=0) if len(proposals) else torch.empty(0)\r\n )\r\n _log_classification_stats(scores, gt_classes)\r\n\r\n # parse box regression outputs\r\n if len(proposals):\r\n proposal_boxes = cat([p.proposal_boxes.tensor for p in proposals], dim=0) # Nx4\r\n assert not proposal_boxes.requires_grad, \"Proposals should not require gradients!\"\r\n # If \"gt_boxes\" does not exist, the proposals must be all negative and\r\n # should not be included in regression loss computation.\r\n # Here we just use proposal_boxes as an arbitrary placeholder because its\r\n # value won't be used in self.box_reg_loss().\r\n gt_boxes = cat(\r\n [(p.gt_boxes if p.has(\"gt_boxes\") else p.proposal_boxes).tensor for p in proposals],\r\n dim=0,\r\n )\r\n else:\r\n proposal_boxes = gt_boxes = torch.empty((0, 4), device=proposal_deltas.device)\r\n\r\n if self.use_sigmoid_ce:\r\n loss_cls = self.sigmoid_cross_entropy_loss(scores, gt_classes)\r\n else:\r\n loss_cls = cross_entropy(scores, gt_classes, reduction=\"mean\")\r\n\r\n losses = {\r\n \"loss_cls\": loss_cls,\r\n \"loss_box_reg\": self.box_reg_loss(\r\n proposal_boxes, gt_boxes, proposal_deltas, gt_classes\r\n ),\r\n }\r\n return {k: v * self.loss_weight.get(k, 1.0) for k, v in losses.items()}\r\n\r\n # Implementation from https://github.com/xingyizhou/CenterNet2/blob/master/projects/CenterNet2/centernet/modeling/roi_heads/fed_loss.py # noqa\r\n # with slight modifications\r\n def get_fed_loss_classes(self, gt_classes, num_fed_loss_classes, num_classes, weight):\r\n \"\"\"\r\n Args:\r\n gt_classes: a long tensor of shape R that contains the gt class label of each proposal.\r\n num_fed_loss_classes: minimum number of classes to keep when calculating federated loss.\r\n Will sample negative classes if number of unique gt_classes is smaller than this value.\r\n num_classes: number of foreground classes\r\n weight: probabilities used to sample negative classes\r\n\r\n Returns:\r\n Tensor:\r\n classes to keep when calculating the federated loss, including both unique gt\r\n classes and sampled negative classes.\r\n \"\"\"\r\n unique_gt_classes = torch.unique(gt_classes)\r\n prob = unique_gt_classes.new_ones(num_classes + 1).float()\r\n prob[-1] = 0\r\n if len(unique_gt_classes) < num_fed_loss_classes:\r\n prob[:num_classes] = weight.float().clone()\r\n prob[unique_gt_classes] = 0\r\n sampled_negative_classes = torch.multinomial(\r\n prob, num_fed_loss_classes - len(unique_gt_classes), replacement=False\r\n )\r\n fed_loss_classes = 
torch.cat([unique_gt_classes, sampled_negative_classes])\r\n else:\r\n fed_loss_classes = unique_gt_classes\r\n return fed_loss_classes\r\n\r\n # Implementation from https://github.com/xingyizhou/CenterNet2/blob/master/projects/CenterNet2/centernet/modeling/roi_heads/custom_fast_rcnn.py#L113 # noqa\r\n # with slight modifications\r\n def sigmoid_cross_entropy_loss(self, pred_class_logits, gt_classes):\r\n \"\"\"\r\n Args:\r\n pred_class_logits: shape (N, K+1), scores for each of the N box. Each row contains the\r\n scores for K object categories and 1 background class\r\n gt_classes: a long tensor of shape R that contains the gt class label of each proposal.\r\n \"\"\"\r\n if pred_class_logits.numel() == 0:\r\n return pred_class_logits.new_zeros([1])[0]\r\n\r\n N = pred_class_logits.shape[0]\r\n K = pred_class_logits.shape[1] - 1\r\n\r\n target = pred_class_logits.new_zeros(N, K + 1)\r\n target[range(len(gt_classes)), gt_classes] = 1\r\n target = target[:, :K]\r\n\r\n cls_loss = F.binary_cross_entropy_with_logits(\r\n pred_class_logits[:, :-1], target, reduction=\"none\"\r\n )\r\n\r\n if self.use_fed_loss:\r\n fed_loss_classes = self.get_fed_loss_classes(\r\n gt_classes,\r\n num_fed_loss_classes=self.fed_loss_num_classes,\r\n num_classes=K,\r\n weight=self.fed_loss_cls_weights,\r\n )\r\n fed_loss_classes_mask = fed_loss_classes.new_zeros(K + 1)\r\n fed_loss_classes_mask[fed_loss_classes] = 1\r\n fed_loss_classes_mask = fed_loss_classes_mask[:K]\r\n weight = fed_loss_classes_mask.view(1, K).expand(N, K).float()\r\n else:\r\n weight = 1\r\n\r\n loss = torch.sum(cls_loss * weight) / N\r\n return loss\r\n\r\n def box_reg_loss(self, proposal_boxes, gt_boxes, pred_deltas, gt_classes):\r\n \"\"\"\r\n Args:\r\n proposal_boxes/gt_boxes are tensors with the same shape (R, 4 or 5).\r\n pred_deltas has shape (R, 4 or 5), or (R, num_classes * (4 or 5)).\r\n gt_classes is a long tensor of shape R, the gt class label of each proposal.\r\n R shall be the number of proposals.\r\n \"\"\"\r\n box_dim = proposal_boxes.shape[1] # 4 or 5\r\n # Regression loss is only computed for foreground proposals (those matched to a GT)\r\n fg_inds = nonzero_tuple((gt_classes >= 0) & (gt_classes < self.num_classes))[0]\r\n if pred_deltas.shape[1] == box_dim: # cls-agnostic regression\r\n fg_pred_deltas = pred_deltas[fg_inds]\r\n else:\r\n fg_pred_deltas = pred_deltas.view(-1, self.num_classes, box_dim)[\r\n fg_inds, gt_classes[fg_inds]\r\n ]\r\n\r\n loss_box_reg = _dense_box_regression_loss(\r\n [proposal_boxes[fg_inds]],\r\n self.box2box_transform,\r\n [fg_pred_deltas.unsqueeze(0)],\r\n [gt_boxes[fg_inds]],\r\n ...,\r\n self.box_reg_loss_type,\r\n self.smooth_l1_beta,\r\n )\r\n\r\n # The reg loss is normalized using the total number of regions (R), not the number\r\n # of foreground regions even though the box regression loss is only defined on\r\n # foreground regions. Why? Because doing so gives equal training influence to\r\n # each foreground example. To see how, consider two different minibatches:\r\n # (1) Contains a single foreground region\r\n # (2) Contains 100 foreground regions\r\n # If we normalize by the number of foreground regions, the single example in\r\n # minibatch (1) will be given 100 times as much influence as each foreground\r\n # example in minibatch (2). 
Normalizing by the total number of regions, R,\r\n # means that the single example in minibatch (1) and each of the 100 examples\r\n # in minibatch (2) are given equal influence.\r\n return loss_box_reg / max(gt_classes.numel(), 1.0) # return 0 if empty\r\n\r\n def inference(self, predictions: Tuple[torch.Tensor, torch.Tensor], proposals: List[Instances]):\r\n \"\"\"\r\n Args:\r\n predictions: return values of :meth:`forward()`.\r\n proposals (list[Instances]): proposals that match the features that were\r\n used to compute predictions. The ``proposal_boxes`` field is expected.\r\n\r\n Returns:\r\n list[Instances]: same as `fast_rcnn_inference`.\r\n list[Tensor]: same as `fast_rcnn_inference`.\r\n \"\"\"\r\n boxes = self.predict_boxes(predictions, proposals)\r\n scores = self.predict_probs(predictions, proposals)\r\n image_shapes = [x.image_size for x in proposals]\r\n return fast_rcnn_inference(\r\n boxes,\r\n scores,\r\n image_shapes,\r\n self.test_score_thresh,\r\n self.test_nms_thresh,\r\n self.test_topk_per_image,\r\n )\r\n\r\n def predict_boxes_for_gt_classes(self, predictions, proposals):\r\n \"\"\"\r\n Args:\r\n predictions: return values of :meth:`forward()`.\r\n proposals (list[Instances]): proposals that match the features that were used\r\n to compute predictions. The fields ``proposal_boxes``, ``gt_classes`` are expected.\r\n\r\n Returns:\r\n list[Tensor]:\r\n A list of Tensors of predicted boxes for GT classes in case of\r\n class-specific box head. Element i of the list has shape (Ri, B), where Ri is\r\n the number of proposals for image i and B is the box dimension (4 or 5)\r\n \"\"\"\r\n if not len(proposals):\r\n return []\r\n scores, proposal_deltas = predictions\r\n proposal_boxes = cat([p.proposal_boxes.tensor for p in proposals], dim=0)\r\n N, B = proposal_boxes.shape\r\n predict_boxes = self.box2box_transform.apply_deltas(\r\n proposal_deltas, proposal_boxes\r\n ) # Nx(KxB)\r\n\r\n K = predict_boxes.shape[1] // B\r\n if K > 1:\r\n gt_classes = torch.cat([p.gt_classes for p in proposals], dim=0)\r\n # Some proposals are ignored or have a background class. Their gt_classes\r\n # cannot be used as index.\r\n gt_classes = gt_classes.clamp_(0, K - 1)\r\n\r\n predict_boxes = predict_boxes.view(N, K, B)[\r\n torch.arange(N, dtype=torch.long, device=predict_boxes.device), gt_classes\r\n ]\r\n num_prop_per_image = [len(p) for p in proposals]\r\n return predict_boxes.split(num_prop_per_image)\r\n\r\n def predict_boxes(\r\n self, predictions: Tuple[torch.Tensor, torch.Tensor], proposals: List[Instances]\r\n ):\r\n \"\"\"\r\n Args:\r\n predictions: return values of :meth:`forward()`.\r\n proposals (list[Instances]): proposals that match the features that were\r\n used to compute predictions. The ``proposal_boxes`` field is expected.\r\n\r\n Returns:\r\n list[Tensor]:\r\n A list of Tensors of predicted class-specific or class-agnostic boxes\r\n for each image. 
Element i has shape (Ri, K * B) or (Ri, B), where Ri is\r\n the number of proposals for image i and B is the box dimension (4 or 5)\r\n \"\"\"\r\n if not len(proposals):\r\n return []\r\n _, proposal_deltas = predictions\r\n num_prop_per_image = [len(p) for p in proposals]\r\n proposal_boxes = cat([p.proposal_boxes.tensor for p in proposals], dim=0)\r\n predict_boxes = self.box2box_transform.apply_deltas(\r\n proposal_deltas,\r\n proposal_boxes,\r\n ) # Nx(KxB)\r\n return predict_boxes.split(num_prop_per_image)\r\n\r\n def predict_probs(\r\n self, predictions: Tuple[torch.Tensor, torch.Tensor], proposals: List[Instances]\r\n ):\r\n \"\"\"\r\n Args:\r\n predictions: return values of :meth:`forward()`.\r\n proposals (list[Instances]): proposals that match the features that were\r\n used to compute predictions.\r\n\r\n Returns:\r\n list[Tensor]:\r\n A list of Tensors of predicted class probabilities for each image.\r\n Element i has shape (Ri, K + 1), where Ri is the number of proposals for image i.\r\n \"\"\"\r\n scores, _ = predictions\r\n num_inst_per_image = [len(p) for p in proposals]\r\n if self.use_sigmoid_ce:\r\n probs = scores.sigmoid()\r\n else:\r\n probs = F.softmax(scores, dim=-1)\r\n return probs.split(num_inst_per_image, dim=0)\r" }, { "identifier": "Caffe2Compatible", "path": "annotator/oneformer/detectron2/export/c10.py", "snippet": "class Caffe2Compatible(object):\r\n \"\"\"\r\n A model can inherit this class to indicate that it can be traced and deployed with caffe2.\r\n \"\"\"\r\n\r\n def _get_tensor_mode(self):\r\n return self._tensor_mode\r\n\r\n def _set_tensor_mode(self, v):\r\n self._tensor_mode = v\r\n\r\n tensor_mode = property(_get_tensor_mode, _set_tensor_mode)\r\n \"\"\"\r\n If true, the model expects C2-style tensor only inputs/outputs format.\r\n \"\"\"\r" }, { "identifier": "Caffe2FastRCNNOutputsInference", "path": "annotator/oneformer/detectron2/export/c10.py", "snippet": "class Caffe2FastRCNNOutputsInference:\r\n def __init__(self, tensor_mode):\r\n self.tensor_mode = tensor_mode # whether the output is caffe2 tensor mode\r\n\r\n def __call__(self, box_predictor, predictions, proposals):\r\n \"\"\"equivalent to FastRCNNOutputLayers.inference\"\"\"\r\n num_classes = box_predictor.num_classes\r\n score_thresh = box_predictor.test_score_thresh\r\n nms_thresh = box_predictor.test_nms_thresh\r\n topk_per_image = box_predictor.test_topk_per_image\r\n is_rotated = len(box_predictor.box2box_transform.weights) == 5\r\n\r\n if is_rotated:\r\n box_dim = 5\r\n assert box_predictor.box2box_transform.weights[4] == 1, (\r\n \"The weights for Rotated BBoxTransform in C2 have only 4 dimensions,\"\r\n + \" thus enforcing the angle weight to be 1 for now\"\r\n )\r\n box2box_transform_weights = box_predictor.box2box_transform.weights[:4]\r\n else:\r\n box_dim = 4\r\n box2box_transform_weights = box_predictor.box2box_transform.weights\r\n\r\n class_logits, box_regression = predictions\r\n if num_classes + 1 == class_logits.shape[1]:\r\n class_prob = F.softmax(class_logits, -1)\r\n else:\r\n assert num_classes == class_logits.shape[1]\r\n class_prob = F.sigmoid(class_logits)\r\n # BoxWithNMSLimit will infer num_classes from the shape of the class_prob\r\n # So append a zero column as placeholder for the background class\r\n class_prob = torch.cat((class_prob, torch.zeros(class_prob.shape[0], 1)), dim=1)\r\n\r\n assert box_regression.shape[1] % box_dim == 0\r\n cls_agnostic_bbox_reg = box_regression.shape[1] // box_dim == 1\r\n\r\n input_tensor_mode = 
proposals[0].proposal_boxes.tensor.shape[1] == box_dim + 1\r\n\r\n proposal_boxes = proposals[0].proposal_boxes\r\n if isinstance(proposal_boxes, Caffe2Boxes):\r\n rois = Caffe2Boxes.cat([p.proposal_boxes for p in proposals])\r\n elif isinstance(proposal_boxes, RotatedBoxes):\r\n rois = RotatedBoxes.cat([p.proposal_boxes for p in proposals])\r\n elif isinstance(proposal_boxes, Boxes):\r\n rois = Boxes.cat([p.proposal_boxes for p in proposals])\r\n else:\r\n raise NotImplementedError(\r\n 'Expected proposals[0].proposal_boxes to be type \"Boxes\", '\r\n f\"instead got {type(proposal_boxes)}\"\r\n )\r\n\r\n device, dtype = rois.tensor.device, rois.tensor.dtype\r\n if input_tensor_mode:\r\n im_info = proposals[0].image_size\r\n rois = rois.tensor\r\n else:\r\n im_info = torch.tensor(\r\n [[sz[0], sz[1], 1.0] for sz in [x.image_size for x in proposals]]\r\n )\r\n batch_ids = cat(\r\n [\r\n torch.full((b, 1), i, dtype=dtype, device=device)\r\n for i, b in enumerate(len(p) for p in proposals)\r\n ],\r\n dim=0,\r\n )\r\n rois = torch.cat([batch_ids, rois.tensor], dim=1)\r\n\r\n roi_pred_bbox, roi_batch_splits = torch.ops._caffe2.BBoxTransform(\r\n to_device(rois, \"cpu\"),\r\n to_device(box_regression, \"cpu\"),\r\n to_device(im_info, \"cpu\"),\r\n weights=box2box_transform_weights,\r\n apply_scale=True,\r\n rotated=is_rotated,\r\n angle_bound_on=True,\r\n angle_bound_lo=-180,\r\n angle_bound_hi=180,\r\n clip_angle_thresh=1.0,\r\n legacy_plus_one=False,\r\n )\r\n roi_pred_bbox = to_device(roi_pred_bbox, device)\r\n roi_batch_splits = to_device(roi_batch_splits, device)\r\n\r\n nms_outputs = torch.ops._caffe2.BoxWithNMSLimit(\r\n to_device(class_prob, \"cpu\"),\r\n to_device(roi_pred_bbox, \"cpu\"),\r\n to_device(roi_batch_splits, \"cpu\"),\r\n score_thresh=float(score_thresh),\r\n nms=float(nms_thresh),\r\n detections_per_im=int(topk_per_image),\r\n soft_nms_enabled=False,\r\n soft_nms_method=\"linear\",\r\n soft_nms_sigma=0.5,\r\n soft_nms_min_score_thres=0.001,\r\n rotated=is_rotated,\r\n cls_agnostic_bbox_reg=cls_agnostic_bbox_reg,\r\n input_boxes_include_bg_cls=False,\r\n output_classes_include_bg_cls=False,\r\n legacy_plus_one=False,\r\n )\r\n roi_score_nms = to_device(nms_outputs[0], device)\r\n roi_bbox_nms = to_device(nms_outputs[1], device)\r\n roi_class_nms = to_device(nms_outputs[2], device)\r\n roi_batch_splits_nms = to_device(nms_outputs[3], device)\r\n roi_keeps_nms = to_device(nms_outputs[4], device)\r\n roi_keeps_size_nms = to_device(nms_outputs[5], device)\r\n if not self.tensor_mode:\r\n roi_class_nms = roi_class_nms.to(torch.int64)\r\n\r\n roi_batch_ids = cat(\r\n [\r\n torch.full((b, 1), i, dtype=dtype, device=device)\r\n for i, b in enumerate(int(x.item()) for x in roi_batch_splits_nms)\r\n ],\r\n dim=0,\r\n )\r\n\r\n roi_class_nms = alias(roi_class_nms, \"class_nms\")\r\n roi_score_nms = alias(roi_score_nms, \"score_nms\")\r\n roi_bbox_nms = alias(roi_bbox_nms, \"bbox_nms\")\r\n roi_batch_splits_nms = alias(roi_batch_splits_nms, \"batch_splits_nms\")\r\n roi_keeps_nms = alias(roi_keeps_nms, \"keeps_nms\")\r\n roi_keeps_size_nms = alias(roi_keeps_size_nms, \"keeps_size_nms\")\r\n\r\n results = InstancesList(\r\n im_info=im_info,\r\n indices=roi_batch_ids[:, 0],\r\n extra_fields={\r\n \"pred_boxes\": Caffe2Boxes(roi_bbox_nms),\r\n \"scores\": roi_score_nms,\r\n \"pred_classes\": roi_class_nms,\r\n },\r\n )\r\n\r\n if not self.tensor_mode:\r\n results = InstancesList.to_d2_instances_list(results)\r\n batch_splits = roi_batch_splits_nms.int().tolist()\r\n kept_indices = 
list(roi_keeps_nms.to(torch.int64).split(batch_splits))\r\n else:\r\n results = [results]\r\n kept_indices = [roi_keeps_nms]\r\n\r\n return results, kept_indices\r" }, { "identifier": "Caffe2KeypointRCNNInference", "path": "annotator/oneformer/detectron2/export/c10.py", "snippet": "class Caffe2KeypointRCNNInference:\r\n def __init__(self, use_heatmap_max_keypoint):\r\n self.use_heatmap_max_keypoint = use_heatmap_max_keypoint\r\n\r\n def __call__(self, pred_keypoint_logits, pred_instances):\r\n # just return the keypoint heatmap for now,\r\n # there will be option to call HeatmapMaxKeypointOp\r\n output = alias(pred_keypoint_logits, \"kps_score\")\r\n if all(isinstance(x, InstancesList) for x in pred_instances):\r\n assert len(pred_instances) == 1\r\n if self.use_heatmap_max_keypoint:\r\n device = output.device\r\n output = torch.ops._caffe2.HeatmapMaxKeypoint(\r\n to_device(output, \"cpu\"),\r\n pred_instances[0].pred_boxes.tensor,\r\n should_output_softmax=True, # worth make it configerable?\r\n )\r\n output = to_device(output, device)\r\n output = alias(output, \"keypoints_out\")\r\n pred_instances[0].set(\"pred_keypoints\", output)\r\n return pred_keypoint_logits\r" }, { "identifier": "Caffe2MaskRCNNInference", "path": "annotator/oneformer/detectron2/export/c10.py", "snippet": "class Caffe2MaskRCNNInference:\r\n def __call__(self, pred_mask_logits, pred_instances):\r\n \"\"\"equivalent to mask_head.mask_rcnn_inference\"\"\"\r\n if all(isinstance(x, InstancesList) for x in pred_instances):\r\n assert len(pred_instances) == 1\r\n mask_probs_pred = pred_mask_logits.sigmoid()\r\n mask_probs_pred = alias(mask_probs_pred, \"mask_fcn_probs\")\r\n pred_instances[0].set(\"pred_masks\", mask_probs_pred)\r\n else:\r\n mask_rcnn_inference(pred_mask_logits, pred_instances)\r" }, { "identifier": "Caffe2ROIPooler", "path": "annotator/oneformer/detectron2/export/c10.py", "snippet": "class Caffe2ROIPooler(Caffe2Compatible, poolers.ROIPooler):\r\n @staticmethod\r\n def c2_preprocess(box_lists):\r\n assert all(isinstance(x, Boxes) for x in box_lists)\r\n if all(isinstance(x, Caffe2Boxes) for x in box_lists):\r\n # input is pure-tensor based\r\n assert len(box_lists) == 1\r\n pooler_fmt_boxes = box_lists[0].tensor\r\n else:\r\n pooler_fmt_boxes = poolers.convert_boxes_to_pooler_format(box_lists)\r\n return pooler_fmt_boxes\r\n\r\n def forward(self, x, box_lists):\r\n assert not self.training\r\n\r\n pooler_fmt_boxes = self.c2_preprocess(box_lists)\r\n num_level_assignments = len(self.level_poolers)\r\n\r\n if num_level_assignments == 1:\r\n if isinstance(self.level_poolers[0], ROIAlignRotated):\r\n c2_roi_align = torch.ops._caffe2.RoIAlignRotated\r\n aligned = True\r\n else:\r\n c2_roi_align = torch.ops._caffe2.RoIAlign\r\n aligned = self.level_poolers[0].aligned\r\n\r\n x0 = x[0]\r\n if x0.is_quantized:\r\n x0 = x0.dequantize()\r\n\r\n out = c2_roi_align(\r\n x0,\r\n pooler_fmt_boxes,\r\n order=\"NCHW\",\r\n spatial_scale=float(self.level_poolers[0].spatial_scale),\r\n pooled_h=int(self.output_size[0]),\r\n pooled_w=int(self.output_size[1]),\r\n sampling_ratio=int(self.level_poolers[0].sampling_ratio),\r\n aligned=aligned,\r\n )\r\n return out\r\n\r\n device = pooler_fmt_boxes.device\r\n assert (\r\n self.max_level - self.min_level + 1 == 4\r\n ), \"Currently DistributeFpnProposals only support 4 levels\"\r\n fpn_outputs = torch.ops._caffe2.DistributeFpnProposals(\r\n to_device(pooler_fmt_boxes, \"cpu\"),\r\n roi_canonical_scale=self.canonical_box_size,\r\n roi_canonical_level=self.canonical_level,\r\n 
roi_max_level=self.max_level,\r\n roi_min_level=self.min_level,\r\n legacy_plus_one=False,\r\n )\r\n fpn_outputs = [to_device(x, device) for x in fpn_outputs]\r\n\r\n rois_fpn_list = fpn_outputs[:-1]\r\n rois_idx_restore_int32 = fpn_outputs[-1]\r\n\r\n roi_feat_fpn_list = []\r\n for roi_fpn, x_level, pooler in zip(rois_fpn_list, x, self.level_poolers):\r\n if isinstance(pooler, ROIAlignRotated):\r\n c2_roi_align = torch.ops._caffe2.RoIAlignRotated\r\n aligned = True\r\n else:\r\n c2_roi_align = torch.ops._caffe2.RoIAlign\r\n aligned = bool(pooler.aligned)\r\n\r\n if x_level.is_quantized:\r\n x_level = x_level.dequantize()\r\n\r\n roi_feat_fpn = c2_roi_align(\r\n x_level,\r\n roi_fpn,\r\n order=\"NCHW\",\r\n spatial_scale=float(pooler.spatial_scale),\r\n pooled_h=int(self.output_size[0]),\r\n pooled_w=int(self.output_size[1]),\r\n sampling_ratio=int(pooler.sampling_ratio),\r\n aligned=aligned,\r\n )\r\n roi_feat_fpn_list.append(roi_feat_fpn)\r\n\r\n roi_feat_shuffled = cat(roi_feat_fpn_list, dim=0)\r\n assert roi_feat_shuffled.numel() > 0 and rois_idx_restore_int32.numel() > 0, (\r\n \"Caffe2 export requires tracing with a model checkpoint + input that can produce valid\"\r\n \" detections. But no detections were obtained with the given checkpoint and input!\"\r\n )\r\n roi_feat = torch.ops._caffe2.BatchPermutation(roi_feat_shuffled, rois_idx_restore_int32)\r\n return roi_feat\r" }, { "identifier": "Caffe2RPN", "path": "annotator/oneformer/detectron2/export/c10.py", "snippet": "class Caffe2RPN(Caffe2Compatible, rpn.RPN):\r\n @classmethod\r\n def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]):\r\n ret = super(Caffe2Compatible, cls).from_config(cfg, input_shape)\r\n assert tuple(cfg.MODEL.RPN.BBOX_REG_WEIGHTS) == (1.0, 1.0, 1.0, 1.0) or tuple(\r\n cfg.MODEL.RPN.BBOX_REG_WEIGHTS\r\n ) == (1.0, 1.0, 1.0, 1.0, 1.0)\r\n return ret\r\n\r\n def _generate_proposals(\r\n self, images, objectness_logits_pred, anchor_deltas_pred, gt_instances=None\r\n ):\r\n assert isinstance(images, ImageList)\r\n if self.tensor_mode:\r\n im_info = images.image_sizes\r\n else:\r\n im_info = torch.tensor([[im_sz[0], im_sz[1], 1.0] for im_sz in images.image_sizes]).to(\r\n images.tensor.device\r\n )\r\n assert isinstance(im_info, torch.Tensor)\r\n\r\n rpn_rois_list = []\r\n rpn_roi_probs_list = []\r\n for scores, bbox_deltas, cell_anchors_tensor, feat_stride in zip(\r\n objectness_logits_pred,\r\n anchor_deltas_pred,\r\n [b for (n, b) in self.anchor_generator.cell_anchors.named_buffers()],\r\n self.anchor_generator.strides,\r\n ):\r\n scores = scores.detach()\r\n bbox_deltas = bbox_deltas.detach()\r\n\r\n rpn_rois, rpn_roi_probs = torch.ops._caffe2.GenerateProposals(\r\n scores,\r\n bbox_deltas,\r\n im_info,\r\n cell_anchors_tensor,\r\n spatial_scale=1.0 / feat_stride,\r\n pre_nms_topN=self.pre_nms_topk[self.training],\r\n post_nms_topN=self.post_nms_topk[self.training],\r\n nms_thresh=self.nms_thresh,\r\n min_size=self.min_box_size,\r\n # correct_transform_coords=True, # deprecated argument\r\n angle_bound_on=True, # Default\r\n angle_bound_lo=-180,\r\n angle_bound_hi=180,\r\n clip_angle_thresh=1.0, # Default\r\n legacy_plus_one=False,\r\n )\r\n rpn_rois_list.append(rpn_rois)\r\n rpn_roi_probs_list.append(rpn_roi_probs)\r\n\r\n # For FPN in D2, in RPN all proposals from different levels are concated\r\n # together, ranked and picked by top post_nms_topk. 
Then in ROIPooler\r\n # it calculates level_assignments and calls the RoIAlign from\r\n # the corresponding level.\r\n\r\n if len(objectness_logits_pred) == 1:\r\n rpn_rois = rpn_rois_list[0]\r\n rpn_roi_probs = rpn_roi_probs_list[0]\r\n else:\r\n assert len(rpn_rois_list) == len(rpn_roi_probs_list)\r\n rpn_post_nms_topN = self.post_nms_topk[self.training]\r\n\r\n device = rpn_rois_list[0].device\r\n input_list = [to_device(x, \"cpu\") for x in (rpn_rois_list + rpn_roi_probs_list)]\r\n\r\n # TODO remove this after confirming rpn_max_level/rpn_min_level\r\n # is not needed in CollectRpnProposals.\r\n feature_strides = list(self.anchor_generator.strides)\r\n rpn_min_level = int(math.log2(feature_strides[0]))\r\n rpn_max_level = int(math.log2(feature_strides[-1]))\r\n assert (rpn_max_level - rpn_min_level + 1) == len(\r\n rpn_rois_list\r\n ), \"CollectRpnProposals requires continuous levels\"\r\n\r\n rpn_rois = torch.ops._caffe2.CollectRpnProposals(\r\n input_list,\r\n # NOTE: in current implementation, rpn_max_level and rpn_min_level\r\n # are not needed, only the subtraction of two matters and it\r\n # can be infer from the number of inputs. Keep them now for\r\n # consistency.\r\n rpn_max_level=2 + len(rpn_rois_list) - 1,\r\n rpn_min_level=2,\r\n rpn_post_nms_topN=rpn_post_nms_topN,\r\n )\r\n rpn_rois = to_device(rpn_rois, device)\r\n rpn_roi_probs = []\r\n\r\n proposals = self.c2_postprocess(im_info, rpn_rois, rpn_roi_probs, self.tensor_mode)\r\n return proposals, {}\r\n\r\n def forward(self, images, features, gt_instances=None):\r\n assert not self.training\r\n features = [features[f] for f in self.in_features]\r\n objectness_logits_pred, anchor_deltas_pred = self.rpn_head(features)\r\n return self._generate_proposals(\r\n images,\r\n objectness_logits_pred,\r\n anchor_deltas_pred,\r\n gt_instances,\r\n )\r\n\r\n @staticmethod\r\n def c2_postprocess(im_info, rpn_rois, rpn_roi_probs, tensor_mode):\r\n proposals = InstancesList(\r\n im_info=im_info,\r\n indices=rpn_rois[:, 0],\r\n extra_fields={\r\n \"proposal_boxes\": Caffe2Boxes(rpn_rois),\r\n \"objectness_logits\": (torch.Tensor, rpn_roi_probs),\r\n },\r\n )\r\n if not tensor_mode:\r\n proposals = InstancesList.to_d2_instances_list(proposals)\r\n else:\r\n proposals = [proposals]\r\n return proposals\r" } ]
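The get_fed_loss_classes snippet in the context above pads the set of ground-truth classes with sampled negative classes so that the federated loss only penalizes a subset of categories per minibatch. Below is a condensed, standalone sketch of that sampling step; the function and variable names (pad_with_negative_classes, num_keep) are illustrative and not part of detectron2's API.

import torch

def pad_with_negative_classes(gt_classes, num_keep, num_classes, weight):
    # Keep every unique GT class; if fewer than num_keep, sample extra
    # negative classes (never the background slot, never an already
    # present class) with probabilities given by the per-class weights.
    unique_gt = torch.unique(gt_classes)
    prob = unique_gt.new_ones(num_classes + 1).float()
    prob[-1] = 0                                   # background is never sampled
    if len(unique_gt) < num_keep:
        prob[:num_classes] = weight.float().clone()
        prob[unique_gt] = 0                        # do not re-sample positives
        negatives = torch.multinomial(
            prob, num_keep - len(unique_gt), replacement=False
        )
        return torch.cat([unique_gt, negatives])
    return unique_gt

# toy usage: 10 foreground classes, keep at least 5 classes per minibatch
gt = torch.tensor([2, 2, 7], dtype=torch.long)
kept = pad_with_negative_classes(gt, num_keep=5, num_classes=10, weight=torch.ones(10))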
import contextlib
import torch
from unittest import mock

from annotator.oneformer.detectron2.modeling import poolers
from annotator.oneformer.detectron2.modeling.proposal_generator import rpn
from annotator.oneformer.detectron2.modeling.roi_heads import keypoint_head, mask_head
from annotator.oneformer.detectron2.modeling.roi_heads.fast_rcnn import FastRCNNOutputLayers

from .c10 import (
    Caffe2Compatible,
    Caffe2FastRCNNOutputsInference,
    Caffe2KeypointRCNNInference,
    Caffe2MaskRCNNInference,
    Caffe2ROIPooler,
    Caffe2RPN,
)
10,983
# Copyright (c) Facebook, Inc. and its affiliates. class GenericMixin(object): pass class Caffe2CompatibleConverter(object): """ A GenericUpdater which implements the `create_from` interface, by modifying module object and assign it with another class replaceCls. """ def __init__(self, replaceCls): self.replaceCls = replaceCls def create_from(self, module): # update module's class to the new class assert isinstance(module, torch.nn.Module) if issubclass(self.replaceCls, GenericMixin): # replaceCls should act as mixin, create a new class on-the-fly new_class = type( "{}MixedWith{}".format(self.replaceCls.__name__, module.__class__.__name__), (self.replaceCls, module.__class__), {}, # {"new_method": lambda self: ...}, ) module.__class__ = new_class else: # replaceCls is complete class, this allow arbitrary class swap module.__class__ = self.replaceCls # initialize Caffe2Compatible if isinstance(module, Caffe2Compatible): module.tensor_mode = False return module def patch(model, target, updater, *args, **kwargs): """ recursively (post-order) update all modules with the target type and its subclasses, make a initialization/composition/inheritance/... via the updater.create_from. """ for name, module in model.named_children(): model._modules[name] = patch(module, target, updater, *args, **kwargs) if isinstance(model, target): return updater.create_from(model, *args, **kwargs) return model def patch_generalized_rcnn(model): ccc = Caffe2CompatibleConverter model = patch(model, rpn.RPN, ccc(Caffe2RPN)) model = patch(model, poolers.ROIPooler, ccc(Caffe2ROIPooler)) return model @contextlib.contextmanager def mock_fastrcnn_outputs_inference(
# Copyright (c) Facebook, Inc. and its affiliates. class GenericMixin(object): pass class Caffe2CompatibleConverter(object): """ A GenericUpdater which implements the `create_from` interface, by modifying module object and assign it with another class replaceCls. """ def __init__(self, replaceCls): self.replaceCls = replaceCls def create_from(self, module): # update module's class to the new class assert isinstance(module, torch.nn.Module) if issubclass(self.replaceCls, GenericMixin): # replaceCls should act as mixin, create a new class on-the-fly new_class = type( "{}MixedWith{}".format(self.replaceCls.__name__, module.__class__.__name__), (self.replaceCls, module.__class__), {}, # {"new_method": lambda self: ...}, ) module.__class__ = new_class else: # replaceCls is complete class, this allow arbitrary class swap module.__class__ = self.replaceCls # initialize Caffe2Compatible if isinstance(module, Caffe2Compatible): module.tensor_mode = False return module def patch(model, target, updater, *args, **kwargs): """ recursively (post-order) update all modules with the target type and its subclasses, make a initialization/composition/inheritance/... via the updater.create_from. """ for name, module in model.named_children(): model._modules[name] = patch(module, target, updater, *args, **kwargs) if isinstance(model, target): return updater.create_from(model, *args, **kwargs) return model def patch_generalized_rcnn(model): ccc = Caffe2CompatibleConverter model = patch(model, rpn.RPN, ccc(Caffe2RPN)) model = patch(model, poolers.ROIPooler, ccc(Caffe2ROIPooler)) return model @contextlib.contextmanager def mock_fastrcnn_outputs_inference(
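Caffe2CompatibleConverter.create_from above swaps a live module's __class__ for a class built on the fly (optionally mixing the replacement in with the module's original class), so export-friendly methods are grafted onto an already constructed model without rebuilding it; patch_generalized_rcnn then applies that swap to the RPN and ROIPooler submodules. The pure-Python sketch below illustrates the mechanism in isolation; ExportMixin and Head are invented for the example.

class ExportMixin:
    # extra behaviour we want to graft onto an existing instance
    def export_name(self):
        return "caffe2::" + type(self).__name__

class Head:
    def forward(self, x):
        return x * 2

head = Head()
# build "ExportMixinMixedWithHead" on the fly, much like create_from() does
Mixed = type("ExportMixinMixedWith" + Head.__name__, (ExportMixin, Head), {})
head.__class__ = Mixed

assert head.forward(3) == 6                            # original behaviour preserved
assert head.export_name().endswith("MixedWithHead")    # mixin method now available

Because only the class pointer changes, the instance's own state (for an nn.Module: its parameters and buffers) is left untouched, which is why the converter can be applied to a fully loaded checkpoint right before tracing.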
tensor_mode, check=True, box_predictor_type=FastRCNNOutputLayers
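The cropped code stops at the mock_fastrcnn_outputs_inference definition and the next_line field only supplies its signature, so its body is not shown in this record. As a generic illustration of the contextlib + unittest.mock pattern those imports set up (temporarily replacing a method for the duration of a with block, then restoring it), here is a minimal sketch; mock_method and Predictor are invented for the example and are not detectron2 code.

import contextlib
from unittest import mock

@contextlib.contextmanager
def mock_method(cls, name, replacement):
    # patch cls.<name> with replacement inside the block, restore it afterwards
    with mock.patch.object(cls, name, replacement):
        yield

class Predictor:
    def inference(self, x):
        return "eager"

with mock_method(Predictor, "inference", lambda self, x: "caffe2"):
    assert Predictor().inference(None) == "caffe2"
assert Predictor().inference(None) == "eager"   # original method restored on exit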
4
2023-12-05 02:51:53+00:00
16k
u2seg/U2Seg
detectron2/data/build.py
[ { "identifier": "configurable", "path": "detectron2/config/config.py", "snippet": "def configurable(init_func=None, *, from_config=None):\n \"\"\"\n Decorate a function or a class's __init__ method so that it can be called\n with a :class:`CfgNode` object using a :func:`from_config` function that translates\n :class:`CfgNode` to arguments.\n\n Examples:\n ::\n # Usage 1: Decorator on __init__:\n class A:\n @configurable\n def __init__(self, a, b=2, c=3):\n pass\n\n @classmethod\n def from_config(cls, cfg): # 'cfg' must be the first argument\n # Returns kwargs to be passed to __init__\n return {\"a\": cfg.A, \"b\": cfg.B}\n\n a1 = A(a=1, b=2) # regular construction\n a2 = A(cfg) # construct with a cfg\n a3 = A(cfg, b=3, c=4) # construct with extra overwrite\n\n # Usage 2: Decorator on any function. Needs an extra from_config argument:\n @configurable(from_config=lambda cfg: {\"a: cfg.A, \"b\": cfg.B})\n def a_func(a, b=2, c=3):\n pass\n\n a1 = a_func(a=1, b=2) # regular call\n a2 = a_func(cfg) # call with a cfg\n a3 = a_func(cfg, b=3, c=4) # call with extra overwrite\n\n Args:\n init_func (callable): a class's ``__init__`` method in usage 1. The\n class must have a ``from_config`` classmethod which takes `cfg` as\n the first argument.\n from_config (callable): the from_config function in usage 2. It must take `cfg`\n as its first argument.\n \"\"\"\n\n if init_func is not None:\n assert (\n inspect.isfunction(init_func)\n and from_config is None\n and init_func.__name__ == \"__init__\"\n ), \"Incorrect use of @configurable. Check API documentation for examples.\"\n\n @functools.wraps(init_func)\n def wrapped(self, *args, **kwargs):\n try:\n from_config_func = type(self).from_config\n except AttributeError as e:\n raise AttributeError(\n \"Class with @configurable must have a 'from_config' classmethod.\"\n ) from e\n if not inspect.ismethod(from_config_func):\n raise TypeError(\"Class with @configurable must have a 'from_config' classmethod.\")\n\n if _called_with_cfg(*args, **kwargs):\n explicit_args = _get_args_from_config(from_config_func, *args, **kwargs)\n init_func(self, **explicit_args)\n else:\n init_func(self, *args, **kwargs)\n\n return wrapped\n\n else:\n if from_config is None:\n return configurable # @configurable() is made equivalent to @configurable\n assert inspect.isfunction(\n from_config\n ), \"from_config argument of configurable must be a function!\"\n\n def wrapper(orig_func):\n @functools.wraps(orig_func)\n def wrapped(*args, **kwargs):\n if _called_with_cfg(*args, **kwargs):\n explicit_args = _get_args_from_config(from_config, *args, **kwargs)\n return orig_func(**explicit_args)\n else:\n return orig_func(*args, **kwargs)\n\n wrapped.from_config = from_config\n return wrapped\n\n return wrapper" }, { "identifier": "BoxMode", "path": "detectron2/structures/boxes.py", "snippet": "class BoxMode(IntEnum):\n \"\"\"\n Enum of different ways to represent a box.\n \"\"\"\n\n XYXY_ABS = 0\n \"\"\"\n (x0, y0, x1, y1) in absolute floating points coordinates.\n The coordinates in range [0, width or height].\n \"\"\"\n XYWH_ABS = 1\n \"\"\"\n (x0, y0, w, h) in absolute floating points coordinates.\n \"\"\"\n XYXY_REL = 2\n \"\"\"\n Not yet supported!\n (x0, y0, x1, y1) in range [0, 1]. They are relative to the size of the image.\n \"\"\"\n XYWH_REL = 3\n \"\"\"\n Not yet supported!\n (x0, y0, w, h) in range [0, 1]. 
They are relative to the size of the image.\n \"\"\"\n XYWHA_ABS = 4\n \"\"\"\n (xc, yc, w, h, a) in absolute floating points coordinates.\n (xc, yc) is the center of the rotated box, and the angle a is in degrees ccw.\n \"\"\"\n\n @staticmethod\n def convert(box: _RawBoxType, from_mode: \"BoxMode\", to_mode: \"BoxMode\") -> _RawBoxType:\n \"\"\"\n Args:\n box: can be a k-tuple, k-list or an Nxk array/tensor, where k = 4 or 5\n from_mode, to_mode (BoxMode)\n\n Returns:\n The converted box of the same type.\n \"\"\"\n if from_mode == to_mode:\n return box\n\n original_type = type(box)\n is_numpy = isinstance(box, np.ndarray)\n single_box = isinstance(box, (list, tuple))\n if single_box:\n assert len(box) == 4 or len(box) == 5, (\n \"BoxMode.convert takes either a k-tuple/list or an Nxk array/tensor,\"\n \" where k == 4 or 5\"\n )\n arr = torch.tensor(box)[None, :]\n else:\n # avoid modifying the input box\n if is_numpy:\n arr = torch.from_numpy(np.asarray(box)).clone()\n else:\n arr = box.clone()\n\n assert to_mode not in [BoxMode.XYXY_REL, BoxMode.XYWH_REL] and from_mode not in [\n BoxMode.XYXY_REL,\n BoxMode.XYWH_REL,\n ], \"Relative mode not yet supported!\"\n\n if from_mode == BoxMode.XYWHA_ABS and to_mode == BoxMode.XYXY_ABS:\n assert (\n arr.shape[-1] == 5\n ), \"The last dimension of input shape must be 5 for XYWHA format\"\n original_dtype = arr.dtype\n arr = arr.double()\n\n w = arr[:, 2]\n h = arr[:, 3]\n a = arr[:, 4]\n c = torch.abs(torch.cos(a * math.pi / 180.0))\n s = torch.abs(torch.sin(a * math.pi / 180.0))\n # This basically computes the horizontal bounding rectangle of the rotated box\n new_w = c * w + s * h\n new_h = c * h + s * w\n\n # convert center to top-left corner\n arr[:, 0] -= new_w / 2.0\n arr[:, 1] -= new_h / 2.0\n # bottom-right corner\n arr[:, 2] = arr[:, 0] + new_w\n arr[:, 3] = arr[:, 1] + new_h\n\n arr = arr[:, :4].to(dtype=original_dtype)\n elif from_mode == BoxMode.XYWH_ABS and to_mode == BoxMode.XYWHA_ABS:\n original_dtype = arr.dtype\n arr = arr.double()\n arr[:, 0] += arr[:, 2] / 2.0\n arr[:, 1] += arr[:, 3] / 2.0\n angles = torch.zeros((arr.shape[0], 1), dtype=arr.dtype)\n arr = torch.cat((arr, angles), axis=1).to(dtype=original_dtype)\n else:\n if to_mode == BoxMode.XYXY_ABS and from_mode == BoxMode.XYWH_ABS:\n arr[:, 2] += arr[:, 0]\n arr[:, 3] += arr[:, 1]\n elif from_mode == BoxMode.XYXY_ABS and to_mode == BoxMode.XYWH_ABS:\n arr[:, 2] -= arr[:, 0]\n arr[:, 3] -= arr[:, 1]\n else:\n raise NotImplementedError(\n \"Conversion from BoxMode {} to {} is not supported yet\".format(\n from_mode, to_mode\n )\n )\n\n if single_box:\n return original_type(arr.flatten().tolist())\n if is_numpy:\n return arr.numpy()\n else:\n return arr" }, { "identifier": "get_world_size", "path": "detectron2/utils/comm.py", "snippet": "def get_world_size() -> int:\n if not dist.is_available():\n return 1\n if not dist.is_initialized():\n return 1\n return dist.get_world_size()" }, { "identifier": "seed_all_rng", "path": "detectron2/utils/env.py", "snippet": "def seed_all_rng(seed=None):\n \"\"\"\n Set the random seed for the RNG in torch, numpy and python.\n\n Args:\n seed (int): if None, will use a strong random seed.\n \"\"\"\n if seed is None:\n seed = (\n os.getpid()\n + int(datetime.now().strftime(\"%S%f\"))\n + int.from_bytes(os.urandom(2), \"big\")\n )\n logger = logging.getLogger(__name__)\n logger.info(\"Using a generated random seed {}\".format(seed))\n np.random.seed(seed)\n torch.manual_seed(seed)\n random.seed(seed)\n os.environ[\"PYTHONHASHSEED\"] = str(seed)" 
}, { "identifier": "PathManager", "path": "detectron2/utils/file_io.py", "snippet": "class Detectron2Handler(PathHandler):\n PREFIX = \"detectron2://\"\n S3_DETECTRON2_PREFIX = \"https://dl.fbaipublicfiles.com/detectron2/\"\n def _get_supported_prefixes(self):\n def _get_local_path(self, path, **kwargs):\n def _open(self, path, mode=\"r\", **kwargs):" }, { "identifier": "_log_api_usage", "path": "detectron2/utils/logger.py", "snippet": "def _log_api_usage(identifier: str):\n \"\"\"\n Internal function used to log the usage of different detectron2 components\n inside facebook's infra.\n \"\"\"\n torch._C._log_api_usage_once(\"detectron2.\" + identifier)" }, { "identifier": "log_first_n", "path": "detectron2/utils/logger.py", "snippet": "def log_first_n(lvl, msg, n=1, *, name=None, key=\"caller\"):\n \"\"\"\n Log only for the first n times.\n\n Args:\n lvl (int): the logging level\n msg (str):\n n (int):\n name (str): name of the logger to use. Will use the caller's module by default.\n key (str or tuple[str]): the string(s) can be one of \"caller\" or\n \"message\", which defines how to identify duplicated logs.\n For example, if called with `n=1, key=\"caller\"`, this function\n will only log the first call from the same caller, regardless of\n the message content.\n If called with `n=1, key=\"message\"`, this function will log the\n same content only once, even if they are called from different places.\n If called with `n=1, key=(\"caller\", \"message\")`, this function\n will not log only if the same caller has logged the same message before.\n \"\"\"\n if isinstance(key, str):\n key = (key,)\n assert len(key) > 0\n\n caller_module, caller_key = _find_caller()\n hash_key = ()\n if \"caller\" in key:\n hash_key = hash_key + caller_key\n if \"message\" in key:\n hash_key = hash_key + (msg,)\n\n _LOG_COUNTER[hash_key] += 1\n if _LOG_COUNTER[hash_key] <= n:\n logging.getLogger(name or caller_module).log(lvl, msg)" }, { "identifier": "DatasetCatalog", "path": "detectron2/data/catalog.py", "snippet": "class _DatasetCatalog(UserDict):\nclass Metadata(types.SimpleNamespace):\nclass _MetadataCatalog(UserDict):\n def register(self, name, func):\n def get(self, name):\n def list(self) -> List[str]:\n def remove(self, name):\n def __str__(self):\n def __getattr__(self, key):\n def __setattr__(self, key, val):\n def as_dict(self):\n def set(self, **kwargs):\n def get(self, key, default=None):\n def get(self, name):\n def list(self):\n def remove(self, name):\n def __str__(self):\n _RENAMED = {\n \"class_names\": \"thing_classes\",\n \"dataset_id_to_contiguous_id\": \"thing_dataset_id_to_contiguous_id\",\n \"stuff_class_names\": \"stuff_classes\",\n }" }, { "identifier": "AspectRatioGroupedDataset", "path": "detectron2/data/common.py", "snippet": "class AspectRatioGroupedDataset(data.IterableDataset):\n \"\"\"\n Batch data that have similar aspect ratio together.\n In this implementation, images whose aspect ratio < (or >) 1 will\n be batched together.\n This improves training speed because the images then need less padding\n to form a batch.\n\n It assumes the underlying dataset produces dicts with \"width\" and \"height\" keys.\n It will then produce a list of original dicts with length = batch_size,\n all with similar aspect ratios.\n \"\"\"\n\n def __init__(self, dataset, batch_size):\n \"\"\"\n Args:\n dataset: an iterable. 
Each element must be a dict with keys\n \"width\" and \"height\", which will be used to batch data.\n batch_size (int):\n \"\"\"\n self.dataset = dataset\n self.batch_size = batch_size\n self._buckets = [[] for _ in range(2)]\n # Hard-coded two aspect ratio groups: w > h and w < h.\n # Can add support for more aspect ratio groups, but doesn't seem useful\n\n def __iter__(self):\n for d in self.dataset:\n w, h = d[\"width\"], d[\"height\"]\n bucket_id = 0 if w > h else 1\n bucket = self._buckets[bucket_id]\n bucket.append(d)\n if len(bucket) == self.batch_size:\n data = bucket[:]\n # Clear bucket first, because code after yield is not\n # guaranteed to execute\n del bucket[:]\n yield data" }, { "identifier": "DatasetFromList", "path": "detectron2/data/common.py", "snippet": "class DatasetFromList(data.Dataset):\n \"\"\"\n Wrap a list to a torch Dataset. It produces elements of the list as data.\n \"\"\"\n\n def __init__(\n self,\n lst: list,\n copy: bool = True,\n serialize: Union[bool, Callable] = True,\n ):\n \"\"\"\n Args:\n lst (list): a list which contains elements to produce.\n copy (bool): whether to deepcopy the element when producing it,\n so that the result can be modified in place without affecting the\n source in the list.\n serialize (bool or callable): whether to serialize the stroage to other\n backend. If `True`, the default serialize method will be used, if given\n a callable, the callable will be used as serialize method.\n \"\"\"\n self._lst = lst\n self._copy = copy\n if not isinstance(serialize, (bool, Callable)):\n raise TypeError(f\"Unsupported type for argument `serailzie`: {serialize}\")\n self._serialize = serialize is not False\n\n if self._serialize:\n serialize_method = (\n serialize\n if isinstance(serialize, Callable)\n else _DEFAULT_DATASET_FROM_LIST_SERIALIZE_METHOD\n )\n logger.info(f\"Serializing the dataset using: {serialize_method}\")\n self._lst = serialize_method(self._lst)\n\n def __len__(self):\n return len(self._lst)\n\n def __getitem__(self, idx):\n if self._copy and not self._serialize:\n return copy.deepcopy(self._lst[idx])\n else:\n return self._lst[idx]" }, { "identifier": "MapDataset", "path": "detectron2/data/common.py", "snippet": "class MapDataset(data.Dataset):\n \"\"\"\n Map a function over the elements in a dataset.\n \"\"\"\n\n def __init__(self, dataset, map_func):\n \"\"\"\n Args:\n dataset: a dataset where map function is applied. Can be either\n map-style or iterable dataset. When given an iterable dataset,\n the returned object will also be an iterable dataset.\n map_func: a callable which maps the element in dataset. map_func can\n return None to skip the data (e.g. 
in case of errors).\n How None is handled depends on the style of `dataset`.\n If `dataset` is map-style, it randomly tries other elements.\n If `dataset` is iterable, it skips the data and tries the next.\n \"\"\"\n self._dataset = dataset\n self._map_func = PicklableWrapper(map_func) # wrap so that a lambda will work\n\n self._rng = random.Random(42)\n self._fallback_candidates = set(range(len(dataset)))\n\n def __new__(cls, dataset, map_func):\n is_iterable = isinstance(dataset, data.IterableDataset)\n if is_iterable:\n return _MapIterableDataset(dataset, map_func)\n else:\n return super().__new__(cls)\n\n def __getnewargs__(self):\n return self._dataset, self._map_func\n\n def __len__(self):\n return len(self._dataset)\n\n def __getitem__(self, idx):\n retry_count = 0\n cur_idx = int(idx)\n\n while True:\n data = self._map_func(self._dataset[cur_idx])\n if data is not None:\n self._fallback_candidates.add(cur_idx)\n return data\n\n # _map_func fails for this idx, use a random new index from the pool\n retry_count += 1\n self._fallback_candidates.discard(cur_idx)\n cur_idx = self._rng.sample(self._fallback_candidates, k=1)[0]\n\n if retry_count >= 3:\n logger = logging.getLogger(__name__)\n logger.warning(\n \"Failed to apply `_map_func` for idx: {}, retry count: {}\".format(\n idx, retry_count\n )\n )" }, { "identifier": "ToIterableDataset", "path": "detectron2/data/common.py", "snippet": "class ToIterableDataset(data.IterableDataset):\n \"\"\"\n Convert an old indices-based (also called map-style) dataset\n to an iterable-style dataset.\n \"\"\"\n\n def __init__(\n self,\n dataset: data.Dataset,\n sampler: Sampler,\n shard_sampler: bool = True,\n shard_chunk_size: int = 1,\n ):\n \"\"\"\n Args:\n dataset: an old-style dataset with ``__getitem__``\n sampler: a cheap iterable that produces indices to be applied on ``dataset``.\n shard_sampler: whether to shard the sampler based on the current pytorch data loader\n worker id. When an IterableDataset is forked by pytorch's DataLoader into multiple\n workers, it is responsible for sharding its data based on worker id so that workers\n don't produce identical data.\n\n Most samplers (like our TrainingSampler) do not shard based on dataloader worker id\n and this argument should be set to True. But certain samplers may be already\n sharded, in that case this argument should be set to False.\n shard_chunk_size: when sharding the sampler, each worker will\n \"\"\"\n assert not isinstance(dataset, data.IterableDataset), dataset\n assert isinstance(sampler, Sampler), sampler\n self.dataset = dataset\n self.sampler = sampler\n self.shard_sampler = shard_sampler\n self.shard_chunk_size = shard_chunk_size\n\n def __iter__(self):\n if not self.shard_sampler:\n sampler = self.sampler\n else:\n # With map-style dataset, `DataLoader(dataset, sampler)` runs the\n # sampler in main process only. But `DataLoader(ToIterableDataset(dataset, sampler))`\n # will run sampler in every of the N worker. So we should only keep 1/N of the ids on\n # each worker. 
The assumption is that sampler is cheap to iterate so it's fine to\n # discard ids in workers.\n sampler = _shard_iterator_dataloader_worker(self.sampler, self.shard_chunk_size)\n for idx in sampler:\n yield self.dataset[idx]\n\n def __len__(self):\n return len(self.sampler)" }, { "identifier": "DatasetMapper", "path": "detectron2/data/dataset_mapper.py", "snippet": "class DatasetMapper:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by the model.\n\n This is the default callable to be used to map your dataset dict into training data.\n You may need to follow it to implement your own one for customized logic,\n such as a different way to read or transform images.\n See :doc:`/tutorials/data_loading` for details.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies cropping/geometric transforms to the image and annotations\n 3. Prepare data and annotations to Tensor and :class:`Instances`\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train: bool,\n *,\n augmentations: List[Union[T.Augmentation, T.Transform]],\n image_format: str,\n use_instance_mask: bool = False,\n use_keypoint: bool = False,\n instance_mask_format: str = \"polygon\",\n keypoint_hflip_indices: Optional[np.ndarray] = None,\n precomputed_proposal_topk: Optional[int] = None,\n recompute_boxes: bool = False,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n\n Args:\n is_train: whether it's used in training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n image_format: an image format supported by :func:`detection_utils.read_image`.\n use_instance_mask: whether to process instance segmentation annotations, if available\n use_keypoint: whether to process keypoint annotations if available\n instance_mask_format: one of \"polygon\" or \"bitmask\". 
Process instance segmentation\n masks into this format.\n keypoint_hflip_indices: see :func:`detection_utils.create_keypoint_hflip_indices`\n precomputed_proposal_topk: if given, will load pre-computed\n proposals from dataset_dict and keep the top k proposals for each image.\n recompute_boxes: whether to overwrite bounding box annotations\n by computing tight bounding boxes from instance mask annotations.\n \"\"\"\n if recompute_boxes:\n assert use_instance_mask, \"recompute_boxes requires instance masks\"\n # fmt: off\n self.is_train = is_train\n self.augmentations = T.AugmentationList(augmentations)\n self.image_format = image_format\n self.use_instance_mask = use_instance_mask\n self.instance_mask_format = instance_mask_format\n self.use_keypoint = use_keypoint\n self.keypoint_hflip_indices = keypoint_hflip_indices\n self.proposal_topk = precomputed_proposal_topk\n self.recompute_boxes = recompute_boxes\n # fmt: on\n logger = logging.getLogger(__name__)\n mode = \"training\" if is_train else \"inference\"\n logger.info(f\"[DatasetMapper] Augmentations used in {mode}: {augmentations}\")\n\n @classmethod\n def from_config(cls, cfg, is_train: bool = True):\n augs = utils.build_augmentation(cfg, is_train)\n if cfg.INPUT.CROP.ENABLED and is_train:\n augs.insert(0, T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE))\n recompute_boxes = cfg.MODEL.MASK_ON\n else:\n recompute_boxes = False\n\n ret = {\n \"is_train\": is_train,\n \"augmentations\": augs,\n \"image_format\": cfg.INPUT.FORMAT,\n \"use_instance_mask\": cfg.MODEL.MASK_ON,\n \"instance_mask_format\": cfg.INPUT.MASK_FORMAT,\n \"use_keypoint\": cfg.MODEL.KEYPOINT_ON,\n \"recompute_boxes\": recompute_boxes,\n }\n\n if cfg.MODEL.KEYPOINT_ON:\n ret[\"keypoint_hflip_indices\"] = utils.create_keypoint_hflip_indices(cfg.DATASETS.TRAIN)\n\n if cfg.MODEL.LOAD_PROPOSALS:\n ret[\"precomputed_proposal_topk\"] = (\n cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN\n if is_train\n else cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST\n )\n return ret\n\n def _transform_annotations(self, dataset_dict, transforms, image_shape):\n # USER: Modify this if you want to keep them for some reason.\n for anno in dataset_dict[\"annotations\"]:\n if not self.use_instance_mask:\n anno.pop(\"segmentation\", None)\n if not self.use_keypoint:\n anno.pop(\"keypoints\", None)\n\n # USER: Implement additional transformations if you have other types of data\n annos = [\n utils.transform_instance_annotations(\n obj, transforms, image_shape, keypoint_hflip_indices=self.keypoint_hflip_indices\n )\n for obj in dataset_dict.pop(\"annotations\")\n if obj.get(\"iscrowd\", 0) == 0\n ]\n instances = utils.annotations_to_instances(\n annos, image_shape, mask_format=self.instance_mask_format\n )\n\n # After transforms such as cropping are applied, the bounding box may no longer\n # tightly bound the object. As an example, imagine a triangle object\n # [(0,0), (2,0), (0,2)] cropped by a box [(1,0),(2,2)] (XYXY format). 
The tight\n # bounding box of the cropped triangle should be [(1,0),(2,1)], which is not equal to\n # the intersection of original bounding box and the cropping box.\n if self.recompute_boxes:\n instances.gt_boxes = instances.gt_masks.get_bounding_boxes()\n dataset_dict[\"instances\"] = utils.filter_empty_instances(instances)\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n # USER: Write your own image loading if it's not from a file\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.image_format)\n utils.check_image_size(dataset_dict, image)\n\n # USER: Remove if you don't do semantic/panoptic segmentation.\n if \"sem_seg_file_name\" in dataset_dict:\n sem_seg_gt = utils.read_image(dataset_dict.pop(\"sem_seg_file_name\"), \"L\").squeeze(2)\n else:\n sem_seg_gt = None\n\n aug_input = T.AugInput(image, sem_seg=sem_seg_gt)\n transforms = self.augmentations(aug_input)\n image, sem_seg_gt = aug_input.image, aug_input.sem_seg\n\n image_shape = image.shape[:2] # h, w\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n if sem_seg_gt is not None:\n dataset_dict[\"sem_seg\"] = torch.as_tensor(sem_seg_gt.astype(\"long\"))\n\n # USER: Remove if you don't use pre-computed proposals.\n # Most users would not need this feature.\n if self.proposal_topk is not None:\n utils.transform_proposals(\n dataset_dict, image_shape, transforms, proposal_topk=self.proposal_topk\n )\n\n if not self.is_train:\n # USER: Modify this if you want to keep them for some reason.\n dataset_dict.pop(\"annotations\", None)\n dataset_dict.pop(\"sem_seg_file_name\", None)\n return dataset_dict\n\n if \"annotations\" in dataset_dict:\n self._transform_annotations(dataset_dict, transforms, image_shape)\n\n return dataset_dict" }, { "identifier": "check_metadata_consistency", "path": "detectron2/data/detection_utils.py", "snippet": "def check_metadata_consistency(key, dataset_names):\n \"\"\"\n Check that the datasets have consistent metadata.\n\n Args:\n key (str): a metadata key\n dataset_names (list[str]): a list of dataset names\n\n Raises:\n AttributeError: if the key does not exist in the metadata\n ValueError: if the given datasets do not have the same metadata values defined by key\n \"\"\"\n if len(dataset_names) == 0:\n return\n logger = logging.getLogger(__name__)\n entries_per_dataset = [getattr(MetadataCatalog.get(d), key) for d in dataset_names]\n for idx, entry in enumerate(entries_per_dataset):\n if entry != entries_per_dataset[0]:\n logger.error(\n \"Metadata '{}' for dataset '{}' is '{}'\".format(key, dataset_names[idx], str(entry))\n )\n logger.error(\n \"Metadata '{}' for dataset '{}' is '{}'\".format(\n key, dataset_names[0], str(entries_per_dataset[0])\n )\n )\n raise ValueError(\"Datasets have different metadata '{}'!\".format(key))" }, { "identifier": "InferenceSampler", "path": "detectron2/data/samplers/distributed_sampler.py", "snippet": "class InferenceSampler(Sampler):\n \"\"\"\n Produce indices for inference across all workers.\n Inference needs to run on the __exact__ set of samples,\n 
therefore when the total number of samples is not divisible by the number of workers,\n this sampler produces different number of samples on different workers.\n \"\"\"\n\n def __init__(self, size: int):\n \"\"\"\n Args:\n size (int): the total number of data of the underlying dataset to sample from\n \"\"\"\n self._size = size\n assert size > 0\n self._rank = comm.get_rank()\n self._world_size = comm.get_world_size()\n self._local_indices = self._get_local_indices(size, self._world_size, self._rank)\n\n @staticmethod\n def _get_local_indices(total_size, world_size, rank):\n shard_size = total_size // world_size\n left = total_size % world_size\n shard_sizes = [shard_size + int(r < left) for r in range(world_size)]\n\n begin = sum(shard_sizes[:rank])\n end = min(sum(shard_sizes[: rank + 1]), total_size)\n return range(begin, end)\n\n def __iter__(self):\n yield from self._local_indices\n\n def __len__(self):\n return len(self._local_indices)" }, { "identifier": "RandomSubsetTrainingSampler", "path": "detectron2/data/samplers/distributed_sampler.py", "snippet": "class RandomSubsetTrainingSampler(TrainingSampler):\n \"\"\"\n Similar to TrainingSampler, but only sample a random subset of indices.\n This is useful when you want to estimate the accuracy vs data-number curves by\n training the model with different subset_ratio.\n \"\"\"\n\n def __init__(\n self,\n size: int,\n subset_ratio: float,\n shuffle: bool = True,\n seed_shuffle: Optional[int] = None,\n seed_subset: Optional[int] = None,\n ):\n \"\"\"\n Args:\n size (int): the total number of data of the underlying dataset to sample from\n subset_ratio (float): the ratio of subset data to sample from the underlying dataset\n shuffle (bool): whether to shuffle the indices or not\n seed_shuffle (int): the initial seed of the shuffle. Must be the same\n across all workers. If None, will use a random seed shared\n among workers (require synchronization among all workers).\n seed_subset (int): the seed to randomize the subset to be sampled.\n Must be the same across all workers. If None, will use a random seed shared\n among workers (require synchronization among all workers).\n \"\"\"\n super().__init__(size=size, shuffle=shuffle, seed=seed_shuffle)\n\n assert 0.0 < subset_ratio <= 1.0\n self._size_subset = int(size * subset_ratio)\n assert self._size_subset > 0\n if seed_subset is None:\n seed_subset = comm.shared_random_seed()\n self._seed_subset = int(seed_subset)\n\n # randomly generate the subset indexes to be sampled from\n g = torch.Generator()\n g.manual_seed(self._seed_subset)\n indexes_randperm = torch.randperm(self._size, generator=g)\n self._indexes_subset = indexes_randperm[: self._size_subset]\n\n logger.info(\"Using RandomSubsetTrainingSampler......\")\n logger.info(f\"Randomly sample {self._size_subset} data from the original {self._size} data\")\n\n def _infinite_indices(self):\n g = torch.Generator()\n g.manual_seed(self._seed) # self._seed equals seed_shuffle from __init__()\n while True:\n if self._shuffle:\n # generate a random permutation to shuffle self._indexes_subset\n randperm = torch.randperm(self._size_subset, generator=g)\n yield from self._indexes_subset[randperm].tolist()\n else:\n yield from self._indexes_subset.tolist()" }, { "identifier": "RepeatFactorTrainingSampler", "path": "detectron2/data/samplers/distributed_sampler.py", "snippet": "class RepeatFactorTrainingSampler(Sampler):\n \"\"\"\n Similar to TrainingSampler, but a sample may appear more times than others based\n on its \"repeat factor\". 
This is suitable for training on class imbalanced datasets like LVIS.\n \"\"\"\n\n def __init__(self, repeat_factors, *, shuffle=True, seed=None):\n \"\"\"\n Args:\n repeat_factors (Tensor): a float vector, the repeat factor for each indice. When it's\n full of ones, it is equivalent to ``TrainingSampler(len(repeat_factors), ...)``.\n shuffle (bool): whether to shuffle the indices or not\n seed (int): the initial seed of the shuffle. Must be the same\n across all workers. If None, will use a random seed shared\n among workers (require synchronization among all workers).\n \"\"\"\n self._shuffle = shuffle\n if seed is None:\n seed = comm.shared_random_seed()\n self._seed = int(seed)\n\n self._rank = comm.get_rank()\n self._world_size = comm.get_world_size()\n\n # Split into whole number (_int_part) and fractional (_frac_part) parts.\n self._int_part = torch.trunc(repeat_factors)\n self._frac_part = repeat_factors - self._int_part\n\n @staticmethod\n def repeat_factors_from_category_frequency(dataset_dicts, repeat_thresh):\n \"\"\"\n Compute (fractional) per-image repeat factors based on category frequency.\n The repeat factor for an image is a function of the frequency of the rarest\n category labeled in that image. The \"frequency of category c\" in [0, 1] is defined\n as the fraction of images in the training set (without repeats) in which category c\n appears.\n See :paper:`lvis` (>= v2) Appendix B.2.\n\n Args:\n dataset_dicts (list[dict]): annotations in Detectron2 dataset format.\n repeat_thresh (float): frequency threshold below which data is repeated.\n If the frequency is half of `repeat_thresh`, the image will be\n repeated twice.\n\n Returns:\n torch.Tensor:\n the i-th element is the repeat factor for the dataset image at index i.\n \"\"\"\n # 1. For each category c, compute the fraction of images that contain it: f(c)\n category_freq = defaultdict(int)\n for dataset_dict in dataset_dicts: # For each image (without repeats)\n cat_ids = {ann[\"category_id\"] for ann in dataset_dict[\"annotations\"]}\n for cat_id in cat_ids:\n category_freq[cat_id] += 1\n num_images = len(dataset_dicts)\n for k, v in category_freq.items():\n category_freq[k] = v / num_images\n\n # 2. For each category c, compute the category-level repeat factor:\n # r(c) = max(1, sqrt(t / f(c)))\n category_rep = {\n cat_id: max(1.0, math.sqrt(repeat_thresh / cat_freq))\n for cat_id, cat_freq in category_freq.items()\n }\n\n # 3. For each image I, compute the image-level repeat factor:\n # r(I) = max_{c in I} r(c)\n rep_factors = []\n for dataset_dict in dataset_dicts:\n cat_ids = {ann[\"category_id\"] for ann in dataset_dict[\"annotations\"]}\n rep_factor = max({category_rep[cat_id] for cat_id in cat_ids}, default=1.0)\n rep_factors.append(rep_factor)\n\n return torch.tensor(rep_factors, dtype=torch.float32)\n\n def _get_epoch_indices(self, generator):\n \"\"\"\n Create a list of dataset indices (with repeats) to use for one epoch.\n\n Args:\n generator (torch.Generator): pseudo random number generator used for\n stochastic rounding.\n\n Returns:\n torch.Tensor: list of dataset indices to use in one epoch. 
Each index\n is repeated based on its calculated repeat factor.\n \"\"\"\n # Since repeat factors are fractional, we use stochastic rounding so\n # that the target repeat factor is achieved in expectation over the\n # course of training\n rands = torch.rand(len(self._frac_part), generator=generator)\n rep_factors = self._int_part + (rands < self._frac_part).float()\n # Construct a list of indices in which we repeat images as specified\n indices = []\n for dataset_index, rep_factor in enumerate(rep_factors):\n indices.extend([dataset_index] * int(rep_factor.item()))\n return torch.tensor(indices, dtype=torch.int64)\n\n def __iter__(self):\n start = self._rank\n yield from itertools.islice(self._infinite_indices(), start, None, self._world_size)\n\n def _infinite_indices(self):\n g = torch.Generator()\n g.manual_seed(self._seed)\n while True:\n # Sample indices with repeats determined by stochastic rounding; each\n # \"epoch\" may have a slightly different size due to the rounding.\n indices = self._get_epoch_indices(g)\n if self._shuffle:\n randperm = torch.randperm(len(indices), generator=g)\n yield from indices[randperm].tolist()\n else:\n yield from indices.tolist()" }, { "identifier": "TrainingSampler", "path": "detectron2/data/samplers/distributed_sampler.py", "snippet": "class TrainingSampler(Sampler):\n \"\"\"\n In training, we only care about the \"infinite stream\" of training data.\n So this sampler produces an infinite stream of indices and\n all workers cooperate to correctly shuffle the indices and sample different indices.\n\n The samplers in each worker effectively produces `indices[worker_id::num_workers]`\n where `indices` is an infinite stream of indices consisting of\n `shuffle(range(size)) + shuffle(range(size)) + ...` (if shuffle is True)\n or `range(size) + range(size) + ...` (if shuffle is False)\n\n Note that this sampler does not shard based on pytorch DataLoader worker id.\n A sampler passed to pytorch DataLoader is used only with map-style dataset\n and will not be executed inside workers.\n But if this sampler is used in a way that it gets execute inside a dataloader\n worker, then extra work needs to be done to shard its outputs based on worker id.\n This is required so that workers don't produce identical data.\n :class:`ToIterableDataset` implements this logic.\n This note is true for all samplers in detectron2.\n \"\"\"\n\n def __init__(self, size: int, shuffle: bool = True, seed: Optional[int] = None):\n \"\"\"\n Args:\n size (int): the total number of data of the underlying dataset to sample from\n shuffle (bool): whether to shuffle the indices or not\n seed (int): the initial seed of the shuffle. Must be the same\n across all workers. If None, will use a random seed shared\n among workers (require synchronization among all workers).\n \"\"\"\n if not isinstance(size, int):\n raise TypeError(f\"TrainingSampler(size=) expects an int. Got type {type(size)}.\")\n if size <= 0:\n raise ValueError(f\"TrainingSampler(size=) expects a positive int. 
Got {size}.\")\n self._size = size\n self._shuffle = shuffle\n if seed is None:\n seed = comm.shared_random_seed()\n self._seed = int(seed)\n\n self._rank = comm.get_rank()\n self._world_size = comm.get_world_size()\n\n def __iter__(self):\n start = self._rank\n yield from itertools.islice(self._infinite_indices(), start, None, self._world_size)\n\n def _infinite_indices(self):\n g = torch.Generator()\n g.manual_seed(self._seed)\n while True:\n if self._shuffle:\n yield from torch.randperm(self._size, generator=g).tolist()\n else:\n yield from torch.arange(self._size).tolist()" } ]
import itertools import logging import numpy as np import operator import pickle import torch import torch.utils.data as torchdata from collections import OrderedDict, defaultdict from typing import Any, Callable, Dict, List, Optional, Union from tabulate import tabulate from termcolor import colored from detectron2.config import configurable from detectron2.structures import BoxMode from detectron2.utils.comm import get_world_size from detectron2.utils.env import seed_all_rng from detectron2.utils.file_io import PathManager from detectron2.utils.logger import _log_api_usage, log_first_n from .catalog import DatasetCatalog, MetadataCatalog from .common import AspectRatioGroupedDataset, DatasetFromList, MapDataset, ToIterableDataset from .dataset_mapper import DatasetMapper from .detection_utils import check_metadata_consistency from .samplers import ( InferenceSampler, RandomSubsetTrainingSampler, RepeatFactorTrainingSampler, TrainingSampler, )
12,621
if isinstance(dataset, torchdata.IterableDataset): assert sampler is None, "sampler must be None if dataset is IterableDataset" else: if sampler is None: sampler = TrainingSampler(len(dataset)) assert isinstance(sampler, torchdata.Sampler), f"Expect a Sampler but got {type(sampler)}" return build_batch_data_loader( dataset, sampler, total_batch_size, aspect_ratio_grouping=aspect_ratio_grouping, num_workers=num_workers, collate_fn=collate_fn, **kwargs ) def _test_loader_from_config(cfg, dataset_name, mapper=None): """ Uses the given `dataset_name` argument (instead of the names in cfg), because the standard practice is to evaluate each test set individually (not combining them). """ if isinstance(dataset_name, str): dataset_name = [dataset_name] dataset = get_detection_dataset_dicts( dataset_name, filter_empty=False, proposal_files=[ cfg.DATASETS.PROPOSAL_FILES_TEST[list(cfg.DATASETS.TEST).index(x)] for x in dataset_name ] if cfg.MODEL.LOAD_PROPOSALS else None, ) if mapper is None: mapper = DatasetMapper(cfg, False) return { "dataset": dataset, "mapper": mapper, "num_workers": cfg.DATALOADER.NUM_WORKERS, "sampler": InferenceSampler(len(dataset)) if not isinstance(dataset, torchdata.IterableDataset) else None, } @configurable(from_config=_test_loader_from_config) def build_detection_test_loader( dataset: Union[List[Any], torchdata.Dataset], *, mapper: Callable[[Dict[str, Any]], Any], sampler: Optional[torchdata.Sampler] = None, batch_size: int = 1, num_workers: int = 0, collate_fn: Optional[Callable[[List[Any]], Any]] = None, ) -> torchdata.DataLoader: """ Similar to `build_detection_train_loader`, with default batch size = 1, and sampler = :class:`InferenceSampler`. This sampler coordinates all workers to produce the exact set of all samples. Args: dataset: a list of dataset dicts, or a pytorch dataset (either map-style or iterable). They can be obtained by using :func:`DatasetCatalog.get` or :func:`get_detection_dataset_dicts`. mapper: a callable which takes a sample (dict) from dataset and returns the format to be consumed by the model. When using cfg, the default choice is ``DatasetMapper(cfg, is_train=False)``. sampler: a sampler that produces indices to be applied on ``dataset``. Default to :class:`InferenceSampler`, which splits the dataset across all workers. Sampler must be None if `dataset` is iterable. batch_size: the batch size of the data loader to be created. Default to 1 image per worker since this is the standard when reporting inference time in papers. num_workers: number of parallel data loading workers collate_fn: same as the argument of `torch.utils.data.DataLoader`. Defaults to do no collation and return a list of data. Returns: DataLoader: a torch DataLoader, that loads the given detection dataset, with test-time transformation and batching. 
Examples: :: data_loader = build_detection_test_loader( DatasetRegistry.get("my_test"), mapper=DatasetMapper(...)) # or, instantiate with a CfgNode: data_loader = build_detection_test_loader(cfg, "my_test") """ if isinstance(dataset, list): dataset = DatasetFromList(dataset, copy=False) if mapper is not None: dataset = MapDataset(dataset, mapper) if isinstance(dataset, torchdata.IterableDataset): assert sampler is None, "sampler must be None if dataset is IterableDataset" else: if sampler is None: sampler = InferenceSampler(len(dataset)) return torchdata.DataLoader( dataset, batch_size=batch_size, sampler=sampler, drop_last=False, num_workers=num_workers, collate_fn=trivial_batch_collator if collate_fn is None else collate_fn, ) def trivial_batch_collator(batch): """ A batch collator that does nothing. """ return batch def worker_init_reset_seed(worker_id): initial_seed = torch.initial_seed() % 2**31
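The loader builders above deliberately skip batch collation: by default each batch is just the list of per-image dicts returned by trivial_batch_collator. The sketch below shows that behavior with a toy map-style dataset; ToyDetectionDataset is a hypothetical stand-in invented for the example, not part of the library.

```python
import torch
import torch.utils.data as torchdata

class ToyDetectionDataset(torchdata.Dataset):
    # Hypothetical stand-in for a mapped detection dataset: each item is a dict.
    def __len__(self):
        return 4

    def __getitem__(self, idx):
        return {"image": torch.rand(3, 8, 8), "image_id": idx}

def trivial_batch_collator(batch):
    # As in the snippet above: do no collation, return the list of dicts as-is.
    return batch

loader = torchdata.DataLoader(
    ToyDetectionDataset(), batch_size=2, collate_fn=trivial_batch_collator
)
for batch in loader:
    # Each batch is a plain list[dict] of length batch_size, not a stacked tensor.
    print(type(batch).__name__, len(batch), [d["image_id"] for d in batch])
```

Each printed batch is a Python list of length 2 whose elements are the untouched dataset dicts, which is the format the detection loaders hand to the model.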
# Copyright (c) Facebook, Inc. and its affiliates. """ This file contains the default logic to build a dataloader for training or testing. """ __all__ = [ "build_batch_data_loader", "build_detection_train_loader", "build_detection_test_loader", "get_detection_dataset_dicts", "load_proposals_into_dataset", "print_instances_class_histogram", ] def filter_images_with_only_crowd_annotations(dataset_dicts): """ Filter out images with none annotations or only crowd annotations (i.e., images without non-crowd annotations). A common training-time preprocessing on COCO dataset. Args: dataset_dicts (list[dict]): annotations in Detectron2 Dataset format. Returns: list[dict]: the same format, but filtered. """ num_before = len(dataset_dicts) def valid(anns): for ann in anns: if ann.get("iscrowd", 0) == 0: return True return False dataset_dicts = [x for x in dataset_dicts if valid(x["annotations"])] num_after = len(dataset_dicts) logger = logging.getLogger(__name__) logger.info( "Removed {} images with no usable annotations. {} images left.".format( num_before - num_after, num_after ) ) return dataset_dicts def filter_images_with_few_keypoints(dataset_dicts, min_keypoints_per_image): """ Filter out images with too few number of keypoints. Args: dataset_dicts (list[dict]): annotations in Detectron2 Dataset format. Returns: list[dict]: the same format as dataset_dicts, but filtered. """ num_before = len(dataset_dicts) def visible_keypoints_in_image(dic): # Each keypoints field has the format [x1, y1, v1, ...], where v is visibility annotations = dic["annotations"] return sum( (np.array(ann["keypoints"][2::3]) > 0).sum() for ann in annotations if "keypoints" in ann ) dataset_dicts = [ x for x in dataset_dicts if visible_keypoints_in_image(x) >= min_keypoints_per_image ] num_after = len(dataset_dicts) logger = logging.getLogger(__name__) logger.info( "Removed {} images with fewer than {} keypoints.".format( num_before - num_after, min_keypoints_per_image ) ) return dataset_dicts def load_proposals_into_dataset(dataset_dicts, proposal_file): """ Load precomputed object proposals into the dataset. The proposal file should be a pickled dict with the following keys: - "ids": list[int] or list[str], the image ids - "boxes": list[np.ndarray], each is an Nx4 array of boxes corresponding to the image id - "objectness_logits": list[np.ndarray], each is an N sized array of objectness scores corresponding to the boxes. - "bbox_mode": the BoxMode of the boxes array. Defaults to ``BoxMode.XYXY_ABS``. Args: dataset_dicts (list[dict]): annotations in Detectron2 Dataset format. proposal_file (str): file path of pre-computed proposals, in pkl format. Returns: list[dict]: the same format as dataset_dicts, but added proposal field. """ logger = logging.getLogger(__name__) logger.info("Loading proposals from: {}".format(proposal_file)) with PathManager.open(proposal_file, "rb") as f: proposals = pickle.load(f, encoding="latin1") # Rename the key names in D1 proposal files rename_keys = {"indexes": "ids", "scores": "objectness_logits"} for key in rename_keys: if key in proposals: proposals[rename_keys[key]] = proposals.pop(key) # Fetch the indexes of all proposals that are in the dataset # Convert image_id to str since they could be int. 
img_ids = set({str(record["image_id"]) for record in dataset_dicts}) id_to_index = {str(id): i for i, id in enumerate(proposals["ids"]) if str(id) in img_ids} # Assuming default bbox_mode of precomputed proposals are 'XYXY_ABS' bbox_mode = BoxMode(proposals["bbox_mode"]) if "bbox_mode" in proposals else BoxMode.XYXY_ABS for record in dataset_dicts: # Get the index of the proposal i = id_to_index[str(record["image_id"])] boxes = proposals["boxes"][i] objectness_logits = proposals["objectness_logits"][i] # Sort the proposals in descending order of the scores inds = objectness_logits.argsort()[::-1] record["proposal_boxes"] = boxes[inds] record["proposal_objectness_logits"] = objectness_logits[inds] record["proposal_bbox_mode"] = bbox_mode return dataset_dicts def print_instances_class_histogram(dataset_dicts, class_names): """ Args: dataset_dicts (list[dict]): list of dataset dicts. class_names (list[str]): list of class names (zero-indexed). """ num_classes = len(class_names) hist_bins = np.arange(num_classes + 1) histogram = np.zeros((num_classes,), dtype=int) for entry in dataset_dicts: annos = entry["annotations"] classes = np.asarray( [x["category_id"] for x in annos if not x.get("iscrowd", 0)], dtype=int ) if len(classes): assert classes.min() >= 0, f"Got an invalid category_id={classes.min()}" assert ( classes.max() < num_classes ), f"Got an invalid category_id={classes.max()} for a dataset of {num_classes} classes" histogram += np.histogram(classes, bins=hist_bins)[0] N_COLS = min(6, len(class_names) * 2) def short_name(x): # make long class names shorter. useful for lvis if len(x) > 13: return x[:11] + ".." return x data = list( itertools.chain(*[[short_name(class_names[i]), int(v)] for i, v in enumerate(histogram)]) ) total_num_instances = sum(data[1::2]) data.extend([None] * (N_COLS - (len(data) % N_COLS))) if num_classes > 1: data.extend(["total", total_num_instances]) data = itertools.zip_longest(*[data[i::N_COLS] for i in range(N_COLS)]) table = tabulate( data, headers=["category", "#instances"] * (N_COLS // 2), tablefmt="pipe", numalign="left", stralign="center", ) log_first_n( logging.INFO, "Distribution of instances among all {} categories:\n".format(num_classes) + colored(table, "cyan"), key="message", ) def get_detection_dataset_dicts( names, filter_empty=True, min_keypoints=0, proposal_files=None, check_consistency=True, ): """ Load and prepare dataset dicts for instance detection/segmentation and semantic segmentation. Args: names (str or list[str]): a dataset name or a list of dataset names filter_empty (bool): whether to filter out images without instance annotations min_keypoints (int): filter out images with fewer keypoints than `min_keypoints`. Set to 0 to do nothing. proposal_files (list[str]): if given, a list of object proposal files that match each dataset in `names`. check_consistency (bool): whether to check if datasets have consistent metadata. Returns: list[dict]: a list of dicts following the standard dataset dict format. """ if isinstance(names, str): names = [names] assert len(names), names available_datasets = DatasetCatalog.keys() names_set = set(names) if not names_set.issubset(available_datasets): logger = logging.getLogger(__name__) logger.warning( "The following dataset names are not registered in the DatasetCatalog: " f"{names_set - available_datasets}. 
" f"Available datasets are {available_datasets}" ) dataset_dicts = [DatasetCatalog.get(dataset_name) for dataset_name in names] if isinstance(dataset_dicts[0], torchdata.Dataset): if len(dataset_dicts) > 1: # ConcatDataset does not work for iterable style dataset. # We could support concat for iterable as well, but it's often # not a good idea to concat iterables anyway. return torchdata.ConcatDataset(dataset_dicts) return dataset_dicts[0] for dataset_name, dicts in zip(names, dataset_dicts): assert len(dicts), "Dataset '{}' is empty!".format(dataset_name) if proposal_files is not None: assert len(names) == len(proposal_files) # load precomputed proposals from proposal files dataset_dicts = [ load_proposals_into_dataset(dataset_i_dicts, proposal_file) for dataset_i_dicts, proposal_file in zip(dataset_dicts, proposal_files) ] dataset_dicts = list(itertools.chain.from_iterable(dataset_dicts)) has_instances = "annotations" in dataset_dicts[0] if filter_empty and has_instances: dataset_dicts = filter_images_with_only_crowd_annotations(dataset_dicts) if min_keypoints > 0 and has_instances: dataset_dicts = filter_images_with_few_keypoints(dataset_dicts, min_keypoints) if check_consistency and has_instances: try: class_names = MetadataCatalog.get(names[0]).thing_classes check_metadata_consistency("thing_classes", names) print_instances_class_histogram(dataset_dicts, class_names) except AttributeError: # class names are not available for this dataset pass assert len(dataset_dicts), "No valid data found in {}.".format(",".join(names)) return dataset_dicts def build_batch_data_loader( dataset, sampler, total_batch_size, *, aspect_ratio_grouping=False, num_workers=0, collate_fn=None, drop_last: bool = True, **kwargs, ): """ Build a batched dataloader. The main differences from `torch.utils.data.DataLoader` are: 1. support aspect ratio grouping options 2. use no "batch collation", because this is common for detection training Args: dataset (torch.utils.data.Dataset): a pytorch map-style or iterable dataset. sampler (torch.utils.data.sampler.Sampler or None): a sampler that produces indices. Must be provided iff. ``dataset`` is a map-style dataset. total_batch_size, aspect_ratio_grouping, num_workers, collate_fn: see :func:`build_detection_train_loader`. drop_last (bool): if ``True``, the dataloader will drop incomplete batches. Returns: iterable[list]. Length of each list is the batch size of the current GPU. Each element in the list comes from the dataset. """ world_size = get_world_size() assert ( total_batch_size > 0 and total_batch_size % world_size == 0 ), "Total batch size ({}) must be divisible by the number of gpus ({}).".format( total_batch_size, world_size ) batch_size = total_batch_size // world_size logger = logging.getLogger(__name__) logger.info("Making batched data loader with batch_size=%d", batch_size) if isinstance(dataset, torchdata.IterableDataset): assert sampler is None, "sampler must be None if dataset is IterableDataset" else: dataset = ToIterableDataset(dataset, sampler, shard_chunk_size=batch_size) if aspect_ratio_grouping: assert drop_last, "Aspect ratio grouping will drop incomplete batches." 
data_loader = torchdata.DataLoader( dataset, num_workers=num_workers, collate_fn=operator.itemgetter(0), # don't batch, but yield individual elements worker_init_fn=worker_init_reset_seed, **kwargs ) # yield individual mapped dict data_loader = AspectRatioGroupedDataset(data_loader, batch_size) if collate_fn is None: return data_loader return MapDataset(data_loader, collate_fn) else: return torchdata.DataLoader( dataset, batch_size=batch_size, drop_last=drop_last, num_workers=num_workers, collate_fn=trivial_batch_collator if collate_fn is None else collate_fn, worker_init_fn=worker_init_reset_seed, **kwargs ) def _get_train_datasets_repeat_factors(cfg) -> Dict[str, float]: repeat_factors = cfg.DATASETS.TRAIN_REPEAT_FACTOR assert all(len(tup) == 2 for tup in repeat_factors) name_to_weight = defaultdict(lambda: 1, dict(repeat_factors)) # The sampling weights map should only contain datasets in train config unrecognized = set(name_to_weight.keys()) - set(cfg.DATASETS.TRAIN) assert not unrecognized, f"unrecognized datasets: {unrecognized}" logger = logging.getLogger(__name__) logger.info(f"Found repeat factors: {list(name_to_weight.items())}") # pyre-fixme[7]: Expected `Dict[str, float]` but got `DefaultDict[typing.Any, int]`. return name_to_weight def _build_weighted_sampler(cfg, enable_category_balance=False): dataset_repeat_factors = _get_train_datasets_repeat_factors(cfg) # OrderedDict to guarantee order of values() consistent with repeat factors dataset_name_to_dicts = OrderedDict( { name: get_detection_dataset_dicts( [name], filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS, min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE if cfg.MODEL.KEYPOINT_ON else 0, proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None, ) for name in cfg.DATASETS.TRAIN } ) # Repeat factor for every sample in the dataset repeat_factors = [ [dataset_repeat_factors[dsname]] * len(dataset_name_to_dicts[dsname]) for dsname in cfg.DATASETS.TRAIN ] repeat_factors = list(itertools.chain.from_iterable(repeat_factors)) repeat_factors = torch.tensor(repeat_factors) logger = logging.getLogger(__name__) if enable_category_balance: """ 1. Calculate repeat factors using category frequency for each dataset and then merge them. 2. Element wise dot producting the dataset frequency repeat factors with the category frequency repeat factors gives the final repeat factors. 
""" category_repeat_factors = [ RepeatFactorTrainingSampler.repeat_factors_from_category_frequency( dataset_dict, cfg.DATALOADER.REPEAT_THRESHOLD ) for dataset_dict in dataset_name_to_dicts.values() ] # flatten the category repeat factors from all datasets category_repeat_factors = list(itertools.chain.from_iterable(category_repeat_factors)) category_repeat_factors = torch.tensor(category_repeat_factors) repeat_factors = torch.mul(category_repeat_factors, repeat_factors) repeat_factors = repeat_factors / torch.min(repeat_factors) logger.info( "Using WeightedCategoryTrainingSampler with repeat_factors={}".format( cfg.DATASETS.TRAIN_REPEAT_FACTOR ) ) else: logger.info( "Using WeightedTrainingSampler with repeat_factors={}".format( cfg.DATASETS.TRAIN_REPEAT_FACTOR ) ) sampler = RepeatFactorTrainingSampler(repeat_factors) return sampler def _train_loader_from_config(cfg, mapper=None, *, dataset=None, sampler=None): if dataset is None: dataset = get_detection_dataset_dicts( cfg.DATASETS.TRAIN, filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS, min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE if cfg.MODEL.KEYPOINT_ON else 0, proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None, ) _log_api_usage("dataset." + cfg.DATASETS.TRAIN[0]) if mapper is None: mapper = DatasetMapper(cfg, True) if sampler is None: sampler_name = cfg.DATALOADER.SAMPLER_TRAIN logger = logging.getLogger(__name__) if isinstance(dataset, torchdata.IterableDataset): logger.info("Not using any sampler since the dataset is IterableDataset.") sampler = None else: logger.info("Using training sampler {}".format(sampler_name)) if sampler_name == "TrainingSampler": sampler = TrainingSampler(len(dataset)) elif sampler_name == "RepeatFactorTrainingSampler": repeat_factors = RepeatFactorTrainingSampler.repeat_factors_from_category_frequency( dataset, cfg.DATALOADER.REPEAT_THRESHOLD ) sampler = RepeatFactorTrainingSampler(repeat_factors) elif sampler_name == "RandomSubsetTrainingSampler": sampler = RandomSubsetTrainingSampler( len(dataset), cfg.DATALOADER.RANDOM_SUBSET_RATIO ) elif sampler_name == "WeightedTrainingSampler": sampler = _build_weighted_sampler(cfg) elif sampler_name == "WeightedCategoryTrainingSampler": sampler = _build_weighted_sampler(cfg, enable_category_balance=True) else: raise ValueError("Unknown training sampler: {}".format(sampler_name)) return { "dataset": dataset, "sampler": sampler, "mapper": mapper, "total_batch_size": cfg.SOLVER.IMS_PER_BATCH, "aspect_ratio_grouping": cfg.DATALOADER.ASPECT_RATIO_GROUPING, "num_workers": cfg.DATALOADER.NUM_WORKERS, } @configurable(from_config=_train_loader_from_config) def build_detection_train_loader( dataset, *, mapper, sampler=None, total_batch_size, aspect_ratio_grouping=True, num_workers=0, collate_fn=None, **kwargs ): """ Build a dataloader for object detection with some default features. Args: dataset (list or torch.utils.data.Dataset): a list of dataset dicts, or a pytorch dataset (either map-style or iterable). It can be obtained by using :func:`DatasetCatalog.get` or :func:`get_detection_dataset_dicts`. mapper (callable): a callable which takes a sample (dict) from dataset and returns the format to be consumed by the model. When using cfg, the default choice is ``DatasetMapper(cfg, is_train=True)``. sampler (torch.utils.data.sampler.Sampler or None): a sampler that produces indices to be applied on ``dataset``. 
If ``dataset`` is map-style, the default sampler is a :class:`TrainingSampler`, which coordinates an infinite random shuffle sequence across all workers. Sampler must be None if ``dataset`` is iterable. total_batch_size (int): total batch size across all workers. aspect_ratio_grouping (bool): whether to group images with similar aspect ratio for efficiency. When enabled, it requires each element in dataset be a dict with keys "width" and "height". num_workers (int): number of parallel data loading workers collate_fn: a function that determines how to do batching, same as the argument of `torch.utils.data.DataLoader`. Defaults to do no collation and return a list of data. No collation is OK for small batch size and simple data structures. If your batch size is large and each sample contains too many small tensors, it's more efficient to collate them in data loader. Returns: torch.utils.data.DataLoader: a dataloader. Each output from it is a ``list[mapped_element]`` of length ``total_batch_size / num_workers``, where ``mapped_element`` is produced by the ``mapper``. """ if isinstance(dataset, list): dataset = DatasetFromList(dataset, copy=False) if mapper is not None: dataset = MapDataset(dataset, mapper) if isinstance(dataset, torchdata.IterableDataset): assert sampler is None, "sampler must be None if dataset is IterableDataset" else: if sampler is None: sampler = TrainingSampler(len(dataset)) assert isinstance(sampler, torchdata.Sampler), f"Expect a Sampler but got {type(sampler)}" return build_batch_data_loader( dataset, sampler, total_batch_size, aspect_ratio_grouping=aspect_ratio_grouping, num_workers=num_workers, collate_fn=collate_fn, **kwargs ) def _test_loader_from_config(cfg, dataset_name, mapper=None): """ Uses the given `dataset_name` argument (instead of the names in cfg), because the standard practice is to evaluate each test set individually (not combining them). """ if isinstance(dataset_name, str): dataset_name = [dataset_name] dataset = get_detection_dataset_dicts( dataset_name, filter_empty=False, proposal_files=[ cfg.DATASETS.PROPOSAL_FILES_TEST[list(cfg.DATASETS.TEST).index(x)] for x in dataset_name ] if cfg.MODEL.LOAD_PROPOSALS else None, ) if mapper is None: mapper = DatasetMapper(cfg, False) return { "dataset": dataset, "mapper": mapper, "num_workers": cfg.DATALOADER.NUM_WORKERS, "sampler": InferenceSampler(len(dataset)) if not isinstance(dataset, torchdata.IterableDataset) else None, } @configurable(from_config=_test_loader_from_config) def build_detection_test_loader( dataset: Union[List[Any], torchdata.Dataset], *, mapper: Callable[[Dict[str, Any]], Any], sampler: Optional[torchdata.Sampler] = None, batch_size: int = 1, num_workers: int = 0, collate_fn: Optional[Callable[[List[Any]], Any]] = None, ) -> torchdata.DataLoader: """ Similar to `build_detection_train_loader`, with default batch size = 1, and sampler = :class:`InferenceSampler`. This sampler coordinates all workers to produce the exact set of all samples. Args: dataset: a list of dataset dicts, or a pytorch dataset (either map-style or iterable). They can be obtained by using :func:`DatasetCatalog.get` or :func:`get_detection_dataset_dicts`. mapper: a callable which takes a sample (dict) from dataset and returns the format to be consumed by the model. When using cfg, the default choice is ``DatasetMapper(cfg, is_train=False)``. sampler: a sampler that produces indices to be applied on ``dataset``. Default to :class:`InferenceSampler`, which splits the dataset across all workers. 
Sampler must be None if `dataset` is iterable. batch_size: the batch size of the data loader to be created. Default to 1 image per worker since this is the standard when reporting inference time in papers. num_workers: number of parallel data loading workers collate_fn: same as the argument of `torch.utils.data.DataLoader`. Defaults to do no collation and return a list of data. Returns: DataLoader: a torch DataLoader, that loads the given detection dataset, with test-time transformation and batching. Examples: :: data_loader = build_detection_test_loader( DatasetRegistry.get("my_test"), mapper=DatasetMapper(...)) # or, instantiate with a CfgNode: data_loader = build_detection_test_loader(cfg, "my_test") """ if isinstance(dataset, list): dataset = DatasetFromList(dataset, copy=False) if mapper is not None: dataset = MapDataset(dataset, mapper) if isinstance(dataset, torchdata.IterableDataset): assert sampler is None, "sampler must be None if dataset is IterableDataset" else: if sampler is None: sampler = InferenceSampler(len(dataset)) return torchdata.DataLoader( dataset, batch_size=batch_size, sampler=sampler, drop_last=False, num_workers=num_workers, collate_fn=trivial_batch_collator if collate_fn is None else collate_fn, ) def trivial_batch_collator(batch): """ A batch collator that does nothing. """ return batch def worker_init_reset_seed(worker_id): initial_seed = torch.initial_seed() % 2**31
seed_all_rng(initial_seed + worker_id)
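The worker_init_reset_seed pattern completed just above derives a bounded per-worker seed and reseeds the RNGs inside each dataloader worker, so workers do not produce identical random augmentations. The following rough sketch shows how such a hook can be wired into a DataLoader; seed_all_rng here is a simplified stand-in for the detectron2 utility, and ToyDataset is invented for illustration.

```python
import random

import numpy as np
import torch
import torch.utils.data as torchdata

def seed_all_rng(seed):
    # Simplified stand-in for detectron2.utils.env.seed_all_rng: reseed the common RNGs.
    random.seed(seed)
    np.random.seed(seed % (2 ** 32))
    torch.manual_seed(seed)

def worker_init_reset_seed(worker_id):
    # Same pattern as the completed function above: a bounded, per-worker seed.
    initial_seed = torch.initial_seed() % 2 ** 31
    seed_all_rng(initial_seed + worker_id)

class ToyDataset(torchdata.Dataset):
    # Invented dataset whose samples depend on the (re)seeded numpy RNG.
    def __len__(self):
        return 4

    def __getitem__(self, idx):
        return idx, float(np.random.rand())

if __name__ == "__main__":
    loader = torchdata.DataLoader(
        ToyDataset(), batch_size=1, num_workers=2,
        worker_init_fn=worker_init_reset_seed,
    )
    for idx, value in loader:
        print(int(idx), round(float(value), 4))
```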
gold_snippet_index: 3
created_at: 2023-12-05 01:13:31+00:00
level: 16k
repo_name: upfusion3d/upfusion
file_path: scripts/run_distillation.py
[ { "identifier": "NeRFNetwork", "path": "external/nerf/network_grid.py", "snippet": "class NeRFNetwork(NeRFRenderer):\n def __init__(self, \n opt,\n num_layers=3,\n hidden_dim=64,\n num_layers_bg=2,\n hidden_dim_bg=64,\n ):\n \n super().__init__(opt)\n\n self.num_layers = num_layers\n self.hidden_dim = hidden_dim\n\n self.encoder, self.in_dim = get_encoder('tiledgrid', input_dim=3, log2_hashmap_size=16, desired_resolution=2048 * self.bound)\n\n self.sigma_net = MLP(self.in_dim, 4, hidden_dim, num_layers, bias=True)\n\n # background network\n if self.bg_radius > 0:\n self.num_layers_bg = num_layers_bg \n self.hidden_dim_bg = hidden_dim_bg\n \n # use a very simple network to avoid it learning the prompt...\n # self.encoder_bg, self.in_dim_bg = get_encoder('tiledgrid', input_dim=2, num_levels=4, desired_resolution=2048)\n self.encoder_bg, self.in_dim_bg = get_encoder('frequency', input_dim=3)\n\n self.bg_net = MLP(self.in_dim_bg, 3, hidden_dim_bg, num_layers_bg, bias=True)\n \n else:\n self.bg_net = None\n\n # add a density blob to the scene center\n def gaussian(self, x):\n # x: [B, N, 3]\n \n d = (x ** 2).sum(-1)\n g = 5 * torch.exp(-d / (2 * 0.2 ** 2))\n\n return g\n\n def common_forward(self, x):\n # x: [N, 3], in [-bound, bound]\n\n # sigma\n h = self.encoder(x, bound=self.bound)\n\n h = self.sigma_net(h)\n\n sigma = trunc_exp(h[..., 0] + self.gaussian(x))\n albedo = torch.sigmoid(h[..., 1:])\n\n return sigma, albedo\n \n # ref: https://github.com/zhaofuq/Instant-NSR/blob/main/nerf/network_sdf.py#L192\n def finite_difference_normal(self, x, epsilon=1e-2):\n # x: [N, 3]\n dx_pos, _ = self.common_forward((x + torch.tensor([[epsilon, 0.00, 0.00]], device=x.device)).clamp(-self.bound, self.bound))\n dx_neg, _ = self.common_forward((x + torch.tensor([[-epsilon, 0.00, 0.00]], device=x.device)).clamp(-self.bound, self.bound))\n dy_pos, _ = self.common_forward((x + torch.tensor([[0.00, epsilon, 0.00]], device=x.device)).clamp(-self.bound, self.bound))\n dy_neg, _ = self.common_forward((x + torch.tensor([[0.00, -epsilon, 0.00]], device=x.device)).clamp(-self.bound, self.bound))\n dz_pos, _ = self.common_forward((x + torch.tensor([[0.00, 0.00, epsilon]], device=x.device)).clamp(-self.bound, self.bound))\n dz_neg, _ = self.common_forward((x + torch.tensor([[0.00, 0.00, -epsilon]], device=x.device)).clamp(-self.bound, self.bound))\n \n normal = torch.stack([\n 0.5 * (dx_pos - dx_neg) / epsilon, \n 0.5 * (dy_pos - dy_neg) / epsilon, \n 0.5 * (dz_pos - dz_neg) / epsilon\n ], dim=-1)\n\n return normal\n\n @torch.no_grad()\n def common_forward_smooth(self, x, radius=1e-2, k=5):\n # x: [N, 3], in [-bound, bound]\n\n x_repeat = x.unsqueeze(0).repeat(k, 1, 1)\n eps = torch.rand_like(x_repeat) * radius * 2 - radius\n x_sample = x_repeat + eps\n\n sigma_list, albedo_list = [], []\n for xi in range(len(x_sample)):\n # sigma\n h = self.encoder(x_sample[xi], bound=self.bound)\n\n h = self.sigma_net(h)\n\n sigma = trunc_exp(h[..., 0] + self.gaussian(x))\n albedo = torch.sigmoid(h[..., 1:])\n\n sigma_list.append(sigma)\n albedo_list.append(albedo)\n sigma = torch.stack(sigma_list, dim=0).mean(dim=0)\n albedo = torch.stack(albedo_list, dim=0).mean(dim=0)\n return sigma, albedo\n\n @torch.no_grad()\n def finite_difference_normal_smooth(self, x, epsilon=3e-1):\n # x: [N, 3]\n # eps 1e-2 | radius\n # eps 3e-1 | radius 3e-1\n radius = 3e-1\n k = 10\n dx_pos, _ = self.common_forward_smooth((x + torch.tensor([[epsilon, 0.00, 0.00]], device=x.device)).clamp(-self.bound, self.bound), radius=radius, k=k)\n dx_neg, _ = 
self.common_forward_smooth((x + torch.tensor([[-epsilon, 0.00, 0.00]], device=x.device)).clamp(-self.bound, self.bound), radius=radius, k=k)\n dy_pos, _ = self.common_forward_smooth((x + torch.tensor([[0.00, epsilon, 0.00]], device=x.device)).clamp(-self.bound, self.bound), radius=radius, k=k)\n dy_neg, _ = self.common_forward_smooth((x + torch.tensor([[0.00, -epsilon, 0.00]], device=x.device)).clamp(-self.bound, self.bound), radius=radius, k=k)\n dz_pos, _ = self.common_forward_smooth((x + torch.tensor([[0.00, 0.00, epsilon]], device=x.device)).clamp(-self.bound, self.bound), radius=radius, k=k)\n dz_neg, _ = self.common_forward_smooth((x + torch.tensor([[0.00, 0.00, -epsilon]], device=x.device)).clamp(-self.bound, self.bound), radius=radius, k=k)\n \n normal = torch.stack([\n 0.5 * (dx_pos - dx_neg) / epsilon, \n 0.5 * (dy_pos - dy_neg) / epsilon, \n 0.5 * (dz_pos - dz_neg) / epsilon\n ], dim=-1)\n\n return normal\n\n\n def normal(self, x, smooth=False):\n \n if smooth:\n normal = self.finite_difference_normal_smooth(x)\n else:\n normal = self.finite_difference_normal(x)\n normal = safe_normalize(normal)\n normal[torch.isnan(normal)] = 0\n\n return normal\n\n \n def forward(self, x, d, l=None, ratio=1, shading='textureless'):\n # x: [N, 3], in [-bound, bound]\n # d: [N, 3], view direction, nomalized in [-1, 1]\n # l: [3], plane light direction, nomalized in [-1, 1]\n # ratio: scalar, ambient ratio, 1 == no shading (albedo only), 0 == only shading (textureless)\n\n if shading == 'albedo':\n # no need to query normal\n sigma, color = self.common_forward(x)\n normal = None\n \n else:\n # query normal\n\n sigma, albedo = self.common_forward(x)\n if shading == 'textureless' or shading == 'normal':\n normal = self.normal(x, smooth=True)\n else:\n normal = self.normal(x)\n\n # lambertian shading\n lambertian = ratio + (1 - ratio) * (normal @ -l).clamp(min=0) # [N,]\n\n if shading == 'textureless':\n color = lambertian.unsqueeze(-1).repeat(1, 3)*0.8 + .2\n elif shading == 'normal':\n color = (normal + 1) / 2\n else: # 'lambertian'\n color = albedo * lambertian.unsqueeze(-1)\n \n return sigma, color, normal\n\n \n def density(self, x):\n # x: [N, 3], in [-bound, bound]\n \n sigma, albedo = self.common_forward(x)\n \n return {\n 'sigma': sigma,\n 'albedo': albedo,\n }\n\n\n def background(self, d):\n\n h = self.encoder_bg(d) # [N, C]\n \n h = self.bg_net(h)\n\n # sigmoid activation for rgb\n rgbs = torch.sigmoid(h)\n\n return rgbs\n\n # optimizer utils\n def get_params(self, lr):\n\n params = [\n {'params': self.encoder.parameters(), 'lr': lr * 10},\n {'params': self.sigma_net.parameters(), 'lr': lr},\n ] \n\n if self.bg_radius > 0:\n params.append({'params': self.encoder_bg.parameters(), 'lr': lr * 10})\n params.append({'params': self.bg_net.parameters(), 'lr': lr})\n\n return params" }, { "identifier": "get_lpips_fn", "path": "external/sparsefusion_utils/common_utils.py", "snippet": "def get_lpips_fn():\n '''\n Return LPIPS function\n '''\n loss_fn_vgg = lpips.LPIPS(net='vgg')\n return loss_fn_vgg" }, { "identifier": "normalize", "path": "external/sparsefusion_utils/common_utils.py", "snippet": "def normalize(x):\n '''\n Normalize [0, 1] to [-1, 1]\n '''\n return torch.clip(x*2 - 1.0, -1.0, 1.0)" }, { "identifier": "unnormalize", "path": "external/sparsefusion_utils/common_utils.py", "snippet": "def unnormalize(x):\n '''\n Unnormalize [-1, 1] to [0, 1]\n '''\n return torch.clip((x + 1.0) / 2.0, 0.0, 1.0)" }, { "identifier": "init_ray_sampler", "path": 
"external/sparsefusion_utils/render_utils.py", "snippet": "def init_ray_sampler(gpu, img_h, img_w, min=0.1, max=4.0, bbox=None, n_pts_per_ray=128, n_rays=750, scale_factor=None):\n '''\n Construct ray samplers for torch-ngp\n\n Args:\n gpu (int): gpu id\n img_h (int): image height\n img_w (int): image width\n min (int): min depth for point along ray\n max (int): max depth for point along ray\n bbox (List): bounding box for monte carlo sampler\n n_pts_per_ray (int): number of points along a ray\n n_rays (int): number of rays for monte carlo sampler\n scale_factor (int): return a grid sampler at a scale factor\n \n Returns:\n sampler_grid (sampler): a grid sampler at full resolution\n sampler_mc (sampler): a monte carlo sampler\n sampler_feat (sampler): a grid sampler at scale factor resolution\n if scale factor is provided\n '''\n\n img_h, img_w = img_h, img_w\n volume_extent_world = max\n half_pix_width = 1.0 / img_w\n half_pix_height = 1.0 / img_h\n\n raysampler_grid = GridRaysampler(\n min_x=1.0 - half_pix_width,\n max_x=-1.0 + half_pix_width,\n min_y=1.0 - half_pix_height,\n max_y=-1.0 + half_pix_height,\n image_height=img_h,\n image_width=img_w,\n n_pts_per_ray=n_pts_per_ray,\n min_depth=min,\n max_depth=volume_extent_world,\n )\n if scale_factor is not None:\n img_h_ = int(img_h//scale_factor)\n img_w_ = int(img_w//scale_factor)\n half_pix_width_ = 1.0 / img_w_\n half_pix_height_ = 1.0 / img_h_\n raysampler_features = GridRaysampler(\n min_x=1.0 - half_pix_width_,\n max_x=-1.0 + half_pix_width_,\n min_y=1.0 - half_pix_height_,\n max_y=-1.0 + half_pix_height_,\n image_height=img_h_,\n image_width=img_w_,\n n_pts_per_ray=20,\n min_depth=min,\n max_depth=volume_extent_world,\n )\n if bbox is None:\n raysampler_mc = MonteCarloRaysampler(\n min_x = -1.0,\n max_x = 1.0,\n min_y = -1.0,\n max_y = 1.0,\n n_rays_per_image=n_rays,\n n_pts_per_ray=n_pts_per_ray,\n min_depth=min,\n max_depth=volume_extent_world,\n )\n elif bbox is not None:\n raysampler_mc = MonteCarloRaysampler(\n min_x = -bbox[0,1],\n max_x = -bbox[0,3],\n min_y = -bbox[0,0],\n max_y = -bbox[0,2],\n n_rays_per_image=n_rays,\n n_pts_per_ray=n_pts_per_ray,\n min_depth=min,\n max_depth=volume_extent_world,\n )\n\n if scale_factor is not None:\n return raysampler_grid, raysampler_mc, raysampler_features\n else:\n return raysampler_grid, raysampler_mc" }, { "identifier": "PerceptualLoss", "path": "external/sparsefusion_utils/external_utils.py", "snippet": "class PerceptualLoss(nn.Module):\n def __init__(self, net, device):\n super().__init__()\n self.model = lpips.LPIPS(net=net, verbose=False).to(device)\n self.device = device\n\n def get_device(self, default_device=None):\n \"\"\"\n Returns which device module is on, assuming all parameters are on the same GPU.\n \"\"\"\n try:\n return next(self.parameters()).device\n except StopIteration:\n return default_device\n\n def __call__(self, pred, target, normalize=True):\n \"\"\"\n Pred and target are Variables.\n If normalize is on, scales images between [-1, 1]\n Assumes the inputs are in range [0, 1].\n B 3 H W\n \"\"\"\n if pred.shape[1] != 3:\n pred = pred.permute(0, 3, 1, 2)\n target = target.permute(0, 3, 1, 2)\n # print(pred.shape, target.shape)\n if normalize:\n target = 2 * target - 1\n pred = 2 * pred - 1\n\n # temp_device = pred.device\n # device = self.get_device(temp_device)\n\n device = self.device\n\n pred = pred.to(device).float()\n target = target.to(device)\n dist = self.model.forward(pred, target)\n return dist.to(device)" }, { "identifier": "UpSRT", "path": 
"upsrt/model/model.py", "snippet": "class UpSRT(nn.Module):\n \"\"\"\n A \"scene\" represents a novel scene/object in 3D, and our input consists of multiple sparse views\n (num_input_views) corresponding to that scene. Aim is to form a scene embedding for input sparse\n view images and patch rays corresponding to these images. We pass the input images to a pre-trained\n feature extractor (could be anything, ViT or ResNet) to obtain patch embeddings of shape\n (num_input_views*P, D1). We also encode corresponding patch rays (corresponding to center pixel\n of each patch) of shape (num_input_views*P, D2). We concatenate the image and ray embeddings\n (num_input_views*P, D1+D2) and pass them to a transformer encoder to generate a scene encoding of\n dimensions - (num_input_views*P, D).\n\n The scene encoding from the transformer encoder is fed to another transformer decoder along with\n per-pixel query rays from a novel view point to generate novel view pixel values. We will then\n take a reconstruction loss between the predicted pixels and gt pixels.\n\n \"\"\"\n\n def __init__(self, cfg):\n super(UpSRT, self).__init__()\n\n self.num_pixel_queries = cfg.num_pixel_queries\n\n # Image patch feature extractor\n self.image_feature_dim = cfg.feature_extractor.image_feature_dim\n self.num_patches_x = cfg.feature_extractor.num_patches_x\n self.num_patches_y = cfg.feature_extractor.num_patches_y\n\n # Ray positional encoding args\n self.num_freqs = cfg.ray.num_freqs\n self.start_freq = cfg.ray.start_freq\n self.parameterize = cfg.ray.parameterize\n self.harmonic_embedding_dim = 2 * self.num_freqs * 6\n self.view_space = cfg.ray.view_space\n\n # Transformer encoder and decoder\n self.transformer_dim = cfg.transformer_dim\n self.scene_encoder = SceneEncoder(cfg.scene_encoder)\n self.ray_decoder = RayDecoder(cfg.ray_decoder)\n\n # self.linear_img_features = nn.Linear(self.image_feature_dim, self.transformer_dim)\n\n self.linear_scene = nn.Linear(\n self.image_feature_dim + self.harmonic_embedding_dim + 2*self.num_freqs + 2*self.num_freqs,\n self.transformer_dim\n )\n self.linear_query_pixel_rays = nn.Linear(self.harmonic_embedding_dim, self.transformer_dim)\n\n # stddev = 1.0 / math.sqrt(self.transformer_dim)\n # self.first_camera_enc = nn.Parameter(\n # data = torch.randn((1, 1, 1, self.transformer_dim)) * stddev\n # )\n # self.other_camera_enc = nn.Parameter(\n # data = torch.randn((1, 1, 1, self.transformer_dim)) * stddev\n # )\n # self.patch_enc = nn.Parameter(\n # data = torch.randn((1, 1, self.num_patches_x*self.num_patches_y, self.transformer_dim)) * stddev\n # )\n\n def forward(self, dino_features, input_cameras, query_pixel_rays, device, return_type=\"pred_rgb\"):\n \"\"\"\n Args:\n dino_features: (b, n, t, d)\n input_cameras: (list of list of cameras; list shape (B, num_input_views)).\n query_pixel_rays: (B, num_pixel_queries, 6) - note: (origin, direction) representation\n\n Returns:\n torch.Tensor: Predicted pixel values corresponding to query_pixel_rays of shape (B, num_pixel_queries, 3).\n \"\"\"\n n_views = dino_features.shape[1]\n\n # Scene latent representation\n scene_encoding = self.encode(device, dino_features, input_cameras) # (n_inp * patch, b, transformer_dim)\n\n pred_pixels = self.decode(\n scene_encoding, query_pixel_rays, input_cameras,\n device, return_type\n )\n return pred_pixels\n\n def encode(self, device, dino_features, input_cameras):\n \"\"\"\n Args:\n dino_features: (b, n, t, d)\n input_cameras: Input cameras corresponding to each provided view for the batch (list of 
list cameras; list shape (B, num_input_views)).\n\n Returns:\n torch.Tensor: Predicted pixel values corresponding to query_pixel_rays of shape (B, num_pixel_queries, 3).\n\n \"\"\"\n n_views = dino_features.shape[1]\n\n identity_cameras = self.create_cameras_with_identity_extrinsics(input_cameras)\n input_patch_rays = get_patch_rays(\n identity_cameras, num_patches_x=self.num_patches_x,\n num_patches_y=self.num_patches_y, device=device\n )\n\n # Convert to plucker and convert to harmonics embeddings\n input_patch_rays = positional_encoding(\n input_patch_rays, n_freqs=self.num_freqs,\n parameterize=self.parameterize, start_freq=self.start_freq\n ) # (b, n_inp, patch, pos_embedding_dim)\n\n patch_id_encoding = create_patch_id_encoding(\n dino_features.shape, num_patches=self.num_patches_x*self.num_patches_y,\n n_freqs=self.num_freqs, start_freq=self.start_freq\n ).to(device)\n\n camera_id_encoding = create_camera_id_encoding(\n dino_features.shape, num_patches=self.num_patches_x*self.num_patches_y,\n n_freqs=self.num_freqs, start_freq=self.start_freq\n ).to(device)\n\n # Concatenate input patch ray embeddings to image patch features\n scene_features = torch.cat(\n (dino_features, input_patch_rays, patch_id_encoding, camera_id_encoding), dim=-1\n ) # (b, n_inp, patch, img_feature_dim + pos_embedding_dim + 2*self.num_freqs + 2*self.num_freqs)\n\n # Project scene features to transformer dimensions\n scene_features = self.linear_scene(scene_features) # (b, n_inp, patch, transformer_dim)\n\n # Scene latent representation\n scene_encoding = self.scene_encoder(scene_features) # (b, n_inp * patch, transformer_dim)\n\n return scene_encoding\n\n def get_set_latent_representation(self, dino_features, input_cameras):\n\n scene_encoding = self.encode(\n device=dino_features.device, dino_features=dino_features,\n input_cameras=input_cameras\n )\n return scene_encoding\n\n def decode(\n self, scene_encoding, query_pixel_rays,\n input_cameras, device, return_type=\"pred_rgb\"\n ):\n\n # Convert query rays to view space if required.\n query_pixel_rays = self.convert_query_rays_to_view_space(\n device, input_cameras, query_pixel_rays\n )\n\n # Encode and project query rays to transformer dimensions\n query_pixel_rays = positional_encoding(\n query_pixel_rays, n_freqs=self.num_freqs,\n parameterize=self.parameterize, start_freq=self.start_freq\n ) # (b, num_pixel_queries, pos_embedding_dim)\n\n query_pixel_rays = self.linear_query_pixel_rays(query_pixel_rays) # (b, num_pixel_queries, transformer_dim)\n\n pred_pixels = self.ray_decoder(\n query_pixel_rays, scene_encoding, return_type = return_type\n )\n return pred_pixels\n\n def get_query_rays(self, query_cameras, image_size=None, query_ray_filter=None):\n\n if not self.training:\n raise RuntimeError(\"This function is only to be used during training.\")\n\n return get_random_query_pixel_rays(\n query_cameras, num_pixel_queries=self.num_pixel_queries, query_ray_filter=query_ray_filter,\n min_x=1, max_x=-1, min_y=1, max_y=-1,\n return_xys=True, device='cpu'\n )\n\n def infer(self, dino_features, input_cameras, query_cameras, image_size=None):\n \"\"\"Infers model for a given set of input views and the query view. 
Predicts the pixel values for all pixels (H*W) given the query view.\n Args:\n dino_features: (b, n, t, d)\n input_cameras(list[pytorch3d.renderer.cameras.CamerasBase]): List of Pytorch3D cameras of length (n_cameras,) corresponding to the\n input views.\n query_cameras(list[pytorch3d.renderer.cameras.CamerasBase]): List of Pytorch3D cameras of length (n_query_cameras,) corresponding to the\n query views.\n image_size(tuple[int, int]): Size of the image in pixels (height, width).\n\n Returns:\n torch.Tensor: Tensor of shape (n_query_cameras, H*W, 3) containing the predicted\n pixel values for each pixel in each query view.\n \"\"\"\n assert not self.training, \"Set model.eval() before calling infer\"\n with torch.no_grad():\n pred_pixels, _ = self.get_query_features(\n dino_features = dino_features, input_cameras = input_cameras,\n query_cameras = query_cameras, image_size = image_size,\n decoder_return_type = \"pred_rgb\"\n ) # (n_query_cameras, H*W, 3)\n\n return pred_pixels\n\n def get_query_features(\n self, dino_features, input_cameras, query_cameras, image_size,\n decoder_return_type, return_grid_rays=False, return_slt=False\n ):\n\n device = dino_features.device\n grid_rays, _ = get_grid_rays_gpu(\n query_cameras, image_size=image_size, min_x=1, max_x=-1,\n min_y=1, max_y=-1\n ) # grid_rays: (n_query_cameras, H*W, 6), and, xys: (n_query_cameras, H*W, 2)\n\n # Break the given number of query rays into reasonable batches (to avoid OOM)\n n_queries = self.num_pixel_queries\n num_query_batches = math.ceil(grid_rays.shape[1] / n_queries)\n pred_pixels_list = []\n\n scene_encoding = self.encode(device, dino_features, input_cameras)\n\n for i in range(num_query_batches):\n # Get grid rays corresponding to the current batch of pixels\n grid_rays_current_iter = grid_rays[:, i * n_queries:(i + 1) * n_queries] # (n_query_cameras, n_queries, 6)\n\n # Predict the pixel values for the given rays\n # NOTE: Removed input_indices requirement\n pred_pixels = self.decode(\n scene_encoding=scene_encoding, query_pixel_rays=grid_rays_current_iter,\n input_cameras=input_cameras, device=device, return_type=decoder_return_type\n ) # (n_query_cameras, n_queries, F)\n pred_pixels_list.append(pred_pixels)\n\n query_features = torch.cat(pred_pixels_list, dim=1) # (n_query_cameras, H*W, F)\n feature_dim = query_features.shape[-1]\n query_features = torch.reshape(query_features, (-1, *image_size, feature_dim))\n\n if return_grid_rays:\n plucker_grid_rays = get_plucker_parameterization(\n torch.reshape(grid_rays, (-1, *image_size, 6)).to(dino_features.device)\n )\n\n else:\n plucker_grid_rays = None\n\n if return_slt:\n output = (query_features, plucker_grid_rays, scene_encoding)\n\n else:\n output = (query_features, plucker_grid_rays)\n\n return output\n\n def convert_to_view_space(self, input_cameras, input_rays, query_rays):\n if not self.view_space:\n return input_rays, query_rays\n\n reference_cameras = [cameras[0] for cameras in input_cameras]\n reference_R = [camera.R.to(input_rays.device) for camera in reference_cameras] # List (length=batch_size) of Rs(shape: 1, 3, 3)\n reference_R = torch.cat(reference_R, dim=0) # (B, 3, 3)\n reference_T = [camera.T.to(input_rays.device) for camera in reference_cameras] # List (length=batch_size) of Ts(shape: 1, 3)\n reference_T = torch.cat(reference_T, dim=0) # (B, 3)\n input_rays = transform_rays(reference_R=reference_R, reference_T=reference_T, rays=input_rays)\n query_rays = transform_rays(reference_R=reference_R, reference_T=reference_T, 
rays=query_rays.unsqueeze(1)).squeeze(1)\n return input_rays, query_rays\n\n def convert_query_rays_to_view_space(self, device, input_cameras, query_rays):\n if not self.view_space:\n return query_rays\n\n reference_cameras = [cameras[0] for cameras in input_cameras]\n reference_R = [camera.R.to(device) for camera in reference_cameras] # List (length=batch_size) of Rs(shape: 1, 3, 3)\n reference_R = torch.cat(reference_R, dim=0) # (B, 3, 3)\n reference_T = [camera.T.to(device) for camera in reference_cameras] # List (length=batch_size) of Ts(shape: 1, 3)\n reference_T = torch.cat(reference_T, dim=0) # (B, 3)\n query_rays = transform_rays(reference_R=reference_R, reference_T=reference_T, rays=query_rays.unsqueeze(1)).squeeze(1)\n return query_rays\n\n def create_cameras_with_identity_extrinsics(self, input_cameras):\n\n identity_cameras = []\n for cameras in input_cameras:\n cur_list = []\n for camera in cameras:\n new_camera = PerspectiveCameras(\n R=torch.eye(3, dtype=torch.float32).unsqueeze(0),\n T=torch.zeros((1, 3), dtype=torch.float32),\n focal_length=camera.focal_length, principal_point=camera.principal_point,\n image_size=camera.image_size\n )\n cur_list.append(new_camera)\n identity_cameras.append(cur_list)\n\n return identity_cameras" }, { "identifier": "DINOv2KeyExtractor", "path": "dino/model/model.py", "snippet": "class DINOv2KeyExtractor(nn.Module):\n\n def __init__(self, cfg):\n\n super().__init__()\n self.cache = None\n self.model_key = cfg.model_key\n self.layer_name = cfg.layer_name\n\n self.model = torch.hub.load('facebookresearch/dinov2', self.model_key)\n self.model.eval()\n\n self.modules_dict = dict([*self.model.named_modules()])\n mod = self.modules_dict[self.layer_name]\n mod.register_forward_hook(self.create_hook(self.layer_name))\n\n self.dino_transforms = transforms.Compose([\n transforms.Resize(224, interpolation=transforms.InterpolationMode.BICUBIC),\n transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),\n ])\n\n def create_hook(self, layer_name):\n \"\"\"\n Creates a hook function for the given layer name that saves the output of\n that layer to self.cache\n \"\"\"\n def hook_fn(module, inp, out):\n\n # Shape details\n _, _, dim3 = out.shape\n dim = int(dim3 / 3)\n\n # Removing the CLS token and extracting only the key features\n # and storing it in self.cache\n self.cache = out[:, 1:, dim:(dim+dim)]\n\n return hook_fn\n\n def preprocess(self, x):\n \"\"\"\n Args:\n x : torch.Tensor with shape [B, C, H, W] and values in the range [-1, 1]\n \"\"\"\n unnorm_img = x * 0.5 + 0.5 # unnorm is in the range [0, 1]\n output = self.dino_transforms(unnorm_img)\n return output\n\n def get_key_features(self, x):\n \"\"\"\n Args:\n x : torch.Tensor with shape [B, C, H, W] and values in the range [-1, 1]\n \"\"\"\n _ = self.model(self.preprocess(x))\n return self.cache # (B, T, D)\n\n def forward(self, input_views):\n \"\"\"\n Args:\n input_views : torch.Tensor with shape [B, N, C, H, W] and values in the range [-1, 1]\n \"\"\"\n N = input_views.shape[1]\n reshaped = rearrange(input_views, \"b n c h w -> (b n) c h w\")\n\n key_feats_ = self.get_key_features(reshaped) # (B*N, T, K)\n key_feats = rearrange(key_feats_, \"(b n) t d -> b n t d\", n=N)\n\n return key_feats" }, { "identifier": "DiffusionPipelineCN", "path": "diffusion/pipeline_control_net.py", "snippet": "class DiffusionPipelineCN(nn.Module):\n\n def __init__(self, cfg, srt_model=None, dino_model=None):\n\n super().__init__()\n self.cfg = cfg\n self.control_net_model_config_path = 
self.cfg.control_net_model_config_path\n self.prompt_color = self.cfg.control_net_prompt_color\n self._setup_model()\n self.srt_model = srt_model\n self.dino_model = dino_model\n\n self.cond_type = self.cfg.cond_type\n if self.cond_type == \"DF\":\n self._create_batch_dict_fn = self._create_batch_dict_df\n self._maybe_dropout_condition_fn = self._maybe_dropout_condition_df\n\n elif self.cond_type == \"SLT\":\n self._create_batch_dict_fn = self._create_batch_dict_slt\n self._maybe_dropout_condition_fn = self._maybe_dropout_condition_slt\n\n elif self.cond_type == \"DF+SLT\":\n self._create_batch_dict_fn = self._create_batch_dict_dfslt\n self._maybe_dropout_condition_fn = self._maybe_dropout_condition_dfslt\n\n else:\n raise ValueError\n\n def _setup_model(self):\n\n model = create_model(self.cfg.control_net_model_config_path).cpu()\n model.sd_locked = self.cfg.control_net_sd_locked\n model.only_mid_control = False\n\n # if self.cfg.control_net_init_ckpt_path is not None:\n # model.load_state_dict(load_state_dict(self.cfg.control_net_init_ckpt_path, location='cpu'))\n # model.sd_locked = self.cfg.control_net_sd_locked\n # model.only_mid_control = False\n # else:\n # raise RuntimeError\n\n self.handle = model\n\n def to_device(self, device):\n self.dino_model = self.dino_model.to(device)\n self.srt_model = self.srt_model.to(device)\n self.handle = self.handle.to(device)\n\n def _get_text_prompt(self, batch_size, class_idxs=None):\n prompt = [f\"a high quality image with a {self.prompt_color} background\" for _ in range(batch_size)]\n\n return prompt\n\n def _get_null_text_prompt(self, batch_size):\n prompt = [\"\" for _ in range(batch_size)]\n return prompt\n\n def _create_batch_dict_df(self, clean_data, srt_cond, class_idxs=None):\n # NOTE: clean_data and cond_images must be channels last!\n batch = {\n \"jpg\": clean_data,\n \"txt\": self._get_text_prompt(len(clean_data), class_idxs),\n \"hint\": srt_cond, # srt_cond is cond_images\n }\n return batch\n\n def _create_batch_dict_slt(self, clean_data, srt_cond, class_idxs=None):\n # NOTE: clean_data must be channels last!\n slt, query_cameras = srt_cond\n batch = {\n \"jpg\": clean_data,\n \"txt\": self._get_text_prompt(len(clean_data), class_idxs),\n \"slt\": slt,\n \"query_cameras\": query_cameras,\n }\n return batch\n\n def _create_batch_dict_dfslt(self, clean_data, srt_cond, class_idxs=None):\n # NOTE: clean_data must be channels last!\n cond_images, slt, query_cameras = srt_cond\n batch = {\n \"jpg\": clean_data,\n \"txt\": self._get_text_prompt(len(clean_data), class_idxs),\n \"hint\": cond_images,\n \"slt\": slt,\n \"query_cameras\": query_cameras,\n }\n return batch\n\n def _maybe_dropout_condition_df(self, batch, cfg_seed, condition_dropout):\n\n # Logic inspired from https://github.com/cvlab-columbia/zero123/blob/main/zero123/ldm/models/diffusion/ddpm.py\n prompt, cond_images = batch[\"txt\"], batch[\"hint\"]\n random_number = torch.rand((), generator = torch.Generator().manual_seed(cfg_seed)).item()\n drop_prompt = random_number < (2 * condition_dropout)\n drop_condition = (random_number >= condition_dropout) and (random_number < (3 * condition_dropout))\n\n if drop_prompt:\n prompt = self._get_null_text_prompt(len(prompt))\n\n if drop_condition:\n cond_images = torch.zeros_like(cond_images)\n\n return batch\n\n def _maybe_dropout_condition_slt(self, batch, cfg_seed, condition_dropout):\n\n # Logic inspired from https://github.com/cvlab-columbia/zero123/blob/main/zero123/ldm/models/diffusion/ddpm.py\n prompt, slt, query_cameras = 
batch[\"txt\"], batch[\"slt\"], batch[\"query_cameras\"]\n random_number = torch.rand((), generator = torch.Generator().manual_seed(cfg_seed)).item()\n drop_prompt = random_number < (2 * condition_dropout)\n drop_condition = (random_number >= condition_dropout) and (random_number < (3 * condition_dropout))\n\n if drop_prompt:\n batch[\"txt\"] = self._get_null_text_prompt(len(prompt))\n\n if drop_condition:\n batch[\"slt\"] = torch.zeros_like(slt)\n batch[\"query_cameras\"] = [None for _ in range(len(query_cameras))]\n\n return batch\n\n def _maybe_dropout_condition_dfslt(self, batch, cfg_seed, condition_dropout):\n\n # Logic inspired from https://github.com/cvlab-columbia/zero123/blob/main/zero123/ldm/models/diffusion/ddpm.py\n prompt, slt, query_cameras, cond_images = batch[\"txt\"], batch[\"slt\"], batch[\"query_cameras\"], batch[\"hint\"]\n random_number = torch.rand((), generator = torch.Generator().manual_seed(cfg_seed)).item()\n drop_prompt = random_number < (2 * condition_dropout)\n drop_condition = (random_number >= condition_dropout) and (random_number < (3 * condition_dropout))\n\n if drop_prompt:\n batch[\"txt\"] = self._get_null_text_prompt(len(prompt))\n\n if drop_condition:\n batch[\"hint\"] = torch.zeros_like(cond_images)\n batch[\"slt\"] = torch.zeros_like(slt)\n batch[\"query_cameras\"] = [None for _ in range(len(query_cameras))]\n\n return batch\n\n def forward_with_loss(\n self, clean_data, srt_cond, class_idxs=None,\n enable_cfg=False, cfg_seed=None, condition_dropout=0.0\n ):\n clean_data_ = torch.permute(clean_data, (0, 2, 3, 1)).contiguous()\n batch = self._create_batch_dict_fn(clean_data_, srt_cond, class_idxs)\n\n if enable_cfg:\n batch = self._maybe_dropout_condition_fn(batch, cfg_seed, condition_dropout)\n\n # This should call the shared_step function implemented in the class LatentDiffusion via inheritance\n loss, _ = self.handle.shared_step(batch)\n return loss\n\n def forward_one_step_denoise(self, clean_data, cond_images, class_idxs=None):\n\n # NOTE: Does not perform CFG!\n clean_data_ = torch.permute(clean_data, (0, 2, 3, 1)).contiguous()\n batch = self._create_batch_dict(clean_data_, cond_images, class_idxs)\n\n # This should call the perform_one_step_denoise function implemented in the class LatentDiffusion via inheritance\n pred_latent, t = self.handle.perform_one_step_denoise(batch)\n decoded = self.handle.decode_first_stage(pred_latent)\n alpha_cumprod = self.handle.alphas_cumprod[t]\n return decoded, alpha_cumprod\n\n def _prepare_srt_cond_dict(self, srt_cond):\n\n if self.cond_type == \"DF\":\n cond_images = torch.permute(srt_cond, (0, 3, 1, 2)).contiguous()\n srt_cond_dict = {\n \"c_concat\": [cond_images]\n }\n elif self.cond_type == \"SLT\":\n slt, query_cameras = srt_cond\n srt_cond_dict = {\n \"slt\": slt,\n \"query_cameras\": query_cameras\n }\n elif self.cond_type == \"DF+SLT\":\n cond_images, slt, query_cameras = srt_cond\n cond_images = torch.permute(cond_images, (0, 3, 1, 2)).contiguous()\n srt_cond_dict = {\n \"hint\": cond_images,\n \"slt\": slt,\n \"query_cameras\": query_cameras\n }\n else:\n raise ValueError\n\n return srt_cond_dict\n\n def _prepare_srt_uncond_dict(self, srt_cond):\n\n if self.cond_type == \"DF\":\n cond_images = torch.permute(srt_cond, (0, 3, 1, 2)).contiguous()\n srt_uncond_dict = {\n \"c_concat\": [torch.zeros_like(cond_images)]\n }\n\n elif self.cond_type == \"SLT\":\n slt, query_cameras = srt_cond\n srt_uncond_dict = {\n \"slt\": torch.zeros_like(slt),\n \"query_cameras\": [None for _ in 
range(len(query_cameras))]\n }\n\n elif self.cond_type == \"DF+SLT\":\n cond_images, slt, query_cameras = srt_cond\n cond_images = torch.permute(cond_images, (0, 3, 1, 2)).contiguous()\n srt_uncond_dict = {\n \"hint\": torch.zeros_like(cond_images),\n \"slt\": torch.zeros_like(slt),\n \"query_cameras\": [None for _ in range(len(query_cameras))]\n }\n\n else:\n raise ValueError\n\n return srt_uncond_dict\n\n def _prepare_args_for_cfg(self, batch_size, cond_type, c_cross, srt_cond):\n\n # Preparing the cond variable\n if cond_type != \"F1\":\n raise ValueError(\"Not Supported.\")\n\n uc_cross = self.handle.get_unconditional_conditioning(batch_size)\n cond = {\n \"c_crossattn\": [c_cross],\n **self._prepare_srt_cond_dict(srt_cond)\n }\n uncond = {\n \"c_crossattn\": [uc_cross],\n **self._prepare_srt_uncond_dict(srt_cond)\n }\n\n return cond, uncond\n\n def forward_multi_step_denoise(\n self, clean_data, srt_cond, batch_size,\n unconditional_guidance_scale, cfg_type=\"F1\", class_idxs=None,\n t_start=None, t_end=None, ddim_eta=0.0, ddim_steps=30\n ):\n\n ddim_sampler = DDIMSampler(self.handle)\n ddim_sampler.make_schedule(ddim_num_steps=ddim_steps, ddim_eta=ddim_eta, verbose=False)\n\n t_start_ = 1 if t_start is None else t_start\n t_end_ = len(ddim_sampler.ddim_timesteps)-1 if t_end is None else t_end\n\n # Adding noise\n encoder_posterior = self.handle.encode_first_stage(clean_data)\n x = self.handle.get_first_stage_encoding(encoder_posterior).detach()\n t = torch.randint(\n t_start_, t_end_,\n (clean_data.shape[0],), device=self.handle.device\n ).long()\n noisy_data = ddim_sampler.stochastic_encode(x, t)\n\n text = self._get_text_prompt(batch_size, class_idxs)\n c_cross = self.handle.get_learned_conditioning(text)\n cond, uncond = self._prepare_args_for_cfg(batch_size, cfg_type, c_cross, srt_cond)\n\n denoised_data = ddim_sampler.decode(\n noisy_data, cond, t, unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=uncond, cfg_type=cfg_type\n )\n decoded = self.handle.decode_first_stage(denoised_data)\n alpha_cumprod = ddim_sampler.ddim_alphas[t.item()]\n\n return decoded, alpha_cumprod\n\n def infer(\n self, srt_cond, batch_size, device, cfg_type,\n unconditional_guidance_scale, class_idxs=None\n ):\n \"\"\"\n v2 performs classifier free guidance\n \"\"\"\n ddim_eta = 0.0\n ddim_steps = 50\n\n if batch_size != 1:\n raise ValueError\n\n if unconditional_guidance_scale is None:\n raise ValueError\n\n text = self._get_text_prompt(batch_size, class_idxs)\n c_cross = self.handle.get_learned_conditioning(text)\n cond, uncond = self._prepare_args_for_cfg(batch_size, cfg_type, c_cross, srt_cond)\n\n infered_out = self.use_ddim_sampler(\n ddim_steps=ddim_steps, batch_size=batch_size, cond=cond,\n unconditional_guidance_scale=unconditional_guidance_scale,\n eta=ddim_eta, unconditional_conditioning=uncond,\n cfg_type=cfg_type\n )\n return infered_out\n\n def use_ddim_sampler(self, ddim_steps, batch_size, cond, **kwargs):\n\n ddim_sampler = DDIMSampler(self.handle)\n shape = (4, *self.cfg.query_feature_size)\n samples, _ = ddim_sampler.sample(ddim_steps, batch_size, shape, cond, verbose=False, **kwargs)\n infered_out = self.handle.decode_first_stage(samples)\n return infered_out" } ]
import os import cv2 import torch import imageio import argparse import warnings import numpy as np import matplotlib.pyplot as plt import torch.nn.functional as F from PIL import Image from tqdm import tqdm from einops import rearrange from omegaconf import OmegaConf from accelerate import Accelerator from torch.utils.data import DataLoader, Dataset from pytorch3d.renderer import PerspectiveCameras from pytorch3d.transforms import Transform3d from transformers import logging as transformers_logging from pytorch3d.renderer.cameras import look_at_view_transform from external.nerf.network_grid import NeRFNetwork from external.sparsefusion_utils.common_utils import get_lpips_fn, normalize, unnormalize from external.sparsefusion_utils.render_utils import init_ray_sampler from external.sparsefusion_utils.external_utils import PerceptualLoss from upsrt.model.model import UpSRT from dino.model.model import DINOv2KeyExtractor from diffusion.pipeline_control_net import DiffusionPipelineCN
13,543
return len(self.query_cameras) @staticmethod def collate_fn(batch): batched_cameras = concat_cameras([x["query_cameras"] for x in batch]) return_dict = { "slt": torch.cat([x["slt"] for x in batch], dim = 0), "cond_images": torch.cat([x["cond_images"] for x in batch], dim = 0), "query_rgb_256": torch.cat([x["query_rgb_256"] for x in batch], dim = 0), "query_cameras": batched_cameras, } return return_dict def __getitem__(self, idx): return_dict = { "slt": self.cache[idx]["slt"], "cond_images": self.cache[idx]["cond_images"], "query_rgb_256": self.cache[idx]["query_rgb_256"], "query_cameras": self.query_cameras[idx] } return return_dict ################################################################################# # Util Functions ################################################################################# def get_cfg(cfg_path, verbose=False): cfg = OmegaConf.load(cfg_path) if verbose: print(OmegaConf.to_yaml(cfg)) return cfg def save_image(path, tensor, unnorm = False): img = np.transpose(tensor.cpu().numpy(), (1, 2, 0)) if unnorm: img = img * 0.5 + 0.5 img = np.clip(img, 0.0, 1.0) Image.fromarray((img*255.0).astype(np.uint8)).save(path) def _collect_attr(cams, attr): return torch.cat([getattr(x, attr) for x in cams], dim = 0) def concat_cameras(list_of_cameras): concat_cameras = PerspectiveCameras( R=_collect_attr(list_of_cameras, "R"), T=_collect_attr(list_of_cameras, "T"), focal_length=_collect_attr(list_of_cameras, "focal_length"), principal_point=_collect_attr(list_of_cameras, "principal_point"), image_size=_collect_attr(list_of_cameras, "image_size"), ) return concat_cameras def batched_cameras_to_list(batched_cameras): cameras = [] for i in range(len(batched_cameras)): cam = PerspectiveCameras( R=batched_cameras.R[i:i+1], T=batched_cameras.T[i:i+1], focal_length=batched_cameras.focal_length[i:i+1], principal_point=batched_cameras.principal_point[i:i+1], image_size=batched_cameras.image_size[i:i+1], ) cameras.append(cam) return cameras def _prepare_condition(srt_model, dino_model, input_views, input_cameras, query_cameras): dino_features = dino_model(input_views) query_features, plucker_encoding, slt = srt_model.get_query_features( dino_features=dino_features, input_cameras=input_cameras, query_cameras=query_cameras, image_size=(32, 32), decoder_return_type = "pre_rgb_mlp_features", return_grid_rays = True, return_slt = True ) cond_images = torch.cat((query_features, plucker_encoding), dim = 3) return (cond_images.detach().cpu(), slt.detach().cpu()), dino_features def get_default_torch_ngp_opt(): ''' Return default options for torch-ngp ''' opt = argparse.Namespace() opt.cuda_ray = False opt.bg_radius = 0 opt.density_thresh = 10 opt.bound = 1 opt.min_near = 0.05 return opt ################################################################################# # Main Functions ################################################################################# def distillation_loop( gpu, cfg, args, opt, model_tuple, save_dir, seq_name, max_itr=3000, loss_fn_vgg=None, ): ################################################################################# # Setup ################################################################################# print("[***] Preparing training setup") lambda_opacity = 1e-3 lambda_entropy = 0.0 lambda_percep = 0.1 plot_log_freq = 20 img_log_freq = 100 hw_scale = 2.0 gradient_accumulation_steps = 1 sds_bootstrap_itrs = 300 start_percep_step = -1 bg_color_choice = 1.0 max_itr = 3000
################################################################################# # Util Classes ################################################################################# class ITWDataset(Dataset): def __init__(self, root): super().__init__() self.root = root image_size = [256, 256] focal_length = [4.5, 4.5] principal_point = [0.0, 0.0] files = sorted(os.listdir(self.root)) self.masks = [] self.images = [] for file in files: img_path = os.path.join(self.root, file) img = torch.tensor(self.load_image(img_path).astype(np.float32)/255.0) img = torch.permute(img, (2, 0, 1)).contiguous() img = img * 2.0 - 1.0 # (3, 256, 256) self.images.append(img) self.cameras = self.create_synth_cameras( num_cameras=250, focal_length=focal_length, principal_point=principal_point, image_size=image_size, inference=False ) self.inference_cameras = self.create_synth_cameras( num_cameras=32, focal_length=focal_length, principal_point=principal_point, image_size=image_size, inference=True ) self.images = torch.stack(self.images, dim=0) # (N, 3, 256, 256) self.input_cameras = self.create_cameras_with_identity_extrinsics( num_cameras = len(self.images), focal_length = focal_length, principal_point = principal_point, image_size = image_size, ) @staticmethod def load_image(path): x = cv2.imread(path, 1) x = cv2.cvtColor(x, cv2.COLOR_BGR2RGB) return x @staticmethod def create_synth_cameras( num_cameras, focal_length, principal_point, image_size, sample_distance=False, inference=False ): base_elevation = 22.5 principal_point_ = torch.tensor([principal_point]) focal_length_ = torch.tensor([focal_length]) image_size_ = torch.tensor([image_size]) synth_cameras, w1_ci_TFs = [], [] distance_choices = [1.5, 1.0, 0.5] min_ele, max_ele = base_elevation - 30.0, base_elevation + 30.0 if inference: azimuths_ = torch.linspace(0.0, 360.0, num_cameras+1) elevations_ = torch.ones((num_cameras+1,)) * base_elevation azimuths_, elevations_ = azimuths_[:-1], elevations_[:-1] else: azimuths_, elevations_ = None, None # Setting up W1 to Ci transforms for i in range(num_cameras): if inference: distance_choice = 1.0 azimuth_choice = azimuths_[i] elevation_choice = elevations_[i] else: if i == 0: azimuth_choice = 0.0 elevation_choice = base_elevation distance_choice = 1.0 else: azimuth_choice = np.random.random() * 360.0 elevation_choice = np.random.random() * (max_ele - min_ele) + min_ele if sample_distance: distance_choice = distance_choices[np.random.randint(0, 3)] else: distance_choice = 1.0 R, T = look_at_view_transform( dist = distance_choice, elev = elevation_choice, azim = azimuth_choice, degrees = True ) w1_ci_TF_ = torch.eye(4).unsqueeze(0) w1_ci_TF_[:, :3, :3] = R w1_ci_TF_[:, 3, :3] = T w1_ci_TF = Transform3d(matrix = w1_ci_TF_) w1_ci_TFs.append(w1_ci_TF) # Location of camera corresponding to 1st image in the W2 system w2_c1_TF_ = torch.eye(4).unsqueeze(0) w2_c1_TF_[0, 3, :3] = torch.tensor([0.0, 0.0, 1.0]) w2_c1_TF = Transform3d(matrix = w2_c1_TF_) # Location of camera corresponding to 1st image in the W1 system (how? because we defined it to be so!) 
w1_c1_TF = w1_ci_TFs[0] # Calculating W2 to W1 transform w2_w1_TF = w2_c1_TF.compose(w1_c1_TF.inverse()) # Re-calculating cameras such that every camera uses W2 to Ci transform for i in range(num_cameras): w1_ci_TF = w1_ci_TFs[i] w2_ci_TF = w2_w1_TF.compose(w1_ci_TF) w2_ci_TF_ = w2_ci_TF.get_matrix() new_R = w2_ci_TF_[:, :3, :3] # (1, 3, 3) new_T = w2_ci_TF_[:, 3, :3] # (1, 3) camera = PerspectiveCameras( R=new_R, T=new_T, focal_length=focal_length_, principal_point=principal_point_, image_size=image_size_ ) synth_cameras.append(camera) return synth_cameras @staticmethod def create_cameras_with_identity_extrinsics(num_cameras, focal_length, principal_point, image_size): cameras = [] principal_point_ = torch.tensor([principal_point]) focal_length_ = torch.tensor([focal_length]) image_size_ = torch.tensor([image_size]) for i in range(num_cameras): camera = PerspectiveCameras( R=torch.eye(3, dtype=torch.float32).unsqueeze(0), T=torch.zeros((1, 3), dtype=torch.float32), focal_length=focal_length_, principal_point=principal_point_, image_size=image_size_ ) cameras.append(camera) return cameras def __getitem__(self, idx): return (self.images, self.cameras, self.input_cameras, self.inference_cameras) class CachedQueryDataset(torch.utils.data.Dataset): def __init__(self, query_cameras, cache): self.query_cameras = query_cameras[0] self.cache = cache def __len__(self): return len(self.query_cameras) @staticmethod def collate_fn(batch): batched_cameras = concat_cameras([x["query_cameras"] for x in batch]) return_dict = { "slt": torch.cat([x["slt"] for x in batch], dim = 0), "cond_images": torch.cat([x["cond_images"] for x in batch], dim = 0), "query_rgb_256": torch.cat([x["query_rgb_256"] for x in batch], dim = 0), "query_cameras": batched_cameras, } return return_dict def __getitem__(self, idx): return_dict = { "slt": self.cache[idx]["slt"], "cond_images": self.cache[idx]["cond_images"], "query_rgb_256": self.cache[idx]["query_rgb_256"], "query_cameras": self.query_cameras[idx] } return return_dict ################################################################################# # Util Functions ################################################################################# def get_cfg(cfg_path, verbose=False): cfg = OmegaConf.load(cfg_path) if verbose: print(OmegaConf.to_yaml(cfg)) return cfg def save_image(path, tensor, unnorm = False): img = np.transpose(tensor.cpu().numpy(), (1, 2, 0)) if unnorm: img = img * 0.5 + 0.5 img = np.clip(img, 0.0, 1.0) Image.fromarray((img*255.0).astype(np.uint8)).save(path) def _collect_attr(cams, attr): return torch.cat([getattr(x, attr) for x in cams], dim = 0) def concat_cameras(list_of_cameras): concat_cameras = PerspectiveCameras( R=_collect_attr(list_of_cameras, "R"), T=_collect_attr(list_of_cameras, "T"), focal_length=_collect_attr(list_of_cameras, "focal_length"), principal_point=_collect_attr(list_of_cameras, "principal_point"), image_size=_collect_attr(list_of_cameras, "image_size"), ) return concat_cameras def batched_cameras_to_list(batched_cameras): cameras = [] for i in range(len(batched_cameras)): cam = PerspectiveCameras( R=batched_cameras.R[i:i+1], T=batched_cameras.T[i:i+1], focal_length=batched_cameras.focal_length[i:i+1], principal_point=batched_cameras.principal_point[i:i+1], image_size=batched_cameras.image_size[i:i+1], ) cameras.append(cam) return cameras def _prepare_condition(srt_model, dino_model, input_views, input_cameras, query_cameras): dino_features = dino_model(input_views) query_features, plucker_encoding, slt = 
srt_model.get_query_features( dino_features=dino_features, input_cameras=input_cameras, query_cameras=query_cameras, image_size=(32, 32), decoder_return_type = "pre_rgb_mlp_features", return_grid_rays = True, return_slt = True ) cond_images = torch.cat((query_features, plucker_encoding), dim = 3) return (cond_images.detach().cpu(), slt.detach().cpu()), dino_features def get_default_torch_ngp_opt(): ''' Return default options for torch-ngp ''' opt = argparse.Namespace() opt.cuda_ray = False opt.bg_radius = 0 opt.density_thresh = 10 opt.bound = 1 opt.min_near = 0.05 return opt ################################################################################# # Main Functions ################################################################################# def distillation_loop( gpu, cfg, args, opt, model_tuple, save_dir, seq_name, max_itr=3000, loss_fn_vgg=None, ): ################################################################################# # Setup ################################################################################# print("[***] Preparing training setup") lambda_opacity = 1e-3 lambda_entropy = 0.0 lambda_percep = 0.1 plot_log_freq = 20 img_log_freq = 100 hw_scale = 2.0 gradient_accumulation_steps = 1 sds_bootstrap_itrs = 300 start_percep_step = -1 bg_color_choice = 1.0 max_itr = 3000
perceptual_loss = PerceptualLoss('vgg', device=f'cuda:{gpu}')
5
2023-12-12 00:49:11+00:00
16k
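The record above pairs a target line, perceptual_loss = PerceptualLoss('vgg', device=f'cuda:{gpu}'), with an import block that already binds PerceptualLoss from external.sparsefusion_utils.external_utils. The sketch below is a minimal, hypothetical check of that relationship: the two strings are copied from the record, while the helper function, its name, and the check itself are assumptions and not part of the dataset or its tooling.

# Hypothetical sanity check (assumption, not dataset tooling): the symbol called
# in the record's target line should be bound by the record's import block.
import re

import_block = "from external.sparsefusion_utils.external_utils import PerceptualLoss"
target_line = "perceptual_loss = PerceptualLoss('vgg', device=f'cuda:{gpu}')"

def imported_names(block: str) -> set:
    """Names bound by simple `import x` / `from m import a, b` lines."""
    names = set()
    for line in block.splitlines():
        line = line.strip()
        if line.startswith("from ") and " import " in line:
            names.update(n.strip() for n in line.split(" import ", 1)[1].split(","))
        elif line.startswith("import "):
            names.update(n.strip().split(".")[0] for n in line[len("import "):].split(","))
    return names

called = set(re.findall(r"\b([A-Za-z_]\w*)\s*\(", target_line))
assert called & imported_names(import_block)  # {'PerceptualLoss'}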
nox-410/tvm.tl
python/tvm/topi/arm_cpu/conv2d_gemm.py
[ { "identifier": "get_const_tuple", "path": "python/tvm/topi/utils.py", "snippet": "def get_const_tuple(in_tuple):\n \"\"\"Verifies input tuple is IntImm or Var, returns tuple of int or Var.\n\n Parameters\n ----------\n in_tuple : tuple of Expr\n The input.\n\n Returns\n -------\n out_tuple : tuple of int\n The output.\n \"\"\"\n ret = []\n ana = None\n for elem in in_tuple:\n if isinstance(elem, (tvm.tir.Var, tvm.tir.expr.Any)):\n ret.append(elem)\n elif not isinstance(elem, (tvm.tir.IntImm, int)):\n ana = tvm.arith.Analyzer() if ana is None else ana\n elem = ana.simplify(elem)\n if not isinstance(elem, tvm.tir.IntImm):\n ret.append(elem)\n else:\n ret.append(get_const_int(elem))\n else:\n ret.append(get_const_int(elem))\n return tuple(ret)" }, { "identifier": "get_const_int", "path": "python/tvm/topi/utils.py", "snippet": "def get_const_int(expr):\n \"\"\"Verifies expr is integer and get the constant value.\n\n Parameters\n ----------\n expr : tvm.Expr or int\n The input expression.\n\n Returns\n -------\n out_value : int\n The output.\n \"\"\"\n if isinstance(expr, Integral):\n return expr\n if not isinstance(expr, tvm.tir.IntImm):\n ana = tvm.arith.Analyzer()\n expr = ana.simplify(expr)\n if not isinstance(expr, tvm.tir.IntImm):\n raise ValueError(\"Expect value to be constant int\")\n return int(expr.value)" }, { "identifier": "get_pad_tuple", "path": "python/tvm/topi/nn/utils.py", "snippet": "def get_pad_tuple(padding, kernel):\n \"\"\"Common code to get the pad option\n\n Parameters\n ----------\n padding : int or str\n Padding size, or ['VALID', 'SAME']\n\n kernel : tuple of int\n Conv kernel size\n\n Returns\n -------\n pad_top : int\n Padding size on top\n\n pad_left : int\n Padding size on left\n\n pad_down : int\n Padding size on down.\n\n pad_right : int\n Padding size on right.\n \"\"\"\n # compute the padding size\n if isinstance(padding, (tuple, list)):\n if len(padding) == 2:\n pad_h = padding[0] * 2\n pad_w = padding[1] * 2\n elif len(padding) == 4:\n return padding[0], padding[1], padding[2], padding[3]\n else:\n raise ValueError(\"Size of padding can only be 2 or 4\")\n elif isinstance(padding, int):\n pad_h = pad_w = padding * 2\n elif padding == \"VALID\":\n pad_h = 0\n pad_w = 0\n elif padding == \"SAME\":\n pad_h = kernel[0] - 1\n pad_w = kernel[1] - 1\n else:\n raise ValueError(f\"Unknown padding option {padding}\")\n pad_top = (pad_h + 1) // 2\n pad_left = (pad_w + 1) // 2\n return pad_top, pad_left, pad_h - pad_top, pad_w - pad_left" }, { "identifier": "gemm_4x4_int8_int8_int32", "path": "python/tvm/topi/arm_cpu/tensor_intrin.py", "snippet": "def gemm_4x4_int8_int8_int32(M, N, K, unroll, in_type):\n \"\"\"\n Int8 4x4 matrix multiplication and accumulation using a sequence of\n umull -> uadalp -> umull2 -> uadalp instructions. This function\n takes two arrays of int8 data type A[4][K] and B[4][K], and produces\n a 4x4 matrix which is equal to A*B'.\n\n The pseudo code is as follows.\n\n .. 
code-block:: c\n\n void gemm_4x4_int8_int8_int32(int8 A[4][K], int8 B[4][K], int32 C[4][4]){\n for (int i = 0; i < 4; i++){\n for (int j = 0; j < 4; j++){\n for (int k = 0; k < K; k++){\n C[i][j] += A[i][k] * B[j][k]\n }\n }\n }\n\n Notes:\n * The tiling strategy is picked to maximize register usage.\n\n Parameters\n ----------\n M : int\n rows of the matrix A\n N : int\n columns of the matrix B\n K : int\n columns of matrix A\n unroll : bool\n Unroll the loop accumulation if True\n in_type : str, {'uint8', 'int8'}\n\n Returns\n -------\n intrin : TensorIntrin\n The ARM uint8/int8 TensorIntrin that can be used in tensorizing schedule\n \"\"\"\n assert in_type in [\"uint8\", \"int8\"]\n A = te.placeholder((K // 16, te.var(\"m\"), 16), dtype=in_type, name=\"A\")\n B = te.placeholder((K // 16, te.var(\"n\"), 16), dtype=in_type, name=\"B\")\n dtype_vec = in_type + \"x16\"\n idxm = tvm.tir.indexmod\n\n k = te.reduce_axis((0, K), \"k\")\n C = te.compute(\n (te.var(\"m\"), te.var(\"n\")),\n lambda x, y: te.sum(\n A[k // 16, x, idxm(k, 16)].astype(\"int32\") * B[k // 16, y, idxm(k, 16)].astype(\"int32\"),\n axis=k,\n ),\n name=\"C\",\n )\n\n a_buffer = tvm.tir.decl_buffer(\n A.shape,\n dtype=in_type,\n name=\"a_buffer\",\n offset_factor=1,\n strides=[te.var(\"sa_1\"), te.var(\"sa_2\"), 1],\n )\n\n b_buffer = tvm.tir.decl_buffer(\n B.shape,\n dtype=in_type,\n name=\"b_buffer\",\n offset_factor=1,\n strides=[te.var(\"sb_1\"), te.var(\"sb_2\"), 1],\n )\n\n c_buffer = tvm.tir.decl_buffer(\n C.shape, dtype=\"int32\", name=\"c_buffer\", offset_factor=1, strides=[te.var(\"sc\"), 1]\n )\n\n # Intrinsics used in the following algorithm\n umull_intrin = \"llvm.aarch64.neon.umull\" if in_type == \"uint8\" else \"llvm.aarch64.neon.smull\"\n uaddlp_intrin = \"llvm.aarch64.neon.uaddlp\" if in_type == \"uint8\" else \"llvm.aarch64.neon.saddlp\"\n addp_intrin = \"llvm.aarch64.neon.addp\"\n\n def uadalp(a, b):\n \"\"\"Add pair and accumulate\n\n Parameters:\n ----------\n a: int16x8 vector\n b: int16x8 vector\n\n Returns:\n --------\n return a int32x4 vector\n\n Pseudocode:\n ----------\n a += (b0+b1, b2+b3, b4+b5, b6+b7)\n \"\"\"\n\n return a + tvm.tir.call_llvm_pure_intrin(\n \"int32x4\", uaddlp_intrin, tvm.tir.const(1, \"uint32\"), b\n )\n\n def umull(a, b):\n \"\"\"Multiply long (higher part)\n\n Parameters:\n ----------\n a: int8x16 vector\n b: int8x16 vector\n\n Returns:\n --------\n return a int16x8 vector\n\n Pseudocode:\n ----------\n c = (a0*b0, a1*b1, a2*b2, a3*b3, a4*b4, a5*b5, a6*b6, a7*b7)\n \"\"\"\n a_high = tvm.tir.call_intrin(\"int8x8\", \"tir.vectorhigh\", a)\n b_high = tvm.tir.call_intrin(\"int8x8\", \"tir.vectorhigh\", b)\n c = tvm.tir.call_llvm_pure_intrin(\n \"int16x8\", umull_intrin, tvm.tir.const(2, \"uint32\"), a_high, b_high\n )\n return c\n\n def umull2(a, b):\n \"\"\"Multiply long (lower part)\n\n Parameters:\n ----------\n a: int8x16 vector\n b: int8x16 vector\n\n Returns:\n --------\n return a int16x8 vector\n\n Pseudocode:\n ----------\n c = (a8*b8, a9*b9, a10*b10, a11*b11, a12*b12, a13*b13, a14*b14, a15*b15)\n \"\"\"\n a_low = tvm.tir.call_intrin(\"int8x8\", \"tir.vectorlow\", a)\n b_low = tvm.tir.call_intrin(\"int8x8\", \"tir.vectorlow\", b)\n c = tvm.tir.call_llvm_pure_intrin(\n \"int16x8\", umull_intrin, tvm.tir.const(2, \"uint32\"), a_low, b_low\n )\n return c\n\n def addp(a, b):\n \"\"\"Add two vectors in pairs\n\n Parameters:\n ----------\n a: int32x4 vector\n b: int32x4 vector\n\n Returns:\n --------\n return a int32x4 vector\n\n Pseudocode:\n ----------\n c = (a0+a1, a2+a3, 
b0+b1, b0+b3)\n \"\"\"\n return tvm.tir.call_llvm_pure_intrin(\n \"int32x4\", addp_intrin, tvm.tir.const(2, \"uint32\"), a, b\n )\n\n def accumulation_loop(M, N, ins, acc, tile_idx):\n \"\"\"Internal tile accumulation. This function\n takes two arrays of int8 data type A[tile_idx][4][16] and B[tile_idx][4][16], produces\n a 4x4 matrix which is equal to A*B' and accumulates into C[4][4]\n\n The pseudo code is as follows.\n\n .. code-block:: c\n\n void gemm_4x4_int8_int8_int32(int8 A[tile_idx][4][K],\n int8 B[tile_idx][4][K],\n int32 C[4][4]){\n for (int i = 0; i < 4; i++){\n for (int j = 0; j < 4; j++){\n for (int k = 0; k < 16; k++){\n C[i][j] += A[tile_idx][i][k] * B[tile_idx][j][k]\n }\n }\n }\n\n Notes:\n * The tiling strategy is picked to maximize register usage.\n\n Parameters:\n ----------\n M : int\n Number of total rows of the output matrix\n N : int\n Number of total columns of the output matrix\n ins : list of tvm.tir.buffer\n Input buffers\n acc : tvm.tir.ir_builder.BufferVar\n Bank of register accumulators\n tiled_idx : int\n Index of a sub-tile of A and B in A[tile_idx][:][:] and B[tile_idx][:][:].\n Please note that 0 <= tile_idx <= K//16\n\n \"\"\"\n a0 = ins[0].vload([tile_idx, 0, 0], dtype_vec)\n a1 = tvm.tir.const(0, \"int8x16\")\n if M > 1:\n a1 = ins[0].vload([tile_idx, 1, 0], dtype_vec)\n a2 = tvm.tir.const(0, \"int8x16\")\n if M > 2:\n a2 = ins[0].vload([tile_idx, 2, 0], dtype_vec)\n a3 = tvm.tir.const(0, \"int8x16\")\n if M > 3:\n a3 = ins[0].vload([tile_idx, 3, 0], dtype_vec)\n\n b0 = ins[1].vload([tile_idx, 0, 0], dtype_vec)\n b1 = tvm.tir.const(0, \"int8x16\")\n if N > 1:\n b1 = ins[1].vload([tile_idx, 1, 0], dtype_vec)\n b2 = tvm.tir.const(0, \"int8x16\")\n if N > 2:\n b2 = ins[1].vload([tile_idx, 2, 0], dtype_vec)\n b3 = tvm.tir.const(0, \"int8x16\")\n if N > 3:\n b3 = ins[1].vload([tile_idx, 3, 0], dtype_vec)\n\n # First half\n # Lower part of a0 * {b0,b1,b2,b3}\n d00 = umull(a0, b0)\n d01 = umull(a0, b1)\n d02 = umull(a0, b2)\n d03 = umull(a0, b3)\n\n # Lower part of a1 * {b0,b1,b2,b3}\n d10 = umull(a1, b0)\n d11 = umull(a1, b1)\n d12 = umull(a1, b2)\n d13 = umull(a1, b3)\n\n # Accumulate\n acc[0] = uadalp(acc[0], d00)\n acc[1] = uadalp(acc[1], d01)\n acc[2] = uadalp(acc[2], d02)\n acc[3] = uadalp(acc[3], d03)\n acc[4] = uadalp(acc[4], d10)\n acc[5] = uadalp(acc[5], d11)\n acc[6] = uadalp(acc[6], d12)\n acc[7] = uadalp(acc[7], d13)\n\n # Higher part of a0 * {b0,b1,b2,b3}\n d00 = umull2(a0, b0)\n d01 = umull2(a0, b1)\n d02 = umull2(a0, b2)\n d03 = umull2(a0, b3)\n\n # Higher part of a1 * {b0,b1,b2,b3}\n d10 = umull2(a1, b0)\n d11 = umull2(a1, b1)\n d12 = umull2(a1, b2)\n d13 = umull2(a1, b3)\n\n # Accumulate again\n acc[0] = uadalp(acc[0], d00)\n acc[1] = uadalp(acc[1], d01)\n acc[2] = uadalp(acc[2], d02)\n acc[3] = uadalp(acc[3], d03)\n acc[4] = uadalp(acc[4], d10)\n acc[5] = uadalp(acc[5], d11)\n acc[6] = uadalp(acc[6], d12)\n acc[7] = uadalp(acc[7], d13)\n\n # Second half\n # Lower part of a2 * {b0,b1,b2,b3}\n d00 = umull(a2, b0)\n d01 = umull(a2, b1)\n d02 = umull(a2, b2)\n d03 = umull(a2, b3)\n\n # Lower part of a3 * {b0,b1,b2,b3}\n d10 = umull(a3, b0)\n d11 = umull(a3, b1)\n d12 = umull(a3, b2)\n d13 = umull(a3, b3)\n\n # Accumulate\n acc[8] = uadalp(acc[8], d00)\n acc[9] = uadalp(acc[9], d01)\n acc[10] = uadalp(acc[10], d02)\n acc[11] = uadalp(acc[11], d03)\n acc[12] = uadalp(acc[12], d10)\n acc[13] = uadalp(acc[13], d11)\n acc[14] = uadalp(acc[14], d12)\n acc[15] = uadalp(acc[15], d13)\n\n # Higher part of a2 * {b0,b1,b2,b3}\n d00 = umull2(a2, b0)\n d01 
= umull2(a2, b1)\n d02 = umull2(a2, b2)\n d03 = umull2(a2, b3)\n\n # Lower part of a3 * {b0,b1,b2,b3}\n d10 = umull2(a3, b0)\n d11 = umull2(a3, b1)\n d12 = umull2(a3, b2)\n d13 = umull2(a3, b3)\n\n # Accumulate\n acc[8] = uadalp(acc[8], d00)\n acc[9] = uadalp(acc[9], d01)\n acc[10] = uadalp(acc[10], d02)\n acc[11] = uadalp(acc[11], d03)\n acc[12] = uadalp(acc[12], d10)\n acc[13] = uadalp(acc[13], d11)\n acc[14] = uadalp(acc[14], d12)\n acc[15] = uadalp(acc[15], d13)\n\n def _intrin_func(ins, outs):\n def _instr():\n ib = tvm.tir.ir_builder.create()\n # Allocate a local buffer (possibly translates to registers)\n acc = ib.allocate(\"int32x4\", 16, name=\"accs\", scope=\"local\")\n m = outs[0].shape[0]\n n = outs[0].shape[1]\n # Initialization\n for i in range(0, 16):\n acc[i] = tvm.tir.const(0, \"int32x4\")\n\n if unroll:\n for i in range(0, int(K // 16)):\n accumulation_loop(M, N, ins, acc, i)\n else:\n with ib.for_range(0, K // 16, name=\"i\") as i:\n accumulation_loop(M, N, ins, acc, i)\n\n # Final accumulations\n # acc[4*r + c] contains the partial accumulations of element C[r][c]\n #\n # In particular:\n # acc[4*r] contains the partial sums of a[r,0:K].*b[0,0:K] -> (a,b,c,d)\n # acc[4*r+1] contains the partial sums of a[r, 0:K].*b[1,0:K] -> (e,f,g,h)\n # acc[4*r+2] contains the partial sums of a[r, 0:K].*b[2,0:K] -> (i,j,k,l)\n # acc[4*r+3] contains the partial sums of a[r, 0:K].*b[3,0:K] -> (m,n,o,p)\n #\n # Please note that 0<= r, c < 4\n\n acc[0] = addp(acc[0], acc[1]) # (a+b, c+d, e+f, g+h)\n acc[1] = addp(acc[2], acc[3]) # (i+j, k+l, m+n, o+p)\n acc[0] = addp(acc[0], acc[1]) # (a+b+c+d, e+f+g+h, i+j+k+l, m+n+o+p)\n\n acc[4] = addp(acc[4], acc[5]) # (a+b, c+d, e+f, g+h)\n acc[5] = addp(acc[6], acc[7]) # (i+j, k+l, m+n, o+p)\n acc[4] = addp(acc[4], acc[5]) # (a+b+c+d, e+f+g+h, i+j+k+l, m+n+o+p)\n\n acc[8] = addp(acc[8], acc[9]) # (a+b, c+d, e+f, g+h)\n acc[9] = addp(acc[10], acc[11]) # (i+j, k+l, m+n, o+p)\n acc[8] = addp(acc[8], acc[9]) # (a+b+c+d, e+f+g+h, i+j+k+l, m+n+o+p)\n\n acc[12] = addp(acc[12], acc[13]) # (a+b, c+d, e+f, g+h)\n acc[13] = addp(acc[14], acc[15]) # (i+j, k+l, m+n, o+p)\n acc[12] = addp(acc[12], acc[13]) # (a+b+c+d, e+f+g+h, i+j+k+l, m+n+o+p)\n\n # Store the result\n if N > 3:\n out_0 = acc[0]\n out_1 = acc[4]\n out_2 = acc[8]\n out_3 = acc[12]\n elif N > 2:\n out_0 = tvm.tir.call_intrin(\"int32x3\", \"tir.reinterpret\", acc[0])\n out_1 = tvm.tir.call_intrin(\"int32x3\", \"tir.reinterpret\", acc[4])\n out_2 = tvm.tir.call_intrin(\"int32x3\", \"tir.reinterpret\", acc[8])\n out_3 = tvm.tir.call_intrin(\"int32x3\", \"tir.reinterpret\", acc[12])\n elif N > 1:\n out_0 = tvm.tir.call_intrin(\"int32x2\", \"tir.reinterpret\", acc[0])\n out_1 = tvm.tir.call_intrin(\"int32x2\", \"tir.reinterpret\", acc[4])\n out_2 = tvm.tir.call_intrin(\"int32x2\", \"tir.reinterpret\", acc[8])\n out_3 = tvm.tir.call_intrin(\"int32x2\", \"tir.reinterpret\", acc[12])\n else:\n out_0 = tvm.tir.call_intrin(\"int32\", \"tir.reinterpret\", acc[0])\n out_1 = tvm.tir.call_intrin(\"int32\", \"tir.reinterpret\", acc[4])\n out_2 = tvm.tir.call_intrin(\"int32\", \"tir.reinterpret\", acc[8])\n out_3 = tvm.tir.call_intrin(\"int32\", \"tir.reinterpret\", acc[12])\n\n ib.emit(outs[0].vstore([0, 0], out_0))\n if M > 1:\n ib.emit(outs[0].vstore([1, 0], out_1))\n if M > 2:\n ib.emit(outs[0].vstore([2, 0], out_2))\n if M > 3:\n ib.emit(outs[0].vstore([3, 0], out_3))\n return ib.get()\n\n # body, reset, update\n return _instr()\n\n buffer_params = {\"offset_factor\": 1}\n return te.decl_tensor_intrin(\n 
C.op,\n _intrin_func,\n binds={A: a_buffer, B: b_buffer, C: c_buffer},\n default_buffer_params=buffer_params,\n )" }, { "identifier": "gemm_acc_4x4_int8_int8_int32", "path": "python/tvm/topi/arm_cpu/tensor_intrin.py", "snippet": "def gemm_acc_4x4_int8_int8_int32(dtype):\n \"\"\"\n Int8 4x4 matrix multiplication and accumulation using sdot/udot\n instructions. This function takes two arrays of int8 datatype\n -- A[4][4] and B[4][4] and produces a 4x4 matrix\n which is equal to A*B'.\n\n The pseudo code is as follows.\n\n .. code-block:: c\n\n void gemm_acc_4x4_int8_int8_int32(int8 A[4][4], int8 B[4][4], int32 C[4][4]){\n for (int i = 0; i < 4; i++){\n for (int j = 0; j < 4; j++){\n for (int k = 0; k < 4; k++){\n C[i][j] += A[i][k] * B[j][k]\n }\n }\n }\n\n Notes:\n * The tiling strategy is picked to maximize register usage.\n\n Parameters\n ----------\n dtype : str, {\"uint8\", \"int8\"}\n Whether it works on unsigned int or signed int\n\n Returns\n -------\n intrin : TensorIntrin\n The Arm TensorIntrin that can be used in tensorizing schedule\n \"\"\"\n assert dtype in [\"uint8\", \"int8\"]\n # This needs to be a variable number of \"rows\" since TVM\n # \"thinks\" I only need to compute one row because of\n # padding\n A = te.placeholder((te.var(\"rows\"), 4), dtype, name=\"A\")\n B = te.placeholder((4, 4), dtype, name=\"B\")\n dtype_vec = dtype + \"x16\"\n\n k = te.reduce_axis((0, 4), name=\"k\")\n C = te.compute(\n (te.var(\"rows\"), 4),\n lambda i, j: te.sum(A[i, k].astype(\"int32\") * B[j, k].astype(\"int32\"), axis=k),\n name=\"C\",\n )\n\n aa_buffer = tvm.tir.decl_buffer(\n A.shape, dtype, name=\"aa_buffer\", offset_factor=1, strides=[te.var(\"sa\"), 1]\n )\n bb_buffer = tvm.tir.decl_buffer(\n B.shape, dtype, name=\"bb_buffer\", offset_factor=1, strides=[te.var(\"sb\"), 1]\n )\n cc_buffer = tvm.tir.decl_buffer(\n C.shape, dtype=\"int32\", name=\"cc_buffer\", offset_factor=1, strides=[te.var(\"sc\"), 1]\n )\n\n llvm_intrin = \"llvm.aarch64.neon.sdot\" if dtype == \"int8\" else \"llvm.aarch64.neon.udot\"\n\n def _intrin_func(ins, outs):\n def _instr(index):\n ib = tvm.tir.ir_builder.create()\n if index == 1:\n for i in range(0, 4):\n ib.emit(outs[0].vstore([i, 0], tvm.tir.const(0, \"int32x4\")))\n return ib.get()\n # Load all the elements of tile A.\n # vec_a = [a, b, c, d,\n # e, f, g, h,\n # l, m, n, o,\n # p, q, r, s];\n vec_a = ins[0].vload([0, 0], dtype_vec)\n\n # Replicate 4 times the i-th row of A. For instance,\n # vec_a[0] = [a, b, c, d,\n # a, b, c, d,\n # a, b, c, d,\n # a, b, c, d,];\n vec_aa = [select_word(vec_a, i, dtype_vec) for i in range(0, 4)]\n\n # Load all the elements of B. Remember that B\n # is transposed:\n # vec_b = [0, 4, 8, 12,\n # 1, 5, 9, 13,\n # 2, 6, 10, 14,\n # 3, 7, 11, 15,];\n vec_b = ins[1].vload([0, 0], dtype_vec)\n\n # Execute the dot product\n for i in range(0, 4):\n vec_c = outs[0].vload([i, 0], \"int32x4\")\n # Compute the product between the i-th row of A\n # and all the rows of B. 
Remember that sdot/udot\n # subdive the input vectors in 16 elements\n # and then take the dot product among each group.\n # The result is stored in a int32x4 register\n #\n # For instance, for i=0, we have:\n # sdot(vec_aa[0], vec_b) = [a*0+b*4+c*8+d*12,\n # a*1+b*5+c*9+d*13,\n # a*2+b*6+c*10+d*14,\n # a*3+b*7+c*11+d*15]\n vdot = tvm.tir.call_llvm_intrin(\n \"int32x4\", llvm_intrin, tvm.tir.const(3, \"uint32\"), vec_c, vec_b, vec_aa[i]\n )\n\n # Store the result\n ib.emit(outs[0].vstore([i, 0], vdot))\n\n return ib.get()\n\n # body, reset, update\n return _instr(0), _instr(1), _instr(2)\n\n buffer_params = {\"offset_factor\": 1}\n return te.decl_tensor_intrin(\n C.op,\n _intrin_func,\n binds={A: aa_buffer, B: bb_buffer, C: cc_buffer},\n default_buffer_params=buffer_params,\n )" }, { "identifier": "gemm_acc_nx16_int8_int8_int32", "path": "python/tvm/topi/arm_cpu/tensor_intrin.py", "snippet": "def gemm_acc_nx16_int8_int8_int32(dtype, rows):\n \"\"\"\n Int8 nx16 matrix multiplication and accumulation using sdot/udot instructions\n This function takes two arrays of int8 datatype -- A[n][4] and\n B[4][16] and produces a rowsx16 matrix which is equal to A*B'\n The pseudo code is as follows.\n\n .. code-block:: c\n\n void mmla_nx16_int8_int8_int32(int8 A[n][16], int8 B[4][16][4], int32 output[n][16]){\n for (int i = 0; i < n; i++){\n for (int j = 0; j < 16; j++){\n for (int k = 0; k < 16; k++){\n out[i][j] += A[i][k] * B[k//4][j][k%4]\n }\n }\n }\n }\n\n Notes:\n * The tile size of B is 16x4. Since the reduction variable k moves between 0 and 16\n we need 4 tiles of B to compute a single row of the output. The first 4 values of\n k will be fetched from B[0][j][k], the second batch of 4 from B[1][j][k] and so on\n * The tiling strategy is picked to maximize register usage.\n\n Parameters\n ----------\n dtype : str, {\"uint8\", \"int8\"}\n Whether it works on unsigned int or signed int\n rows : int\n Number of the output rows \"n\"\n\n Returns\n -------\n intrin : TensorIntrin\n The Arm TensorIntrin that can be used in tensorizing schedule\n \"\"\"\n assert dtype in [\"uint8\", \"int8\"]\n A = te.placeholder((rows, 16), dtype, name=\"A\")\n B = te.placeholder((4, 16, 4), dtype, name=\"B\")\n dtype_vec = dtype + \"x16\"\n idxm = tvm.tir.indexmod\n k = te.reduce_axis((0, 16), name=\"k\")\n C = te.compute(\n (rows, 16),\n lambda i, j: te.sum(\n A[i, k].astype(\"int32\") * B[k // 4, j, idxm(k, 4)].astype(\"int32\"), axis=k\n ),\n name=\"C\",\n )\n\n aa_buffer = tvm.tir.decl_buffer(\n A.shape, dtype, name=\"aa_buffer\", offset_factor=1, strides=[te.var(\"sa\"), 1]\n )\n bb_buffer = tvm.tir.decl_buffer(\n B.shape, dtype, name=\"bb_buffer\", offset_factor=1, strides=[te.var(\"sb0\"), te.var(\"sb1\"), 1]\n )\n cc_buffer = tvm.tir.decl_buffer(\n C.shape, dtype=\"int32\", name=\"cc_buffer\", offset_factor=1, strides=[te.var(\"sc\"), 1]\n )\n\n llvm_intrin = \"llvm.aarch64.neon.sdot\" if dtype == \"int8\" else \"llvm.aarch64.neon.udot\"\n\n def _intrin_func(ins, outs):\n def _instr(index):\n ib = tvm.tir.ir_builder.create()\n if index == 1:\n for i in range(0, rows):\n ib.emit(outs[0].vstore([i, 0], tvm.tir.const(0, \"int32x16\")))\n return ib.get()\n # Iterate on the number of rows of the output\n for k in range(0, rows):\n # Load 16 elements of A\n # vec_a = [a, b, c, d, e, f, g, h, l, m, n, o, p, q, r, s];\n vec_a = ins[0].vload([k, 0], dtype_vec)\n\n # Iterate over each of the 4 rowsx4 tiles of the output\n for j in range(0, 4):\n # Accumulate over each of the 4 (16x4) tiles contained in B\n for i in 
range(0, 4):\n # Replicate a single 4-element group of A (A[k, i:i+4])\n vec_aa = select_word(vec_a, i, dtype_vec)\n\n # Load 4 rows (each rows with 4 elements) from B (B[i:i+4, j:j+4])\n # vec_b = [0, 16, 32, 48,\n # 1, 17, 33, 49,\n # 2, 18, 34, 50,\n # 3, 19, 35, 51,];\n vec_b = ins[1].vload([i, 4 * j, 0], dtype_vec)\n\n # Accumulate in the correct part of the output\n vec_c = outs[0].vload([k, 4 * j], \"int32x4\")\n\n # Compute the dot product between the rowsx4 tile\n # from A and the 4x4 tile from B\n #\n # For instance, for i=0, we have:\n # sdot(vec_aa[0], vec_b) = [a*0+b*16+c*32+d*48,\n # a*1+b*17+c*33+d*49,\n # a*2+b*18+c*34+d*50,\n # a*3+b*19+c*35+d*51]\n vdot = tvm.tir.call_llvm_intrin(\n \"int32x4\", llvm_intrin, tvm.tir.const(3, \"uint32\"), vec_c, vec_b, vec_aa\n )\n ib.emit(outs[0].vstore([k, 4 * j], vdot))\n return ib.get()\n\n # body, reset, update\n return _instr(0), _instr(1), _instr(2)\n\n buffer_params = {\"offset_factor\": 1}\n return te.decl_tensor_intrin(\n C.op,\n _intrin_func,\n binds={A: aa_buffer, B: bb_buffer, C: cc_buffer},\n default_buffer_params=buffer_params,\n )" }, { "identifier": "gemm_acc_2x2_int8_int8_int32", "path": "python/tvm/topi/arm_cpu/tensor_intrin.py", "snippet": "def gemm_acc_2x2_int8_int8_int32(dtype):\n \"\"\"\n Int8 2x2 matrix multiplication using smmla/ummla instructions\n This function takes two arrays of int8 datatype -- A[2][8] and\n B[2][8] and produces a 2x2 matrix which is equal to A*B'\n The pseudo code is as follows.\n\n .. code-block:: c\n\n void mmla_2x2_int8_int8_int32(int8 A[2][8], int8 B[2][8], int32 C[2][2]){\n for (int i = 0; i < 2; i++){\n for (int j = 0; j < 2; j++){\n for (int k = 0; k < 8; k++){\n C[i][j] += A[i][k] * B[j][k]\n }\n }\n }\n\n Parameters\n ----------\n dtype : str, {\"uint8\", \"int8\"}\n Whether it works on unsigned int or signed int\n\n Returns\n -------\n intrin : TensorIntrin\n The Arm TensorIntrin that can be used in tensorizing schedule\n \"\"\"\n assert dtype in [\"uint8\", \"int8\"]\n A = te.placeholder((2, 8), dtype, name=\"A\")\n B = te.placeholder((2, 8), dtype, name=\"B\")\n dtype_vec = dtype + \"x16\"\n\n k = te.reduce_axis((0, 8), name=\"k\")\n C = te.compute(\n (2, 2),\n lambda i, j: te.sum(A[i, k].astype(\"int32\") * B[j, k].astype(\"int32\"), axis=k),\n name=\"C\",\n )\n\n aa_buffer = tvm.tir.decl_buffer(\n A.shape, dtype, name=\"aa_buffer\", offset_factor=1, strides=[te.var(\"sa\"), 1]\n )\n bb_buffer = tvm.tir.decl_buffer(\n B.shape, dtype, name=\"bb_buffer\", offset_factor=1, strides=[te.var(\"sb\"), 1]\n )\n cc_buffer = tvm.tir.decl_buffer(\n C.shape, dtype=\"int32\", name=\"cc_buffer\", offset_factor=1, strides=[te.var(\"sc\"), 1]\n )\n\n llvm_intrin = \"llvm.aarch64.neon.smmla\" if dtype == \"int8\" else \"llvm.aarch64.neon.ummla\"\n\n def _intrin_func(ins, outs):\n def _instr(index):\n ib = tvm.tir.ir_builder.create()\n if index == 1:\n ib.emit(outs[0].vstore([0, 0], tvm.tir.const(0, \"int32x4\")))\n return ib.get()\n # Load in vec_a the two rows of A\n # vec_a = [a, b, c, d, e, f, g, h;\n # i, j, k, l, m, n, o, p,]\n vec_a = ins[0].vload([0, 0], dtype_vec)\n # Load in vec_b the two rows of B\n # vec_b = [0, 2, 4, 6, 8, 10, 12, 14;\n # 1, 3, 5, 7, 9, 11, 13, 14,]\n vec_b = ins[1].vload([0, 0], dtype_vec)\n\n # Execute the matrix multiplication via (s/u)mmla:\n # vec_c = [a*0 + b*2 + c*4 + d*6 +e*8 + f*10 + g*12 + h*14;\n # a*1 + b*3 + c*5 + d*7 +e*9 + f*11 + g*13 + h*15;\n # i*0 + j*2 + k*4 + l*6 +m*8 + n*10 + o*12 + p*14;\n # i*1 + j*3 + k*5 + l*7 +m*9 + n*11 + o*13 + p*15]\n 
vec_c = outs[0].vload([0, 0], \"int32x4\")\n vmmla = tvm.tir.call_llvm_intrin(\n \"int32x4\", llvm_intrin, tvm.tir.const(3, \"uint32\"), vec_c, vec_a, vec_b\n )\n # Store the result\n ib.emit(outs[0].vstore([0, 0], vmmla))\n return ib.get()\n\n # body, reset, update\n return _instr(0), _instr(1), _instr(2)\n\n buffer_params = {\"offset_factor\": 1}\n return te.decl_tensor_intrin(\n C.op,\n _intrin_func,\n binds={A: aa_buffer, B: bb_buffer, C: cc_buffer},\n default_buffer_params=buffer_params,\n )" } ]
import tvm from tvm.target import Target from tvm import te from tvm.topi import nn from tvm.autotvm.task.space import AnnotateEntity, ReorderEntity, OtherOptionEntity from ..utils import get_const_tuple, get_const_int from ..nn.utils import get_pad_tuple from .tensor_intrin import ( gemm_4x4_int8_int8_int32, gemm_acc_4x4_int8_int8_int32, gemm_acc_nx16_int8_int8_int32, gemm_acc_2x2_int8_int8_int32, )
13,315
).astype(out_dtype), name="C", ) else: # Execute GEMM C_interleaved = te.compute( (batches, M_padded // tile_rows_A, N_transformed, tile_rows_A, tile_rows_B), lambda b, x, y, w, z: te.sum( A_interleaved[b, x, k // tile_cols_A, w, idxm(k, tile_cols_A)].astype("int32") * B_interleaved_t[y, k // tile_cols_B, z, idxm(k, tile_cols_B)].astype("int32"), axis=k, ), name="C_interleaved", ) # Unpack the result C = te.compute( (batches, M, N), lambda b, x, y: C_interleaved[ b, x // tile_rows_A, y // tile_rows_B, idxm(x, tile_rows_A), idxm(y, tile_rows_B), ].astype(out_dtype), name="C", ) zero = tvm.tir.const(0) else: # No need to pack/unpack, execute GEMM directly C = te.compute( (batches, M_padded, N_padded), lambda b, x, y: te.sum( A[b, x, k].astype("int32") * B_interleaved_t[ y // tile_rows_B, k // tile_cols_B, idxm(y, tile_rows_B), idxm(k, tile_cols_B) ].astype("int32"), axis=k, ), name="C", ) # We need to ensure that infer bound pass does not remove the padding # which is necessary for the tensorizations to work. So we need to # add a dummy reference to the padding area of the result zero = ( tvm.tir.const(1, C.dtype) * C[0, M_padded - 1, N_padded - 1] - tvm.tir.const(1, C.dtype) * C[0, M_padded - 1, N_padded - 1] ) # Reshape the result into a convolution output out_shape = (batches, OH, OW, OC) out = te.compute( out_shape, lambda b, x, y, z: (C(b, y + OW * x, z) + zero).astype(out_dtype), name="conv2d_gemm_output", ) return out def schedule_conv2d_gemm_interleaved(cfg, s, out, final_out): """Schedule the conv2d_gemm interleaved strategy""" C = out.op.input_tensors[0] C_interleaved = C.op.input_tensors[0] A_interleaved = C_interleaved.op.input_tensors[0] # Input transform A_interleaved_input = A_interleaved.op.input_tensors[0] if A_interleaved_input.op.name == "A_padded_K" or A_interleaved_input.op.name == "A_padded_M": s[A_interleaved_input].compute_at(s[A_interleaved], A_interleaved.op.axis[3]) s[A_interleaved_input].vectorize(A_interleaved_input.op.axis[2]) s[A_interleaved_input].compute_inline() data_im2col = A_interleaved_input.op.input_tensors[0] else: data_im2col = A_interleaved_input b, m, n = data_im2col.op.axis if data_im2col.op.name == "data_im2col": n_size = data_im2col.shape[2] if n_size % 16 == 0: split_factor = 16 else: split_factor = 8 n_outer, n_inner = s[data_im2col].split(n, split_factor) s[data_im2col].unroll(n_outer) s[data_im2col].vectorize(n_inner) b_m_fused = s[data_im2col].fuse(b, m) s[data_im2col].parallel(b_m_fused) else: s[data_im2col].compute_inline() # Computation(through tensorize) b, xo, yo, xi, yi = C_interleaved.op.axis[0:5] outer_gemm, inner_gemm = cfg["reorder_gemm"].apply(s, C_interleaved, [xo, yo]) b_outer_gemm_fused = s[C_interleaved].fuse(b, outer_gemm) s[C_interleaved].parallel(b_outer_gemm_fused) s[A_interleaved].compute_at(s[C_interleaved], b_outer_gemm_fused) _, _, _, outer_A_interleaved, inner_A_interleaved = A_interleaved.op.axis cfg["A_interleaved_unroll_vec"].apply( s, A_interleaved, [outer_A_interleaved, inner_A_interleaved] ) in_type = A_interleaved.dtype out_type = C.dtype k = C_interleaved.op.reduce_axis[0] _, M, N = C.shape if in_type in ["int8", "uint8"]: target = Target.current(allow_none=False) if target.features.has_matmul_i8: gemm_acc = gemm_acc_2x2_int8_int8_int32(in_type) xi_inner, yi_inner = C_interleaved.op.axis[-2:] k_outer, k_inner = s[C_interleaved].split(k, 8) s[C_interleaved].reorder( b_outer_gemm_fused, inner_gemm, k_outer, xi, yi, xi_inner, yi_inner, k_inner ) s[C_interleaved].tensorize(xi_inner, gemm_acc) 
s[C_interleaved].unroll(xi) s[C_interleaved].unroll(yi) elif target.features.has_dotprod:
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=invalid-name, unused-variable, too-many-locals # pylint: disable=unused-argument, redefined-builtin """GEMM Convolution schedule on ARM""" def configure_knobs(cfg, M, K, target): """Configure auto-tuning knobs for the interleaved strategy""" x, y = cfg.axis(M // 4), cfg.axis(K // 16) cfg.define_reorder("reorder_gemm", [x, y], policy="candidate", candidate=[[x, y], [y, x]]) outer_loop, inner_loop = cfg.axis(4), cfg.axis(16) cfg.define_annotate( "A_interleaved_unroll_vec", [outer_loop, inner_loop], policy="try_unroll_vec" ) # Fallback configuration if cfg.is_fallback: cfg["reorder_gemm"] = ReorderEntity([0, 1]) cfg["A_interleaved_unroll_vec"] = AnnotateEntity(["unroll", "vec"]) if not target.features.has_dotprod: cfg.define_knob("gemm_quantized_unroll", [True, False]) if cfg.is_fallback: cfg["gemm_quantized_unroll"] = OtherOptionEntity(False) # Compute function def compute_conv2d_gemm_without_weight_transform( cfg, data, B_interleaved_t, strides, padding, dilation, out_dtype, kernel_size, output_channels, interleave_A, ): """Compute conv2d by transforming the input, executing GEMM and transforming the output back""" batches, IH, IW, IC = get_const_tuple(data.shape) KH, KW = get_const_tuple(kernel_size) OC = get_const_int(output_channels) kernel_area = KH * KW if isinstance(dilation, int): dilation_h = dilation_w = dilation else: dilation_h, dilation_w = get_const_tuple(dilation) dilated_kernel_h = (KH - 1) * dilation_h + 1 dilated_kernel_w = (KW - 1) * dilation_w + 1 pad_top, pad_left, pad_down, pad_right = get_pad_tuple( padding, (dilated_kernel_h, dilated_kernel_w) ) HSTR, WSTR = strides if isinstance(strides, (tuple, list)) else (strides, strides) OH = (IH + pad_top + pad_down - dilated_kernel_h) // HSTR + 1 OW = (IW + pad_left + pad_right - dilated_kernel_w) // WSTR + 1 if pad_top or pad_left: data_pad = nn.pad( data, [0, pad_top, pad_left, 0], [0, pad_down, pad_right, 0], name="data_pad" ) else: data_pad = data # Im2col M = OH * OW K = IC * kernel_area N = OC A_shape = (batches, M, K) if kernel_area == 1: A = tvm.topi.reshape(data_pad, A_shape) else: A = te.compute( A_shape, lambda n, x, y: data_pad[ n, HSTR * (x // OW) + dilation_h * ((y // IC) // KW), WSTR * (x % OW) + dilation_w * ((y // IC) % KW), y % IC, ], name="data_im2col", ) # Pad if necessary N_transformed = B_interleaved_t.shape[0] tile_rows_B = B_interleaved_t.shape[2] tile_cols_B = B_interleaved_t.shape[3] # Select the tiling strategy for A. # The tiling information is chosen to maximize register usage during # the tile computation. 
# # Please refer to: # - https://discuss.tvm.apache.org/t/rfc-improve-quantized-convolution-performance-for-armv8-architectures # pylint: disable=line-too-long # - https://discuss.tvm.apache.org/t/rfc-accelerate-quantized-convolution-through-dot-product # - https://discuss.tvm.apache.org/t/rfc-improve-quantized-convolution-through-mmla-instruction # - Conv2DGemmWeightTransformRel in src/relay/op/nn/convolution.h # In order to have more information # target = Target.current(allow_none=False) if target.features.has_matmul_i8: # If smmla/ummla is enabled, we are loading 8 rows from A. Each row # will contain 8 elements tile_rows_A = 8 tile_cols_A = 8 elif target.features.has_dotprod and interleave_A: # If dot product has been enabled, and we are interleaving A # tile size should be 8x4 tile_rows_A = 8 tile_cols_A = 4 else: # If either there is no dot product or if we are using a native strategy # tile size should be 4x16 tile_rows_A = 4 tile_cols_A = 16 pad_M = 0 pad_K = 0 if M % tile_rows_A != 0: pad_M = tile_rows_A - (M % tile_rows_A) if K % tile_cols_A != 0: pad_K = tile_cols_A - (K % tile_cols_A) M_padded = M + pad_M K_padded = K + pad_K N_padded = N_transformed * tile_rows_B pad_before = (0, 0, 0) pad_after = (0, pad_M, pad_K) if pad_K != 0: A = nn.pad(A, pad_before=pad_before, pad_after=pad_after, name="A_padded_K") elif pad_M != 0: A = nn.pad(A, pad_before=pad_before, pad_after=pad_after, name="A_padded_M") idxm = tvm.tir.indexmod k = te.reduce_axis((0, K_padded), "k") if interleave_A: # Configuration space configure_knobs(cfg, M_padded, K_padded, target) # Pack the input data A_interleaved = te.compute( (batches, M_padded // tile_rows_A, K_padded // tile_cols_A, tile_rows_A, tile_cols_A), lambda b, x, y, z, w: A[b, z + tile_rows_A * x, w + tile_cols_A * y], name="A_interleaved", ) target = Target.current(allow_none=False) if target.features.has_matmul_i8: # Execute GEMM. In the case of mmla, we need to enforce the tiling # from the compute. This is because mmla is doing a tiled computation # as well. So we have a big 8x12 tile, with small 2x2 sub-tiles # generated by mmla. 
In theory we could make the tile 2x2 and # fuse and split during scheduling, but this would not work # because of possible padding C_interleaved = te.compute( ( batches, M_padded // tile_rows_A, N_transformed, tile_rows_A // 2, tile_rows_B // 2, 2, 2, ), lambda b, x, y, w, z, s, t: te.sum( A_interleaved[b, x, k // tile_cols_A, 2 * w + s, idxm(k, tile_cols_A)].astype( "int32" ) * B_interleaved_t[y, k // tile_cols_B, 2 * z + t, idxm(k, tile_cols_B)].astype( "int32" ), axis=k, ), name="C_interleaved", ) # Ensure the padding needed for tensorize does not get removed during tir passes # by adding a dummy reference to the specific padded area of the result zero = ( tvm.tir.const(1, C_interleaved.dtype) * C_interleaved[ batches - 1, M // tile_rows_A, N_transformed - 1, idxm(M, tile_rows_A) // 2, tile_rows_B // 2 - 1, 1, 1, ] - tvm.tir.const(1, C_interleaved.dtype) * C_interleaved[ batches - 1, M // tile_rows_A, N_transformed - 1, idxm(M, tile_rows_A) // 2, tile_rows_B // 2 - 1, 1, 1, ] ) # Unpack the result C = te.compute( (batches, M, N), lambda b, x, y: ( C_interleaved[ b, x // tile_rows_A, y // tile_rows_B, idxm(x, tile_rows_A) // 2, idxm(y, tile_rows_B) // 2, idxm(idxm(x, tile_rows_A), 2), idxm(idxm(y, tile_rows_B), 2), ] + zero ).astype(out_dtype), name="C", ) else: # Execute GEMM C_interleaved = te.compute( (batches, M_padded // tile_rows_A, N_transformed, tile_rows_A, tile_rows_B), lambda b, x, y, w, z: te.sum( A_interleaved[b, x, k // tile_cols_A, w, idxm(k, tile_cols_A)].astype("int32") * B_interleaved_t[y, k // tile_cols_B, z, idxm(k, tile_cols_B)].astype("int32"), axis=k, ), name="C_interleaved", ) # Unpack the result C = te.compute( (batches, M, N), lambda b, x, y: C_interleaved[ b, x // tile_rows_A, y // tile_rows_B, idxm(x, tile_rows_A), idxm(y, tile_rows_B), ].astype(out_dtype), name="C", ) zero = tvm.tir.const(0) else: # No need to pack/unpack, execute GEMM directly C = te.compute( (batches, M_padded, N_padded), lambda b, x, y: te.sum( A[b, x, k].astype("int32") * B_interleaved_t[ y // tile_rows_B, k // tile_cols_B, idxm(y, tile_rows_B), idxm(k, tile_cols_B) ].astype("int32"), axis=k, ), name="C", ) # We need to ensure that infer bound pass does not remove the padding # which is necessary for the tensorizations to work. 
So we need to # add a dummy reference to the padding area of the result zero = ( tvm.tir.const(1, C.dtype) * C[0, M_padded - 1, N_padded - 1] - tvm.tir.const(1, C.dtype) * C[0, M_padded - 1, N_padded - 1] ) # Reshape the result into a convolution output out_shape = (batches, OH, OW, OC) out = te.compute( out_shape, lambda b, x, y, z: (C(b, y + OW * x, z) + zero).astype(out_dtype), name="conv2d_gemm_output", ) return out def schedule_conv2d_gemm_interleaved(cfg, s, out, final_out): """Schedule the conv2d_gemm interleaved strategy""" C = out.op.input_tensors[0] C_interleaved = C.op.input_tensors[0] A_interleaved = C_interleaved.op.input_tensors[0] # Input transform A_interleaved_input = A_interleaved.op.input_tensors[0] if A_interleaved_input.op.name == "A_padded_K" or A_interleaved_input.op.name == "A_padded_M": s[A_interleaved_input].compute_at(s[A_interleaved], A_interleaved.op.axis[3]) s[A_interleaved_input].vectorize(A_interleaved_input.op.axis[2]) s[A_interleaved_input].compute_inline() data_im2col = A_interleaved_input.op.input_tensors[0] else: data_im2col = A_interleaved_input b, m, n = data_im2col.op.axis if data_im2col.op.name == "data_im2col": n_size = data_im2col.shape[2] if n_size % 16 == 0: split_factor = 16 else: split_factor = 8 n_outer, n_inner = s[data_im2col].split(n, split_factor) s[data_im2col].unroll(n_outer) s[data_im2col].vectorize(n_inner) b_m_fused = s[data_im2col].fuse(b, m) s[data_im2col].parallel(b_m_fused) else: s[data_im2col].compute_inline() # Computation(through tensorize) b, xo, yo, xi, yi = C_interleaved.op.axis[0:5] outer_gemm, inner_gemm = cfg["reorder_gemm"].apply(s, C_interleaved, [xo, yo]) b_outer_gemm_fused = s[C_interleaved].fuse(b, outer_gemm) s[C_interleaved].parallel(b_outer_gemm_fused) s[A_interleaved].compute_at(s[C_interleaved], b_outer_gemm_fused) _, _, _, outer_A_interleaved, inner_A_interleaved = A_interleaved.op.axis cfg["A_interleaved_unroll_vec"].apply( s, A_interleaved, [outer_A_interleaved, inner_A_interleaved] ) in_type = A_interleaved.dtype out_type = C.dtype k = C_interleaved.op.reduce_axis[0] _, M, N = C.shape if in_type in ["int8", "uint8"]: target = Target.current(allow_none=False) if target.features.has_matmul_i8: gemm_acc = gemm_acc_2x2_int8_int8_int32(in_type) xi_inner, yi_inner = C_interleaved.op.axis[-2:] k_outer, k_inner = s[C_interleaved].split(k, 8) s[C_interleaved].reorder( b_outer_gemm_fused, inner_gemm, k_outer, xi, yi, xi_inner, yi_inner, k_inner ) s[C_interleaved].tensorize(xi_inner, gemm_acc) s[C_interleaved].unroll(xi) s[C_interleaved].unroll(yi) elif target.features.has_dotprod:
next_line: gemm_acc = gemm_acc_4x4_int8_int8_int32(in_type)
gold_snippet_index: 4
created_at: 2023-12-14 02:37:47+00:00
level: 16k
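The unpack compute in the GEMM code above is the inverse of a blocked (tiled) layout: each output element is fetched from its tile via floor-division and modulo on the row and column index. A small NumPy check of that index mapping (toy sizes, single batch and no 2x2 sub-tiling, names are illustrative):

```python
import numpy as np

M, N, tile_rows_A, tile_rows_B = 8, 8, 4, 4
C_plain = np.arange(M * N).reshape(M, N)

# Blocked layout (M // tile_rows_A, N // tile_rows_B, tile_rows_A, tile_rows_B),
# i.e. the shape of C_interleaved without the batch axis.
C_blocked = (
    C_plain.reshape(M // tile_rows_A, tile_rows_A, N // tile_rows_B, tile_rows_B)
    .transpose(0, 2, 1, 3)
)

# The unpack step reads element (x, y) back out of its tile exactly like this.
for x in range(M):
    for y in range(N):
        assert (
            C_blocked[x // tile_rows_A, y // tile_rows_B, x % tile_rows_A, y % tile_rows_B]
            == C_plain[x, y]
        )
print("unpack index mapping verified")
```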
repo_name: yolain/ComfyUI-Easy-Use
file_path: py/easyNodes.py
[ { "identifier": "advanced_encode", "path": "py/adv_encode.py", "snippet": "def advanced_encode(clip, text, token_normalization, weight_interpretation, w_max=1.0, clip_balance=.5,\n apply_to_pooled=True):\n tokenized = clip.tokenize(text, return_word_ids=True)\n if isinstance(clip.cond_stage_model, (SDXLClipModel, SDXLRefinerClipModel, SDXLClipG)):\n embs_l = None\n embs_g = None\n pooled = None\n if 'l' in tokenized and isinstance(clip.cond_stage_model, SDXLClipModel):\n embs_l, _ = advanced_encode_from_tokens(tokenized['l'],\n token_normalization,\n weight_interpretation,\n lambda x: encode_token_weights(clip, x, encode_token_weights_l),\n w_max=w_max,\n return_pooled=False)\n if 'g' in tokenized:\n embs_g, pooled = advanced_encode_from_tokens(tokenized['g'],\n token_normalization,\n weight_interpretation,\n lambda x: encode_token_weights(clip, x,\n encode_token_weights_g),\n w_max=w_max,\n return_pooled=True,\n apply_to_pooled=apply_to_pooled)\n return prepareXL(embs_l, embs_g, pooled, clip_balance)\n else:\n return advanced_encode_from_tokens(tokenized['l'],\n token_normalization,\n weight_interpretation,\n lambda x: (clip.encode_from_tokens({'l': x}), None),\n w_max=w_max)" }, { "identifier": "advanced_encode_XL", "path": "py/adv_encode.py", "snippet": "def advanced_encode_XL(clip, text1, text2, token_normalization, weight_interpretation, w_max=1.0, clip_balance=.5,\n apply_to_pooled=True):\n tokenized1 = clip.tokenize(text1, return_word_ids=True)\n tokenized2 = clip.tokenize(text2, return_word_ids=True)\n\n embs_l, _ = advanced_encode_from_tokens(tokenized1['l'],\n token_normalization,\n weight_interpretation,\n lambda x: encode_token_weights(clip, x, encode_token_weights_l),\n w_max=w_max,\n return_pooled=False)\n\n embs_g, pooled = advanced_encode_from_tokens(tokenized2['g'],\n token_normalization,\n weight_interpretation,\n lambda x: encode_token_weights(clip, x, encode_token_weights_g),\n w_max=w_max,\n return_pooled=True,\n apply_to_pooled=apply_to_pooled)\n\n gcd_num = gcd(embs_l.shape[1], embs_g.shape[1])\n repeat_l = int((embs_g.shape[1] / gcd_num) * embs_l.shape[1])\n repeat_g = int((embs_l.shape[1] / gcd_num) * embs_g.shape[1])\n\n return prepareXL(embs_l.expand((-1, repeat_l, -1)), embs_g.expand((-1, repeat_g, -1)), pooled, clip_balance)" }, { "identifier": "BASE_RESOLUTIONS", "path": "py/config.py", "snippet": "BASE_RESOLUTIONS = [\n (\"自定义\", \"自定义\"),\n (512, 512),\n (512, 768),\n (768, 512),\n (576, 1024),\n (768, 1024),\n (768, 1280),\n (768, 1344),\n (768, 1536),\n (816, 1920),\n (832, 1152),\n (896, 1152),\n (896, 1088),\n (1024, 1024),\n (1024, 576),\n (1024, 768),\n (1080, 1920),\n (1440, 2560),\n (1088, 896),\n (1152, 832),\n (1152, 896),\n (1280, 768),\n (1344, 768),\n (1536, 640),\n (1536, 768),\n (1920, 816),\n (1920, 1080),\n (2560, 1440),\n]" }, { "identifier": "log_node_info", "path": "py/log.py", "snippet": "def log_node_info(node_name, message=None):\n \"\"\"Logs an info message.\"\"\"\n _log_node(COLORS_FG[\"CYAN\"], node_name, message)" }, { "identifier": "log_node_error", "path": "py/log.py", "snippet": "def log_node_error(node_name, message=None):\n \"\"\"Logs an warn message.\"\"\"\n _log_node(COLORS_FG[\"RED\"], node_name, message)" }, { "identifier": "log_node_warn", "path": "py/log.py", "snippet": "def log_node_warn(node_name, message=None):\n \"\"\"Logs an warn message.\"\"\"\n _log_node(COLORS_FG[\"YELLOW\"], node_name, message)" }, { "identifier": "log_node_success", "path": "py/log.py", "snippet": "def log_node_success(node_name, 
message=None):\n \"\"\"Logs a success message.\"\"\"\n _log_node(COLORS_FG[\"GREEN\"], node_name, message)" }, { "identifier": "process_with_loras", "path": "py/wildcards.py", "snippet": "def process_with_loras(wildcard_opt, model, clip, title=\"Positive\", seed=None, can_load_lora=True, pipe_lora_stack=[]):\n lora_name_cache = []\n\n pass1 = process(wildcard_opt, seed)\n loras = extract_lora_values(pass1)\n pass2 = remove_lora_tags(pass1)\n\n has_noodle_key = True if \"__\" in wildcard_opt else False\n has_loras = True if loras != [] else False\n show_wildcard_prompt = True if has_noodle_key or has_loras else False\n\n for lora_name, model_weight, clip_weight, lbw, lbw_a, lbw_b in loras:\n if (lora_name.split('.')[-1]) not in folder_paths.supported_pt_extensions:\n lora_name = lora_name+\".safetensors\"\n\n lora_name = resolve_lora_name(lora_name_cache, lora_name)\n\n path = folder_paths.get_full_path(\"loras\", lora_name)\n\n if path is not None:\n print(f\"LORA: {lora_name}: {model_weight}, {clip_weight}, LBW={lbw}, A={lbw_a}, B={lbw_b}\")\n\n def default_lora():\n return nodes.LoraLoader().load_lora(model, clip, lora_name, model_weight, clip_weight)\n\n if lbw is not None:\n cls = nodes.NODE_CLASS_MAPPINGS['LoraLoaderBlockWeight //Inspire']\n if can_load_lora:\n model, clip, _ = cls().doit(model, clip, lora_name, model_weight, clip_weight, False, 0, lbw_a, lbw_b, \"\", lbw)\n pipe_lora_stack.append({\n \"lora_name\": lora_name, \"model\": model, \"clip\": clip, \"lora_model_strength\": model_weight,\n \"lora_clip_strength\": clip_weight,\n \"lbw_a\": lbw_a,\n \"lbw_b\": lbw_b,\n \"lbw\": lbw\n })\n else:\n pipe_lora_stack.append({\"lora_name\": lora_name, \"model\": model, \"clip\": clip, \"lora_model_strength\": model_weight, \"lora_clip_strength\": clip_weight})\n if can_load_lora:\n model, clip = default_lora()\n else:\n print(f\"LORA NOT FOUND: {lora_name}\")\n\n # print(f\"{title}: {pass2}\")\n # print(f'{title}_decode:', pass1)\n\n return model, clip, pass2, pass1, show_wildcard_prompt, pipe_lora_stack" }, { "identifier": "get_wildcard_list", "path": "py/wildcards.py", "snippet": "def get_wildcard_list():\n return [f\"__{x}__\" for x in easy_wildcard_dict.keys()]" }, { "identifier": "sample_dpmpp_2s_ancestral", "path": "py/gradual_latent_hires_fix.py", "snippet": "@torch.no_grad()\ndef sample_dpmpp_2s_ancestral(\n model,\n x,\n sigmas,\n extra_args=None,\n callback=None,\n disable=None,\n eta=1.0,\n s_noise=1.0,\n noise_sampler=None,\n upscale_ratio=2.0,\n start_step=5,\n end_step=15,\n upscale_n_step=3,\n unsharp_kernel_size=3,\n unsharp_sigma=0.5,\n unsharp_strength=0.0,\n):\n \"\"\"Ancestral sampling with DPM-Solver++(2S) second-order steps.\"\"\"\n extra_args = {} if extra_args is None else extra_args\n s_in = x.new_ones([x.shape[0]])\n sigma_fn = lambda t: t.neg().exp()\n t_fn = lambda sigma: sigma.log().neg()\n\n # make upscale info\n upscale_steps = []\n step = start_step - 1\n while step < end_step - 1:\n upscale_steps.append(step)\n step += upscale_n_step\n height, width = x.shape[2:]\n upscale_shapes = [\n (int(height * (((upscale_ratio - 1) / i) + 1)), int(width * (((upscale_ratio - 1) / i) + 1)))\n for i in reversed(range(1, len(upscale_steps) + 1))\n ]\n upscale_info = {k: v for k, v in zip(upscale_steps, upscale_shapes)}\n\n for i in trange(len(sigmas) - 1, disable=disable):\n denoised = model(x, sigmas[i] * s_in, **extra_args)\n sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1], eta=eta)\n if callback is not None:\n callback({\"x\": x, \"i\": i, 
\"sigma\": sigmas[i], \"sigma_hat\": sigmas[i], \"denoised\": denoised})\n if sigma_down == 0:\n # Euler method\n d = to_d(x, sigmas[i], denoised)\n dt = sigma_down - sigmas[i]\n x = x + d * dt\n else:\n # DPM-Solver++(2S)\n t, t_next = t_fn(sigmas[i]), t_fn(sigma_down)\n r = 1 / 2\n h = t_next - t\n s = t + r * h\n x_2 = (sigma_fn(s) / sigma_fn(t)) * x - (-h * r).expm1() * denoised\n denoised_2 = model(x_2, sigma_fn(s) * s_in, **extra_args)\n x = (sigma_fn(t_next) / sigma_fn(t)) * x - (-h).expm1() * denoised_2\n # Noise addition\n if sigmas[i + 1] > 0:\n # Resize\n if i in upscale_info:\n x = torch.nn.functional.interpolate(x, size=upscale_info[i], mode=\"bicubic\", align_corners=False)\n if unsharp_strength > 0:\n blurred = gaussian_blur(x, kernel_size=unsharp_kernel_size, sigma=unsharp_sigma)\n x = x + unsharp_strength * (x - blurred)\n noise_sampler = default_noise_sampler(x)\n noise = noise_sampler(sigmas[i], sigmas[i + 1])\n x = x + noise * sigma_up * s_noise\n return x" }, { "identifier": "sample_dpmpp_2m_sde", "path": "py/gradual_latent_hires_fix.py", "snippet": "@torch.no_grad()\ndef sample_dpmpp_2m_sde(\n model,\n x,\n sigmas,\n extra_args=None,\n callback=None,\n disable=None,\n eta=1.0,\n s_noise=1.0,\n noise_sampler=None,\n solver_type=\"midpoint\",\n upscale_ratio=2.0,\n start_step=5,\n end_step=15,\n upscale_n_step=3,\n unsharp_kernel_size=3,\n unsharp_sigma=0.5,\n unsharp_strength=0.0,\n):\n \"\"\"DPM-Solver++(2M) SDE.\"\"\"\n\n if solver_type not in {\"heun\", \"midpoint\"}:\n raise ValueError(\"solver_type must be 'heun' or 'midpoint'\")\n\n seed = extra_args.get(\"seed\", None)\n sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max()\n extra_args = {} if extra_args is None else extra_args\n s_in = x.new_ones([x.shape[0]])\n\n old_denoised = None\n h_last = None\n h = None\n\n # make upscale info\n upscale_steps = []\n step = start_step - 1\n while step < end_step - 1:\n upscale_steps.append(step)\n step += upscale_n_step\n height, width = x.shape[2:]\n upscale_shapes = [\n (int(height * (((upscale_ratio - 1) / i) + 1)), int(width * (((upscale_ratio - 1) / i) + 1)))\n for i in reversed(range(1, len(upscale_steps) + 1))\n ]\n upscale_info = {k: v for k, v in zip(upscale_steps, upscale_shapes)}\n\n for i in trange(len(sigmas) - 1, disable=disable):\n denoised = model(x, sigmas[i] * s_in, **extra_args)\n if callback is not None:\n callback({\"x\": x, \"i\": i, \"sigma\": sigmas[i], \"sigma_hat\": sigmas[i], \"denoised\": denoised})\n if sigmas[i + 1] == 0:\n # Denoising step\n x = denoised\n else:\n # DPM-Solver++(2M) SDE\n t, s = -sigmas[i].log(), -sigmas[i + 1].log()\n h = s - t\n eta_h = eta * h\n\n x = sigmas[i + 1] / sigmas[i] * (-eta_h).exp() * x + (-h - eta_h).expm1().neg() * denoised\n\n if old_denoised is not None:\n r = h_last / h\n if solver_type == \"heun\":\n x = x + ((-h - eta_h).expm1().neg() / (-h - eta_h) + 1) * (1 / r) * (denoised - old_denoised)\n elif solver_type == \"midpoint\":\n x = x + 0.5 * (-h - eta_h).expm1().neg() * (1 / r) * (denoised - old_denoised)\n\n if eta:\n # Resize\n if i in upscale_info:\n x = torch.nn.functional.interpolate(x, size=upscale_info[i], mode=\"bicubic\", align_corners=False)\n if unsharp_strength > 0:\n blurred = gaussian_blur(x, kernel_size=unsharp_kernel_size, sigma=unsharp_sigma)\n x = x + unsharp_strength * (x - blurred)\n denoised = None # 次ステップとサイズがあわないのでとりあえずNoneにしておく。\n noise_sampler = BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=seed, cpu=True)\n x = x + noise_sampler(sigmas[i], sigmas[i + 1]) * 
sigmas[i + 1] * (-2 * eta_h).expm1().neg().sqrt() * s_noise\n\n old_denoised = denoised\n h_last = h\n return x" }, { "identifier": "sample_lcm", "path": "py/gradual_latent_hires_fix.py", "snippet": "@torch.no_grad()\ndef sample_lcm(\n model,\n x,\n sigmas,\n extra_args=None,\n callback=None,\n disable=None,\n noise_sampler=None,\n eta=None,\n s_noise=None,\n upscale_ratio=2.0,\n start_step=5,\n end_step=15,\n upscale_n_step=3,\n unsharp_kernel_size=3,\n unsharp_sigma=0.5,\n unsharp_strength=0.0,\n):\n extra_args = {} if extra_args is None else extra_args\n s_in = x.new_ones([x.shape[0]])\n\n # make upscale info\n upscale_steps = []\n step = start_step - 1\n while step < end_step - 1:\n upscale_steps.append(step)\n step += upscale_n_step\n height, width = x.shape[2:]\n upscale_shapes = [\n (int(height * (((upscale_ratio - 1) / i) + 1)), int(width * (((upscale_ratio - 1) / i) + 1)))\n for i in reversed(range(1, len(upscale_steps) + 1))\n ]\n upscale_info = {k: v for k, v in zip(upscale_steps, upscale_shapes)}\n\n for i in trange(len(sigmas) - 1, disable=disable):\n denoised = model(x, sigmas[i] * s_in, **extra_args)\n if callback is not None:\n callback({\"x\": x, \"i\": i, \"sigma\": sigmas[i], \"sigma_hat\": sigmas[i], \"denoised\": denoised})\n\n x = denoised\n if sigmas[i + 1] > 0:\n # Resize\n if i in upscale_info:\n x = torch.nn.functional.interpolate(x, size=upscale_info[i], mode=\"bicubic\", align_corners=False)\n if unsharp_strength > 0:\n blurred = gaussian_blur(x, kernel_size=unsharp_kernel_size, sigma=unsharp_sigma)\n x = x + unsharp_strength * (x - blurred)\n noise_sampler = default_noise_sampler(x)\n x += sigmas[i + 1] * noise_sampler(sigmas[i], sigmas[i + 1])\n\n return x" }, { "identifier": "sample_euler_ancestral", "path": "py/gradual_latent_hires_fix.py", "snippet": "@torch.no_grad()\ndef sample_euler_ancestral(\n model,\n x,\n sigmas,\n extra_args=None,\n callback=None,\n disable=None,\n eta=1.0,\n s_noise=1.0,\n noise_sampler=None,\n upscale_ratio=2.0,\n start_step=5,\n end_step=15,\n upscale_n_step=3,\n unsharp_kernel_size=3,\n unsharp_sigma=0.5,\n unsharp_strength=0.0,\n):\n \"\"\"Ancestral sampling with Euler method steps.\"\"\"\n extra_args = {} if extra_args is None else extra_args\n noise_sampler = default_noise_sampler(x) if noise_sampler is None else noise_sampler\n s_in = x.new_ones([x.shape[0]])\n\n # make upscale info\n upscale_steps = []\n step = start_step - 1\n while step < end_step - 1:\n upscale_steps.append(step)\n step += upscale_n_step\n height, width = x.shape[2:]\n upscale_shapes = [\n (int(height * (((upscale_ratio - 1) / i) + 1)), int(width * (((upscale_ratio - 1) / i) + 1)))\n for i in reversed(range(1, len(upscale_steps) + 1))\n ]\n upscale_info = {k: v for k, v in zip(upscale_steps, upscale_shapes)}\n\n for i in trange(len(sigmas) - 1, disable=disable):\n denoised = model(x, sigmas[i] * s_in, **extra_args)\n sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1], eta=eta)\n if callback is not None:\n callback({\"x\": x, \"i\": i, \"sigma\": sigmas[i], \"sigma_hat\": sigmas[i], \"denoised\": denoised})\n d = to_d(x, sigmas[i], denoised)\n # Euler method\n dt = sigma_down - sigmas[i]\n x = x + d * dt\n if sigmas[i + 1] > 0:\n # Resize\n if i in upscale_info:\n x = torch.nn.functional.interpolate(x, size=upscale_info[i], mode=\"bicubic\", align_corners=False)\n if unsharp_strength > 0:\n blurred = gaussian_blur(x, kernel_size=unsharp_kernel_size, sigma=unsharp_sigma)\n x = x + unsharp_strength * (x - blurred)\n\n noise_sampler = 
default_noise_sampler(x)\n noise = noise_sampler(sigmas[i], sigmas[i + 1])\n x = x + noise * sigma_up * s_noise\n return x" }, { "identifier": "DynThresh", "path": "py/dynthres_core.py", "snippet": "class DynThresh:\n\n Modes = [\"Constant\", \"Linear Down\", \"Cosine Down\", \"Half Cosine Down\", \"Linear Up\", \"Cosine Up\", \"Half Cosine Up\", \"Power Up\", \"Power Down\", \"Linear Repeating\", \"Cosine Repeating\", \"Sawtooth\"]\n Startpoints = [\"MEAN\", \"ZERO\"]\n Variabilities = [\"AD\", \"STD\"]\n\n def __init__(self, mimic_scale, threshold_percentile, mimic_mode, mimic_scale_min, cfg_mode, cfg_scale_min, sched_val, experiment_mode, max_steps, separate_feature_channels, scaling_startpoint, variability_measure, interpolate_phi):\n self.mimic_scale = mimic_scale\n self.threshold_percentile = threshold_percentile\n self.mimic_mode = mimic_mode\n self.cfg_mode = cfg_mode\n self.max_steps = max_steps\n self.cfg_scale_min = cfg_scale_min\n self.mimic_scale_min = mimic_scale_min\n self.experiment_mode = experiment_mode\n self.sched_val = sched_val\n self.sep_feat_channels = separate_feature_channels\n self.scaling_startpoint = scaling_startpoint\n self.variability_measure = variability_measure\n self.interpolate_phi = interpolate_phi\n\n def interpret_scale(self, scale, mode, min):\n scale -= min\n max = self.max_steps - 1\n frac = self.step / max\n if mode == \"Constant\":\n pass\n elif mode == \"Linear Down\":\n scale *= 1.0 - frac\n elif mode == \"Half Cosine Down\":\n scale *= math.cos(frac)\n elif mode == \"Cosine Down\":\n scale *= math.cos(frac * 1.5707)\n elif mode == \"Linear Up\":\n scale *= frac\n elif mode == \"Half Cosine Up\":\n scale *= 1.0 - math.cos(frac)\n elif mode == \"Cosine Up\":\n scale *= 1.0 - math.cos(frac * 1.5707)\n elif mode == \"Power Up\":\n scale *= math.pow(frac, self.sched_val)\n elif mode == \"Power Down\":\n scale *= 1.0 - math.pow(frac, self.sched_val)\n elif mode == \"Linear Repeating\":\n portion = (frac * self.sched_val) % 1.0\n scale *= (0.5 - portion) * 2 if portion < 0.5 else (portion - 0.5) * 2\n elif mode == \"Cosine Repeating\":\n scale *= math.cos(frac * 6.28318 * self.sched_val) * 0.5 + 0.5\n elif mode == \"Sawtooth\":\n scale *= (frac * self.sched_val) % 1.0\n scale += min\n return scale\n\n def dynthresh(self, cond, uncond, cfg_scale, weights):\n mimic_scale = self.interpret_scale(self.mimic_scale, self.mimic_mode, self.mimic_scale_min)\n cfg_scale = self.interpret_scale(cfg_scale, self.cfg_mode, self.cfg_scale_min)\n # uncond shape is (batch, 4, height, width)\n conds_per_batch = cond.shape[0] / uncond.shape[0]\n assert conds_per_batch == int(conds_per_batch), \"Expected # of conds per batch to be constant across batches\"\n cond_stacked = cond.reshape((-1, int(conds_per_batch)) + uncond.shape[1:])\n\n ### Normal first part of the CFG Scale logic, basically\n diff = cond_stacked - uncond.unsqueeze(1)\n if weights is not None:\n diff = diff * weights\n relative = diff.sum(1)\n\n ### Get the normal result for both mimic and normal scale\n mim_target = uncond + relative * mimic_scale\n cfg_target = uncond + relative * cfg_scale\n ### If we weren't doing mimic scale, we'd just return cfg_target here\n\n ### Now recenter the values relative to their average rather than absolute, to allow scaling from average\n mim_flattened = mim_target.flatten(2)\n cfg_flattened = cfg_target.flatten(2)\n mim_means = mim_flattened.mean(dim=2).unsqueeze(2)\n cfg_means = cfg_flattened.mean(dim=2).unsqueeze(2)\n mim_centered = mim_flattened - mim_means\n 
cfg_centered = cfg_flattened - cfg_means\n\n if self.sep_feat_channels:\n if self.variability_measure == 'STD':\n mim_scaleref = mim_centered.std(dim=2).unsqueeze(2)\n cfg_scaleref = cfg_centered.std(dim=2).unsqueeze(2)\n else: # 'AD'\n mim_scaleref = mim_centered.abs().max(dim=2).values.unsqueeze(2)\n cfg_scaleref = torch.quantile(cfg_centered.abs(), self.threshold_percentile, dim=2).unsqueeze(2)\n\n else:\n if self.variability_measure == 'STD':\n mim_scaleref = mim_centered.std()\n cfg_scaleref = cfg_centered.std()\n else: # 'AD'\n mim_scaleref = mim_centered.abs().max()\n cfg_scaleref = torch.quantile(cfg_centered.abs(), self.threshold_percentile)\n\n if self.scaling_startpoint == 'ZERO':\n scaling_factor = mim_scaleref / cfg_scaleref\n result = cfg_flattened * scaling_factor\n\n else: # 'MEAN'\n if self.variability_measure == 'STD':\n cfg_renormalized = (cfg_centered / cfg_scaleref) * mim_scaleref\n else: # 'AD'\n ### Get the maximum value of all datapoints (with an optional threshold percentile on the uncond)\n max_scaleref = torch.maximum(mim_scaleref, cfg_scaleref)\n ### Clamp to the max\n cfg_clamped = cfg_centered.clamp(-max_scaleref, max_scaleref)\n ### Now shrink from the max to normalize and grow to the mimic scale (instead of the CFG scale)\n cfg_renormalized = (cfg_clamped / max_scaleref) * mim_scaleref\n\n ### Now add it back onto the averages to get into real scale again and return\n result = cfg_renormalized + cfg_means\n\n actual_res = result.unflatten(2, mim_target.shape[2:])\n\n if self.interpolate_phi != 1.0:\n actual_res = actual_res * self.interpolate_phi + cfg_target * (1.0 - self.interpolate_phi)\n\n if self.experiment_mode == 1:\n num = actual_res.cpu().numpy()\n for y in range(0, 64):\n for x in range (0, 64):\n if num[0][0][y][x] > 1.0:\n num[0][1][y][x] *= 0.5\n if num[0][1][y][x] > 1.0:\n num[0][1][y][x] *= 0.5\n if num[0][2][y][x] > 1.5:\n num[0][2][y][x] *= 0.5\n actual_res = torch.from_numpy(num).to(device=uncond.device)\n elif self.experiment_mode == 2:\n num = actual_res.cpu().numpy()\n for y in range(0, 64):\n for x in range (0, 64):\n over_scale = False\n for z in range(0, 4):\n if abs(num[0][z][y][x]) > 1.5:\n over_scale = True\n if over_scale:\n for z in range(0, 4):\n num[0][z][y][x] *= 0.7\n actual_res = torch.from_numpy(num).to(device=uncond.device)\n elif self.experiment_mode == 3:\n coefs = torch.tensor([\n # R G B W\n [0.298, 0.207, 0.208, 0.0], # L1\n [0.187, 0.286, 0.173, 0.0], # L2\n [-0.158, 0.189, 0.264, 0.0], # L3\n [-0.184, -0.271, -0.473, 1.0], # L4\n ], device=uncond.device)\n res_rgb = torch.einsum(\"laxy,ab -> lbxy\", actual_res, coefs)\n max_r, max_g, max_b, max_w = res_rgb[0][0].max(), res_rgb[0][1].max(), res_rgb[0][2].max(), res_rgb[0][3].max()\n max_rgb = max(max_r, max_g, max_b)\n print(f\"test max = r={max_r}, g={max_g}, b={max_b}, w={max_w}, rgb={max_rgb}\")\n if self.step / (self.max_steps - 1) > 0.2:\n if max_rgb < 2.0 and max_w < 3.0:\n res_rgb /= max_rgb / 2.4\n else:\n if max_rgb > 2.4 and max_w > 3.0:\n res_rgb /= max_rgb / 2.4\n actual_res = torch.einsum(\"laxy,ab -> lbxy\", res_rgb, coefs.inverse())\n\n return actual_res" } ]
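The gradual-latent samplers in the `context` above (Euler ancestral, DPM-Solver++ 2S/2M SDE, LCM) all build the same resize schedule before their sampling loop: every `upscale_n_step`-th step between `start_step` and `end_step` gets a target latent size that ramps toward `upscale_ratio`. A standalone sketch of just that schedule construction, with the formula lifted from the snippets (the function name is illustrative):

```python
def build_upscale_info(height, width, upscale_ratio=2.0,
                       start_step=5, end_step=15, upscale_n_step=3):
    """Map sampler step index -> (new_height, new_width), as built in the snippets above."""
    upscale_steps = []
    step = start_step - 1
    while step < end_step - 1:
        upscale_steps.append(step)
        step += upscale_n_step
    # Earlier resizes stay small; the final one reaches the full upscale_ratio.
    upscale_shapes = [
        (int(height * (((upscale_ratio - 1) / i) + 1)),
         int(width * (((upscale_ratio - 1) / i) + 1)))
        for i in reversed(range(1, len(upscale_steps) + 1))
    ]
    return dict(zip(upscale_steps, upscale_shapes))


# A 64x64 latent upscaled toward 2x at steps 4, 7, 10 and 13:
print(build_upscale_info(64, 64))
# -> {4: (80, 80), 7: (85, 85), 10: (96, 96), 13: (128, 128)}
```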
import_statement:
import sys
import os
import re
import json
import time
import math
import torch
import psutil
import random
import datetime
import comfy.sd
import comfy.utils
import numpy as np
import folder_paths
import comfy.samplers
import comfy.controlnet
import latent_preview
import comfy.model_base
import comfy.model_management
from pathlib import Path
from comfy.sd import CLIP, VAE
from comfy.cli_args import args
from urllib.request import urlopen
from collections import defaultdict
from PIL.PngImagePlugin import PngInfo
from PIL import Image, ImageDraw, ImageFont
from comfy.model_patcher import ModelPatcher
from comfy_extras.chainner_models import model_loading
from typing import Dict, List, Optional, Tuple, Union, Any
from .adv_encode import advanced_encode, advanced_encode_XL
from server import PromptServer
from nodes import VAELoader, MAX_RESOLUTION, RepeatLatentBatch, NODE_CLASS_MAPPINGS as ALL_NODE_CLASS_MAPPINGS, ConditioningSetMask
from comfy_extras.nodes_mask import LatentCompositeMasked
from .config import BASE_RESOLUTIONS
from .log import log_node_info, log_node_error, log_node_warn, log_node_success
from .wildcards import process_with_loras, get_wildcard_list
from comfy_extras.nodes_stable3d import camera_embeddings
from .gradual_latent_hires_fix import sample_dpmpp_2s_ancestral, sample_dpmpp_2m_sde, sample_lcm, sample_euler_ancestral
from .dynthres_core import DynThresh
token_num: 11,124
w_max=1.0, apply_to_pooled="enable") negative = [[negative, {"pooled_output": negative_pooled}]] # ControlNet if "ControlNet" in self.x_type or "ControlNet" in self.y_type: _pipe = { "model": model if model is not None else plot_image_vars["model"], "positive": positive if positive is not None else plot_image_vars["positive_cond"], "negative": negative if negative is not None else plot_image_vars["negative_cond"], "vae": vae if vae is not None else plot_image_vars['vae'], "clip": clip if clip is not None else plot_image_vars['clip'], "samples": None, "images": None, "loader_settings": {} } cnet = plot_image_vars["cnet"] if "cnet" in plot_image_vars else None if cnet: strength, start_percent, end_percent = x_value.split(',') if "ControlNet" in self.x_type else y_value.split(',') strength = float(strength) start_percent = float(start_percent) end_percent = float(end_percent) for index, item in enumerate(cnet): control_net_names = item[0] image = item[1] for idx, control_net_name in enumerate(control_net_names): # print(control_net_name) _pipe, = controlnetAdvanced().controlnetApply(_pipe, image, control_net_name, None, strength, start_percent, end_percent) positive = _pipe['positive'] negative = _pipe['negative'] del _pipe # 简单用法 if plot_image_vars["x_node_type"] == "loader" or plot_image_vars["y_node_type"] == "loader": model, clip, vae = easyCache.load_checkpoint(plot_image_vars['ckpt_name']) if plot_image_vars['lora_name'] != "None": model, clip = easyCache.load_lora(plot_image_vars['lora_name'], model, clip, plot_image_vars['lora_model_strength'], plot_image_vars['lora_clip_strength']) # Check for custom VAE if plot_image_vars['vae_name'] not in ["Baked-VAE", "Baked VAE"]: vae = easyCache.load_vae(plot_image_vars['vae_name']) # CLIP skip if not clip: raise Exception("No CLIP found") clip = clip.clone() clip.clip_layer(plot_image_vars['clip_skip']) if plot_image_vars['a1111_prompt_style']: if "smZ CLIPTextEncode" in ALL_NODE_CLASS_MAPPINGS: cls = ALL_NODE_CLASS_MAPPINGS['smZ CLIPTextEncode'] steps = plot_image_vars['steps'] positive, = cls().encode(clip, plot_image_vars['positive'], "A1111", True, True, False, False, 6, 1024, 1024, 0, 0, 1024, 1024, '', '', steps) negative, = cls().encode(clip, plot_image_vars['negative'], "A1111", True, True, False, False, 6, 1024, 1024, 0, 0, 1024, 1024, '', '', steps) else: raise Exception(f"[ERROR] To use clip text encode same as webui, you need to install 'smzNodes'") else: positive, positive_pooled = advanced_encode(clip, plot_image_vars['positive'], plot_image_vars['positive_token_normalization'], plot_image_vars['positive_weight_interpretation'], w_max=1.0, apply_to_pooled="enable") positive = [[positive, {"pooled_output": positive_pooled}]] negative, negative_pooled = advanced_encode(clip, plot_image_vars['negative'], plot_image_vars['negative_token_normalization'], plot_image_vars['negative_weight_interpretation'], w_max=1.0, apply_to_pooled="enable") negative = [[negative, {"pooled_output": negative_pooled}]] model = model if model is not None else plot_image_vars["model"] clip = clip if clip is not None else plot_image_vars["clip"] vae = vae if vae is not None else plot_image_vars["vae"] positive = positive if positive is not None else plot_image_vars["positive_cond"] negative = negative if negative is not None else plot_image_vars["negative_cond"] seed = seed if seed is not None else plot_image_vars["seed"] steps = steps if steps is not None else plot_image_vars["steps"] cfg = cfg if cfg is not None else plot_image_vars["cfg"] sampler_name = 
sampler_name if sampler_name is not None else plot_image_vars["sampler_name"] scheduler = scheduler if scheduler is not None else plot_image_vars["scheduler"] denoise = denoise if denoise is not None else plot_image_vars["denoise"] # Sample samples = sampler.common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, samples, denoise=denoise, disable_noise=disable_noise, preview_latent=preview_latent, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise) # Decode images and store latent = samples["samples"] # Add the latent tensor to the tensors list latents_plot.append(latent) # Decode the image image = vae.decode(latent).cpu() if self.output_individuals in [True, "True"]: easy_save = easySave(self.my_unique_id, self.prompt, self.extra_pnginfo) easy_save.images(image, self.save_prefix, self.image_output, group_id=self.num) # Convert the image from tensor to PIL Image and add it to the list pil_image = easySampler.tensor2pil(image) image_list.append(pil_image) # Update max dimensions self.max_width = max(self.max_width, pil_image.width) self.max_height = max(self.max_height, pil_image.height) # Return the touched variables return image_list, self.max_width, self.max_height, latents_plot # Process Functions def validate_xy_plot(self): if self.x_type == 'None' and self.y_type == 'None':
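One pattern repeated throughout the cropped code above: `advanced_encode` returns an embedding plus a pooled tensor, and the ComfyUI-style conditioning is then assembled as `[[embedding, {"pooled_output": pooled}]]`. A tiny helper capturing that wrapping (the helper name and the literal argument values in the usage comment are placeholders, not part of the node pack):

```python
def wrap_conditioning(embedding, pooled):
    # ComfyUI conditioning: a list of [tensor, options] pairs; the pooled output
    # rides along in the options dict under "pooled_output".
    return [[embedding, {"pooled_output": pooled}]]

# e.g. positive = wrap_conditioning(*advanced_encode(clip, text, "none", "comfy",
#                                                    w_max=1.0, apply_to_pooled="enable"))
```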
# 加载器 class easyLoader: def __init__(self): self.loaded_objects = { "ckpt": defaultdict(tuple), # {ckpt_name: (model, ...)} "clip": defaultdict(tuple), "clip_vision": defaultdict(tuple), "bvae": defaultdict(tuple), "vae": defaultdict(object), "lora": defaultdict(dict), # {lora_name: {UID: (model_lora, clip_lora)}} } self.memory_threshold = self.determine_memory_threshold(0.7) def clean_values(self, values: str): original_values = values.split("; ") cleaned_values = [] for value in original_values: cleaned_value = value.strip(';').strip() if cleaned_value == "": continue try: cleaned_value = int(cleaned_value) except ValueError: try: cleaned_value = float(cleaned_value) except ValueError: pass cleaned_values.append(cleaned_value) return cleaned_values def clear_unused_objects(self, desired_names: set, object_type: str): keys = set(self.loaded_objects[object_type].keys()) for key in keys - desired_names: del self.loaded_objects[object_type][key] def get_input_value(self, entry, key): val = entry["inputs"][key] return val if isinstance(val, str) else val[0] def process_pipe_loader(self, entry, desired_ckpt_names, desired_vae_names, desired_lora_names, desired_lora_settings, num_loras=3, suffix=""): for idx in range(1, num_loras + 1): lora_name_key = f"{suffix}lora{idx}_name" desired_lora_names.add(self.get_input_value(entry, lora_name_key)) setting = f'{self.get_input_value(entry, lora_name_key)};{entry["inputs"][f"{suffix}lora{idx}_model_strength"]};{entry["inputs"][f"{suffix}lora{idx}_clip_strength"]}' desired_lora_settings.add(setting) desired_ckpt_names.add(self.get_input_value(entry, f"{suffix}ckpt_name")) desired_vae_names.add(self.get_input_value(entry, f"{suffix}vae_name")) def update_loaded_objects(self, prompt): desired_ckpt_names = set() desired_vae_names = set() desired_lora_names = set() desired_lora_settings = set() for entry in prompt.values(): class_type = entry["class_type"] if class_type == "easy a1111Loader" or class_type == "easy comfyLoader": lora_name = self.get_input_value(entry, "lora_name") desired_lora_names.add(lora_name) setting = f'{lora_name};{entry["inputs"]["lora_model_strength"]};{entry["inputs"]["lora_clip_strength"]}' desired_lora_settings.add(setting) desired_ckpt_names.add(self.get_input_value(entry, "ckpt_name")) desired_vae_names.add(self.get_input_value(entry, "vae_name")) elif class_type == "easy zero123Loader" or class_type == 'easy svdLoader': desired_ckpt_names.add(self.get_input_value(entry, "ckpt_name")) desired_vae_names.add(self.get_input_value(entry, "vae_name")) elif class_type == "easy XYInputs: ModelMergeBlocks": desired_ckpt_names.add(self.get_input_value(entry, "ckpt_name_1")) desired_ckpt_names.add(self.get_input_value(entry, "ckpt_name_2")) vae_use = self.get_input_value(entry, "vae_use") if vae_use != 'Use Model 1' and vae_use != 'Use Model 2': desired_vae_names.add(vae_use) object_types = ["ckpt", "clip", "bvae", "vae", "lora"] for object_type in object_types: desired_names = desired_ckpt_names if object_type in ["ckpt", "clip", "bvae"] else desired_vae_names if object_type == "vae" else desired_lora_names self.clear_unused_objects(desired_names, object_type) def add_to_cache(self, obj_type, key, value): """ Add an item to the cache with the current timestamp. """ timestamped_value = (value, time.time()) self.loaded_objects[obj_type][key] = timestamped_value def determine_memory_threshold(self, percentage=0.8): """ Determines the memory threshold as a percentage of the total available memory. 
Args: - percentage (float): The fraction of total memory to use as the threshold. Should be a value between 0 and 1. Default is 0.8 (80%). Returns: - memory_threshold (int): Memory threshold in bytes. """ total_memory = psutil.virtual_memory().total memory_threshold = total_memory * percentage return memory_threshold def get_memory_usage(self): """ Returns the memory usage of the current process in bytes. """ process = psutil.Process(os.getpid()) return process.memory_info().rss def eviction_based_on_memory(self): """ Evicts objects from cache based on memory usage and priority. """ current_memory = self.get_memory_usage() if current_memory < self.memory_threshold: return eviction_order = ["vae", "lora", "bvae", "clip", "ckpt"] for obj_type in eviction_order: if current_memory < self.memory_threshold: break # Sort items based on age (using the timestamp) items = list(self.loaded_objects[obj_type].items()) items.sort(key=lambda x: x[1][1]) # Sorting by timestamp for item in items: if current_memory < self.memory_threshold: break del self.loaded_objects[obj_type][item[0]] current_memory = self.get_memory_usage() def load_checkpoint(self, ckpt_name, config_name=None, load_vision=False): cache_name = ckpt_name if config_name not in [None, "Default"]: cache_name = ckpt_name + "_" + config_name if cache_name in self.loaded_objects["ckpt"]: cache_out = self.loaded_objects["clip_vision"][cache_name][0] if load_vision else self.loaded_objects["clip"][cache_name][0] return self.loaded_objects["ckpt"][cache_name][0], cache_out, self.loaded_objects["bvae"][cache_name][0] ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name) output_clip = False if load_vision else True output_clipvision = True if load_vision else False if config_name not in [None, "Default"]: config_path = folder_paths.get_full_path("configs", config_name) loaded_ckpt = comfy.sd.load_checkpoint(config_path, ckpt_path, output_vae=True, output_clip=output_clip, output_clipvision=output_clipvision, embedding_directory=folder_paths.get_folder_paths("embeddings")) else: loaded_ckpt = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=output_clip, output_clipvision=output_clipvision, embedding_directory=folder_paths.get_folder_paths("embeddings")) self.add_to_cache("ckpt", cache_name, loaded_ckpt[0]) self.add_to_cache("bvae", cache_name, loaded_ckpt[2]) if load_vision: out = loaded_ckpt[3] self.add_to_cache("clip_vision", cache_name, out) else: out = loaded_ckpt[1] self.add_to_cache("clip", cache_name, loaded_ckpt[1]) self.eviction_based_on_memory() return loaded_ckpt[0], out, loaded_ckpt[2] def load_vae(self, vae_name): if vae_name in self.loaded_objects["vae"]: return self.loaded_objects["vae"][vae_name][0] vae_path = folder_paths.get_full_path("vae", vae_name) sd = comfy.utils.load_torch_file(vae_path) loaded_vae = comfy.sd.VAE(sd=sd) self.add_to_cache("vae", vae_name, loaded_vae) self.eviction_based_on_memory() return loaded_vae def load_lora(self, lora_name, model, clip, strength_model, strength_clip): model_hash = str(model)[44:-1] clip_hash = str(clip)[25:-1] unique_id = f'{model_hash};{clip_hash};{lora_name};{strength_model};{strength_clip}' if unique_id in self.loaded_objects["lora"] and unique_id in self.loaded_objects["lora"][lora_name]: return self.loaded_objects["lora"][unique_id][0] lora_path = folder_paths.get_full_path("loras", lora_name) lora = comfy.utils.load_torch_file(lora_path, safe_load=True) model_lora, clip_lora = comfy.sd.load_lora_for_models(model, clip, lora, strength_model, 
strength_clip) self.add_to_cache("lora", unique_id, (model_lora, clip_lora)) self.eviction_based_on_memory() return model_lora, clip_lora # 采样器 class easySampler: def __init__(self): self.last_helds: dict[str, list] = { "results": [], "pipe_line": [], } @staticmethod def tensor2pil(image: torch.Tensor) -> Image.Image: """Convert a torch tensor to a PIL image.""" return Image.fromarray(np.clip(255. * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8)) @staticmethod def pil2tensor(image: Image.Image) -> torch.Tensor: """Convert a PIL image to a torch tensor.""" return torch.from_numpy(np.array(image).astype(np.float32) / 255.0).unsqueeze(0) @staticmethod def enforce_mul_of_64(d): d = int(d) if d <= 7: d = 8 leftover = d % 8 # 8 is the number of pixels per byte if leftover != 0: # if the number of pixels is not a multiple of 8 if (leftover < 4): # if the number of pixels is less than 4 d -= leftover # remove the leftover pixels else: # if the number of pixels is more than 4 d += 8 - leftover # add the leftover pixels return int(d) @staticmethod def safe_split(to_split: str, delimiter: str) -> List[str]: """Split the input string and return a list of non-empty parts.""" parts = to_split.split(delimiter) parts = [part for part in parts if part not in ('', ' ', ' ')] while len(parts) < 2: parts.append('None') return parts def common_ksampler(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent, denoise=1.0, disable_noise=False, start_step=None, last_step=None, force_full_denoise=False, preview_latent=True, disable_pbar=False): device = comfy.model_management.get_torch_device() latent_image = latent["samples"] if disable_noise: noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu") else: batch_inds = latent["batch_index"] if "batch_index" in latent else None noise = comfy.sample.prepare_noise(latent_image, seed, batch_inds) noise_mask = None if "noise_mask" in latent: noise_mask = latent["noise_mask"] preview_format = "JPEG" if preview_format not in ["JPEG", "PNG"]: preview_format = "JPEG" previewer = False if preview_latent: previewer = latent_preview.get_previewer(device, model.model.latent_format) pbar = comfy.utils.ProgressBar(steps) def callback(step, x0, x, total_steps): preview_bytes = None if previewer: preview_bytes = previewer.decode_latent_to_preview_image(preview_format, x0) pbar.update_absolute(step + 1, total_steps, preview_bytes) samples = comfy.sample.sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise, disable_noise=disable_noise, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, noise_mask=noise_mask, callback=callback, disable_pbar=disable_pbar, seed=seed) out = latent.copy() out["samples"] = samples return out def custom_ksampler(self, model, seed, steps, cfg, _sampler, sigmas, positive, negative, latent, disable_noise=False, preview_latent=True, disable_pbar=False): device = comfy.model_management.get_torch_device() latent_image = latent["samples"] if disable_noise: noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu") else: batch_inds = latent["batch_index"] if "batch_index" in latent else None noise = comfy.sample.prepare_noise(latent_image, seed, batch_inds) noise_mask = None if "noise_mask" in latent: noise_mask = latent["noise_mask"] preview_format = "JPEG" if preview_format not in ["JPEG", "PNG"]: preview_format = "JPEG" previewer = False if 
preview_latent: previewer = latent_preview.get_previewer(device, model.model.latent_format) pbar = comfy.utils.ProgressBar(steps) def callback(step, x0, x, total_steps): preview_bytes = None if previewer: preview_bytes = previewer.decode_latent_to_preview_image(preview_format, x0) pbar.update_absolute(step + 1, total_steps, preview_bytes) samples = comfy.sample.sample_custom(model, noise, cfg, _sampler, sigmas, positive, negative, latent_image, noise_mask=noise_mask, callback=callback, disable_pbar=disable_pbar, seed=seed) out = latent.copy() out["samples"] = samples return out def get_value_by_id(self, key: str, my_unique_id: Any) -> Optional[Any]: """Retrieve value by its associated ID.""" try: for value, id_ in self.last_helds[key]: if id_ == my_unique_id: return value except KeyError: return None def update_value_by_id(self, key: str, my_unique_id: Any, new_value: Any) -> Union[bool, None]: """Update the value associated with a given ID. Return True if updated, False if appended, None if key doesn't exist.""" try: for i, (value, id_) in enumerate(self.last_helds[key]): if id_ == my_unique_id: self.last_helds[key][i] = (new_value, id_) return True self.last_helds[key].append((new_value, my_unique_id)) return False except KeyError: return False def upscale(self, samples, upscale_method, scale_by, crop): s = samples.copy() width = self.enforce_mul_of_64(round(samples["samples"].shape[3] * scale_by)) height = self.enforce_mul_of_64(round(samples["samples"].shape[2] * scale_by)) if (width > MAX_RESOLUTION): width = MAX_RESOLUTION if (height > MAX_RESOLUTION): height = MAX_RESOLUTION s["samples"] = comfy.utils.common_upscale(samples["samples"], width, height, upscale_method, crop) return (s,) def handle_upscale(self, samples: dict, upscale_method: str, factor: float, crop: bool) -> dict: """Upscale the samples if the upscale_method is not set to 'None'.""" if upscale_method != "None": samples = self.upscale(samples, upscale_method, factor, crop)[0] return samples def init_state(self, my_unique_id: Any, key: str, default: Any) -> Any: """Initialize the state by either fetching the stored value or setting a default.""" value = self.get_value_by_id(key, my_unique_id) if value is not None: return value return default def get_output(self, pipe: dict,) -> Tuple: """Return a tuple of various elements fetched from the input pipe dictionary.""" return ( pipe, pipe.get("images"), pipe.get("model"), pipe.get("positive"), pipe.get("negative"), pipe.get("samples"), pipe.get("vae"), pipe.get("clip"), pipe.get("seed"), ) def get_output_sdxl(self, sdxl_pipe: dict) -> Tuple: """Return a tuple of various elements fetched from the input sdxl_pipe dictionary.""" return ( sdxl_pipe, sdxl_pipe.get("model"), sdxl_pipe.get("positive"), sdxl_pipe.get("negative"), sdxl_pipe.get("vae"), sdxl_pipe.get("refiner_model"), sdxl_pipe.get("refiner_positive"), sdxl_pipe.get("refiner_negative"), sdxl_pipe.get("refiner_vae"), sdxl_pipe.get("samples"), sdxl_pipe.get("clip"), sdxl_pipe.get("images"), sdxl_pipe.get("seed") ) # XY图表 class easyXYPlot: def __init__(self, xyPlotData, save_prefix, image_output, prompt, extra_pnginfo, my_unique_id): self.x_node_type, self.x_type = easySampler.safe_split(xyPlotData.get("x_axis"), ': ') self.y_node_type, self.y_type = easySampler.safe_split(xyPlotData.get("y_axis"), ': ') self.x_values = xyPlotData.get("x_vals") if self.x_type != "None" else [] self.y_values = xyPlotData.get("y_vals") if self.y_type != "None" else [] self.grid_spacing = xyPlotData.get("grid_spacing") self.latent_id = 0 
self.output_individuals = xyPlotData.get("output_individuals") self.x_label, self.y_label = [], [] self.max_width, self.max_height = 0, 0 self.latents_plot = [] self.image_list = [] self.num_cols = len(self.x_values) if len(self.x_values) > 0 else 1 self.num_rows = len(self.y_values) if len(self.y_values) > 0 else 1 self.total = self.num_cols * self.num_rows self.num = 0 self.save_prefix = save_prefix self.image_output = image_output self.prompt = prompt self.extra_pnginfo = extra_pnginfo self.my_unique_id = my_unique_id # Helper Functions @staticmethod def define_variable(plot_image_vars, value_type, value, index): plot_image_vars[value_type] = value if value_type in ["seed", "Seeds++ Batch"]: value_label = f"{value}" else: value_label = f"{value_type}: {value}" if "ControlNet" in value_type: if "," in value: line = value.split(',') value_label = f"{value_type}: {line[2]}" if value_type in ["ModelMergeBlocks"]: if ":" in value: line = value.split(':') value_label = f"{line[0]}" elif len(value) > 16: value_label = f"ModelMergeBlocks {index + 1}" else: value_label = f"MMB: {value}" if value_type in ["Positive Prompt S/R"]: value_label = f"pos prompt {index + 1}" if index>0 else f"pos prompt" if value_type in ["Negative Prompt S/R"]: value_label = f"neg prompt {index + 1}" if index>0 else f"neg prompt" if value_type in ["steps", "cfg", "denoise", "clip_skip", "lora_model_strength", "lora_clip_strength"]: value_label = f"{value_type}: {value}" if value_type == "positive": value_label = f"pos prompt {index + 1}" elif value_type == "negative": value_label = f"neg prompt {index + 1}" return plot_image_vars, value_label @staticmethod def get_font(font_size): return ImageFont.truetype(str(Path(os.path.join(Path(__file__).parent.parent, 'resources/OpenSans-Medium.ttf'))), font_size) @staticmethod def update_label(label, value, num_items): if len(label) < num_items: return [*label, value] return label @staticmethod def rearrange_tensors(latent, num_cols, num_rows): new_latent = [] for i in range(num_rows): for j in range(num_cols): index = j * num_rows + i new_latent.append(latent[index]) return new_latent def calculate_background_dimensions(self): border_size = int((self.max_width // 8) * 1.5) if self.y_type != "None" or self.x_type != "None" else 0 bg_width = self.num_cols * (self.max_width + self.grid_spacing) - self.grid_spacing + border_size * ( self.y_type != "None") bg_height = self.num_rows * (self.max_height + self.grid_spacing) - self.grid_spacing + border_size * ( self.x_type != "None") x_offset_initial = border_size if self.y_type != "None" else 0 y_offset = border_size if self.x_type != "None" else 0 return bg_width, bg_height, x_offset_initial, y_offset def adjust_font_size(self, text, initial_font_size, label_width): font = self.get_font(initial_font_size) text_width, _ = font.getsize(text) scaling_factor = 0.9 if text_width > (label_width * scaling_factor): return int(initial_font_size * (label_width / text_width) * scaling_factor) else: return initial_font_size def create_label(self, img, text, initial_font_size, is_x_label=True, max_font_size=70, min_font_size=10): label_width = img.width if is_x_label else img.height # Adjust font size font_size = self.adjust_font_size(text, initial_font_size, label_width) font_size = min(max_font_size, font_size) # Ensure font isn't too large font_size = max(min_font_size, font_size) # Ensure font isn't too small label_height = int(font_size * 1.5) if is_x_label else font_size label_bg = Image.new('RGBA', (label_width, label_height), color=(255, 
255, 255, 0)) d = ImageDraw.Draw(label_bg) font = self.get_font(font_size) # Check if text will fit, if not insert ellipsis and reduce text if d.textsize(text, font=font)[0] > label_width: while d.textsize(text + '...', font=font)[0] > label_width and len(text) > 0: text = text[:-1] text = text + '...' # Compute text width and height for multi-line text text_lines = text.split('\n') text_widths, text_heights = zip(*[d.textsize(line, font=font) for line in text_lines]) max_text_width = max(text_widths) total_text_height = sum(text_heights) # Compute position for each line of text lines_positions = [] current_y = 0 for line, line_width, line_height in zip(text_lines, text_widths, text_heights): text_x = (label_width - line_width) // 2 text_y = current_y + (label_height - total_text_height) // 2 current_y += line_height lines_positions.append((line, (text_x, text_y))) # Draw each line of text for line, (text_x, text_y) in lines_positions: d.text((text_x, text_y), line, fill='black', font=font) return label_bg def sample_plot_image(self, plot_image_vars, samples, preview_latent, latents_plot, image_list, disable_noise, start_step, last_step, force_full_denoise, x_value=None, y_value=None): model, clip, vae, positive, negative, seed, steps, cfg = None, None, None, None, None, None, None, None sampler_name, scheduler, denoise = None, None, None # 高级用法 if plot_image_vars["x_node_type"] == "advanced" or plot_image_vars["y_node_type"] == "advanced": if self.x_type == "Seeds++ Batch" or self.y_type == "Seeds++ Batch": seed = int(x_value) if self.x_type == "Seeds++ Batch" else int(y_value) if self.x_type == "Steps" or self.y_type == "Steps": steps = int(x_value) if self.x_type == "Steps" else int(y_value) if self.x_type == "StartStep" or self.y_type == "StartStep": start_step = int(x_value) if self.x_type == "StartStep" else int(y_value) if self.x_type == "EndStep" or self.y_type == "EndStep": last_step = int(x_value) if self.x_type == "EndStep" else int(y_value) if self.x_type == "CFG Scale" or self.y_type == "CFG Scale": cfg = float(x_value) if self.x_type == "CFG Scale" else float(y_value) if self.x_type == "Sampler" or self.y_type == "Sampler" or self.y_type == "Sampler & Scheduler": sampler_name = float(x_value) if self.x_type == "Sampler" or self.x_type == "Sampler & Scheduler" else float(y_value) if self.x_type == "Scheduler" or self.y_type == "Scheduler" or self.y_type == "Sampler & Scheduler": scheduler = float(x_value) if self.x_type == "Scheduler" or self.x_type == "Sampler & Scheduler" else float(y_value) if self.x_type == "Denoise" or self.y_type == "Denoise": denoise = float(x_value) if self.x_type == "Denoise" else float(y_value) # 模型叠加 if self.x_type == "ModelMergeBlocks" or self.y_type == "ModelMergeBlocks": ckpt_name_1, ckpt_name_2 = plot_image_vars['models'] model1, clip1, vae1 = easyCache.load_checkpoint(ckpt_name_1) model2, clip2, vae2 = easyCache.load_checkpoint(ckpt_name_2) xy_values = x_value if self.x_type == "ModelMergeBlocks" else y_value if ":" in xy_values: xy_line = xy_values.split(':') xy_values = xy_line[1] xy_arrs = xy_values.split(',') # ModelMergeBlocks if len(xy_arrs) == 3: input, middle, out = xy_arrs kwargs = { "input": input, "middle": middle, "out": out } elif len(xy_arrs) == 30: kwargs = {} kwargs["time_embed."] = xy_arrs[0] kwargs["label_emb."] = xy_arrs[1] for i in range(12): kwargs["input_blocks.{}.".format(i)] = xy_arrs[2+i] for i in range(3): kwargs["middle_block.{}.".format(i)] = xy_arrs[14+i] for i in range(12): kwargs["output_blocks.{}.".format(i)] = 
xy_arrs[17+i] kwargs["out."] = xy_arrs[29] else: raise Exception("ModelMergeBlocks weight length error") default_ratio = next(iter(kwargs.values())) m = model1.clone() kp = model2.get_key_patches("diffusion_model.") for k in kp: ratio = float(default_ratio) k_unet = k[len("diffusion_model."):] last_arg_size = 0 for arg in kwargs: if k_unet.startswith(arg) and last_arg_size < len(arg): ratio = float(kwargs[arg]) last_arg_size = len(arg) m.add_patches({k: kp[k]}, 1.0 - ratio, ratio) vae_use = plot_image_vars['vae_use'] clip = clip2 if vae_use == 'Use Model 2' else clip1 if vae_use == 'Use Model 2': vae = vae2 elif vae_use == 'Use Model 1': vae = vae1 else: (vae,) = VAELoader().load_vae(vae_use) model = m # 如果存在lora_stack叠加lora optional_lora_stack = plot_image_vars['lora_stack'] if optional_lora_stack is not None and optional_lora_stack != []: for lora in optional_lora_stack: lora_name = lora["lora_name"] model = model if model is not None else lora["model"] clip = clip if clip is not None else lora["clip"] lora_model_strength = lora["lora_model_strength"] lora_clip_strength = lora["lora_clip_strength"] if "lbw" in lora: lbw = lora["lbw"] lbw_a = lora["lbw_a"] lbw_b = lora["lbw_b"] cls = ALL_NODE_CLASS_MAPPINGS['LoraLoaderBlockWeight //Inspire'] model, clip, _ = cls().doit(model, clip, lora_name, lora_model_strength, lora_clip_strength, False, 0, lbw_a, lbw_b, "", lbw) model, clip = easyCache.load_lora(lora_name, model, clip, lora_model_strength, lora_clip_strength) # 处理clip clip = clip.clone() if plot_image_vars['clip_skip'] != 0: clip.clip_layer(plot_image_vars['clip_skip']) # 提示词 if "Positive" in self.x_type or "Positive" in self.y_type: if self.x_type == 'Positive Prompt S/R' or self.y_type == 'Positive Prompt S/R': positive = x_value if self.x_type == "Positive Prompt S/R" else y_value if plot_image_vars['a1111_prompt_style']: if "smZ CLIPTextEncode" in ALL_NODE_CLASS_MAPPINGS: cls = ALL_NODE_CLASS_MAPPINGS['smZ CLIPTextEncode'] steps = plot_image_vars['steps'] clip = clip if clip is not None else plot_image_vars["clip"] positive, = cls().encode(clip, positive, "A1111", True, True, False, False, 6, 1024, 1024, 0, 0, 1024, 1024, '', '', steps) else: raise Exception( f"[ERROR] To use clip text encode same as webui, you need to install 'smzNodes'") else: clip = clip if clip is not None else plot_image_vars["clip"] positive, positive_pooled = advanced_encode(clip, positive, plot_image_vars['positive_token_normalization'], plot_image_vars[ 'positive_weight_interpretation'], w_max=1.0, apply_to_pooled="enable") positive = [[positive, {"pooled_output": positive_pooled}]] if "Negative" in self.x_type or "Negative" in self.y_type: if self.x_type == 'Negative Prompt S/R' or self.y_type == 'Negative Prompt S/R': negative = x_value if self.x_type == "Negative Prompt S/R" else y_value if plot_image_vars['a1111_prompt_style']: if "smZ CLIPTextEncode" in ALL_NODE_CLASS_MAPPINGS: cls = ALL_NODE_CLASS_MAPPINGS['smZ CLIPTextEncode'] steps = plot_image_vars['steps'] clip = clip if clip is not None else plot_image_vars["clip"] negative, = cls().encode(clip, negative, "A1111", True, True, False, False, 6, 1024, 1024, 0, 0, 1024, 1024, '', '', steps) else: raise Exception( f"[ERROR] To use clip text encode same as webui, you need to install 'smzNodes'") else: clip = clip if clip is not None else plot_image_vars["clip"] negative, negative_pooled = advanced_encode(clip, negative, plot_image_vars['negative_token_normalization'], plot_image_vars[ 'negative_weight_interpretation'], w_max=1.0, 
apply_to_pooled="enable") negative = [[negative, {"pooled_output": negative_pooled}]] # ControlNet if "ControlNet" in self.x_type or "ControlNet" in self.y_type: _pipe = { "model": model if model is not None else plot_image_vars["model"], "positive": positive if positive is not None else plot_image_vars["positive_cond"], "negative": negative if negative is not None else plot_image_vars["negative_cond"], "vae": vae if vae is not None else plot_image_vars['vae'], "clip": clip if clip is not None else plot_image_vars['clip'], "samples": None, "images": None, "loader_settings": {} } cnet = plot_image_vars["cnet"] if "cnet" in plot_image_vars else None if cnet: strength, start_percent, end_percent = x_value.split(',') if "ControlNet" in self.x_type else y_value.split(',') strength = float(strength) start_percent = float(start_percent) end_percent = float(end_percent) for index, item in enumerate(cnet): control_net_names = item[0] image = item[1] for idx, control_net_name in enumerate(control_net_names): # print(control_net_name) _pipe, = controlnetAdvanced().controlnetApply(_pipe, image, control_net_name, None, strength, start_percent, end_percent) positive = _pipe['positive'] negative = _pipe['negative'] del _pipe # 简单用法 if plot_image_vars["x_node_type"] == "loader" or plot_image_vars["y_node_type"] == "loader": model, clip, vae = easyCache.load_checkpoint(plot_image_vars['ckpt_name']) if plot_image_vars['lora_name'] != "None": model, clip = easyCache.load_lora(plot_image_vars['lora_name'], model, clip, plot_image_vars['lora_model_strength'], plot_image_vars['lora_clip_strength']) # Check for custom VAE if plot_image_vars['vae_name'] not in ["Baked-VAE", "Baked VAE"]: vae = easyCache.load_vae(plot_image_vars['vae_name']) # CLIP skip if not clip: raise Exception("No CLIP found") clip = clip.clone() clip.clip_layer(plot_image_vars['clip_skip']) if plot_image_vars['a1111_prompt_style']: if "smZ CLIPTextEncode" in ALL_NODE_CLASS_MAPPINGS: cls = ALL_NODE_CLASS_MAPPINGS['smZ CLIPTextEncode'] steps = plot_image_vars['steps'] positive, = cls().encode(clip, plot_image_vars['positive'], "A1111", True, True, False, False, 6, 1024, 1024, 0, 0, 1024, 1024, '', '', steps) negative, = cls().encode(clip, plot_image_vars['negative'], "A1111", True, True, False, False, 6, 1024, 1024, 0, 0, 1024, 1024, '', '', steps) else: raise Exception(f"[ERROR] To use clip text encode same as webui, you need to install 'smzNodes'") else: positive, positive_pooled = advanced_encode(clip, plot_image_vars['positive'], plot_image_vars['positive_token_normalization'], plot_image_vars['positive_weight_interpretation'], w_max=1.0, apply_to_pooled="enable") positive = [[positive, {"pooled_output": positive_pooled}]] negative, negative_pooled = advanced_encode(clip, plot_image_vars['negative'], plot_image_vars['negative_token_normalization'], plot_image_vars['negative_weight_interpretation'], w_max=1.0, apply_to_pooled="enable") negative = [[negative, {"pooled_output": negative_pooled}]] model = model if model is not None else plot_image_vars["model"] clip = clip if clip is not None else plot_image_vars["clip"] vae = vae if vae is not None else plot_image_vars["vae"] positive = positive if positive is not None else plot_image_vars["positive_cond"] negative = negative if negative is not None else plot_image_vars["negative_cond"] seed = seed if seed is not None else plot_image_vars["seed"] steps = steps if steps is not None else plot_image_vars["steps"] cfg = cfg if cfg is not None else plot_image_vars["cfg"] sampler_name = 
sampler_name if sampler_name is not None else plot_image_vars["sampler_name"] scheduler = scheduler if scheduler is not None else plot_image_vars["scheduler"] denoise = denoise if denoise is not None else plot_image_vars["denoise"] # Sample samples = sampler.common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, samples, denoise=denoise, disable_noise=disable_noise, preview_latent=preview_latent, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise) # Decode images and store latent = samples["samples"] # Add the latent tensor to the tensors list latents_plot.append(latent) # Decode the image image = vae.decode(latent).cpu() if self.output_individuals in [True, "True"]: easy_save = easySave(self.my_unique_id, self.prompt, self.extra_pnginfo) easy_save.images(image, self.save_prefix, self.image_output, group_id=self.num) # Convert the image from tensor to PIL Image and add it to the list pil_image = easySampler.tensor2pil(image) image_list.append(pil_image) # Update max dimensions self.max_width = max(self.max_width, pil_image.width) self.max_height = max(self.max_height, pil_image.height) # Return the touched variables return image_list, self.max_width, self.max_height, latents_plot # Process Functions def validate_xy_plot(self): if self.x_type == 'None' and self.y_type == 'None':
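The `easyLoader` above caches each object as a `(value, timestamp)` pair per type and, once the process RSS crosses a fixed fraction of total RAM, evicts the oldest entries type by type. A condensed standalone sketch of that policy (class name and type order are illustrative; assumes `psutil` is available):

```python
import os
import time
from collections import defaultdict

import psutil


class TimestampedCache:
    """Per-type cache of (value, timestamp) pairs, evicting oldest-first under memory pressure."""

    def __init__(self, fraction=0.7, eviction_order=("vae", "lora", "clip", "ckpt")):
        self.objects = defaultdict(dict)   # {obj_type: {key: (value, timestamp)}}
        self.threshold = psutil.virtual_memory().total * fraction
        self.eviction_order = eviction_order

    def _rss(self):
        # Resident set size of the current process, in bytes.
        return psutil.Process(os.getpid()).memory_info().rss

    def add(self, obj_type, key, value):
        self.objects[obj_type][key] = (value, time.time())
        self.evict_if_needed()

    def evict_if_needed(self):
        current = self._rss()
        for obj_type in self.eviction_order:
            if current < self.threshold:
                return
            # Oldest entries (smallest timestamp) are dropped first.
            for key, _ in sorted(self.objects[obj_type].items(), key=lambda kv: kv[1][1]):
                del self.objects[obj_type][key]
                current = self._rss()
                if current < self.threshold:
                    return
```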
log_node_warn(f'easyKsampler[{self.my_unique_id}]','No Valid Plot Types - Reverting to default sampling...')
5
2023-12-10 07:02:36+00:00
16k
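The cropped code in the record above walks one cell of an XY plot through the full pipeline: fall back to the shared plot_image_vars defaults for anything not overridden, run sampler.common_ksampler, pull the latent out of the result, and decode it with the VAE. A condensed sketch of that flow follows; the function name sample_plot_cell is illustrative, and the later common_ksampler keyword arguments (disable_noise, preview_latent, start/last step, force_full_denoise) are omitted for brevity.

def sample_plot_cell(plot_image_vars, sampler, vae, model=None, positive=None,
                     negative=None, seed=None, steps=None, cfg=None,
                     sampler_name=None, scheduler=None, denoise=None, latent_in=None):
    # Any value not overridden for this plot cell falls back to the shared defaults,
    # mirroring the `x = x if x is not None else plot_image_vars[...]` lines above.
    model = model if model is not None else plot_image_vars["model"]
    positive = positive if positive is not None else plot_image_vars["positive_cond"]
    negative = negative if negative is not None else plot_image_vars["negative_cond"]
    seed = seed if seed is not None else plot_image_vars["seed"]
    steps = steps if steps is not None else plot_image_vars["steps"]
    cfg = cfg if cfg is not None else plot_image_vars["cfg"]
    sampler_name = sampler_name if sampler_name is not None else plot_image_vars["sampler_name"]
    scheduler = scheduler if scheduler is not None else plot_image_vars["scheduler"]
    denoise = denoise if denoise is not None else plot_image_vars["denoise"]

    samples = sampler.common_ksampler(model, seed, steps, cfg, sampler_name, scheduler,
                                      positive, negative, latent_in, denoise=denoise)
    latent = samples["samples"]       # latent tensor for this grid cell
    image = vae.decode(latent).cpu()  # decode to image space for plotting
    return latent, image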
AIFSH/NativeDancer
nativedancer/third_part/detectron2/utils/video_visualizer.py
[ { "identifier": "Instances", "path": "nativedancer/third_part/detectron2/structures/instances.py", "snippet": "class Instances:\n \"\"\"\n This class represents a list of instances in an image.\n It stores the attributes of instances (e.g., boxes, masks, labels, scores) as \"fields\".\n All fields must have the same ``__len__`` which is the number of instances.\n\n All other (non-field) attributes of this class are considered private:\n they must start with '_' and are not modifiable by a user.\n\n Some basic usage:\n\n 1. Set/get/check a field:\n\n .. code-block:: python\n\n instances.gt_boxes = Boxes(...)\n print(instances.pred_masks) # a tensor of shape (N, H, W)\n print('gt_masks' in instances)\n\n 2. ``len(instances)`` returns the number of instances\n 3. Indexing: ``instances[indices]`` will apply the indexing on all the fields\n and returns a new :class:`Instances`.\n Typically, ``indices`` is a integer vector of indices,\n or a binary mask of length ``num_instances``\n\n .. code-block:: python\n\n category_3_detections = instances[instances.pred_classes == 3]\n confident_detections = instances[instances.scores > 0.9]\n \"\"\"\n\n def __init__(self, image_size: Tuple[int, int], **kwargs: Any):\n \"\"\"\n Args:\n image_size (height, width): the spatial size of the image.\n kwargs: fields to add to this `Instances`.\n \"\"\"\n self._image_size = image_size\n self._fields: Dict[str, Any] = {}\n for k, v in kwargs.items():\n self.set(k, v)\n\n @property\n def image_size(self) -> Tuple[int, int]:\n \"\"\"\n Returns:\n tuple: height, width\n \"\"\"\n return self._image_size\n\n def __setattr__(self, name: str, val: Any) -> None:\n if name.startswith(\"_\"):\n super().__setattr__(name, val)\n else:\n self.set(name, val)\n\n def __getattr__(self, name: str) -> Any:\n if name == \"_fields\" or name not in self._fields:\n raise AttributeError(\"Cannot find field '{}' in the given Instances!\".format(name))\n return self._fields[name]\n\n def set(self, name: str, value: Any) -> None:\n \"\"\"\n Set the field named `name` to `value`.\n The length of `value` must be the number of instances,\n and must agree with other existing fields in this object.\n \"\"\"\n with warnings.catch_warnings(record=True):\n data_len = len(value)\n if len(self._fields):\n assert (\n len(self) == data_len\n ), \"Adding a field of length {} to a Instances of length {}\".format(data_len, len(self))\n self._fields[name] = value\n\n def has(self, name: str) -> bool:\n \"\"\"\n Returns:\n bool: whether the field called `name` exists.\n \"\"\"\n return name in self._fields\n\n def remove(self, name: str) -> None:\n \"\"\"\n Remove the field called `name`.\n \"\"\"\n del self._fields[name]\n\n def get(self, name: str) -> Any:\n \"\"\"\n Returns the field called `name`.\n \"\"\"\n return self._fields[name]\n\n def get_fields(self) -> Dict[str, Any]:\n \"\"\"\n Returns:\n dict: a dict which maps names (str) to data of the fields\n\n Modifying the returned dict will modify this instance.\n \"\"\"\n return self._fields\n\n # Tensor-like methods\n def to(self, *args: Any, **kwargs: Any) -> \"Instances\":\n \"\"\"\n Returns:\n Instances: all fields are called with a `to(device)`, if the field has this method.\n \"\"\"\n ret = Instances(self._image_size)\n for k, v in self._fields.items():\n if hasattr(v, \"to\"):\n v = v.to(*args, **kwargs)\n ret.set(k, v)\n return ret\n\n def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> \"Instances\":\n \"\"\"\n Args:\n item: an index-like object and will be used to index 
all the fields.\n\n Returns:\n If `item` is a string, return the data in the corresponding field.\n Otherwise, returns an `Instances` where all fields are indexed by `item`.\n \"\"\"\n if type(item) == int:\n if item >= len(self) or item < -len(self):\n raise IndexError(\"Instances index out of range!\")\n else:\n item = slice(item, None, len(self))\n\n ret = Instances(self._image_size)\n for k, v in self._fields.items():\n ret.set(k, v[item])\n return ret\n\n def __len__(self) -> int:\n for v in self._fields.values():\n # use __len__ because len() has to be int and is not friendly to tracing\n return v.__len__()\n raise NotImplementedError(\"Empty Instances does not support __len__!\")\n\n def __iter__(self):\n raise NotImplementedError(\"`Instances` object is not iterable!\")\n\n @staticmethod\n def cat(instance_lists: List[\"Instances\"]) -> \"Instances\":\n \"\"\"\n Args:\n instance_lists (list[Instances])\n\n Returns:\n Instances\n \"\"\"\n assert all(isinstance(i, Instances) for i in instance_lists)\n assert len(instance_lists) > 0\n if len(instance_lists) == 1:\n return instance_lists[0]\n\n image_size = instance_lists[0].image_size\n if not isinstance(image_size, torch.Tensor): # could be a tensor in tracing\n for i in instance_lists[1:]:\n assert i.image_size == image_size\n ret = Instances(image_size)\n for k in instance_lists[0]._fields.keys():\n values = [i.get(k) for i in instance_lists]\n v0 = values[0]\n if isinstance(v0, torch.Tensor):\n values = torch.cat(values, dim=0)\n elif isinstance(v0, list):\n values = list(itertools.chain(*values))\n elif hasattr(type(v0), \"cat\"):\n values = type(v0).cat(values)\n else:\n raise ValueError(\"Unsupported type {} for concatenation\".format(type(v0)))\n ret.set(k, values)\n return ret\n\n def __str__(self) -> str:\n s = self.__class__.__name__ + \"(\"\n s += \"num_instances={}, \".format(len(self))\n s += \"image_height={}, \".format(self._image_size[0])\n s += \"image_width={}, \".format(self._image_size[1])\n s += \"fields=[{}])\".format(\", \".join((f\"{k}: {v}\" for k, v in self._fields.items())))\n return s\n\n __repr__ = __str__" }, { "identifier": "ColorMode", "path": "nativedancer/third_part/detectron2/utils/visualizer.py", "snippet": "class ColorMode(Enum):\n \"\"\"\n Enum of different color modes to use for instance visualizations.\n \"\"\"\n\n IMAGE = 0\n \"\"\"\n Picks a random color for every instance and overlay segmentations with low opacity.\n \"\"\"\n SEGMENTATION = 1\n \"\"\"\n Let instances of the same category have similar colors\n (from metadata.thing_colors), and overlay them with\n high opacity. 
This provides more attention on the quality of segmentation.\n \"\"\"\n IMAGE_BW = 2\n \"\"\"\n Same as IMAGE, but convert all areas without masks to gray-scale.\n Only available for drawing per-instance mask predictions.\n \"\"\"" }, { "identifier": "Visualizer", "path": "nativedancer/third_part/detectron2/utils/visualizer.py", "snippet": "class Visualizer:\n \"\"\"\n Visualizer that draws data about detection/segmentation on images.\n\n It contains methods like `draw_{text,box,circle,line,binary_mask,polygon}`\n that draw primitive objects to images, as well as high-level wrappers like\n `draw_{instance_predictions,sem_seg,panoptic_seg_predictions,dataset_dict}`\n that draw composite data in some pre-defined style.\n\n Note that the exact visualization style for the high-level wrappers are subject to change.\n Style such as color, opacity, label contents, visibility of labels, or even the visibility\n of objects themselves (e.g. when the object is too small) may change according\n to different heuristics, as long as the results still look visually reasonable.\n\n To obtain a consistent style, you can implement custom drawing functions with the\n abovementioned primitive methods instead. If you need more customized visualization\n styles, you can process the data yourself following their format documented in\n tutorials (:doc:`/tutorials/models`, :doc:`/tutorials/datasets`). This class does not\n intend to satisfy everyone's preference on drawing styles.\n\n This visualizer focuses on high rendering quality rather than performance. It is not\n designed to be used for real-time applications.\n \"\"\"\n\n # TODO implement a fast, rasterized version using OpenCV\n\n def __init__(self, img_rgb, metadata=None, scale=1.0, instance_mode=ColorMode.IMAGE):\n \"\"\"\n Args:\n img_rgb: a numpy array of shape (H, W, C), where H and W correspond to\n the height and width of the image respectively. C is the number of\n color channels. The image is required to be in RGB format since that\n is a requirement of the Matplotlib library. The image is also expected\n to be in the range [0, 255].\n metadata (Metadata): dataset metadata (e.g. class names and colors)\n instance_mode (ColorMode): defines one of the pre-defined style for drawing\n instances on an image.\n \"\"\"\n self.img = np.asarray(img_rgb).clip(0, 255).astype(np.uint8)\n if metadata is None:\n metadata = MetadataCatalog.get(\"__nonexist__\")\n self.metadata = metadata\n self.output = VisImage(self.img, scale=scale)\n self.cpu_device = torch.device(\"cpu\")\n\n # too small texts are useless, therefore clamp to 9\n self._default_font_size = max(\n np.sqrt(self.output.height * self.output.width) // 90, 10 // scale\n )\n self._instance_mode = instance_mode\n self.keypoint_threshold = _KEYPOINT_THRESHOLD\n\n def draw_instance_predictions(self, predictions):\n \"\"\"\n Draw instance-level prediction results on an image.\n\n Args:\n predictions (Instances): the output of an instance detection/segmentation\n model. 
Following fields will be used to draw:\n \"pred_boxes\", \"pred_classes\", \"scores\", \"pred_masks\" (or \"pred_masks_rle\").\n\n Returns:\n output (VisImage): image object with visualizations.\n \"\"\"\n boxes = predictions.pred_boxes if predictions.has(\"pred_boxes\") else None\n scores = predictions.scores if predictions.has(\"scores\") else None\n classes = predictions.pred_classes.tolist() if predictions.has(\"pred_classes\") else None\n labels = _create_text_labels(classes, scores, self.metadata.get(\"thing_classes\", None))\n keypoints = predictions.pred_keypoints if predictions.has(\"pred_keypoints\") else None\n\n if predictions.has(\"pred_masks\"):\n masks = np.asarray(predictions.pred_masks)\n masks = [GenericMask(x, self.output.height, self.output.width) for x in masks]\n else:\n masks = None\n\n if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get(\"thing_colors\"):\n colors = [\n self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in classes\n ]\n alpha = 0.8\n else:\n colors = None\n alpha = 0.5\n\n if self._instance_mode == ColorMode.IMAGE_BW:\n self.output.reset_image(\n self._create_grayscale_image(\n (predictions.pred_masks.any(dim=0) > 0).numpy()\n if predictions.has(\"pred_masks\")\n else None\n )\n )\n alpha = 0.3\n\n self.overlay_instances(\n masks=masks,\n boxes=boxes,\n labels=labels,\n keypoints=keypoints,\n assigned_colors=colors,\n alpha=alpha,\n )\n return self.output\n\n def draw_sem_seg(self, sem_seg, area_threshold=None, alpha=0.8):\n \"\"\"\n Draw semantic segmentation predictions/labels.\n\n Args:\n sem_seg (Tensor or ndarray): the segmentation of shape (H, W).\n Each value is the integer label of the pixel.\n area_threshold (int): segments with less than `area_threshold` are not drawn.\n alpha (float): the larger it is, the more opaque the segmentations are.\n\n Returns:\n output (VisImage): image object with visualizations.\n \"\"\"\n if isinstance(sem_seg, torch.Tensor):\n sem_seg = sem_seg.numpy()\n labels, areas = np.unique(sem_seg, return_counts=True)\n sorted_idxs = np.argsort(-areas).tolist()\n labels = labels[sorted_idxs]\n for label in filter(lambda l: l < len(self.metadata.stuff_classes), labels):\n try:\n mask_color = [x / 255 for x in self.metadata.stuff_colors[label]]\n except (AttributeError, IndexError):\n mask_color = None\n\n binary_mask = (sem_seg == label).astype(np.uint8)\n text = self.metadata.stuff_classes[label]\n self.draw_binary_mask(\n binary_mask,\n color=mask_color,\n edge_color=_OFF_WHITE,\n text=text,\n alpha=alpha,\n area_threshold=area_threshold,\n )\n return self.output\n\n def draw_panoptic_seg(self, panoptic_seg, segments_info, area_threshold=None, alpha=0.7):\n \"\"\"\n Draw panoptic prediction annotations or results.\n\n Args:\n panoptic_seg (Tensor): of shape (height, width) where the values are ids for each\n segment.\n segments_info (list[dict] or None): Describe each segment in `panoptic_seg`.\n If it is a ``list[dict]``, each dict contains keys \"id\", \"category_id\".\n If None, category id of each pixel is computed by\n ``pixel // metadata.label_divisor``.\n area_threshold (int): stuff segments with less than `area_threshold` are not drawn.\n\n Returns:\n output (VisImage): image object with visualizations.\n \"\"\"\n pred = _PanopticPrediction(panoptic_seg, segments_info, self.metadata)\n\n if self._instance_mode == ColorMode.IMAGE_BW:\n self.output.reset_image(self._create_grayscale_image(pred.non_empty_mask()))\n\n # draw mask for all semantic segments first i.e. 
\"stuff\"\n for mask, sinfo in pred.semantic_masks():\n category_idx = sinfo[\"category_id\"]\n try:\n mask_color = [x / 255 for x in self.metadata.stuff_colors[category_idx]]\n except AttributeError:\n mask_color = None\n\n text = self.metadata.stuff_classes[category_idx]\n self.draw_binary_mask(\n mask,\n color=mask_color,\n edge_color=_OFF_WHITE,\n text=text,\n alpha=alpha,\n area_threshold=area_threshold,\n )\n\n # draw mask for all instances second\n all_instances = list(pred.instance_masks())\n if len(all_instances) == 0:\n return self.output\n masks, sinfo = list(zip(*all_instances))\n category_ids = [x[\"category_id\"] for x in sinfo]\n\n try:\n scores = [x[\"score\"] for x in sinfo]\n except KeyError:\n scores = None\n labels = _create_text_labels(\n category_ids, scores, self.metadata.thing_classes, [x.get(\"iscrowd\", 0) for x in sinfo]\n )\n\n try:\n colors = [\n self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in category_ids\n ]\n except AttributeError:\n colors = None\n self.overlay_instances(masks=masks, labels=labels, assigned_colors=colors, alpha=alpha)\n\n return self.output\n\n draw_panoptic_seg_predictions = draw_panoptic_seg # backward compatibility\n\n def draw_dataset_dict(self, dic):\n \"\"\"\n Draw annotations/segmentations in Detectron2 Dataset format.\n\n Args:\n dic (dict): annotation/segmentation data of one image, in Detectron2 Dataset format.\n\n Returns:\n output (VisImage): image object with visualizations.\n \"\"\"\n annos = dic.get(\"annotations\", None)\n if annos:\n if \"segmentation\" in annos[0]:\n masks = [x[\"segmentation\"] for x in annos]\n else:\n masks = None\n if \"keypoints\" in annos[0]:\n keypts = [x[\"keypoints\"] for x in annos]\n keypts = np.array(keypts).reshape(len(annos), -1, 3)\n else:\n keypts = None\n\n boxes = [\n BoxMode.convert(x[\"bbox\"], x[\"bbox_mode\"], BoxMode.XYXY_ABS)\n if len(x[\"bbox\"]) == 4\n else x[\"bbox\"]\n for x in annos\n ]\n\n colors = None\n category_ids = [x[\"category_id\"] for x in annos]\n if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get(\"thing_colors\"):\n colors = [\n self._jitter([x / 255 for x in self.metadata.thing_colors[c]])\n for c in category_ids\n ]\n names = self.metadata.get(\"thing_classes\", None)\n labels = _create_text_labels(\n category_ids,\n scores=None,\n class_names=names,\n is_crowd=[x.get(\"iscrowd\", 0) for x in annos],\n )\n self.overlay_instances(\n labels=labels, boxes=boxes, masks=masks, keypoints=keypts, assigned_colors=colors\n )\n\n sem_seg = dic.get(\"sem_seg\", None)\n if sem_seg is None and \"sem_seg_file_name\" in dic:\n with PathManager.open(dic[\"sem_seg_file_name\"], \"rb\") as f:\n sem_seg = Image.open(f)\n sem_seg = np.asarray(sem_seg, dtype=\"uint8\")\n if sem_seg is not None:\n self.draw_sem_seg(sem_seg, area_threshold=0, alpha=0.5)\n\n pan_seg = dic.get(\"pan_seg\", None)\n if pan_seg is None and \"pan_seg_file_name\" in dic:\n with PathManager.open(dic[\"pan_seg_file_name\"], \"rb\") as f:\n pan_seg = Image.open(f)\n pan_seg = np.asarray(pan_seg)\n from panopticapi.utils import rgb2id\n\n pan_seg = rgb2id(pan_seg)\n if pan_seg is not None:\n segments_info = dic[\"segments_info\"]\n pan_seg = torch.tensor(pan_seg)\n self.draw_panoptic_seg(pan_seg, segments_info, area_threshold=0, alpha=0.5)\n return self.output\n\n def overlay_instances(\n self,\n *,\n boxes=None,\n labels=None,\n masks=None,\n keypoints=None,\n assigned_colors=None,\n alpha=0.5,\n ):\n \"\"\"\n Args:\n boxes (Boxes, RotatedBoxes or ndarray): either a 
:class:`Boxes`,\n or an Nx4 numpy array of XYXY_ABS format for the N objects in a single image,\n or a :class:`RotatedBoxes`,\n or an Nx5 numpy array of (x_center, y_center, width, height, angle_degrees) format\n for the N objects in a single image,\n labels (list[str]): the text to be displayed for each instance.\n masks (masks-like object): Supported types are:\n\n * :class:`detectron2.structures.PolygonMasks`,\n :class:`detectron2.structures.BitMasks`.\n * list[list[ndarray]]: contains the segmentation masks for all objects in one image.\n The first level of the list corresponds to individual instances. The second\n level to all the polygon that compose the instance, and the third level\n to the polygon coordinates. The third level should have the format of\n [x0, y0, x1, y1, ..., xn, yn] (n >= 3).\n * list[ndarray]: each ndarray is a binary mask of shape (H, W).\n * list[dict]: each dict is a COCO-style RLE.\n keypoints (Keypoint or array like): an array-like object of shape (N, K, 3),\n where the N is the number of instances and K is the number of keypoints.\n The last dimension corresponds to (x, y, visibility or score).\n assigned_colors (list[matplotlib.colors]): a list of colors, where each color\n corresponds to each mask or box in the image. Refer to 'matplotlib.colors'\n for full list of formats that the colors are accepted in.\n Returns:\n output (VisImage): image object with visualizations.\n \"\"\"\n num_instances = 0\n if boxes is not None:\n boxes = self._convert_boxes(boxes)\n num_instances = len(boxes)\n if masks is not None:\n masks = self._convert_masks(masks)\n if num_instances:\n assert len(masks) == num_instances\n else:\n num_instances = len(masks)\n if keypoints is not None:\n if num_instances:\n assert len(keypoints) == num_instances\n else:\n num_instances = len(keypoints)\n keypoints = self._convert_keypoints(keypoints)\n if labels is not None:\n assert len(labels) == num_instances\n if assigned_colors is None:\n assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)]\n if num_instances == 0:\n return self.output\n if boxes is not None and boxes.shape[1] == 5:\n return self.overlay_rotated_instances(\n boxes=boxes, labels=labels, assigned_colors=assigned_colors\n )\n\n # Display in largest to smallest order to reduce occlusion.\n areas = None\n if boxes is not None:\n areas = np.prod(boxes[:, 2:] - boxes[:, :2], axis=1)\n elif masks is not None:\n areas = np.asarray([x.area() for x in masks])\n\n if areas is not None:\n sorted_idxs = np.argsort(-areas).tolist()\n # Re-order overlapped instances in descending order.\n boxes = boxes[sorted_idxs] if boxes is not None else None\n labels = [labels[k] for k in sorted_idxs] if labels is not None else None\n masks = [masks[idx] for idx in sorted_idxs] if masks is not None else None\n assigned_colors = [assigned_colors[idx] for idx in sorted_idxs]\n keypoints = keypoints[sorted_idxs] if keypoints is not None else None\n\n for i in range(num_instances):\n color = assigned_colors[i]\n if boxes is not None:\n self.draw_box(boxes[i], edge_color=color)\n\n if masks is not None:\n for segment in masks[i].polygons:\n self.draw_polygon(segment.reshape(-1, 2), color, alpha=alpha)\n\n if labels is not None:\n # first get a box\n if boxes is not None:\n x0, y0, x1, y1 = boxes[i]\n text_pos = (x0, y0) # if drawing boxes, put text on the box corner.\n horiz_align = \"left\"\n elif masks is not None:\n # skip small mask without polygon\n if len(masks[i].polygons) == 0:\n continue\n\n x0, y0, x1, y1 = 
masks[i].bbox()\n\n # draw text in the center (defined by median) when box is not drawn\n # median is less sensitive to outliers.\n text_pos = np.median(masks[i].mask.nonzero(), axis=1)[::-1]\n horiz_align = \"center\"\n else:\n continue # drawing the box confidence for keypoints isn't very useful.\n # for small objects, draw text at the side to avoid occlusion\n instance_area = (y1 - y0) * (x1 - x0)\n if (\n instance_area < _SMALL_OBJECT_AREA_THRESH * self.output.scale\n or y1 - y0 < 40 * self.output.scale\n ):\n if y1 >= self.output.height - 5:\n text_pos = (x1, y0)\n else:\n text_pos = (x0, y1)\n\n height_ratio = (y1 - y0) / np.sqrt(self.output.height * self.output.width)\n lighter_color = self._change_color_brightness(color, brightness_factor=0.7)\n font_size = (\n np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2)\n * 0.5\n * self._default_font_size\n )\n self.draw_text(\n labels[i],\n text_pos,\n color=lighter_color,\n horizontal_alignment=horiz_align,\n font_size=font_size,\n )\n\n # draw keypoints\n if keypoints is not None:\n for keypoints_per_instance in keypoints:\n self.draw_and_connect_keypoints(keypoints_per_instance)\n\n return self.output\n\n def overlay_rotated_instances(self, boxes=None, labels=None, assigned_colors=None):\n \"\"\"\n Args:\n boxes (ndarray): an Nx5 numpy array of\n (x_center, y_center, width, height, angle_degrees) format\n for the N objects in a single image.\n labels (list[str]): the text to be displayed for each instance.\n assigned_colors (list[matplotlib.colors]): a list of colors, where each color\n corresponds to each mask or box in the image. Refer to 'matplotlib.colors'\n for full list of formats that the colors are accepted in.\n\n Returns:\n output (VisImage): image object with visualizations.\n \"\"\"\n num_instances = len(boxes)\n\n if assigned_colors is None:\n assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)]\n if num_instances == 0:\n return self.output\n\n # Display in largest to smallest order to reduce occlusion.\n if boxes is not None:\n areas = boxes[:, 2] * boxes[:, 3]\n\n sorted_idxs = np.argsort(-areas).tolist()\n # Re-order overlapped instances in descending order.\n boxes = boxes[sorted_idxs]\n labels = [labels[k] for k in sorted_idxs] if labels is not None else None\n colors = [assigned_colors[idx] for idx in sorted_idxs]\n\n for i in range(num_instances):\n self.draw_rotated_box_with_label(\n boxes[i], edge_color=colors[i], label=labels[i] if labels is not None else None\n )\n\n return self.output\n\n def draw_and_connect_keypoints(self, keypoints):\n \"\"\"\n Draws keypoints of an instance and follows the rules for keypoint connections\n to draw lines between appropriate keypoints. 
This follows color heuristics for\n line color.\n\n Args:\n keypoints (Tensor): a tensor of shape (K, 3), where K is the number of keypoints\n and the last dimension corresponds to (x, y, probability).\n\n Returns:\n output (VisImage): image object with visualizations.\n \"\"\"\n visible = {}\n keypoint_names = self.metadata.get(\"keypoint_names\")\n for idx, keypoint in enumerate(keypoints):\n\n # draw keypoint\n x, y, prob = keypoint\n if prob > self.keypoint_threshold:\n self.draw_circle((x, y), color=_RED)\n if keypoint_names:\n keypoint_name = keypoint_names[idx]\n visible[keypoint_name] = (x, y)\n\n if self.metadata.get(\"keypoint_connection_rules\"):\n for kp0, kp1, color in self.metadata.keypoint_connection_rules:\n if kp0 in visible and kp1 in visible:\n x0, y0 = visible[kp0]\n x1, y1 = visible[kp1]\n color = tuple(x / 255.0 for x in color)\n self.draw_line([x0, x1], [y0, y1], color=color)\n\n # draw lines from nose to mid-shoulder and mid-shoulder to mid-hip\n # Note that this strategy is specific to person keypoints.\n # For other keypoints, it should just do nothing\n try:\n ls_x, ls_y = visible[\"left_shoulder\"]\n rs_x, rs_y = visible[\"right_shoulder\"]\n mid_shoulder_x, mid_shoulder_y = (ls_x + rs_x) / 2, (ls_y + rs_y) / 2\n except KeyError:\n pass\n else:\n # draw line from nose to mid-shoulder\n nose_x, nose_y = visible.get(\"nose\", (None, None))\n if nose_x is not None:\n self.draw_line([nose_x, mid_shoulder_x], [nose_y, mid_shoulder_y], color=_RED)\n\n try:\n # draw line from mid-shoulder to mid-hip\n lh_x, lh_y = visible[\"left_hip\"]\n rh_x, rh_y = visible[\"right_hip\"]\n except KeyError:\n pass\n else:\n mid_hip_x, mid_hip_y = (lh_x + rh_x) / 2, (lh_y + rh_y) / 2\n self.draw_line([mid_hip_x, mid_shoulder_x], [mid_hip_y, mid_shoulder_y], color=_RED)\n return self.output\n\n \"\"\"\n Primitive drawing functions:\n \"\"\"\n\n def draw_text(\n self,\n text,\n position,\n *,\n font_size=None,\n color=\"g\",\n horizontal_alignment=\"center\",\n rotation=0,\n ):\n \"\"\"\n Args:\n text (str): class label\n position (tuple): a tuple of the x and y coordinates to place text on image.\n font_size (int, optional): font of the text. If not provided, a font size\n proportional to the image width is calculated and used.\n color: color of the text. Refer to `matplotlib.colors` for full list\n of formats that are accepted.\n horizontal_alignment (str): see `matplotlib.text.Text`\n rotation: rotation angle in degrees CCW\n\n Returns:\n output (VisImage): image object with text drawn.\n \"\"\"\n if not font_size:\n font_size = self._default_font_size\n\n # since the text background is dark, we don't want the text to be dark\n color = np.maximum(list(mplc.to_rgb(color)), 0.2)\n color[np.argmax(color)] = max(0.8, np.max(color))\n\n x, y = position\n self.output.ax.text(\n x,\n y,\n text,\n size=font_size * self.output.scale,\n family=\"sans-serif\",\n bbox={\"facecolor\": \"black\", \"alpha\": 0.8, \"pad\": 0.7, \"edgecolor\": \"none\"},\n verticalalignment=\"top\",\n horizontalalignment=horizontal_alignment,\n color=color,\n zorder=10,\n rotation=rotation,\n )\n return self.output\n\n def draw_box(self, box_coord, alpha=0.5, edge_color=\"g\", line_style=\"-\"):\n \"\"\"\n Args:\n box_coord (tuple): a tuple containing x0, y0, x1, y1 coordinates, where x0 and y0\n are the coordinates of the image's top left corner. x1 and y1 are the\n coordinates of the image's bottom right corner.\n alpha (float): blending efficient. 
Smaller values lead to more transparent masks.\n edge_color: color of the outline of the box. Refer to `matplotlib.colors`\n for full list of formats that are accepted.\n line_style (string): the string to use to create the outline of the boxes.\n\n Returns:\n output (VisImage): image object with box drawn.\n \"\"\"\n x0, y0, x1, y1 = box_coord\n width = x1 - x0\n height = y1 - y0\n\n linewidth = max(self._default_font_size / 4, 1)\n\n self.output.ax.add_patch(\n mpl.patches.Rectangle(\n (x0, y0),\n width,\n height,\n fill=False,\n edgecolor=edge_color,\n linewidth=linewidth * self.output.scale,\n alpha=alpha,\n linestyle=line_style,\n )\n )\n return self.output\n\n def draw_rotated_box_with_label(\n self, rotated_box, alpha=0.5, edge_color=\"g\", line_style=\"-\", label=None\n ):\n \"\"\"\n Draw a rotated box with label on its top-left corner.\n\n Args:\n rotated_box (tuple): a tuple containing (cnt_x, cnt_y, w, h, angle),\n where cnt_x and cnt_y are the center coordinates of the box.\n w and h are the width and height of the box. angle represents how\n many degrees the box is rotated CCW with regard to the 0-degree box.\n alpha (float): blending efficient. Smaller values lead to more transparent masks.\n edge_color: color of the outline of the box. Refer to `matplotlib.colors`\n for full list of formats that are accepted.\n line_style (string): the string to use to create the outline of the boxes.\n label (string): label for rotated box. It will not be rendered when set to None.\n\n Returns:\n output (VisImage): image object with box drawn.\n \"\"\"\n cnt_x, cnt_y, w, h, angle = rotated_box\n area = w * h\n # use thinner lines when the box is small\n linewidth = self._default_font_size / (\n 6 if area < _SMALL_OBJECT_AREA_THRESH * self.output.scale else 3\n )\n\n theta = angle * math.pi / 180.0\n c = math.cos(theta)\n s = math.sin(theta)\n rect = [(-w / 2, h / 2), (-w / 2, -h / 2), (w / 2, -h / 2), (w / 2, h / 2)]\n # x: left->right ; y: top->down\n rotated_rect = [(s * yy + c * xx + cnt_x, c * yy - s * xx + cnt_y) for (xx, yy) in rect]\n for k in range(4):\n j = (k + 1) % 4\n self.draw_line(\n [rotated_rect[k][0], rotated_rect[j][0]],\n [rotated_rect[k][1], rotated_rect[j][1]],\n color=edge_color,\n linestyle=\"--\" if k == 1 else line_style,\n linewidth=linewidth,\n )\n\n if label is not None:\n text_pos = rotated_rect[1] # topleft corner\n\n height_ratio = h / np.sqrt(self.output.height * self.output.width)\n label_color = self._change_color_brightness(edge_color, brightness_factor=0.7)\n font_size = (\n np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2) * 0.5 * self._default_font_size\n )\n self.draw_text(label, text_pos, color=label_color, font_size=font_size, rotation=angle)\n\n return self.output\n\n def draw_circle(self, circle_coord, color, radius=3):\n \"\"\"\n Args:\n circle_coord (list(int) or tuple(int)): contains the x and y coordinates\n of the center of the circle.\n color: color of the polygon. 
Refer to `matplotlib.colors` for a full list of\n formats that are accepted.\n radius (int): radius of the circle.\n\n Returns:\n output (VisImage): image object with box drawn.\n \"\"\"\n x, y = circle_coord\n self.output.ax.add_patch(\n mpl.patches.Circle(circle_coord, radius=radius, fill=True, color=color)\n )\n return self.output\n\n def draw_line(self, x_data, y_data, color, linestyle=\"-\", linewidth=None):\n \"\"\"\n Args:\n x_data (list[int]): a list containing x values of all the points being drawn.\n Length of list should match the length of y_data.\n y_data (list[int]): a list containing y values of all the points being drawn.\n Length of list should match the length of x_data.\n color: color of the line. Refer to `matplotlib.colors` for a full list of\n formats that are accepted.\n linestyle: style of the line. Refer to `matplotlib.lines.Line2D`\n for a full list of formats that are accepted.\n linewidth (float or None): width of the line. When it's None,\n a default value will be computed and used.\n\n Returns:\n output (VisImage): image object with line drawn.\n \"\"\"\n if linewidth is None:\n linewidth = self._default_font_size / 3\n linewidth = max(linewidth, 1)\n self.output.ax.add_line(\n mpl.lines.Line2D(\n x_data,\n y_data,\n linewidth=linewidth * self.output.scale,\n color=color,\n linestyle=linestyle,\n )\n )\n return self.output\n\n def draw_binary_mask(\n self, binary_mask, color=None, *, edge_color=None, text=None, alpha=0.5, area_threshold=10\n ):\n \"\"\"\n Args:\n binary_mask (ndarray): numpy array of shape (H, W), where H is the image height and\n W is the image width. Each value in the array is either a 0 or 1 value of uint8\n type.\n color: color of the mask. Refer to `matplotlib.colors` for a full list of\n formats that are accepted. If None, will pick a random color.\n edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a\n full list of formats that are accepted.\n text (str): if None, will be drawn on the object\n alpha (float): blending efficient. 
Smaller values lead to more transparent masks.\n area_threshold (float): a connected component smaller than this area will not be shown.\n\n Returns:\n output (VisImage): image object with mask drawn.\n \"\"\"\n if color is None:\n color = random_color(rgb=True, maximum=1)\n color = mplc.to_rgb(color)\n\n has_valid_segment = False\n binary_mask = binary_mask.astype(\"uint8\") # opencv needs uint8\n mask = GenericMask(binary_mask, self.output.height, self.output.width)\n shape2d = (binary_mask.shape[0], binary_mask.shape[1])\n\n if not mask.has_holes:\n # draw polygons for regular masks\n for segment in mask.polygons:\n area = mask_util.area(mask_util.frPyObjects([segment], shape2d[0], shape2d[1]))\n if area < (area_threshold or 0):\n continue\n has_valid_segment = True\n segment = segment.reshape(-1, 2)\n self.draw_polygon(segment, color=color, edge_color=edge_color, alpha=alpha)\n else:\n # TODO: Use Path/PathPatch to draw vector graphics:\n # https://stackoverflow.com/questions/8919719/how-to-plot-a-complex-polygon\n rgba = np.zeros(shape2d + (4,), dtype=\"float32\")\n rgba[:, :, :3] = color\n rgba[:, :, 3] = (mask.mask == 1).astype(\"float32\") * alpha\n has_valid_segment = True\n self.output.ax.imshow(rgba, extent=(0, self.output.width, self.output.height, 0))\n\n if text is not None and has_valid_segment:\n lighter_color = self._change_color_brightness(color, brightness_factor=0.7)\n self._draw_text_in_mask(binary_mask, text, lighter_color)\n return self.output\n\n def draw_soft_mask(self, soft_mask, color=None, *, text=None, alpha=0.5):\n \"\"\"\n Args:\n soft_mask (ndarray): float array of shape (H, W), each value in [0, 1].\n color: color of the mask. Refer to `matplotlib.colors` for a full list of\n formats that are accepted. If None, will pick a random color.\n text (str): if None, will be drawn on the object\n alpha (float): blending efficient. Smaller values lead to more transparent masks.\n\n Returns:\n output (VisImage): image object with mask drawn.\n \"\"\"\n if color is None:\n color = random_color(rgb=True, maximum=1)\n color = mplc.to_rgb(color)\n\n shape2d = (soft_mask.shape[0], soft_mask.shape[1])\n rgba = np.zeros(shape2d + (4,), dtype=\"float32\")\n rgba[:, :, :3] = color\n rgba[:, :, 3] = soft_mask * alpha\n self.output.ax.imshow(rgba, extent=(0, self.output.width, self.output.height, 0))\n\n if text is not None:\n lighter_color = self._change_color_brightness(color, brightness_factor=0.7)\n binary_mask = (soft_mask > 0.5).astype(\"uint8\")\n self._draw_text_in_mask(binary_mask, text, lighter_color)\n return self.output\n\n def draw_polygon(self, segment, color, edge_color=None, alpha=0.5):\n \"\"\"\n Args:\n segment: numpy array of shape Nx2, containing all the points in the polygon.\n color: color of the polygon. Refer to `matplotlib.colors` for a full list of\n formats that are accepted.\n edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a\n full list of formats that are accepted. If not provided, a darker shade\n of the polygon color will be used instead.\n alpha (float): blending efficient. 
Smaller values lead to more transparent masks.\n\n Returns:\n output (VisImage): image object with polygon drawn.\n \"\"\"\n if edge_color is None:\n # make edge color darker than the polygon color\n if alpha > 0.8:\n edge_color = self._change_color_brightness(color, brightness_factor=-0.7)\n else:\n edge_color = color\n edge_color = mplc.to_rgb(edge_color) + (1,)\n\n polygon = mpl.patches.Polygon(\n segment,\n fill=True,\n facecolor=mplc.to_rgb(color) + (alpha,),\n edgecolor=edge_color,\n linewidth=max(self._default_font_size // 15 * self.output.scale, 1),\n )\n self.output.ax.add_patch(polygon)\n return self.output\n\n \"\"\"\n Internal methods:\n \"\"\"\n\n def _jitter(self, color):\n \"\"\"\n Randomly modifies given color to produce a slightly different color than the color given.\n\n Args:\n color (tuple[double]): a tuple of 3 elements, containing the RGB values of the color\n picked. The values in the list are in the [0.0, 1.0] range.\n\n Returns:\n jittered_color (tuple[double]): a tuple of 3 elements, containing the RGB values of the\n color after being jittered. The values in the list are in the [0.0, 1.0] range.\n \"\"\"\n color = mplc.to_rgb(color)\n vec = np.random.rand(3)\n # better to do it in another color space\n vec = vec / np.linalg.norm(vec) * 0.5\n res = np.clip(vec + color, 0, 1)\n return tuple(res)\n\n def _create_grayscale_image(self, mask=None):\n \"\"\"\n Create a grayscale version of the original image.\n The colors in masked area, if given, will be kept.\n \"\"\"\n img_bw = self.img.astype(\"f4\").mean(axis=2)\n img_bw = np.stack([img_bw] * 3, axis=2)\n if mask is not None:\n img_bw[mask] = self.img[mask]\n return img_bw\n\n def _change_color_brightness(self, color, brightness_factor):\n \"\"\"\n Depending on the brightness_factor, gives a lighter or darker color i.e. a color with\n less or more saturation than the original color.\n\n Args:\n color: color of the polygon. Refer to `matplotlib.colors` for a full list of\n formats that are accepted.\n brightness_factor (float): a value in [-1.0, 1.0] range. A lightness factor of\n 0 will correspond to no change, a factor in [-1.0, 0) range will result in\n a darker color and a factor in (0, 1.0] range will result in a lighter color.\n\n Returns:\n modified_color (tuple[double]): a tuple containing the RGB values of the\n modified color. 
Each value in the tuple is in the [0.0, 1.0] range.\n \"\"\"\n assert brightness_factor >= -1.0 and brightness_factor <= 1.0\n color = mplc.to_rgb(color)\n polygon_color = colorsys.rgb_to_hls(*mplc.to_rgb(color))\n modified_lightness = polygon_color[1] + (brightness_factor * polygon_color[1])\n modified_lightness = 0.0 if modified_lightness < 0.0 else modified_lightness\n modified_lightness = 1.0 if modified_lightness > 1.0 else modified_lightness\n modified_color = colorsys.hls_to_rgb(polygon_color[0], modified_lightness, polygon_color[2])\n return tuple(np.clip(modified_color, 0.0, 1.0))\n\n def _convert_boxes(self, boxes):\n \"\"\"\n Convert different format of boxes to an NxB array, where B = 4 or 5 is the box dimension.\n \"\"\"\n if isinstance(boxes, Boxes) or isinstance(boxes, RotatedBoxes):\n return boxes.tensor.detach().numpy()\n else:\n return np.asarray(boxes)\n\n def _convert_masks(self, masks_or_polygons):\n \"\"\"\n Convert different format of masks or polygons to a tuple of masks and polygons.\n\n Returns:\n list[GenericMask]:\n \"\"\"\n\n m = masks_or_polygons\n if isinstance(m, PolygonMasks):\n m = m.polygons\n if isinstance(m, BitMasks):\n m = m.tensor.numpy()\n if isinstance(m, torch.Tensor):\n m = m.numpy()\n ret = []\n for x in m:\n if isinstance(x, GenericMask):\n ret.append(x)\n else:\n ret.append(GenericMask(x, self.output.height, self.output.width))\n return ret\n\n def _draw_text_in_mask(self, binary_mask, text, color):\n \"\"\"\n Find proper places to draw text given a binary mask.\n \"\"\"\n # TODO sometimes drawn on wrong objects. the heuristics here can improve.\n _num_cc, cc_labels, stats, centroids = cv2.connectedComponentsWithStats(binary_mask, 8)\n if stats[1:, -1].size == 0:\n return\n largest_component_id = np.argmax(stats[1:, -1]) + 1\n\n # draw text on the largest component, as well as other very large components.\n for cid in range(1, _num_cc):\n if cid == largest_component_id or stats[cid, -1] > _LARGE_MASK_AREA_THRESH:\n # median is more stable than centroid\n # center = centroids[largest_component_id]\n center = np.median((cc_labels == cid).nonzero(), axis=1)[::-1]\n self.draw_text(text, center, color=color)\n\n def _convert_keypoints(self, keypoints):\n if isinstance(keypoints, Keypoints):\n keypoints = keypoints.tensor\n keypoints = np.asarray(keypoints)\n return keypoints\n\n def get_output(self):\n \"\"\"\n Returns:\n output (VisImage): the image output containing the visualizations added\n to the image.\n \"\"\"\n return self.output" }, { "identifier": "_create_text_labels", "path": "nativedancer/third_part/detectron2/utils/visualizer.py", "snippet": "def _create_text_labels(classes, scores, class_names, is_crowd=None):\n \"\"\"\n Args:\n classes (list[int] or None):\n scores (list[float] or None):\n class_names (list[str] or None):\n is_crowd (list[bool] or None):\n\n Returns:\n list[str] or None\n \"\"\"\n labels = None\n if classes is not None:\n if class_names is not None and len(class_names) > 0:\n labels = [class_names[i] for i in classes]\n else:\n labels = [str(i) for i in classes]\n if scores is not None:\n if labels is None:\n labels = [\"{:.0f}%\".format(s * 100) for s in scores]\n else:\n labels = [\"{} {:.0f}%\".format(l, s * 100) for l, s in zip(labels, scores)]\n if labels is not None and is_crowd is not None:\n labels = [l + (\"|crowd\" if crowd else \"\") for l, crowd in zip(labels, is_crowd)]\n return labels" }, { "identifier": "_PanopticPrediction", "path": "nativedancer/third_part/detectron2/utils/visualizer.py", "snippet": 
"class _PanopticPrediction:\n \"\"\"\n Unify different panoptic annotation/prediction formats\n \"\"\"\n\n def __init__(self, panoptic_seg, segments_info, metadata=None):\n if segments_info is None:\n assert metadata is not None\n # If \"segments_info\" is None, we assume \"panoptic_img\" is a\n # H*W int32 image storing the panoptic_id in the format of\n # category_id * label_divisor + instance_id. We reserve -1 for\n # VOID label.\n label_divisor = metadata.label_divisor\n segments_info = []\n for panoptic_label in np.unique(panoptic_seg.numpy()):\n if panoptic_label == -1:\n # VOID region.\n continue\n pred_class = panoptic_label // label_divisor\n isthing = pred_class in metadata.thing_dataset_id_to_contiguous_id.values()\n segments_info.append(\n {\n \"id\": int(panoptic_label),\n \"category_id\": int(pred_class),\n \"isthing\": bool(isthing),\n }\n )\n del metadata\n\n self._seg = panoptic_seg\n\n self._sinfo = {s[\"id\"]: s for s in segments_info} # seg id -> seg info\n segment_ids, areas = torch.unique(panoptic_seg, sorted=True, return_counts=True)\n areas = areas.numpy()\n sorted_idxs = np.argsort(-areas)\n self._seg_ids, self._seg_areas = segment_ids[sorted_idxs], areas[sorted_idxs]\n self._seg_ids = self._seg_ids.tolist()\n for sid, area in zip(self._seg_ids, self._seg_areas):\n if sid in self._sinfo:\n self._sinfo[sid][\"area\"] = float(area)\n\n def non_empty_mask(self):\n \"\"\"\n Returns:\n (H, W) array, a mask for all pixels that have a prediction\n \"\"\"\n empty_ids = []\n for id in self._seg_ids:\n if id not in self._sinfo:\n empty_ids.append(id)\n if len(empty_ids) == 0:\n return np.zeros(self._seg.shape, dtype=np.uint8)\n assert (\n len(empty_ids) == 1\n ), \">1 ids corresponds to no labels. This is currently not supported\"\n return (self._seg != empty_ids[0]).numpy().astype(bool)\n\n def semantic_masks(self):\n for sid in self._seg_ids:\n sinfo = self._sinfo.get(sid)\n if sinfo is None or sinfo[\"isthing\"]:\n # Some pixels (e.g. id 0 in PanopticFPN) have no instance or semantic predictions.\n continue\n yield (self._seg == sid).numpy().astype(bool), sinfo\n\n def instance_masks(self):\n for sid in self._seg_ids:\n sinfo = self._sinfo.get(sid)\n if sinfo is None or not sinfo[\"isthing\"]:\n continue\n mask = (self._seg == sid).numpy().astype(bool)\n if mask.sum() > 0:\n yield mask, sinfo" }, { "identifier": "random_color", "path": "nativedancer/third_part/detectron2/utils/colormap.py", "snippet": "def random_color(rgb=False, maximum=255):\n \"\"\"\n Args:\n rgb (bool): whether to return RGB colors or BGR colors.\n maximum (int): either 255 or 1\n\n Returns:\n ndarray: a vector of 3 numbers\n \"\"\"\n idx = np.random.randint(0, len(_COLORS))\n ret = _COLORS[idx] * maximum\n if not rgb:\n ret = ret[::-1]\n return ret" }, { "identifier": "random_colors", "path": "nativedancer/third_part/detectron2/utils/colormap.py", "snippet": "def random_colors(N, rgb=False, maximum=255):\n \"\"\"\n Args:\n N (int): number of unique colors needed\n rgb (bool): whether to return RGB colors or BGR colors.\n maximum (int): either 255 or 1\n\n Returns:\n ndarray: a list of random_color\n \"\"\"\n indices = random.sample(range(len(_COLORS)), N)\n ret = [_COLORS[i] * maximum for i in indices]\n if not rgb:\n ret = [x[::-1] for x in ret]\n return ret" } ]
import numpy as np
import pycocotools.mask as mask_util
from typing import List

from ..structures import Instances
from ..utils.visualizer import (
    ColorMode,
    Visualizer,
    _create_text_labels,
    _PanopticPrediction,
)
from .colormap import random_color, random_colors
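The Visualizer class quoted in the context is the single-image renderer that this video module builds on. A typical one-frame call, following the constructor and draw_instance_predictions signatures shown above; frame_bgr and outputs are assumed inputs (an OpenCV frame and a model prediction dict), and the final get_image() call comes from upstream detectron2's VisImage rather than from the excerpt.

from detectron2.data import MetadataCatalog
from detectron2.utils.visualizer import ColorMode, Visualizer

# `frame_bgr` is an (H, W, 3) BGR frame; `outputs` holds an Instances object under "instances".
metadata = MetadataCatalog.get("coco_2017_val")
v = Visualizer(frame_bgr[:, :, ::-1], metadata, scale=1.0,
               instance_mode=ColorMode.SEGMENTATION)
vis = v.draw_instance_predictions(outputs["instances"].to("cpu"))
vis_bgr = vis.get_image()[:, :, ::-1]  # back to BGR for cv2.imshow / VideoWriter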
13,318
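Two small helpers pulled in by the import block above are _create_text_labels, which formats "class score%" strings, and random_colors, which samples N distinct colors from the built-in colormap. A quick illustration based only on the signatures shown in the context; the printed color is just an example, since the choice is random.

from detectron2.utils.colormap import random_colors
from detectron2.utils.visualizer import _create_text_labels

labels = _create_text_labels(
    classes=[0, 2], scores=[0.91, 0.55],
    class_names=["person", "bicycle", "car"],
)
print(labels)  # ['person 91%', 'car 55%']

colors = random_colors(len(labels), rgb=True, maximum=1)
print(colors[0])  # e.g. array([0.85, 0.325, 0.098]) -- values in [0, 1]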
# Copyright (c) Facebook, Inc. and its affiliates.


class _DetectedInstance:
    """
    Used to store data about detected objects in video frame,
    in order to transfer color to objects in the future frames.

    Attributes:
        label (int):
        bbox (tuple[float]):
        mask_rle (dict):
        color (tuple[float]): RGB colors in range (0, 1)
        ttl (int): time-to-live for the instance. For example, if ttl=2,
            the instance color can be transferred to objects in the next two frames.
    """

    __slots__ = ["label", "bbox", "mask_rle", "color", "ttl"]

    def __init__(self, label, bbox, mask_rle, color, ttl):
        self.label = label
        self.bbox = bbox
        self.mask_rle = mask_rle
        self.color = color
        self.ttl = ttl


class VideoVisualizer:
# Copyright (c) Facebook, Inc. and its affiliates.


class _DetectedInstance:
    """
    Used to store data about detected objects in video frame,
    in order to transfer color to objects in the future frames.

    Attributes:
        label (int):
        bbox (tuple[float]):
        mask_rle (dict):
        color (tuple[float]): RGB colors in range (0, 1)
        ttl (int): time-to-live for the instance. For example, if ttl=2,
            the instance color can be transferred to objects in the next two frames.
    """

    __slots__ = ["label", "bbox", "mask_rle", "color", "ttl"]

    def __init__(self, label, bbox, mask_rle, color, ttl):
        self.label = label
        self.bbox = bbox
        self.mask_rle = mask_rle
        self.color = color
        self.ttl = ttl


class VideoVisualizer:
def __init__(self, metadata, instance_mode=ColorMode.IMAGE):
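The _DetectedInstance record above carries a ttl (time-to-live) counter so that an instance's color can be reused for matching detections in the next few frames. A toy sketch of that bookkeeping, reusing the class exactly as defined in the cropped code; the decay_and_prune helper is hypothetical and stands in for the matching logic the real visualizer performs.

def decay_and_prune(old_instances):
    # Hypothetical helper: age out remembered detections once their ttl runs out.
    for inst in old_instances:
        inst.ttl -= 1
    return [inst for inst in old_instances if inst.ttl > 0]

# Remember a detection for the next two frames so its color can be transferred.
remembered = [_DetectedInstance(label=3, bbox=(10, 10, 50, 80),
                                mask_rle=None, color=(0.2, 0.6, 0.9), ttl=2)]
remembered = decay_and_prune(remembered)  # ttl -> 1, still kept
remembered = decay_and_prune(remembered)  # ttl -> 0, dropped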
1
2023-12-10 20:14:00+00:00
16k
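Putting the pieces of this record together: a single VideoVisualizer is normally kept alive for the whole clip so that instance colors persist across frames, which is exactly what the _DetectedInstance ttl records enable. The constructor call matches the next_line shown above, and draw_instance_predictions(frame, predictions) is the upstream detectron2 per-frame entry point; frames_with_predictions and write_frame are placeholder names for the frame source and sink.

from detectron2.data import MetadataCatalog
from detectron2.utils.video_visualizer import VideoVisualizer
from detectron2.utils.visualizer import ColorMode

# One VideoVisualizer for the whole clip, so colors are reused across frames.
video_vis = VideoVisualizer(MetadataCatalog.get("coco_2017_val"),
                            instance_mode=ColorMode.IMAGE)

for frame_rgb, outputs in frames_with_predictions:  # assumed iterable of (ndarray, dict)
    vis = video_vis.draw_instance_predictions(frame_rgb, outputs["instances"].to("cpu"))
    write_frame(vis.get_image())                     # assumed sink for the visualized frame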
ethanweber/nerfiller
nerfiller/nerf/nerfiller_pipeline.py
[ { "identifier": "RGBInpainter", "path": "nerfiller/inpaint/rgb_inpainter.py", "snippet": "class RGBInpainter:\n \"\"\"\n Module for inpainting with the stable diffusion inpainting pipeline.\n \"\"\"\n\n def __init__(\n self,\n half_precision_weights: bool = True,\n lora_model_path: Optional[str] = None,\n device: str = \"cuda:0\",\n vae_device: str = \"cuda:0\",\n pipeline_name: str = \"stabilityai/stable-diffusion-2-inpainting\",\n ):\n print(f\"Loading RGB Inpainter ...\")\n\n self.half_precision_weights = half_precision_weights\n self.lora_model_path = lora_model_path\n self.device = device\n self.vae_device = vae_device\n self.dtype = torch.float16 if self.half_precision_weights else torch.float32\n self.pipeline_name = pipeline_name\n self.set_pipe()\n self.setup()\n\n def set_pipe(self):\n pipe_kwargs = {\n \"safety_checker\": None,\n \"feature_extractor\": None,\n \"requires_safety_checker\": False,\n \"torch_dtype\": self.dtype,\n }\n self.pipe = StableDiffusionInpaintPipeline.from_pretrained(\n self.pipeline_name,\n **pipe_kwargs,\n )\n\n def setup(self):\n # Load LoRA\n if self.lora_model_path:\n self.pipe.load_lora_weights(self.lora_model_path)\n print(f\"Loaded LoRA model from {self.lora_model_path}\")\n\n self.tokenizer = self.pipe.tokenizer\n self.text_encoder = self.pipe.text_encoder.to(self.device).eval()\n\n self.unet = self.pipe.unet.to(self.device).eval()\n self.vae = self.pipe.vae.to(self.vae_device).eval()\n\n self.vae_scale_factor = 2 ** (len(self.pipe.vae.config.block_out_channels) - 1)\n self.vae_latent_channels = self.pipe.vae.config.latent_channels\n\n # self.scheduler = DDPMScheduler.from_config(self.pipe.scheduler.config)\n self.scheduler = DDIMScheduler.from_config(self.pipe.scheduler.config)\n self.num_train_timesteps = self.scheduler.num_train_timesteps\n self.alphas = self.scheduler.alphas_cumprod.to(self.device)\n\n del self.pipe\n cleanup()\n\n print(f\"Loaded RGB inpainter!\")\n\n def compute_text_embeddings(self, prompt: str, negative_prompt: str):\n \"\"\"Get the text embeddings for a string.\"\"\"\n assert self.tokenizer is not None\n assert self.text_encoder is not None\n with torch.no_grad():\n text_inputs = tokenize_prompt(self.tokenizer, prompt, tokenizer_max_length=None)\n prompt_embeds = encode_prompt(\n self.text_encoder,\n text_inputs.input_ids,\n text_inputs.attention_mask,\n text_encoder_use_attention_mask=False,\n )\n negative_text_inputs = tokenize_prompt(self.tokenizer, negative_prompt, tokenizer_max_length=None)\n negative_prompt_embeds = encode_prompt(\n self.text_encoder,\n negative_text_inputs.input_ids,\n negative_text_inputs.attention_mask,\n text_encoder_use_attention_mask=False,\n )\n\n return [prompt_embeds, negative_prompt_embeds]\n\n def destroy_text_encoder(self) -> None:\n \"\"\"Delete the text modules to save on memory.\"\"\"\n del self.tokenizer\n del self.text_encoder\n cleanup()\n\n def forward_unet(\n self,\n sample,\n t,\n text_embeddings,\n denoise_in_grid: bool = False,\n ):\n # process embeddings\n prompt_embeds, negative_prompt_embeds = text_embeddings\n\n batch_size = sample.shape[0] // 3\n\n prompt_embeds = torch.cat(\n [\n prompt_embeds.repeat(batch_size, 1, 1),\n negative_prompt_embeds.repeat(batch_size, 1, 1),\n negative_prompt_embeds.repeat(batch_size, 1, 1),\n ]\n )\n\n if denoise_in_grid:\n grid_sample = make_grid(sample)\n grid_prompt_embeds = prompt_embeds[:3].repeat(grid_sample.shape[0] // 3, 1, 1)\n noise_pred = self.unet(\n sample=grid_sample,\n timestep=t,\n encoder_hidden_states=grid_prompt_embeds,\n 
return_dict=False,\n )[0]\n noise_pred = undo_grid(noise_pred)\n else:\n noise_pred = self.unet(\n sample=sample,\n timestep=t,\n encoder_hidden_states=prompt_embeds,\n return_dict=False,\n )[0]\n return noise_pred\n\n def get_noise_pred(\n self,\n t,\n model_input: ModelInput,\n text_embeddings,\n text_guidance_scale: float = 0.0,\n image_guidance_scale: float = 0.0,\n denoise_in_grid: bool = False,\n multidiffusion_steps: int = 1,\n multidiffusion_type: str = \"epsilon\",\n randomize_latents: bool = False,\n randomize_within_grid: bool = False,\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n only_noise_pred: bool = False,\n ):\n assert self.scheduler.config.prediction_type == \"epsilon\", \"We assume the model predicts epsilon.\"\n\n batch_size = model_input.latents.shape[0]\n value = torch.zeros_like(model_input.latents)\n count = torch.zeros_like(model_input.latents)\n\n for i in range(multidiffusion_steps):\n if randomize_latents:\n indices = torch.randperm(batch_size)\n else:\n indices = torch.arange(batch_size)\n\n if denoise_in_grid and randomize_within_grid:\n for j in range(0, len(indices), 4):\n indices[j : j + 4] = indices[j : j + 4][torch.randperm(4)]\n\n latents = model_input.latents[indices]\n latents_mask = model_input.latents_mask[indices]\n latents_mask_uncond = model_input.latents_mask_uncond[indices]\n masked_image_latents = model_input.masked_image_latents[indices]\n masked_image_latents_uncond = model_input.masked_image_latents_uncond[indices]\n\n # expand the latents if we are doing classifier free guidance\n latent_model_input = torch.cat([latents, latents, latents])\n latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)\n\n latents_mask_input = torch.cat([latents_mask, latents_mask, latents_mask_uncond])\n masked_image_latents_input = torch.cat(\n [\n masked_image_latents,\n masked_image_latents,\n masked_image_latents_uncond,\n ]\n )\n\n # concat latents, mask, masked_image_latents in the channel dimension\n latent_model_input_cat = torch.cat(\n [latent_model_input, latents_mask_input, masked_image_latents_input],\n dim=1,\n )\n\n # TODO: save compute by skipping some text encodings if not using them in CFG\n\n noise_pred_all = self.forward_unet(\n sample=latent_model_input_cat,\n t=t,\n text_embeddings=text_embeddings,\n denoise_in_grid=denoise_in_grid,\n )\n\n noise_pred_text, noise_pred_image, noise_pred_uncond = noise_pred_all.chunk(3)\n\n noise_pred = (\n noise_pred_image\n + text_guidance_scale * (noise_pred_text - noise_pred_image)\n + image_guidance_scale * (noise_pred_image - noise_pred_uncond)\n )\n\n if multidiffusion_type == \"v_prediction\":\n v_prediction = get_v_prediction_from_epsilon(noise_pred, t, latents, self.scheduler.alphas_cumprod)\n value[indices] += v_prediction\n count[indices] += 1\n elif multidiffusion_type == \"epsilon\":\n value[indices] += noise_pred\n count[indices] += 1\n else:\n raise ValueError(\"Not implemented.\")\n\n # take the MultiDiffusion step\n final_noise_pred = torch.where(count > 0, value / count, value)\n\n if multidiffusion_type == \"v_prediction\":\n final_noise_pred = get_epsilon_from_v_prediction(\n final_noise_pred,\n t.item(),\n model_input.latents,\n self.scheduler.alphas_cumprod,\n )\n elif multidiffusion_type == \"epsilon\":\n pass\n else:\n raise ValueError(\"Not implemented.\")\n\n if only_noise_pred:\n return None, None, final_noise_pred\n\n scheduler_output = self.scheduler.step(final_noise_pred, t, model_input.latents, generator=generator)\n 
pred_prev_sample = scheduler_output.prev_sample\n pred_original_sample = scheduler_output.pred_original_sample\n\n assert not pred_prev_sample.isnan().any()\n assert not pred_original_sample.isnan().any()\n return pred_prev_sample, pred_original_sample, final_noise_pred\n\n def get_model_input(\n self,\n image: Float[Tensor, \"B 3 H W\"],\n mask: Float[Tensor, \"B 1 H W\"],\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n starting_image: Optional[Float[Tensor, \"B 3 H W\"]] = None,\n starting_timestep: Optional[int] = None,\n keep_grad: bool = False,\n ) -> ModelInput:\n \"\"\"Returns the inputs for the unet.\"\"\"\n\n # TODO: incorporate seeds\n\n batch_size, _, height, width = image.shape\n\n noise = randn_tensor(\n shape=(\n batch_size,\n self.vae_latent_channels,\n height // self.vae_scale_factor,\n width // self.vae_scale_factor,\n ),\n generator=generator,\n device=torch.device(self.device),\n dtype=self.dtype,\n )\n if starting_image is not None:\n assert starting_timestep is not None\n if keep_grad:\n latents = self.encode_images(starting_image)\n else:\n with torch.no_grad():\n latents = self.encode_images(starting_image)\n latents = self.scheduler.add_noise(latents, noise, starting_timestep)\n else:\n latents = noise\n\n latents_mask = torch.nn.functional.interpolate(\n mask,\n size=(height // self.vae_scale_factor, width // self.vae_scale_factor),\n mode=\"nearest\",\n )\n assert len(torch.unique(latents_mask)) <= 2\n latents_mask = latents_mask.to(device=self.device, dtype=self.dtype)\n assert len(torch.unique(mask)) <= 2\n masked_image = torch.where(mask == 0, image, 0.5)\n with torch.no_grad():\n masked_image_latents = self.encode_images(masked_image)\n\n latents_mask_uncond = torch.ones_like(latents_mask)\n masked_image_uncond = torch.ones_like(masked_image) * 0.5\n with torch.no_grad():\n masked_image_latents_uncond = self.encode_images(masked_image_uncond)\n\n model_input = ModelInput(\n latents.to(device=self.device, dtype=self.dtype),\n latents_mask.to(device=self.device, dtype=self.dtype),\n masked_image_latents.to(device=self.device, dtype=self.dtype),\n latents_mask_uncond.to(device=self.device, dtype=self.dtype),\n masked_image_latents_uncond.to(device=self.device, dtype=self.dtype),\n noise.to(device=self.device, dtype=self.dtype),\n )\n\n return model_input\n\n def get_loss(\n self,\n x0: Float[Tensor, \"B 3 H W\"],\n mask: Float[Tensor, \"B 1 H W\"],\n depth: Optional[Float[Tensor, \"B 1 H W\"]] = None,\n multiview_guidance_scale: float = 0.0,\n reconstruction_guidance_scale: float = 0.0,\n feature_extractor: Optional[FeatureExtractor] = None,\n multiview_metric: Optional[MultiviewMetric] = None,\n K: Optional[Float[Tensor, \"B 3 3\"]] = None,\n c2w: Optional[Float[Tensor, \"B 3 4\"]] = None,\n output_folder: Optional[Path] = None,\n step: int = 0,\n guidance_step: int = 0,\n starting_image: Optional[Float[Tensor, \"B 3 H W\"]] = None,\n ):\n \"\"\"Losses on the VAE decoded images x0.\n The multi-view loss is applied where mask == 0.0 (regions that have known depth).\n \"\"\"\n\n loss = 0.0\n\n if multiview_guidance_scale != 0.0:\n features = feature_extractor(x0.to(feature_extractor.device)).to(self.device)\n\n # multiview guidance\n scale_factor = features.shape[-1] / x0.shape[-1]\n K_scaled = rescale_intrinsics(K, scale_factor, scale_factor)\n mask_scaled = 1.0 - torch.nn.functional.interpolate(mask, scale_factor=scale_factor, mode=\"nearest\")\n depth_scaled = torch.nn.functional.interpolate(depth, scale_factor=scale_factor, 
mode=\"bilinear\")\n for cam1 in range(len(c2w)):\n for cam2 in range(cam1 + 1, len(c2w)):\n loss_mv, loss_dict = multiview_metric(\n features1=features[cam1 : cam1 + 1],\n features2=features[cam2 : cam2 + 1],\n K1=K_scaled[cam1 : cam1 + 1],\n K2=K_scaled[cam2 : cam2 + 1],\n c2w1=c2w[cam1 : cam1 + 1],\n c2w2=c2w[cam2 : cam2 + 1],\n image1=x0[cam1 : cam1 + 1],\n image2=x0[cam2 : cam2 + 1],\n mask1=mask_scaled[cam1 : cam1 + 1],\n mask2=mask_scaled[cam2 : cam2 + 1],\n depth1=depth_scaled[cam1 : cam1 + 1],\n depth2=depth_scaled[cam2 : cam2 + 1],\n output_folder=output_folder if (cam1 == 0 and guidance_step == 0) else None,\n suffix=f\"-{step:06d}-{cam1:06d}-{cam2:06d}-{guidance_step:06d}\",\n )\n loss += multiview_guidance_scale * loss_mv.sum()\n\n if reconstruction_guidance_scale != 0.0:\n loss += (\n reconstruction_guidance_scale * (((starting_image.to(x0.device) - x0) * mask.to(x0.device)) ** 2).mean()\n )\n\n return loss\n\n @torch.cuda.amp.autocast(enabled=True)\n def get_image(\n self,\n text_embeddings,\n image: Float[Tensor, \"B 3 H W\"],\n mask: Float[Tensor, \"B 1 H W\"],\n num_inference_steps: int = 20,\n denoise_in_grid: bool = False,\n depth: Optional[Float[Tensor, \"B 1 H W\"]] = None,\n text_guidance_scale: Optional[float] = None,\n image_guidance_scale: Optional[float] = None,\n multidiffusion_steps: int = 1,\n multidiffusion_type: str = \"epsilon\",\n randomize_latents: bool = False,\n randomize_within_grid: bool = False,\n use_decoder_approximation: bool = False,\n multiview_guidance_scale: float = 0.0,\n reconstruction_guidance_scale: float = 0.0,\n feature_extractor: Optional[FeatureExtractor] = None,\n multiview_metric: Optional[MultiviewMetric] = None,\n K: Optional[Float[Tensor, \"B 3 3\"]] = None,\n c2w: Optional[Float[Tensor, \"B 3 4\"]] = None,\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n show_multiview: bool = False,\n guidance_steps: List[int] = [5],\n num_guidance_steps: int = 10,\n classifier_guidance_scale: float = 0.0,\n output_folder: Optional[Path] = None,\n starting_image: Optional[Float[Tensor, \"B 3 H W\"]] = None,\n starting_lower_bound: Optional[float] = None,\n starting_upper_bound: Optional[float] = None,\n classifier_guidance_loss_rescale=1000.0,\n classifier_guidance_start_step: int = 0,\n replace_original_pixels: bool = False,\n ) -> Float[Tensor, \"B 3 H W\"]:\n \"\"\"Run the denoising sampling process, also known as the reverse process.\n Inpaint where mask == 1.\n If output folder is not None, then save images to this folder.\n\n Args:\n text_embeddings: Either 2 per image (BB) or 2 total, which will use the same cond. and uncond. 
prompts for all.\n loss_rescale: To prevent fp16 underflow\n \"\"\"\n\n if output_folder:\n output_folder.mkdir(parents=True, exist_ok=True)\n\n batch_size, _, height, width = image.shape\n\n if starting_lower_bound is not None:\n min_step = int(self.num_train_timesteps * starting_lower_bound)\n max_step = int(self.num_train_timesteps * starting_upper_bound)\n # select t, set multi-step diffusion\n T = torch.randint(min_step, max_step + 1, [1], dtype=torch.long, device=self.device)\n self.scheduler.config.num_train_timesteps = T.item()\n else:\n self.scheduler.config.num_train_timesteps = self.num_train_timesteps\n\n self.scheduler.set_timesteps(num_inference_steps, device=self.device)\n\n model_input = self.get_model_input(\n image=image,\n mask=mask,\n generator=generator,\n # self.scheduler.config.num_train_timesteps == 1000 is equivalent to starting_lower_bound and starting_upper_bound both being 1\n # so start with full noise by setting this to None\n starting_image=starting_image if self.scheduler.config.num_train_timesteps != 1000 else None,\n starting_timestep=self.scheduler.timesteps[0],\n )\n\n if depth is None:\n depth = torch.zeros_like(mask)\n\n progress = Progress(\n TextColumn(\"[progress.description]{task.description}\"),\n BarColumn(),\n TaskProgressColumn(),\n TimeElapsedColumn(),\n )\n task1 = progress.add_task(\n f\"[green]Inpainting batch of images...\",\n total=len(self.scheduler.timesteps),\n )\n\n with progress:\n for i, t in enumerate(self.scheduler.timesteps):\n start_time = time.time()\n\n # DragDiffusion style guidance (\"drag\")\n use_drag_guidance = (\n multiview_guidance_scale != 0.0 or reconstruction_guidance_scale != 0.0\n ) and i in guidance_steps\n if use_drag_guidance:\n model_input.latents = model_input.latents.to(torch.float32).detach().requires_grad_(True)\n scaler = torch.cuda.amp.GradScaler()\n optimizer = torch.optim.Adam([model_input.latents], lr=1e-2)\n for guidance_step in range(num_guidance_steps):\n _, pred_original_sample, _ = self.get_noise_pred(\n t,\n model_input,\n text_embeddings,\n text_guidance_scale=text_guidance_scale,\n image_guidance_scale=image_guidance_scale,\n denoise_in_grid=denoise_in_grid,\n multidiffusion_steps=1,\n multidiffusion_type=multidiffusion_type,\n randomize_latents=randomize_latents,\n randomize_within_grid=randomize_within_grid,\n )\n x0 = self.decode_latents(\n pred_original_sample,\n use_decoder_approximation=use_decoder_approximation,\n ).to(torch.float32)\n x0 = torch.where(mask == 0, image, x0) if replace_original_pixels else x0\n\n if output_folder:\n image_x0 = torch.cat(list(x0.permute(0, 2, 3, 1)), dim=1).detach().cpu()\n mediapy.write_image(\n output_folder / f\"x0-{i:06d}-{guidance_step:06d}.png\",\n image_x0,\n )\n\n loss = self.get_loss(\n x0=x0,\n mask=mask,\n depth=depth,\n multiview_guidance_scale=multiview_guidance_scale,\n reconstruction_guidance_scale=reconstruction_guidance_scale,\n feature_extractor=feature_extractor,\n multiview_metric=multiview_metric,\n K=K,\n c2w=c2w,\n output_folder=output_folder / \"drag_guidance\",\n step=i,\n guidance_step=guidance_step,\n starting_image=starting_image,\n )\n if wandb.run is not None:\n wandb.log({f\"{output_folder.name}/drag_guidance_loss-{i}\": loss})\n\n optimizer.zero_grad()\n assert not loss.isnan().any()\n scaler.scale(loss).backward()\n\n assert not model_input.latents.grad.isnan().any()\n # print(\n # model_input.latents.grad.abs().mean(),\n # (model_input.latents.grad == 0.0).sum() / model_input.latents.grad.numel(),\n # )\n\n 
scaler.step(optimizer)\n assert not model_input.latents.isnan().any()\n assert not depth.isnan().any()\n scaler.update()\n\n # take a step\n use_classifier_guidance = classifier_guidance_scale != 0.0 and i >= classifier_guidance_start_step\n model_input.latents = (\n model_input.latents.to(self.dtype).detach().requires_grad_(use_classifier_guidance)\n )\n with torch.enable_grad() if use_classifier_guidance else torch.no_grad():\n _, pred_original_sample, noise_pred = self.get_noise_pred(\n t,\n model_input,\n text_embeddings,\n text_guidance_scale=text_guidance_scale,\n image_guidance_scale=image_guidance_scale,\n denoise_in_grid=denoise_in_grid,\n multidiffusion_steps=multidiffusion_steps,\n multidiffusion_type=multidiffusion_type,\n randomize_latents=randomize_latents,\n randomize_within_grid=randomize_within_grid,\n )\n\n # classifier guidance (\"classifier\")\n if use_classifier_guidance:\n x0 = self.decode_latents(\n pred_original_sample,\n use_decoder_approximation=use_decoder_approximation,\n ).to(torch.float32)\n x0 = torch.where(mask == 0, image, x0) if replace_original_pixels else x0\n\n loss = self.get_loss(\n x0=x0,\n mask=mask,\n depth=depth,\n multiview_guidance_scale=multiview_guidance_scale,\n reconstruction_guidance_scale=reconstruction_guidance_scale,\n feature_extractor=feature_extractor,\n multiview_metric=multiview_metric,\n K=K,\n c2w=c2w,\n output_folder=output_folder / \"classifier_guidance\",\n step=i,\n guidance_step=0,\n starting_image=starting_image,\n )\n if wandb.run is not None:\n wandb.log({f\"{output_folder.name}/classifier_guidance_loss\": loss})\n\n grad = (\n torch.autograd.grad(\n classifier_guidance_loss_rescale * loss,\n model_input.latents,\n )[0]\n / classifier_guidance_loss_rescale\n )\n # print(\n # grad.abs().mean(),\n # (grad == 0.0).sum() / grad.numel(),\n # )\n noise_pred = noise_pred + classifier_guidance_scale * grad\n\n model_input.latents = model_input.latents.detach().requires_grad_(False)\n scheduler_output = self.scheduler.step(noise_pred, t, model_input.latents, generator=generator)\n model_input.latents = scheduler_output.prev_sample\n\n if output_folder:\n # save the denoised x0\n with torch.no_grad():\n x0 = self.decode_latents(\n pred_original_sample,\n use_decoder_approximation=use_decoder_approximation,\n ).to(torch.float32)\n x0 = torch.where(mask == 0, image, x0) if replace_original_pixels else x0\n\n if use_drag_guidance or use_classifier_guidance:\n loss = self.get_loss(\n x0=x0,\n mask=mask,\n depth=depth,\n multiview_guidance_scale=multiview_guidance_scale,\n reconstruction_guidance_scale=reconstruction_guidance_scale,\n feature_extractor=feature_extractor,\n multiview_metric=multiview_metric,\n K=K,\n c2w=c2w,\n output_folder=None,\n step=i,\n guidance_step=0,\n starting_image=starting_image,\n )\n if wandb.run is not None:\n wandb.log({f\"{output_folder.name}/loss\": loss})\n\n image_x0 = torch.cat(list(x0.permute(0, 2, 3, 1)), dim=1).detach().cpu()\n mediapy.write_image(output_folder / \"x0.png\", image_x0)\n mediapy.write_image(output_folder / f\"x0-{i:06d}.png\", image_x0)\n\n progress.update(task1, advance=1)\n end_time = time.time()\n # print(f\"[green]Time for iter {i}:\", end_time - start_time)\n\n if output_folder:\n output_filename = str(output_folder) + \".mp4\"\n CONSOLE.print(f\"[green]Saving video to {output_filename}\")\n save_video_from_path(\n path=output_folder,\n glob_str=\"x0*png\",\n sec=10,\n output_filename=output_filename,\n )\n\n with torch.no_grad():\n x0 = self.decode_latents(\n 
model_input.latents.detach(),\n use_decoder_approximation=use_decoder_approximation,\n ).to(torch.float32)\n x0 = torch.where(mask == 0, image, x0) if replace_original_pixels else x0\n return x0\n\n def encode_images(self, imgs: Float[Tensor, \"B 3 512 512\"]) -> Float[Tensor, \"B 4 64 64\"]:\n imgs = imgs * 2.0 - 1.0\n sampled_posterior = self.vae.encode(imgs.to(self.vae_device), return_dict=False)[0].sample().to(self.device)\n latents = sampled_posterior * 0.18215\n return latents\n\n def decode_latents(\n self,\n latents: Float[Tensor, \"B 4 H W\"],\n use_decoder_approximation: bool = False,\n ) -> Float[Tensor, \"B 3 Hout Wout\"]:\n if use_decoder_approximation:\n da = get_decoder_approximation().to(latents)\n x = torch.nn.functional.interpolate(latents, scale_factor=self.vae_scale_factor, mode=\"bilinear\")\n x = torch.matmul(x.permute(0, 2, 3, 1), da).permute(0, 3, 1, 2)\n return x\n else:\n scaled_latents = 1 / 0.18215 * latents\n image = self.vae.decode(scaled_latents.to(self.vae_device), return_dict=False)[0].to(self.device)\n image = (image * 0.5 + 0.5).clamp(0, 1)\n return image\n\n def sds_loss(\n self,\n text_embeddings: Union[Float[Tensor, \"BB 77 768\"], Float[Tensor, \"2 77 768\"]],\n image: Float[Tensor, \"B 3 H W\"],\n mask: Float[Tensor, \"B 1 H W\"],\n starting_image: Float[Tensor, \"B 3 H W\"],\n text_guidance_scale: Optional[float] = None,\n image_guidance_scale: Optional[float] = None,\n starting_lower_bound: float = 0.02,\n starting_upper_bound: float = 0.98,\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n ) -> torch.Tensor:\n \"\"\"Score Distilation Sampling loss proposed in DreamFusion paper (https://dreamfusion3d.github.io/)\n Args:\n text_embeddings: Text embeddings\n image: Rendered image\n mask: Mask, inpaint where 1\n text_guidance_scale: How much to weigh the guidance\n image_guidance_scale: How much to weigh the guidance\n Returns:\n The loss\n \"\"\"\n\n # NOTE: doesn't work for gridding right now\n\n batch_size, _, height, width = image.shape\n\n min_step = int(self.num_train_timesteps * starting_lower_bound)\n max_step = int(self.num_train_timesteps * starting_upper_bound)\n\n t = torch.randint(min_step, max_step + 1, [1], dtype=torch.long, device=self.device)\n\n model_input = self.get_model_input(\n image=image,\n mask=mask,\n generator=generator,\n starting_image=starting_image,\n starting_timestep=t,\n keep_grad=True,\n )\n\n # predict the noise residual with unet, NO grad!\n with torch.no_grad():\n _, _, noise_pred = self.get_noise_pred(\n t,\n model_input,\n text_embeddings,\n text_guidance_scale=text_guidance_scale,\n image_guidance_scale=image_guidance_scale,\n only_noise_pred=True,\n )\n\n # w(t), sigma_t^2\n w = 1 - self.alphas[t]\n\n grad = w * (noise_pred - model_input.noise)\n grad = torch.nan_to_num(grad)\n\n target = (model_input.latents - grad).detach()\n loss = (\n 0.5\n * torch.nn.functional.mse_loss(model_input.latents, target, reduction=\"sum\")\n / model_input.latents.shape[0]\n )\n\n return loss" }, { "identifier": "RGBInpainterXL", "path": "nerfiller/inpaint/rgb_inpainter.py", "snippet": "class RGBInpainterXL(RGBInpainter):\n def set_pipe(self):\n pipe_kwargs = {\n \"safety_checker\": None,\n \"feature_extractor\": None,\n \"requires_safety_checker\": False,\n \"torch_dtype\": self.dtype,\n }\n self.pipe = AutoPipelineForInpainting.from_pretrained(\n \"diffusers/stable-diffusion-xl-1.0-inpainting-0.1\",\n **pipe_kwargs,\n )\n\n def setup(self):\n # Load LoRA\n if self.lora_model_path:\n 
self.pipe.load_lora_weights(self.lora_model_path)\n print(f\"Loaded LoRA model from {self.lora_model_path}\")\n\n # self.tokenizer = self.pipe.tokenizer\n # self.text_encoder = self.pipe.text_encoder.to(self.device).eval()\n self.pipe.to(self.device)\n\n self.unet = self.pipe.unet.to(self.device).eval()\n self.vae = self.pipe.vae.to(self.vae_device).eval()\n\n self.vae_scale_factor = 2 ** (len(self.pipe.vae.config.block_out_channels) - 1)\n self.vae_latent_channels = self.pipe.vae.config.latent_channels\n\n # self.scheduler = DDPMScheduler.from_config(self.pipe.scheduler.config)\n self.scheduler = DDIMScheduler.from_config(self.pipe.scheduler.config)\n self.num_train_timesteps = self.scheduler.num_train_timesteps\n self.alphas = self.scheduler.alphas_cumprod.to(self.device)\n\n # save this in order to delete the pipeline after text encoding\n self.text_encoder_2_config_projection_dim = self.pipe.text_encoder_2.config.projection_dim\n\n print(f\"Loaded RGB inpainter!\")\n\n def compute_text_embeddings(self, prompt: str, negative_prompt: str):\n \"\"\"Get the text embeddings for a string.\"\"\"\n assert self.pipe is not None\n\n (\n prompt_embeds,\n negative_prompt_embeds,\n pooled_prompt_embeds,\n negative_pooled_prompt_embeds,\n ) = self.pipe.encode_prompt(prompt, negative_prompt, device=self.device)\n return [\n prompt_embeds,\n negative_prompt_embeds,\n pooled_prompt_embeds,\n negative_pooled_prompt_embeds,\n ]\n\n def remove_pipe(self):\n del self.pipe\n cleanup()\n\n # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline._get_add_time_ids\n def _get_add_time_ids(\n self,\n original_size,\n crops_coords_top_left,\n target_size,\n aesthetic_score,\n negative_aesthetic_score,\n negative_original_size,\n negative_crops_coords_top_left,\n negative_target_size,\n dtype,\n requires_aesthetics_score=False,\n ):\n if requires_aesthetics_score:\n add_time_ids = list(original_size + crops_coords_top_left + (aesthetic_score,))\n add_neg_time_ids = list(\n negative_original_size + negative_crops_coords_top_left + (negative_aesthetic_score,)\n )\n else:\n add_time_ids = list(original_size + crops_coords_top_left + target_size)\n add_neg_time_ids = list(negative_original_size + crops_coords_top_left + negative_target_size)\n\n passed_add_embed_dim = (\n self.unet.config.addition_time_embed_dim * len(add_time_ids) + self.text_encoder_2_config_projection_dim\n )\n expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features\n\n if (\n expected_add_embed_dim > passed_add_embed_dim\n and (expected_add_embed_dim - passed_add_embed_dim) == self.unet.config.addition_time_embed_dim\n ):\n raise ValueError(\n f\"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to enable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=True)` to make sure `aesthetic_score` {aesthetic_score} and `negative_aesthetic_score` {negative_aesthetic_score} is correctly used by the model.\"\n )\n elif (\n expected_add_embed_dim < passed_add_embed_dim\n and (passed_add_embed_dim - expected_add_embed_dim) == self.unet.config.addition_time_embed_dim\n ):\n raise ValueError(\n f\"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. 
Please make sure to disable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=False)` to make sure `target_size` {target_size} is correctly used by the model.\"\n )\n elif expected_add_embed_dim != passed_add_embed_dim:\n raise ValueError(\n f\"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`.\"\n )\n\n add_time_ids = torch.tensor([add_time_ids], dtype=dtype)\n add_neg_time_ids = torch.tensor([add_neg_time_ids], dtype=dtype)\n\n return add_time_ids, add_neg_time_ids\n\n def forward_unet(\n self,\n sample,\n t,\n text_embeddings,\n denoise_in_grid: bool = False,\n ):\n # process embeddings\n (\n prompt_embeds,\n negative_prompt_embeds,\n pooled_prompt_embeds,\n negative_pooled_prompt_embeds,\n ) = text_embeddings\n\n batch_size = sample.shape[0] // 3\n\n # 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline\n height, width = sample.shape[-2:]\n height = height * self.vae_scale_factor\n width = width * self.vae_scale_factor\n\n original_size = (height, width)\n target_size = (height, width)\n\n crops_coords_top_left = (0, 0)\n aesthetic_score = 6.0\n negative_aesthetic_score = 2.5\n negative_crops_coords_top_left = (0, 0)\n\n negative_original_size = original_size\n negative_target_size = target_size\n\n add_time_ids, add_neg_time_ids = self._get_add_time_ids(\n original_size,\n crops_coords_top_left,\n target_size,\n aesthetic_score,\n negative_aesthetic_score,\n negative_original_size,\n negative_crops_coords_top_left,\n negative_target_size,\n dtype=self.dtype,\n )\n\n prompt_embeds = torch.cat(\n [\n prompt_embeds.repeat(batch_size, 1, 1),\n negative_prompt_embeds.repeat(batch_size, 1, 1),\n negative_prompt_embeds.repeat(batch_size, 1, 1),\n ],\n dim=0,\n )\n add_text_embeds = torch.cat(\n [\n pooled_prompt_embeds.repeat(batch_size, 1),\n negative_pooled_prompt_embeds.repeat(batch_size, 1),\n negative_pooled_prompt_embeds.repeat(batch_size, 1),\n ],\n dim=0,\n )\n add_time_ids = torch.cat(\n [\n add_time_ids.repeat(batch_size, 1),\n add_neg_time_ids.repeat(batch_size, 1),\n add_neg_time_ids.repeat(batch_size, 1),\n ],\n dim=0,\n )\n\n prompt_embeds = prompt_embeds.to(self.device)\n add_text_embeds = add_text_embeds.to(self.device)\n add_time_ids = add_time_ids.to(self.device)\n\n if denoise_in_grid:\n grid_sample = make_grid(sample)\n grid_prompt_embeds = prompt_embeds[:3].repeat(grid_sample.shape[0] // 3, 1, 1)\n grid_add_text_embeds = add_text_embeds[:3].repeat(grid_sample.shape[0] // 3, 1)\n grid_add_time_ids = add_time_ids[:3].repeat(grid_sample.shape[0] // 3, 1)\n added_cond_kwargs = {\n \"text_embeds\": grid_add_text_embeds,\n \"time_ids\": grid_add_time_ids,\n }\n noise_pred = self.unet(\n sample=grid_sample,\n timestep=t,\n encoder_hidden_states=grid_prompt_embeds,\n added_cond_kwargs=added_cond_kwargs,\n return_dict=False,\n )[0]\n noise_pred = undo_grid(noise_pred)\n else:\n added_cond_kwargs = {\n \"text_embeds\": add_text_embeds,\n \"time_ids\": add_time_ids,\n }\n noise_pred = self.unet(\n sample=sample,\n timestep=t,\n encoder_hidden_states=prompt_embeds,\n added_cond_kwargs=added_cond_kwargs,\n return_dict=False,\n )[0]\n return noise_pred" }, { "identifier": "DepthInpainter", "path": "nerfiller/inpaint/depth_inpainter.py", "snippet": "class DepthInpainter:\n def __init__(\n self,\n max_depth: 
float = 10.0,\n tileX: bool = True,\n tileY: bool = False,\n depth_method: str = \"zoedepth\",\n device: str = \"cuda:0\",\n ):\n self.max_depth = max_depth\n self.tileX = tileX\n self.tileY = tileY\n self.depth_method = depth_method\n self.device = device\n self.configure()\n\n def configure(self) -> None:\n logging.info(f\"Loading depth guidance ...\")\n\n # setup the depth network\n\n # zoedepth\n if self.depth_method == \"zoedepth\":\n repo = \"isl-org/ZoeDepth\"\n self.zoe = torch.compile(torch.hub.load(repo, \"ZoeD_NK\", pretrained=True).to(self.device))\n\n # TODO: midas\n\n if self.depth_method == \"midas\":\n model_type = \"DPT_Large\"\n self.midas = torch.hub.load(\"intel-isl/MiDaS\", model_type).to(self.device)\n self.midas.eval()\n midas_transforms = torch.hub.load(\"intel-isl/MiDaS\", \"transforms\")\n if model_type == \"DPT_Large\" or model_type == \"DPT_Hybrid\":\n self.transform = midas_transforms.dpt_transform\n else:\n self.transform = midas_transforms.small_transform\n\n def get_depth(\n self,\n image: Float[Tensor, \"B C H W\"],\n rendered_depth: Optional[Float[Tensor, \"B 1 H W\"]] = None,\n overlapping_region_mask: Optional[Float[Tensor, \"B 1 H W\"]] = None,\n max_depth: Optional[float] = None,\n use_inverse=False,\n fov_x: Optional[float] = None,\n fov_y: Optional[float] = None,\n ):\n assert fov_x == fov_y\n batch_size, _, height, width = image.shape\n if self.depth_method != \"zoedepth\":\n assert batch_size == 1\n\n if self.depth_method == \"zoedepth\":\n predicted_depth = self.zoe.infer(image)\n elif self.depth_method == \"midas\":\n predicted_disparity = self.midas(image * 2 - 1).unsqueeze(1)\n predicted_depth = torch.where(predicted_disparity < 3, 0.0, 1 / predicted_disparity)\n else:\n raise NotImplementedError()\n\n if max_depth:\n predicted_depth[predicted_depth > max_depth] = 0.0\n\n return predicted_depth\n\n def get_distance(\n self,\n image: Float[Tensor, \"B C H W\"],\n fov_x: float,\n fov_y: float,\n rendered_distance: Optional[Float[Tensor, \"B 1 H W\"]] = None,\n overlapping_region_mask: Optional[Float[Tensor, \"B 1 H W\"]] = None,\n max_distance: Optional[float] = None,\n use_inverse=False,\n ):\n rendered_depth = distance_to_depth(rendered_distance, fov_x, fov_y)\n depth = self.get_depth(\n image,\n rendered_depth=rendered_depth,\n overlapping_region_mask=overlapping_region_mask,\n max_depth=max_distance,\n use_inverse=use_inverse,\n fov_x=fov_x,\n fov_y=fov_y,\n )\n distance = depth_to_distance(depth, fov_x, fov_y)\n\n if max_distance:\n distance[distance > max_distance] = 0.0\n overlapping_region_mask[distance > max_distance] = 0.0\n\n return distance" }, { "identifier": "Upscaler", "path": "nerfiller/inpaint/upscaler.py", "snippet": "class Upscaler:\n def __init__(\n self,\n device: str = \"cuda:0\",\n ):\n self.device = device\n # load model and scheduler\n model_id = \"stabilityai/stable-diffusion-x4-upscaler\"\n self.pipeline = StableDiffusionUpscalePipeline.from_pretrained(\n model_id, revision=\"fp16\", torch_dtype=torch.float16\n )\n self.pipeline = self.pipeline.to(self.device)\n self.pipeline.scheduler = DDIMScheduler.from_config(self.pipeline.scheduler.config)\n\n @torch.cuda.amp.autocast(enabled=False)\n def upsample(\n self,\n image: Float[Tensor, \"B 3 H W\"],\n num_inference_steps: int = 20,\n noise_level: int = 20,\n ):\n batch_size = image.shape[0]\n prompt = [\"\"] * batch_size\n upscaled_image = self.pipeline(\n prompt=prompt,\n image=image,\n num_inference_steps=num_inference_steps,\n noise_level=noise_level,\n 
output_type=\"pt\",\n ).images\n return upscaled_image" }, { "identifier": "get_inpainted_image_row", "path": "nerfiller/utils/image_utils.py", "snippet": "def get_inpainted_image_row(\n image: Float[Tensor, \"B 3 H W\"],\n mask: Float[Tensor, \"B 1 H W\"],\n inpainted_image: Optional[Float[Tensor, \"B 3 H W\"]] = None,\n color: Tuple[float, float, float] = Colors.NEON_PINK.value,\n show_original: bool = False,\n):\n \"\"\"Returns an image concatenated along the x-axis. It has the following form:\n image with inpaint regions highlighted | image with inpainted regions\n Inpaint where mask == 1.\n The default color is neon pink.\n If the inpainted image is None, then just show the `image with inpaint regions highlighted`.\n \"\"\"\n device = image.device\n c = torch.tensor(color, device=device).view(1, 3, 1, 1)\n color_image = torch.ones_like(image) * c\n image_with_highlights = torch.where(mask == 1, color_image, image)\n image_list = [image_with_highlights]\n if inpainted_image is not None:\n image_list = image_list + [inpainted_image]\n if show_original:\n image_list = [image] + image_list\n im = torch.cat(image_list, dim=-2)\n return im" }, { "identifier": "downscale_mask", "path": "nerfiller/utils/mask_utils.py", "snippet": "def downscale_mask(mask, size=None, scale_factor=None, dilate_iters=0, dilate_kernel_size=3):\n \"\"\"\n Downscale the mask in a conservative way. 1s are where to inpaint, 0 where to not inpaint.\n Inpaints extra pixels to prevent leakage under the mask.\n \"\"\"\n assert size or scale_factor\n if size:\n assert scale_factor is None\n if scale_factor:\n assert size is None\n for _ in range(dilate_iters):\n mask = dilate(mask, kernel_size=dilate_kernel_size)\n mask = torch.nn.functional.interpolate(mask, size=size, scale_factor=scale_factor, mode=\"bilinear\")\n mask = (mask != 0.0).float() # expands the mask slightly for no leakage of pixels\n return mask" } ]
from dataclasses import dataclass, field
from typing import Literal, Optional, Type

from torch.cuda.amp.grad_scaler import GradScaler
from nerfstudio.pipelines.base_pipeline import VanillaPipelineConfig, VanillaPipeline
from nerfiller.inpaint.rgb_inpainter import RGBInpainter, RGBInpainterXL
from nerfiller.inpaint.depth_inpainter import DepthInpainter
from nerfiller.inpaint.upscaler import Upscaler
from nerfstudio.utils import profiler
from nerfiller.utils.image_utils import (
    get_inpainted_image_row,
)
from nerfstudio.utils.rich_utils import Console
from nerfstudio.utils.colormaps import apply_colormap, ColormapOptions
from jaxtyping import Float
from torch import Tensor
from nerfiller.utils.mask_utils import downscale_mask
from nerfiller.utils.typing import *
from nerfstudio.engine.callbacks import TrainingCallback, TrainingCallbackAttributes
import torch
import mediapy
12,779
    use_annealing: bool = True
    lower_bound: float = 0.4
    """Lower bound for diffusion timesteps to use for image editing"""
    upper_bound: float = 1.0
    """Upper bound for diffusion timesteps to use for image editing"""
    denoise_in_grid: bool = True
    dilate_iters: int = 5
    dilate_kernel_size: int = 3
    allow_camera_mismatch: bool = False
    tile_resolution: int = 256
    upscale: bool = False
    inpaint_chunk_size: Optional[int] = None
    render_all_rate: int = 5000
    reference_image: Path = Path("reference.png")
    lora_model_path: Optional[str] = None
    only_sample_from_latest: bool = True
    """Only sample rays from the latest inpaints."""
    inpaint_method: str = "inpaint"
    """Strategy for inpainting a batch of images."""
    text_guidance_scale: float = 0.0
    image_guidance_scale: float = 1.5
    inpaint_index_start: int = 0
    """We will edit images starting from this index and onward."""
    sds_loss_mult: float = 1.0
    sds_guidance_mult: float = 10.0
    sds_downscale_factor: int = 1


class NeRFillerPipeline(VanillaPipeline):
    """The pipeline for the NeRFiller method."""

    def __init__(
        self,
        config: NeRFillerPipelineConfig,
        device: str,
        test_mode: Literal["test", "val", "inference"] = "val",
        world_size: int = 1,
        local_rank: int = 0,
        grad_scaler: Optional[GradScaler] = None,
    ):
        super().__init__(config, device, test_mode, world_size, local_rank, grad_scaler=grad_scaler)

        if test_mode != "val":
            # skip the rest of setup if we aren't going to train
            return

        self.grad_scaler = grad_scaler
        self.start_step = None
        self.num_train_images = len(self.datamanager.train_dataparser_outputs.image_filenames)
        self.load_training_modules()

    def get_training_callbacks(
        self, training_callback_attributes: TrainingCallbackAttributes
    ) -> List[TrainingCallback]:
        self.trainer_base_dir = training_callback_attributes.trainer.base_dir
        return super().get_training_callbacks(training_callback_attributes)

    def load_state_dict(self, state_dict: Mapping[str, Any], strict: Optional[bool] = None):
        is_ddp_model_state = True
        model_state = {}
        for key, value in state_dict.items():
            if key.startswith("_model."):
                # remove the "_model." prefix from key
                model_state[key[len("_model.") :]] = value
                # make sure that the "module." prefix comes from DDP,
                # rather than an attribute of the model named "module"
                if not key.startswith("_model.module."):
                    is_ddp_model_state = False
        # remove "module." prefix added by DDP
        if is_ddp_model_state:
            model_state = {key[len("module.") :]: value for key, value in model_state.items()}
        pipeline_state = {key: value for key, value in state_dict.items() if not key.startswith("_model.")}

        if self.config.allow_camera_mismatch:
            # Don't set the weights for the appearance embedding
            # This sets the weights to be zero.
            key = "field.embedding_appearance.embedding.weight"
            model_state[key] = torch.zeros(self.model.field.embedding_appearance.embedding.weight.shape)

        try:
            self.model.load_state_dict(model_state, strict=True)
        except RuntimeError:
            if not strict:
                self.model.load_state_dict(model_state, strict=False)
            else:
                raise
        super().load_state_dict(pipeline_state, strict=False)

    def load_training_modules(self):
        """Load the modules."""

        # RGB and depth inpainting
        rgb_inpaint_device = (
            self.config.rgb_inpaint_device if self.config.rgb_inpaint_device is not None else self.device
        )
        rgb_inpaint_vae_device = (
            self.config.rgb_inpaint_vae_device
            if self.config.rgb_inpaint_vae_device is not None
            else rgb_inpaint_device
        )
        if self.config.rgb_inpainter == "sd":
            self.rgb_inpainter = RGBInpainter(
                device=rgb_inpaint_device,
                vae_device=rgb_inpaint_vae_device,
                lora_model_path=self.config.lora_model_path,
            )
        elif self.config.rgb_inpainter == "sdxl":
            self.rgb_inpainter = RGBInpainterXL(
                device=rgb_inpaint_device,
                vae_device=rgb_inpaint_vae_device,
                lora_model_path=self.config.lora_model_path,
            )
        depth_inpaint_device = (
            self.config.depth_inpaint_device if self.config.depth_inpaint_device is not None else self.device
        )
CONSOLE = Console()


@dataclass
class NeRFillerPipelineConfig(VanillaPipelineConfig):
    """The config for the NeRFiller pipeline."""

    _target: Type = field(default_factory=lambda: NeRFillerPipeline)
    patch_size: int = 32

    # inpaint args
    rgb_inpainter: str = "sd"
    rgb_inpaint_device: Optional[str] = "cuda:1"
    """device to put the rgb inpainting module on"""
    rgb_inpaint_vae_device: Optional[str] = None
    """device to put the vae inpainting module on. defaults to rgb inpaint device"""
    depth_inpaint_device: Optional[str] = "cuda:0"
    """device to put the depth inpainting module on"""
    upscale_device: Optional[str] = "cuda:0"
    """device to put the upscaler module on"""
    prompt: str = "highly detailed, 4K, hdr, sharp focus, image"
    """positive prompt for text-conditioned inpainting"""
    negative_prompt: str = ""
    """negative prompt for text-conditionied inpainting"""
    depth_method: Literal["zoedepth", "irondepth"] = "zoedepth"
    """which depth network to use for depth prediction or depth completion"""

    # sds
    use_sds: bool = False

    # du (dataset update) args
    use_du: bool = True
    """how often to update the dataset via inpainting. if 0, don't do dataset updating"""
    edit_rate: int = 1000
    """how often to make an edit"""
    edit_num: int = 40
    """number of images to edit at a time"""
    edit_iters: int = 30001
    """how many iterations until we stop making changes"""
    num_inference_steps: int = 20
    multidiffusion_steps: int = 1
    randomize_latents: bool = True
    randomize_within_grid: bool = False
    use_annealing: bool = True
    lower_bound: float = 0.4
    """Lower bound for diffusion timesteps to use for image editing"""
    upper_bound: float = 1.0
    """Upper bound for diffusion timesteps to use for image editing"""
    denoise_in_grid: bool = True
    dilate_iters: int = 5
    dilate_kernel_size: int = 3
    allow_camera_mismatch: bool = False
    tile_resolution: int = 256
    upscale: bool = False
    inpaint_chunk_size: Optional[int] = None
    render_all_rate: int = 5000
    reference_image: Path = Path("reference.png")
    lora_model_path: Optional[str] = None
    only_sample_from_latest: bool = True
    """Only sample rays from the latest inpaints."""
    inpaint_method: str = "inpaint"
    """Strategy for inpainting a batch of images."""
    text_guidance_scale: float = 0.0
    image_guidance_scale: float = 1.5
    inpaint_index_start: int = 0
    """We will edit images starting from this index and onward."""
    sds_loss_mult: float = 1.0
    sds_guidance_mult: float = 10.0
    sds_downscale_factor: int = 1


class NeRFillerPipeline(VanillaPipeline):
    """The pipeline for the NeRFiller method."""

    def __init__(
        self,
        config: NeRFillerPipelineConfig,
        device: str,
        test_mode: Literal["test", "val", "inference"] = "val",
        world_size: int = 1,
        local_rank: int = 0,
        grad_scaler: Optional[GradScaler] = None,
    ):
        super().__init__(config, device, test_mode, world_size, local_rank, grad_scaler=grad_scaler)

        if test_mode != "val":
            # skip the rest of setup if we aren't going to train
            return

        self.grad_scaler = grad_scaler
        self.start_step = None
        self.num_train_images = len(self.datamanager.train_dataparser_outputs.image_filenames)
        self.load_training_modules()

    def get_training_callbacks(
        self, training_callback_attributes: TrainingCallbackAttributes
    ) -> List[TrainingCallback]:
        self.trainer_base_dir = training_callback_attributes.trainer.base_dir
        return super().get_training_callbacks(training_callback_attributes)

    def load_state_dict(self, state_dict: Mapping[str, Any], strict: Optional[bool] = None):
        is_ddp_model_state = True
        model_state = {}
        for key, value in state_dict.items():
            if key.startswith("_model."):
                # remove the "_model." prefix from key
                model_state[key[len("_model.") :]] = value
                # make sure that the "module." prefix comes from DDP,
                # rather than an attribute of the model named "module"
                if not key.startswith("_model.module."):
                    is_ddp_model_state = False
        # remove "module." prefix added by DDP
        if is_ddp_model_state:
            model_state = {key[len("module.") :]: value for key, value in model_state.items()}
        pipeline_state = {key: value for key, value in state_dict.items() if not key.startswith("_model.")}

        if self.config.allow_camera_mismatch:
            # Don't set the weights for the appearance embedding
            # This sets the weights to be zero.
            key = "field.embedding_appearance.embedding.weight"
            model_state[key] = torch.zeros(self.model.field.embedding_appearance.embedding.weight.shape)

        try:
            self.model.load_state_dict(model_state, strict=True)
        except RuntimeError:
            if not strict:
                self.model.load_state_dict(model_state, strict=False)
            else:
                raise
        super().load_state_dict(pipeline_state, strict=False)

    def load_training_modules(self):
        """Load the modules."""

        # RGB and depth inpainting
        rgb_inpaint_device = (
            self.config.rgb_inpaint_device if self.config.rgb_inpaint_device is not None else self.device
        )
        rgb_inpaint_vae_device = (
            self.config.rgb_inpaint_vae_device
            if self.config.rgb_inpaint_vae_device is not None
            else rgb_inpaint_device
        )
        if self.config.rgb_inpainter == "sd":
            self.rgb_inpainter = RGBInpainter(
                device=rgb_inpaint_device,
                vae_device=rgb_inpaint_vae_device,
                lora_model_path=self.config.lora_model_path,
            )
        elif self.config.rgb_inpainter == "sdxl":
            self.rgb_inpainter = RGBInpainterXL(
                device=rgb_inpaint_device,
                vae_device=rgb_inpaint_vae_device,
                lora_model_path=self.config.lora_model_path,
            )
        depth_inpaint_device = (
            self.config.depth_inpaint_device if self.config.depth_inpaint_device is not None else self.device
        )
self.depth_inpainter = DepthInpainter(depth_method=self.config.depth_method, device=depth_inpaint_device)
2
2023-12-07 19:12:08+00:00
16k
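For orientation, the row that ends just above reads as one next-line completion example: the list of {"identifier", "path", "snippet"} entries is the retrieved cross-file context, the import block and the code prefix give the in-file state, the single line `self.depth_inpainter = DepthInpainter(depth_method=self.config.depth_method, device=depth_inpaint_device)` is the ground-truth continuation, and the bare index 2 appears to line up with the DepthInpainter snippet in that context list. The sketch below shows one plausible way such a row could be consumed for evaluation; the dataclass, its field names, and the generate_next_line callable are hypothetical stand-ins for illustration, not an API shipped with this data.

from dataclasses import dataclass
from typing import Callable, Dict, List


@dataclass
class RepoCompletionRow:
    """Hypothetical container mirroring the fields of one row in the dump above (others omitted)."""

    repo_name: str                 # e.g. "nnanhuang/Customize-it-3D"
    file_path: str                 # e.g. "nerf/network_tcnn.py"
    context: List[Dict[str, str]]  # retrieved snippets with keys "identifier", "path", "snippet"
    import_statement: str          # import block of the target file
    cropped_code: str              # code immediately preceding the line to predict
    next_line: str                 # ground-truth continuation of cropped_code
    gold_snippet_index: int        # index into `context` of the snippet the next line relies on


def evaluate_row(row: RepoCompletionRow, generate_next_line: Callable[[str], str]) -> Dict[str, object]:
    """Build a prompt from context plus in-file prefix, query a model, and score by exact match."""
    # For the row above, index 2 lines up with the DepthInpainter context snippet.
    gold = row.context[row.gold_snippet_index]
    # Cross-file context first, then the target file's imports and code prefix.
    prompt = "\n\n".join(item["snippet"] for item in row.context)
    prompt += "\n\n" + row.import_statement + "\n" + row.cropped_code
    prediction = generate_next_line(prompt)
    return {
        "exact_match": prediction.strip() == row.next_line.strip(),
        "gold_identifier": gold["identifier"],
    }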
nnanhuang/Customize-it-3D
nerf/network_tcnn.py
[ { "identifier": "trunc_exp", "path": "activation.py", "snippet": "class _trunc_exp(Function):\n def forward(ctx, x):\n def backward(ctx, g):" }, { "identifier": "NeRFRenderer", "path": "nerf/renderer.py", "snippet": "class NeRFRenderer(nn.Module):\n def __init__(self, opt):\n super().__init__()\n\n self.opt = opt\n self.bound = opt.bound\n self.cascade = 1 + math.ceil(math.log2(opt.bound))\n self.grid_size = 128\n self.cuda_ray = opt.cuda_ray\n self.min_near = opt.min_near\n self.density_thresh = opt.density_thresh\n self.bg_radius = opt.bg_radius\n\n # prepare aabb with a 6D tensor (xmin, ymin, zmin, xmax, ymax, zmax)\n # NOTE: aabb (can be rectangular) is only used to generate points, we still rely on bound (always cubic) to calculate density grid and hashing.\n aabb_train = torch.FloatTensor([-opt.bound, -opt.bound, -opt.bound, opt.bound, opt.bound, opt.bound])\n aabb_infer = aabb_train.clone()\n self.register_buffer('aabb_train', aabb_train)\n self.register_buffer('aabb_infer', aabb_infer)\n\n # extra state for cuda raymarching\n if self.cuda_ray:\n # density grid\n density_grid = torch.zeros([self.cascade, self.grid_size ** 3]) # [CAS, H * H * H]\n density_bitfield = torch.zeros(self.cascade * self.grid_size ** 3 // 8, dtype=torch.uint8) # [CAS * H * H * H // 8]\n self.register_buffer('density_grid', density_grid)\n self.register_buffer('density_bitfield', density_bitfield)\n self.mean_density = 0\n self.iter_density = 0\n # step counter\n step_counter = torch.zeros(16, 2, dtype=torch.int32) # 16 is hardcoded for averaging...\n self.register_buffer('step_counter', step_counter)\n self.mean_count = 0\n self.local_step = 0\n\n \n def forward(self, x, d):\n raise NotImplementedError()\n\n def density(self, x):\n raise NotImplementedError()\n\n def color(self, x, d, mask=None, **kwargs):\n raise NotImplementedError()\n\n def reset_extra_state(self):\n if not self.cuda_ray:\n return \n # density grid\n self.density_grid.zero_()\n self.mean_density = 0\n self.iter_density = 0\n # step counter\n self.step_counter.zero_()\n self.mean_count = 0\n self.local_step = 0\n\n # @torch.no_grad()\n # def export_mesh(self, path, resolution=None, S=128):\n\n # if resolution is None:\n # resolution = self.grid_size\n\n # if self.cuda_ray:\n # density_thresh = min(self.mean_density, self.density_thresh)\n # else:\n # density_thresh = self.density_thresh\n\n # sigmas = np.zeros([resolution, resolution, resolution], dtype=np.float32)\n\n # # query\n # X = torch.linspace(-1, 1, resolution).split(S)\n # Y = torch.linspace(-1, 1, resolution).split(S)\n # Z = torch.linspace(-1, 1, resolution).split(S)\n\n # for xi, xs in enumerate(X):\n # for yi, ys in enumerate(Y):\n # for zi, zs in enumerate(Z):\n # xx, yy, zz = custom_meshgrid(xs, ys, zs)\n # pts = torch.cat([xx.reshape(-1, 1), yy.reshape(-1, 1), zz.reshape(-1, 1)], dim=-1) # [S, 3]\n # val = self.density(pts.to(self.aabb_train.device))\n # sigmas[xi * S: xi * S + len(xs), yi * S: yi * S + len(ys), zi * S: zi * S + len(zs)] = val['sigma'].reshape(len(xs), len(ys), len(zs)).detach().cpu().numpy() # [S, 1] --> [x, y, z]\n\n # vertices, triangles = mcubes.marching_cubes(sigmas, density_thresh)\n \n # vertices = vertices / (resolution - 1.0) * 2 - 1\n # vertices = vertices.astype(np.float32)\n # triangles = triangles.astype(np.int32)\n\n # v = torch.from_numpy(vertices).to(self.aabb_train.device)\n # f = torch.from_numpy(triangles).int().to(self.aabb_train.device)\n\n\n # # texture?\n # def _export(v, f, h0=2048, w0=2048, ssaa=1, name=''):\n # # v, f: torch 
Tensor\n # device = v.device\n # v_np = v.cpu().numpy() # [N, 3]\n # f_np = f.cpu().numpy() # [M, 3]\n\n # print(f'[INFO] running xatlas to unwrap UVs for mesh: v={v_np.shape} f={f_np.shape}')\n\n # # unwrap uvs\n # import xatlas\n # import nvdiffrast.torch as dr\n # from sklearn.neighbors import NearestNeighbors\n # from scipy.ndimage import binary_dilation, binary_erosion\n\n # glctx = dr.RasterizeCudaContext()\n\n # atlas = xatlas.Atlas()\n # atlas.add_mesh(v_np, f_np)\n # chart_options = xatlas.ChartOptions()\n # chart_options.max_iterations = 0 # disable merge_chart for faster unwrap...\n # atlas.generate(chart_options=chart_options)\n # vmapping, ft_np, vt_np = atlas[0] # [N], [M, 3], [N, 2]\n\n # # vmapping, ft_np, vt_np = xatlas.parametrize(v_np, f_np) # [N], [M, 3], [N, 2]\n\n # vt = torch.from_numpy(vt_np.astype(np.float32)).float().to(device)\n # ft = torch.from_numpy(ft_np.astype(np.int64)).int().to(device)\n\n # # render uv maps\n # uv = vt * 2.0 - 1.0 # uvs to range [-1, 1]\n # uv = torch.cat((uv, torch.zeros_like(uv[..., :1]), torch.ones_like(uv[..., :1])), dim=-1) # [N, 4]\n\n # if ssaa > 1:\n # h = int(h0 * ssaa)\n # w = int(w0 * ssaa)\n # else:\n # h, w = h0, w0\n\n # rast, _ = dr.rasterize(glctx, uv.unsqueeze(0), ft, (h, w)) # [1, h, w, 4]\n # xyzs, _ = dr.interpolate(v.unsqueeze(0), rast, f) # [1, h, w, 3]\n # mask, _ = dr.interpolate(torch.ones_like(v[:, :1]).unsqueeze(0), rast, f) # [1, h, w, 1]\n\n # # masked query \n # xyzs = xyzs.view(-1, 3)\n # mask = (mask > 0).view(-1)\n \n # sigmas = torch.zeros(h * w, device=device, dtype=torch.float32)\n # feats = torch.zeros(h * w, 3, device=device, dtype=torch.float32)\n\n # if mask.any():\n # xyzs = xyzs[mask] # [M, 3]\n\n # # batched inference to avoid OOM\n # all_sigmas = []\n # all_feats = []\n # head = 0\n # while head < xyzs.shape[0]:\n # tail = min(head + 640000, xyzs.shape[0])\n # results_ = self.density(xyzs[head:tail])\n # all_sigmas.append(results_['sigma'].float())\n # all_feats.append(results_['albedo'].float())\n # head += 640000\n\n # sigmas[mask] = torch.cat(all_sigmas, dim=0)\n # feats[mask] = torch.cat(all_feats, dim=0)\n \n # sigmas = sigmas.view(h, w, 1)\n # feats = feats.view(h, w, -1)\n # mask = mask.view(h, w)\n\n # ### alpha mask\n # # quantize [0.0, 1.0] to [0, 255]\n # feats = feats.cpu().numpy()\n # feats = (feats * 255).astype(np.uint8)\n\n # # alphas = alphas.cpu().numpy()\n # # alphas = (alphas * 255).astype(np.uint8)\n\n # ### NN search as an antialiasing ...\n # mask = mask.cpu().numpy()\n\n # inpaint_region = binary_dilation(mask, iterations=3)\n # inpaint_region[mask] = 0\n\n # search_region = mask.copy()\n # not_search_region = binary_erosion(search_region, iterations=2)\n # search_region[not_search_region] = 0\n\n # search_coords = np.stack(np.nonzero(search_region), axis=-1)\n # inpaint_coords = np.stack(np.nonzero(inpaint_region), axis=-1)\n\n # knn = NearestNeighbors(n_neighbors=1, algorithm='kd_tree').fit(search_coords)\n # _, indices = knn.kneighbors(inpaint_coords)\n\n # feats[tuple(inpaint_coords.T)] = feats[tuple(search_coords[indices[:, 0]].T)]\n\n # # do ssaa after the NN search, in numpy\n # feats = cv2.cvtColor(feats, cv2.COLOR_RGB2BGR)\n\n # if ssaa > 1:\n # # alphas = cv2.resize(alphas, (w0, h0), interpolation=cv2.INTER_NEAREST)\n # feats = cv2.resize(feats, (w0, h0), interpolation=cv2.INTER_LINEAR)\n\n # # cv2.imwrite(os.path.join(path, f'alpha.png'), alphas)\n # cv2.imwrite(os.path.join(path, f'{name}albedo.png'), feats)\n\n # # save obj (v, vt, f /)\n # obj_file = 
os.path.join(path, f'{name}mesh.obj')\n # mtl_file = os.path.join(path, f'{name}mesh.mtl')\n\n # print(f'[INFO] writing obj mesh to {obj_file}')\n # with open(obj_file, \"w\") as fp:\n # fp.write(f'mtllib {name}mesh.mtl \\n')\n \n # print(f'[INFO] writing vertices {v_np.shape}')\n # for v in v_np:\n # fp.write(f'v {v[0]} {v[1]} {v[2]} \\n')\n \n # print(f'[INFO] writing vertices texture coords {vt_np.shape}')\n # for v in vt_np:\n # fp.write(f'vt {v[0]} {1 - v[1]} \\n') \n\n # print(f'[INFO] writing faces {f_np.shape}')\n # fp.write(f'usemtl mat0 \\n')\n # for i in range(len(f_np)):\n # fp.write(f\"f {f_np[i, 0] + 1}/{ft_np[i, 0] + 1} {f_np[i, 1] + 1}/{ft_np[i, 1] + 1} {f_np[i, 2] + 1}/{ft_np[i, 2] + 1} \\n\")\n\n # with open(mtl_file, \"w\") as fp:\n # fp.write(f'newmtl mat0 \\n')\n # fp.write(f'Ka 1.000000 1.000000 1.000000 \\n')\n # fp.write(f'Kd 1.000000 1.000000 1.000000 \\n')\n # fp.write(f'Ks 0.000000 0.000000 0.000000 \\n')\n # fp.write(f'Tr 1.000000 \\n')\n # fp.write(f'illum 1 \\n')\n # fp.write(f'Ns 0.000000 \\n')\n # fp.write(f'map_Kd {name}albedo.png \\n')\n\n # _export(v, f)\n\n @torch.no_grad()\n def export_mesh(self, path, resolution=None, decimate_target=5e4, S=128):\n from meshutils import decimate_mesh, clean_mesh, poisson_mesh_reconstruction\n # if self.opt.dmtet:\n # vertices, triangles = self.dmtet.get_verts_face()\n # vertices = vertices.detach().cpu().numpy()\n # triangles = triangles.detach().cpu().numpy()\n\n # else:\n\n if resolution is None:\n resolution = self.grid_size\n\n if self.cuda_ray:\n # density_thresh = self.density_thresh\n density_thresh = min(self.mean_density, self.density_thresh) \\\n if np.greater(self.mean_density, 0) else self.density_thresh\n else:\n density_thresh = self.density_thresh\n\n sigmas = np.zeros(\n [resolution, resolution, resolution], dtype=np.float32)\n\n # query\n X = torch.linspace(-1, 1, resolution).split(S)\n Y = torch.linspace(-1, 1, resolution).split(S)\n Z = torch.linspace(-1, 1, resolution).split(S)\n \n # # Define the bounding box range\n # bbox_min = [-0.4, -0.4, -math.inf]\n # bbox_max = [0.4, 0.4, math.inf]\n\n for xi, xs in enumerate(X):\n for yi, ys in enumerate(Y):\n for zi, zs in enumerate(Z):\n xx, yy, zz = custom_meshgrid(xs, ys, zs)\n pts = torch.cat(\n [xx.reshape(-1, 1), yy.reshape(-1, 1), zz.reshape(-1, 1)], dim=-1) # [S, 3]\n \n # Check if points are inside the bounding box\n # inside_bbox = (\n # (pts[:, 0] >= bbox_min[0]) & (pts[:, 0] <= bbox_max[0]) &\n # (pts[:, 1] >= bbox_min[1]) & (pts[:, 1] <= bbox_max[1]) &\n # (pts[:, 2] >= bbox_min[2]) & (pts[:, 2] <= bbox_max[2])\n # )\n\n # Only calculate density for points inside the bounding box \n val = self.density(pts.to(self.aabb_train.device))\n # sigmas[xi * S: xi * S + len(xs), yi * S: yi * S + len(ys), zi * S: zi * S + len(zs)] = np.where(inside_bbox.reshape(len(xs), len(ys), len(zs)), val['sigma'].reshape(len(xs), len(ys), len(zs)).detach().cpu().numpy(), 0)\n\n sigmas[xi * S: xi * S + len(xs), yi * S: yi * S + len(ys), zi * S: zi * S + len(\n zs)] = val['sigma'].reshape(len(xs), len(ys), len(zs)).detach().cpu().numpy() # [S, 1] --> [x, y, z]\n\n print(\n f'[INFO] marching cubes thresh: {density_thresh} ({sigmas.min()} ~ {sigmas.max()})')\n\n vertices, triangles = mcubes.marching_cubes(sigmas, density_thresh)\n vertices = vertices / (resolution - 1.0) * 2 - 1\n\n # clean\n vertices = vertices.astype(np.float32)\n triangles = triangles.astype(np.int32)\n vertices, triangles = clean_mesh(\n vertices, triangles, remesh=True, remesh_size=0.01)\n\n # 
decimation\n if decimate_target > 0 and triangles.shape[0] > decimate_target:\n vertices, triangles = decimate_mesh(\n vertices, triangles, decimate_target,optimalplacement = False)\n\n v = torch.from_numpy(vertices).contiguous(\n ).float().to(self.aabb_train.device)\n f = torch.from_numpy(triangles).contiguous().int().to(\n self.aabb_train.device)\n\n # mesh = trimesh.Trimesh(vertices, triangles, process=False) # important, process=True leads to seg fault...\n # mesh.export(os.path.join(path, f'mesh.ply'))\n\n def _export(v, f, h0=2048, w0=2048, ssaa=1, name=''):\n # v, f: torch Tensor\n device = v.device\n v_np = v.cpu().numpy() # [N, 3]\n f_np = f.cpu().numpy() # [M, 3]\n\n print(\n f'[INFO] running xatlas to unwrap UVs for mesh: v={v_np.shape} f={f_np.shape}')\n\n # unwrap uvs\n import xatlas\n import nvdiffrast.torch as dr\n from sklearn.neighbors import NearestNeighbors\n from scipy.ndimage import binary_dilation, binary_erosion\n\n glctx = dr.RasterizeCudaContext()\n \n atlas = xatlas.Atlas()\n atlas.add_mesh(v_np, f_np)\n chart_options = xatlas.ChartOptions()\n chart_options.max_iterations = 4 # for faster unwrap...\n atlas.generate(chart_options=chart_options)\n vmapping, ft_np, vt_np = atlas[0] # [N], [M, 3], [N, 2]\n\n # vmapping, ft_np, vt_np = xatlas.parametrize(v_np, f_np) # [N], [M, 3], [N, 2]\n\n vt = torch.from_numpy(vt_np.astype(np.float32)).float().to(device)\n ft = torch.from_numpy(ft_np.astype(np.int64)).int().to(device)\n\n # render uv maps\n uv = vt * 2.0 - 1.0 # uvs to range [-1, 1]\n uv = torch.cat((uv, torch.zeros_like(\n uv[..., :1]), torch.ones_like(uv[..., :1])), dim=-1) # [N, 4]\n\n if ssaa > 1:\n h = int(h0 * ssaa)\n w = int(w0 * ssaa)\n else:\n h, w = h0, w0\n \n if glctx is None:\n if h <= 2048 and w <= 2048:\n glctx = dr.RasterizeCudaContext()\n else:\n glctx = dr.RasterizeGLContext()\n\n rast, _ = dr.rasterize(glctx, uv.unsqueeze(\n 0), ft, (h, w)) # [1, h, w, 4]\n xyzs, _ = dr.interpolate(v.unsqueeze(0), rast, f) # [1, h, w, 3]\n mask, _ = dr.interpolate(torch.ones_like(\n v[:, :1]).unsqueeze(0), rast, f) # [1, h, w, 1]\n\n # masked query \n xyzs = xyzs.view(-1, 3)\n mask = (mask > 0).view(-1)\n \n feats = torch.zeros(h * w, 3, device=device, dtype=torch.float32)\n\n if mask.any():\n xyzs = xyzs[mask] # [M, 3]\n\n # batched inference to avoid OOM\n all_feats = []\n head = 0\n while head < xyzs.shape[0]:\n tail = min(head + 640000, xyzs.shape[0])\n results_ = self.density(xyzs[head:tail])\n all_feats.append(results_['albedo'].float())\n head += 640000\n\n feats[mask] = torch.cat(all_feats, dim=0)\n \n feats = feats.view(h, w, -1)\n mask = mask.view(h, w)\n\n # quantize [0.0, 1.0] to [0, 255]\n feats = feats.cpu().numpy()\n feats = (feats * 255).astype(np.uint8)\n\n ### NN search as an antialiasing ...\n mask = mask.cpu().numpy()\n\n inpaint_region = binary_dilation(mask, iterations=3)\n inpaint_region[mask] = 0\n\n search_region = mask.copy()\n not_search_region = binary_erosion(search_region, iterations=2)\n search_region[not_search_region] = 0\n\n search_coords = np.stack(np.nonzero(search_region), axis=-1)\n inpaint_coords = np.stack(np.nonzero(inpaint_region), axis=-1)\n\n knn = NearestNeighbors(n_neighbors=1, algorithm='kd_tree').fit(search_coords)\n _, indices = knn.kneighbors(inpaint_coords)\n\n feats[tuple(inpaint_coords.T)] = feats[tuple(search_coords[indices[:, 0]].T)]\n\n feats = cv2.cvtColor(feats, cv2.COLOR_RGB2BGR)\n\n # do ssaa after the NN search, in numpy\n if ssaa > 1:\n feats = cv2.resize(feats, (w0, h0), 
interpolation=cv2.INTER_LINEAR)\n\n cv2.imwrite(os.path.join(path, f'{name}albedo.png'), feats)\n\n # save obj (v, vt, f /)\n obj_file = os.path.join(path, f'{name}mesh.obj')\n mtl_file = os.path.join(path, f'{name}mesh.mtl')\n\n print(f'[INFO] writing obj mesh to {obj_file}')\n with open(obj_file, \"w\") as fp:\n fp.write(f'mtllib {name}mesh.mtl \\n')\n\n print(f'[INFO] writing vertices {v_np.shape}')\n for v in v_np:\n fp.write(f'v {v[0]} {v[1]} {v[2]} \\n')\n\n print(\n f'[INFO] writing vertices texture coords {vt_np.shape}')\n for v in vt_np:\n fp.write(f'vt {v[0]} {1 - v[1]} \\n')\n\n print(f'[INFO] writing faces {f_np.shape}')\n fp.write(f'usemtl mat0 \\n')\n for i in range(len(f_np)):\n fp.write(\n f\"f {f_np[i, 0] + 1}/{ft_np[i, 0] + 1} {f_np[i, 1] + 1}/{ft_np[i, 1] + 1} {f_np[i, 2] + 1}/{ft_np[i, 2] + 1} \\n\")\n\n with open(mtl_file, \"w\") as fp:\n fp.write(f'newmtl mat0 \\n')\n fp.write(f'Ka 1.000000 1.000000 1.000000 \\n')\n fp.write(f'Kd 1.000000 1.000000 1.000000 \\n')\n fp.write(f'Ks 0.000000 0.000000 0.000000 \\n')\n fp.write(f'Tr 1.000000 \\n')\n fp.write(f'illum 1 \\n')\n fp.write(f'Ns 0.000000 \\n')\n fp.write(f'map_Kd {name}albedo.png \\n')\n\n _export(v, f)\n\n def run(self, rays_o, rays_d, ref_bg=None, num_steps=128, upsample_steps=128, light_d=None, ambient_ratio=1.0, shading='albedo', bg_color=None, perturb=False, **kwargs):\n # rays_o, rays_d: [B, N, 3], assumes B == 1\n # bg_color: [BN, 3] in range [0, 1]\n # return: image: [B, N, 3], depth: [B, N]\n\n prefix = rays_o.shape[:-1]\n rays_o = rays_o.contiguous().view(-1, 3)\n rays_d = rays_d.contiguous().view(-1, 3)\n\n N = rays_o.shape[0] # N = B * N, in fact\n device = rays_o.device\n\n results = {}\n\n # choose aabb\n aabb = self.aabb_train if self.training else self.aabb_infer\n\n # sample steps\n nears, fars = near_far_from_bound(rays_o, rays_d, self.bound, type='sphere', min_near=self.min_near)\n \n # random sample light_d if not provided\n if light_d is None:\n # gaussian noise around the ray origin, so the light always face the view dir (avoid dark face)\n light_d = (rays_o[0] + torch.randn(3, device=device, dtype=torch.float))\n light_d = safe_normalize(light_d)\n\n z_vals = torch.linspace(0.0, 1.0, num_steps, device=device).unsqueeze(0) # [1, T]\n z_vals = z_vals.expand((N, num_steps)) # [N, T]\n z_vals = nears + (fars - nears) * z_vals # [N, T], in [nears, fars]\n\n # perturb z_vals\n sample_dist = (fars - nears) / num_steps\n if perturb:\n z_vals = z_vals + (torch.rand(z_vals.shape, device=device) - 0.5) * sample_dist\n\n # generate xyzs\n xyzs = rays_o.unsqueeze(-2) + rays_d.unsqueeze(-2) * z_vals.unsqueeze(-1) # [N, 1, 3] * [N, T, 1] -> [N, T, 3]\n xyzs = torch.min(torch.max(xyzs, aabb[:3]), aabb[3:]) # a manual clip.\n\n #plot_pointcloud(xyzs.reshape(-1, 3).detach().cpu().numpy())\n\n # query SDF and RGB\n density_outputs = self.density(xyzs.reshape(-1, 3))\n\n #sigmas = density_outputs['sigma'].view(N, num_steps) # [N, T]\n for k, v in density_outputs.items():\n density_outputs[k] = v.view(N, num_steps, -1)\n\n # upsample z_vals (nerf-like)\n if upsample_steps > 0:\n with torch.no_grad():\n\n deltas = z_vals[..., 1:] - z_vals[..., :-1] # [N, T-1]\n deltas = torch.cat([deltas, sample_dist * torch.ones_like(deltas[..., :1])], dim=-1)\n\n alphas = 1 - torch.exp(-deltas * density_outputs['sigma'].squeeze(-1)) # [N, T]\n alphas_shifted = torch.cat([torch.ones_like(alphas[..., :1]), 1 - alphas + 1e-15], dim=-1) # [N, T+1]\n weights = alphas * torch.cumprod(alphas_shifted, dim=-1)[..., :-1] # [N, T]\n\n # 
sample new z_vals\n z_vals_mid = (z_vals[..., :-1] + 0.5 * deltas[..., :-1]) # [N, T-1]\n new_z_vals = sample_pdf(z_vals_mid, weights[:, 1:-1], upsample_steps, det=not self.training).detach() # [N, t]\n\n new_xyzs = rays_o.unsqueeze(-2) + rays_d.unsqueeze(-2) * new_z_vals.unsqueeze(-1) # [N, 1, 3] * [N, t, 1] -> [N, t, 3]\n new_xyzs = torch.min(torch.max(new_xyzs, aabb[:3]), aabb[3:]) # a manual clip.\n\n # only forward new points to save computation\n new_density_outputs = self.density(new_xyzs.reshape(-1, 3))\n #new_sigmas = new_density_outputs['sigma'].view(N, upsample_steps) # [N, t]\n for k, v in new_density_outputs.items():\n new_density_outputs[k] = v.view(N, upsample_steps, -1)\n\n # re-order\n z_vals = torch.cat([z_vals, new_z_vals], dim=1) # [N, T+t]\n z_vals, z_index = torch.sort(z_vals, dim=1)\n\n xyzs = torch.cat([xyzs, new_xyzs], dim=1) # [N, T+t, 3]\n xyzs = torch.gather(xyzs, dim=1, index=z_index.unsqueeze(-1).expand_as(xyzs))\n\n for k in density_outputs:\n tmp_output = torch.cat([density_outputs[k], new_density_outputs[k]], dim=1)\n density_outputs[k] = torch.gather(tmp_output, dim=1, index=z_index.unsqueeze(-1).expand_as(tmp_output))\n\n deltas = z_vals[..., 1:] - z_vals[..., :-1] # [N, T+t-1]\n deltas = torch.cat([deltas, sample_dist * torch.ones_like(deltas[..., :1])], dim=-1)\n alphas = 1 - torch.exp(-deltas * density_outputs['sigma'].squeeze(-1)) # [N, T+t]\n alphas_shifted = torch.cat([torch.ones_like(alphas[..., :1]), 1 - alphas + 1e-15], dim=-1) # [N, T+t+1]\n weights = alphas * torch.cumprod(alphas_shifted, dim=-1)[..., :-1] # [N, T+t]\n\n dirs = rays_d.view(-1, 1, 3).expand_as(xyzs)\n for k, v in density_outputs.items():\n density_outputs[k] = v.view(-1, v.shape[-1])\n\n sigmas, rgbs, normals = self(xyzs.reshape(-1, 3), dirs.reshape(-1, 3), light_d, ratio=ambient_ratio, shading=shading)\n rgbs = rgbs.view(N, -1, 3) # [N, T+t, 3]\n\n if normals is not None:\n\n # calculate normal \n normal_map = normals.reshape(N, -1, 3) # [N, T, 3]\n normal_map = torch.sum(normal_map * weights[:, :, None], dim=1)\n\n # orientation loss\n normals = normals.view(N, -1, 3)\n loss_orient = weights.detach() * (normals * dirs).sum(-1).clamp(min=0) ** 2\n results['loss_orient'] = loss_orient.sum(-1).mean()\n\n # surface normal smoothness\n if self.opt.lambda_smooth > 0:\n normals_perturb = self.normal(xyzs.reshape(-1, 3) + torch.randn_like(xyzs).reshape(-1, 3) * 1e-2).view(N, -1, 3)\n loss_smooth = (normals - normals_perturb).abs()\n results['loss_smooth'] = loss_smooth.mean()\n\n # calculate weight_sum (mask)\n weights_sum = weights.sum(dim=-1) # [N]\n \n # calculate depth \n depth = torch.sum(weights * z_vals, dim=-1)\n \n # calculate color\n image = torch.sum(weights.unsqueeze(-1) * rgbs, dim=-2) # [N, 3], in [0, 1]\n\n # mix background color\n if self.bg_radius > 0:\n bg_color = self.background(rays_d.reshape(-1, 3)) # [N, 3]\n elif bg_color is None:\n bg_color = 1\n \n fg_image = image\n bg_image = bg_color.view(*prefix, 3)\n fg_image = fg_image.view(*prefix, 3)\n \n image = image + (1 - weights_sum).unsqueeze(-1) * bg_color\n \n image = image.view(*prefix, 3)\n depth = depth.view(*prefix, 1)\n\n mask = (nears < fars).reshape(*prefix)\n\n results['image'] = image\n results['depth'] = depth\n results['weights_sum'] = weights_sum\n results['mask'] = mask\n results['normal'] = normal_map\n if self.bg_radius > 0:\n results['bg'] = bg_image\n\n return results\n\n def run_cuda(self, rays_o, rays_d, depth_scale=None, bg_color=None, dt_gamma=0, light_d=None, ambient_ratio=1.0, 
shading='albedo', perturb=False, force_all_rays=False, max_steps=1024, T_thresh=1e-4, **kwargs):\n # rays_o, rays_d: [B, N, 3], assumes B == 1\n # return: image: [B, N, 3], depth: [B, N]\n B = rays_o.shape[0]\n prefix = rays_o.shape[:-1]\n rays_o = rays_o.contiguous().view(-1, 3)\n rays_d = rays_d.contiguous().view(-1, 3)\n \n N = rays_o.shape[0] # N = B * N, in fact\n device = rays_o.device\n\n # pre-calculate near far\n nears, fars = raymarching.near_far_from_aabb(rays_o, rays_d, self.aabb_train if self.training else self.aabb_infer)\n\n # random sample light_d if not provided\n if light_d is None:\n # gaussian noise around the ray origin, so the light always face the view dir (avoid dark face)\n light_d = (rays_o[0] + torch.randn(3, device=device, dtype=torch.float))\n light_d = safe_normalize(light_d)\n\n results = {}\n\n if self.training:\n # setup counter\n counter = self.step_counter[self.local_step % 16]\n counter.zero_() # set to 0\n self.local_step += 1\n xyzs, dirs, deltas, rays = raymarching.march_rays_train(rays_o, rays_d, self.bound, self.density_bitfield, self.cascade, self.grid_size, nears, fars, counter, self.mean_count, perturb, 128, force_all_rays, dt_gamma, max_steps) \n sigmas, rgbs, normals = self(xyzs, dirs, light_d, ratio=ambient_ratio, shading=shading)\n weights_sum, depth, image = raymarching.composite_rays_train(sigmas, rgbs, deltas, rays, T_thresh)\n # exit()\n # normals related regularizations\n if normals is not None:\n # orientation loss (not very exact in cuda ray mode)\n weights = 1 - torch.exp(-sigmas)\n \n loss_orient = weights.detach() * (normals * dirs).sum(-1).clamp(min=0) ** 2\n results['loss_orient'] = loss_orient.mean()\n\n # surface normal smoothness\n if self.opt.lambda_smooth > 0:\n normals_perturb = self.normal(xyzs + torch.randn_like(xyzs) * 1e-2)\n loss_smooth = (normals - normals_perturb).abs()\n results['loss_smooth'] = loss_smooth.mean()\n\n # _, _, normal_image = raymarching.composite_rays_train(sigmas.detach(), (normals + 1) / 2, deltas, rays, T_thresh)\n # results['normal'] = normal_image\n\n else:\n # allocate outputs \n dtype = torch.float32\n \n weights_sum = torch.zeros(N, dtype=dtype, device=device)\n depth = torch.zeros(N, dtype=dtype, device=device)\n image = torch.zeros(N, 3, dtype=dtype, device=device)\n normal = torch.zeros(N, 3, dtype=dtype, device=device)\n n_alive = N\n rays_alive = torch.arange(n_alive, dtype=torch.int32, device=device) # [N]\n rays_t = nears.clone() # [N]\n step = 0\n while step < max_steps: # hard coded max step\n # count alive rays \n n_alive = rays_alive.shape[0]\n # exit loop\n if n_alive <= 0:\n break\n # decide compact_steps\n n_step = max(min(N // n_alive, 8), 1)\n xyzs, dirs, deltas = raymarching.march_rays(n_alive, n_step, rays_alive, rays_t, rays_o, rays_d, self.bound, self.density_bitfield, self.cascade, self.grid_size, nears, fars, 128, perturb if step == 0 else False, dt_gamma, max_steps)\n sigmas, rgbs, normals = self(xyzs, dirs, light_d, ratio=ambient_ratio, shading=shading)\n normals = (normals + 1) / 2\n raymarching.composite_rays(n_alive, n_step, rays_alive, rays_t, sigmas, rgbs, normals, deltas, weights_sum, depth, image, normal, T_thresh)\n rays_alive = rays_alive[rays_alive >= 0]\n step += n_step\n\n # mix background color\n if bg_color is None:\n bg_color = 1\n\n image = image + (1 - weights_sum).unsqueeze(-1) * bg_color\n image = image.view(*prefix, 3)\n\n if not self.training:\n normal = normal + (1 - weights_sum).unsqueeze(-1) * bg_color\n normal = normal.view(*prefix, 3)\n\n\n 
bg_depth = self.opt.max_depth\n depth = depth + (1 - weights_sum) * bg_depth\n if depth_scale is not None:\n depth = depth.view(*prefix, 1) * depth_scale.view(*prefix, 1)\n else:\n depth = depth.view(*prefix, 1)\n\n weights_sum = weights_sum.reshape(*prefix)\n\n mask = (nears < fars).reshape(*prefix)\n\n results['image'] = image\n results['depth'] = depth\n results['weights_sum'] = weights_sum\n results['mask'] = mask\n if not self.training:\n results['normal'] = normal\n\n return results\n\n\n @torch.no_grad()\n def update_extra_state(self, decay=0.95, S=128):\n # call before each epoch to update extra states.\n\n if not self.cuda_ray:\n return \n \n ### update density grid\n tmp_grid = - torch.ones_like(self.density_grid)\n \n X = torch.arange(self.grid_size, dtype=torch.int32, device=self.aabb_train.device).split(S)\n Y = torch.arange(self.grid_size, dtype=torch.int32, device=self.aabb_train.device).split(S)\n Z = torch.arange(self.grid_size, dtype=torch.int32, device=self.aabb_train.device).split(S)\n\n for xs in X:\n for ys in Y:\n for zs in Z:\n \n # construct points\n xx, yy, zz = custom_meshgrid(xs, ys, zs)\n coords = torch.cat([xx.reshape(-1, 1), yy.reshape(-1, 1), zz.reshape(-1, 1)], dim=-1) # [N, 3], in [0, 128)\n indices = raymarching.morton3D(coords).long() # [N]\n xyzs = 2 * coords.float() / (self.grid_size - 1) - 1 # [N, 3] in [-1, 1]\n\n # cascading\n for cas in range(self.cascade):\n bound = min(2 ** cas, self.bound)\n half_grid_size = bound / self.grid_size\n # scale to current cascade's resolution\n cas_xyzs = xyzs * (bound - half_grid_size)\n # add noise in [-hgs, hgs]\n cas_xyzs += (torch.rand_like(cas_xyzs) * 2 - 1) * half_grid_size\n # query density\n sigmas = self.density(cas_xyzs)['sigma'].reshape(-1).detach()\n # assign \n tmp_grid[cas, indices] = sigmas\n \n # ema update\n valid_mask = self.density_grid >= 0\n self.density_grid[valid_mask] = torch.maximum(self.density_grid[valid_mask] * decay, tmp_grid[valid_mask])\n self.mean_density = torch.mean(self.density_grid[valid_mask]).item()\n self.iter_density += 1\n\n # convert to bitfield\n density_thresh = min(self.mean_density, self.density_thresh)\n self.density_bitfield = raymarching.packbits(self.density_grid, density_thresh, self.density_bitfield)\n\n ### update step counter\n total_step = min(16, self.local_step)\n if total_step > 0:\n self.mean_count = int(self.step_counter[:total_step, 0].sum().item() / total_step)\n self.local_step = 0\n\n # print(f'[density grid] min={self.density_grid.min().item():.4f}, max={self.density_grid.max().item():.4f}, mean={self.mean_density:.4f}, occ_rate={(self.density_grid > density_thresh).sum() / (128**3 * self.cascade):.3f} | [step counter] mean={self.mean_count}')\n\n\n def render(self, rays_o, rays_d, depth_scale=None, staged=False, max_ray_batch=4096, **kwargs):\n # rays_o, rays_d: [B, N, 3], assumes B == 1\n # return: pred_rgb: [B, N, 3]\n\n if self.cuda_ray:\n _run = self.run_cuda\n else:\n _run = self.run\n\n B, N = rays_o.shape[:2]\n device = rays_o.device\n\n # never stage when cuda_ray\n if staged and not self.cuda_ray:\n depth = torch.empty((B, N, 1), device=device)\n image = torch.empty((B, N, 3), device=device)\n weights_sum = torch.empty((B, N), device=device)\n\n for b in range(B):\n head = 0\n while head < N:\n tail = min(head + max_ray_batch, N)\n results_ = _run(rays_o[b:b+1, head:tail], rays_d[b:b+1, head:tail], **kwargs)\n depth[b:b+1, head:tail] = results_['depth']\n weights_sum[b:b+1, head:tail] = results_['weights_sum']\n image[b:b+1, head:tail] = 
results_['image']\n head += max_ray_batch\n \n results = {}\n results['depth'] = depth\n results['image'] = image\n results['weights_sum'] = weights_sum\n\n else:\n results = _run(rays_o, rays_d, depth_scale, **kwargs)\n\n return results" }, { "identifier": "get_encoder", "path": "encoding.py", "snippet": "def get_encoder(encoding, input_dim=3, \n multires=6, \n degree=4,\n num_levels=16, level_dim=2, base_resolution=16, log2_hashmap_size=19, desired_resolution=2048, align_corners=False, interpolation='linear',\n **kwargs):\n\n if encoding == 'None':\n return lambda x, **kwargs: x, input_dim\n \n elif encoding == 'frequency_torch':\n encoder = FreqEncoder_torch(input_dim=input_dim, max_freq_log2=multires-1, N_freqs=multires, log_sampling=True)\n\n elif encoding == 'frequency': # CUDA implementation, faster than torch.\n from freqencoder import FreqEncoder\n encoder = FreqEncoder(input_dim=input_dim, degree=multires)\n\n elif encoding == 'sphere_harmonics':\n from shencoder import SHEncoder\n encoder = SHEncoder(input_dim=input_dim, degree=degree)\n\n elif encoding == 'hashgrid':\n from gridencoder import GridEncoder\n encoder = GridEncoder(input_dim=input_dim, num_levels=num_levels, level_dim=level_dim, base_resolution=base_resolution, log2_hashmap_size=log2_hashmap_size, desired_resolution=desired_resolution, gridtype='hash', align_corners=align_corners, interpolation=interpolation)\n \n elif encoding == 'tiledgrid':\n from gridencoder import GridEncoder\n encoder = GridEncoder(input_dim=input_dim, num_levels=num_levels, level_dim=level_dim, base_resolution=base_resolution, log2_hashmap_size=log2_hashmap_size, desired_resolution=desired_resolution, gridtype='tiled', align_corners=align_corners)\n\n else:\n raise NotImplementedError('Unknown encoding mode, choose from [None, frequency, sphere_harmonics, hashgrid, tiledgrid]')\n\n return encoder, encoder.output_dim" }, { "identifier": "safe_normalize", "path": "nerf/utils.py", "snippet": "def safe_normalize(x, eps=1e-20):\n return x / torch.sqrt(torch.clamp(torch.sum(x * x, -1, keepdim=True), min=eps, max=1e32))" } ]
import torch import torch.nn as nn import torch.nn.functional as F import numpy as np import tinycudann as tcnn from activation import trunc_exp from .renderer import NeRFRenderer from encoding import get_encoder from .utils import safe_normalize
token_num: 12,249
super().__init__() self.dim_in = dim_in self.dim_out = dim_out self.dim_hidden = dim_hidden self.num_layers = num_layers net = [] for l in range(num_layers): net.append(nn.Linear(self.dim_in if l == 0 else self.dim_hidden, self.dim_out if l == num_layers - 1 else self.dim_hidden, bias=bias)) self.net = nn.ModuleList(net) def forward(self, x): for l in range(self.num_layers): x = self.net[l](x) if l != self.num_layers - 1: x = F.relu(x, inplace=True) return x class NeRFNetwork(NeRFRenderer): def __init__(self, opt, bg_color=None, num_layers=3, hidden_dim=64, num_layers_bg=2, hidden_dim_bg=64, ): super().__init__(opt) self.num_layers = num_layers self.hidden_dim = hidden_dim self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') per_level_scale = np.exp2(np.log2(2048 * self.bound / 16) / (16 - 1)) self.encoder = tcnn.Encoding( n_input_dims=3, encoding_config={ "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "per_level_scale": per_level_scale, }, dtype=torch.float32 ) self.sigma_net = MLP(32, 4, hidden_dim, num_layers, bias=True) if bg_color is not None: bg64 = torch.nn.functional.interpolate( bg_color, size=64, mode="bicubic", align_corners=True, ) # [1, 1, 512, 512] [80~150] bg64 = bg64.permute(0, 2, 3, 1).view(-1, 3) self.bg_color = torch.tensor(bg64, requires_grad=False, device=self.device) else: self.bg_color = torch.rand((4096, 3), requires_grad=False, device=self.device) # background network if self.bg_radius > 0: self.num_layers_bg = num_layers_bg self.hidden_dim_bg = hidden_dim_bg self.encoder_bg, self.in_dim_bg = get_encoder('frequency', input_dim=3) self.bg_net = MLP(self.in_dim_bg, 3, hidden_dim_bg, num_layers_bg, bias=True) else: self.bg_net = None def set_device(self, device): self.encoder.to(device) self.sigma_net.to(device) def gaussian(self, x): # x: [B, N, 3] d = (x ** 2).sum(-1) g = self.opt.blob_density * torch.exp(-d / (2 * self.opt.blob_radius ** 2)) return g def common_forward(self, x): # x: [N, 3], in [-bound, bound] # sigma h = (x + self.bound) / (2 * self.bound) # to [0, 1] h = self.encoder(h) h = self.sigma_net(h) sigma = trunc_exp(h[..., 0] + self.gaussian(x)) albedo = torch.sigmoid(h[..., 1:]) return sigma, albedo # ref: https://github.com/zhaofuq/Instant-NSR/blob/main/nerf/network_sdf.py#L192 def finite_difference_normal(self, x, epsilon=1e-2): # x: [N, 3] dx_pos, _ = self.common_forward((x + torch.tensor([[epsilon, 0.00, 0.00]], device=x.device)).clamp(-self.bound, self.bound)) dx_neg, _ = self.common_forward((x + torch.tensor([[-epsilon, 0.00, 0.00]], device=x.device)).clamp(-self.bound, self.bound)) dy_pos, _ = self.common_forward((x + torch.tensor([[0.00, epsilon, 0.00]], device=x.device)).clamp(-self.bound, self.bound)) dy_neg, _ = self.common_forward((x + torch.tensor([[0.00, -epsilon, 0.00]], device=x.device)).clamp(-self.bound, self.bound)) dz_pos, _ = self.common_forward((x + torch.tensor([[0.00, 0.00, epsilon]], device=x.device)).clamp(-self.bound, self.bound)) dz_neg, _ = self.common_forward((x + torch.tensor([[0.00, 0.00, -epsilon]], device=x.device)).clamp(-self.bound, self.bound)) normal = torch.stack([ 0.5 * (dx_pos - dx_neg) / epsilon, 0.5 * (dy_pos - dy_neg) / epsilon, 0.5 * (dz_pos - dz_neg) / epsilon ], dim=-1) return -normal def normal(self, x): normal = self.finite_difference_normal(x)
class MLP(nn.Module): def __init__(self, dim_in, dim_out, dim_hidden, num_layers, bias=True): super().__init__() self.dim_in = dim_in self.dim_out = dim_out self.dim_hidden = dim_hidden self.num_layers = num_layers net = [] for l in range(num_layers): net.append(nn.Linear(self.dim_in if l == 0 else self.dim_hidden, self.dim_out if l == num_layers - 1 else self.dim_hidden, bias=bias)) self.net = nn.ModuleList(net) def forward(self, x): for l in range(self.num_layers): x = self.net[l](x) if l != self.num_layers - 1: x = F.relu(x, inplace=True) return x class NeRFNetwork(NeRFRenderer): def __init__(self, opt, bg_color=None, num_layers=3, hidden_dim=64, num_layers_bg=2, hidden_dim_bg=64, ): super().__init__(opt) self.num_layers = num_layers self.hidden_dim = hidden_dim self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') per_level_scale = np.exp2(np.log2(2048 * self.bound / 16) / (16 - 1)) self.encoder = tcnn.Encoding( n_input_dims=3, encoding_config={ "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "per_level_scale": per_level_scale, }, dtype=torch.float32 ) self.sigma_net = MLP(32, 4, hidden_dim, num_layers, bias=True) if bg_color is not None: bg64 = torch.nn.functional.interpolate( bg_color, size=64, mode="bicubic", align_corners=True, ) # [1, 1, 512, 512] [80~150] bg64 = bg64.permute(0, 2, 3, 1).view(-1, 3) self.bg_color = torch.tensor(bg64, requires_grad=False, device=self.device) else: self.bg_color = torch.rand((4096, 3), requires_grad=False, device=self.device) # background network if self.bg_radius > 0: self.num_layers_bg = num_layers_bg self.hidden_dim_bg = hidden_dim_bg self.encoder_bg, self.in_dim_bg = get_encoder('frequency', input_dim=3) self.bg_net = MLP(self.in_dim_bg, 3, hidden_dim_bg, num_layers_bg, bias=True) else: self.bg_net = None def set_device(self, device): self.encoder.to(device) self.sigma_net.to(device) def gaussian(self, x): # x: [B, N, 3] d = (x ** 2).sum(-1) g = self.opt.blob_density * torch.exp(-d / (2 * self.opt.blob_radius ** 2)) return g def common_forward(self, x): # x: [N, 3], in [-bound, bound] # sigma h = (x + self.bound) / (2 * self.bound) # to [0, 1] h = self.encoder(h) h = self.sigma_net(h) sigma = trunc_exp(h[..., 0] + self.gaussian(x)) albedo = torch.sigmoid(h[..., 1:]) return sigma, albedo # ref: https://github.com/zhaofuq/Instant-NSR/blob/main/nerf/network_sdf.py#L192 def finite_difference_normal(self, x, epsilon=1e-2): # x: [N, 3] dx_pos, _ = self.common_forward((x + torch.tensor([[epsilon, 0.00, 0.00]], device=x.device)).clamp(-self.bound, self.bound)) dx_neg, _ = self.common_forward((x + torch.tensor([[-epsilon, 0.00, 0.00]], device=x.device)).clamp(-self.bound, self.bound)) dy_pos, _ = self.common_forward((x + torch.tensor([[0.00, epsilon, 0.00]], device=x.device)).clamp(-self.bound, self.bound)) dy_neg, _ = self.common_forward((x + torch.tensor([[0.00, -epsilon, 0.00]], device=x.device)).clamp(-self.bound, self.bound)) dz_pos, _ = self.common_forward((x + torch.tensor([[0.00, 0.00, epsilon]], device=x.device)).clamp(-self.bound, self.bound)) dz_neg, _ = self.common_forward((x + torch.tensor([[0.00, 0.00, -epsilon]], device=x.device)).clamp(-self.bound, self.bound)) normal = torch.stack([ 0.5 * (dx_pos - dx_neg) / epsilon, 0.5 * (dy_pos - dy_neg) / epsilon, 0.5 * (dz_pos - dz_neg) / epsilon ], dim=-1) return -normal def normal(self, x): normal = self.finite_difference_normal(x)
next_line: normal = safe_normalize(normal)
gold_snippet_index: 3
created_at: 2023-12-14 11:03:35+00:00
level: 16k
repo_name: mkang315/ASF-YOLO
file_path: segment/predict.py
[ { "identifier": "DetectMultiBackend", "path": "models/common.py", "snippet": "class DetectMultiBackend(nn.Module):\n # YOLOv5 MultiBackend class for python inference on various backends\n def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, data=None, fp16=False, fuse=True):\n # Usage:\n # PyTorch: weights = *.pt\n # TorchScript: *.torchscript\n # ONNX Runtime: *.onnx\n # ONNX OpenCV DNN: *.onnx --dnn\n # OpenVINO: *_openvino_model\n # CoreML: *.mlmodel\n # TensorRT: *.engine\n # TensorFlow SavedModel: *_saved_model\n # TensorFlow GraphDef: *.pb\n # TensorFlow Lite: *.tflite\n # TensorFlow Edge TPU: *_edgetpu.tflite\n # PaddlePaddle: *_paddle_model\n from models.experimental import attempt_download, attempt_load # scoped to avoid circular import\n\n super().__init__()\n w = str(weights[0] if isinstance(weights, list) else weights)\n pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle, triton = self._model_type(w)\n fp16 &= pt or jit or onnx or engine # FP16\n nhwc = coreml or saved_model or pb or tflite or edgetpu # BHWC formats (vs torch BCWH)\n stride = 32 # default stride\n cuda = torch.cuda.is_available() and device.type != 'cpu' # use CUDA\n if not (pt or triton):\n w = attempt_download(w) # download if not local\n\n if pt: # PyTorch\n model = attempt_load(weights if isinstance(weights, list) else w, device=device, inplace=True, fuse=fuse)\n stride = max(int(model.stride.max()), 32) # model stride\n names = model.module.names if hasattr(model, 'module') else model.names # get class names\n model.half() if fp16 else model.float()\n self.model = model # explicitly assign for to(), cpu(), cuda(), half()\n elif jit: # TorchScript\n LOGGER.info(f'Loading {w} for TorchScript inference...')\n extra_files = {'config.txt': ''} # model metadata\n model = torch.jit.load(w, _extra_files=extra_files, map_location=device)\n model.half() if fp16 else model.float()\n if extra_files['config.txt']: # load metadata dict\n d = json.loads(extra_files['config.txt'],\n object_hook=lambda d: {int(k) if k.isdigit() else k: v\n for k, v in d.items()})\n stride, names = int(d['stride']), d['names']\n elif dnn: # ONNX OpenCV DNN\n LOGGER.info(f'Loading {w} for ONNX OpenCV DNN inference...')\n check_requirements('opencv-python>=4.5.4')\n net = cv2.dnn.readNetFromONNX(w)\n elif onnx: # ONNX Runtime\n LOGGER.info(f'Loading {w} for ONNX Runtime inference...')\n check_requirements(('onnx', 'onnxruntime-gpu' if cuda else 'onnxruntime'))\n import onnxruntime\n providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider']\n session = onnxruntime.InferenceSession(w, providers=providers)\n output_names = [x.name for x in session.get_outputs()]\n meta = session.get_modelmeta().custom_metadata_map # metadata\n if 'stride' in meta:\n stride, names = int(meta['stride']), eval(meta['names'])\n elif xml: # OpenVINO\n LOGGER.info(f'Loading {w} for OpenVINO inference...')\n check_requirements('openvino') # requires openvino-dev: https://pypi.org/project/openvino-dev/\n from openvino.runtime import Core, Layout, get_batch\n ie = Core()\n if not Path(w).is_file(): # if not *.xml\n w = next(Path(w).glob('*.xml')) # get *.xml file from *_openvino_model dir\n network = ie.read_model(model=w, weights=Path(w).with_suffix('.bin'))\n if network.get_parameters()[0].get_layout().empty:\n network.get_parameters()[0].set_layout(Layout(\"NCHW\"))\n batch_dim = get_batch(network)\n if batch_dim.is_static:\n batch_size = batch_dim.get_length()\n 
executable_network = ie.compile_model(network, device_name=\"CPU\") # device_name=\"MYRIAD\" for Intel NCS2\n stride, names = self._load_metadata(Path(w).with_suffix('.yaml')) # load metadata\n elif engine: # TensorRT\n LOGGER.info(f'Loading {w} for TensorRT inference...')\n import tensorrt as trt # https://developer.nvidia.com/nvidia-tensorrt-download\n check_version(trt.__version__, '7.0.0', hard=True) # require tensorrt>=7.0.0\n if device.type == 'cpu':\n device = torch.device('cuda:0')\n Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr'))\n logger = trt.Logger(trt.Logger.INFO)\n with open(w, 'rb') as f, trt.Runtime(logger) as runtime:\n model = runtime.deserialize_cuda_engine(f.read())\n context = model.create_execution_context()\n bindings = OrderedDict()\n output_names = []\n fp16 = False # default updated below\n dynamic = False\n for i in range(model.num_bindings):\n name = model.get_binding_name(i)\n dtype = trt.nptype(model.get_binding_dtype(i))\n if model.binding_is_input(i):\n if -1 in tuple(model.get_binding_shape(i)): # dynamic\n dynamic = True\n context.set_binding_shape(i, tuple(model.get_profile_shape(0, i)[2]))\n if dtype == np.float16:\n fp16 = True\n else: # output\n output_names.append(name)\n shape = tuple(context.get_binding_shape(i))\n im = torch.from_numpy(np.empty(shape, dtype=dtype)).to(device)\n bindings[name] = Binding(name, dtype, shape, im, int(im.data_ptr()))\n binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items())\n batch_size = bindings['images'].shape[0] # if dynamic, this is instead max batch size\n elif coreml: # CoreML\n LOGGER.info(f'Loading {w} for CoreML inference...')\n import coremltools as ct\n model = ct.models.MLModel(w)\n elif saved_model: # TF SavedModel\n LOGGER.info(f'Loading {w} for TensorFlow SavedModel inference...')\n import tensorflow as tf\n keras = False # assume TF1 saved_model\n model = tf.keras.models.load_model(w) if keras else tf.saved_model.load(w)\n elif pb: # GraphDef https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt\n LOGGER.info(f'Loading {w} for TensorFlow GraphDef inference...')\n import tensorflow as tf\n\n def wrap_frozen_graph(gd, inputs, outputs):\n x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=\"\"), []) # wrapped\n ge = x.graph.as_graph_element\n return x.prune(tf.nest.map_structure(ge, inputs), tf.nest.map_structure(ge, outputs))\n\n def gd_outputs(gd):\n name_list, input_list = [], []\n for node in gd.node: # tensorflow.core.framework.node_def_pb2.NodeDef\n name_list.append(node.name)\n input_list.extend(node.input)\n return sorted(f'{x}:0' for x in list(set(name_list) - set(input_list)) if not x.startswith('NoOp'))\n\n gd = tf.Graph().as_graph_def() # TF GraphDef\n with open(w, 'rb') as f:\n gd.ParseFromString(f.read())\n frozen_func = wrap_frozen_graph(gd, inputs=\"x:0\", outputs=gd_outputs(gd))\n elif tflite or edgetpu: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python\n try: # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu\n from tflite_runtime.interpreter import Interpreter, load_delegate\n except ImportError:\n import tensorflow as tf\n Interpreter, load_delegate = tf.lite.Interpreter, tf.lite.experimental.load_delegate,\n if edgetpu: # TF Edge TPU https://coral.ai/software/#edgetpu-runtime\n LOGGER.info(f'Loading {w} for TensorFlow Lite Edge TPU inference...')\n delegate = {\n 'Linux': 'libedgetpu.so.1',\n 'Darwin': 'libedgetpu.1.dylib',\n 'Windows': 
'edgetpu.dll'}[platform.system()]\n interpreter = Interpreter(model_path=w, experimental_delegates=[load_delegate(delegate)])\n else: # TFLite\n LOGGER.info(f'Loading {w} for TensorFlow Lite inference...')\n interpreter = Interpreter(model_path=w) # load TFLite model\n interpreter.allocate_tensors() # allocate\n input_details = interpreter.get_input_details() # inputs\n output_details = interpreter.get_output_details() # outputs\n # load metadata\n with contextlib.suppress(zipfile.BadZipFile):\n with zipfile.ZipFile(w, \"r\") as model:\n meta_file = model.namelist()[0]\n meta = ast.literal_eval(model.read(meta_file).decode(\"utf-8\"))\n stride, names = int(meta['stride']), meta['names']\n elif tfjs: # TF.js\n raise NotImplementedError('ERROR: YOLOv5 TF.js inference is not supported')\n elif paddle: # PaddlePaddle\n LOGGER.info(f'Loading {w} for PaddlePaddle inference...')\n check_requirements('paddlepaddle-gpu' if cuda else 'paddlepaddle')\n import paddle.inference as pdi\n if not Path(w).is_file(): # if not *.pdmodel\n w = next(Path(w).rglob('*.pdmodel')) # get *.pdmodel file from *_paddle_model dir\n weights = Path(w).with_suffix('.pdiparams')\n config = pdi.Config(str(w), str(weights))\n if cuda:\n config.enable_use_gpu(memory_pool_init_size_mb=2048, device_id=0)\n predictor = pdi.create_predictor(config)\n input_handle = predictor.get_input_handle(predictor.get_input_names()[0])\n output_names = predictor.get_output_names()\n elif triton: # NVIDIA Triton Inference Server\n LOGGER.info(f'Using {w} as Triton Inference Server...')\n check_requirements('tritonclient[all]')\n from utils.triton import TritonRemoteModel\n model = TritonRemoteModel(url=w)\n nhwc = model.runtime.startswith(\"tensorflow\")\n else:\n raise NotImplementedError(f'ERROR: {w} is not a supported format')\n\n # class names\n if 'names' not in locals():\n names = yaml_load(data)['names'] if data else {i: f'class{i}' for i in range(999)}\n if names[0] == 'n01440764' and len(names) == 1000: # ImageNet\n names = yaml_load(ROOT / 'data/ImageNet.yaml')['names'] # human-readable names\n\n self.__dict__.update(locals()) # assign all variables to self\n\n def forward(self, im, augment=False, visualize=False):\n # YOLOv5 MultiBackend inference\n b, ch, h, w = im.shape # batch, channel, height, width\n if self.fp16 and im.dtype != torch.float16:\n im = im.half() # to FP16\n if self.nhwc:\n im = im.permute(0, 2, 3, 1) # torch BCHW to numpy BHWC shape(1,320,192,3)\n\n if self.pt: # PyTorch\n y = self.model(im, augment=augment, visualize=visualize) if augment or visualize else self.model(im)\n elif self.jit: # TorchScript\n y = self.model(im)\n elif self.dnn: # ONNX OpenCV DNN\n im = im.cpu().numpy() # torch to numpy\n self.net.setInput(im)\n y = self.net.forward()\n elif self.onnx: # ONNX Runtime\n im = im.cpu().numpy() # torch to numpy\n y = self.session.run(self.output_names, {self.session.get_inputs()[0].name: im})\n elif self.xml: # OpenVINO\n im = im.cpu().numpy() # FP32\n y = list(self.executable_network([im]).values())\n elif self.engine: # TensorRT\n if self.dynamic and im.shape != self.bindings['images'].shape:\n i = self.model.get_binding_index('images')\n self.context.set_binding_shape(i, im.shape) # reshape if dynamic\n self.bindings['images'] = self.bindings['images']._replace(shape=im.shape)\n for name in self.output_names:\n i = self.model.get_binding_index(name)\n self.bindings[name].data.resize_(tuple(self.context.get_binding_shape(i)))\n s = self.bindings['images'].shape\n assert im.shape == s, f\"input size 
{im.shape} {'>' if self.dynamic else 'not equal to'} max model size {s}\"\n self.binding_addrs['images'] = int(im.data_ptr())\n self.context.execute_v2(list(self.binding_addrs.values()))\n y = [self.bindings[x].data for x in sorted(self.output_names)]\n elif self.coreml: # CoreML\n im = im.cpu().numpy()\n im = Image.fromarray((im[0] * 255).astype('uint8'))\n # im = im.resize((192, 320), Image.ANTIALIAS)\n y = self.model.predict({'image': im}) # coordinates are xywh normalized\n if 'confidence' in y:\n box = xywh2xyxy(y['coordinates'] * [[w, h, w, h]]) # xyxy pixels\n conf, cls = y['confidence'].max(1), y['confidence'].argmax(1).astype(np.float)\n y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1)\n else:\n y = list(reversed(y.values())) # reversed for segmentation models (pred, proto)\n elif self.paddle: # PaddlePaddle\n im = im.cpu().numpy().astype(np.float32)\n self.input_handle.copy_from_cpu(im)\n self.predictor.run()\n y = [self.predictor.get_output_handle(x).copy_to_cpu() for x in self.output_names]\n elif self.triton: # NVIDIA Triton Inference Server\n y = self.model(im)\n else: # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU)\n im = im.cpu().numpy()\n if self.saved_model: # SavedModel\n y = self.model(im, training=False) if self.keras else self.model(im)\n elif self.pb: # GraphDef\n y = self.frozen_func(x=self.tf.constant(im))\n else: # Lite or Edge TPU\n input = self.input_details[0]\n int8 = input['dtype'] == np.uint8 # is TFLite quantized uint8 model\n if int8:\n scale, zero_point = input['quantization']\n im = (im / scale + zero_point).astype(np.uint8) # de-scale\n self.interpreter.set_tensor(input['index'], im)\n self.interpreter.invoke()\n y = []\n for output in self.output_details:\n x = self.interpreter.get_tensor(output['index'])\n if int8:\n scale, zero_point = output['quantization']\n x = (x.astype(np.float32) - zero_point) * scale # re-scale\n y.append(x)\n y = [x if isinstance(x, np.ndarray) else x.numpy() for x in y]\n y[0][..., :4] *= [w, h, w, h] # xywh normalized to pixels\n\n if isinstance(y, (list, tuple)):\n return self.from_numpy(y[0]) if len(y) == 1 else [self.from_numpy(x) for x in y]\n else:\n return self.from_numpy(y)\n\n def from_numpy(self, x):\n return torch.from_numpy(x).to(self.device) if isinstance(x, np.ndarray) else x\n\n def warmup(self, imgsz=(1, 3, 640, 640)):\n # Warmup model by running inference once\n warmup_types = self.pt, self.jit, self.onnx, self.engine, self.saved_model, self.pb, self.triton\n if any(warmup_types) and (self.device.type != 'cpu' or self.triton):\n im = torch.empty(*imgsz, dtype=torch.half if self.fp16 else torch.float, device=self.device) # input\n for _ in range(2 if self.jit else 1): #\n self.forward(im) # warmup\n\n @staticmethod\n def _model_type(p='path/to/model.pt'):\n # Return model type from model path, i.e. 
path='path/to/model.onnx' -> type=onnx\n # types = [pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle]\n from export import export_formats\n from utils.downloads import is_url\n sf = list(export_formats().Suffix) # export suffixes\n if not is_url(p, check=False):\n check_suffix(p, sf) # checks\n url = urlparse(p) # if url may be Triton inference server\n types = [s in Path(p).name for s in sf]\n types[8] &= not types[9] # tflite &= not edgetpu\n triton = not any(types) and all([any(s in url.scheme for s in [\"http\", \"grpc\"]), url.netloc])\n return types + [triton]\n\n @staticmethod\n def _load_metadata(f=Path('path/to/meta.yaml')):\n # Load metadata from meta.yaml if it exists\n if f.exists():\n d = yaml_load(f)\n return d['stride'], d['names'] # assign stride, names\n return None, None" }, { "identifier": "IMG_FORMATS", "path": "utils/dataloaders.py", "snippet": "IMG_FORMATS = 'bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp', 'pfm' # include image suffixes" }, { "identifier": "VID_FORMATS", "path": "utils/dataloaders.py", "snippet": "VID_FORMATS = 'asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ts', 'wmv' # include video suffixes" }, { "identifier": "LoadImages", "path": "utils/dataloaders.py", "snippet": "class LoadImages:\n # YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4`\n def __init__(self, path, img_size=640, stride=32, auto=True, transforms=None, vid_stride=1):\n files = []\n for p in sorted(path) if isinstance(path, (list, tuple)) else [path]:\n p = str(Path(p).resolve())\n if '*' in p:\n files.extend(sorted(glob.glob(p, recursive=True))) # glob\n elif os.path.isdir(p):\n files.extend(sorted(glob.glob(os.path.join(p, '*.*')))) # dir\n elif os.path.isfile(p):\n files.append(p) # files\n else:\n raise FileNotFoundError(f'{p} does not exist')\n\n images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS]\n videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS]\n ni, nv = len(images), len(videos)\n\n self.img_size = img_size\n self.stride = stride\n self.files = images + videos\n self.nf = ni + nv # number of files\n self.video_flag = [False] * ni + [True] * nv\n self.mode = 'image'\n self.auto = auto\n self.transforms = transforms # optional\n self.vid_stride = vid_stride # video frame-rate stride\n if any(videos):\n self._new_video(videos[0]) # new video\n else:\n self.cap = None\n assert self.nf > 0, f'No images or videos found in {p}. 
' \\\n f'Supported formats are:\\nimages: {IMG_FORMATS}\\nvideos: {VID_FORMATS}'\n\n def __iter__(self):\n self.count = 0\n return self\n\n def __next__(self):\n if self.count == self.nf:\n raise StopIteration\n path = self.files[self.count]\n\n if self.video_flag[self.count]:\n # Read video\n self.mode = 'video'\n for _ in range(self.vid_stride):\n self.cap.grab()\n ret_val, im0 = self.cap.retrieve()\n while not ret_val:\n self.count += 1\n self.cap.release()\n if self.count == self.nf: # last video\n raise StopIteration\n path = self.files[self.count]\n self._new_video(path)\n ret_val, im0 = self.cap.read()\n\n self.frame += 1\n # im0 = self._cv2_rotate(im0) # for use if cv2 autorotation is False\n s = f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: '\n\n else:\n # Read image\n self.count += 1\n im0 = cv2.imread(path) # BGR\n assert im0 is not None, f'Image Not Found {path}'\n s = f'image {self.count}/{self.nf} {path}: '\n\n if self.transforms:\n im = self.transforms(im0) # transforms\n else:\n im = letterbox(im0, self.img_size, stride=self.stride, auto=self.auto)[0] # padded resize\n im = im.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB\n im = np.ascontiguousarray(im) # contiguous\n\n return path, im, im0, self.cap, s\n\n def _new_video(self, path):\n # Create a new video capture object\n self.frame = 0\n self.cap = cv2.VideoCapture(path)\n self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT) / self.vid_stride)\n self.orientation = int(self.cap.get(cv2.CAP_PROP_ORIENTATION_META)) # rotation degrees\n # self.cap.set(cv2.CAP_PROP_ORIENTATION_AUTO, 0) # disable https://github.com/ultralytics/yolov5/issues/8493\n\n def _cv2_rotate(self, im):\n # Rotate a cv2 video manually\n if self.orientation == 0:\n return cv2.rotate(im, cv2.ROTATE_90_CLOCKWISE)\n elif self.orientation == 180:\n return cv2.rotate(im, cv2.ROTATE_90_COUNTERCLOCKWISE)\n elif self.orientation == 90:\n return cv2.rotate(im, cv2.ROTATE_180)\n return im\n\n def __len__(self):\n return self.nf # number of files" }, { "identifier": "LoadScreenshots", "path": "utils/dataloaders.py", "snippet": "class LoadScreenshots:\n # YOLOv5 screenshot dataloader, i.e. 
`python detect.py --source \"screen 0 100 100 512 256\"`\n def __init__(self, source, img_size=640, stride=32, auto=True, transforms=None):\n # source = [screen_number left top width height] (pixels)\n check_requirements('mss')\n import mss\n\n source, *params = source.split()\n self.screen, left, top, width, height = 0, None, None, None, None # default to full screen 0\n if len(params) == 1:\n self.screen = int(params[0])\n elif len(params) == 4:\n left, top, width, height = (int(x) for x in params)\n elif len(params) == 5:\n self.screen, left, top, width, height = (int(x) for x in params)\n self.img_size = img_size\n self.stride = stride\n self.transforms = transforms\n self.auto = auto\n self.mode = 'stream'\n self.frame = 0\n self.sct = mss.mss()\n\n # Parse monitor shape\n monitor = self.sct.monitors[self.screen]\n self.top = monitor[\"top\"] if top is None else (monitor[\"top\"] + top)\n self.left = monitor[\"left\"] if left is None else (monitor[\"left\"] + left)\n self.width = width or monitor[\"width\"]\n self.height = height or monitor[\"height\"]\n self.monitor = {\"left\": self.left, \"top\": self.top, \"width\": self.width, \"height\": self.height}\n\n def __iter__(self):\n return self\n\n def __next__(self):\n # mss screen capture: get raw pixels from the screen as np array\n im0 = np.array(self.sct.grab(self.monitor))[:, :, :3] # [:, :, :3] BGRA to BGR\n s = f\"screen {self.screen} (LTWH): {self.left},{self.top},{self.width},{self.height}: \"\n\n if self.transforms:\n im = self.transforms(im0) # transforms\n else:\n im = letterbox(im0, self.img_size, stride=self.stride, auto=self.auto)[0] # padded resize\n im = im.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB\n im = np.ascontiguousarray(im) # contiguous\n self.frame += 1\n return str(self.screen), im, im0, None, s # screen, img, original img, im0s, s" }, { "identifier": "LoadStreams", "path": "utils/dataloaders.py", "snippet": "class LoadStreams:\n # YOLOv5 streamloader, i.e. `python detect.py --source 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP streams`\n def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True, transforms=None, vid_stride=1):\n torch.backends.cudnn.benchmark = True # faster for fixed-size inference\n self.mode = 'stream'\n self.img_size = img_size\n self.stride = stride\n self.vid_stride = vid_stride # video frame-rate stride\n sources = Path(sources).read_text().rsplit() if os.path.isfile(sources) else [sources]\n n = len(sources)\n self.sources = [clean_str(x) for x in sources] # clean source names for later\n self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n\n for i, s in enumerate(sources): # index, source\n # Start thread to read frames from video stream\n st = f'{i + 1}/{n}: {s}... '\n if urlparse(s).hostname in ('www.youtube.com', 'youtube.com', 'youtu.be'): # if source is YouTube video\n # YouTube format i.e. 'https://www.youtube.com/watch?v=Zgi9g1ksQHc' or 'https://youtu.be/Zgi9g1ksQHc'\n check_requirements(('pafy', 'youtube_dl==2020.12.2'))\n import pafy\n s = pafy.new(s).getbest(preftype=\"mp4\").url # YouTube URL\n s = eval(s) if s.isnumeric() else s # i.e. s = '0' local webcam\n if s == 0:\n assert not is_colab(), '--source 0 webcam unsupported on Colab. Rerun command in a local environment.'\n assert not is_kaggle(), '--source 0 webcam unsupported on Kaggle. 
Rerun command in a local environment.'\n cap = cv2.VideoCapture(s)\n assert cap.isOpened(), f'{st}Failed to open {s}'\n w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n fps = cap.get(cv2.CAP_PROP_FPS) # warning: may return 0 or nan\n self.frames[i] = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float('inf') # infinite stream fallback\n self.fps[i] = max((fps if math.isfinite(fps) else 0) % 100, 0) or 30 # 30 FPS fallback\n\n _, self.imgs[i] = cap.read() # guarantee first frame\n self.threads[i] = Thread(target=self.update, args=([i, cap, s]), daemon=True)\n LOGGER.info(f\"{st} Success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)\")\n self.threads[i].start()\n LOGGER.info('') # newline\n\n # check for common shapes\n s = np.stack([letterbox(x, img_size, stride=stride, auto=auto)[0].shape for x in self.imgs])\n self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal\n self.auto = auto and self.rect\n self.transforms = transforms # optional\n if not self.rect:\n LOGGER.warning('WARNING ⚠️ Stream shapes differ. For optimal performance supply similarly-shaped streams.')\n\n def update(self, i, cap, stream):\n # Read stream `i` frames in daemon thread\n n, f = 0, self.frames[i] # frame number, frame array\n while cap.isOpened() and n < f:\n n += 1\n cap.grab() # .read() = .grab() followed by .retrieve()\n if n % self.vid_stride == 0:\n success, im = cap.retrieve()\n if success:\n self.imgs[i] = im\n else:\n LOGGER.warning('WARNING ⚠️ Video stream unresponsive, please check your IP camera connection.')\n self.imgs[i] = np.zeros_like(self.imgs[i])\n cap.open(stream) # re-open stream if signal was lost\n time.sleep(0.0) # wait time\n\n def __iter__(self):\n self.count = -1\n return self\n\n def __next__(self):\n self.count += 1\n if not all(x.is_alive() for x in self.threads) or cv2.waitKey(1) == ord('q'): # q to quit\n cv2.destroyAllWindows()\n raise StopIteration\n\n im0 = self.imgs.copy()\n if self.transforms:\n im = np.stack([self.transforms(x) for x in im0]) # transforms\n else:\n im = np.stack([letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0] for x in im0]) # resize\n im = im[..., ::-1].transpose((0, 3, 1, 2)) # BGR to RGB, BHWC to BCHW\n im = np.ascontiguousarray(im) # contiguous\n\n return self.sources, im, im0, None, ''\n\n def __len__(self):\n return len(self.sources) # 1E12 frames = 32 streams at 30 FPS for 30 years" }, { "identifier": "LOGGER", "path": "utils/general.py", "snippet": "FILE = Path(__file__).resolve()\nROOT = FILE.parents[1] # YOLOv5 root directory\nRANK = int(os.getenv('RANK', -1))\nNUM_THREADS = min(8, max(1, os.cpu_count() - 1)) # number of YOLOv5 multiprocessing threads\nDATASETS_DIR = Path(os.getenv('YOLOv5_DATASETS_DIR', ROOT.parent / 'datasets')) # global datasets directory\nAUTOINSTALL = str(os.getenv('YOLOv5_AUTOINSTALL', True)).lower() == 'true' # global auto-install mode\nVERBOSE = str(os.getenv('YOLOv5_VERBOSE', True)).lower() == 'true' # global verbose mode\nTQDM_BAR_FORMAT = '{l_bar}{bar:10}| {n_fmt}/{total_fmt} {elapsed}' # tqdm bar format\nFONT = 'Arial.ttf' # https://ultralytics.com/assets/Arial.ttf\nLOGGING_NAME = \"yolov5\"\nLOGGER = logging.getLogger(LOGGING_NAME) # define globally (used in train.py, val.py, detect.py, etc.)\nCONFIG_DIR = user_config_dir() # Ultralytics settings dir\ndef is_ascii(s=''):\ndef is_chinese(s='人工智能'):\ndef is_colab():\ndef is_notebook():\ndef is_kaggle():\ndef is_docker() -> bool:\ndef is_writeable(dir, 
test=False):\ndef set_logging(name=LOGGING_NAME, verbose=True):\ndef user_config_dir(dir='Ultralytics', env_var='YOLOV5_CONFIG_DIR'):\n def __init__(self, t=0.0):\n def __enter__(self):\n def __exit__(self, type, value, traceback):\n def time(self):\n def __init__(self, seconds, *, timeout_msg='', suppress_timeout_errors=True):\n def _timeout_handler(self, signum, frame):\n def __enter__(self):\n def __exit__(self, exc_type, exc_val, exc_tb):\n def __init__(self, new_dir):\n def __enter__(self):\n def __exit__(self, exc_type, exc_val, exc_tb):\ndef methods(instance):\ndef print_args(args: Optional[dict] = None, show_file=True, show_func=False):\ndef init_seeds(seed=0, deterministic=False):\ndef intersect_dicts(da, db, exclude=()):\ndef get_default_args(func):\ndef get_latest_run(search_dir='.'):\ndef file_age(path=__file__):\ndef file_date(path=__file__):\ndef file_size(path):\ndef check_online():\n def run_once():\ndef git_describe(path=ROOT): # path must be a directory\ndef check_git_status(repo='ultralytics/yolov5', branch='master'):\ndef check_git_info(path='.'):\ndef check_python(minimum='3.7.0'):\ndef check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=False, hard=False, verbose=False):\ndef check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), install=True, cmds=''):\ndef check_img_size(imgsz, s=32, floor=0):\ndef check_imshow(warn=False):\ndef check_suffix(file='yolov5s.pt', suffix=('.pt',), msg=''):\ndef check_yaml(file, suffix=('.yaml', '.yml')):\ndef check_file(file, suffix=''):\ndef check_font(font=FONT, progress=False):\ndef check_dataset(data, autodownload=True):\ndef check_amp(model):\n def amp_allclose(model, im):\ndef yaml_load(file='data.yaml'):\ndef yaml_save(file='data.yaml', data={}):\ndef unzip_file(file, path=None, exclude=('.DS_Store', '__MACOSX')):\ndef url2file(url):\ndef download(url, dir='.', unzip=True, delete=True, curl=False, threads=1, retry=3):\n def download_one(url, dir):\ndef make_divisible(x, divisor):\ndef clean_str(s):\ndef one_cycle(y1=0.0, y2=1.0, steps=100):\ndef colorstr(*input):\ndef labels_to_class_weights(labels, nc=80):\ndef labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):\ndef coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper)\ndef xyxy2xywh(x):\ndef xywh2xyxy(x):\ndef xywhn2xyxy(x, w=640, h=640, padw=0, padh=0):\ndef xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0):\ndef xyn2xy(x, w=640, h=640, padw=0, padh=0):\ndef segment2box(segment, width=640, height=640):\ndef segments2boxes(segments):\ndef resample_segments(segments, n=1000):\ndef scale_boxes(img1_shape, boxes, img0_shape, ratio_pad=None):\ndef scale_segments(img1_shape, segments, img0_shape, ratio_pad=None, normalize=False):\ndef clip_boxes(boxes, shape):\ndef clip_segments(segments, shape):\ndef my_soft_nms(bboxes, scores, iou_thresh=0.5, sigma=0.5, score_threshold=0.25):\ndef non_max_suppression(\n prediction,\n conf_thres=0.25,\n iou_thres=0.45,\n classes=None,\n agnostic=False,\n multi_label=False,\n labels=(),\n max_det=300,\n nm=0, # number of masks\n):\ndef strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_optimizer()\ndef print_mutation(keys, results, hyp, save_dir, bucket, prefix=colorstr('evolve: ')):\ndef apply_classifier(x, model, img, im0):\ndef increment_path(path, exist_ok=False, sep='', mkdir=False):\ndef imread(path, flags=cv2.IMREAD_COLOR):\ndef imwrite(path, im):\ndef imshow(path, im):\nclass Profile(contextlib.ContextDecorator):\nclass 
Timeout(contextlib.ContextDecorator):\nclass WorkingDirectory(contextlib.ContextDecorator):" }, { "identifier": "Annotator", "path": "utils/plots.py", "snippet": "RANK = int(os.getenv('RANK', -1))\nclass Colors:\nclass Annotator:\n def __init__(self):\n def __call__(self, i, bgr=False):\n def hex2rgb(h): # rgb order (PIL)\ndef check_pil_font(font=FONT, size=10):\n def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=False, example='abc'):\n def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 255)):\n def masks(self, masks, colors, im_gpu, alpha=0.5, retina_masks=False):\n def rectangle(self, xy, fill=None, outline=None, width=1):\n def text(self, xy, text, txt_color=(255, 255, 255), anchor='top'):\n def fromarray(self, im):\n def result(self):\ndef feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detect/exp')):\ndef hist2d(x, y, n=100):\ndef butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5):\n def butter_lowpass(cutoff, fs, order):\ndef output_to_target(output, max_det=300):\ndef plot_images(images, targets, paths=None, fname='images.jpg', names=None):\ndef plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''):\ndef plot_val_txt(): # from utils.plots import *; plot_val()\ndef plot_targets_txt(): # from utils.plots import *; plot_targets_txt()\ndef plot_val_study(file='', dir='', x=None): # from utils.plots import *; plot_val_study()\ndef plot_labels(labels, names=(), save_dir=Path('')):\ndef imshow_cls(im, labels=None, pred=None, names=None, nmax=25, verbose=False, f=Path('images.jpg')):\ndef plot_evolve(evolve_csv='path/to/evolve.csv'): # from utils.plots import *; plot_evolve()\ndef plot_results(file='path/to/results.csv', dir=''):\ndef profile_idetection(start=0, stop=0, labels=(), save_dir=''):\ndef save_one_box(xyxy, im, file=Path('im.jpg'), gain=1.02, pad=10, square=False, BGR=False, save=True):" }, { "identifier": "masks2segments", "path": "utils/segment/general.py", "snippet": "def masks2segments(masks, strategy='largest'):\n # Convert masks(n,160,160) into segments(n,xy)\n segments = []\n for x in masks.int().cpu().numpy().astype('uint8'):\n c = cv2.findContours(x, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0]\n if c:\n if strategy == 'concat': # concatenate all segments\n c = np.concatenate([x.reshape(-1, 2) for x in c])\n elif strategy == 'largest': # select largest segment\n c = np.array(c[np.array([len(x) for x in c]).argmax()]).reshape(-1, 2)\n else:\n c = np.zeros((0, 2)) # no segments found\n segments.append(c.astype('float32'))\n return segments" }, { "identifier": "process_mask", "path": "utils/segment/general.py", "snippet": "def process_mask(protos, masks_in, bboxes, shape, upsample=False):\n \"\"\"\n Crop before upsample.\n proto_out: [mask_dim, mask_h, mask_w]\n out_masks: [n, mask_dim], n is number of masks after nms\n bboxes: [n, 4], n is number of masks after nms\n shape:input_image_size, (h, w)\n\n return: h, w, n\n \"\"\"\n\n c, mh, mw = protos.shape # CHW\n ih, iw = shape\n masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw) # CHW\n\n downsampled_bboxes = bboxes.clone()\n downsampled_bboxes[:, 0] *= mw / iw\n downsampled_bboxes[:, 2] *= mw / iw\n downsampled_bboxes[:, 3] *= mh / ih\n downsampled_bboxes[:, 1] *= mh / ih\n\n masks = crop_mask(masks, downsampled_bboxes) # CHW\n if upsample:\n masks = F.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[0] # CHW\n return masks.gt_(0.5)" }, { "identifier": "process_mask_native", 
"path": "utils/segment/general.py", "snippet": "def process_mask_native(protos, masks_in, bboxes, dst_shape):\n \"\"\"\n Crop after upsample.\n proto_out: [mask_dim, mask_h, mask_w]\n out_masks: [n, mask_dim], n is number of masks after nms\n bboxes: [n, 4], n is number of masks after nms\n shape:input_image_size, (h, w)\n\n return: h, w, n\n \"\"\"\n c, mh, mw = protos.shape # CHW\n masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw)\n gain = min(mh / dst_shape[0], mw / dst_shape[1]) # gain = old / new\n pad = (mw - dst_shape[1] * gain) / 2, (mh - dst_shape[0] * gain) / 2 # wh padding\n top, left = int(pad[1]), int(pad[0]) # y, x\n bottom, right = int(mh - pad[1]), int(mw - pad[0])\n masks = masks[:, top:bottom, left:right]\n\n masks = F.interpolate(masks[None], dst_shape, mode='bilinear', align_corners=False)[0] # CHW\n masks = crop_mask(masks, bboxes) # CHW\n return masks.gt_(0.5)" }, { "identifier": "select_device", "path": "utils/torch_utils.py", "snippet": "def select_device(device='', batch_size=0, newline=True):\n # device = None or 'cpu' or 0 or '0' or '0,1,2,3'\n s = f'YOLOv5 🚀 {git_describe() or file_date()} Python-{platform.python_version()} torch-{torch.__version__} '\n device = str(device).strip().lower().replace('cuda:', '').replace('none', '') # to string, 'cuda:0' to '0'\n cpu = device == 'cpu'\n mps = device == 'mps' # Apple Metal Performance Shaders (MPS)\n if cpu or mps:\n os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False\n elif device: # non-cpu device requested\n os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable - must be before assert is_available()\n assert torch.cuda.is_available() and torch.cuda.device_count() >= len(device.replace(',', '')), \\\n f\"Invalid CUDA '--device {device}' requested, use '--device cpu' or pass valid CUDA device(s)\"\n\n if not cpu and not mps and torch.cuda.is_available(): # prefer GPU if available\n devices = device.split(',') if device else '0' # range(torch.cuda.device_count()) # i.e. 0,1,6,7\n n = len(devices) # device count\n if n > 1 and batch_size > 0: # check batch_size is divisible by device_count\n assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}'\n space = ' ' * (len(s) + 1)\n for i, d in enumerate(devices):\n p = torch.cuda.get_device_properties(i)\n s += f\"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / (1 << 20):.0f}MiB)\\n\" # bytes to MB\n arg = 'cuda:0'\n elif mps and getattr(torch, 'has_mps', False) and torch.backends.mps.is_available(): # prefer MPS if available\n s += 'MPS\\n'\n arg = 'mps'\n else: # revert to CPU\n s += 'CPU\\n'\n arg = 'cpu'\n\n if not newline:\n s = s.rstrip()\n LOGGER.info(s)\n return torch.device(arg)" }, { "identifier": "smart_inference_mode", "path": "utils/torch_utils.py", "snippet": "def smart_inference_mode(torch_1_9=check_version(torch.__version__, '1.9.0')):\n # Applies torch.inference_mode() decorator if torch>=1.9.0 else torch.no_grad() decorator\n def decorate(fn):\n return (torch.inference_mode if torch_1_9 else torch.no_grad)()(fn)\n\n return decorate" } ]
import argparse import os import platform import sys import time import torch from pathlib import Path from models.common import DetectMultiBackend from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2, increment_path, non_max_suppression, print_args, scale_boxes, scale_segments, strip_optimizer, xyxy2xywh) from utils.plots import Annotator, colors, save_one_box from utils.segment.general import masks2segments, process_mask, process_mask_native from utils.torch_utils import select_device, smart_inference_mode
token_num: 13,746
exist_ok=False, # existing project/name ok, do not increment line_thickness=3, # bounding box thickness (pixels) hide_labels=False, # hide labels hide_conf=False, # hide confidences half=False, # use FP16 half-precision inference dnn=False, # use OpenCV DNN for ONNX inference vid_stride=1, # video frame-rate stride retina_masks=False, ): source = str(source) save_img = not nosave and not source.endswith('.txt') # save inference images is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://')) webcam = source.isnumeric() or source.endswith('.txt') or (is_url and not is_file) screenshot = source.lower().startswith('screen') if is_url and is_file: source = check_file(source) # download # Directories save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Load model device = select_device(device) model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) stride, names, pt = model.stride, model.names, model.pt imgsz = check_img_size(imgsz, s=stride) # check image size # Dataloader bs = 1 # batch_size if webcam: view_img = check_imshow(warn=True) dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) bs = len(dataset) elif screenshot: dataset = LoadScreenshots(source, img_size=imgsz, stride=stride, auto=pt) else: dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) vid_path, vid_writer = [None] * bs, [None] * bs # Run inference model.warmup(imgsz=(1 if pt else bs, 3, *imgsz)) # warmup seen, windows, dt = 0, [], (Profile(), Profile(), Profile()) for path, im, im0s, vid_cap, s in dataset: with dt[0]: im = torch.from_numpy(im).to(model.device) im = im.half() if model.fp16 else im.float() # uint8 to fp16/32 im /= 255 # 0 - 255 to 0.0 - 1.0 if len(im.shape) == 3: im = im[None] # expand for batch dim # Inference with dt[1]: act = time.time() visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False pred, proto = model(im, augment=augment, visualize=visualize)[:2] print('time.time():',time.time()-act) # NMS with dt[2]: pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det, nm=32) # Second-stage classifier (optional) # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s) # Process predictions for i, det in enumerate(pred): # per image seen += 1 if webcam: # batch_size >= 1 p, im0, frame = path[i], im0s[i].copy(), dataset.count s += f'{i}: ' else: p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0) p = Path(p) # to Path save_path = str(save_dir / p.name) # im.jpg txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # im.txt s += '%gx%g ' % im.shape[2:] # print string imc = im0.copy() if save_crop else im0 # for save_crop annotator = Annotator(im0, line_width=line_thickness, example=str(names)) if len(det): if retina_masks: # scale bbox first the crop masks det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round() # rescale boxes to im0 size masks = process_mask_native(proto[i], det[:, 6:], det[:, :4], im0.shape[:2]) # HWC else: masks = process_mask(proto[i], det[:, 6:], det[:, :4], im.shape[2:], upsample=True) # HWC det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round() # rescale boxes to im0 size # Segments if save_txt: 
segments = reversed(masks2segments(masks)) segments = [ scale_segments(im0.shape if retina_masks else im.shape[2:], x, im0.shape, normalize=True) for x in segments] # Print results for c in det[:, 5].unique(): n = (det[:, 5] == c).sum() # detections per class s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string # Mask plotting plot_img = torch.as_tensor(im0, dtype=torch.float16).to(device).permute(2, 0, 1).flip(0).contiguous() / 255. \ if retina_masks else im[i] annotator.masks(masks, colors=[colors(x, True) for x in det[:, 5]], im_gpu=plot_img) # Write results for j, (*xyxy, conf, cls) in enumerate(reversed(det[:, :6])): if save_txt: # Write to file segj = segments[j].reshape(-1) # (n,2) to (n*2) line = (cls, *segj, conf) if save_conf else (cls, *segj) # label format with open(f'{txt_path}.txt', 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') if save_img or save_crop or view_img: # Add bbox to image c = int(cls) # integer class label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}') annotator.box_label(xyxy, label, color=colors(c, True)) # annotator.draw.polygon(segments[j], outline=colors(c, True), width=3) if save_crop:
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ Run YOLOv5 segmentation inference on images, videos, directories, streams, etc. Usage - sources: $ python segment/predict.py --weights yolov5s-seg.pt --source 0 # webcam img.jpg # image vid.mp4 # video screen # screenshot path/ # directory 'path/*.jpg' # glob 'https://youtu.be/Zgi9g1ksQHc' # YouTube 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream Usage - formats: $ python segment/predict.py --weights yolov5s-seg.pt # PyTorch yolov5s-seg.torchscript # TorchScript yolov5s-seg.onnx # ONNX Runtime or OpenCV DNN with --dnn yolov5s-seg_openvino_model # OpenVINO yolov5s-seg.engine # TensorRT yolov5s-seg.mlmodel # CoreML (macOS-only) yolov5s-seg_saved_model # TensorFlow SavedModel yolov5s-seg.pb # TensorFlow GraphDef yolov5s-seg.tflite # TensorFlow Lite yolov5s-seg_edgetpu.tflite # TensorFlow Edge TPU yolov5s-seg_paddle_model # PaddlePaddle """ FILE = Path(__file__).resolve() ROOT = FILE.parents[1] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative @smart_inference_mode() def run( weights=ROOT / 'yolov5s-seg.pt', # model.pt path(s) source=ROOT / 'data/images', # file/dir/URL/glob/screen/0(webcam) data=ROOT / 'data/coco128.yaml', # dataset.yaml path imgsz=(640, 640), # inference size (height, width) conf_thres=0.25, # confidence threshold iou_thres=0.45, # NMS IOU threshold max_det=1000, # maximum detections per image device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu view_img=False, # show results save_txt=False, # save results to *.txt save_conf=False, # save confidences in --save-txt labels save_crop=False, # save cropped prediction boxes nosave=False, # do not save images/videos classes=None, # filter by class: --class 0, or --class 0 2 3 agnostic_nms=False, # class-agnostic NMS augment=False, # augmented inference visualize=False, # visualize features update=False, # update all models project=ROOT / 'runs/predict-seg', # save results to project/name name='exp', # save results to project/name exist_ok=False, # existing project/name ok, do not increment line_thickness=3, # bounding box thickness (pixels) hide_labels=False, # hide labels hide_conf=False, # hide confidences half=False, # use FP16 half-precision inference dnn=False, # use OpenCV DNN for ONNX inference vid_stride=1, # video frame-rate stride retina_masks=False, ): source = str(source) save_img = not nosave and not source.endswith('.txt') # save inference images is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://')) webcam = source.isnumeric() or source.endswith('.txt') or (is_url and not is_file) screenshot = source.lower().startswith('screen') if is_url and is_file: source = check_file(source) # download # Directories save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Load model device = select_device(device) model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) stride, names, pt = model.stride, model.names, model.pt imgsz = check_img_size(imgsz, s=stride) # check image size # Dataloader bs = 1 # batch_size if webcam: view_img = check_imshow(warn=True) dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) bs = len(dataset) elif screenshot: dataset = LoadScreenshots(source, img_size=imgsz, 
stride=stride, auto=pt) else: dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) vid_path, vid_writer = [None] * bs, [None] * bs # Run inference model.warmup(imgsz=(1 if pt else bs, 3, *imgsz)) # warmup seen, windows, dt = 0, [], (Profile(), Profile(), Profile()) for path, im, im0s, vid_cap, s in dataset: with dt[0]: im = torch.from_numpy(im).to(model.device) im = im.half() if model.fp16 else im.float() # uint8 to fp16/32 im /= 255 # 0 - 255 to 0.0 - 1.0 if len(im.shape) == 3: im = im[None] # expand for batch dim # Inference with dt[1]: act = time.time() visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False pred, proto = model(im, augment=augment, visualize=visualize)[:2] print('time.time():',time.time()-act) # NMS with dt[2]: pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det, nm=32) # Second-stage classifier (optional) # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s) # Process predictions for i, det in enumerate(pred): # per image seen += 1 if webcam: # batch_size >= 1 p, im0, frame = path[i], im0s[i].copy(), dataset.count s += f'{i}: ' else: p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0) p = Path(p) # to Path save_path = str(save_dir / p.name) # im.jpg txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # im.txt s += '%gx%g ' % im.shape[2:] # print string imc = im0.copy() if save_crop else im0 # for save_crop annotator = Annotator(im0, line_width=line_thickness, example=str(names)) if len(det): if retina_masks: # scale bbox first the crop masks det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round() # rescale boxes to im0 size masks = process_mask_native(proto[i], det[:, 6:], det[:, :4], im0.shape[:2]) # HWC else: masks = process_mask(proto[i], det[:, 6:], det[:, :4], im.shape[2:], upsample=True) # HWC det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round() # rescale boxes to im0 size # Segments if save_txt: segments = reversed(masks2segments(masks)) segments = [ scale_segments(im0.shape if retina_masks else im.shape[2:], x, im0.shape, normalize=True) for x in segments] # Print results for c in det[:, 5].unique(): n = (det[:, 5] == c).sum() # detections per class s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string # Mask plotting plot_img = torch.as_tensor(im0, dtype=torch.float16).to(device).permute(2, 0, 1).flip(0).contiguous() / 255. \ if retina_masks else im[i] annotator.masks(masks, colors=[colors(x, True) for x in det[:, 5]], im_gpu=plot_img) # Write results for j, (*xyxy, conf, cls) in enumerate(reversed(det[:, :6])): if save_txt: # Write to file segj = segments[j].reshape(-1) # (n,2) to (n*2) line = (cls, *segj, conf) if save_conf else (cls, *segj) # label format with open(f'{txt_path}.txt', 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') if save_img or save_crop or view_img: # Add bbox to image c = int(cls) # integer class label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}') annotator.box_label(xyxy, label, color=colors(c, True)) # annotator.draw.polygon(segments[j], outline=colors(c, True), width=3) if save_crop:
save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True)
7
2023-12-10 14:18:29+00:00
16k
youngskkim/CRN
models/camera_radar_net_det.py
[ { "identifier": "BaseBEVDepth", "path": "models/base_bev_depth.py", "snippet": "class BaseBEVDepth(nn.Module):\n \"\"\"Source code of `BEVDepth`, `https://arxiv.org/abs/2112.11790`.\n\n Args:\n backbone_conf (dict): Config of backbone.\n head_conf (dict): Config of head.\n \"\"\"\n\n def __init__(self, backbone_conf, head_conf):\n super(BaseBEVDepth, self).__init__()\n self.backbone_img = BaseLSSFPN(**backbone_conf)\n self.head = BEVDepthHead(**head_conf)\n\n # for inference time measurement\n self.idx = 0\n self.times_dict = {\n 'img': [],\n 'img_backbone': [],\n 'img_dep': [],\n 'img_transform': [],\n 'img_pool': [],\n\n 'head': [],\n 'head_backbone': [],\n 'head_head': [],\n }\n\n def forward(self,\n sweep_imgs,\n mats_dict,\n is_train=False\n ):\n \"\"\"Forward function for BEVDepth\n\n Args:\n sweep_imgs (Tensor): Input images.\n mats_dict(dict):\n sensor2ego_mats(Tensor): Transformation matrix from\n camera to ego with shape of (B, num_sweeps,\n num_cameras, 4, 4).\n intrin_mats(Tensor): Intrinsic matrix with shape\n of (B, num_sweeps, num_cameras, 4, 4).\n ida_mats(Tensor): Transformation matrix for ida with\n shape of (B, num_sweeps, num_cameras, 4, 4).\n sensor2sensor_mats(Tensor): Transformation matrix\n from key frame camera to sweep frame camera with\n shape of (B, num_sweeps, num_cameras, 4, 4).\n bda_mat(Tensor): Rotation matrix for bda with shape\n of (B, 4, 4).\n\n Returns:\n tuple(list[dict]): Output results for tasks.\n \"\"\"\n if is_train:\n self.time = None\n\n x, depth, _ = self.backbone_img(sweep_imgs, mats_dict,\n is_return_depth=True)\n preds, _ = self.head(x)\n return preds, depth\n else:\n if self.idx < 100: # skip few iterations for warmup\n self.times = None\n elif self.idx == 100:\n self.times = self.times_dict\n\n x, self.times = self.backbone_img(sweep_imgs, mats_dict,\n times=self.times)\n preds, self.times = self.head(x, times=self.times)\n\n if self.idx == 1000:\n time_mean = {}\n for k, v in self.times.items():\n time_mean[k] = sum(v) / len(v)\n print('img: %.2f' % time_mean['img'])\n print(' img_backbone: %.2f' % time_mean['img_backbone'])\n print(' img_dep: %.2f' % time_mean['img_dep'])\n print(' img_transform: %.2f' % time_mean['img_transform'])\n print(' img_pool: %.2f' % time_mean['img_pool'])\n print('head: %.2f' % time_mean['head'])\n print(' head_backbone: %.2f' % time_mean['head_backbone'])\n print(' head_head: %.2f' % time_mean['head_head'])\n total = time_mean['img'] + time_mean['head']\n print('total: %.2f' % total)\n print(' ')\n print('FPS: %.2f' % (1000/total))\n\n self.idx += 1\n return preds\n\n def get_targets(self, gt_boxes, gt_labels):\n \"\"\"Generate training targets for a single sample.\n\n Args:\n gt_bboxes_3d (:obj:`LiDARInstance3DBoxes`): Ground truth gt boxes.\n gt_labels_3d (torch.Tensor): Labels of boxes.\n\n Returns:\n tuple[list[torch.Tensor]]: Tuple of target including \\\n the following results in order.\n\n - list[torch.Tensor]: Heatmap scores.\n - list[torch.Tensor]: Ground truth boxes.\n - list[torch.Tensor]: Indexes indicating the position \\\n of the valid boxes.\n - list[torch.Tensor]: Masks indicating which boxes \\\n are valid.\n \"\"\"\n return self.head.get_targets(gt_boxes, gt_labels)\n\n def loss(self, targets, preds_dicts):\n \"\"\"Loss function for BEVDepth.\n\n Args:\n gt_bboxes_3d (list[:obj:`LiDARInstance3DBoxes`]): Ground\n truth gt boxes.\n gt_labels_3d (list[torch.Tensor]): Labels of boxes.\n preds_dicts (dict): Output of forward function.\n\n Returns:\n dict[str:torch.Tensor]: Loss of heatmap and 
bbox of each task.\n \"\"\"\n return self.head.loss(targets, preds_dicts)\n\n def get_bboxes(self, preds_dicts, img_metas=None, img=None, rescale=False):\n \"\"\"Generate bboxes from bbox head predictions.\n\n Args:\n preds_dicts (tuple[list[dict]]): Prediction results.\n img_metas (list[dict]): Point cloud and image's meta info.\n\n Returns:\n list[dict]: Decoded bbox, scores and labels after nms.\n \"\"\"\n return self.head.get_bboxes(preds_dicts, img_metas, img, rescale)" }, { "identifier": "RVTLSSFPN", "path": "layers/backbones/rvt_lss_fpn.py", "snippet": "class RVTLSSFPN(BaseLSSFPN):\n def __init__(self, **kwargs):\n super(RVTLSSFPN, self).__init__(**kwargs)\n\n self.register_buffer('frustum', self.create_frustum())\n self.z_bound = kwargs['z_bound']\n self.radar_view_transform = kwargs['radar_view_transform']\n self.camera_aware = kwargs['camera_aware']\n\n self.depth_net = self._configure_depth_net(kwargs['depth_net_conf'])\n self.view_aggregation_net = ViewAggregation(self.output_channels*2,\n self.output_channels*2,\n self.output_channels)\n\n def _configure_depth_net(self, depth_net_conf):\n return DepthNet(\n depth_net_conf['in_channels'],\n depth_net_conf['mid_channels'],\n self.output_channels,\n self.depth_channels,\n camera_aware=self.camera_aware\n )\n\n def get_geometry_collapsed(self, sensor2ego_mat, intrin_mat, ida_mat, bda_mat,\n z_min=-5., z_max=3.):\n batch_size, num_cams, _, _ = sensor2ego_mat.shape\n\n # undo post-transformation\n # B x N x D x H x W x 3\n points = self.frustum\n ida_mat = ida_mat.view(batch_size, num_cams, 1, 1, 1, 4, 4)\n points = ida_mat.inverse().matmul(points.unsqueeze(-1)).double()\n # cam_to_ego\n points = torch.cat(\n (points[:, :, :, :, :, :2] * points[:, :, :, :, :, 2:3],\n points[:, :, :, :, :, 2:]), 5)\n\n combine = sensor2ego_mat.matmul(torch.inverse(intrin_mat)).double()\n points = combine.view(batch_size, num_cams, 1, 1, 1, 4,\n 4).matmul(points).half()\n if bda_mat is not None:\n bda_mat = bda_mat.unsqueeze(1).repeat(1, num_cams, 1, 1).view(\n batch_size, num_cams, 1, 1, 1, 4, 4)\n points = (bda_mat @ points).squeeze(-1)\n else:\n points = points.squeeze(-1)\n\n points_out = points[:, :, :, 0:1, :, :3]\n points_valid_z = ((points[..., 2] > z_min) & (points[..., 2] < z_max))\n\n return points_out, points_valid_z\n\n def _forward_view_aggregation_net(self, img_feat_with_depth):\n # BEVConv2D [n, c, d, h, w] -> [n, h, c, w, d]\n img_feat_with_depth = img_feat_with_depth.permute(\n 0, 3, 1, 4, 2).contiguous() # [n, c, d, h, w] -> [n, h, c, w, d]\n n, h, c, w, d = img_feat_with_depth.shape\n img_feat_with_depth = img_feat_with_depth.view(-1, c, w, d)\n img_feat_with_depth = (\n self.view_aggregation_net(img_feat_with_depth).view(\n n, h, c//2, w, d).permute(0, 2, 4, 1, 3).contiguous().float())\n return img_feat_with_depth\n\n def _forward_depth_net(self, feat, mats_dict):\n return self.depth_net(feat, mats_dict)\n\n def _split_batch_cam(self, feat, inv=False, num_cams=6):\n batch_size = feat.shape[0]\n if not inv:\n return feat.reshape(batch_size // num_cams, num_cams, *feat.shape[1:])\n else:\n return feat.reshape(batch_size * num_cams, *feat.shape[2:])\n\n def _forward_single_sweep(self,\n sweep_index,\n sweep_imgs,\n mats_dict,\n pts_context,\n pts_occupancy,\n return_depth=False):\n \"\"\"Forward function for single sweep.\n\n Args:\n sweep_index (int): Index of sweeps.\n sweep_imgs (Tensor): Input images.\n mats_dict (dict):\n sensor2ego_mats(Tensor): Transformation matrix from\n camera to ego.\n intrin_mats(Tensor): Intrinsic 
matrix.\n ida_mats(Tensor): Transformation matrix for ida.\n sensor2sensor_mats(Tensor): Transformation matrix\n from key frame camera to sweep frame camera.\n bda_mat(Tensor): Rotation matrix for bda.\n ptss_context(Tensor): Input point context feature.\n ptss_occupancy(Tensor): Input point occupancy.\n return_depth (bool, optional): Whether to return depth.\n Default: False.\n\n Returns:\n Tensor: BEV feature map.\n \"\"\"\n if self.times is not None:\n t1 = torch.cuda.Event(enable_timing=True)\n t2 = torch.cuda.Event(enable_timing=True)\n t3 = torch.cuda.Event(enable_timing=True)\n t4 = torch.cuda.Event(enable_timing=True)\n t5 = torch.cuda.Event(enable_timing=True)\n t1.record()\n torch.cuda.synchronize()\n\n batch_size, num_sweeps, num_cams, num_channels, img_height, \\\n img_width = sweep_imgs.shape\n\n # extract image feature\n img_feats = self.get_cam_feats(sweep_imgs)\n if self.times is not None:\n t2.record()\n torch.cuda.synchronize()\n self.times['img_backbone'].append(t1.elapsed_time(t2))\n\n source_features = img_feats[:, 0, ...]\n source_features = self._split_batch_cam(source_features, inv=True, num_cams=num_cams)\n\n # predict image context feature, depth distribution\n depth_feature = self._forward_depth_net(\n source_features,\n mats_dict,\n )\n if self.times is not None:\n t3.record()\n torch.cuda.synchronize()\n self.times['img_dep'].append(t2.elapsed_time(t3))\n\n image_feature = depth_feature[:, self.depth_channels:(self.depth_channels + self.output_channels)]\n\n depth_occupancy = depth_feature[:, :self.depth_channels].softmax(\n dim=1, dtype=depth_feature.dtype)\n img_feat_with_depth = depth_occupancy.unsqueeze(1) * image_feature.unsqueeze(2)\n\n # calculate frustum grid within valid height\n geom_xyz, geom_xyz_valid = self.get_geometry_collapsed(\n mats_dict['sensor2ego_mats'][:, sweep_index, ...],\n mats_dict['intrin_mats'][:, sweep_index, ...],\n mats_dict['ida_mats'][:, sweep_index, ...],\n mats_dict.get('bda_mat', None))\n\n geom_xyz_valid = self._split_batch_cam(geom_xyz_valid, inv=True, num_cams=num_cams).unsqueeze(1)\n img_feat_with_depth = (img_feat_with_depth * geom_xyz_valid).sum(3).unsqueeze(3)\n\n if self.radar_view_transform:\n radar_occupancy = pts_occupancy.permute(0, 2, 1, 3).contiguous()\n image_feature_collapsed = (image_feature * geom_xyz_valid.max(2).values).sum(2).unsqueeze(2)\n img_feat_with_radar = radar_occupancy.unsqueeze(1) * image_feature_collapsed.unsqueeze(2)\n\n img_context = torch.cat([img_feat_with_depth, img_feat_with_radar], dim=1)\n img_context = self._forward_view_aggregation_net(img_context)\n else:\n img_context = img_feat_with_depth\n if self.times is not None:\n t4.record()\n torch.cuda.synchronize()\n self.times['img_transform'].append(t3.elapsed_time(t4))\n\n img_context = self._split_batch_cam(img_context, num_cams=num_cams)\n img_context = img_context.permute(0, 1, 3, 4, 5, 2).contiguous()\n\n pts_context = self._split_batch_cam(pts_context, num_cams=num_cams)\n pts_context = pts_context.unsqueeze(-2).permute(0, 1, 3, 4, 5, 2).contiguous()\n\n fused_context = torch.cat([img_context, pts_context], dim=-1)\n\n geom_xyz = ((geom_xyz - (self.voxel_coord - self.voxel_size / 2.0)) /\n self.voxel_size).int()\n geom_xyz[..., 2] = 0 # collapse z-axis\n geo_pos = torch.ones_like(geom_xyz)\n \n # sparse voxel pooling\n feature_map, _ = average_voxel_pooling(geom_xyz, fused_context.contiguous(), geo_pos,\n self.voxel_num.cuda())\n if self.times is not None:\n t5.record()\n torch.cuda.synchronize()\n 
self.times['img_pool'].append(t4.elapsed_time(t5))\n\n if return_depth:\n return feature_map.contiguous(), depth_feature[:, :self.depth_channels].softmax(1)\n return feature_map.contiguous()\n\n def forward(self,\n sweep_imgs,\n mats_dict,\n ptss_context,\n ptss_occupancy,\n times=None,\n return_depth=False):\n \"\"\"Forward function.\n\n Args:\n sweep_imgs(Tensor): Input images with shape of (B, num_sweeps,\n num_cameras, 3, H, W).\n mats_dict(dict):\n sensor2ego_mats(Tensor): Transformation matrix from\n camera to ego with shape of (B, num_sweeps,\n num_cameras, 4, 4).\n intrin_mats(Tensor): Intrinsic matrix with shape\n of (B, num_sweeps, num_cameras, 4, 4).\n ida_mats(Tensor): Transformation matrix for ida with\n shape of (B, num_sweeps, num_cameras, 4, 4).\n sensor2sensor_mats(Tensor): Transformation matrix\n from key frame camera to sweep frame camera with\n shape of (B, num_sweeps, num_cameras, 4, 4).\n bda_mat(Tensor): Rotation matrix for bda with shape\n of (B, 4, 4).\n ptss_context(Tensor): Input point context feature with shape of\n (B * num_cameras, num_sweeps, C, D, W).\n ptss_occupancy(Tensor): Input point occupancy with shape of\n (B * num_cameras, num_sweeps, 1, D, W).\n times(Dict, optional): Inference time measurement.\n is_return_depth (bool, optional): Whether to return depth.\n Default: False.\n\n Return:\n Tensor: bev feature map.\n \"\"\"\n self.times = times\n if self.times is not None:\n t1 = torch.cuda.Event(enable_timing=True)\n t2 = torch.cuda.Event(enable_timing=True)\n t1.record()\n torch.cuda.synchronize()\n\n batch_size, num_sweeps, num_cams, num_channels, img_height, \\\n img_width = sweep_imgs.shape\n key_frame_res = self._forward_single_sweep(\n 0,\n sweep_imgs[:, 0:1, ...],\n mats_dict,\n ptss_context[:, 0, ...] if ptss_context is not None else None,\n ptss_occupancy[:, 0, ...] if ptss_occupancy is not None else None,\n return_depth=return_depth)\n if self.times is not None:\n t2.record()\n torch.cuda.synchronize()\n self.times['img'].append(t1.elapsed_time(t2))\n\n if num_sweeps == 1:\n if return_depth:\n return key_frame_res[0].unsqueeze(1), key_frame_res[1], self.times\n else:\n return key_frame_res.unsqueeze(1), self.times\n\n key_frame_feature = key_frame_res[0] if return_depth else key_frame_res\n ret_feature_list = [key_frame_feature]\n for sweep_index in range(1, num_sweeps):\n with torch.no_grad():\n feature_map = self._forward_single_sweep(\n sweep_index,\n sweep_imgs[:, sweep_index:sweep_index + 1, ...],\n mats_dict,\n ptss_context[:, sweep_index, ...] if ptss_context is not None else None,\n ptss_occupancy[:, sweep_index, ...] if ptss_occupancy is not None else None,\n return_depth=False)\n ret_feature_list.append(feature_map)\n\n if return_depth:\n return torch.stack(ret_feature_list, 1), key_frame_res[1], self.times\n else:\n return torch.stack(ret_feature_list, 1), self.times" }, { "identifier": "PtsBackbone", "path": "layers/backbones/pts_backbone.py", "snippet": "class PtsBackbone(nn.Module):\n \"\"\"Pillar Feature Net.\n\n The network prepares the pillar features and performs forward pass\n through PFNLayers.\n\n Args:\n in_channels (int, optional): Number of input features,\n either x, y, z or x, y, z, r. Defaults to 4.\n feat_channels (tuple, optional): Number of features in each of the\n N PFNLayers. Defaults to (64, ).\n with_distance (bool, optional): Whether to include Euclidean distance\n to points. Defaults to False.\n with_cluster_center (bool, optional): [description]. 
Defaults to True.\n with_voxel_center (bool, optional): [description]. Defaults to True.\n voxel_size (tuple[float], optional): Size of voxels, only utilize x\n and y size. Defaults to (0.2, 0.2, 4).\n point_cloud_range (tuple[float], optional): Point cloud range, only\n utilizes x and y min. Defaults to (0, -40, -3, 70.4, 40, 1).\n norm_cfg ([type], optional): [description].\n Defaults to dict(type='BN1d', eps=1e-3, momentum=0.01).\n mode (str, optional): The mode to gather point features. Options are\n 'max' or 'avg'. Defaults to 'max'.\n legacy (bool, optional): Whether to use the new behavior or\n the original behavior. Defaults to True.\n \"\"\"\n\n def __init__(self,\n pts_voxel_layer,\n pts_voxel_encoder,\n pts_middle_encoder,\n pts_backbone,\n pts_neck,\n return_context=True,\n return_occupancy=True,\n **kwargs,\n ):\n super(PtsBackbone, self).__init__()\n\n self.pts_voxel_layer = Voxelization(**pts_voxel_layer)\n self.pts_voxel_encoder = builder.build_voxel_encoder(pts_voxel_encoder)\n self.pts_middle_encoder = builder.build_middle_encoder(pts_middle_encoder)\n self.pts_backbone = builder.build_backbone(pts_backbone)\n self.return_context = return_context\n self.return_occupancy = return_occupancy\n mid_channels = pts_backbone['out_channels'][-1]\n if pts_neck is not None:\n self.pts_neck = builder.build_neck(pts_neck)\n mid_channels = sum(pts_neck['out_channels'])\n else:\n self.pts_neck = None\n\n if self.return_context:\n if 'out_channels_pts' in kwargs:\n out_channels = kwargs['out_channels_pts']\n else:\n out_channels = 80\n self.pred_context = nn.Sequential(\n nn.Conv2d(mid_channels,\n mid_channels//2,\n kernel_size=3,\n stride=1,\n padding=1,\n padding_mode='reflect'),\n nn.BatchNorm2d(mid_channels//2),\n nn.ReLU(inplace=True),\n nn.Conv2d(mid_channels//2,\n out_channels,\n kernel_size=1,\n stride=1,\n padding=0),\n )\n\n if self.return_occupancy:\n self.pred_occupancy = nn.Sequential(\n nn.Conv2d(mid_channels,\n mid_channels//2,\n kernel_size=3,\n stride=1,\n padding=1,\n padding_mode='reflect'),\n nn.BatchNorm2d(mid_channels//2),\n nn.ReLU(inplace=True),\n nn.Conv2d(mid_channels//2,\n 1,\n kernel_size=1,\n stride=1,\n padding=0),\n )\n\n if 'occupancy_init' in kwargs:\n occupancy_init = kwargs['occupancy_init']\n else:\n occupancy_init = 0.01\n self.pred_occupancy[-1].bias.data.fill_(bias_init_with_prob(occupancy_init))\n\n def voxelize(self, points):\n \"\"\"Apply dynamic voxelization to points.\n\n Args:\n points (list[torch.Tensor]): Points of each sample.\n\n Returns:\n tuple[torch.Tensor]: Concatenated points, number of points\n per voxel, and coordinates.\n \"\"\"\n voxels, coors, num_points = [], [], []\n batch_size, _, _ = points.shape\n points_list = [points[i] for i in range(batch_size)]\n\n for res in points_list:\n res_voxels, res_coors, res_num_points = self.pts_voxel_layer(res)\n voxels.append(res_voxels)\n coors.append(res_coors)\n num_points.append(res_num_points)\n voxels = torch.cat(voxels, dim=0)\n num_points = torch.cat(num_points, dim=0)\n coors_batch = []\n for i, coor in enumerate(coors):\n coor_pad = F.pad(coor, (1, 0), mode='constant', value=i)\n coors_batch.append(coor_pad)\n coors_batch = torch.cat(coors_batch, dim=0)\n return voxels, num_points, coors_batch\n\n def _forward_single_sweep(self, pts):\n if self.times is not None:\n t1 = torch.cuda.Event(enable_timing=True)\n t2 = torch.cuda.Event(enable_timing=True)\n t3 = torch.cuda.Event(enable_timing=True)\n t4 = torch.cuda.Event(enable_timing=True)\n t1.record()\n torch.cuda.synchronize()\n\n 
B, N, P, F = pts.shape\n batch_size = B * N\n pts = pts.contiguous().view(B*N, P, F)\n\n voxels, num_points, coors = self.voxelize(pts)\n if self.times is not None:\n t2.record()\n torch.cuda.synchronize()\n self.times['pts_voxelize'].append(t1.elapsed_time(t2))\n\n voxel_features = self.pts_voxel_encoder(voxels, num_points, coors)\n x = self.pts_middle_encoder(voxel_features, coors, batch_size)\n x = self.pts_backbone(x)\n if self.pts_neck is not None:\n x = self.pts_neck(x)\n\n if self.times is not None:\n t3.record()\n torch.cuda.synchronize()\n self.times['pts_backbone'].append(t2.elapsed_time(t3))\n\n x_context = None\n x_occupancy = None\n if self.return_context:\n x_context = self.pred_context(x[-1]).unsqueeze(1)\n if self.return_occupancy:\n x_occupancy = self.pred_occupancy(x[-1]).unsqueeze(1).sigmoid()\n\n if self.times is not None:\n t4.record()\n torch.cuda.synchronize()\n self.times['pts_head'].append(t3.elapsed_time(t4))\n\n return x_context, x_occupancy\n\n def forward(self, ptss, times=None):\n self.times = times\n if self.times is not None:\n t1 = torch.cuda.Event(enable_timing=True)\n t2 = torch.cuda.Event(enable_timing=True)\n t1.record()\n torch.cuda.synchronize()\n\n batch_size, num_sweeps, num_cams, _, _ = ptss.shape\n\n key_context, key_occupancy = self._forward_single_sweep(ptss[:, 0, ...])\n \n if self.times is not None:\n t2.record()\n torch.cuda.synchronize()\n self.times['pts'].append(t1.elapsed_time(t2))\n\n if num_sweeps == 1:\n return key_context, key_occupancy, self.times\n\n context_list = [key_context]\n occupancy_list = [key_occupancy]\n for sweep_index in range(1, num_sweeps):\n with torch.no_grad():\n context, occupancy = self._forward_single_sweep(ptss[:, sweep_index, ...])\n context_list.append(context)\n occupancy_list.append(occupancy)\n\n ret_context = None\n ret_occupancy = None\n if self.return_context:\n ret_context = torch.cat(context_list, 1)\n if self.return_occupancy:\n ret_occupancy = torch.cat(occupancy_list, 1)\n return ret_context, ret_occupancy, self.times" }, { "identifier": "MFAFuser", "path": "layers/fuser/multimodal_feature_aggregation.py", "snippet": "class MFAFuser(nn.Module):\n def __init__(self, num_sweeps=4, img_dims=80, pts_dims=128, embed_dims=256,\n num_layers=6, num_heads=4, bev_shape=(128, 128)):\n super(MFAFuser, self).__init__()\n\n self.num_modalities = 2\n self.use_cams_embeds = False\n\n self.num_heads = num_heads\n\n self.img_dims = img_dims\n self.pts_dims = pts_dims\n self.embed_dims = embed_dims\n _pos_dim_ = self.embed_dims//2\n _ffn_dim_ = self.embed_dims*2\n\n self.norm_img = build_norm_layer(dict(type='LN'), img_dims)[1]\n self.norm_pts = build_norm_layer(dict(type='LN'), pts_dims)[1]\n self.input_proj = nn.Linear(img_dims + pts_dims, self.embed_dims)\n\n self.bev_h, self.bev_w = bev_shape\n\n self.positional_encoding = build_positional_encoding(\n dict(\n type='LearnedPositionalEncoding',\n num_feats=_pos_dim_,\n row_num_embed=self.bev_h,\n col_num_embed=self.bev_w,\n ),\n )\n self.register_buffer('ref_2d', self.get_reference_points(self.bev_h, self.bev_w))\n\n ffn_cfgs = dict(\n type='FFN',\n embed_dims=self.embed_dims,\n feedforward_channels=_ffn_dim_,\n num_fcs=2,\n ffn_drop=0.1,\n act_cfg=dict(type='ReLU', inplace=True),\n )\n norm_cfgs = dict(type='LN')\n\n self.ffn_layers = ModuleList()\n for _ in range(num_layers):\n self.ffn_layers.append(\n build_feedforward_network(ffn_cfgs)\n )\n self.norm_layers1 = ModuleList()\n for _ in range(num_layers):\n self.norm_layers1.append(\n build_norm_layer(norm_cfgs, 
self.embed_dims)[1],\n )\n self.norm_layers2 = ModuleList()\n for _ in range(num_layers):\n self.norm_layers2.append(\n build_norm_layer(norm_cfgs, self.embed_dims)[1],\n )\n self.attn_layers = ModuleList()\n for _ in range(num_layers):\n self.attn_layers.append(\n DeformableCrossAttention(\n img_dims=self.img_dims,\n pts_dims=self.pts_dims,\n embed_dims=self.embed_dims,\n num_heads=self.num_heads,\n num_modalities=self.num_modalities,\n num_points=4\n ),\n )\n\n self.reduce_conv = nn.Sequential(\n nn.Conv2d(embed_dims*num_sweeps,\n embed_dims,\n kernel_size=3,\n stride=1,\n padding=1,\n bias=False),\n nn.BatchNorm2d(embed_dims),\n nn.ReLU(inplace=True),\n )\n\n self.init_weights()\n\n def init_weights(self):\n \"\"\"Initialize the transformer weights.\"\"\"\n for p in self.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)\n for m in self.modules():\n if isinstance(m, DeformableCrossAttention):\n try:\n m.init_weight()\n except AttributeError:\n m.init_weights()\n\n @staticmethod\n def get_reference_points(H, W, dtype=torch.float):\n \"\"\"Get the reference points used in SCA and TSA.\n Args:\n H, W: spatial shape of bev.\n Z: hight of pillar.\n D: sample D points uniformly from each pillar.\n device (obj:`device`): The device where\n reference_points should be.\n Returns:\n Tensor: reference points used in decoder, has \\\n shape (bs, num_keys, num_levels, 2).\n \"\"\"\n ref_y, ref_x = torch.meshgrid(\n torch.linspace(\n 0.5, H - 0.5, H, dtype=dtype),\n torch.linspace(\n 0.5, W - 0.5, W, dtype=dtype)\n )\n ref_y = ref_y.reshape(-1)[None] / H\n ref_x = ref_x.reshape(-1)[None] / W\n ref_2d = torch.stack((ref_x, ref_y), -1)\n ref_2d = ref_2d.unsqueeze(2).unsqueeze(3)\n return ref_2d\n\n @auto_fp16(apply_to=('feat_img', 'feat_pts'))\n def _forward_single_sweep(self, feat_img, feat_pts):\n if self.times is not None:\n t1 = torch.cuda.Event(enable_timing=True)\n t2 = torch.cuda.Event(enable_timing=True)\n t3 = torch.cuda.Event(enable_timing=True)\n t4 = torch.cuda.Event(enable_timing=True)\n t1.record()\n torch.cuda.synchronize()\n\n bs = feat_img.shape[0]\n ref_2d_stack = self.ref_2d.repeat(bs, 1, 1, self.num_modalities, 1)\n\n feat_img = self.norm_img(feat_img.permute(0, 2, 3, 1).contiguous()).permute(0, 3, 1, 2).contiguous()\n feat_pts = self.norm_pts(feat_pts.permute(0, 2, 3, 1).contiguous()).permute(0, 3, 1, 2).contiguous()\n\n feat_flatten = []\n spatial_shapes = []\n for feat in [feat_img, feat_pts]:\n _, _, h, w = feat.shape\n spatial_shape = (h, w)\n feat = feat.flatten(2).permute(0, 2, 1).contiguous() # [bs, num_cam, c, dw] -> [num_cam, bs, dw, c]\n spatial_shapes.append(spatial_shape)\n feat_flatten.append(feat)\n\n spatial_shapes = torch.as_tensor(\n spatial_shapes, dtype=torch.long, device=feat_img.device)\n level_start_index = torch.cat((spatial_shapes.new_zeros(\n (1,)), spatial_shapes.prod(1).cumsum(0)[:-1]))\n\n bev_queries = torch.cat(feat_flatten, -1)\n bev_queries = self.input_proj(bev_queries)\n\n bev_mask = torch.zeros((bs, self.bev_h, self.bev_w),\n device=bev_queries.device).to(feat_img.dtype)\n bev_pos = self.positional_encoding(bev_mask).to(feat_img.dtype)\n bev_pos = bev_pos.flatten(2).permute(0, 2, 1).contiguous()\n\n feat_img = feat_flatten[0]\n feat_pts = feat_flatten[1]\n if self.times is not None:\n t2.record()\n torch.cuda.synchronize()\n self.times['fusion_pre'].append(t1.elapsed_time(t2))\n\n for attn_layer, ffn_layer, norm_layer1, norm_layer2 in \\\n zip(self.attn_layers, self.ffn_layers, self.norm_layers1, self.norm_layers2):\n # post norm\n 
bev_queries = attn_layer(\n bev_queries,\n feat_img,\n feat_pts,\n identity=None,\n query_pos=bev_pos,\n reference_points=ref_2d_stack,\n spatial_shapes=spatial_shapes,\n level_start_index=level_start_index,\n )\n bev_queries = norm_layer1(bev_queries)\n bev_queries = ffn_layer(bev_queries, identity=None)\n bev_queries = norm_layer2(bev_queries)\n if self.times is not None:\n t3.record()\n torch.cuda.synchronize()\n self.times['fusion_layer'].append(t2.elapsed_time(t3))\n\n output = bev_queries.permute(0, 2, 1).contiguous().reshape(bs, self.embed_dims, h, w)\n if self.times is not None:\n t4.record()\n torch.cuda.synchronize()\n self.times['fusion_post'].append(t3.elapsed_time(t4))\n\n return output\n\n def forward(self, feats, times=None):\n self.times = times\n if self.times is not None:\n t1 = torch.cuda.Event(enable_timing=True)\n t2 = torch.cuda.Event(enable_timing=True)\n t1.record()\n torch.cuda.synchronize()\n\n num_sweeps = feats.shape[1]\n key_frame_res = self._forward_single_sweep(\n feats[:, 0, :self.img_dims],\n feats[:, 0, self.img_dims:self.img_dims+self.pts_dims]\n )\n if self.times is not None:\n t2.record()\n torch.cuda.synchronize()\n self.times['fusion'].append(t1.elapsed_time(t2))\n\n if num_sweeps == 1:\n return key_frame_res, self.times\n\n ret_feature_list = [key_frame_res]\n for sweep_index in range(1, num_sweeps):\n with torch.no_grad():\n feature_map = self._forward_single_sweep(\n feats[:, sweep_index, :self.img_dims],\n feats[:, sweep_index, self.img_dims:self.img_dims+self.pts_dims])\n ret_feature_list.append(feature_map)\n\n return self.reduce_conv(torch.cat(ret_feature_list, 1)).float(), self.times" }, { "identifier": "BEVDepthHead", "path": "layers/heads/bev_depth_head_det.py", "snippet": "class BEVDepthHead(CenterHead):\n \"\"\"Head for BevDepth.\n\n Args:\n in_channels(int): Number of channels after bev_neck.\n tasks(dict): Tasks for head.\n bbox_coder(dict): Config of bbox coder.\n common_heads(dict): Config of head for each task.\n loss_cls(dict): Config of classification loss.\n loss_bbox(dict): Config of regression loss.\n gaussian_overlap(float): Gaussian overlap used for `get_targets`.\n min_radius(int): Min radius used for `get_targets`.\n train_cfg(dict): Config used in the training process.\n test_cfg(dict): Config used in the test process.\n bev_backbone_conf(dict): Cnfig of bev_backbone.\n bev_neck_conf(dict): Cnfig of bev_neck.\n \"\"\"\n def __init__(\n self,\n in_channels=256,\n tasks=None,\n bbox_coder=None,\n common_heads=dict(),\n loss_cls=dict(type='GaussianFocalLoss', reduction='mean'),\n loss_bbox=dict(type='L1Loss', reduction='mean', loss_weight=0.25),\n gaussian_overlap=0.1,\n min_radius=2,\n train_cfg=None,\n test_cfg=None,\n bev_backbone_conf=bev_backbone_conf,\n bev_neck_conf=bev_neck_conf,\n separate_head=dict(type='SeparateHead',\n init_bias=-2.19,\n final_kernel=3),\n ):\n super(BEVDepthHead, self).__init__(\n in_channels=in_channels,\n tasks=tasks,\n bbox_coder=bbox_coder,\n common_heads=common_heads,\n loss_cls=loss_cls,\n loss_bbox=loss_bbox,\n separate_head=separate_head,\n )\n self.trunk = build_backbone(bev_backbone_conf)\n self.trunk.init_weights()\n self.neck = build_neck(bev_neck_conf)\n self.neck.init_weights()\n del self.trunk.maxpool\n self.gaussian_overlap = gaussian_overlap\n self.min_radius = min_radius\n self.train_cfg = train_cfg\n self.test_cfg = test_cfg\n\n @autocast(False)\n def forward(self, x, times=None):\n \"\"\"Forward pass.\n\n Args:\n x (list[torch.Tensor]): Multi-level features, e.g.,\n features 
produced by FPN.\n\n Returns:\n tuple(list[dict]): Output results for tasks.\n \"\"\"\n if times is not None:\n t1 = torch.cuda.Event(enable_timing=True)\n t2 = torch.cuda.Event(enable_timing=True)\n t3 = torch.cuda.Event(enable_timing=True)\n t1.record()\n torch.cuda.synchronize()\n\n # FPN\n trunk_outs = [x]\n if self.trunk.deep_stem:\n x = self.trunk.stem(x)\n else:\n x = self.trunk.conv1(x)\n x = self.trunk.norm1(x)\n x = self.trunk.relu(x)\n for i, layer_name in enumerate(self.trunk.res_layers):\n res_layer = getattr(self.trunk, layer_name)\n x = res_layer(x)\n if i in self.trunk.out_indices:\n trunk_outs.append(x)\n fpn_output = self.neck(trunk_outs)\n\n if times is not None:\n t2.record()\n torch.cuda.synchronize()\n times['head_backbone'].append(t1.elapsed_time(t2))\n\n ret_values = super().forward(fpn_output)\n\n if times is not None:\n t3.record()\n torch.cuda.synchronize()\n times['head_head'].append(t2.elapsed_time(t3))\n times['head'].append(t1.elapsed_time(t3))\n\n return ret_values, times\n\n def get_targets_single(self, gt_bboxes_3d, gt_labels_3d):\n \"\"\"Generate training targets for a single sample.\n\n Args:\n gt_bboxes_3d (:obj:`LiDARInstance3DBoxes`): Ground truth gt boxes.\n gt_labels_3d (torch.Tensor): Labels of boxes.\n\n Returns:\n tuple[list[torch.Tensor]]: Tuple of target including \\\n the following results in order.\n\n - list[torch.Tensor]: Heatmap scores.\n - list[torch.Tensor]: Ground truth boxes.\n - list[torch.Tensor]: Indexes indicating the position \\\n of the valid boxes.\n - list[torch.Tensor]: Masks indicating which boxes \\\n are valid.\n \"\"\"\n max_objs = self.train_cfg['max_objs'] * self.train_cfg['dense_reg']\n grid_size = torch.tensor(self.train_cfg['grid_size'])\n pc_range = torch.tensor(self.train_cfg['point_cloud_range'])\n voxel_size = torch.tensor(self.train_cfg['voxel_size'])\n\n feature_map_size = grid_size[:2] // self.train_cfg['out_size_factor']\n\n # reorganize the gt_dict by tasks\n task_masks = []\n flag = 0\n for class_name in self.class_names:\n task_masks.append([\n torch.where(gt_labels_3d == class_name.index(i) + flag)\n for i in class_name\n ])\n flag += len(class_name)\n\n task_boxes = []\n task_classes = []\n flag2 = 0\n for idx, mask in enumerate(task_masks):\n task_box = []\n task_class = []\n for m in mask:\n task_box.append(gt_bboxes_3d[m])\n # 0 is background for each task, so we need to add 1 here.\n task_class.append(gt_labels_3d[m] + 1 - flag2)\n task_boxes.append(\n torch.cat(task_box, axis=0).to(gt_bboxes_3d.device))\n task_classes.append(\n torch.cat(task_class).long().to(gt_bboxes_3d.device))\n flag2 += len(mask)\n draw_gaussian = draw_heatmap_gaussian\n heatmaps, anno_boxes, inds, masks = [], [], [], []\n\n for idx, task_head in enumerate(self.task_heads):\n heatmap = gt_bboxes_3d.new_zeros(\n (len(self.class_names[idx]), feature_map_size[1],\n feature_map_size[0]),\n device='cuda')\n\n anno_box = gt_bboxes_3d.new_zeros((max_objs, 10),\n dtype=torch.float32,\n device='cuda')\n\n ind = gt_labels_3d.new_zeros((max_objs),\n dtype=torch.int64,\n device='cuda')\n mask = gt_bboxes_3d.new_zeros((max_objs),\n dtype=torch.uint8,\n device='cuda')\n\n num_objs = min(task_boxes[idx].shape[0], max_objs)\n\n for k in range(num_objs):\n cls_id = task_classes[idx][k] - 1\n\n width = task_boxes[idx][k][3]\n length = task_boxes[idx][k][4]\n width = width / voxel_size[0] / self.train_cfg[\n 'out_size_factor']\n length = length / voxel_size[1] / self.train_cfg[\n 'out_size_factor']\n\n if width > 0 and length > 0:\n radius = 
gaussian_radius(\n (length, width),\n min_overlap=self.train_cfg['gaussian_overlap'])\n radius = max(self.train_cfg['min_radius'], int(radius))\n\n # be really careful for the coordinate system of\n # your box annotation.\n x, y, z = task_boxes[idx][k][0], task_boxes[idx][k][\n 1], task_boxes[idx][k][2]\n\n coor_x = (\n x - pc_range[0]\n ) / voxel_size[0] / self.train_cfg['out_size_factor']\n coor_y = (\n y - pc_range[1]\n ) / voxel_size[1] / self.train_cfg['out_size_factor']\n\n center = torch.tensor([coor_x, coor_y],\n dtype=torch.float32,\n device='cuda')\n center_int = center.to(torch.int32)\n\n # throw out not in range objects to avoid out of array\n # area when creating the heatmap\n if not (0 <= center_int[0] < feature_map_size[0]\n and 0 <= center_int[1] < feature_map_size[1]):\n continue\n\n draw_gaussian(heatmap[cls_id], center_int, radius)\n\n new_idx = k\n x, y = center_int[0], center_int[1]\n\n assert y * feature_map_size[0] + x < feature_map_size[\n 0] * feature_map_size[1]\n\n ind[new_idx] = y * feature_map_size[0] + x\n mask[new_idx] = 1\n\n vx, vy = task_boxes[idx][k][7:]\n rot = task_boxes[idx][k][6]\n box_dim = task_boxes[idx][k][3:6]\n if self.norm_bbox:\n box_dim = box_dim.log()\n anno_box[new_idx] = torch.cat([\n center - torch.tensor([x, y], device='cuda'),\n z.unsqueeze(0),\n box_dim,\n torch.sin(rot).unsqueeze(0),\n torch.cos(rot).unsqueeze(0),\n vx.unsqueeze(0),\n vy.unsqueeze(0),\n ])\n\n heatmaps.append(heatmap)\n anno_boxes.append(anno_box)\n masks.append(mask)\n inds.append(ind)\n return heatmaps, anno_boxes, inds, masks\n\n def loss(self, targets, preds_dicts, **kwargs):\n \"\"\"Loss function for BEVDepthHead.\n\n Args:\n gt_bboxes_3d (list[:obj:`LiDARInstance3DBoxes`]): Ground\n truth gt boxes.\n gt_labels_3d (list[torch.Tensor]): Labels of boxes.\n preds_dicts (dict): Output of forward function.\n\n Returns:\n dict[str:torch.Tensor]: Loss of heatmap and bbox of each task.\n \"\"\"\n heatmaps, anno_boxes, inds, masks = targets\n return_loss = 0\n return_loss_heatmap, return_loss_bbox = 0, 0\n for task_id, preds_dict in enumerate(preds_dicts):\n # heatmap focal loss\n preds_dict[0]['heatmap'] = clip_sigmoid(preds_dict[0]['heatmap'])\n num_pos = heatmaps[task_id].eq(1).float().sum().item()\n cls_avg_factor = torch.clamp(reduce_mean(\n heatmaps[task_id].new_tensor(num_pos)),\n min=1).item()\n loss_heatmap = self.loss_cls(preds_dict[0]['heatmap'],\n heatmaps[task_id],\n avg_factor=cls_avg_factor)\n target_box = anno_boxes[task_id]\n # reconstruct the anno_box from multiple reg heads\n preds_dict[0]['anno_box'] = torch.cat(\n (\n preds_dict[0]['reg'],\n preds_dict[0]['height'],\n preds_dict[0]['dim'],\n preds_dict[0]['rot'],\n preds_dict[0]['vel'],\n ),\n dim=1,\n )\n\n # Regression loss for dimension, offset, height, rotation\n num = masks[task_id].float().sum()\n ind = inds[task_id]\n pred = preds_dict[0]['anno_box'].permute(0, 2, 3, 1).contiguous()\n pred = pred.view(pred.size(0), -1, pred.size(3))\n pred = self._gather_feat(pred, ind)\n mask = masks[task_id].unsqueeze(2).expand_as(target_box).float()\n num = torch.clamp(reduce_mean(target_box.new_tensor(num)),\n min=1e-4).item()\n isnotnan = (~torch.isnan(target_box)).float()\n mask *= isnotnan\n code_weights = self.train_cfg['code_weights']\n bbox_weights = mask * mask.new_tensor(code_weights)\n loss_bbox = self.loss_bbox(pred,\n target_box,\n bbox_weights,\n avg_factor=num)\n return_loss += loss_bbox\n return_loss += loss_heatmap\n return_loss_bbox += loss_bbox\n return_loss_heatmap += loss_heatmap\n return 
return_loss, return_loss_heatmap, return_loss_bbox" } ]
import mmcv

from models.base_bev_depth import BaseBEVDepth
from layers.backbones.rvt_lss_fpn import RVTLSSFPN
from layers.backbones.pts_backbone import PtsBackbone
from layers.fuser.multimodal_feature_aggregation import MFAFuser
from layers.heads.bev_depth_head_det import BEVDepthHead
11,809
logger = mmcv.utils.get_logger('mmdet')
logger.setLevel('WARNING')

__all__ = ['CameraRadarNetDet']


class CameraRadarNetDet(BaseBEVDepth):
    """Source code of `CRN`, `https://arxiv.org/abs/2304.00670`.

    Args:
        backbone_img_conf (dict): Config of image backbone.
        backbone_pts_conf (dict): Config of point backbone.
        fuser_conf (dict): Config of BEV feature fuser.
        head_conf (dict): Config of head.
    """

    def __init__(self, backbone_img_conf, backbone_pts_conf, fuser_conf, head_conf):
        super(BaseBEVDepth, self).__init__()
        self.backbone_img = RVTLSSFPN(**backbone_img_conf)
logger = mmcv.utils.get_logger('mmdet')
logger.setLevel('WARNING')

__all__ = ['CameraRadarNetDet']


class CameraRadarNetDet(BaseBEVDepth):
    """Source code of `CRN`, `https://arxiv.org/abs/2304.00670`.

    Args:
        backbone_img_conf (dict): Config of image backbone.
        backbone_pts_conf (dict): Config of point backbone.
        fuser_conf (dict): Config of BEV feature fuser.
        head_conf (dict): Config of head.
    """

    def __init__(self, backbone_img_conf, backbone_pts_conf, fuser_conf, head_conf):
        super(BaseBEVDepth, self).__init__()
        self.backbone_img = RVTLSSFPN(**backbone_img_conf)
self.backbone_pts = PtsBackbone(**backbone_pts_conf)
2
2023-12-06 14:57:49+00:00
16k
LIU-Yuxin/SyncMVD
src/pipeline.py
[ { "identifier": "UVProjection", "path": "src/renderer/project.py", "snippet": "class UVProjection():\n\tdef __init__(self, texture_size=96, render_size=64, sampling_mode=\"nearest\", channels=3, device=None):\n\t\tself.channels = channels\n\t\tself.device = device or torch.device(\"cpu\")\n\t\tself.lights = AmbientLights(ambient_color=((1.0,)*channels,), device=self.device)\n\t\tself.target_size = (texture_size,texture_size)\n\t\tself.render_size = render_size\n\t\tself.sampling_mode = sampling_mode\n\n\n\t# Load obj mesh, rescale the mesh to fit into the bounding box\n\tdef load_mesh(self, mesh_path, scale_factor=2.0, auto_center=True, autouv=False):\n\t\tmesh = load_objs_as_meshes([mesh_path], device=self.device)\n\t\tif auto_center:\n\t\t\tverts = mesh.verts_packed()\n\t\t\tmax_bb = (verts - 0).max(0)[0]\n\t\t\tmin_bb = (verts - 0).min(0)[0]\n\t\t\tscale = (max_bb - min_bb).max()/2\n\t\t\tcenter = (max_bb+min_bb) /2\n\t\t\tmesh.offset_verts_(-center)\n\t\t\tmesh.scale_verts_((scale_factor / float(scale)))\t\t\n\t\telse:\n\t\t\tmesh.scale_verts_((scale_factor))\n\n\t\tif autouv or (mesh.textures is None):\n\t\t\tmesh = self.uv_unwrap(mesh)\n\t\tself.mesh = mesh\n\n\n\tdef load_glb_mesh(self, mesh_path, scale_factor=2.0, auto_center=True, autouv=False):\n\t\tfrom pytorch3d.io.experimental_gltf_io import MeshGlbFormat\n\t\tio = IO()\n\t\tio.register_meshes_format(MeshGlbFormat())\n\t\twith open(mesh_path, \"rb\") as f:\n\t\t\tmesh = io.load_mesh(f, include_textures=True, device=self.device)\n\t\tif auto_center:\n\t\t\tverts = mesh.verts_packed()\n\t\t\tmax_bb = (verts - 0).max(0)[0]\n\t\t\tmin_bb = (verts - 0).min(0)[0]\n\t\t\tscale = (max_bb - min_bb).max()/2 \n\t\t\tcenter = (max_bb+min_bb) /2\n\t\t\tmesh.offset_verts_(-center)\n\t\t\tmesh.scale_verts_((scale_factor / float(scale)))\n\t\telse:\n\t\t\tmesh.scale_verts_((scale_factor))\n\t\tif autouv or (mesh.textures is None):\n\t\t\tmesh = self.uv_unwrap(mesh)\n\t\tself.mesh = mesh\n\n\n\t# Save obj mesh\n\tdef save_mesh(self, mesh_path, texture):\n\t\tsave_obj(mesh_path, \n\t\t\t\tself.mesh.verts_list()[0],\n\t\t\t\tself.mesh.faces_list()[0],\n\t\t\t\tverts_uvs= self.mesh.textures.verts_uvs_list()[0],\n\t\t\t\tfaces_uvs= self.mesh.textures.faces_uvs_list()[0],\n\t\t\t\ttexture_map=texture)\n\n\t# Code referred to TEXTure code (https://github.com/TEXTurePaper/TEXTurePaper.git)\n\tdef uv_unwrap(self, mesh):\n\t\tverts_list = mesh.verts_list()[0]\n\t\tfaces_list = mesh.faces_list()[0]\n\n\n\t\timport xatlas\n\t\timport numpy as np\n\t\tv_np = verts_list.cpu().numpy()\n\t\tf_np = faces_list.int().cpu().numpy()\n\t\tatlas = xatlas.Atlas()\n\t\tatlas.add_mesh(v_np, f_np)\n\t\tchart_options = xatlas.ChartOptions()\n\t\tchart_options.max_iterations = 4\n\t\tatlas.generate(chart_options=chart_options)\n\t\tvmapping, ft_np, vt_np = atlas[0] # [N], [M, 3], [N, 2]\n\n\t\tvt = torch.from_numpy(vt_np.astype(np.float32)).type(verts_list.dtype).to(mesh.device)\n\t\tft = torch.from_numpy(ft_np.astype(np.int64)).type(faces_list.dtype).to(mesh.device)\n\n\t\tnew_map = torch.zeros(self.target_size+(self.channels,), device=mesh.device)\n\t\tnew_tex = TexturesUV(\n\t\t\t[new_map], \n\t\t\t[ft], \n\t\t\t[vt], \n\t\t\tsampling_mode=self.sampling_mode\n\t\t\t)\n\n\t\tmesh.textures = new_tex\n\t\treturn mesh\n\n\n\t'''\n\t\tA functions that disconnect faces in the mesh according to\n\t\tits UV seams. 
The number of vertices are made equal to the\n\t\tnumber of unique vertices its UV layout, while the faces list\n\t\tis intact.\n\t'''\n\tdef disconnect_faces(self):\n\t\tmesh = self.mesh\n\t\tverts_list = mesh.verts_list()\n\t\tfaces_list = mesh.faces_list()\n\t\tverts_uvs_list = mesh.textures.verts_uvs_list()\n\t\tfaces_uvs_list = mesh.textures.faces_uvs_list()\n\t\tpacked_list = [v[f] for v,f in zip(verts_list, faces_list)]\n\t\tverts_disconnect_list = [\n\t\t\ttorch.zeros(\n\t\t\t\t(verts_uvs_list[i].shape[0], 3), \n\t\t\t\tdtype=verts_list[0].dtype, \n\t\t\t\tdevice=verts_list[0].device\n\t\t\t) \n\t\t\tfor i in range(len(verts_list))]\n\t\tfor i in range(len(verts_list)):\n\t\t\tverts_disconnect_list[i][faces_uvs_list] = packed_list[i]\n\t\tassert not mesh.has_verts_normals(), \"Not implemented for vertex normals\"\n\t\tself.mesh_d = Meshes(verts_disconnect_list, faces_uvs_list, mesh.textures)\n\t\treturn self.mesh_d\n\n\n\t'''\n\t\tA function that construct a temp mesh for back-projection.\n\t\tTake a disconnected mesh and a rasterizer, the function calculates\n\t\tthe projected faces as the UV, as use its original UV with pseudo\n\t\tz value as world space geometry.\n\t'''\n\tdef construct_uv_mesh(self):\n\t\tmesh = self.mesh_d\n\t\tverts_list = mesh.verts_list()\n\t\tverts_uvs_list = mesh.textures.verts_uvs_list()\n\t\t# faces_list = [torch.flip(faces, [-1]) for faces in mesh.faces_list()]\n\t\tnew_verts_list = []\n\t\tfor i, (verts, verts_uv) in enumerate(zip(verts_list, verts_uvs_list)):\n\t\t\tverts = verts.clone()\n\t\t\tverts_uv = verts_uv.clone()\n\t\t\tverts[...,0:2] = verts_uv[...,:]\n\t\t\tverts = (verts - 0.5) * 2\n\t\t\tverts[...,2] *= 1\n\t\t\tnew_verts_list.append(verts)\n\t\ttextures_uv = mesh.textures.clone()\n\t\tself.mesh_uv = Meshes(new_verts_list, mesh.faces_list(), textures_uv)\n\t\treturn self.mesh_uv\n\n\n\t# Set texture for the current mesh.\n\tdef set_texture_map(self, texture):\n\t\tnew_map = texture.permute(1, 2, 0)\n\t\tnew_map = new_map.to(self.device)\n\t\tnew_tex = TexturesUV(\n\t\t\t[new_map], \n\t\t\tself.mesh.textures.faces_uvs_padded(), \n\t\t\tself.mesh.textures.verts_uvs_padded(), \n\t\t\tsampling_mode=self.sampling_mode\n\t\t\t)\n\t\tself.mesh.textures = new_tex\n\n\n\t# Set the initial normal noise texture\n\t# No generator here for replication of the experiment result. 
Add one as you wish\n\tdef set_noise_texture(self, channels=None):\n\t\tif not channels:\n\t\t\tchannels = self.channels\n\t\tnoise_texture = torch.normal(0, 1, (channels,) + self.target_size, device=self.device)\n\t\tself.set_texture_map(noise_texture)\n\t\treturn noise_texture\n\n\n\t# Set the cameras given the camera poses and centers\n\tdef set_cameras(self, camera_poses, centers=None, camera_distance=2.7, scale=None):\n\t\telev = torch.FloatTensor([pose[0] for pose in camera_poses])\n\t\tazim = torch.FloatTensor([pose[1] for pose in camera_poses])\n\t\tR, T = look_at_view_transform(dist=camera_distance, elev=elev, azim=azim, at=centers or ((0,0,0),))\n\t\tself.cameras = FoVOrthographicCameras(device=self.device, R=R, T=T, scale_xyz=scale or ((1,1,1),))\n\n\n\t# Set all necessary internal data for rendering and texture baking\n\t# Can be used to refresh after changing camera positions\n\tdef set_cameras_and_render_settings(self, camera_poses, centers=None, camera_distance=2.7, render_size=None, scale=None):\n\t\tself.set_cameras(camera_poses, centers, camera_distance, scale=scale)\n\t\tif render_size is None:\n\t\t\trender_size = self.render_size\n\t\tif not hasattr(self, \"renderer\"):\n\t\t\tself.setup_renderer(size=render_size)\n\t\tif not hasattr(self, \"mesh_d\"):\n\t\t\tself.disconnect_faces()\n\t\tif not hasattr(self, \"mesh_uv\"):\n\t\t\tself.construct_uv_mesh()\n\t\tself.calculate_tex_gradient()\n\t\tself.calculate_visible_triangle_mask()\n\t\t_,_,_,cos_maps,_, _ = self.render_geometry()\n\t\tself.calculate_cos_angle_weights(cos_maps)\n\n\n\t# Setup renderers for rendering\n\t# max faces per bin set to 30000 to avoid overflow in many test cases.\n\t# You can use default value to let pytorch3d handle that for you.\n\tdef setup_renderer(self, size=64, blur=0.0, face_per_pix=1, perspective_correct=False, channels=None):\n\t\tif not channels:\n\t\t\tchannels = self.channels\n\n\t\tself.raster_settings = RasterizationSettings(\n\t\t\timage_size=size, \n\t\t\tblur_radius=blur, \n\t\t\tfaces_per_pixel=face_per_pix,\n\t\t\tperspective_correct=perspective_correct,\n\t\t\tcull_backfaces=True,\n\t\t\tmax_faces_per_bin=30000,\n\t\t)\n\n\t\tself.renderer = MeshRenderer(\n\t\t\trasterizer=MeshRasterizer(\n\t\t\t\tcameras=self.cameras, \n\t\t\t\traster_settings=self.raster_settings,\n\n\t\t\t),\n\t\t\tshader=HardNChannelFlatShader(\n\t\t\t\tdevice=self.device, \n\t\t\t\tcameras=self.cameras,\n\t\t\t\tlights=self.lights,\n\t\t\t\tchannels=channels\n\t\t\t\t# materials=materials\n\t\t\t)\n\t\t)\n\n\n\t# Bake screen-space cosine weights to UV space\n\t# May be able to reimplement using the generic \"bake_texture\" function, but it works so leave it here for now\n\[email protected]_grad()\n\tdef calculate_cos_angle_weights(self, cos_angles, fill=True, channels=None):\n\t\tif not channels:\n\t\t\tchannels = self.channels\n\t\tcos_maps = []\n\t\ttmp_mesh = self.mesh.clone()\n\t\tfor i in range(len(self.cameras)):\n\t\t\t\n\t\t\tzero_map = torch.zeros(self.target_size+(channels,), device=self.device, requires_grad=True)\n\t\t\toptimizer = torch.optim.SGD([zero_map], lr=1, momentum=0)\n\t\t\toptimizer.zero_grad()\n\t\t\tzero_tex = TexturesUV([zero_map], self.mesh.textures.faces_uvs_padded(), self.mesh.textures.verts_uvs_padded(), sampling_mode=self.sampling_mode)\n\t\t\ttmp_mesh.textures = zero_tex\n\n\t\t\timages_predicted = self.renderer(tmp_mesh, cameras=self.cameras[i], lights=self.lights)\n\n\t\t\tloss = torch.sum((cos_angles[i,:,:,0:1]**1 - 
images_predicted)**2)\n\t\t\tloss.backward()\n\t\t\toptimizer.step()\n\n\t\t\tif fill:\n\t\t\t\tzero_map = zero_map.detach() / (self.gradient_maps[i] + 1E-8)\n\t\t\t\tzero_map = voronoi_solve(zero_map, self.gradient_maps[i][...,0])\n\t\t\telse:\n\t\t\t\tzero_map = zero_map.detach() / (self.gradient_maps[i]+1E-8)\n\t\t\tcos_maps.append(zero_map)\n\t\tself.cos_maps = cos_maps\n\n\t\t\n\t# Get geometric info from fragment shader\n\t# Can be used for generating conditioning image and cosine weights\n\t# Returns some information you may not need, remember to release them for memory saving\n\[email protected]_grad()\n\tdef render_geometry(self, image_size=None):\n\t\tif image_size:\n\t\t\tsize = self.renderer.rasterizer.raster_settings.image_size\n\t\t\tself.renderer.rasterizer.raster_settings.image_size = image_size\n\t\tshader = self.renderer.shader\n\t\tself.renderer.shader = HardGeometryShader(device=self.device, cameras=self.cameras[0], lights=self.lights)\n\t\ttmp_mesh = self.mesh.clone()\n\t\t\n\t\tverts, normals, depths, cos_angles, texels, fragments = self.renderer(tmp_mesh.extend(len(self.cameras)), cameras=self.cameras, lights=self.lights)\n\t\tself.renderer.shader = shader\n\n\t\tif image_size:\n\t\t\tself.renderer.rasterizer.raster_settings.image_size = size\n\n\t\treturn verts, normals, depths, cos_angles, texels, fragments\n\n\n\t# Project world normal to view space and normalize\n\[email protected]_grad()\n\tdef decode_view_normal(self, normals):\n\t\tw2v_mat = self.cameras.get_full_projection_transform()\n\t\tnormals_view = torch.clone(normals)[:,:,:,0:3]\n\t\tnormals_view = normals_view.reshape(normals_view.shape[0], -1, 3)\n\t\tnormals_view = w2v_mat.transform_normals(normals_view)\n\t\tnormals_view = normals_view.reshape(normals.shape[0:3]+(3,))\n\t\tnormals_view[:,:,:,2] *= -1\n\t\tnormals = (normals_view[...,0:3]+1) * normals[...,3:] / 2 + torch.FloatTensor(((((0.5,0.5,1))))).to(self.device) * (1 - normals[...,3:])\n\t\t# normals = torch.cat([normal for normal in normals], dim=1)\n\t\tnormals = normals.clamp(0, 1)\n\t\treturn normals\n\n\n\t# Normalize absolute depth to inverse depth\n\[email protected]_grad()\n\tdef decode_normalized_depth(self, depths, batched_norm=False):\n\t\tview_z, mask = depths.unbind(-1)\n\t\tview_z = view_z * mask + 100 * (1-mask)\n\t\tinv_z = 1 / view_z\n\t\tinv_z_min = inv_z * mask + 100 * (1-mask)\n\t\tif not batched_norm:\n\t\t\tmax_ = torch.max(inv_z, 1, keepdim=True)\n\t\t\tmax_ = torch.max(max_[0], 2, keepdim=True)[0]\n\n\t\t\tmin_ = torch.min(inv_z_min, 1, keepdim=True)\n\t\t\tmin_ = torch.min(min_[0], 2, keepdim=True)[0]\n\t\telse:\n\t\t\tmax_ = torch.max(inv_z)\n\t\t\tmin_ = torch.min(inv_z_min)\n\t\tinv_z = (inv_z - min_) / (max_ - min_)\n\t\tinv_z = inv_z.clamp(0,1)\n\t\tinv_z = inv_z[...,None].repeat(1,1,1,3)\n\n\t\treturn inv_z\n\n\n\t# Multiple screen pixels could pass gradient to a same texel\n\t# We can precalculate this gradient strength and use it to normalize gradients when we bake textures\n\[email protected]_grad()\n\tdef calculate_tex_gradient(self, channels=None):\n\t\tif not channels:\n\t\t\tchannels = self.channels\n\t\ttmp_mesh = self.mesh.clone()\n\t\tgradient_maps = []\n\t\tfor i in range(len(self.cameras)):\n\t\t\tzero_map = torch.zeros(self.target_size+(channels,), device=self.device, requires_grad=True)\n\t\t\toptimizer = torch.optim.SGD([zero_map], lr=1, momentum=0)\n\t\t\toptimizer.zero_grad()\n\t\t\tzero_tex = TexturesUV([zero_map], self.mesh.textures.faces_uvs_padded(), self.mesh.textures.verts_uvs_padded(), 
sampling_mode=self.sampling_mode)\n\t\t\ttmp_mesh.textures = zero_tex\n\t\t\timages_predicted = self.renderer(tmp_mesh, cameras=self.cameras[i], lights=self.lights)\n\t\t\tloss = torch.sum((1 - images_predicted)**2)\n\t\t\tloss.backward()\n\t\t\toptimizer.step()\n\n\t\t\tgradient_maps.append(zero_map.detach())\n\n\t\tself.gradient_maps = gradient_maps\n\n\n\t# Get the UV space masks of triangles visible in each view\n\t# First get face ids from each view, then filter pixels on UV space to generate masks\n\[email protected]_grad()\n\tdef calculate_visible_triangle_mask(self, channels=None, image_size=(512,512)):\n\t\tif not channels:\n\t\t\tchannels = self.channels\n\n\t\tpix2face_list = []\n\t\tfor i in range(len(self.cameras)):\n\t\t\tself.renderer.rasterizer.raster_settings.image_size=image_size\n\t\t\tpix2face = self.renderer.rasterizer(self.mesh_d, cameras=self.cameras[i]).pix_to_face\n\t\t\tself.renderer.rasterizer.raster_settings.image_size=self.render_size\n\t\t\tpix2face_list.append(pix2face)\n\n\t\tif not hasattr(self, \"mesh_uv\"):\n\t\t\tself.construct_uv_mesh()\n\n\t\traster_settings = RasterizationSettings(\n\t\t\timage_size=self.target_size, \n\t\t\tblur_radius=0, \n\t\t\tfaces_per_pixel=1,\n\t\t\tperspective_correct=False,\n\t\t\tcull_backfaces=False,\n\t\t\tmax_faces_per_bin=30000,\n\t\t\t)\n\n\t\tR, T = look_at_view_transform(dist=2, elev=0, azim=0)\n\t\tcameras = FoVOrthographicCameras(device=self.device, R=R, T=T)\n\n\t\trasterizer=MeshRasterizer(\n\t\t\tcameras=cameras, \n\t\t\traster_settings=raster_settings\n\t\t)\n\t\tuv_pix2face = rasterizer(self.mesh_uv).pix_to_face\n\n\t\tvisible_triangles = []\n\t\tfor i in range(len(pix2face_list)):\n\t\t\tvalid_faceid = torch.unique(pix2face_list[i])\n\t\t\tvalid_faceid = valid_faceid[1:] if valid_faceid[0]==-1 else valid_faceid\n\t\t\tmask = torch.isin(uv_pix2face[0], valid_faceid, assume_unique=False)\n\t\t\t# uv_pix2face[0][~mask] = -1\n\t\t\ttriangle_mask = torch.ones(self.target_size+(1,), device=self.device)\n\t\t\ttriangle_mask[~mask] = 0\n\t\t\t\n\t\t\ttriangle_mask[:,1:][triangle_mask[:,:-1] > 0] = 1\n\t\t\ttriangle_mask[:,:-1][triangle_mask[:,1:] > 0] = 1\n\t\t\ttriangle_mask[1:,:][triangle_mask[:-1,:] > 0] = 1\n\t\t\ttriangle_mask[:-1,:][triangle_mask[1:,:] > 0] = 1\n\t\t\tvisible_triangles.append(triangle_mask)\n\n\t\tself.visible_triangles = visible_triangles\n\n\n\n\t# Render the current mesh and texture from current cameras\n\tdef render_textured_views(self):\n\t\tmeshes = self.mesh.extend(len(self.cameras))\n\t\timages_predicted = self.renderer(meshes, cameras=self.cameras, lights=self.lights)\n\n\t\treturn [image.permute(2, 0, 1) for image in images_predicted]\n\n\n\t# Bake views into a texture\n\t# First bake into individual textures then combine based on cosine weight\n\[email protected]_grad()\n\tdef bake_texture(self, views=None, main_views=[], cos_weighted=True, channels=None, exp=None, noisy=False, generator=None):\n\t\tif not exp:\n\t\t\texp=1\n\t\tif not channels:\n\t\t\tchannels = self.channels\n\t\tviews = [view.permute(1, 2, 0) for view in views]\n\n\t\ttmp_mesh = self.mesh\n\t\tbake_maps = [torch.zeros(self.target_size+(views[0].shape[2],), device=self.device, requires_grad=True) for view in views]\n\t\toptimizer = torch.optim.SGD(bake_maps, lr=1, momentum=0)\n\t\toptimizer.zero_grad()\n\t\tloss = 0\n\t\tfor i in range(len(self.cameras)): \n\t\t\tbake_tex = TexturesUV([bake_maps[i]], tmp_mesh.textures.faces_uvs_padded(), tmp_mesh.textures.verts_uvs_padded(), 
sampling_mode=self.sampling_mode)\n\t\t\ttmp_mesh.textures = bake_tex\n\t\t\timages_predicted = self.renderer(tmp_mesh, cameras=self.cameras[i], lights=self.lights, device=self.device)\n\t\t\tpredicted_rgb = images_predicted[..., :-1]\n\t\t\tloss += (((predicted_rgb[...] - views[i]))**2).sum()\n\t\tloss.backward(retain_graph=False)\n\t\toptimizer.step()\n\n\t\ttotal_weights = 0\n\t\tbaked = 0\n\t\tfor i in range(len(bake_maps)):\n\t\t\tnormalized_baked_map = bake_maps[i].detach() / (self.gradient_maps[i] + 1E-8)\n\t\t\tbake_map = voronoi_solve(normalized_baked_map, self.gradient_maps[i][...,0])\n\t\t\tweight = self.visible_triangles[i] * (self.cos_maps[i]) ** exp\n\t\t\tif noisy:\n\t\t\t\tnoise = torch.rand(weight.shape[:-1]+(1,), generator=generator).type(weight.dtype).to(weight.device)\n\t\t\t\tweight *= noise\n\t\t\ttotal_weights += weight\n\t\t\tbaked += bake_map * weight\n\t\tbaked /= total_weights + 1E-8\n\t\tbaked = voronoi_solve(baked, total_weights[...,0])\n\n\t\tbake_tex = TexturesUV([baked], tmp_mesh.textures.faces_uvs_padded(), tmp_mesh.textures.verts_uvs_padded(), sampling_mode=self.sampling_mode)\n\t\ttmp_mesh.textures = bake_tex\n\t\textended_mesh = tmp_mesh.extend(len(self.cameras))\n\t\timages_predicted = self.renderer(extended_mesh, cameras=self.cameras, lights=self.lights)\n\t\tlearned_views = [image.permute(2, 0, 1) for image in images_predicted]\n\n\t\treturn learned_views, baked.permute(2, 0, 1), total_weights.permute(2, 0, 1)\n\n\n\t# Move the internel data to a specific device\n\tdef to(self, device):\n\t\tfor mesh_name in [\"mesh\", \"mesh_d\", \"mesh_uv\"]:\n\t\t\tif hasattr(self, mesh_name):\n\t\t\t\tmesh = getattr(self, mesh_name)\n\t\t\t\tsetattr(self, mesh_name, mesh.to(device))\n\t\tfor list_name in [\"visible_triangles\", \"visibility_maps\", \"cos_maps\"]:\n\t\t\tif hasattr(self, list_name):\n\t\t\t\tmap_list = getattr(self, list_name)\n\t\t\t\tfor i in range(len(map_list)):\n\t\t\t\t\tmap_list[i] = map_list[i].to(device)" }, { "identifier": "SamplewiseAttnProcessor2_0", "path": "src/syncmvd/attention.py", "snippet": "class SamplewiseAttnProcessor2_0:\n\tr\"\"\"\n\tProcessor for implementing scaled dot-product attention (enabled by default if you're using PyTorch 2.0).\n\t\"\"\"\n\n\tdef __init__(self, custom_attention_mask=None, ref_attention_mask=None, ref_weight=0):\n\t\tif not hasattr(F, \"scaled_dot_product_attention\"):\n\t\t\traise ImportError(\"AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.\")\n\t\tself.ref_weight = ref_weight\n\t\tself.custom_attention_mask = custom_attention_mask\n\t\tself.ref_attention_mask = ref_attention_mask\n\n\tdef __call__(\n\t\tself,\n\t\tattn: Attention,\n\t\thidden_states,\n\t\tencoder_hidden_states=None,\n\t\tattention_mask=None,\n\t\ttemb=None,\n\t):\n\n\t\tresidual = hidden_states\n\n\t\tif attn.spatial_norm is not None:\n\t\t\thidden_states = attn.spatial_norm(hidden_states, temb)\n\n\t\tinput_ndim = hidden_states.ndim\n\n\n\t\tif input_ndim == 4:\n\t\t\tbatch_size, channel, height, width = hidden_states.shape\n\t\t\thidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)\n\n\t\tbatch_size, sequence_length, channels = (\n\t\t\thidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape\n\t\t)\n\n\t\tif attention_mask is not None:\n\t\t\tattention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)\n\t\t\t# scaled_dot_product_attention expects attention_mask shape to be\n\t\t\t# (batch, heads, 
source_length, target_length)\n\t\t\tattention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1])\n\n\t\tif attn.group_norm is not None:\n\t\t\thidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)\n\n\t\tquery = attn.to_q(hidden_states)\n\n\t\tif encoder_hidden_states is None:\n\t\t\tencoder_hidden_states = torch.clone(hidden_states)\n\t\telif attn.norm_cross:\n\t\t\tencoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)\n\n\n\t\t'''\n\t\t\treshape encoder hidden state to a single batch\n\t\t'''\n\t\tencoder_hidden_states_f = encoder_hidden_states.reshape(1, -1, channels)\n\n\n\n\t\tkey = attn.to_k(encoder_hidden_states)\n\t\tvalue = attn.to_v(encoder_hidden_states)\n\n\t\tinner_dim = key.shape[-1]\n\t\thead_dim = inner_dim // attn.heads\n\n\t\tquery = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)\n\n\t\t'''\n\t\t\teach time select 1 sample from q and compute with concated kv\n\t\t\tconcat result hidden states afterwards\n\t\t'''\n\t\thidden_state_list = []\n\n\t\tfor b_idx in range(batch_size):\n\t\t\t\n\t\t\tquery_b = query[b_idx:b_idx+1]\n\n\t\t\tif self.ref_weight > 0 or True:\n\t\t\t\tkey_ref = key.clone()\n\t\t\t\tvalue_ref = value.clone()\n\n\t\t\t\tkeys = [key_ref[view_idx] for view_idx in self.ref_attention_mask]\n\t\t\t\tvalues = [value_ref[view_idx] for view_idx in self.ref_attention_mask]\n\n\t\t\t\tkey_ref = torch.stack(keys)\n\t\t\t\tkey_ref = key_ref.view(key_ref.shape[0], -1, attn.heads, head_dim).permute(2, 0, 1, 3).contiguous().view(attn.heads, -1, head_dim)[None,...]\n\n\t\t\t\tvalue_ref = torch.stack(values)\n\t\t\t\tvalue_ref = value_ref.view(value_ref.shape[0], -1, attn.heads, head_dim).permute(2, 0, 1, 3).contiguous().view(attn.heads, -1, head_dim)[None,...]\n\n\t\t\tkey_a = key.clone()\n\t\t\tvalue_a = value.clone()\n\n\t\t\t# key_a = key_a[max(0,b_idx-1):min(b_idx+1,batch_size)+1]\n\n\t\t\tkeys = [key_a[view_idx] for view_idx in self.custom_attention_mask[b_idx]]\n\t\t\tvalues = [value_a[view_idx] for view_idx in self.custom_attention_mask[b_idx]]\n\n\t\t\t# keys = (key_a[b_idx-1], key_a[b_idx], key_a[(b_idx+1)%batch_size])\n\t\t\t# values = (value_a[b_idx-1], value_a[b_idx], value_a[(b_idx+1)%batch_size])\n\t\t\t\n\t\t\t# if b_idx not in [0, batch_size-1, batch_size//2]:\n\t\t\t# \tkeys = keys + (key_a[min(batch_size-2, 2*(batch_size//2) - b_idx)],)\n\t\t\t# \tvalues = values + (value_a[min(batch_size-2, 2*(batch_size//2) - b_idx)],)\n\t\t\tkey_a = torch.stack(keys)\n\t\t\tkey_a = key_a.view(key_a.shape[0], -1, attn.heads, head_dim).permute(2, 0, 1, 3).contiguous().view(attn.heads, -1, head_dim)[None,...]\n\n\t\t\t# value_a = value_a[max(0,b_idx-1):min(b_idx+1,batch_size)+1]\n\t\t\tvalue_a = torch.stack(values)\n\t\t\tvalue_a = value_a.view(value_a.shape[0], -1, attn.heads, head_dim).permute(2, 0, 1, 3).contiguous().view(attn.heads, -1, head_dim)[None,...]\n\n\t\t\thidden_state_a = F.scaled_dot_product_attention(\n\t\t\t\tquery_b, key_a, value_a, attn_mask=None, dropout_p=0.0, is_causal=False\n\t\t\t)\n\n\t\t\tif self.ref_weight > 0 or True:\n\t\t\t\thidden_state_ref = F.scaled_dot_product_attention(\n\t\t\t\t\tquery_b, key_ref, value_ref, attn_mask=None, dropout_p=0.0, is_causal=False\n\t\t\t\t)\n\n\t\t\t\thidden_state = (hidden_state_a + self.ref_weight * hidden_state_ref) / (1+self.ref_weight)\n\t\t\telse:\n\t\t\t\thidden_state = hidden_state_a\n\n\t\t\t# the output of sdp = (batch, num_heads, seq_len, head_dim)\n\t\t\t# TODO: add support for attn.scale when we 
move to Torch 2.1\n\t\t\t\n\t\t\thidden_state_list.append(hidden_state)\n\n\t\thidden_states = torch.cat(hidden_state_list)\n\n\n\t\thidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)\n\t\thidden_states = hidden_states.to(query.dtype)\n\n\t\t# linear proj\n\t\thidden_states = attn.to_out[0](hidden_states)\n\t\t# dropout\n\t\thidden_states = attn.to_out[1](hidden_states)\n\n\t\tif input_ndim == 4:\n\t\t\thidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)\n\n\t\tif attn.residual_connection:\n\t\t\thidden_states = hidden_states + residual\n\n\t\thidden_states = hidden_states / attn.rescale_output_factor\n\n\t\treturn hidden_states" }, { "identifier": "replace_attention_processors", "path": "src/syncmvd/attention.py", "snippet": "def replace_attention_processors(module, processor, attention_mask=None, ref_attention_mask=None, ref_weight=0):\n\tattn_processors = module.attn_processors\n\tfor k, v in attn_processors.items():\n\t\tif \"attn1\" in k:\n\t\t\tattn_processors[k] = processor(custom_attention_mask=attention_mask, ref_attention_mask=ref_attention_mask, ref_weight=ref_weight)\n\tmodule.set_attn_processor(attn_processors)" }, { "identifier": "step_tex", "path": "src/syncmvd/step.py", "snippet": "@torch.no_grad()\ndef step_tex(\n\t\tscheduler,\n\t\tuvp,\n\t\tmodel_output: torch.FloatTensor,\n\t\ttimestep: int,\n\t\tsample: torch.FloatTensor,\n\t\ttexture: None,\n\t\tgenerator=None,\n\t\treturn_dict: bool = True,\n\t\tguidance_scale = 1,\n\t\tmain_views = [],\n\t\thires_original_views = True,\n\t\texp=None,\n\t\tcos_weighted=True\n):\n\tt = timestep\n\n\tprev_t = scheduler.previous_timestep(t)\n\n\tif model_output.shape[1] == sample.shape[1] * 2 and scheduler.variance_type in [\"learned\", \"learned_range\"]:\n\t\tmodel_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)\n\telse:\n\t\tpredicted_variance = None\n\n\t# 1. compute alphas, betas\n\talpha_prod_t = scheduler.alphas_cumprod[t]\n\talpha_prod_t_prev = scheduler.alphas_cumprod[prev_t] if prev_t >= 0 else scheduler.one\n\tbeta_prod_t = 1 - alpha_prod_t\n\tbeta_prod_t_prev = 1 - alpha_prod_t_prev\n\tcurrent_alpha_t = alpha_prod_t / alpha_prod_t_prev\n\tcurrent_beta_t = 1 - current_alpha_t\n\n\t# 2. compute predicted original sample from predicted noise also called\n\t# \"predicted x_0\" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf\n\tif scheduler.config.prediction_type == \"epsilon\":\n\t\tpred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5)\n\telif scheduler.config.prediction_type == \"sample\":\n\t\tpred_original_sample = model_output\n\telif scheduler.config.prediction_type == \"v_prediction\":\n\t\tpred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output\n\telse:\n\t\traise ValueError(\n\t\t\tf\"prediction_type given as {scheduler.config.prediction_type} must be one of `epsilon`, `sample` or\"\n\t\t\t\" `v_prediction` for the DDPMScheduler.\"\n\t\t)\n\n\t# 3. Clip or threshold \"predicted x_0\"\n\tif scheduler.config.thresholding:\n\t\tpred_original_sample = scheduler._threshold_sample(pred_original_sample)\n\telif scheduler.config.clip_sample:\n\t\tpred_original_sample = pred_original_sample.clamp(\n\t\t\t-scheduler.config.clip_sample_range, scheduler.config.clip_sample_range\n\t\t)\n\n\t# 4. 
Compute coefficients for pred_original_sample x_0 and current sample x_t\n\t# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf\n\tpred_original_sample_coeff = (alpha_prod_t_prev ** (0.5) * current_beta_t) / beta_prod_t\n\tcurrent_sample_coeff = current_alpha_t ** (0.5) * beta_prod_t_prev / beta_prod_t\n\n\t'''\n\t\tAdd multidiffusion here\n\t'''\n\n\tif texture is None:\n\t\tsample_views = [view for view in sample]\n\t\tsample_views, texture, _ = uvp.bake_texture(views=sample_views, main_views=main_views, exp=exp)\n\t\tsample_views = torch.stack(sample_views, axis=0)[:,:-1,...]\n\n\n\toriginal_views = [view for view in pred_original_sample]\n\toriginal_views, original_tex, visibility_weights = uvp.bake_texture(views=original_views, main_views=main_views, exp=exp)\n\tuvp.set_texture_map(original_tex)\n\toriginal_views = uvp.render_textured_views()\n\toriginal_views = torch.stack(original_views, axis=0)[:,:-1,...]\n\n\t# 5. Compute predicted previous sample µ_t\n\t# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf\n\t# pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample\n\tprev_tex = pred_original_sample_coeff * original_tex + current_sample_coeff * texture\n\n\t# 6. Add noise\n\tvariance = 0\n\n\tif predicted_variance is not None:\n\t\tvariance_views = [view for view in predicted_variance]\n\t\tvariance_views, variance_tex, visibility_weights = uvp.bake_texture(views=variance_views, main_views=main_views, cos_weighted=cos_weighted, exp=exp)\n\t\tvariance_views = torch.stack(variance_views, axis=0)[:,:-1,...]\n\telse:\n\t\tvariance_tex = None\n\n\tif t > 0:\n\t\tdevice = texture.device\n\t\tvariance_noise = randn_tensor(\n\t\t\ttexture.shape, generator=generator, device=device, dtype=texture.dtype\n\t\t)\n\t\tif scheduler.variance_type == \"fixed_small_log\":\n\t\t\tvariance = scheduler._get_variance(t, predicted_variance=variance_tex) * variance_noise\n\t\telif scheduler.variance_type == \"learned_range\":\n\t\t\tvariance = scheduler._get_variance(t, predicted_variance=variance_tex)\n\t\t\tvariance = torch.exp(0.5 * variance) * variance_noise\n\t\telse:\n\t\t\tvariance = (scheduler._get_variance(t, predicted_variance=variance_tex) ** 0.5) * variance_noise\n\n\tprev_tex = prev_tex + variance\n\n\tuvp.set_texture_map(prev_tex)\n\tprev_views = uvp.render_textured_views()\n\tpred_prev_sample = torch.clone(sample)\n\tfor i, view in enumerate(prev_views):\n\t\tpred_prev_sample[i] = view[:-1]\n\tmasks = [view[-1:] for view in prev_views]\n\n\treturn {\"prev_sample\": pred_prev_sample, \"pred_original_sample\":pred_original_sample, \"prev_tex\": prev_tex}\n\n\tif not return_dict:\n\t\treturn pred_prev_sample, pred_original_sample\n\tpass" } ]
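The `bake_texture` snippet in the context above projects each view into UV space and then combines the per-view bakes using visibility masks and cosine maps raised to `exp`. A minimal standalone sketch of just that weighted-blend step is shown below; the helper name `blend_view_bakes` and the `(H, W, C)` / `(H, W, 1)` tensor shapes are assumptions for illustration, and the real method additionally normalizes each bake by its gradient map and runs `voronoi_solve` on the result.

```python
import torch

def blend_view_bakes(bake_maps, cos_maps, visibility_masks, exp=1, eps=1e-8):
    """Cosine-weighted average of per-view texture bakes (illustrative sketch).

    bake_maps: list of (H, W, C) tensors, one baked texture per camera view.
    cos_maps / visibility_masks: lists of (H, W, 1) weighting tensors.
    """
    total_weight = torch.zeros_like(visibility_masks[0])
    blended = torch.zeros_like(bake_maps[0])
    for bake, cos, vis in zip(bake_maps, cos_maps, visibility_masks):
        weight = vis * cos ** exp          # down-weight texels seen at grazing angles
        total_weight = total_weight + weight
        blended = blended + bake * weight
    return blended / (total_weight + eps)  # eps avoids division by zero on unseen texels
```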
import os
import numpy as np
import math
import random
import torch
import select
import sys
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from PIL import Image
from IPython.display import display
from torch import functional as F
from torch import nn
from torchvision.transforms import Compose, Resize, GaussianBlur, InterpolationMode
from diffusers import StableDiffusionControlNetPipeline, ControlNetModel
from diffusers import DDPMScheduler, DDIMScheduler, UniPCMultistepScheduler
from diffusers.models import AutoencoderKL, ControlNetModel, UNet2DConditionModel
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import (
    BaseOutput,
    randn_tensor,
    numpy_to_pil,
    pt_to_pil,
    # make_image_grid,
    is_accelerate_available,
    is_accelerate_version,
    is_compiled_module,
    logging,
    randn_tensor,
    replace_example_docstring
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.models.attention_processor import Attention, AttentionProcessor
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from .renderer.project import UVProjection as UVP
from .syncmvd.attention import SamplewiseAttnProcessor2_0, replace_attention_processors
from .syncmvd.prompt import *
from .syncmvd.step import step_tex
from .utils import *
11,978
# 7.1 Create tensor stating which controlnets to keep controlnet_keep = [] for i in range(len(timesteps)): keeps = [ 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) for s, e in zip(control_guidance_start, control_guidance_end) ] controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps) # 8. Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order intermediate_results = [] background_colors = [random.choice(list(color_constants.keys())) for i in range(len(self.camera_poses))] dbres_sizes_list = [] mbres_size_list = [] with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): # mix prompt embeds according to azim angle positive_prompt_embeds = [azim_prompt(prompt_embed_dict, pose) for pose in self.camera_poses] positive_prompt_embeds = torch.stack(positive_prompt_embeds, axis=0) negative_prompt_embeds = [azim_neg_prompt(negative_prompt_embed_dict, pose) for pose in self.camera_poses] negative_prompt_embeds = torch.stack(negative_prompt_embeds, axis=0) # expand the latents if we are doing classifier free guidance latent_model_input = self.scheduler.scale_model_input(latents, t) ''' Use groups to manage prompt and results Make sure negative and positive prompt does not perform attention together ''' prompt_embeds_groups = {"positive": positive_prompt_embeds} result_groups = {} if do_classifier_free_guidance: prompt_embeds_groups["negative"] = negative_prompt_embeds for prompt_tag, prompt_embeds in prompt_embeds_groups.items(): if prompt_tag == "positive" or not guess_mode: # controlnet(s) inference control_model_input = latent_model_input controlnet_prompt_embeds = prompt_embeds if isinstance(controlnet_keep[i], list): cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])] else: controlnet_cond_scale = controlnet_conditioning_scale if isinstance(controlnet_cond_scale, list): controlnet_cond_scale = controlnet_cond_scale[0] cond_scale = controlnet_cond_scale * controlnet_keep[i] # Split into micro-batches according to group meta info # Ignore this feature for now down_block_res_samples_list = [] mid_block_res_sample_list = [] model_input_batches = [torch.index_select(control_model_input, dim=0, index=torch.tensor(meta[0], device=self._execution_device)) for meta in self.group_metas] prompt_embeds_batches = [torch.index_select(controlnet_prompt_embeds, dim=0, index=torch.tensor(meta[0], device=self._execution_device)) for meta in self.group_metas] conditioning_images_batches = [torch.index_select(conditioning_images, dim=0, index=torch.tensor(meta[0], device=self._execution_device)) for meta in self.group_metas] for model_input_batch ,prompt_embeds_batch, conditioning_images_batch \ in zip (model_input_batches, prompt_embeds_batches, conditioning_images_batches): down_block_res_samples, mid_block_res_sample = self.controlnet( model_input_batch, t, encoder_hidden_states=prompt_embeds_batch, controlnet_cond=conditioning_images_batch, conditioning_scale=cond_scale, guess_mode=guess_mode, return_dict=False, ) down_block_res_samples_list.append(down_block_res_samples) mid_block_res_sample_list.append(mid_block_res_sample) ''' For the ith element of down_block_res_samples, concat the ith element of all mini-batch result ''' model_input_batches = prompt_embeds_batches = conditioning_images_batches = None if guess_mode: for dbres in down_block_res_samples_list: dbres_sizes = [] for res in dbres: dbres_sizes.append(res.shape) 
dbres_sizes_list.append(dbres_sizes) for mbres in mid_block_res_sample_list: mbres_size_list.append(mbres.shape) else: # Infered ControlNet only for the conditional batch. # To apply the output of ControlNet to both the unconditional and conditional batches, # add 0 to the unconditional batch to keep it unchanged. # We copy the tensor shapes from a conditional batch down_block_res_samples_list = [] mid_block_res_sample_list = [] for dbres_sizes in dbres_sizes_list: down_block_res_samples_list.append([torch.zeros(shape, device=self._execution_device, dtype=latents.dtype) for shape in dbres_sizes]) for mbres in mbres_size_list: mid_block_res_sample_list.append(torch.zeros(mbres, device=self._execution_device, dtype=latents.dtype)) dbres_sizes_list = [] mbres_size_list = [] ''' predict the noise residual, split into mini-batches Downblock res samples has n samples, we split each sample into m batches and re group them into m lists of n mini batch samples. ''' noise_pred_list = [] model_input_batches = [torch.index_select(latent_model_input, dim=0, index=torch.tensor(meta[0], device=self._execution_device)) for meta in self.group_metas] prompt_embeds_batches = [torch.index_select(prompt_embeds, dim=0, index=torch.tensor(meta[0], device=self._execution_device)) for meta in self.group_metas] for model_input_batch, prompt_embeds_batch, down_block_res_samples_batch, mid_block_res_sample_batch, meta \ in zip(model_input_batches, prompt_embeds_batches, down_block_res_samples_list, mid_block_res_sample_list, self.group_metas): if t > num_timesteps * (1- ref_attention_end):
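The cropped code above precomputes `controlnet_keep`, a per-timestep flag that keeps the ControlNet active only while the normalized step index lies inside the `[control_guidance_start, control_guidance_end]` window. A small sketch of that scheduling rule, with an illustrative function name and assuming scalar start/end values:

```python
def controlnet_keep_schedule(num_steps: int, guidance_start: float, guidance_end: float) -> list[float]:
    """Per-step ControlNet on/off flags, mirroring the list comprehension above."""
    keeps = []
    for i in range(num_steps):
        inside = not (i / num_steps < guidance_start or (i + 1) / num_steps > guidance_end)
        keeps.append(1.0 if inside else 0.0)
    return keeps

# With 10 steps and guidance applied over the first half only:
print(controlnet_keep_schedule(10, 0.0, 0.5))
# [1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0]
```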
if torch.cuda.is_available(): device = torch.device("cuda:0") torch.cuda.set_device(device) else: device = torch.device("cpu") # Background colors color_constants = {"black": [-1, -1, -1], "white": [1, 1, 1], "maroon": [0, -1, -1], "red": [1, -1, -1], "olive": [0, 0, -1], "yellow": [1, 1, -1], "green": [-1, 0, -1], "lime": [-1 ,1, -1], "teal": [-1, 0, 0], "aqua": [-1, 1, 1], "navy": [-1, -1, 0], "blue": [-1, -1, 1], "purple": [0, -1 , 0], "fuchsia": [1, -1, 1]} color_names = list(color_constants.keys()) # Used to generate depth or normal conditioning images @torch.no_grad() def get_conditioning_images(uvp, output_size, render_size=512, blur_filter=5, cond_type="normal"): verts, normals, depths, cos_maps, texels, fragments = uvp.render_geometry(image_size=render_size) masks = normals[...,3][:,None,...] masks = Resize((output_size//8,)*2, antialias=True)(masks) normals_transforms = Compose([ Resize((output_size,)*2, interpolation=InterpolationMode.BILINEAR, antialias=True), GaussianBlur(blur_filter, blur_filter//3+1)] ) if cond_type == "normal": view_normals = uvp.decode_view_normal(normals).permute(0,3,1,2) *2 - 1 conditional_images = normals_transforms(view_normals) # Some problem here, depth controlnet don't work when depth is normalized # But it do generate using the unnormalized form as below elif cond_type == "depth": view_depths = uvp.decode_normalized_depth(depths).permute(0,3,1,2) conditional_images = normals_transforms(view_depths) return conditional_images, masks # Revert time 0 background to time t to composite with time t foreground @torch.no_grad() def composite_rendered_view(scheduler, backgrounds, foregrounds, masks, t): composited_images = [] for i, (background, foreground, mask) in enumerate(zip(backgrounds, foregrounds, masks)): if t > 0: alphas_cumprod = scheduler.alphas_cumprod[t] noise = torch.normal(0, 1, background.shape, device=background.device) background = (1-alphas_cumprod) * noise + alphas_cumprod * background composited = foreground * mask + background * (1-mask) composited_images.append(composited) composited_tensor = torch.stack(composited_images) return composited_tensor # Split into micro-batches to use less memory in each unet prediction # But need more investigation on reducing memory usage # Assume it has no possitive effect and use a large "max_batch_size" to skip splitting def split_groups(attention_mask, max_batch_size, ref_view=[]): group_sets = [] group = set() ref_group = set() idx = 0 while idx < len(attention_mask): new_group = group | set([idx]) new_ref_group = (ref_group | set(attention_mask[idx] + ref_view)) - new_group if len(new_group) + len(new_ref_group) <= max_batch_size: group = new_group ref_group = new_ref_group idx += 1 else: assert len(group) != 0, "Cannot fit into a group" group_sets.append((group, ref_group)) group = set() ref_group = set() if len(group)>0: group_sets.append((group, ref_group)) group_metas = [] for group, ref_group in group_sets: in_mask = sorted(list(group | ref_group)) out_mask = [] group_attention_masks = [] for idx in in_mask: if idx in group: out_mask.append(in_mask.index(idx)) group_attention_masks.append([in_mask.index(idxx) for idxx in attention_mask[idx] if idxx in in_mask]) ref_attention_mask = [in_mask.index(idx) for idx in ref_view] group_metas.append([in_mask, out_mask, group_attention_masks, ref_attention_mask]) return group_metas ''' MultiView-Diffusion Stable-Diffusion Pipeline Modified from a Diffusers StableDiffusionControlNetPipeline Just mimic the pipeline structure but did not follow any API 
convention ''' class StableSyncMVDPipeline(StableDiffusionControlNetPipeline): def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel]], scheduler: KarrasDiffusionSchedulers, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool = False, ): super().__init__( vae, text_encoder, tokenizer, unet, controlnet, scheduler, safety_checker, feature_extractor, requires_safety_checker ) self.scheduler = DDPMScheduler.from_config(self.scheduler.config) self.model_cpu_offload_seq = "vae->text_encoder->unet->vae" self.enable_model_cpu_offload() self.enable_vae_slicing() self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) def initialize_pipeline( self, mesh_path=None, mesh_transform=None, mesh_autouv=None, camera_azims=None, camera_centers=None, top_cameras=True, ref_views=[], latent_size=None, render_rgb_size=None, texture_size=None, texture_rgb_size=None, max_batch_size=24, logging_config=None, ): # Make output dir output_dir = logging_config["output_dir"] self.result_dir = f"{output_dir}/results" self.intermediate_dir = f"{output_dir}/intermediate" dirs = [output_dir, self.result_dir, self.intermediate_dir] for dir_ in dirs: if not os.path.isdir(dir_): os.mkdir(dir_) # Define the cameras for rendering self.camera_poses = [] self.attention_mask=[] self.centers = camera_centers cam_count = len(camera_azims) front_view_diff = 360 back_view_diff = 360 front_view_idx = 0 back_view_idx = 0 for i, azim in enumerate(camera_azims): if azim < 0: azim += 360 self.camera_poses.append((0, azim)) self.attention_mask.append([(cam_count+i-1)%cam_count, i, (i+1)%cam_count]) if abs(azim) < front_view_diff: front_view_idx = i front_view_diff = abs(azim) if abs(azim - 180) < back_view_diff: back_view_idx = i back_view_diff = abs(azim - 180) # Add two additional cameras for painting the top surfaces if top_cameras: self.camera_poses.append((30, 0)) self.camera_poses.append((30, 180)) self.attention_mask.append([front_view_idx, cam_count]) self.attention_mask.append([back_view_idx, cam_count+1]) # Reference view for attention (all views attend the the views in this list) # A forward view will be used if not specified if len(ref_views) == 0: ref_views = [front_view_idx] # Calculate in-group attention mask self.group_metas = split_groups(self.attention_mask, max_batch_size, ref_views) # Set up pytorch3D for projection between screen space and UV space # uvp is for latent and uvp_rgb for rgb color self.uvp = UVP(texture_size=texture_size, render_size=latent_size, sampling_mode="nearest", channels=4, device=self._execution_device) if mesh_path.lower().endswith(".obj"): self.uvp.load_mesh(mesh_path, scale_factor=mesh_transform["scale"] or 1, autouv=mesh_autouv) elif mesh_path.lower().endswith(".glb"): self.uvp.load_glb_mesh(mesh_path, scale_factor=mesh_transform["scale"] or 1, autouv=mesh_autouv) else: assert False, "The mesh file format is not supported. Use .obj or .glb." 
self.uvp.set_cameras_and_render_settings(self.camera_poses, centers=camera_centers, camera_distance=4.0) self.uvp_rgb = UVP(texture_size=texture_rgb_size, render_size=render_rgb_size, sampling_mode="nearest", channels=3, device=self._execution_device) self.uvp_rgb.mesh = self.uvp.mesh.clone() self.uvp_rgb.set_cameras_and_render_settings(self.camera_poses, centers=camera_centers, camera_distance=4.0) _,_,_,cos_maps,_, _ = self.uvp_rgb.render_geometry() self.uvp_rgb.calculate_cos_angle_weights(cos_maps, fill=False) # Save some VRAM del _, cos_maps self.uvp.to("cpu") self.uvp_rgb.to("cpu") color_images = torch.FloatTensor([color_constants[name] for name in color_names]).reshape(-1,3,1,1).to(dtype=self.text_encoder.dtype, device=self._execution_device) color_images = torch.ones( (1,1,latent_size*8, latent_size*8), device=self._execution_device, dtype=self.text_encoder.dtype ) * color_images color_images *= ((0.5*color_images)+0.5) color_latents = encode_latents(self.vae, color_images) self.color_latents = {color[0]:color[1] for color in zip(color_names, [latent for latent in color_latents])} self.vae = self.vae.to("cpu") print("Done Initialization") ''' Modified from a StableDiffusion ControlNet pipeline Multi ControlNet not supported yet ''' @torch.no_grad() def __call__( self, prompt: str = None, height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: str = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, return_dict: bool = False, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, max_batch_size=6, cross_attention_kwargs: Optional[Dict[str, Any]] = None, controlnet_guess_mode: bool = False, controlnet_conditioning_scale: Union[float, List[float]] = 0.7, controlnet_conditioning_end_scale: Union[float, List[float]] = 0.9, control_guidance_start: Union[float, List[float]] = 0.0, control_guidance_end: Union[float, List[float]] = 0.99, guidance_rescale: float = 0.0, mesh_path: str = None, mesh_transform: dict = None, mesh_autouv = False, camera_azims=None, camera_centers=None, top_cameras=True, texture_size = 1536, render_rgb_size=1024, texture_rgb_size = 1024, multiview_diffusion_end=0.8, shuffle_background_change=0.4, shuffle_background_end=0.99, #0.4 use_directional_prompt=True, ref_attention_end=0.2, logging_config=None, cond_type="depth", ): # Setup pipeline settings self.initialize_pipeline( mesh_path=mesh_path, mesh_transform=mesh_transform, mesh_autouv=mesh_autouv, camera_azims=camera_azims, camera_centers=camera_centers, top_cameras=top_cameras, ref_views=[], latent_size=height//8, render_rgb_size=render_rgb_size, texture_size=texture_size, texture_rgb_size=texture_rgb_size, max_batch_size=max_batch_size, logging_config=logging_config ) num_timesteps = self.scheduler.config.num_train_timesteps initial_controlnet_conditioning_scale = controlnet_conditioning_scale log_interval = logging_config.get("log_interval", 10) view_fast_preview = logging_config.get("view_fast_preview", True) tex_fast_preview = logging_config.get("tex_fast_preview", True) controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet # align format for control guidance if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list): control_guidance_start = len(control_guidance_end) * [control_guidance_start] elif not 
isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list): control_guidance_end = len(control_guidance_start) * [control_guidance_end] elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list): # mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1 mult = 1 control_guidance_start, control_guidance_end = mult * [control_guidance_start], mult * [ control_guidance_end ] # 0. Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor # 1. Check inputs. Raise error if not correct self.check_inputs( prompt, torch.zeros((1,3,height,width), device=self._execution_device), callback_steps, negative_prompt, None, None, controlnet_conditioning_scale, control_guidance_start, control_guidance_end, ) # 2. Define call parameters if prompt is not None and isinstance(prompt, list): assert len(prompt) == 1 and len(negative_prompt) == 1, "Only implemented for 1 (negative) prompt" assert num_images_per_prompt == 1, "Only implemented for 1 image per-prompt" batch_size = len(self.uvp.cameras) device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float): # controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets) global_pool_conditions = ( controlnet.config.global_pool_conditions if isinstance(controlnet, ControlNetModel) else controlnet.nets[0].config.global_pool_conditions ) guess_mode = controlnet_guess_mode or global_pool_conditions # 3. Encode input prompt prompt, negative_prompt = prepare_directional_prompt(prompt, negative_prompt) text_encoder_lora_scale = ( cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None ) prompt_embeds = self._encode_prompt( prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, prompt_embeds=None, negative_prompt_embeds=None, lora_scale=text_encoder_lora_scale, ) negative_prompt_embeds, prompt_embeds = torch.chunk(prompt_embeds, 2) prompt_embed_dict = dict(zip(direction_names, [emb for emb in prompt_embeds])) negative_prompt_embed_dict = dict(zip(direction_names, [emb for emb in negative_prompt_embeds])) # (4. Prepare image) This pipeline use internal conditional images from Pytorch3D self.uvp.to(self._execution_device) conditioning_images, masks = get_conditioning_images(self.uvp, height, cond_type=cond_type) conditioning_images = conditioning_images.type(prompt_embeds.dtype) cond = (conditioning_images/2+0.5).permute(0,2,3,1).cpu().numpy() cond = np.concatenate([img for img in cond], axis=1) numpy_to_pil(cond)[0].save(f"{self.intermediate_dir}/cond.jpg") # 5. Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # 6. 
Prepare latent variables num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents( batch_size, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, None, ) latent_tex = self.uvp.set_noise_texture() noise_views = self.uvp.render_textured_views() foregrounds = [view[:-1] for view in noise_views] masks = [view[-1:] for view in noise_views] composited_tensor = composite_rendered_view(self.scheduler, latents, foregrounds, masks, timesteps[0]+1) latents = composited_tensor.type(latents.dtype) self.uvp.to("cpu") # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 7.1 Create tensor stating which controlnets to keep controlnet_keep = [] for i in range(len(timesteps)): keeps = [ 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) for s, e in zip(control_guidance_start, control_guidance_end) ] controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps) # 8. Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order intermediate_results = [] background_colors = [random.choice(list(color_constants.keys())) for i in range(len(self.camera_poses))] dbres_sizes_list = [] mbres_size_list = [] with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): # mix prompt embeds according to azim angle positive_prompt_embeds = [azim_prompt(prompt_embed_dict, pose) for pose in self.camera_poses] positive_prompt_embeds = torch.stack(positive_prompt_embeds, axis=0) negative_prompt_embeds = [azim_neg_prompt(negative_prompt_embed_dict, pose) for pose in self.camera_poses] negative_prompt_embeds = torch.stack(negative_prompt_embeds, axis=0) # expand the latents if we are doing classifier free guidance latent_model_input = self.scheduler.scale_model_input(latents, t) ''' Use groups to manage prompt and results Make sure negative and positive prompt does not perform attention together ''' prompt_embeds_groups = {"positive": positive_prompt_embeds} result_groups = {} if do_classifier_free_guidance: prompt_embeds_groups["negative"] = negative_prompt_embeds for prompt_tag, prompt_embeds in prompt_embeds_groups.items(): if prompt_tag == "positive" or not guess_mode: # controlnet(s) inference control_model_input = latent_model_input controlnet_prompt_embeds = prompt_embeds if isinstance(controlnet_keep[i], list): cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])] else: controlnet_cond_scale = controlnet_conditioning_scale if isinstance(controlnet_cond_scale, list): controlnet_cond_scale = controlnet_cond_scale[0] cond_scale = controlnet_cond_scale * controlnet_keep[i] # Split into micro-batches according to group meta info # Ignore this feature for now down_block_res_samples_list = [] mid_block_res_sample_list = [] model_input_batches = [torch.index_select(control_model_input, dim=0, index=torch.tensor(meta[0], device=self._execution_device)) for meta in self.group_metas] prompt_embeds_batches = [torch.index_select(controlnet_prompt_embeds, dim=0, index=torch.tensor(meta[0], device=self._execution_device)) for meta in self.group_metas] conditioning_images_batches = [torch.index_select(conditioning_images, dim=0, index=torch.tensor(meta[0], device=self._execution_device)) for meta in self.group_metas] for model_input_batch ,prompt_embeds_batch, conditioning_images_batch \ in zip (model_input_batches, 
prompt_embeds_batches, conditioning_images_batches): down_block_res_samples, mid_block_res_sample = self.controlnet( model_input_batch, t, encoder_hidden_states=prompt_embeds_batch, controlnet_cond=conditioning_images_batch, conditioning_scale=cond_scale, guess_mode=guess_mode, return_dict=False, ) down_block_res_samples_list.append(down_block_res_samples) mid_block_res_sample_list.append(mid_block_res_sample) ''' For the ith element of down_block_res_samples, concat the ith element of all mini-batch result ''' model_input_batches = prompt_embeds_batches = conditioning_images_batches = None if guess_mode: for dbres in down_block_res_samples_list: dbres_sizes = [] for res in dbres: dbres_sizes.append(res.shape) dbres_sizes_list.append(dbres_sizes) for mbres in mid_block_res_sample_list: mbres_size_list.append(mbres.shape) else: # Infered ControlNet only for the conditional batch. # To apply the output of ControlNet to both the unconditional and conditional batches, # add 0 to the unconditional batch to keep it unchanged. # We copy the tensor shapes from a conditional batch down_block_res_samples_list = [] mid_block_res_sample_list = [] for dbres_sizes in dbres_sizes_list: down_block_res_samples_list.append([torch.zeros(shape, device=self._execution_device, dtype=latents.dtype) for shape in dbres_sizes]) for mbres in mbres_size_list: mid_block_res_sample_list.append(torch.zeros(mbres, device=self._execution_device, dtype=latents.dtype)) dbres_sizes_list = [] mbres_size_list = [] ''' predict the noise residual, split into mini-batches Downblock res samples has n samples, we split each sample into m batches and re group them into m lists of n mini batch samples. ''' noise_pred_list = [] model_input_batches = [torch.index_select(latent_model_input, dim=0, index=torch.tensor(meta[0], device=self._execution_device)) for meta in self.group_metas] prompt_embeds_batches = [torch.index_select(prompt_embeds, dim=0, index=torch.tensor(meta[0], device=self._execution_device)) for meta in self.group_metas] for model_input_batch, prompt_embeds_batch, down_block_res_samples_batch, mid_block_res_sample_batch, meta \ in zip(model_input_batches, prompt_embeds_batches, down_block_res_samples_list, mid_block_res_sample_list, self.group_metas): if t > num_timesteps * (1- ref_attention_end):
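In the full pipeline code above, `initialize_pipeline` assigns every camera view an attention mask containing itself and its two azimuthal neighbours; `split_groups` later remaps these indices per micro-batch. A minimal sketch of that neighbour indexing, with an assumed helper name:

```python
def neighbor_attention_masks(num_views: int) -> list[list[int]]:
    """Each view attends to its previous neighbour, itself, and its next neighbour."""
    return [[(num_views + i - 1) % num_views, i, (i + 1) % num_views]
            for i in range(num_views)]

# Four views placed around the object:
print(neighbor_attention_masks(4))  # [[3, 0, 1], [0, 1, 2], [1, 2, 3], [2, 3, 0]]
```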
replace_attention_processors(self.unet, SamplewiseAttnProcessor2_0, attention_mask=meta[2], ref_attention_mask=meta[3], ref_weight=1)
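This gold next line installs `SamplewiseAttnProcessor2_0` on the UNet's self-attention layers while `t > num_timesteps * (1 - ref_attention_end)`. Inside that processor, each view's query attends once to its neighbour views and once to the reference views, and the two outputs are averaged using `ref_weight`. A distilled sketch of just that blending step is given below; the function name is illustrative, and the per-sample batching, key/value concatenation, and head reshaping from the real processor are omitted.

```python
import torch.nn.functional as F

def reference_weighted_attention(query, key_nbr, value_nbr, key_ref, value_ref, ref_weight=1.0):
    """Blend neighbour-view and reference-view attention outputs (illustrative sketch).

    All tensors are assumed to already be shaped (batch, heads, seq_len, head_dim).
    """
    out_nbr = F.scaled_dot_product_attention(query, key_nbr, value_nbr)
    out_ref = F.scaled_dot_product_attention(query, key_ref, value_ref)
    # Same weighting as in SamplewiseAttnProcessor2_0: (a + w * ref) / (1 + w)
    return (out_nbr + ref_weight * out_ref) / (1.0 + ref_weight)
```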
1
2023-12-09 03:27:58+00:00
16k
SqueezeBits/owlite
owlite/owlite.py
[ { "identifier": "OWLITE_DEVICE_NAME", "path": "owlite_core/cli/device.py", "snippet": "OWLITE_DEVICE_NAME = CONNECTED_DEVICE[\"device\"] if CONNECTED_DEVICE else None" }, { "identifier": "OWLITE_FRONT_BASE_URL", "path": "owlite_core/constants.py", "snippet": "OWLITE_FRONT_BASE_URL = \"https://owlite.ai\"" }, { "identifier": "OWLITE_REPO_PATH", "path": "owlite_core/constants.py", "snippet": "OWLITE_REPO_PATH = os.path.join(os.getenv(\"OWLITE_REPO_DIR\", os.path.join(os.getcwd(), \"owlite\")))" }, { "identifier": "OWLITE_REPORT_URL", "path": "owlite_core/constants.py", "snippet": "OWLITE_REPORT_URL = \"https://tally.so/r/mOl5Zk\"" }, { "identifier": "OWLITE_SETTINGS", "path": "owlite_core/owlite_settings.py", "snippet": "OWLITE_SETTINGS = OwLiteSettings()" }, { "identifier": "download_trt_engine", "path": "owlite/api/device/devices.py", "snippet": "def download_trt_engine(benchmark_key: str, path_to_save: str) -> None:\n \"\"\"Downloads built TensorRT engine.\n\n Args:\n benchmark_key (str): A key to identify benchmark job.\n path_to_save (str): The path to save downloaded TensorRT engine.\n\n Raises:\n RuntimeError: When device is not set.\n HTTPError: When request was not successful.\n \"\"\"\n device_name = OWLITE_DEVICE_NAME\n if device_name is None:\n log.error(\"Device is not set. Please set device and try again\")\n raise RuntimeError(\"Device not found\")\n\n payload = {\n \"device_name\": device_name,\n \"benchmark_key\": benchmark_key,\n }\n resp = DEVICE_API_BASE.post(\"/devices/trt\", json=payload)\n assert isinstance(resp, dict)\n\n file_url = resp[\"trt_engine_url\"]\n\n download_file_from_url(file_url, path_to_save)" }, { "identifier": "poll_run_benchmark", "path": "owlite/api/device/devices.py", "snippet": "def poll_run_benchmark(project_id: str, benchmark_key: str) -> None:\n \"\"\"Polls for TensorRT benchmark result.\n\n Args:\n project_id (str): The id of a project.\n benchmark_key (str): A key to identify benchmark job.\n\n Raises:\n ValueError: When unexpected signal is caught by SIGINT handler.\n RuntimeError: When error occurred during TensorRT execution.\n \"\"\"\n\n def sigint_handler(sig: signal.Signals, frame: Any) -> None:\n if sig != signal.SIGINT:\n raise ValueError(f\"Unexpected signals: {sig} (frame={frame})\")\n print(\"\")\n log.info(\n f\"Exit from current experiment. \"\n f\"Continue creating config at \"\n f\"{OWLITE_FRONT_BASE_URL}/project/detail/{project_id}\"\n )\n sys.exit(sig)\n\n original_sigint_handler = signal.signal(signal.SIGINT, sigint_handler) # type: ignore\n\n log.info(\"Polling for benchmark result, you are free to CTRL-C away\")\n\n count = 0\n info = get_benchmark_queue_info(benchmark_key)\n benchmark_status = info[\"benchmark_status\"]\n in_progress = (\n BenchmarkStatus.PRE_FETCHING.value,\n BenchmarkStatus.BENCHMARKING.value,\n )\n while True:\n if count % 5 == 0:\n info = get_benchmark_queue_info(benchmark_key)\n new_status = info[\"benchmark_status\"]\n\n if new_status < 0:\n print(\"\")\n log.error(\n \"Runtime error occurred during TensorRT engine execution or benchmark. Please try again. 
\"\n f\"If the problem persists, please report us at {OWLITE_REPORT_URL} for further assistance\"\n )\n raise RuntimeError(\"Benchmarking error\")\n\n if benchmark_status != new_status and new_status in in_progress:\n benchmark_status = new_status\n count = 0\n\n elif new_status == BenchmarkStatus.BENCHMARK_DONE.value:\n print(\"\\nBenchmarking done\")\n signal.signal(signal.SIGINT, original_sigint_handler)\n return\n\n if benchmark_status in in_progress:\n if benchmark_status == BenchmarkStatus.PRE_FETCHING.value and info[\"prefetch\"] is not None:\n message = f\"Your position in the queue: {info['prefetch']} {'. ' * (count % 4)}\"\n\n else:\n dots_before = \".\" * count\n owl_emoji = \"\\U0001F989\"\n dots_after = \".\" * (19 - count)\n\n message = f\"[{dots_before}{owl_emoji}{dots_after}]\"\n\n print(f\"\\r{message:<50}\", end=\"\", flush=True)\n\n count = (count + 1) % 20\n time.sleep(2)" }, { "identifier": "request_trt_benchmark", "path": "owlite/api/device/devices.py", "snippet": "def request_trt_benchmark(benchmark_key: str, bin_path: str) -> None:\n \"\"\"Uploads ONNX weight binary file and request TensorRT benchmark.\n\n Args:\n benchmark_key (str): A key to identify benchmark job.\n bin_path (str): The path of a ONNX weight binary file.\n\n Raises:\n FileNotFoundError: When bin file does not exists at given path.\n ValueError: When device is not set.\n HTTPError: When request was not successful.\n \"\"\"\n\n if not os.path.exists(bin_path):\n log.error(\n f\"Unable to locate the ONNX bin file at the specified path: {bin_path}. \"\n \"Please ensure the file exists and the path is accurate. \"\n \"If the file is missing, recreate the ONNX file and retry\"\n )\n raise FileNotFoundError(\"ONNX bin file not found\")\n\n device_name = OWLITE_DEVICE_NAME\n if device_name is None:\n log.error(\"Connected device not found. 
Please connect device by 'owlite device connect'\")\n raise ValueError(\"Device not found\")\n\n payload = {\n \"device_name\": device_name,\n \"benchmark_key\": benchmark_key,\n }\n\n resp = DEVICE_API_BASE.post(\"/devices/jobs/export\", json=payload)\n assert isinstance(resp, dict)\n\n file_dest_url = resp[\"bin_file_url\"]\n\n file_upload_resp = upload_file_to_url(bin_path, file_dest_url)\n if not file_upload_resp.ok:\n file_upload_resp.raise_for_status()" }, { "identifier": "get_configuration", "path": "owlite/api/dove/doves.py", "snippet": "def get_configuration(\n project_id: str,\n baseline_name: str,\n run_name: str,\n) -> str:\n \"\"\"Gets configuration options to apply.\n\n Args:\n project_id (str): The id of a project.\n baseline_name (str): The name of a baseline.\n run_name (str): The name of a run.\n\n Returns:\n str: The compiled configuration string.\n\n Raises:\n HTTPError: When request was not successful.\n \"\"\"\n payload = {\n \"project_id\": project_id,\n \"baseline_name\": baseline_name,\n \"run_name\": run_name,\n }\n resp = DOVE_API_BASE.post(\"/compile\", json=payload)\n assert isinstance(resp, dict)\n\n return json.dumps(resp)" }, { "identifier": "upload_baseline", "path": "owlite/api/dove/doves.py", "snippet": "def upload_baseline(\n project_id: str,\n baseline_name: str,\n onnx_path: str,\n model: GraphModule,\n) -> None:\n \"\"\"Uploads baseline's onnx proto and graph module.\n\n Args:\n project_id (str): The id of a project.\n baseline_name (str): The name of a baseline.\n onnx_path (str): The path to baseline onnx proto file.\n model (GraphModule): The traced graph module.\n\n Raises:\n TypeError: When the `model` is not an instance of `torch.fx.GraphModule`.\n HTTPError: When the request was not successful.\n \"\"\"\n if isinstance(model, (DataParallel, DistributedDataParallel)):\n _model_type = f\"torch.nn.parallel.{type(model).__name__}\"\n log.error(\n f\"{_model_type} is not supported by upload_baseline, please use 'attribute' module to unwrap model \"\n f\"{_model_type}. 
Try owlite.api.dove.doves.upload_baseline(..., model = model.module)\"\n )\n raise TypeError(f\"{_model_type} is not supported by upload_baseline\")\n if not isinstance(model, GraphModule):\n raise TypeError(f\"model of upload_baseline must be GraphModule, but got {type(model)}\")\n\n proto = onnx.load(onnx_path, load_external_data=False)\n input_shape = json.dumps(extract_input_signature_from_onnx_proto(proto))\n\n payload = {\n \"project_id\": project_id,\n \"baseline_name\": baseline_name,\n \"gm\": serialize(model),\n \"onnx\": base64.b64encode(proto.SerializeToString()),\n \"input_shape\": input_shape,\n }\n\n DOVE_API_BASE.post(\"/upload\", payload)" }, { "identifier": "check_baseline_existence", "path": "owlite/api/main/baselines.py", "snippet": "def check_baseline_existence(project_id: str, baseline_name: str) -> bool:\n \"\"\"Checks if baseline with given name exists at project with given project id.\n\n Args:\n project_id (str): The id of a project.\n baseline_name (str): The name to check.\n\n Returns:\n bool: True if baseline exists in given project, False otherwise.\n\n Raises:\n HTTPError: When request was not successful.\n \"\"\"\n payload = {\n \"project_id\": project_id,\n \"baseline_name\": baseline_name,\n }\n\n try:\n resp = MAIN_API_BASE.post(\"/projects/baselines/check\", json=payload)\n assert isinstance(resp, bool)\n\n return resp\n\n except requests.exceptions.HTTPError as e:\n if e.response is not None and e.response.status_code == 404:\n return False\n\n raise e" }, { "identifier": "create_baseline", "path": "owlite/api/main/baselines.py", "snippet": "def create_baseline(project_id: str, baseline_name: str) -> str:\n \"\"\"Creates a baseline experiment with given baseline name at project with given project id.\n\n Args:\n project_id (str): The id of a project.\n baseline_name (str): The name of a baseline to create.\n\n Returns:\n str: The name of created baseline.\n\n Raises:\n HTTPError: When request was not successful.\n \"\"\"\n payload = {\n \"project_id\": project_id,\n \"baseline_name\": baseline_name,\n }\n\n resp = MAIN_API_BASE.post(\"/projects/baselines\", json=payload)\n assert isinstance(resp, dict)\n\n return resp[\"baseline_name\"]" }, { "identifier": "create_or_load_project", "path": "owlite/api/main/projects.py", "snippet": "def create_or_load_project(project_name: str, description: str = \"\") -> str:\n \"\"\"Creates a project with given name and description and return the id of created project, if\n a project with given name already exists and accessible by current user, return the id of\n existing project.\n\n Args:\n project_name (str): The name of a project.\n description (str): The description of a project. 
Defaults to \"\".\n\n Returns:\n str: The id of a created project.\n\n Raises:\n HTTPError: When request was not successful.\n \"\"\"\n log.debug(f\"Attempt creating project with name {project_name}.\")\n\n payload = {\n \"project_name\": project_name,\n \"description\": description,\n }\n\n try:\n resp = MAIN_API_BASE.post(\"/projects\", json=payload)\n\n assert isinstance(resp, dict) and resp[\"name\"] == project_name\n\n log.info(f\"Created new project '{project_name}'\")\n return resp[\"id\"]\n\n except HTTPError as err:\n if err.response is not None and err.response.status_code == 409:\n # project with given name already was created by user before\n\n data = json.loads(err.response.content)\n project_id = data[\"detail\"]\n\n log.debug(f\"Conflict detected, project with name {project_name} already exists, loading existing project.\")\n log.info(f\"Loaded existing project '{project_name}'\")\n return project_id\n\n raise err" }, { "identifier": "copy_run", "path": "owlite/api/main/runs.py", "snippet": "def copy_run(project_id: str, baseline_name: str, duplicate_from: str, run_name: str) -> str:\n \"\"\"Copies existing experiment and create a new experiment. Compression configuration is also cloned.\n\n Args:\n project_id (str): The id of a project.\n baseline_name (str): The name of a baseline.\n duplicate_from (str): The name of an experiment to clone.\n run_name (str): The name of a new experiment.\n\n Returns:\n str: The name of a created experiment.\n\n Raises:\n HTTPError: When request was not successful.\n \"\"\"\n\n payload = {\n \"project_id\": project_id,\n \"baseline_name\": baseline_name,\n \"run_name\": duplicate_from,\n \"new_run_name\": run_name,\n }\n\n resp = MAIN_API_BASE.post(\"/projects/runs/copy\", json=payload)\n assert isinstance(resp, dict)\n return str(resp[\"name\"])" }, { "identifier": "create_run", "path": "owlite/api/main/runs.py", "snippet": "def create_run(project_id: str, baseline_name: str, run_name: str) -> None:\n \"\"\"Creates an experiment.\n\n Args:\n project_id (str): The id of a project.\n baseline_name (str): The name of a baseline.\n run_name (str): The name of a new experiment.\n\n Raises:\n HTTPError: When request was not successful.\n \"\"\"\n\n payload = {\n \"project_id\": project_id,\n \"baseline_name\": baseline_name,\n \"run_name\": run_name,\n }\n\n res = MAIN_API_BASE.post(\"/projects/runs\", json=payload)\n assert isinstance(res, dict)" }, { "identifier": "get_benchmark_key", "path": "owlite/api/main/runs.py", "snippet": "def get_benchmark_key(project_id: str, baseline_name: str, run_name: str) -> str:\n \"\"\"Gets a key to identify a benchmark job.\n\n Args:\n project_id (str): The id of a project.\n baseline_name (str): The name of a baseline.\n run_name (str): The name of an experiment.\n\n Returns:\n str: A key to identify a benchmark job.\n\n Raises:\n HTTPError: When request was not successful.\n \"\"\"\n\n payload = {\n \"project_id\": project_id,\n \"baseline_name\": baseline_name,\n \"run_name\": run_name,\n }\n\n resp = MAIN_API_BASE.post(\"/projects/runs/keys\", json=payload)\n\n assert isinstance(resp, str)\n return resp" }, { "identifier": "get_run_info", "path": "owlite/api/main/runs.py", "snippet": "def get_run_info(project_id: str, baseline_name: str, run_name: str) -> Optional[dict]:\n \"\"\"Gets information of an experiment.\n\n Args:\n project_id (str): The id of a project.\n baseline_name (str): The name of a baseline.\n run_name (str): The name of an experiment.\n\n Returns:\n Optional[dict]: The information of an 
experiment if exists, None otherwise.\n\n Raises:\n HTTPError: When request was not successful.\n \"\"\"\n\n payload = {\n \"project_id\": project_id,\n \"baseline_name\": baseline_name,\n \"run_name\": run_name,\n }\n\n try:\n res = MAIN_API_BASE.post(\"/projects/runs/info\", json=payload)\n\n except requests.exceptions.HTTPError as e:\n if e.response is not None and e.response.status_code == 404:\n return None\n\n raise e\n\n assert isinstance(res, dict)\n return res" }, { "identifier": "update_run_info", "path": "owlite/api/main/runs.py", "snippet": "def update_run_info(\n project_id: str,\n baseline_name: str,\n run_name: str,\n logs: str,\n) -> None:\n \"\"\"Updates information for a specific experiment with model metrics.\n\n Args:\n project_id (str): The id of a project.\n baseline_name (str): The name of a baseline.\n run_name (str): The name of an experiment.\n logs (str): Logs to be stored in the database.\n\n Raises:\n HTTPError: When request was not successful.\n \"\"\"\n\n payload = {\n \"project_id\": project_id,\n \"baseline_name\": baseline_name,\n \"run_name\": run_name,\n \"logs\": logs,\n }\n\n resp = MAIN_API_BASE.post(\"/projects/runs/update\", json=payload)\n assert isinstance(resp, str)" }, { "identifier": "upload_run_onnx_proto", "path": "owlite/api/main/runs.py", "snippet": "def upload_run_onnx_proto(\n project_id: str,\n baseline_name: str,\n run_name: str,\n onnx_path: str,\n dynamic_axes: Optional[dict[str, dict[int, dict[str, int]]]] = None,\n) -> None:\n \"\"\"Uploads experiment's onnx proto and graph module. Note that parameters are not uploaded.\n\n Args:\n project_id (str): The id of a project.\n baseline_name (str): The name of a baseline.\n run_name (str): The name of an experiment.\n onnx_path (str): The path to experiment onnx proto file.\n dynamic_axes (Optional[dict[str, dict[int, dict[str, int]]]], optional): Dynamic axes setting,\n please refer to owlite.onnx.export for detail.\n\n Raises:\n HTTPError: When request was not successful.\n \"\"\"\n\n input_signature = extract_input_signature_from_onnx_proto(onnx_path)\n if dynamic_axes is not None:\n new_input_signature = []\n for name, shape in input_signature:\n axis_setting = dynamic_axes.get(name)\n if axis_setting is not None:\n axis = next(iter(axis_setting))\n setting = axis_setting.get(axis)\n assert setting is not None\n range_setting = [\n setting.get(\"min\"),\n setting.get(\"opt\"),\n setting.get(\"max\"),\n setting.get(\"test\"),\n ]\n shape[axis] = range_setting # type: ignore\n new_input_signature.append((name, shape))\n input_signature = new_input_signature\n\n payload = {\n \"project_id\": project_id,\n \"baseline_name\": baseline_name,\n \"run_name\": run_name,\n \"input_shape\": json.dumps(input_signature),\n }\n\n file_dest_url = MAIN_API_BASE.post(\"/projects/runs/data/upload\", json=payload)\n\n assert file_dest_url is not None and isinstance(file_dest_url, str)\n file_upload_resp = upload_file_to_url(onnx_path, file_dest_url)\n\n if not file_upload_resp.ok:\n file_upload_resp.raise_for_status()" }, { "identifier": "symbolic_trace", "path": "owlite/backend/fx/trace.py", "snippet": "def symbolic_trace(model: torch.nn.Module, *args: Tensor, **kwargs: dict[str, Any]) -> GraphModule:\n \"\"\"Like `torch.fx.symbolic_trace`, this function traces the input `model` to convert it into a GraphModule.\n In order for the tracing to be successful, the `model` must be able to pass `torch.compile(model, fullgraph=True)`.\n\n Args:\n model (torch.nn.Module): a torch.nn.Module instance.\n\n 
Raises:\n TypeError: if the `model` is not an instance of `torch.nn.Module`\n RuntimeError: if the tracing fails.\n\n Returns:\n GraphModule: the converted GraphModule.\n \"\"\"\n if not isinstance(model, torch.nn.Module):\n raise TypeError(f\"Expected torch.nn.Module instance but object of type {type(model)} given: {model}\")\n if isinstance(model, (DataParallel, DistributedDataParallel)):\n _model_type = f\"torch.nn.parallel.{type(model).__name__}\"\n log.error(\n f\"{_model_type} is not supported by symbolic trace, please use 'attribute' module to unwrap model \"\n f\"from {_model_type}. Try owlite.fx.symbolic_trace(model.module, ...)\"\n )\n raise TypeError(f\"{_model_type} is not supported by symbolic trace\")\n training_status = model.training\n # move input args and kwargs to model device\n device = get_most_common_device(model)\n dtype = get_most_common_floating_point_type(model)\n log.debug(f\"Tracing with device={device}, dtype={dtype}\")\n\n args = move_tensors_to(args, device, dtype)\n kwargs = move_tensors_to(kwargs, device, dtype)\n\n backend = BackendProvider()\n torch_dynamo.reset()\n optimized_model = torch.compile(model, fullgraph=True, backend=backend)\n output = optimized_model(*args, **kwargs)\n\n graph_module = backend.graph_module\n\n if graph_module is None:\n raise RuntimeError(\"Failed to create torch.fx.GraphModule while running optimized model\")\n\n graph_module = apply_graph_module_transforms(graph_module)\n graph_module = insert_output_adapter(graph_module, output)\n\n original_params = inspect.signature(model.forward).parameters\n graph_module_params = inspect.signature(graph_module.forward).parameters\n\n ignored_params = OrderedDict(\n filter(\n lambda item: (\n item[0] not in graph_module_params\n and item[1].kind\n not in (\n inspect._ParameterKind.VAR_POSITIONAL,\n inspect._ParameterKind.VAR_KEYWORD,\n )\n ),\n original_params.items(),\n )\n )\n if ignored_params:\n log.warning(\n \"The following parameters will be dropped from the graph module's forward method: \"\n f\"{', '.join(ignored_params)}\"\n )\n graph_module.train(training_status)\n graph_module.meta[\"owlite_status\"] = OwLiteStatus.NOT_COMPRESSED\n return graph_module" }, { "identifier": "configure_dynamic_dimensions", "path": "owlite/backend/onnx/dynamize.py", "snippet": "def configure_dynamic_dimensions(\n input_signature: list[tuple[str, Union[tuple[int, ...], str]]], dynamic_axes: dict[str, dict[int, dict[str, int]]]\n) -> DynamicDimensions:\n \"\"\"Configures dynamic dimension setting to be used by `dynamize` with given ONNX proto and dynamic axes setting.\n\n Args:\n input_signature (list[tuple[str, Union[tuple[int, ...], str]]]): A list of tuples mapping fx graph input names\n to their shape if they are torch.Tensor instances or to their class name otherwise.\n dynamic_axes (Optional[dict[str, dict[int, dict[str, int]]]], optional):\n To specify axes of tensors dynamic(i.e. 
known only at run-time), set `dynamic_axes` to a dict with schema:\n\n * KEY (str): an input name.\n\n * VALUE (dict[int, dict[str, int]]): a single item dictionary whose key is dynamic dimension of input\n and value is a dynamic range setting dictionary containing min, opt, max, test dimension size settings.\n\n Raises:\n ValueError: When dynamic ONNX proto is given or when invalid `dynamic_axes` is given.\n\n Returns:\n DynamicDimensions: Dynamic dimension setting to be used as an input of `dynamize`.\n \"\"\"\n\n if not check_dynamic_axes_setting(input_signature, dynamic_axes):\n raise ValueError(\"Invalid dynamic axes setting\")\n\n settings = {}\n dynamic_dim_size = None\n onnx_inputs_dict = dict(input_signature)\n for name, setting in dynamic_axes.items():\n dynamic_axis = next(iter(setting))\n\n shape = onnx_inputs_dict[name]\n assert shape is not None\n\n dynamic_dim_size = shape[dynamic_axis]\n\n min_val = setting[dynamic_axis].get(\"min\")\n max_val = setting[dynamic_axis].get(\"max\")\n opt_val = setting[dynamic_axis].get(\"opt\")\n opt_val = setting[dynamic_axis].get(\"test\")\n\n if dynamic_axis < 0:\n dynamic_axis = len(shape) + dynamic_axis\n\n settings[name] = DynamicSetting(shape, dynamic_axis, min_val, max_val, opt_val) # type: ignore\n\n assert dynamic_dim_size is not None and isinstance(dynamic_dim_size, int)\n return DynamicDimensions(dynamic_dim_size, settings)" }, { "identifier": "export", "path": "owlite/backend/onnx/export.py", "snippet": "def export(\n module: torch.nn.Module,\n args: Union[tuple[Any, ...], torch.Tensor],\n f: str,\n export_params: bool = True,\n verbose: bool = False,\n training: torch._C._onnx.TrainingMode = torch._C._onnx.TrainingMode.EVAL,\n input_names: Optional[Sequence[str]] = None,\n output_names: Optional[Sequence[str]] = None,\n operator_export_type: torch._C._onnx.OperatorExportTypes = torch._C._onnx.OperatorExportTypes.ONNX,\n opset_version: int = 17,\n do_constant_folding: bool = True,\n keep_initializers_as_inputs: Optional[bool] = None,\n custom_opsets: Optional[Mapping[str, int]] = None,\n export_modules_as_functions: Union[bool, Collection[type[torch.nn.Module]]] = False,\n use_fast_export: bool = True,\n apply_transforms: bool = True,\n simplify: bool = True,\n check_n: int = 1,\n skip_fuse_bn: bool = False,\n skipped_optimizers: Optional[list[str]] = None,\n dynamic_dimensions: Optional[DynamicDimensions] = None,\n) -> None:\n r\"\"\"Exports a model into ONNX format.\n\n Args:\n module (torch.nn.Module): The model to be exported.\n args (Union[tuple[Any, ...], torch.Tensor]): Argument of a `module`.\n\n args can be structured either as:\n\n 1. ONLY A TUPLE OF ARGUMENTS::\n\n args = (x, y, z)\n\n The tuple should contain model inputs such that `module(*args)` is a valid\n invocation of the model. Any non-Tensor arguments will be hard-coded into the\n exported model; any Tensor arguments will become inputs of the exported model,\n in the order they occur in the tuple.\n\n 2. A TENSOR::\n\n args = torch.Tensor([1])\n\n This is equivalent to a 1-ary tuple of that Tensor.\n\n 3. A TUPLE OF ARGUMENTS ENDING WITH A DICTIONARY OF NAMED ARGUMENTS::\n\n args = (\n x,\n {\n \"y\": input_y,\n \"z\": input_z\n }\n )\n\n All but the last element of the tuple will be passed as non-keyword arguments,\n and named arguments will be set from the last element. If a named argument is\n not present in the dictionary, it is assigned the default value, or None if a\n default value is not provided.\n\n .. 
note::\n If a dictionary is the last element of the args tuple, it will be\n interpreted as containing named arguments. In order to pass a dict as the\n last non-keyword arg, provide an empty dict as the last element of the args\n tuple. For example, instead of::\n\n export(\n module,\n (\n x,\n # WRONG: will be interpreted as named arguments\n {y: z}\n ),\n \"test.onnx.pb\"\n )\n\n Write::\n\n export(\n module,\n (\n x,\n {y: z},\n {}\n ),\n \"test.onnx.pb\"\n )\n f (str): A string containing a file name. A binary protocol buffer will be written to this file.\n export_params (bool, optional): If True, all parameters will\n be exported. Set this to False if you want to export an untrained model.\n In this case, the exported model will first take all of its parameters\n as arguments, with the ordering as specified by `module.state_dict().values()`. Defaults to True.\n verbose (bool, optional): If True, prints a description of the\n model being exported to stdout. In addition, the final ONNX graph will include the\n field `doc_string` from the exported model which mentions the source code locations\n for `module`. If True, ONNX exporter logging will be turned on. Defaults to False.\n training (torch._C._onnx.TrainingMode, optional): Defaults to torch._C._onnx.TrainingMode.EVAL.\n * `TrainingMode.EVAL`: export the model in inference mode.\n * `TrainingMode.PRESERVE`: export the model in inference mode if model.training is\n False and in training mode if model.training is True.\n * `TrainingMode.TRAINING`: export the model in training mode. Disables optimizations\n which might interfere with training.\n input_names (Optional[Sequence[str]], optional): Names to assign to the input nodes of the graph, in order.\n Names of `module.forward` arguments will be used when None is given. Defaults to None.\n output_names (Optional[Sequence[str]], optional): Names to assign to the output nodes of the graph, in order.\n Defaults to None.\n operator_export_type (torch._C._onnx.OperatorExportTypes, optional):\n Defaults to `torch._C._onnx.OperatorExportTypes.ONNX`.\n * `OperatorExportTypes.ONNX`: Export all ops as regular ONNX ops (in the default opset domain).\n * `OperatorExportTypes.ONNX_FALLTHROUGH`: Try to convert all ops\n to standard ONNX ops in the default opset domain. If unable to do so\n (e.g. because support has not been added to convert a particular torch op to ONNX),\n fall back to exporting the op into a custom opset domain without conversion. Applies\n to `custom ops <https://pytorch.org/tutorials/advanced/torch_script_custom_ops.html>`_\n as well as ATen ops. For the exported model to be usable, the runtime must support\n these non-standard ops.\n * `OperatorExportTypes.ONNX_ATEN`: All ATen ops (in the TorchScript namespace \"aten\")\n are exported as ATen ops (in opset domain \"org.pytorch.aten\").\n `ATen <https://pytorch.org/cppdocs/#aten>`_ is PyTorch's built-in tensor library, so\n this instructs the runtime to use PyTorch's implementation of these ops.\n\n .. warning::\n\n Models exported this way are probably runnable only by Caffe2.\n\n This may be useful if the numeric differences in implementations of operators are\n causing large differences in behavior between PyTorch and Caffe2 (which is more\n common on untrained models).\n * `OperatorExportTypes.ONNX_ATEN_FALLBACK`: Try to export each ATen op\n (in the TorchScript namespace \"aten\") as a regular ONNX op. If we are unable to do so\n (e.g. 
because support has not been added to convert a particular torch op to ONNX),\n fall back to exporting an ATen op. See documentation on OperatorExportTypes.ONNX_ATEN for\n context.\n For example::\n\n graph(%0 : Float):\n %3 : int = prim::Constant[value=0]()\n # conversion unsupported\n %4 : Float = aten::triu(%0, %3)\n # conversion supported\n %5 : Float = aten::mul(%4, %0)\n return (%5)\n\n Assuming `aten::triu` is not supported in ONNX, this will be exported as::\n\n graph(%0 : Float):\n %1 : Long() = onnx::Constant[value={0}]()\n # not converted\n %2 : Float = aten::ATen[operator=\"triu\"](%0, %1)\n # converted\n %3 : Float = onnx::Mul(%2, %0)\n return (%3)\n\n If PyTorch was built with Caffe2 (i.e. with `BUILD_CAFFE2=1`), then\n Caffe2-specific behavior will be enabled, including special support\n for ops are produced by the modules described in\n `Quantization <https://pytorch.org/docs/stable/quantization.html>`_.\n\n .. warning::\n\n Models exported this way are probably runnable only by Caffe2.\n opset_version (int, optional): The version of the default (ai.onnx) opset\n <https://github.com/onnx/onnx/blob/master/docs/Operators.md> to target. Must be >= 7 and <= 18.\n Defaults to 17.\n do_constant_folding (bool, optional): Apply the constant-folding optimization.\n Constant-folding will replace some of the ops that have all constant inputs\n with pre-computed constant nodes. Defaults to True.\n keep_initializers_as_inputs (Optional[bool], optional): If True, all the initializers\n (typically corresponding to parameters) in the exported graph will also be added\n as inputs to the graph. If False, then initializers are not added as inputs to the\n graph, and only the non-parameter inputs are added as inputs. This may allow for\n better optimizations (e.g. constant folding) by backends/runtimes. Defaults to None.\n custom_opsets (Optional[Mapping[str, int]], optional): A dict with schema:\n\n * KEY (str): opset domain name\n * VALUE (int): opset version\n\n If a custom opset is referenced by ``model`` but not mentioned in this dictionary,\n the opset version is set to 1. Only custom opset domain name and version should be\n indicated through this argument. Defaults to None.\n export_modules_as_functions (Union[bool, Collection[type[torch.nn.Module]]], optional): Flag to enable\n exporting all ``nn.Module`` forward calls as local functions in ONNX. Or a set to indicate the\n particular types of modules to export as local functions in ONNX.\n This feature requires ``opset_version`` >= 15, otherwise the export will fail. This is because\n ``opset_version`` < 15 implies IR version < 8, which means no local function support.\n Module variables will be exported as function attributes. There are two categories of function\n attributes. Defaults to False.\n use_fast_export (bool, optional): If True, export process will be done in memory. If `module` with total\n parameter size larger than 2GB, this flag will be automatically set to `False`. If False, temporary\n export process will be done using temporary files. Defaults to True.\n apply_transforms (bool, optional): If True, ONNX transforms defined by SqueezeBits.inc will be applied for\n model optimization. If False, ONNX transformations will be skipped. However, turning this flag to `False`\n is experimental and might yield unexpected behavior. Defaults to True.\n simplify (bool, optional): If True, onnx-simplifier will be run. 
If False, onnx-simplifier will be skipped.\n Defaults to True.\n check_n (int, optional): Only available when `simplify=True`. The number of times to run check for the\n simplified ONNX proto after onnx-simplifier. Defaults to 1.\n skip_fuse_bn (bool, optional): Only available when `simplify=True`. Whether to skip batchnorm-fusion.\n Defaults to False.\n skipped_optimizers (Optional[list[str]], optional): Only available when `simplify=True`. The list of\n onnx-simplifier passes to skip. Defaults to None.\n See https://github.com/onnx/optimizer/tree/master/onnxoptimizer/passes for available passes.\n dynamic_dimensions (Optional[DynamicDimensions], optional): Dynamic dimensions setting configured by\n `configure_dynamic_dimensions`. Defaults to None.\n\n Raises:\n TypeError: If `f` is not a string.\n ValueError: If the quantizer has invalid condition.\n `torch.onnx.errors.CheckerError`: If the ONNX checker detects an invalid ONNX graph.\n `torch.onnx.errors.UnsupportedOperatorError`: If the ONNX graph cannot be exported because it\n uses an operator that is not supported by the exporter.\n `torch.onnx.errors.OnnxExporterError`: Other errors that can occur during export.\n All errors are subclasses of :class:`errors.OnnxExporterError`.\n \"\"\"\n\n if not isinstance(f, str):\n raise TypeError(\"owlite.onnx.export requires the argument `f` to be a string.\")\n\n if isinstance(module, GraphModule):\n if module.meta[\"owlite_status\"] == OwLiteStatus.COMPRESSED:\n log.warning(\n \"This module has not yet been calibrated. \"\n \"The onnx that comes out of this module may have unexpected results in accuracy and latency.\"\n )\n\n clip_narrow_range_weights(module)\n # Batch Norm Fusing\n fuse_bn(module)\n\n # zero point folding\n fold_zp_to_bias(module)\n\n check_fake_quantization_condition(module)\n\n device = get_most_common_device(module)\n dtype = get_most_common_floating_point_type(module)\n args = move_tensors_to(args, device, dtype)\n\n size_in_gigabytes = sum(p.numel() * p.element_size() for p in module.parameters()) / (1 << 30)\n\n if size_in_gigabytes >= 2:\n log.warning(\n f\"Model has total parameter size larger than 2 GB ({size_in_gigabytes:.2f} GB).\"\n '\"use_fast_export\" will be set to False'\n )\n use_fast_export = False\n\n export_function, optimize_function = (_export, _optimize) if use_fast_export else (_export_path, _optimize_path)\n\n if opset_version is None:\n opset_version = 17\n\n if input_names is None and isinstance(module, GraphModule):\n input_names = get_default_input_names(module, args)\n onnx_proto = export_function(\n module,\n args=args,\n export_params=export_params,\n verbose=verbose,\n training=training,\n input_names=input_names,\n output_names=output_names,\n operator_export_type=operator_export_type,\n opset_version=opset_version,\n do_constant_folding=do_constant_folding,\n keep_initializers_as_inputs=keep_initializers_as_inputs,\n custom_opsets=custom_opsets,\n export_modules_as_functions=export_modules_as_functions,\n )\n\n if skipped_optimizers is None:\n skipped_optimizers = [\"fuse_qkv\"]\n\n onnx_proto = optimize_function(\n onnx_proto,\n apply_transforms=apply_transforms,\n simplify=simplify,\n check_n=check_n,\n skip_fuse_bn=skip_fuse_bn,\n skipped_optimizers=skipped_optimizers,\n )\n\n if dynamic_dimensions is not None:\n onnx_proto = dynamize(onnx_proto, dynamic_dimensions)\n\n onnx_proto.producer_name = f\"owlite + {onnx_proto.producer_name}\"\n onnx_proto.doc_string = \"Processed by OwLite\"\n\n model_dir = os.path.dirname(f)\n name, _ = 
os.path.splitext(os.path.basename(f))\n location = f\"{name}.bin\"\n abs_location = os.path.join(model_dir, location)\n\n log.info(f\"Saving exported ONNX proto at {f} with external data {location}\")\n if model_dir:\n os.makedirs(model_dir, exist_ok=True)\n if abs_location is not None and os.path.isfile(abs_location):\n log.warning(f\"External data file at {abs_location} will be overwritten.\")\n # os.remove is required since onnx.save opens the external data file with mode='ab'\n os.remove(abs_location)\n onnx.save(\n onnx_proto,\n f,\n location=location,\n save_as_external_data=True,\n size_threshold=0,\n )" }, { "identifier": "get_input_shape_signature", "path": "owlite/backend/onnx/export.py", "snippet": "def get_input_shape_signature(\n module: torch.nn.Module, *args: Any, **kwargs: Any\n) -> list[tuple[str, Union[tuple[int, ...], str]]]:\n \"\"\"Maps the parameter names of a PyTorch module's forward method to the corresponding values' shapes or class name.\n\n This function returns a list of tuples, where each tuple contains a parameter name and its corresponding shape\n (as a tuple of integers) if the value is an instance of `torch.Tensor` or otherwise the name of the class of\n the value.\n\n Args:\n module (torch.nn.Module): The PyTorch module to inspect.\n args (Any): Positional arguments to be passed to the module.\n kwargs (Any): Keyword arguments to be passed to the module.\n\n Returns:\n list[tuple[str, Union[tuple[int, ...], str]]]: A list of tuples mapping parameter names to their shape\n (if they are torch.Tensor instances) or to their class name (for non-torch.Tensor instances).\n\n Note:\n This function assumes that `args` and `kwargs` match the signatures of the module's forward method exactly,\n in order and length. If they don't, the result may not be as expected or exceptions might occur.\n \"\"\"\n signature_map = map_signature(module.forward, *args, **kwargs)\n return [\n (\n name,\n tuple(value.shape) if isinstance(value, torch.Tensor) else value.__class__.__name__,\n )\n for name, value in signature_map\n ]" }, { "identifier": "log", "path": "owlite/logger.py", "snippet": "class Logger(logging.Logger):\n class _WarningFilterContext:\n class WarningFilter(logging.Filter):\n ENV_VAR = \"OWLITE_LOG_LEVEL\"\n DEBUG_WARNING = 15\n ULTRA_VERBOSE = -10\n def ignore_warnings(self):\n def __init__(self, logger) -> None:\n def __enter__(self):\n def filter(self, record):\n def __exit__(self, exc_type, exc_val, exc_tb):\n def debug_warning(self, msg, *args, **kwargs):\n def level(self) -> int:\n def level(self, value):\ndef suppress_owlite_warnings(cls):\n def new_init(self, *args, **kwargs):" }, { "identifier": "ONNXExportOptions", "path": "owlite/options/onnx_export_options.py", "snippet": "class ONNXExportOptions:\n \"\"\"\n Class handling options for ONNX export.\n\n OwLite internally imports the target model to ONNX during conversion or benchmarking.\n Users can set options for ONNX export using this class.\n \"\"\"\n\n opset_version: int = 17" }, { "identifier": "GraphQuantizationOptions", "path": "owlite/options/quantization_options.py", "snippet": "class GraphQuantizationOptions(OptionsDict):\n \"\"\"\n * Key (str): the name of a FX node\n * Value (NodeQuantizationOptions): node quantization options\n \"\"\"\n\n ValueType = NodeQuantizationOptions" }, { "identifier": "quantize", "path": "owlite/quantize.py", "snippet": "def quantize(model: GraphModule, options: GraphQuantizationOptions) -> GraphModule:\n \"\"\"Quantizes the model with the specification described in 
options.\n\n This function inserts quantizers with the quantization options specified in the options,\n substitutes them with the Quantized module, and performs post-processing. The linear module\n that quantizes the bias cannot fuse the batch norm after quantizing, so it proceeds to fuse\n the batch norm. Then, it fuses quantizers with the same quantization option that correspond\n to the same tensor in the original model.\n\n Args:\n model (GraphModule): The symbolic traced model to be quantized.\n options (GraphQuantizationOptions): Options specifying the quantization.\n\n Raises:\n TypeError: If model is not a instance of `GraphModule`.\n\n Returns:\n GraphModule: Quantized model.\n \"\"\"\n\n if not isinstance(model, GraphModule):\n raise TypeError(\"Only GraphModule instance can be quantized with `owlite.quantize`\")\n configure(model, options)\n fuse_linear_bn_with_quantized_bias(model)\n log.debug(\"Fusing the redundant quantizers.\")\n fuse_redundant_quantizers(model)\n enable_quantizers(model, True)\n return model" } ]
import json import os import torch from dataclasses import asdict, dataclass from typing import Any, Optional from torch.fx import GraphModule # type: ignore from torch.nn.parallel import DataParallel, DistributedDataParallel from owlite_core.cli.device import OWLITE_DEVICE_NAME from owlite_core.constants import ( OWLITE_FRONT_BASE_URL, OWLITE_REPO_PATH, OWLITE_REPORT_URL, ) from owlite_core.owlite_settings import OWLITE_SETTINGS from .api.device.devices import ( download_trt_engine, poll_run_benchmark, request_trt_benchmark, ) from .api.dove.doves import get_configuration, upload_baseline from .api.main.baselines import check_baseline_existence, create_baseline from .api.main.projects import create_or_load_project from .api.main.runs import ( copy_run, create_run, get_benchmark_key, get_run_info, update_run_info, upload_run_onnx_proto, ) from .backend.fx.trace import symbolic_trace from .backend.onnx.dynamize import configure_dynamic_dimensions from .backend.onnx.export import export, get_input_shape_signature from .logger import log from .options import GraphQuantizationOptions, ONNXExportOptions from .quantize import quantize
12939
f"{OWLITE_FRONT_BASE_URL}/project/detail/{self.project_id}" ) engine_path = os.path.join( OWLITE_REPO_PATH, self.project_name, self.baseline_name, self.experiment_name, f"{self.project_name}_{self.baseline_name}_{self.experiment_name}.engine", ) download_trt_engine(benchmark_key, engine_path) def log(self, **kwargs) -> None: """Logs the model's metrics. Notes: Log metrics with OwLite like below ... owl = owlite.init(...) ... owl.log(accuracy=0.72, loss=1.2) Raises: TypeError: When data is not JSON serializable. """ try: logs = json.dumps(kwargs) except TypeError as e: log.error("Data is not JSON serializable") raise e update_run_info(self.project_id, self.baseline_name, self.experiment_name, logs) # pylint: disable-next=too-many-branches def init( project: str, baseline: str, experiment: Optional[str] = None, duplicate_from: Optional[str] = None, description: str = "", onnx_export_options: Optional[ONNXExportOptions] = None, ) -> OwLite: """Sets project, baseline and experiment information in DB to proper state and creates `OwLite` instance. Args: project (str): OwLite project name. baseline (str): OwLite baseline name. experiment (str, optional): OwLite experiment name. Defaults to None. duplicate_from (str, optional): OwLite source experiment name. Defaults to None. description (str, optional): OwLite project description. Defaults to "". onnx_export_options (ONNXExportOptions, optional): Options for ONNX export. Defaults to None. Raises: RuntimeError: When not authenticated. ValueError: When invalid experiment name or baseline name is given. Returns: OwLite: Created `OwLite` instance. """ if OWLITE_SETTINGS.tokens is None: log.error("Please log in using 'owlite login'. Account not found on this device") raise RuntimeError("OwLite token not found") if OWLITE_DEVICE_NAME is None: log.warning("Connected device not found. Please connect device by 'owlite device connect --name (name)'") else: log.info(f"Connected device: {OWLITE_DEVICE_NAME}") if experiment == baseline: log.error(f"Experiment name '{baseline}' is reserved for baseline. Please try with a different experiment name") raise ValueError("Invalid experiment name") dir_path = os.path.join( OWLITE_REPO_PATH, project, baseline, experiment or baseline, ) if os.path.exists(dir_path): log.warning(f"Existing local directory found at {dir_path}. Continuing this code will overwrite the data") else: os.makedirs(dir_path, exist_ok=True) log.info(f"Experiment data will be saved in {dir_path}") # create or load project project_id = create_or_load_project(project, description) if experiment is None: if duplicate_from: log.warning(f"duplicate_from='{duplicate_from}' will be ignored as no value for experiment was provided") created_baseline = create_baseline(project_id, baseline) if created_baseline != baseline: log.warning( f"A baseline '{baseline}' already exists. " f"Created a new baseline '{created_baseline}' at project '{project}'" ) baseline = created_baseline else: log.info(f"Created new baseline '{baseline}' at project '{project}'") else: if not check_baseline_existence(project_id, baseline): log.error(f"Baseline '{baseline}' not found. Please verify the entered baseline name and try again") raise ValueError("Invalid baseline name") if duplicate_from: experiment = copy_run(project_id, baseline, duplicate_from, experiment) log.info( f"Copied compression configuration from the experiment '{duplicate_from}' " f"to the new experiment '{experiment}'" ) exp_info = get_run_info(project_id, baseline, experiment) if exp_info is None:
# type: ignore """OwLite Optimization Module This module facilitates optimization and benchmarking of models using OwLite services.""" @dataclass class OwLite: """Class handling OwLite project, baseline, and experiment configurations. The OwLite class manages project, baseline, and experiment configurations within the OwLite system. It allows users to create or load projects, set baselines, create or duplicate experiments, convert models, and benchmark models against the specified configurations. """ project_id: str project_name: str baseline_name: str experiment_name: str onnx_export_options: ONNXExportOptions module_args: Optional[tuple[Any, ...]] = None module_kwargs: Optional[dict[str, Any]] = None @property def is_baseline(self) -> bool: # pylint: disable=missing-function-docstring return self.baseline_name == self.experiment_name def convert(self, model: torch.nn.Module, *args, **kwargs) -> GraphModule: """Converts input model to compressed model. Args: model (torch.nn.Module): Model to compress. Returns: GraphModule: Compressed graph module. Raises: HTTPError: When request for compression configuration was not successful. """ log.info("Model conversion initiated") try: model = symbolic_trace(model, *args, **kwargs) except Exception as e: # pylint: disable=broad-exception-caught log.error( "Failed to extract the computation graph from the provided model. " "Please check the error message for details.\n" "If the issue persists, try replacing with a traceable node. " "In case the problem remain unresolved, kindly report it at " f"{OWLITE_REPORT_URL} for further assistance" ) raise e self.module_args = args self.module_kwargs = kwargs if self.is_baseline: onnx_path = os.path.join( OWLITE_REPO_PATH, self.project_name, self.baseline_name, self.experiment_name, f"{self.project_name}_{self.baseline_name}_{self.experiment_name}.onnx", ) export( model, (*self.module_args, self.module_kwargs), onnx_path, **asdict(self.onnx_export_options), ) log.info("Baseline ONNX saved") upload_baseline(self.project_id, self.baseline_name, onnx_path, model) log.info("Uploaded the model excluding parameters") else: exp_info = get_run_info(self.project_id, self.baseline_name, self.experiment_name) assert exp_info is not None if not exp_info["config_id"]: log.warning("No compression configuration found, skipping the compression process") else: log.info(f"Compression configuration found for '{self.experiment_name}'") configuration_string = get_configuration(self.project_id, self.baseline_name, self.experiment_name) options = GraphQuantizationOptions.load(configuration_string) log.info("Applying compression configuration") model = quantize(model, options) log.info("Converted the model") return model def benchmark( self, model: GraphModule, dynamic_axes: Optional[dict[str, dict[int, dict[str, int]]]] = None, ) -> None: """Benchmarks given model. Args: model (GraphModule): Model to benchmark. dynamic_axes (Optional[dict[str, dict[int, dict[str, int]]]]): By default the exported model will have the shapes of all input tensors set to exactly match those given when calling convert. To specify axes of tensors as dynamic (i.e. known only at run-time), set `dynamic_axes` to a dict with schema: * KEY (str): an input name. * VALUE (dict[int, dict[str, int]]): a single item dictionary whose key is dynamic dimension of input and value is a dynamic range setting dictionary containing min, opt, max, test dimension size settings. For example:: import owlite owl = owlite.init( ... 
) class SumModule(torch.nn.Module): def forward(self, x): return torch.sum(x, dim=1) model = owl.convert( ... ) ... # set first(0-th) dimension of input x to be dynamic within the range of 1 ~ 8 # optimize for 4 and benchmark for 5 owl.benchmark(model, dynamic_axes={ "x": { 0: { "min": 1, "opt": 4, "max": 8, "test": 5, } } }) Raises: TypeError: When the `model` is an instance of `torch.nn.DataParallel` or `torch.nn.DistributedDataParallel`. RuntimeError: When `dynamic_axes` is set for baseline benchmark. ValueError: When invalid `dynamic_axes` is given. """ if isinstance(model, (DataParallel, DistributedDataParallel)): _model_type = f"torch.nn.parallel.{type(model).__name__}" log.error( f"{_model_type} is not supported by benchmark, please use attribute module " f"to unwrap model from {_model_type}. Try owlite.benchmark(model.module)" ) raise TypeError(f"{_model_type} is not supported by benchmark") if self.is_baseline: log.info( f"Benchmark initiated. '{self.baseline_name}' " "ONNX will be uploaded to the connected device for TensorRT execution and benchmark" ) if dynamic_axes is not None: log.error( "Baseline cannot be done with dynamic input. To benchmark baseline model with dynamic input, " "please create a run without compression configuration and benchmark that run with dynamic input" ) raise RuntimeError("Attempted dynamic baseline benchmark") else: log.info( f"Benchmark initiated. '{self.experiment_name}' " "ONNX will be created and uploaded to the connected device for TensorRT execution and benchmark" ) dynamic_dimensions = None if dynamic_axes is not None: sep = "', '" log.info(f"dynamic_axes setting for following inputs are provided. '{sep.join(dynamic_axes.keys())}'") input_signature = get_input_shape_signature( model, *(self.module_args or ()), **(self.module_kwargs or {}) ) dynamic_dimensions = configure_dynamic_dimensions(input_signature, dynamic_axes) onnx_path = os.path.join( OWLITE_REPO_PATH, self.project_name, self.baseline_name, self.experiment_name, f"{self.project_name}_{self.baseline_name}_{self.experiment_name}.onnx", ) export( model, (*(self.module_args or ()), self.module_kwargs), onnx_path, **asdict(self.onnx_export_options), dynamic_dimensions=dynamic_dimensions, ) log.info("Experiment ONNX saved") upload_run_onnx_proto(self.project_id, self.baseline_name, self.experiment_name, onnx_path, dynamic_axes) log.info("Uploaded the model excluding parameters") benchmark_key = get_benchmark_key(self.project_id, self.baseline_name, self.experiment_name) bin_path = os.path.join( OWLITE_REPO_PATH, self.project_name, self.baseline_name, self.experiment_name, f"{self.project_name}_{self.baseline_name}_{self.experiment_name}.bin", ) request_trt_benchmark(benchmark_key, bin_path) log.info("TensorRT engine execution and benchmark successfully requested") poll_run_benchmark(self.project_id, benchmark_key) exp_info = get_run_info(self.project_id, self.baseline_name, self.experiment_name) assert exp_info is not None if self.is_baseline: log.info( "Latency\n" f"\t\tBaseline - {exp_info['latency']} on {exp_info['device_name']}\n" "\t\tConfigure the quantization settings located at " f"{OWLITE_FRONT_BASE_URL}/project/detail/{self.project_id}" ) else: log.info( "Latency\n" f"\t\tConfigured - {exp_info['latency']} on {exp_info['device_name']}\n" "\t\tRetrieve the specifics of the experiment at " f"{OWLITE_FRONT_BASE_URL}/project/detail/{self.project_id}" ) engine_path = os.path.join( OWLITE_REPO_PATH, self.project_name, self.baseline_name, self.experiment_name, 
f"{self.project_name}_{self.baseline_name}_{self.experiment_name}.engine", ) download_trt_engine(benchmark_key, engine_path) def log(self, **kwargs) -> None: """Logs the model's metrics. Notes: Log metrics with OwLite like below ... owl = owlite.init(...) ... owl.log(accuracy=0.72, loss=1.2) Raises: TypeError: When data is not JSON serializable. """ try: logs = json.dumps(kwargs) except TypeError as e: log.error("Data is not JSON serializable") raise e update_run_info(self.project_id, self.baseline_name, self.experiment_name, logs) # pylint: disable-next=too-many-branches def init( project: str, baseline: str, experiment: Optional[str] = None, duplicate_from: Optional[str] = None, description: str = "", onnx_export_options: Optional[ONNXExportOptions] = None, ) -> OwLite: """Sets project, baseline and experiment information in DB to proper state and creates `OwLite` instance. Args: project (str): OwLite project name. baseline (str): OwLite baseline name. experiment (str, optional): OwLite experiment name. Defaults to None. duplicate_from (str, optional): OwLite source experiment name. Defaults to None. description (str, optional): OwLite project description. Defaults to "". onnx_export_options (ONNXExportOptions, optional): Options for ONNX export. Defaults to None. Raises: RuntimeError: When not authenticated. ValueError: When invalid experiment name or baseline name is given. Returns: OwLite: Created `OwLite` instance. """ if OWLITE_SETTINGS.tokens is None: log.error("Please log in using 'owlite login'. Account not found on this device") raise RuntimeError("OwLite token not found") if OWLITE_DEVICE_NAME is None: log.warning("Connected device not found. Please connect device by 'owlite device connect --name (name)'") else: log.info(f"Connected device: {OWLITE_DEVICE_NAME}") if experiment == baseline: log.error(f"Experiment name '{baseline}' is reserved for baseline. Please try with a different experiment name") raise ValueError("Invalid experiment name") dir_path = os.path.join( OWLITE_REPO_PATH, project, baseline, experiment or baseline, ) if os.path.exists(dir_path): log.warning(f"Existing local directory found at {dir_path}. Continuing this code will overwrite the data") else: os.makedirs(dir_path, exist_ok=True) log.info(f"Experiment data will be saved in {dir_path}") # create or load project project_id = create_or_load_project(project, description) if experiment is None: if duplicate_from: log.warning(f"duplicate_from='{duplicate_from}' will be ignored as no value for experiment was provided") created_baseline = create_baseline(project_id, baseline) if created_baseline != baseline: log.warning( f"A baseline '{baseline}' already exists. " f"Created a new baseline '{created_baseline}' at project '{project}'" ) baseline = created_baseline else: log.info(f"Created new baseline '{baseline}' at project '{project}'") else: if not check_baseline_existence(project_id, baseline): log.error(f"Baseline '{baseline}' not found. Please verify the entered baseline name and try again") raise ValueError("Invalid baseline name") if duplicate_from: experiment = copy_run(project_id, baseline, duplicate_from, experiment) log.info( f"Copied compression configuration from the experiment '{duplicate_from}' " f"to the new experiment '{experiment}'" ) exp_info = get_run_info(project_id, baseline, experiment) if exp_info is None:
create_run(project_id, baseline, experiment)
14
2023-12-08 06:41:50+00:00
16k
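The fields above (context, import_statement, cropped_code, next_line, gold_snippet_index, created_at, level) make up one record. Below is a minimal, non-authoritative sketch of how such a record might be assembled into a next-line prediction example; the field semantics assumed here (that gold_snippet_index selects an entry from context and that next_line is the prediction target) are inferred from the field names and are not documented in the dump itself.

# Illustrative sketch only -- not part of the dataset. Assumes a record is a
# dict with the fields shown above; how the fields are combined here is an
# assumption, not a documented recipe.
def build_completion_example(record: dict) -> tuple[str, str]:
    """Return a (prompt, target) pair for a next-line completion example."""
    context = record.get("context", [])
    idx = record.get("gold_snippet_index", -1)
    # Use the referenced context snippet when the index is valid.
    gold_snippet = context[idx]["snippet"] if 0 <= idx < len(context) else ""

    parts = [gold_snippet, record.get("import_statement", ""), record.get("cropped_code", "")]
    prompt = "\n\n".join(p for p in parts if p)
    target = record.get("next_line", "")
    return prompt, target
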
qitan/devops-backend-lite
apps/ucenter/views.py
[ { "identifier": "FEISHU_SYNC_USER_JOB_CACHE_KEY", "path": "common/variables.py", "snippet": "FEISHU_SYNC_USER_JOB_CACHE_KEY = 'celery_job:feishu_user_sync'" }, { "identifier": "Menu", "path": "dbapp/models.py", "snippet": "" }, { "identifier": "CustomModelViewSet", "path": "common/extends/viewsets.py", "snippet": "class CustomModelViewSet(viewsets.ModelViewSet):\n \"\"\"\n A viewset that provides default `create()`, `retrieve()`, `update()`,\n `partial_update()`, `destroy()` and `list()` actions.\n \"\"\"\n\n def get_permission_from_role(self, request):\n try:\n perms = request.user.roles.values(\n 'permissions__method',\n ).distinct()\n return [p['permissions__method'] for p in perms]\n except AttributeError:\n return []\n\n def extend_filter(self, queryset):\n return queryset\n\n def get_queryset(self):\n \"\"\"\n Get the list of items for this view.\n This must be an iterable, and may be a queryset.\n Defaults to using `self.queryset`.\n\n This method should always be used rather than accessing `self.queryset`\n directly, as `self.queryset` gets evaluated only once, and those results\n are cached for all subsequent requests.\n\n You may want to override this if you need to provide different\n querysets depending on the incoming request.\n\n (Eg. return a list of items that is specific to the user)\n \"\"\"\n assert self.queryset is not None, (\n \"'%s' should either include a `queryset` attribute, \"\n \"or override the `get_queryset()` method.\"\n % self.__class__.__name__\n )\n queryset = self.extend_filter(self.queryset)\n if isinstance(queryset, QuerySet):\n # Ensure queryset is re-evaluated on each request.\n queryset = queryset.all()\n return queryset.distinct()\n\n @action(methods=['GET'], url_path='count', detail=False)\n def count(self, request, *args, **kwargs):\n queryset = self.get_queryset()\n return Response({'code': 20000, 'data': queryset.count()})\n\n def create(self, request, *args, **kwargs):\n try:\n request.data['name'] = request.data['name'].strip(\n ' ').replace(' ', '-')\n except BaseException as e:\n print('exception ', str(e))\n serializer = self.get_serializer(data=request.data)\n if not serializer.is_valid():\n return Response({'code': 40000, 'status': 'failed', 'message': serializer.errors})\n try:\n self.perform_create(serializer)\n except BaseException as e:\n return Response({'code': 50000, 'status': 'failed', 'message': str(e)})\n log_audit(request, action_type=self.serializer_class.Meta.model.__name__, action='创建', content='',\n data=serializer.data)\n\n data = {'data': serializer.data, 'status': 'success', 'code': 20000}\n return Response(data)\n\n def list(self, request, pk=None, *args, **kwargs):\n queryset = self.filter_queryset(self.get_queryset())\n page_size = request.query_params.get('page_size')\n pagination.PageNumberPagination.page_size = page_size\n page = self.paginate_queryset(queryset)\n if page is not None:\n serializer = self.get_serializer(page, many=True)\n return self.get_paginated_response(serializer.data)\n serializer = self.get_serializer(queryset, many=True)\n data = {'data': {'total': queryset.count(), 'items': serializer.data},\n 'code': 20000, 'status': 'success'}\n return Response(data)\n\n def update(self, request, *args, **kwargs):\n instance = self.get_object()\n partial = kwargs.pop('partial', False)\n try:\n request.data['name'] = request.data['name'].strip(\n ' ').replace(' ', '-')\n except BaseException as e:\n logger.warning(f'不包含name字段: {str(e)}')\n serializer = self.get_serializer(\n instance, data=request.data, 
partial=partial)\n if not serializer.is_valid():\n return Response({'code': 40000, 'status': 'failed', 'message': str(serializer.errors)})\n try:\n self.perform_update(serializer)\n except BaseException as e:\n logger.exception(f'更新失败,原因:{e}')\n return Response({'code': 50000, 'status': 'failed', 'message': str(e)})\n\n if getattr(instance, '_prefetched_objects_cache', None):\n # If 'prefetch_related' has been applied to a queryset, we need to\n # forcibly invalidate the prefetch cache on the instance.\n instance._prefetched_objects_cache = {}\n\n log_audit(request, self.serializer_class.Meta.model.__name__, '更新', content=f\"更新对象:{instance}\",\n data=serializer.data, old_data=self.serializer_class(instance).data)\n\n data = {'data': serializer.data, 'status': 'success', 'code': 20000}\n return Response(data)\n\n def retrieve(self, request, *args, **kwargs):\n instance = self.get_object()\n serializer = self.get_serializer(instance)\n data = {'data': serializer.data, 'code': 20000, 'status': 'success'}\n return Response(data)\n\n def destroy(self, request, *args, **kwargs):\n \"\"\"\n TODO: 删除操作物理删除 or 逻辑删除(增加删除标记字段)\n \"\"\"\n instance = self.get_object()\n try:\n self.perform_destroy(instance)\n except ProtectedError:\n # 存在关联数据,不可删除\n return Response({'code': 50000, 'status': 'failed', 'message': '存在关联数据,禁止删除!'})\n except BaseException as e:\n logger.exception(f'删除数据发生错误 {e}, {e.__class__}')\n return Response({'code': 50000, 'status': 'failed', 'message': f'删除异常: {str(e)}'})\n log_audit(request, self.serializer_class.Meta.model.__name__,\n '删除', content=f\"删除对象:{instance}\")\n\n return Response({'code': 20000, 'status': 'success', 'msg': ''})" }, { "identifier": "CustomModelParentViewSet", "path": "common/extends/viewsets.py", "snippet": "class CustomModelParentViewSet(CustomModelViewSet):\n\n def get_queryset(self):\n assert self.queryset is not None, (\n \"'%s' should either include a `queryset` attribute, \"\n \"or override the `get_queryset()` method.\"\n % self.__class__.__name__\n )\n queryset = self.extend_filter(self.queryset)\n if self.action == 'list':\n if not self.request.query_params.get('search'):\n queryset = queryset.filter(parent__isnull=True)\n if isinstance(queryset, QuerySet):\n queryset = queryset.all()\n return queryset.distinct()" }, { "identifier": "RbacPermission", "path": "common/extends/permissions.py", "snippet": "class RbacPermission(BasePermission):\n \"\"\"\n 自定义权限\n \"\"\"\n\n @classmethod\n def check_is_admin(cls, request):\n return request.user.is_authenticated and request.user.roles.filter(name='管理员').count() > 0\n\n @classmethod\n def get_permission_from_role(cls, request):\n try:\n perms = request.user.roles.values(\n 'permissions__method',\n ).distinct()\n return [p['permissions__method'] for p in perms]\n except AttributeError:\n return []\n\n def _has_permission(self, request, view):\n \"\"\"\n :return:\n \"\"\"\n _method = request._request.method.lower()\n platform = get_redis_data('platform')\n url_whitelist = platform['whitelist'] if platform else []\n url_whitelist.extend(\n [{'url': '/api/login/feishu/'}, {'url': '/api/login/gitlab/'}])\n path_info = request.path_info\n for item in url_whitelist:\n url = item['url']\n if url in path_info:\n logger.debug(f'请求地址 {path_info} 命中白名单 {url}, 放行')\n return True\n\n from_workflow = 'from_workflow' in request.GET\n if _method == 'get' and from_workflow:\n return True\n\n is_superuser = request.user.is_superuser\n if is_superuser:\n return True\n\n is_admin = RbacPermission.check_is_admin(request)\n 
perms = self.get_permission_from_role(request)\n if not is_admin and not perms:\n logger.debug(f'用户 {request.user} 不是管理员 且 权限列表为空, 直接拒绝')\n return False\n\n perms_map = view.perms_map\n\n action = view.action\n _custom_method = f'{_method}_{action}'\n for i in perms_map:\n for method, alias in i.items():\n if is_admin and (method == '*' and alias[0] == 'admin'):\n return True\n if method == '*' and alias[0] in perms:\n return True\n if _custom_method and alias[0] in perms and (_custom_method == method or method == f'*_{action}'):\n return True\n if _method == method and alias[0] in perms:\n return True\n return False\n\n def has_permission(self, request, view):\n res = self._has_permission(request, view)\n # 记录权限异常的操作\n if not res:\n AuditLog.objects.create(\n user=request.user, type='', action='拒绝操作',\n action_ip=user_ip(request),\n content=f\"请求方法:{request.method},请求路径:{request.path},UserAgent:{request.META['HTTP_USER_AGENT']}\",\n data='',\n old_data=''\n )\n return res" }, { "identifier": "CustomInvalidToken", "path": "common/extends/JwtAuth.py", "snippet": "class CustomInvalidToken(InvalidToken):\n status_code = status.HTTP_401_UNAUTHORIZED\n default_detail = 'Token不合法或者已经过期.'\n default_code = 40100" }, { "identifier": "TokenObtainPairSerializer", "path": "common/extends/JwtAuth.py", "snippet": "class TokenObtainPairSerializer(BaseTokenObtainPairSerializer):\n\n default_error_messages = {\n \"no_active_account\": \"用户名或者密码错误!\"\n }\n\n @classmethod\n def get_token(cls, user):\n token = RefreshToken.for_user(user)\n return token" }, { "identifier": "TokenRefreshSerializer", "path": "common/extends/JwtAuth.py", "snippet": "class TokenRefreshSerializer(BaseTokenRefreshSerializer):\n\n def validate(self, attrs):\n refresh = RefreshToken(attrs['refresh'])\n data = {'access': str(refresh.access_token)}\n\n if api_settings.ROTATE_REFRESH_TOKENS:\n if api_settings.BLACKLIST_AFTER_ROTATION:\n try:\n # Attempt to blacklist the given refresh token\n refresh.blacklist()\n except AttributeError:\n # If blacklist app not installed, `blacklist` method will\n # not be present\n pass\n\n refresh.set_jti()\n refresh.set_exp()\n\n data['refresh'] = str(refresh)\n\n return data" }, { "identifier": "log_audit", "path": "common/extends/handler.py", "snippet": "def log_audit(request, action_type, action, content=None, data=None, old_data=None, user=None):\n if user is None:\n user = request.user.first_name or request.user.username\n\n AuditLog.objects.create(user=user, type=action_type, action=action,\n action_ip=user_ip(request),\n content=f\"{mask_sensitive_data(content)}\\n请求方法:{request.method},请求路径:{request.path},UserAgent:{request.META['HTTP_USER_AGENT']}\",\n data=mask_sensitive_data(data),\n old_data=mask_sensitive_data(old_data))" }, { "identifier": "AuditLogFilter", "path": "common/extends/filters.py", "snippet": "class AuditLogFilter(FilterSet):\n exclude = ExcludeFilter(field_name='type', lookup_expr='in', exclude=True)\n type = CharFilter(field_name='type')\n\n class Meta:\n models = AuditLog\n fields = ['type', 'exclude']" }, { "identifier": "CustomSearchFilter", "path": "common/extends/filters.py", "snippet": "class CustomSearchFilter(SearchFilter):\n\n def get_search_fields(self, view, request):\n \"\"\"\n Search fields are obtained from the view, but the request is always\n passed to this method. 
Sub-classes can override this method to\n dynamically change the search fields based on request content.\n \"\"\"\n if hasattr(view, 'get_search_fields'):\n return view.get_search_fields()\n return getattr(view, 'search_fields', None)\n\n def get_search_terms(self, request):\n \"\"\"\n Search terms are set by a ?search=... query parameter,\n and may be comma and/or whitespace delimited.\n \"\"\"\n params = request.query_params.get(self.search_param, '')\n params = params.replace('\\x00', '') # strip null characters\n values = params.strip('+').split('+')\n if len(values) > 1:\n return values, 1\n params = params.replace(',', ' ')\n params = params.replace('|', ' ')\n return params.split(), 0\n\n def filter_queryset(self, request, queryset, view):\n search_fields = self.get_search_fields(view, request)\n search_param = self.get_search_terms(request)\n search_terms = search_param[0]\n search_condition = search_param[1]\n if not search_fields or not search_terms:\n return queryset\n\n orm_lookups = [\n self.construct_search(str(search_field))\n for search_field in search_fields\n ]\n\n base = queryset\n conditions = []\n for search_term in search_terms:\n queries = [\n models.Q(**{orm_lookup: search_term.strip()})\n for orm_lookup in orm_lookups\n ]\n conditions.append(reduce(operator.or_, queries))\n if search_condition == 1:\n queryset = queryset.filter(reduce(operator.and_, conditions))\n else:\n queryset = queryset.filter(reduce(operator.or_, conditions))\n\n if self.must_call_distinct(queryset, search_fields):\n # Filtering against a many-to-many field requires us to\n # call queryset.distinct() in order to avoid duplicate items\n # in the resulting queryset.\n # We try to avoid this if possible, for performance reasons.\n queryset = distinct(queryset, base)\n return queryset" }, { "identifier": "GlueJenkins", "path": "common/utils/JenkinsAPI.py", "snippet": "class GlueJenkins(Jenkins):\n\n def __init__(self, url=None, username=None, password=None):\n self.__url = url\n self.__username = username\n self.__password = password\n super(GlueJenkins, self).__init__(\n self.__url, self.__username, self.__password)\n\n def _get_encoded_params(self, params):\n for k, v in params.items():\n if k in [\"name\", \"msg\", \"short_name\", \"from_short_name\",\n \"to_short_name\", \"folder_url\", \"from_folder_url\", \"to_folder_url\"]:\n params[k] = quote(v.encode('utf8'))\n return params\n\n def _build_url(self, format_spec, variables=None):\n\n if variables:\n url_path = format_spec % self._get_encoded_params(variables)\n else:\n url_path = format_spec\n return str(urljoin(self.server, url_path))\n\n def assert_credential_exists(self, name, folder_name=None, domain_name='_',\n exception_message='credential[%s] does not exist.'):\n '''Raise an exception if credential does not exist in domain of folder\n\n :param name: Name of credential, ``str``\n :param folder_name: Folder name, ``str``\n :param domain_name: Domain name, default is '_', ``str``\n :param exception_message: Message to use for the exception.\n Formatted with ``name``, ``domain_name``,\n and ``folder_name``\n :throws: :class:`JenkinsException` whenever the credentail\n does not exist in domain of folder\n '''\n if not self.credential_exists(name, folder_name, domain_name):\n raise JenkinsException(exception_message\n % name)\n\n def get_credential_global_config(self, name, domain_name='_'):\n '''Get configuration of credential in domain of folder.\n :param name: Name of credentail, ``str``\n :param domain_name: Domain name, default is 
'_', ``str``\n :returns: Credential configuration (XML format)\n '''\n return self.jenkins_open(requests.Request(\n 'GET', self._build_url(CONFIG_CREDENTIAL_GLOBAL, locals())\n ))\n\n def get_credential_info(self, name, folder_name=None, domain_name='_'):\n '''Get credential information dictionary in domain of folder\n\n :param name: Name of credentail, ``str``\n :param folder_name: folder_name, ``str``\n :param domain_name: Domain name, default is '_', ``str``\n :returns: Dictionary of credential info, ``dict``\n '''\n try:\n response = self.jenkins_open(requests.Request(\n 'GET', self._build_url(CREDENTIAL_INFO_GLOBAL, locals())\n ))\n if response:\n return json.loads(response)\n else:\n raise JenkinsException('credential[%s] does not exist.' % name)\n except (req_exc.HTTPError, NotFoundException):\n raise JenkinsException('credential[%s] does not exist.' % name)\n except ValueError:\n raise JenkinsException(\n 'Could not parse JSON info for credential[%s].' % name\n )\n\n def credential_exists(self, name, folder_name=None, domain_name='_'):\n '''Check whether a credentail exists in domain of folder\n\n :param name: Name of credentail, ``str``\n :param folder_name: Folder name, ``str``\n :param domain_name: Domain name, default is '_', ``str``\n :returns: ``True`` if credentail exists, ``False`` otherwise\n '''\n try:\n return self.get_credential_info(name)['id'] == name\n except JenkinsException:\n return False\n\n def create_credential_global(self, name=None, user=None, password=None, secret=None, comment=None, domain_name='_'):\n '''Create credentail in domain of folder\n\n :param name: username\n :param password: password\n :param comment: comment, ``str``\n :param config_xml: New XML configuration, ``str``\n :param domain_name: Domain name, default is '_', ``str``\n '''\n st = shortuuid.ShortUUID()\n st.set_alphabet(\n f\"0123456789{''.join([chr(i) for i in range(ord('a'), ord('z') + 1)])}\")\n if name is None:\n name = '-'.join(['api', st.random(length=8),\n st.random(length=4), st.random(length=12)])\n config_xml = '''<com.cloudbees.plugins.credentials.impl.UsernamePasswordCredentialsImpl>\n <scope>GLOBAL</scope>\n <id>%s</id>\n <description>[%s] Created by DevOps Platform</description>\n <username>%s</username>\n <password>%s</password>\n</com.cloudbees.plugins.credentials.impl.UsernamePasswordCredentialsImpl>''' % (name, comment, user, password)\n if user is None:\n config_xml = '''<org.jenkinsci.plugins.plaincredentials.impl.StringCredentialsImpl>\n <scope>GLOBAL</scope>\n <id>%s</id>\n <description>[%s] Created by DevOps Platform</description>\n <secret>%s</secret>\n</org.jenkinsci.plugins.plaincredentials.impl.StringCredentialsImpl>''' % (name, comment, secret)\n if self.credential_exists(name):\n raise JenkinsException('credential[%s] already exists.' 
% name)\n\n self.jenkins_open(requests.Request(\n 'POST', self._build_url(CREATE_CREDENTIAL_GLOBAL, locals()),\n data=config_xml.encode('utf-8'),\n headers=DEFAULT_HEADERS\n ))\n self.assert_credential_exists(\n name, exception_message='create credential[%s] failed.')\n return {'status': 0, 'data': name}\n\n def reconfig_credential_global(self, name, user=None, password=None, secret=None, comment=None, domain_name='_'):\n \"\"\"\n Reconfig credential with new config in domain of folder\n :param name: name, ``str``\n :param user:\n :param password:\n :param secret:\n :param comment:\n :param domain_name: Domain name, default is '_', ``str``\n :return:\n \"\"\"\n reconfig_url = self._build_url(CONFIG_CREDENTIAL_GLOBAL, locals())\n config_xml = self.get_credential_global_config(name)\n xml_dict = xmltodict.parse(config_xml)\n if user is None:\n xml_dict['org.jenkinsci.plugins.plaincredentials.impl.StringCredentialsImpl']['secret'] = secret\n if comment:\n xml_dict['org.jenkinsci.plugins.plaincredentials.impl.StringCredentialsImpl']['description'] = comment\n else:\n xml_dict['com.cloudbees.plugins.credentials.impl.UsernamePasswordCredentialsImpl']['username'] = user\n xml_dict['com.cloudbees.plugins.credentials.impl.UsernamePasswordCredentialsImpl']['password'] = password\n if comment:\n xml_dict['com.cloudbees.plugins.credentials.impl.UsernamePasswordCredentialsImpl'][\n 'description'] = comment\n config_xml = xmltodict.unparse(xml_dict, pretty=True)\n self.jenkins_open(requests.Request(\n 'POST', reconfig_url,\n data=config_xml.encode('utf-8'),\n headers=DEFAULT_HEADERS\n ))\n\n def create_job(self, name, config_xml):\n '''Create a new Jenkins job\n\n :param name: Name of Jenkins job, ``str``\n :param config_xml: config file text, ``str``\n '''\n folder_url, short_name = self._get_job_folder(name)\n if self.job_exists(name):\n raise JenkinsException('job[%s] already exists' % (name))\n\n try:\n self.jenkins_open(requests.Request(\n 'POST', self._build_url(CREATE_JOB, locals()),\n data=config_xml.encode('utf-8'),\n headers=DEFAULT_HEADERS\n ))\n except NotFoundException:\n raise JenkinsException('Cannot create job[%s] because folder '\n 'for the job does not exist' % (name))\n self.assert_job_exists(name, 'create[%s] failed')\n\n def reconfig_job(self, name, config_xml):\n '''Change configuration of existing Jenkins job.\n\n To create a new job, see :meth:`Jenkins.create_job`.\n\n :param name: Name of Jenkins job, ``str``\n :param config_xml: New XML configuration, ``str``\n '''\n folder_url, short_name = self._get_job_folder(name)\n reconfig_url = self._build_url(CONFIG_JOB, locals())\n self.jenkins_open(requests.Request(\n 'POST', reconfig_url,\n data=config_xml.encode('utf-8'),\n headers=DEFAULT_HEADERS\n ))\n\n def get_stage_describe(self, name, number, node_number):\n \"\"\" 获取 单个stage 详情 \"\"\"\n folder_url, short_name = self._get_job_folder(name)\n try:\n response = self.jenkins_open(requests.Request(\n 'GET', self._build_url(STAGE_DES, locals())\n ))\n\n if response:\n return json.loads(response)\n else:\n raise JenkinsException('job[%s] number[%d] does not exist'\n % (name, number))\n except (req_exc.HTTPError, NotFoundException):\n raise JenkinsException('job[%s] number[%d] does not exist'\n % (name, number))\n except ValueError:\n raise JenkinsException(\n 'Could not parse JSON info for job[%s] number[%d]'\n % (name, number)\n )\n\n def get_stage_logs(self, name, number, node_number):\n \"\"\" 获取 stage 执行日志\"\"\"\n folder_url, short_name = self._get_job_folder(name)\n try:\n 
response = self.jenkins_open(requests.Request(\n 'GET', self._build_url(STAGE_LOG, locals())\n ))\n if response:\n return json.loads(response)\n else:\n raise JenkinsException('job[%s] number[%d] does not exist'\n % (name, number))\n except (req_exc.HTTPError, NotFoundException):\n raise JenkinsException('job[%s] number[%d] does not exist'\n % (name, number))\n except ValueError:\n raise JenkinsException(\n 'Could not parse JSON info for job[%s] number[%d]'\n % (name, number)\n )\n\n def get_stage_info(self, name, number, depth=0):\n\n folder_url, short_name = self._get_job_folder(name)\n try:\n response = self.jenkins_open(requests.Request(\n 'GET', self._build_url(STAGE_INFO, locals())\n ))\n if response:\n return json.loads(response)\n else:\n raise JenkinsException('job[%s] number[%d] does not exist'\n % (name, number))\n except (req_exc.HTTPError, NotFoundException):\n raise JenkinsException('job[%s] number[%d] does not exist'\n % (name, number))\n except ValueError:\n raise JenkinsException(\n 'Could not parse JSON info for job[%s] number[%d]'\n % (name, number)\n )\n\n def get_flow_detail(self, job_name, build_number):\n stage_data = self.get_stage_info(name=job_name, number=build_number)\n stages = stage_data.get('stages')\n for i in stages:\n logs = ''\n try:\n # 获取stage返回信息\n response = self.jenkins_open(requests.Request(\n 'GET', self._build_url(\n unquote(i['_links']['self']['href']), locals())\n ))\n if response:\n res = json.loads(response)\n for j in res['stageFlowNodes']:\n response = self.jenkins_open(requests.Request(\n 'GET', self._build_url(\n unquote(j['_links']['log']['href']), locals())\n ))\n res = json.loads(response)\n try:\n # 移除href html信息,保留链接文字\n import re\n pat = re.compile('<a href[^>]*>')\n logs = logs + '\\n' + \\\n pat.sub('', res['text'].replace('</a>', ''))\n except:\n pass\n else:\n raise JenkinsException('job[%s] number[%d] does not exist'\n % (job_name, build_number))\n except (req_exc.HTTPError, NotFoundException):\n raise JenkinsException('job[%s] number[%d] does not exist'\n % (job_name, build_number))\n except ValueError:\n raise JenkinsException(\n 'Could not parse JSON info for job[%s] number[%d]'\n % (job_name, build_number)\n )\n\n stage_data[\"stages\"][stages.index(i)]['logs'] = logs\n return stage_data\n\n def get_queue_item(self, number, depth=0):\n '''Get information about a queued item (to-be-created job).\n\n The returned dict will have a \"why\" key if the queued item is still\n waiting for an executor.\n\n The returned dict will have an \"executable\" key if the queued item is\n running on an executor, or has completed running. Use this to\n determine the job number / URL.\n\n :param name: queue number, ``int``\n :returns: dictionary of queued information, ``dict``\n '''\n url = self._build_url(Q_ITEM, locals())\n try:\n response = self.jenkins_open(requests.Request('GET', url))\n if response:\n return json.loads(response)\n else:\n raise JenkinsException('queue number[%d] does not exist'\n % number)\n except (req_exc.HTTPError, NotFoundException):\n raise JenkinsException('queue number[%d] does not exist' % number)\n except ValueError:\n raise JenkinsException(\n 'Could not parse JSON info for queue number[%d]' % number\n )\n\n def build_job(self, name, parameters=None, token=None):\n '''Trigger build job.\n\n This method returns a queue item number that you can pass to\n :meth:`Jenkins.get_queue_item`. 
Note that this queue number is only\n valid for about five minutes after the job completes, so you should\n get/poll the queue information as soon as possible to determine the\n job's URL.\n\n :param name: name of job\n :param parameters: parameters for job, or ``None``, ``dict``\n :param token: Jenkins API token\n :returns: ``int`` queue item\n '''\n response = self.jenkins_request(requests.Request(\n 'POST', self.build_job_url(name, parameters, token)))\n\n if 'Location' not in response.headers:\n raise EmptyResponseException(\n \"Header 'Location' not found in \"\n \"response from server[%s]\" % self.server)\n\n location = response.headers['Location']\n if location.endswith('/'):\n location = location[:-1]\n parts = location.split('/')\n number = int(parts[-1])\n return number\n\n def get_job_config(self, name):\n '''Get configuration of existing Jenkins job.\n\n :param name: Name of Jenkins job, ``str``\n :returns: job configuration (XML format)\n '''\n folder_url, short_name = self._get_job_folder(name)\n request = requests.Request(\n 'GET', self._build_url(CONFIG_JOB, locals()))\n return self.jenkins_open(request)\n\n def get_job_info(self, name, depth=0, fetch_all_builds=False):\n '''Get job information dictionary.\n\n :param name: Job name, ``str``\n :param depth: JSON depth, ``int``\n :param fetch_all_builds: If true, all builds will be retrieved\n from Jenkins. Otherwise, Jenkins will\n only return the most recent 100\n builds. This comes at the expense of\n an additional API call which may\n return significant amounts of\n data. ``bool``\n :returns: dictionary of job information\n '''\n folder_url, short_name = self._get_job_folder(name)\n try:\n response = self.jenkins_open(requests.Request(\n 'GET', self._build_url(JOB_INFO, locals())\n ))\n if response:\n if fetch_all_builds:\n return self._add_missing_builds(json.loads(response))\n else:\n return json.loads(response)\n else:\n raise JenkinsException('job[%s] does not exist' % name)\n except (req_exc.HTTPError, NotFoundException):\n raise JenkinsException('job[%s] does not exist' % name)\n except ValueError:\n raise JenkinsException(\n \"Could not parse JSON info for job[%s]\" % name)" }, { "identifier": "user_ip", "path": "common/get_ip.py", "snippet": "def user_ip(request):\n \"\"\"\n 获取用户真实IP\n :param request:\n :return:\n \"\"\"\n if 'X-Real-IP' in request.META:\n return request.META['X-Real-IP']\n if 'HTTP_X_FORWARDED_FOR' in request.META:\n return request.META['HTTP_X_FORWARDED_FOR'].split(',')[0]\n if 'REMOTE_ADDR' in request.META:\n return request.META['REMOTE_ADDR'].split(',')[0]" }, { "identifier": "ThirdPartyUser", "path": "common/ext_fun.py", "snippet": "class ThirdPartyUser(object):\n\n def get_user(self):\n user = UserProfile.objects.get_or_create(username='thirdparty')[0]\n self.set_permission(user, self.get_role())\n return user\n\n def get_role(self):\n return Role.objects.get_or_create(name='thirdparty')[0]\n\n def get_perm(self):\n return Permission.objects.get_or_create(name='Jenkins回调', method='jenkins_callback')[0]\n\n def set_permission(self, user, role):\n role.permissions.set([self.get_perm().id])\n user.roles.set([role.id])" }, { "identifier": "set_redis_data", "path": "common/ext_fun.py", "snippet": "def set_redis_data(name, config):\n cache.set(f\"system:{name}\", config, None)" }, { "identifier": "get_redis_data", "path": "common/ext_fun.py", "snippet": "def get_redis_data(name):\n ret = cache.get(f\"system:{name}\")\n if not ret:\n try:\n if name == 'cicd-harbor':\n qs = 
SystemConfig.objects.filter(type=name)[0]\n else:\n qs = SystemConfig.objects.get(name=name)\n except BaseException as e:\n return None\n ret = json.loads(qs.config)\n set_redis_data(name, ret)\n\n return ret" }, { "identifier": "timeline_generate", "path": "common/ext_fun.py", "snippet": "def timeline_generate(time_range, format_type='dashboard'):\n \"\"\"\n 根据起始时间生成时间线\n\n : params format_type: 默认为dashboard, 用于概览报表粗略显示, 其它用于监控类的展示则使用更细粒度的格式\n \"\"\"\n TIME_FREQNAMES = DASHBOARD_TIME_FREQNAMES\n TIME_FORMAT = DASHBOARD_TIME_FORMAT\n if format_type == 'cmdb':\n TIME_FREQNAMES = DASHBOARD_TIME_FREQNAMES_T\n TIME_FORMAT = DASHBOARD_TIME_FORMAT_T\n start_time = time_range['start_time']\n end_time = time_range['end_time']\n time_line = rrule(\n freq=TIME_FREQNAMES[time_range['name']], dtstart=start_time, until=end_time)\n return [i.strftime(TIME_FORMAT[time_range['name']]) for i in time_line]" }, { "identifier": "time_period", "path": "common/ext_fun.py", "snippet": "def time_period(time_range='6-months', type_range='static', time_zone='Asia/Shanghai', name=None):\n \"\"\"\n 根据时间范围生成起止时间\n \"\"\"\n start_time = None\n end_time = timezone.now().astimezone(pytz.timezone(time_zone))\n if type_range == 'dynamic' and name is None:\n start_time = datetime.strptime(time_range[0], '%Y-%m-%d %H:%M:%S')\n end_time = datetime.strptime(time_range[1], '%Y-%m-%d %H:%M:%S')\n if start_time > end_time:\n start_time, end_time = end_time, start_time\n if (end_time - start_time).days >= 60:\n name = 'months'\n elif (end_time - start_time).days >= 2:\n name = 'days'\n elif (end_time - start_time).days >= 1 or (end_time - start_time).seconds > 60 * 60:\n name = 'hours'\n else:\n name = 'minutes'\n return {'name': name, 'start_time': start_time, 'end_time': end_time}\n\n if type_range == 'static':\n _time = time_range.split('-')\n if _time[-1] == 'week':\n start_time = end_time - relativedelta(days=end_time.weekday(), hours=end_time.hour, minutes=end_time.minute,\n seconds=end_time.second,\n microseconds=end_time.microsecond)\n return {'name': 'days', 'start_time': start_time, 'end_time': end_time}\n if _time[-1] == 'lastweek':\n start_time = end_time - relativedelta(days=end_time.weekday() + 7, hours=end_time.hour,\n minutes=end_time.minute, seconds=end_time.second,\n microseconds=end_time.microsecond)\n end_time = end_time - relativedelta(days=end_time.weekday(), hours=end_time.hour, minutes=end_time.minute,\n seconds=end_time.second, microseconds=end_time.microsecond)\n return {'name': 'days', 'start_time': start_time, 'end_time': end_time}\n if _time[-1] in ['today', 'yesterday']:\n start_time = end_time - relativedelta(hours=end_time.hour, minutes=end_time.minute, seconds=end_time.second,\n microseconds=end_time.microsecond)\n if _time[-1] == 'yesterday':\n end_time = start_time\n start_time = end_time - relativedelta(days=1)\n return {'name': 'hours', 'start_time': start_time, 'end_time': end_time}\n name = _time[1]\n if name is None:\n if _time[1] in ['years', 'months']:\n name = 'months'\n if _time[1] == 'months' and int(_time[0]) < 2:\n name = 'days'\n if _time[1] == 'days' and int(_time[0]) < 2:\n name = 'hours'\n start_time = end_time + relativedelta(**{_time[1]: -int(_time[0])})\n return {'name': name, 'start_time': start_time, 'end_time': end_time}" }, { "identifier": "node_filter", "path": "common/ext_fun.py", "snippet": "def node_filter(node_id, data):\n \"\"\"\n 查找节点\n\n :params: node_id int 节点ID\n :params: data list 节点数组\n \"\"\"\n for i in data:\n if i['id'] == node_id:\n print('get node', i)\n 
return i\n else:\n if i.get('children', None):\n node = node_filter(node_id, i['children'])\n if isinstance(node, (dict,)):\n return node" }, { "identifier": "test_notify", "path": "qtasks/tasks.py", "snippet": "def test_notify(receiver, notify_type='mail', robot_name=None, robot_webhook=None, robot_key=None,\n robot_type='dingtalk'):\n ret = None\n if notify_type == 'mail':\n mail_send = OmsMail()\n ret = mail_send.test_notify(receiver)\n if notify_type == 'robot':\n robot_notify = ROBOT_CATEGORIES[robot_type](robot_webhook, robot_key)\n ret = robot_notify.test_notify(receiver, robot_name)\n\n return ret" } ]
import hashlib import django_filters import datetime import time import shortuuid import json import logging from django.core.cache import cache from rest_framework import viewsets, status from rest_framework.views import APIView from rest_framework.response import Response from rest_framework.decorators import action from rest_framework import pagination from rest_framework_simplejwt.views import TokenObtainPairView, TokenRefreshView from rest_framework_simplejwt.exceptions import TokenError, InvalidToken from rest_framework_simplejwt.authentication import JWTAuthentication from rest_framework_simplejwt.tokens import RefreshToken, Token, OutstandingToken from rest_framework.filters import SearchFilter, OrderingFilter from django_q.tasks import async_task, result from django.contrib.auth.models import update_last_login from django.db.models import Q from django.contrib.auth import logout from common.variables import FEISHU_SYNC_USER_JOB_CACHE_KEY from dbapp.models import Menu, Permission, Role, Organization, UserProfile, AuditLog, SystemConfig, DataDict from ucenter.serializers import MenuSerializers, MenuListSerializers, PermissionListSerializers, PermissionSerializers, \ RoleListSerializers, \ RoleSerializers, OrganizationSerializers, \ UserProfileListSerializers, UserProfileSerializers, UserProfileDetailSerializers, AuditLogSerializers, \ AuditLogActivitySerializers, SystemConfigSerializers, \ SystemConfigListSerializers, DataDictSerializers from common.extends.viewsets import CustomModelViewSet, CustomModelParentViewSet from common.extends.permissions import RbacPermission from common.extends.JwtAuth import CustomInvalidToken, TokenObtainPairSerializer, TokenRefreshSerializer from common.extends.handler import log_audit from common.extends.filters import AuditLogFilter, CustomSearchFilter from common.utils.JenkinsAPI import GlueJenkins from common.get_ip import user_ip from common.ext_fun import ThirdPartyUser, set_redis_data, get_redis_data, timeline_generate, time_period, \ node_filter from qtasks.tasks import test_notify from django.conf import settings from django.contrib.auth import login, REDIRECT_FIELD_NAME from django.views.decorators.csrf import csrf_exempt, csrf_protect from django.views.decorators.cache import never_cache
11,926
def update(self, request, *args, **kwargs): instance = self.queryset.get(username=request.user) instance.__dict__.update(**request.data) instance.save() log_audit(request, self.serializer_class.Meta.model.__name__, '更新用户信息', '', data=self.serializer_class(instance).data, old_data=self.serializer_class(instance).data) data = {'data': '更新成功', 'status': 'success', 'code': 20000} return Response(data) def menu_sort(self, menus): """ 菜单排序 sort值越小越靠前 :param menus: :return: """ for menu in menus: try: if menu['children']: self.menu_sort(menu['children']) except KeyError: pass try: menus.sort(key=lambda k: (k.get('sort'))) except: pass return menus @action(methods=['GET'], url_path='info', detail=False) def info(self, request): """ 获取用户信息 :param request: :return: """ serializer = self.get_serializer(request.user) data = serializer.data data.pop('password', None) data.pop('routers', None) data['roles'] = ['超级管理员'] if request.user.is_superuser else [ i['name'] for i in data['user_roles']] return Response({'code': 20000, 'data': data}) @action(methods=['GET'], url_path='menus', detail=False) def menus(self, request): """ 获取用户菜单 :param request: :return: """ serializer = self.get_serializer(request.user) data = serializer.data routers = data['routers'] routers = self.menu_sort(routers) data = {'data': {'routers': routers}, 'code': 20000, 'status': 'success'} return Response(data) @action(methods=['GET'], url_path='activity', detail=False, queryset=AuditLog.objects.all()) def user_activity(self, request): page_size = request.query_params.get('page_size') pagination.PageNumberPagination.page_size = page_size queryset = self.filter_queryset( self.get_queryset().filter(Q(user=request.user.first_name) | Q(user=request.user.username))) page = self.paginate_queryset(queryset) if page is not None: serializer = self.get_serializer(page, many=True) return self.get_paginated_response(serializer.data) serializer = self.get_serializer(queryset, many=True) data = {'data': {'total': queryset.count(), 'items': serializer.data}, 'code': 20000, 'status': 'success'} return Response(data) class SystemConfigViewSet(CustomModelViewSet): """ 系统设置视图 ### 系统设置权限 {'*': ('system_all', '系统设置管理')}, {'get': ('system_list', '查看系统设置')}, """ perms_map = ( {'*': ('admin', '管理员')}, {'*': ('system_all', '系统设置管理')}, {'get': ('system_list', '查看系统设置')}, ) queryset = SystemConfig.objects.all() serializer_class = SystemConfigSerializers filter_backends = ( django_filters.rest_framework.DjangoFilterBackend, SearchFilter, OrderingFilter) filter_fields = ('name', 'type') search_fields = ('name', 'type') def get_serializer_class(self): if self.action in ['list', 'retrieve']: return SystemConfigListSerializers return SystemConfigSerializers @staticmethod def set_credit(jenkins_cli, name, user=None, password=None, secret=None, comment=None): try: credit = jenkins_cli.credential_exists(name) if credit: jenkins_cli.reconfig_credential_global(name=name, user=user, password=password, secret=secret, comment=comment) else: jenkins_cli.create_credential_global(name=name, user=user, password=password, secret=secret, comment=comment) except BaseException as e: print('err: ', str(e)) def create(self, request, *args, **kwargs): if request.data['type'] == 'thirdparty': # 生成token给第三方访问 expired_time = self.request.data['config']['expired_time'] seconds = int(expired_time / 1000 - time.time())
#!/usr/bin/env python # -*- coding: utf-8 -*- """ @Author : Charles Lai @Contact : [email protected] @Time : 2020/9/15 下午4:08 @FileName: views.py @Blog :https://imaojia.com """ logger = logging.getLogger('drf') DEFAULT_SESSION_TIMEOUT = None class DataDictViewSet(CustomModelParentViewSet): """ 数据字典视图 ### 数据字典权限 {'*': ('data_all', '数据字典管理')}, {'get': ('data_list', '查看数据字典')}, {'post': ('data_create', '创建数据字典')}, {'put': ('data_edit', '编辑数据字典')}, {'patch': ('data_edit', '编辑数据字典')}, {'delete': ('data_delete', '删除数据字典')} """ perms_map = ( {'*': ('admin', '管理员')}, {'*': ('data_all', '数据字典管理')}, {'get': ('data_list', '查看数据字典')}, {'post': ('data_create', '创建数据字典')}, {'put': ('data_edit', '编辑数据字典')}, {'patch': ('data_edit', '编辑数据字典')}, {'delete': ('data_delete', '删除数据字典')} ) queryset = DataDict.objects.all() serializer_class = DataDictSerializers filter_backends = ( django_filters.rest_framework.DjangoFilterBackend, SearchFilter, OrderingFilter) filter_fields = ('key', 'value') search_fields = ('key', 'value') def perform_update(self, serializer): serializer.save() cache.delete(f"datadict:{serializer.data['key']}:0") cache.delete(f"datadict:{serializer.data['key']}:1") @action(methods=['GET'], url_path='user', detail=False) def get_user(self, request): """ 获取用户列表 ### 传递参数 force: 0|1 force为1时强制刷新 """ _force = request.query_params.get('force', None) position = request.query_params.get('position', None) _key = str( f'project:users:{self.request.user.id}-{self.request.query_params}') try: data = cache.get(_key) except BaseException as e: cache.delete(_key) data = None if not data or _force: if position: users = UserProfile.objects.exclude( username='thirdparty').filter(position=position) else: users = UserProfile.objects.exclude(username='thirdparty') data = [{'id': i.id, 'first_name': i.first_name, 'username': i.username, 'name': i.name, 'title': i.title, 'position': i.position} for i in users] cache.set(_key, data, timeout=60 * 60 * 24) return Response({'code': 20000, 'data': data}) @action(methods=['GET'], url_path='extra', detail=False) def get_by_key(self, request): """ 通过指定key名获取 参数: key """ key_name = request.query_params.get('key', None) instance = self.queryset.get(key=key_name) serializer = self.get_serializer(instance) data = {'data': serializer.data, 'code': 20000, 'status': 'success'} return Response(data) class AuditLogViewSet(CustomModelViewSet): """ 审计日志视图 ### 审计日志权限 {'get': ('audit_list', '查看审计日志')} """ perms_map = ( {'*': ('admin', '管理员')}, {'get': ('audit_list', '查看审计日志')} ) queryset = AuditLog.objects.all() serializer_class = AuditLogSerializers filter_backends = (django_filters.rest_framework.DjangoFilterBackend, CustomSearchFilter, OrderingFilter) filter_class = AuditLogFilter filter_fields = ('user', 'type', 'action', 'action_ip', 'operator') search_fields = ('user', 'type', 'action', 'action_ip', 'content') def create(self, request, *args, **kwargs): pass def update(self, request, *args, **kwargs): pass def destroy(self, request, *args, **kwargs): pass class MenuViewSet(CustomModelParentViewSet): """ 菜单视图 ### 菜单权限 {'*': ('menu_all', '菜单管理')}, {'get': ('menu_list', '查看菜单')}, {'post': ('menu_create', '创建菜单')}, {'put': ('menu_edit', '编辑菜单')}, {'patch': ('menu_edit', '编辑菜单')}, {'delete': ('menu_delete', '删除菜单')} """ perms_map = ( {'*': ('admin', '管理员')}, {'*': ('menu_all', '菜单管理')}, {'get': ('menu_list', '查看菜单')}, {'post': ('menu_create', '创建菜单')}, {'put': ('menu_edit', '编辑菜单')}, {'patch': ('menu_edit', '编辑菜单')}, {'delete': ('menu_delete', '删除菜单')} ) queryset = Menu.objects.all() 
serializer_class = MenuSerializers def get_serializer_class(self): if self.action in ['list', 'retrieve']: return MenuListSerializers return MenuSerializers class PermissionViewSet(CustomModelParentViewSet): """ 权限视图 ### 查看权限列表的权限 {'*': ('perm_all', '权限管理')}, {'get': ('perm_list', '查看权限')}, """ perms_map = ( {'*': ('admin', '管理员')}, {'*': ('perm_all', '权限管理')}, {'get': ('perm_list', '查看权限')} ) queryset = Permission.objects.all() serializer_class = PermissionSerializers def get_serializer_class(self): if self.action in ['list', 'retrieve']: return PermissionListSerializers return PermissionSerializers class RoleViewSet(CustomModelViewSet): """ 角色视图 ### 角色管理权限 {'*': ('role_all', '角色管理')}, {'get': ('role_list', '查看角色')}, {'post': ('role_create', '创建角色')}, {'put': ('role_edit', '编辑角色')}, {'patch': ('role_edit', '编辑角色')}, {'delete': ('role_delete', '删除角色')} """ perms_map = ( {'*': ('admin', '管理员')}, {'*': ('role_all', '角色管理')}, {'get': ('role_list', '查看角色')}, {'post': ('role_create', '创建角色')}, {'put': ('role_edit', '编辑角色')}, {'patch': ('role_edit', '编辑角色')}, {'delete': ('role_delete', '删除角色')} ) queryset = Role.objects.exclude(name='thirdparty') serializer_class = RoleSerializers def get_serializer_class(self): if self.action == 'list' or self.action == 'retrieve': return RoleListSerializers return RoleSerializers def perform_destroy(self, instance): if instance.name != '默认角色': instance.delete() class OrganizationViewSet(CustomModelParentViewSet): """ 组织架构视图 ### 组织架构权限 {'*': ('org_all', '组织架构管理')}, {'get': ('org_list', '查看组织架构')}, {'post': ('org_create', '创建组织架构')}, {'put': ('org_edit', '编辑组织架构')}, {'patch': ('org_edit', '编辑组织架构')}, {'delete': ('org_delete', '删除组织架构')} """ perms_map = ( {'*': ('admin', '管理员')}, {'*': ('org_all', '组织架构管理')}, {'get': ('org_list', '查看组织架构')}, {'post': ('org_create', '创建组织架构')}, {'put': ('org_edit', '编辑组织架构')}, {'patch': ('org_edit', '编辑组织架构')}, {'delete': ('org_delete', '删除组织架构')} ) queryset = Organization.objects.all() serializer_class = OrganizationSerializers search_fields = ('name', 'dn') def get_org_users(self, org): qs = org.org_user.all() for i in org.children.all(): qs |= self.get_org_users(i) return qs @action(methods=['GET'], url_path='users', detail=True) def organization_users(self, request, pk=None): page_size = request.query_params.get('page_size') pagination.PageNumberPagination.page_size = page_size qs = self.queryset.get(pk=pk) queryset = self.get_org_users(qs).distinct() page = self.paginate_queryset(queryset) if page is not None: serializer = UserProfileListSerializers(page, many=True) return self.get_paginated_response(serializer.data) serializer = UserProfileListSerializers(queryset, many=True) data = {'data': {'total': queryset.count(), 'items': serializer.data}, 'code': 20000, 'status': 'success'} return Response(data) class UserViewSet(CustomModelViewSet): """ 用户管理视图 ### 用户管理权限 {'*': ('user_all', '用户管理')}, {'get': ('user_list', '查看用户')}, {'post': ('user_create', '创建用户')}, {'put': ('user_edit', '编辑用户')}, {'patch': ('user_edit', '编辑用户')}, {'delete': ('user_delete', '删除用户')} """ perms_map = ( {'*': ('admin', '管理员')}, {'*': ('user_all', '用户管理')}, {'get': ('user_list', '查看用户')}, {'post': ('user_create', '创建用户')}, {'put': ('user_edit', '编辑用户')}, {'patch': ('user_edit', '编辑用户')}, {'delete': ('user_delete', '删除用户')} ) queryset = UserProfile.objects.exclude( Q(username='thirdparty') | Q(is_active=False)) serializer_class = UserProfileSerializers filter_backends = ( django_filters.rest_framework.DjangoFilterBackend, SearchFilter, OrderingFilter) 
filter_fields = { 'position': ['exact'], 'title': ['exact'], 'id': ['in', 'exact'], } search_fields = ('position', 'mobile', 'title', 'username', 'first_name', 'email') def get_serializer_class(self): if self.action == 'list': return UserProfileListSerializers if self.action == 'detail' or self.action == 'retrieve': return UserProfileDetailSerializers return UserProfileSerializers def create(self, request, *args, **kwargs): if self.queryset.filter(username=request.data['username']): return Response({'code': 20000, 'message': '%s 账号已存在!' % request.data['username']}) password = shortuuid.ShortUUID().random(length=8) request.data['password'] = password serializer = self.get_serializer(data=request.data) serializer.is_valid(raise_exception=True) self.perform_create(serializer) data = serializer.data log_audit(request, action_type=self.serializer_class.Meta.model.__name__, action='创建', content='', data=serializer.data) data['password'] = password data['status'] = 'success' data['code'] = 20000 return Response(data) def perform_destroy(self, instance): # 禁用用户 instance.is_active = False instance.save() @action(methods=['POST'], url_path='password/reset', detail=False) def password_reset(self, request): """ 重置用户密码 ### 重置用户密码 """ data = self.request.data user = self.queryset.get(pk=data['uid']) m = hashlib.md5() m.update(data['password']) password = m.hexdigest() user.set_password(password) user.save() log_audit(request, action_type=self.serializer_class.Meta.model.__name__, action='密码修改', content=f"修改用户{user.first_name or user.username}密码") return Response({'code': 20000, 'data': '密码已更新.'}) @action(methods=['GET'], url_path='detail', detail=False) def detail_info(self, request, pk=None, *args, **kwargs): """ 用户详细列表 ### 获取用户详细信息,用户管理模块 """ page_size = request.query_params.get('page_size') pagination.PageNumberPagination.page_size = page_size queryset = self.filter_queryset(self.get_queryset()) page = self.paginate_queryset(queryset) if page is not None: serializer = self.get_serializer(page, many=True) return self.get_paginated_response(serializer.data) serializer = self.get_serializer(queryset, many=True) data = {'data': {'total': queryset.count(), 'items': serializer.data}, 'code': 20000, 'status': 'success'} return Response(data) class UserAuthTokenView(TokenObtainPairView): """ 用户登录视图 """ perms_map = () serializer_class = TokenObtainPairSerializer def post(self, request, *args, **kwargs): serializer = self.get_serializer(data=request.data) data = None try: if not serializer.is_valid(): logger.exception(f'用户登录异常{serializer.errors}') return Response({'code': 40108, 'message': str(e.args[0])}, status=status.HTTP_401_UNAUTHORIZED) data = serializer.validated_data log_audit(request, 'User', '登录成功', '', user=request.data['username']) # 用户登录成功,绑定默认角色并更新最后登录时间 user = UserProfile.objects.get(username=request.data['username']) try: role = Role.objects.get(name='默认角色') user.roles.add(*[role.id]) except BaseException as e: logger.exception(f"绑定用户角色失败, 原因: {e}") update_last_login(None, user) except BaseException as e: logger.error(f"用户登录异常, 原因: {e}") log_audit(request, 'User', '登录失败', '', user=request.data['username']) return Response({'code': 40108, 'message': str(e.args[0])}, status=status.HTTP_401_UNAUTHORIZED) return Response({'code': 20000, 'data': data}) class UserAuthTokenRefreshView(TokenRefreshView): """ 用户token刷新视图 """ perms_map = () serializer_class = TokenRefreshSerializer def post(self, request, *args, **kwargs): serializer = self.get_serializer(data=request.data) try: if not 
serializer.is_valid(raise_exception=False): logger.error(f'Token刷新校验不通过: {serializer.errors}') return Response({'code': 40101, 'message': '刷新Token已过期,请重新登录.'}, status=status.HTTP_401_UNAUTHORIZED) data = serializer.validated_data data['username'] = request.user.username except TokenError as e: logger.error(f"刷新用户token异常, 原因: {e}") return Response({'code': 40101, 'message': '刷新Token已过期,请重新登录.'}, status=status.HTTP_401_UNAUTHORIZED) return Response({'code': 20000, 'data': data}) class UserLogout(APIView): """ 用户注销视图 """ perms_map = () def get(self, request, format=None): logout(request) return Response({ 'code': 20000 }) class UserProfileViewSet(CustomModelViewSet): """ 用户信息视图 ### 用户信息管理权限 {'*': ('userinfo_all', '用户信息管理')}, {'get': ('userinfo_list', '查看用户信息')}, {'put': ('userinfo_edit', '编辑用户信息')}, {'patch': ('userinfo_edit', '编辑用户信息')}, """ perms_map = ( {'*': ('admin', '管理员')}, {'*': ('userinfo_all', '用户信息管理')}, {'get': ('userinfo_list', '查看用户信息')}, {'put': ('userinfo_edit', '编辑用户信息')}, {'patch': ('userinfo_edit', '编辑用户信息')}, ) queryset = UserProfile.objects.exclude(username='thirdparty') authentication_classes = [JWTAuthentication, ] serializer_class = UserProfileDetailSerializers filter_backends = (django_filters.rest_framework.DjangoFilterBackend, CustomSearchFilter, OrderingFilter) filter_class = AuditLogFilter filter_fields = ('user', 'type', 'action', 'action_ip', 'operator') search_fields = ('user', 'type', 'action', 'action_ip', 'content') def get_serializer_class(self): if self.action == 'create' or self.action == 'update': return UserProfileSerializers if self.action == 'user_activity': return AuditLogActivitySerializers return UserProfileDetailSerializers def update(self, request, *args, **kwargs): instance = self.queryset.get(username=request.user) instance.__dict__.update(**request.data) instance.save() log_audit(request, self.serializer_class.Meta.model.__name__, '更新用户信息', '', data=self.serializer_class(instance).data, old_data=self.serializer_class(instance).data) data = {'data': '更新成功', 'status': 'success', 'code': 20000} return Response(data) def menu_sort(self, menus): """ 菜单排序 sort值越小越靠前 :param menus: :return: """ for menu in menus: try: if menu['children']: self.menu_sort(menu['children']) except KeyError: pass try: menus.sort(key=lambda k: (k.get('sort'))) except: pass return menus @action(methods=['GET'], url_path='info', detail=False) def info(self, request): """ 获取用户信息 :param request: :return: """ serializer = self.get_serializer(request.user) data = serializer.data data.pop('password', None) data.pop('routers', None) data['roles'] = ['超级管理员'] if request.user.is_superuser else [ i['name'] for i in data['user_roles']] return Response({'code': 20000, 'data': data}) @action(methods=['GET'], url_path='menus', detail=False) def menus(self, request): """ 获取用户菜单 :param request: :return: """ serializer = self.get_serializer(request.user) data = serializer.data routers = data['routers'] routers = self.menu_sort(routers) data = {'data': {'routers': routers}, 'code': 20000, 'status': 'success'} return Response(data) @action(methods=['GET'], url_path='activity', detail=False, queryset=AuditLog.objects.all()) def user_activity(self, request): page_size = request.query_params.get('page_size') pagination.PageNumberPagination.page_size = page_size queryset = self.filter_queryset( self.get_queryset().filter(Q(user=request.user.first_name) | Q(user=request.user.username))) page = self.paginate_queryset(queryset) if page is not None: serializer = self.get_serializer(page, many=True) 
return self.get_paginated_response(serializer.data) serializer = self.get_serializer(queryset, many=True) data = {'data': {'total': queryset.count(), 'items': serializer.data}, 'code': 20000, 'status': 'success'} return Response(data) class SystemConfigViewSet(CustomModelViewSet): """ 系统设置视图 ### 系统设置权限 {'*': ('system_all', '系统设置管理')}, {'get': ('system_list', '查看系统设置')}, """ perms_map = ( {'*': ('admin', '管理员')}, {'*': ('system_all', '系统设置管理')}, {'get': ('system_list', '查看系统设置')}, ) queryset = SystemConfig.objects.all() serializer_class = SystemConfigSerializers filter_backends = ( django_filters.rest_framework.DjangoFilterBackend, SearchFilter, OrderingFilter) filter_fields = ('name', 'type') search_fields = ('name', 'type') def get_serializer_class(self): if self.action in ['list', 'retrieve']: return SystemConfigListSerializers return SystemConfigSerializers @staticmethod def set_credit(jenkins_cli, name, user=None, password=None, secret=None, comment=None): try: credit = jenkins_cli.credential_exists(name) if credit: jenkins_cli.reconfig_credential_global(name=name, user=user, password=password, secret=secret, comment=comment) else: jenkins_cli.create_credential_global(name=name, user=user, password=password, secret=secret, comment=comment) except BaseException as e: print('err: ', str(e)) def create(self, request, *args, **kwargs): if request.data['type'] == 'thirdparty': # 生成token给第三方访问 expired_time = self.request.data['config']['expired_time'] seconds = int(expired_time / 1000 - time.time())
user = ThirdPartyUser().get_user()
13
2023-12-13 03:09:32+00:00
16k
liujin112/PortraitDiffusion
app.py
[ { "identifier": "AttentionBase", "path": "utils/masactrl_utils.py", "snippet": "class AttentionBase:\n def __init__(self):\n self.cur_step = 0\n self.num_att_layers = -1\n self.cur_att_layer = 0\n\n def after_step(self):\n pass\n\n def __call__(self, q, k, v, sim, attn, is_cross, place_in_unet, num_heads, **kwargs):\n out = self.forward(q, k, v, sim, attn, is_cross, place_in_unet, num_heads, **kwargs)\n self.cur_att_layer += 1\n if self.cur_att_layer == self.num_att_layers:\n self.cur_att_layer = 0\n self.cur_step += 1\n # after step\n self.after_step()\n return out\n\n def forward(self, q, k, v, sim, attn, is_cross, place_in_unet, num_heads, **kwargs):\n out = torch.einsum('b i j, b j d -> b i d', attn, v)\n out = rearrange(out, '(b h) n d -> b n (h d)', h=num_heads)\n return out\n\n def reset(self):\n self.cur_step = 0\n self.cur_att_layer = 0" }, { "identifier": "regiter_attention_editor_diffusers", "path": "utils/masactrl_utils.py", "snippet": "def regiter_attention_editor_diffusers(model, editor: AttentionBase):\n \"\"\"\n Register a attention editor to Diffuser Pipeline, refer from [Prompt-to-Prompt]\n \"\"\"\n def ca_forward(self, place_in_unet):\n def forward(x, encoder_hidden_states=None, attention_mask=None, context=None, mask=None):\n \"\"\"\n The attention is similar to the original implementation of LDM CrossAttention class\n except adding some modifications on the attention\n \"\"\"\n if encoder_hidden_states is not None:\n context = encoder_hidden_states\n if attention_mask is not None:\n mask = attention_mask\n\n to_out = self.to_out\n if isinstance(to_out, nn.modules.container.ModuleList):\n to_out = self.to_out[0]\n else:\n to_out = self.to_out\n\n h = self.heads\n q = self.to_q(x)\n is_cross = context is not None\n context = context if is_cross else x\n k = self.to_k(context)\n v = self.to_v(context)\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))\n\n sim = torch.einsum('b i d, b j d -> b i j', q, k) * self.scale\n\n if mask is not None:\n mask = rearrange(mask, 'b ... 
-> b (...)')\n max_neg_value = -torch.finfo(sim.dtype).max\n mask = repeat(mask, 'b j -> (b h) () j', h=h)\n mask = mask[:, None, :].repeat(h, 1, 1)\n sim.masked_fill_(~mask, max_neg_value)\n\n attn = sim.softmax(dim=-1)\n # the only difference\n out = editor(\n q, k, v, sim, attn, is_cross, place_in_unet,\n self.heads, scale=self.scale)\n\n return to_out(out)\n\n return forward\n\n def register_editor(net, count, place_in_unet):\n for name, subnet in net.named_children():\n if net.__class__.__name__ == 'Attention': # spatial Transformer layer\n net.forward = ca_forward(net, place_in_unet)\n return count + 1\n elif hasattr(net, 'children'):\n count = register_editor(subnet, count, place_in_unet)\n return count\n\n cross_att_count = 0\n for net_name, net in model.unet.named_children():\n if \"down\" in net_name:\n cross_att_count += register_editor(net, 0, \"down\")\n elif \"mid\" in net_name:\n cross_att_count += register_editor(net, 0, \"mid\")\n elif \"up\" in net_name:\n cross_att_count += register_editor(net, 0, \"up\")\n editor.num_att_layers = cross_att_count" }, { "identifier": "register_upblock2d", "path": "utils/free_lunch_utils.py", "snippet": "def register_upblock2d(model):\n def up_forward(self):\n def forward(hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None, scale=None):\n for resnet in self.resnets:\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n #print(f\"in upblock2d, hidden states shape: {hidden_states.shape}\")\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n\n if self.training and self.gradient_checkpointing:\n\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs)\n\n return custom_forward\n\n if is_torch_version(\">=\", \"1.11.0\"):\n hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(resnet), hidden_states, temb, use_reentrant=False\n )\n else:\n hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(resnet), hidden_states, temb\n )\n else:\n hidden_states = resnet(hidden_states, temb)\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states\n \n return forward\n \n for i, upsample_block in enumerate(model.unet.up_blocks):\n if isinstance_str(upsample_block, \"UpBlock2D\"):\n upsample_block.forward = up_forward(upsample_block)" }, { "identifier": "register_crossattn_upblock2d", "path": "utils/free_lunch_utils.py", "snippet": "def register_crossattn_upblock2d(model):\n def up_forward(self):\n def forward(\n hidden_states: torch.FloatTensor,\n res_hidden_states_tuple: Tuple[torch.FloatTensor, ...],\n temb: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n upsample_size: Optional[int] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n ):\n for resnet, attn in zip(self.resnets, self.attentions):\n # pop res hidden states\n #print(f\"in crossatten upblock2d, hidden states shape: {hidden_states.shape}\")\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n\n if self.training and self.gradient_checkpointing:\n\n def create_custom_forward(module, 
return_dict=None):\n def custom_forward(*inputs):\n if return_dict is not None:\n return module(*inputs, return_dict=return_dict)\n else:\n return module(*inputs)\n\n return custom_forward\n\n ckpt_kwargs: Dict[str, Any] = {\"use_reentrant\": False} if is_torch_version(\">=\", \"1.11.0\") else {}\n hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(resnet),\n hidden_states,\n temb,\n **ckpt_kwargs,\n )\n hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(attn, return_dict=False),\n hidden_states,\n encoder_hidden_states,\n None, # timestep\n None, # class_labels\n cross_attention_kwargs,\n attention_mask,\n encoder_attention_mask,\n **ckpt_kwargs,\n )[0]\n else:\n hidden_states = resnet(hidden_states, temb)\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n attention_mask=attention_mask,\n encoder_attention_mask=encoder_attention_mask,\n return_dict=False,\n )[0]\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states\n \n return forward\n \n for i, upsample_block in enumerate(model.unet.up_blocks):\n if isinstance_str(upsample_block, \"CrossAttnUpBlock2D\"):\n upsample_block.forward = up_forward(upsample_block)" }, { "identifier": "register_free_upblock2d", "path": "utils/free_lunch_utils.py", "snippet": "def register_free_upblock2d(model, b1=1.2, b2=1.4, s1=0.9, s2=0.2,source_mask=None):\n def up_forward(self):\n def forward(hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None, scale=None):\n for resnet in self.resnets:\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n #print(f\"in free upblock2d, hidden states shape: {hidden_states.shape}\")\n \n if self.source_mask is not None:\n spatial_mask_source = F.interpolate(self.source_mask, (hidden_states.shape[2], hidden_states.shape[3]))\n spatial_mask_source_b1 = spatial_mask_source * self.b1 + (1 - spatial_mask_source)\n spatial_mask_source_b2 = spatial_mask_source * self.b2 + (1 - spatial_mask_source)\n # --------------- FreeU code -----------------------\n # Only operate on the first two stages\n if hidden_states.shape[1] == 1280:\n if self.source_mask is not None:\n #where in mask = 0, set hidden states unchanged\n hidden_states[:,:640] = hidden_states[:,:640] * spatial_mask_source_b1\n \n else:\n hidden_states[:,:640] = hidden_states[:,:640] * self.b1\n res_hidden_states = Fourier_filter(res_hidden_states, threshold=1, scale=self.s1)\n if hidden_states.shape[1] == 640:\n\n if self.source_mask is not None:\n hidden_states[:,:320] = hidden_states[:,:320] * spatial_mask_source_b2\n else:\n hidden_states[:,:320] = hidden_states[:,:320] * self.b2\n res_hidden_states = Fourier_filter(res_hidden_states, threshold=1, scale=self.s2)\n # ---------------------------------------------------------\n\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n\n if self.training and self.gradient_checkpointing:\n\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs)\n\n return custom_forward\n\n if is_torch_version(\">=\", \"1.11.0\"):\n hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(resnet), hidden_states, temb, use_reentrant=False\n )\n else:\n hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(resnet), 
hidden_states, temb\n )\n else:\n hidden_states = resnet(hidden_states, temb)\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states\n \n return forward\n \n for i, upsample_block in enumerate(model.unet.up_blocks):\n if isinstance_str(upsample_block, \"UpBlock2D\"):\n upsample_block.forward = up_forward(upsample_block)\n setattr(upsample_block, 'b1', b1)\n setattr(upsample_block, 'b2', b2)\n setattr(upsample_block, 's1', s1)\n setattr(upsample_block, 's2', s2)\n setattr(upsample_block, 'source_mask', source_mask)" }, { "identifier": "register_free_crossattn_upblock2d", "path": "utils/free_lunch_utils.py", "snippet": "def register_free_crossattn_upblock2d(model, b1=1.2, b2=1.4, s1=0.9, s2=0.2,source_mask=None):\n def up_forward(self):\n def forward(\n hidden_states: torch.FloatTensor,\n res_hidden_states_tuple: Tuple[torch.FloatTensor, ...],\n temb: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n upsample_size: Optional[int] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n ):\n \n if self.source_mask is not None:\n \n spatial_mask_source = F.interpolate(self.source_mask, (hidden_states.shape[2], hidden_states.shape[3]))\n spatial_mask_source_b1 = spatial_mask_source * self.b1 + (1 - spatial_mask_source)\n spatial_mask_source_b2 = spatial_mask_source * self.b2 + (1 - spatial_mask_source)\n # print(f\"source mask is not none, {spatial_mask_source_b1.shape} with min {spatial_mask_source_b1.min()}\", )\n \n for resnet, attn in zip(self.resnets, self.attentions):\n # pop res hidden states\n #print(f\"in free crossatten upblock2d, hidden states shape: {hidden_states.shape}\")\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n \n # --------------- FreeU code -----------------------\n # Only operate on the first two stages\n if hidden_states.shape[1] == 1280:\n if self.source_mask is not None:\n #where in mask = 0, set hidden states unchanged\n hidden_states[:,:640] = hidden_states[:,:640] * spatial_mask_source_b1\n \n else:\n hidden_states[:,:640] = hidden_states[:,:640] * self.b1\n res_hidden_states = Fourier_filter(res_hidden_states, threshold=1, scale=self.s1)\n if hidden_states.shape[1] == 640:\n if self.source_mask is not None:\n hidden_states[:,:320] = hidden_states[:,:320] * spatial_mask_source_b2\n else:\n hidden_states[:,:320] = hidden_states[:,:320] * self.b2\n res_hidden_states = Fourier_filter(res_hidden_states, threshold=1, scale=self.s2)\n # ---------------------------------------------------------\n\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n\n if self.training and self.gradient_checkpointing:\n\n def create_custom_forward(module, return_dict=None):\n def custom_forward(*inputs):\n if return_dict is not None:\n return module(*inputs, return_dict=return_dict)\n else:\n return module(*inputs)\n\n return custom_forward\n\n ckpt_kwargs: Dict[str, Any] = {\"use_reentrant\": False} if is_torch_version(\">=\", \"1.11.0\") else {}\n hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(resnet),\n hidden_states,\n temb,\n **ckpt_kwargs,\n )\n hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(attn, return_dict=False),\n hidden_states,\n encoder_hidden_states,\n None, # timestep\n 
None, # class_labels\n cross_attention_kwargs,\n attention_mask,\n encoder_attention_mask,\n **ckpt_kwargs,\n )[0]\n else:\n hidden_states = resnet(hidden_states, temb)\n # hidden_states = attn(\n # hidden_states,\n # encoder_hidden_states=encoder_hidden_states,\n # cross_attention_kwargs=cross_attention_kwargs,\n # encoder_attention_mask=encoder_attention_mask,\n # return_dict=False,\n # )[0]\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n )[0]\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states\n \n return forward\n \n for i, upsample_block in enumerate(model.unet.up_blocks):\n if isinstance_str(upsample_block, \"CrossAttnUpBlock2D\"):\n upsample_block.forward = up_forward(upsample_block)\n setattr(upsample_block, 'b1', b1)\n setattr(upsample_block, 'b2', b2)\n setattr(upsample_block, 's1', s1)\n setattr(upsample_block, 's2', s2)\n setattr(upsample_block, 'source_mask', source_mask)" }, { "identifier": "MaskPromptedStyleAttentionControl", "path": "utils/style_attn_control.py", "snippet": "class MaskPromptedStyleAttentionControl(AttentionBase):\n def __init__(self, start_step=4, start_layer=10, style_attn_step=35, layer_idx=None, step_idx=None, total_steps=50, style_guidance=0.1, \n only_masked_region=False, guidance=0.0, \n style_mask=None, source_mask=None, de_bug=False):\n \"\"\"\n MaskPromptedSAC\n Args:\n start_step: the step to start mutual self-attention control\n start_layer: the layer to start mutual self-attention control\n layer_idx: list of the layers to apply mutual self-attention control\n step_idx: list the steps to apply mutual self-attention control\n total_steps: the total number of steps\n thres: the thereshold for mask thresholding\n ref_token_idx: the token index list for cross-attention map aggregation\n cur_token_idx: the token index list for cross-attention map aggregation\n mask_save_dir: the path to save the mask image\n \"\"\"\n\n super().__init__()\n self.total_steps = total_steps\n self.total_layers = 16\n self.start_step = start_step\n self.start_layer = start_layer\n self.layer_idx = layer_idx if layer_idx is not None else list(range(start_layer, self.total_layers))\n self.step_idx = step_idx if step_idx is not None else list(range(start_step, total_steps))\n print(\"using MaskPromptStyleAttentionControl\")\n print(\"MaskedSAC at denoising steps: \", self.step_idx)\n print(\"MaskedSAC at U-Net layers: \", self.layer_idx)\n \n self.de_bug = de_bug\n self.style_guidance = style_guidance\n self.only_masked_region = only_masked_region\n self.style_attn_step = style_attn_step\n self.self_attns = []\n self.cross_attns = []\n self.guidance = guidance\n self.style_mask = style_mask\n self.source_mask = source_mask\n\n\n def after_step(self):\n self.self_attns = []\n self.cross_attns = []\n\n def attn_batch(self, q, k, v, sim, attn, is_cross, place_in_unet, num_heads, q_mask,k_mask, **kwargs):\n B = q.shape[0] // num_heads\n H = W = int(np.sqrt(q.shape[1]))\n q = rearrange(q, \"(b h) n d -> h (b n) d\", h=num_heads)\n k = rearrange(k, \"(b h) n d -> h (b n) d\", h=num_heads)\n v = rearrange(v, \"(b h) n d -> h (b n) d\", h=num_heads)\n\n sim = torch.einsum(\"h i d, h j d -> h i j\", q, k) * kwargs.get(\"scale\")\n \n if q_mask is not None:\n sim = sim.masked_fill(q_mask.unsqueeze(0)==0, -torch.finfo(sim.dtype).max)\n \n if k_mask is not None:\n sim = 
sim.masked_fill(k_mask.permute(1,0).unsqueeze(0)==0, -torch.finfo(sim.dtype).max)\n \n attn = sim.softmax(-1) if attn is None else attn\n\n if len(attn) == 2 * len(v):\n v = torch.cat([v] * 2)\n out = torch.einsum(\"h i j, h j d -> h i d\", attn, v)\n out = rearrange(out, \"(h1 h) (b n) d -> (h1 b) n (h d)\", b=B, h=num_heads)\n return out\n \n def attn_batch_fg_bg(self, q, k, v, sim, attn, is_cross, place_in_unet, num_heads, q_mask,k_mask, **kwargs):\n B = q.shape[0] // num_heads\n H = W = int(np.sqrt(q.shape[1]))\n q = rearrange(q, \"(b h) n d -> h (b n) d\", h=num_heads)\n k = rearrange(k, \"(b h) n d -> h (b n) d\", h=num_heads)\n v = rearrange(v, \"(b h) n d -> h (b n) d\", h=num_heads)\n sim = torch.einsum(\"h i d, h j d -> h i j\", q, k) * kwargs.get(\"scale\")\n if q_mask is not None:\n sim_fg = sim.masked_fill(q_mask.unsqueeze(0)==0, -torch.finfo(sim.dtype).max)\n sim_bg = sim.masked_fill(q_mask.unsqueeze(0)==1, -torch.finfo(sim.dtype).max)\n if k_mask is not None:\n sim_fg = sim.masked_fill(k_mask.permute(1,0).unsqueeze(0)==0, -torch.finfo(sim.dtype).max)\n sim_bg = sim.masked_fill(k_mask.permute(1,0).unsqueeze(0)==1, -torch.finfo(sim.dtype).max)\n sim = torch.cat([sim_fg, sim_bg])\n attn = sim.softmax(-1)\n\n if len(attn) == 2 * len(v):\n v = torch.cat([v] * 2)\n out = torch.einsum(\"h i j, h j d -> h i d\", attn, v)\n out = rearrange(out, \"(h1 h) (b n) d -> (h1 b) n (h d)\", b=B, h=num_heads)\n return out\n \n def forward(self, q, k, v, sim, attn, is_cross, place_in_unet, num_heads, **kwargs):\n\n \"\"\"\n Attention forward function\n \"\"\"\n \n if is_cross or self.cur_step not in self.step_idx or self.cur_att_layer // 2 not in self.layer_idx:\n return super().forward(q, k, v, sim, attn, is_cross, place_in_unet, num_heads, **kwargs)\n\n B = q.shape[0] // num_heads // 2\n H = W = int(np.sqrt(q.shape[1]))\n \n if self.style_mask is not None and self.source_mask is not None:\n #mask = self.aggregate_cross_attn_map(idx=self.cur_token_idx) # (4, H, W)\n heigh, width = self.style_mask.shape[-2:]\n mask_style = self.style_mask# (H, W)\n mask_source = self.source_mask# (H, W)\n scale = int(np.sqrt(heigh * width / q.shape[1]))\n # res = int(np.sqrt(q.shape[1]))\n spatial_mask_source = F.interpolate(mask_source, (heigh//scale, width//scale)).reshape(-1, 1)\n spatial_mask_style = F.interpolate(mask_style, (heigh//scale, width//scale)).reshape(-1, 1)\n \n else:\n spatial_mask_source=None\n spatial_mask_style=None\n\n if spatial_mask_style is None or spatial_mask_source is None:\n \n out_s,out_c,out_t = self.style_attn_ctrl(q, k, v, sim, attn, is_cross, place_in_unet, num_heads, spatial_mask_source,spatial_mask_style,**kwargs)\n \n else:\n if self.only_masked_region:\n out_s,out_c,out_t = self.mask_prompted_style_attn_ctrl(q, k, v, sim, attn, is_cross, place_in_unet, num_heads, spatial_mask_source,spatial_mask_style,**kwargs)\n else:\n out_s,out_c,out_t = self.separate_mask_prompted_style_attn_ctrl(q, k, v, sim, attn, is_cross, place_in_unet, num_heads, spatial_mask_source,spatial_mask_style,**kwargs)\n\n out = torch.cat([out_s,out_c,out_t],dim=0) \n return out\n \n\n def style_attn_ctrl(self,q,k,v,sim,attn,is_cross,place_in_unet,num_heads,spatial_mask_source,spatial_mask_style,**kwargs):\n if self.de_bug:\n import pdb; pdb.set_trace()\n \n qs, qc, qt = q.chunk(3)\n\n out_s = self.attn_batch(qs, k[:num_heads], v[:num_heads], sim[:num_heads], attn[:num_heads], is_cross, place_in_unet, num_heads, q_mask=None,k_mask=None,**kwargs)\n out_c = self.attn_batch(qc, k[:num_heads], v[:num_heads], 
sim[:num_heads], None, is_cross, place_in_unet, num_heads, q_mask=None,k_mask=None,**kwargs)\n\n if self.cur_step < self.style_attn_step:\n out_t = self.attn_batch(qc, k[:num_heads], v[:num_heads], sim[:num_heads], None, is_cross, place_in_unet, num_heads, q_mask=None,k_mask=None,**kwargs)\n else:\n out_t = self.attn_batch(qt, k[:num_heads], v[:num_heads], sim[:num_heads], None, is_cross, place_in_unet, num_heads, q_mask=None,k_mask=None,**kwargs)\n if self.style_guidance>=0:\n out_t = out_c + (out_t - out_c) * self.style_guidance\n return out_s,out_c,out_t\n\n def mask_prompted_style_attn_ctrl(self,q,k,v,sim,attn,is_cross,place_in_unet,num_heads,spatial_mask_source,spatial_mask_style,**kwargs):\n qs, qc, qt = q.chunk(3)\n \n out_s = self.attn_batch(qs, k[:num_heads], v[:num_heads], sim[:num_heads], attn[:num_heads], is_cross, place_in_unet, num_heads, q_mask=None,k_mask=None,**kwargs)\n out_c = self.attn_batch(qc, k[num_heads: 2*num_heads], v[num_heads:2*num_heads], sim[num_heads: 2*num_heads], attn[num_heads: 2*num_heads], is_cross, place_in_unet, num_heads, q_mask=None,k_mask=None, **kwargs)\n out_c_new = self.attn_batch(qc, k[num_heads: 2*num_heads], v[num_heads:2*num_heads], sim[num_heads: 2*num_heads], None, is_cross, place_in_unet, num_heads, q_mask=None,k_mask=None, **kwargs)\n \n if self.de_bug:\n import pdb; pdb.set_trace()\n\n if self.cur_step < self.style_attn_step:\n out_t = out_c #self.attn_batch(qc, k[:num_heads], v[:num_heads], sim[:num_heads], attn, is_cross, place_in_unet, num_heads, q_mask=spatial_mask_source,k_mask=spatial_mask_style,**kwargs)\n else:\n out_t_fg = self.attn_batch(qt, k[:num_heads], v[:num_heads], sim[:num_heads], None, is_cross, place_in_unet, num_heads, q_mask=spatial_mask_source,k_mask=spatial_mask_style,**kwargs)\n out_c_fg = self.attn_batch(qc, k[:num_heads], v[:num_heads], sim[:num_heads], None, is_cross, place_in_unet, num_heads, q_mask=spatial_mask_source,k_mask=spatial_mask_style,**kwargs)\n if self.style_guidance>=0:\n out_t = out_c_fg + (out_t_fg - out_c_fg) * self.style_guidance \n \n out_t = out_t * spatial_mask_source + out_c * (1 - spatial_mask_source)\n\n if self.de_bug:\n import pdb; pdb.set_trace()\n \n # print(torch.sum(out_t* (1 - spatial_mask_source) - out_c * (1 - spatial_mask_source)))\n return out_s,out_c,out_t\n\n def separate_mask_prompted_style_attn_ctrl(self,q,k,v,sim,attn,is_cross,place_in_unet,num_heads,spatial_mask_source,spatial_mask_style,**kwargs):\n \n if self.de_bug:\n import pdb; pdb.set_trace()\n # To prevent query confusion, render fg and bg according to mask.\n qs, qc, qt = q.chunk(3)\n out_s = self.attn_batch(qs, k[:num_heads], v[:num_heads], sim[:num_heads], attn[:num_heads], is_cross, place_in_unet, num_heads, q_mask=None,k_mask=None,**kwargs)\n if self.cur_step < self.style_attn_step: \n \n out_c = self.attn_batch_fg_bg(qc, k[:num_heads], v[:num_heads], sim[:num_heads], attn, is_cross, place_in_unet, num_heads, q_mask=spatial_mask_source,k_mask=spatial_mask_style,**kwargs)\n out_c_fg,out_c_bg = out_c.chunk(2)\n out_t = out_c_fg * spatial_mask_source + out_c_bg * (1 - spatial_mask_source)\n\n else:\n out_t = self.attn_batch_fg_bg(qt, k[:num_heads], v[:num_heads], sim[:num_heads], attn, is_cross, place_in_unet, num_heads, q_mask=spatial_mask_source,k_mask=spatial_mask_style,**kwargs)\n out_c = self.attn_batch_fg_bg(qc, k[:num_heads], v[:num_heads], sim[:num_heads], attn, is_cross, place_in_unet, num_heads, q_mask=spatial_mask_source,k_mask=spatial_mask_style,**kwargs)\n out_t_fg,out_t_bg = out_t.chunk(2)\n 
out_c_fg,out_c_bg = out_c.chunk(2)\n if self.style_guidance>=0:\n out_t_fg = out_c_fg + (out_t_fg - out_c_fg) * self.style_guidance \n out_t_bg = out_c_bg + (out_t_bg - out_c_bg) * self.style_guidance \n out_t = out_t_fg * spatial_mask_source + out_t_bg * (1 - spatial_mask_source)\n \n return out_s,out_t,out_t" }, { "identifier": "MasaCtrlPipeline", "path": "utils/pipeline.py", "snippet": "class MasaCtrlPipeline(StableDiffusionPipeline):\n\n def next_step(\n self,\n model_output: torch.FloatTensor,\n timestep: int,\n x: torch.FloatTensor,\n eta=0.,\n verbose=False\n ):\n \"\"\"\n Inverse sampling for DDIM Inversion\n \"\"\"\n if verbose:\n print(\"timestep: \", timestep)\n next_step = timestep\n timestep = min(timestep - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps, 999)\n alpha_prod_t = self.scheduler.alphas_cumprod[timestep] if timestep >= 0 else self.scheduler.final_alpha_cumprod\n alpha_prod_t_next = self.scheduler.alphas_cumprod[next_step]\n beta_prod_t = 1 - alpha_prod_t\n pred_x0 = (x - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5\n pred_dir = (1 - alpha_prod_t_next)**0.5 * model_output\n x_next = alpha_prod_t_next**0.5 * pred_x0 + pred_dir\n return x_next, pred_x0\n\n def step(\n self,\n model_output: torch.FloatTensor,\n timestep: int,\n x: torch.FloatTensor,\n eta: float=0.0,\n verbose=False,\n ):\n \"\"\"\n predict the sampe the next step in the denoise process.\n \"\"\"\n prev_timestep = timestep - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps\n alpha_prod_t = self.scheduler.alphas_cumprod[timestep]\n alpha_prod_t_prev = self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep > 0 else self.scheduler.final_alpha_cumprod\n beta_prod_t = 1 - alpha_prod_t\n pred_x0 = (x - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5\n pred_dir = (1 - alpha_prod_t_prev)**0.5 * model_output\n x_prev = alpha_prod_t_prev**0.5 * pred_x0 + pred_dir\n return x_prev, pred_x0\n\n @torch.no_grad()\n def image2latent(self, image):\n DEVICE = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n if type(image) is Image:\n image = np.array(image)\n image = torch.from_numpy(image).float() / 127.5 - 1\n image = image.permute(2, 0, 1).unsqueeze(0).to(DEVICE)\n # input image density range [-1, 1]\n latents = self.vae.encode(image)['latent_dist'].mean\n latents = latents * 0.18215\n return latents\n\n @torch.no_grad()\n def latent2image(self, latents, return_type='np'):\n latents = 1 / 0.18215 * latents.detach()\n image = self.vae.decode(latents)['sample']\n if return_type == 'np':\n image = (image / 2 + 0.5).clamp(0, 1)\n image = image.cpu().permute(0, 2, 3, 1).numpy()[0]\n image = (image * 255).astype(np.uint8)\n elif return_type == \"pt\":\n image = (image / 2 + 0.5).clamp(0, 1)\n\n return image\n\n def latent2image_grad(self, latents):\n latents = 1 / 0.18215 * latents\n image = self.vae.decode(latents)['sample']\n\n return image # range [-1, 1]\n\n @torch.no_grad()\n def __call__(\n self,\n prompt,\n batch_size=1,\n height=512,\n width=512,\n num_inference_steps=50,\n guidance_scale=7.5,\n eta=0.0,\n latents=None,\n unconditioning=None,\n neg_prompt=None,\n ref_intermediate_latents=None,\n return_intermediates=False,\n lcm_lora=False,\n de_bug=False,\n **kwds):\n DEVICE = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n if isinstance(prompt, list):\n batch_size = len(prompt)\n elif isinstance(prompt, str):\n if batch_size > 1:\n prompt = [prompt] * batch_size\n\n 
# text embeddings\n text_input = self.tokenizer(\n prompt,\n padding=\"max_length\",\n max_length=77,\n return_tensors=\"pt\"\n )\n\n text_embeddings = self.text_encoder(text_input.input_ids.to(DEVICE))[0]\n print(\"input text embeddings :\", text_embeddings.shape)\n # if kwds.get(\"dir\"):\n # dir = text_embeddings[-2] - text_embeddings[-1]\n # u, s, v = torch.pca_lowrank(dir.transpose(-1, -2), q=1, center=True)\n # text_embeddings[-1] = text_embeddings[-1] + kwds.get(\"dir\") * v\n # print(u.shape)\n # print(v.shape)\n\n # define initial latents\n latents_shape = (batch_size, self.unet.config.in_channels, height//8, width//8)\n if latents is None:\n latents = torch.randn(latents_shape, device=DEVICE)\n else:\n assert latents.shape == latents_shape, f\"The shape of input latent tensor {latents.shape} should equal to predefined one.\"\n\n # unconditional embedding for classifier free guidance\n if guidance_scale > 1.:\n max_length = text_input.input_ids.shape[-1]\n if neg_prompt:\n uc_text = neg_prompt\n else:\n uc_text = \"\"\n # uc_text = \"ugly, tiling, poorly drawn hands, poorly drawn feet, body out of frame, cut off, low contrast, underexposed, distorted face\"\n unconditional_input = self.tokenizer(\n [uc_text] * batch_size,\n padding=\"max_length\",\n max_length=77,\n return_tensors=\"pt\"\n )\n # unconditional_input.input_ids = unconditional_input.input_ids[:, 1:]\n unconditional_embeddings = self.text_encoder(unconditional_input.input_ids.to(DEVICE))[0]\n text_embeddings = torch.cat([unconditional_embeddings, text_embeddings], dim=0)\n\n print(\"latents shape: \", latents.shape)\n # iterative sampling\n self.scheduler.set_timesteps(num_inference_steps)\n # print(\"Valid timesteps: \", reversed(self.scheduler.timesteps))\n latents_list = [latents]\n pred_x0_list = [latents]\n if de_bug:\n import pdb;pdb.set_trace()\n for i, t in enumerate(tqdm(self.scheduler.timesteps, desc=\"DDIM Sampler\")):\n if ref_intermediate_latents is not None:\n # note that the batch_size >= 2\n latents_ref = ref_intermediate_latents[-1 - i]\n _, latents_cur = latents.chunk(2)\n latents = torch.cat([latents_ref, latents_cur])\n\n if guidance_scale > 1.:\n model_inputs = torch.cat([latents] * 2)\n else:\n model_inputs = latents\n if unconditioning is not None and isinstance(unconditioning, list):\n _, text_embeddings = text_embeddings.chunk(2)\n text_embeddings = torch.cat([unconditioning[i].expand(*text_embeddings.shape), text_embeddings]) \n # predict tghe noise\n noise_pred = self.unet(model_inputs, t, encoder_hidden_states=text_embeddings).sample\n if guidance_scale > 1.:\n noise_pred_uncon, noise_pred_con = noise_pred.chunk(2, dim=0)\n noise_pred = noise_pred_uncon + guidance_scale * (noise_pred_con - noise_pred_uncon)\n # compute the previous noise sample x_t -> x_t-1\n if lcm_lora:\n latents, pred_x0 = self.scheduler.step(noise_pred, t, latents, return_dict=False)\n else:\n latents, pred_x0 = self.step(noise_pred, t, latents)\n latents_list.append(latents)\n pred_x0_list.append(pred_x0)\n\n image = self.latent2image(latents, return_type=\"pt\")\n if return_intermediates:\n pred_x0_list = [self.latent2image(img, return_type=\"pt\") for img in pred_x0_list]\n latents_list = [self.latent2image(img, return_type=\"pt\") for img in latents_list]\n return image, pred_x0_list, latents_list\n return image\n\n @torch.no_grad()\n def invert(\n self,\n image: torch.Tensor,\n prompt,\n num_inference_steps=50,\n guidance_scale=7.5,\n eta=0.0,\n return_intermediates=False,\n **kwds):\n \"\"\"\n invert a real image 
into noise map with determinisc DDIM inversion\n \"\"\"\n DEVICE = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n batch_size = image.shape[0]\n if isinstance(prompt, list):\n if batch_size == 1:\n image = image.expand(len(prompt), -1, -1, -1)\n elif isinstance(prompt, str):\n if batch_size > 1:\n prompt = [prompt] * batch_size\n\n # text embeddings\n text_input = self.tokenizer(\n prompt,\n padding=\"max_length\",\n max_length=77,\n return_tensors=\"pt\"\n )\n text_embeddings = self.text_encoder(text_input.input_ids.to(DEVICE))[0]\n print(\"input text embeddings :\", text_embeddings.shape)\n # define initial latents\n latents = self.image2latent(image)\n start_latents = latents\n # print(latents)\n # exit()\n # unconditional embedding for classifier free guidance\n if guidance_scale > 1.:\n max_length = text_input.input_ids.shape[-1]\n unconditional_input = self.tokenizer(\n [\"\"] * batch_size,\n padding=\"max_length\",\n max_length=77,\n return_tensors=\"pt\"\n )\n unconditional_embeddings = self.text_encoder(unconditional_input.input_ids.to(DEVICE))[0]\n text_embeddings = torch.cat([unconditional_embeddings, text_embeddings], dim=0)\n\n print(\"latents shape: \", latents.shape)\n # interative sampling\n self.scheduler.set_timesteps(num_inference_steps)\n print(\"Valid timesteps: \", reversed(self.scheduler.timesteps))\n # print(\"attributes: \", self.scheduler.__dict__)\n latents_list = [latents]\n pred_x0_list = [latents]\n for i, t in enumerate(tqdm(reversed(self.scheduler.timesteps), desc=\"DDIM Inversion\")):\n if guidance_scale > 1.:\n model_inputs = torch.cat([latents] * 2)\n else:\n model_inputs = latents\n\n # predict the noise\n noise_pred = self.unet(model_inputs, t, encoder_hidden_states=text_embeddings).sample\n if guidance_scale > 1.:\n noise_pred_uncon, noise_pred_con = noise_pred.chunk(2, dim=0)\n noise_pred = noise_pred_uncon + guidance_scale * (noise_pred_con - noise_pred_uncon)\n # compute the previous noise sample x_t-1 -> x_t\n latents, pred_x0 = self.next_step(noise_pred, t, latents)\n latents_list.append(latents)\n pred_x0_list.append(pred_x0)\n\n if return_intermediates:\n # return the intermediate laters during inversion\n # pred_x0_list = [self.latent2image(img, return_type=\"pt\") for img in pred_x0_list]\n return latents, latents_list\n return latents, start_latents" } ]
import os
import torch
import random
import numpy as np
import gradio as gr
import torch.nn.functional as F
from glob import glob
from datetime import datetime
from diffusers import StableDiffusionPipeline
from diffusers import DDIMScheduler, LCMScheduler
from PIL import Image, ImageDraw
from utils.masactrl_utils import (AttentionBase, regiter_attention_editor_diffusers)
from utils.free_lunch_utils import register_upblock2d, register_crossattn_upblock2d, register_free_upblock2d, register_free_crossattn_upblock2d
from utils.style_attn_control import MaskPromptedStyleAttentionControl
from utils.pipeline import MasaCtrlPipeline
from torchvision.utils import save_image
from segment_anything import sam_model_registry, SamPredictor
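The import block above pulls in sam_model_registry and SamPredictor from segment_anything, which suggests the app derives its source and style masks interactively with SAM. The snippet below is a generic SAM setup sketch rather than code from this app; the checkpoint path, model type, and the click coordinates (x, y) are assumptions, while the predictor calls follow the standard segment_anything API.

# Hypothetical SAM-based mask extraction (checkpoint path, model type and click point are assumed).
sam = sam_model_registry["vit_h"](checkpoint="models/sam_vit_h_4b8939.pth")
sam.to("cuda" if torch.cuda.is_available() else "cpu")
predictor = SamPredictor(sam)

predictor.set_image(image_rgb)                 # HxWx3 uint8 RGB numpy array of the source image
masks, scores, _ = predictor.predict(
    point_coords=np.array([[x, y]]),           # a single foreground click in pixel coordinates
    point_labels=np.array([1]),                # 1 marks the point as foreground
    multimask_output=False,
)
source_mask = masks[0]                         # boolean HxW mask, usable as the source mask in generate()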
11,992
def update_stable_diffusion(self, stable_diffusion_dropdown): if stable_diffusion_dropdown == 'latent-consistency/lcm-lora-sdv1-5': self.load_lcm_lora() else: self.load_base_pipeline(stable_diffusion_dropdown) self.lora_loaded = None self.personal_model_loaded = None return gr.Dropdown() def update_base_model(self, base_model_dropdown): if self.pipeline is None: gr.Info(f"Please select a pretrained model path.") return None else: base_model = self.personalized_model_list[base_model_dropdown] mid_model = StableDiffusionPipeline.from_single_file(base_model) self.pipeline.vae = mid_model.vae self.pipeline.unet = mid_model.unet self.pipeline.text_encoder = mid_model.text_encoder self.pipeline.to(self.device) self.personal_model_loaded = base_model_dropdown.split('.')[0] print(f'load {base_model_dropdown} model success!') return gr.Dropdown() def update_lora_model(self, lora_model_dropdown,lora_alpha_slider): if self.pipeline is None: gr.Info(f"Please select a pretrained model path.") return None else: if lora_model_dropdown == "none": self.pipeline.unfuse_lora() self.pipeline.unload_lora_weights() self.lora_loaded = None print("Restore lora.") else: lora_model_path = self.lora_model_list[lora_model_dropdown] self.pipeline.load_lora_weights(lora_model_path) self.pipeline.fuse_lora(lora_alpha_slider) self.lora_loaded = lora_model_dropdown.split('.')[0] print(f'load {lora_model_dropdown} LoRA Model Success!') return gr.Dropdown() def load_lcm_lora(self, lora_alpha_slider=1.0): # set scheduler self.pipeline = MasaCtrlPipeline.from_pretrained(self.stable_diffusion_list[0]).to(self.device) self.pipeline.scheduler = LCMScheduler.from_config(self.pipeline.scheduler.config) # load LCM-LoRA self.pipeline.load_lora_weights("latent-consistency/lcm-lora-sdv1-5") self.pipeline.fuse_lora(lora_alpha_slider) self.lcm_lora_loaded = True print(f'load LCM-LoRA model success!') def generate(self, source, style, source_mask, style_mask, start_step, start_layer, Style_attn_step, Method, Style_Guidance, ddim_steps, scale, seed, de_bug, target_prompt, negative_prompt_textbox, inter_latents, freeu, b1, b2, s1, s2, width_slider,height_slider, ): os.makedirs(self.savedir, exist_ok=True) os.makedirs(self.savedir_sample, exist_ok=True) os.makedirs(self.savedir_mask, exist_ok=True) model = self.pipeline if seed != -1 and seed != "": torch.manual_seed(int(seed)) else: torch.seed() seed = torch.initial_seed() sample_count = len(os.listdir(self.savedir_sample)) os.makedirs(os.path.join(self.savedir_mask, f"results_{sample_count}"), exist_ok=True) # ref_prompt = [source_prompt, target_prompt] # prompts = ref_prompt+[''] ref_prompt = [target_prompt, target_prompt] prompts = ref_prompt+[target_prompt] source_image,style_image,source_mask,style_mask = load_mask_images(source,style,source_mask,style_mask,self.device,width_slider,height_slider,out_dir=os.path.join(self.savedir_mask, f"results_{sample_count}")) # global START_CODE, LATENTS_LIST with torch.no_grad(): #import pdb;pdb.set_trace() #prev_source if self.start_code is None and self.latents_list is None: content_style = torch.cat([style_image, source_image], dim=0) editor = AttentionBase() regiter_attention_editor_diffusers(model, editor) st_code, latents_list = model.invert(content_style, ref_prompt, guidance_scale=scale, num_inference_steps=ddim_steps, return_intermediates=True) start_code = torch.cat([st_code, st_code[1:]], dim=0) self.start_code = start_code self.latents_list = latents_list else: start_code = self.start_code latents_list = self.latents_list 
print('------------------------------------------ Use previous latents ------------------------------------------ ') #["Without mask", "Only masked region", "Seperate Background Foreground"] if Method == "Without mask": style_mask = None source_mask = None only_masked_region = False elif Method == "Only masked region": assert style_mask is not None and source_mask is not None only_masked_region = True else: assert style_mask is not None and source_mask is not None only_masked_region = False
css = """ .toolbutton { margin-buttom: 0em 0em 0em 0em; max-width: 2.5em; min-width: 2.5em !important; height: 2.5em; } """ class GlobalText: def __init__(self): # config dirs self.basedir = os.getcwd() self.stable_diffusion_dir = os.path.join(self.basedir, "models", "StableDiffusion") self.personalized_model_dir = './models/Stable-diffusion' self.lora_model_dir = './models/Lora' self.savedir = os.path.join(self.basedir, "samples", datetime.now().strftime("Gradio-%Y-%m-%dT%H-%M-%S")) self.savedir_sample = os.path.join(self.savedir, "sample") self.savedir_mask = os.path.join(self.savedir, "mask") self.stable_diffusion_list = ["runwayml/stable-diffusion-v1-5", "latent-consistency/lcm-lora-sdv1-5"] self.personalized_model_list = [] self.lora_model_list = [] # config models self.tokenizer = None self.text_encoder = None self.vae = None self.unet = None self.pipeline = None self.lora_loaded = None self.lcm_lora_loaded = False self.personal_model_loaded = None self.sam_predictor = None self.lora_model_state_dict = {} self.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") # self.refresh_stable_diffusion() self.refresh_personalized_model() self.reset_start_code() def load_base_pipeline(self, model_path): print(f'loading {model_path} model') scheduler = DDIMScheduler.from_pretrained(model_path,subfolder="scheduler") self.pipeline = MasaCtrlPipeline.from_pretrained(model_path, scheduler=scheduler).to(self.device) def refresh_stable_diffusion(self): self.load_base_pipeline(self.stable_diffusion_list[0]) self.lora_loaded = None self.personal_model_loaded = None self.lcm_lora_loaded = False return self.stable_diffusion_list[0] def refresh_personalized_model(self): personalized_model_list = glob(os.path.join(self.personalized_model_dir, "**/*.safetensors"), recursive=True) self.personalized_model_list = {os.path.basename(file): file for file in personalized_model_list} lora_model_list = glob(os.path.join(self.lora_model_dir, "**/*.safetensors"), recursive=True) self.lora_model_list = {os.path.basename(file): file for file in lora_model_list} def update_stable_diffusion(self, stable_diffusion_dropdown): if stable_diffusion_dropdown == 'latent-consistency/lcm-lora-sdv1-5': self.load_lcm_lora() else: self.load_base_pipeline(stable_diffusion_dropdown) self.lora_loaded = None self.personal_model_loaded = None return gr.Dropdown() def update_base_model(self, base_model_dropdown): if self.pipeline is None: gr.Info(f"Please select a pretrained model path.") return None else: base_model = self.personalized_model_list[base_model_dropdown] mid_model = StableDiffusionPipeline.from_single_file(base_model) self.pipeline.vae = mid_model.vae self.pipeline.unet = mid_model.unet self.pipeline.text_encoder = mid_model.text_encoder self.pipeline.to(self.device) self.personal_model_loaded = base_model_dropdown.split('.')[0] print(f'load {base_model_dropdown} model success!') return gr.Dropdown() def update_lora_model(self, lora_model_dropdown,lora_alpha_slider): if self.pipeline is None: gr.Info(f"Please select a pretrained model path.") return None else: if lora_model_dropdown == "none": self.pipeline.unfuse_lora() self.pipeline.unload_lora_weights() self.lora_loaded = None print("Restore lora.") else: lora_model_path = self.lora_model_list[lora_model_dropdown] self.pipeline.load_lora_weights(lora_model_path) self.pipeline.fuse_lora(lora_alpha_slider) self.lora_loaded = lora_model_dropdown.split('.')[0] print(f'load {lora_model_dropdown} LoRA Model Success!') return gr.Dropdown() def 
load_lcm_lora(self, lora_alpha_slider=1.0): # set scheduler self.pipeline = MasaCtrlPipeline.from_pretrained(self.stable_diffusion_list[0]).to(self.device) self.pipeline.scheduler = LCMScheduler.from_config(self.pipeline.scheduler.config) # load LCM-LoRA self.pipeline.load_lora_weights("latent-consistency/lcm-lora-sdv1-5") self.pipeline.fuse_lora(lora_alpha_slider) self.lcm_lora_loaded = True print(f'load LCM-LoRA model success!') def generate(self, source, style, source_mask, style_mask, start_step, start_layer, Style_attn_step, Method, Style_Guidance, ddim_steps, scale, seed, de_bug, target_prompt, negative_prompt_textbox, inter_latents, freeu, b1, b2, s1, s2, width_slider,height_slider, ): os.makedirs(self.savedir, exist_ok=True) os.makedirs(self.savedir_sample, exist_ok=True) os.makedirs(self.savedir_mask, exist_ok=True) model = self.pipeline if seed != -1 and seed != "": torch.manual_seed(int(seed)) else: torch.seed() seed = torch.initial_seed() sample_count = len(os.listdir(self.savedir_sample)) os.makedirs(os.path.join(self.savedir_mask, f"results_{sample_count}"), exist_ok=True) # ref_prompt = [source_prompt, target_prompt] # prompts = ref_prompt+[''] ref_prompt = [target_prompt, target_prompt] prompts = ref_prompt+[target_prompt] source_image,style_image,source_mask,style_mask = load_mask_images(source,style,source_mask,style_mask,self.device,width_slider,height_slider,out_dir=os.path.join(self.savedir_mask, f"results_{sample_count}")) # global START_CODE, LATENTS_LIST with torch.no_grad(): #import pdb;pdb.set_trace() #prev_source if self.start_code is None and self.latents_list is None: content_style = torch.cat([style_image, source_image], dim=0) editor = AttentionBase() regiter_attention_editor_diffusers(model, editor) st_code, latents_list = model.invert(content_style, ref_prompt, guidance_scale=scale, num_inference_steps=ddim_steps, return_intermediates=True) start_code = torch.cat([st_code, st_code[1:]], dim=0) self.start_code = start_code self.latents_list = latents_list else: start_code = self.start_code latents_list = self.latents_list print('------------------------------------------ Use previous latents ------------------------------------------ ') #["Without mask", "Only masked region", "Seperate Background Foreground"] if Method == "Without mask": style_mask = None source_mask = None only_masked_region = False elif Method == "Only masked region": assert style_mask is not None and source_mask is not None only_masked_region = True else: assert style_mask is not None and source_mask is not None only_masked_region = False
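Because this cell is stored on a single line, the latent-caching branch inside GlobalText.generate is hard to follow. Below is a condensed, commented restatement of just that branch for readability; it reuses the names from the code above and adds no new behavior.

# Condensed restatement of the inversion-caching logic in GlobalText.generate (not verbatim).
with torch.no_grad():
    if self.start_code is None and self.latents_list is None:
        # First run: invert style and content together, with a pass-through
        # attention editor so no style injection happens during inversion.
        regiter_attention_editor_diffusers(model, AttentionBase())
        st_code, latents_list = model.invert(
            torch.cat([style_image, source_image], dim=0),
            [target_prompt, target_prompt],
            guidance_scale=scale,
            num_inference_steps=ddim_steps,
            return_intermediates=True,
        )
        # st_code holds [style, content]; repeating the content latent gives the
        # three starting codes [style, content, content] for the three prompts.
        start_code = torch.cat([st_code, st_code[1:]], dim=0)
        self.start_code, self.latents_list = start_code, latents_list
    else:
        # Later runs with the same inputs reuse the cached inversion.
        start_code, latents_list = self.start_code, self.latents_list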
controller = MaskPromptedStyleAttentionControl(start_step, start_layer,
6
2023-12-06 01:18:39+00:00
16k
AsuradaYuci/TF-CLIP
datasets/make_dataloader_clipreid.py
[ { "identifier": "VideoDataset", "path": "datasets/video_loader_xh.py", "snippet": "class VideoDataset(Dataset):\n \"\"\"Video Person ReID Dataset.\n Note batch data has shape (batch, seq_len, channel, height, width).\n \"\"\"\n sample_methods = ['evenly', 'random', 'dense']\n\n def __init__(self, dataset, seq_len=15, sample='evenly', transform=None):\n self.dataset = dataset\n self.seq_len = seq_len\n self.sample = sample\n self.transform = transform\n\n def __len__(self):\n return len(self.dataset)\n\n def __getitem__(self, indices):\n if isinstance(indices, (tuple, list)):\n return [self.__get_single_item__(index) for index in indices]\n return self.__get_single_item__(indices)\n\n def __get_single_item__(self, index):\n S = self.seq_len # 4\n img_paths, pid, camid, trackid = self.dataset[index]\n num = len(img_paths) # 27\n \"\"\"rss 操作\"\"\"\n sample_clip = []\n frame_indices = list(range(num))\n if num < S: # 8 = chunk的数目,每个tracklet分成8段,每段随机选一帧\n strip = list(range(num)) + [frame_indices[-1]] * (S - num)\n for s in range(S):\n pool = strip[s * 1:(s + 1) * 1]\n sample_clip.append(list(pool))\n else:\n inter_val = math.ceil(num / S)\n strip = list(range(num)) + [frame_indices[-1]] * (inter_val * S - num)\n for s in range(S):\n pool = strip[inter_val * s:inter_val * (s + 1)]\n sample_clip.append(list(pool))\n\n sample_clip = np.array(sample_clip)\n\n if self.sample == 'random':\n \"\"\"\n Randomly sample seq_len consecutive frames from num frames,\n if num is smaller than seq_len, then replicate items.\n This sampling strategy is used in training phase.\n \"\"\"\n frame_indices = list(range(num))\n rand_end = max(0, len(frame_indices) - self.seq_len - 1)\n begin_index = random.randint(0, rand_end)\n end_index = min(begin_index + self.seq_len, len(frame_indices))\n\n indices = frame_indices[begin_index:end_index]\n\n for index in indices:\n if len(indices) >= self.seq_len:\n break\n indices.append(index)\n indices = np.array(indices)\n imgseq = []\n for index in indices:\n index = int(index)\n img_path = img_paths[index]\n img = Image.open(img_path).convert('RGB') # 3x224x112\n imgseq.append(img)\n\n seq = [imgseq]\n if self.transform is not None:\n seq = self.transform(seq)\n\n img_tensor = torch.stack(seq[0], dim=0) # seq_len 4x3x224x112\n flow_tensor = None\n\n return img_tensor, pid, camid\n\n elif self.sample == 'dense':\n \"\"\"\n Sample all frames in a video into a list of clips, each clip contains seq_len frames, batch_size needs to be set to 1.\n This sampling strategy is used in test phase.\n \"\"\"\n cur_index = 0\n frame_indices = list(range(num)) # 27\n indices_list = []\n while num-cur_index > self.seq_len:\n indices_list.append(frame_indices[cur_index:cur_index+self.seq_len])\n cur_index += self.seq_len\n\n last_seq = frame_indices[cur_index:]\n\n for index in last_seq:\n if len(last_seq) >= self.seq_len:\n break\n last_seq.append(index)\n\n indices_list.append(last_seq) # <class 'list'>: [[0, 1, 2, 3, 4, 5, 6, 7], [8, 9, 10, 11, 12, 13, 14, 15], [16, 17, 18, 19, 20, 21, 22, 23], [24, 25, 24, 25, 24, 25, 24, 25]]\n imgs_list = []\n for indices in indices_list: # <class 'list'>: [0, 1, 2, 3, 4, 5, 6, 7]\n imgs = []\n for index in indices:\n index = int(index)\n img_path = img_paths[index]\n img = Image.open(img_path).convert('RGB')\n # img = img.unsqueeze(0)\n imgs.append(img)\n\n imgs = [imgs]\n if self.transform is not None:\n imgs = self.transform(imgs)\n imgs = torch.stack(imgs[0], 0) # torch.Size([8, 3, 224, 112])\n imgs_list.append(imgs)\n imgs_tensor = 
torch.stack(imgs_list) # torch.Size([13, 8, 3, 224, 112])\n # flow_tensor = None\n return imgs_tensor, pid, camid, trackid, \"\"\n\n elif self.sample == 'rrs_train':\n idx = np.random.choice(sample_clip.shape[1], sample_clip.shape[0])\n number = sample_clip[np.arange(len(sample_clip)), idx]\n # imgseq = []\n img_paths = np.array(list(img_paths)) # img_paths原始为tuple,转换成数组\n # flow_paths = np.array([img_path.replace('Mars', 'Mars_optical') for img_path in img_paths])\n imgseq = [Image.open(img_path).convert('RGB') for img_path in img_paths[number]]\n # flowseq = [Image.open(flow_path).convert('RGB') for flow_path in flow_paths[number]]\n\n seq = [imgseq]\n # seq = [imgseq, flowseq]\n if self.transform is not None:\n seq = self.transform(seq)\n\n img_tensor = torch.stack(seq[0], dim=0) # seq_len 4x3x224x112\n # flow_tensor = torch.stack(seq[1], dim=0) # seq_len 4x3x224x112\n\n return img_tensor, pid, camid, trackid, \"\"\n\n elif self.sample == 'rrs_test':\n number = sample_clip[:, 0]\n img_paths = np.array(list(img_paths)) # img_paths原始为tuple,转换成数组\n # flow_paths = np.array([img_path.replace('Mars', 'Mars_optical') for img_path in img_paths])\n imgseq = [Image.open(img_path).convert('RGB') for img_path in img_paths[number]]\n # flowseq = [Image.open(flow_path).convert('RGB') for flow_path in flow_paths[number]]\n\n seq = [imgseq]\n # seq = [imgseq, flowseq]\n if self.transform is not None:\n seq = self.transform(seq)\n img_tensor = torch.stack(seq[0], dim=0) # torch.Size([8, 3, 256, 128])\n # flow_tensor = torch.stack(seq[1], dim=0)\n return img_tensor, pid, camid, trackid, \"\"\n else:\n raise KeyError(\"Unknown sample method: {}. Expected one of {}\".format(self.sample, self.sample_methods))" }, { "identifier": "RandomIdentitySampler", "path": "datasets/samplers.py", "snippet": "class RandomIdentitySampler(Sampler):\n \"\"\"\n Randomly sample N identities, then for each identity,\n randomly sample K instances, therefore batch size is N*K.\n\n Args:\n - data_source (Dataset): dataset to sample from.\n - num_instances (int): number of instances per identity.\n \"\"\"\n def __init__(self, data_source, batch_size, num_instances=4):\n self.data_source = data_source\n self.batch_size = batch_size # 16\n self.num_instances = num_instances # 4\n self.num_pids_per_batch = self.batch_size // self.num_instances # 4\n self.index_dic = defaultdict(list)\n for index, (_, pid, _, _) in enumerate(data_source):\n self.index_dic[pid].append(index)\n self.pids = list(self.index_dic.keys())\n self.num_identities = len(self.pids)\n\n # compute number of examples in an epoch\n self.length = 0\n for pid in self.pids:\n idxs = self.index_dic[pid]\n num = len(idxs)\n if num < self.num_instances:\n num = self.num_instances\n self.length += num - num % self.num_instances # 7532\n\n def __iter__(self):\n\n batch_idxs_dict = defaultdict(list)\n\n for pid in self.pids: # 每个Pid选择4个序列\n idxs = copy.deepcopy(self.index_dic[pid])\n if len(idxs) < self.num_instances:\n idxs = np.random.choice(idxs, size=self.num_instances, replace=True)\n random.shuffle(idxs)\n batch_idxs = []\n for idx in idxs:\n batch_idxs.append(idx)\n if len(batch_idxs) == self.num_instances:\n batch_idxs_dict[pid].append(batch_idxs)\n batch_idxs = []\n\n avai_pids = copy.deepcopy(self.pids)\n final_idxs = []\n\n while len(avai_pids) >= self.num_pids_per_batch: # 选择P个ID\n selected_pids = random.sample(avai_pids, self.num_pids_per_batch)\n for pid in selected_pids:\n batch_idxs = batch_idxs_dict[pid].pop(0)\n final_idxs.extend(batch_idxs)\n if 
len(batch_idxs_dict[pid]) == 0:\n avai_pids.remove(pid)\n\n return iter(final_idxs)\n\n def __len__(self):\n return self.length" }, { "identifier": "RandomIdentitySamplerForSeq", "path": "datasets/samplers.py", "snippet": "class RandomIdentitySamplerForSeq(Sampler):\n \"\"\"\n Randomly sample N identities, then for each identity,\n randomly sample K instances, therefore batch size is N*K.\n\n Args:\n - data_source (Dataset): dataset to sample from.\n - num_instances (int): number of instances per identity.\n \"\"\"\n def __init__(self, data_source, batch_size, num_instances=4):\n self.data_source = data_source\n self.batch_size = batch_size # 256\n self.num_instances = num_instances # K=4\n self.num_pids_per_batch = self.batch_size // self.num_instances\n self.index_dic = defaultdict(list)\n for index, (_, pid, _, _) in enumerate(data_source):\n self.index_dic[pid].append(index)\n self.pids = list(self.index_dic.keys())\n self.num_identities = len(self.pids) # 625\n\n # compute number of examples in an epoch\n self.length = 0\n for pid in self.pids:\n idxs = self.index_dic[pid]\n num = len(idxs)\n if num < self.num_instances:\n num = self.num_instances\n self.length += num - num % self.num_instances\n\n def __iter__(self):\n\n batch_idxs_dict = defaultdict(list)\n\n for pid in self.pids:\n idxs = copy.deepcopy(self.index_dic[pid])\n if len(idxs) < self.num_instances:\n idxs = np.random.choice(idxs, size=self.num_instances, replace=True)\n random.shuffle(idxs)\n batch_idxs = []\n for idx in idxs:\n batch_idxs.append(idx)\n if len(batch_idxs) == self.num_instances:\n batch_idxs_dict[pid].append(batch_idxs)\n batch_idxs = []\n\n avai_pids = copy.deepcopy(self.pids)\n final_idxs = []\n\n while len(avai_pids) >= self.num_pids_per_batch:\n selected_pids = random.sample(avai_pids, self.num_pids_per_batch)\n for pid in selected_pids:\n batch_idxs = batch_idxs_dict[pid].pop(0)\n final_idxs.extend(batch_idxs)\n if len(batch_idxs_dict[pid]) == 0:\n avai_pids.remove(pid)\n\n return iter(final_idxs)\n\n def __len__(self):\n return self.length" }, { "identifier": "RandomIdentitySamplerWYQ", "path": "datasets/samplers.py", "snippet": "class RandomIdentitySamplerWYQ(Sampler):\n \"\"\"\n Randomly sample N identities, then for each identity,\n randomly sample K instances, therefore batch size is N*K.\n\n Code imported from https://github.com/Cysu/open-reid/blob/master/reid/utils/data/sampler.py.\n\n Args:\n data_source (Dataset): dataset to sample from.\n num_instances (int): number of instances per identity.\n \"\"\"\n def __init__(self, data_source, num_instances=4):\n super(RandomIdentitySampler).__init__()\n self.data_source = data_source\n self.num_instances = num_instances\n self.index_dic = defaultdict(list)\n for index, (_, pid, _, _) in enumerate(data_source):\n self.index_dic[pid].append(index)\n self.pids = list(self.index_dic.keys())\n self.num_identities = len(self.pids)\n\n def __iter__(self):\n indices = torch.randperm(self.num_identities)\n ret = []\n for i in indices:\n pid = self.pids[i]\n t = self.index_dic[pid]\n replace = False if len(t) >= self.num_instances else True\n t = np.random.choice(t, size=self.num_instances, replace=replace)\n ret.extend(t)\n # print(ret)\n return iter(ret)\n\n def __len__(self):\n return self.num_identities * self.num_instances" }, { "identifier": "SeqTrainPreprocessor", "path": "datasets/seqpreprocessor.py", "snippet": "class SeqTrainPreprocessor(object):\n def __init__(self, seqset, dataset, seq_len, transform=None):\n super(SeqTrainPreprocessor, 
self).__init__()\n self.seqset = seqset\n self.identities = dataset.identities\n self.transform = transform\n self.seq_len = seq_len\n self.root = [dataset.images_dir]\n self.root.append(dataset.other_dir)\n\n def __len__(self):\n return len(self.seqset)\n\n def __getitem__(self, indices):\n if isinstance(indices, (tuple, list)):\n return [self._get_single_item(index) for index in indices]\n return self._get_single_item(indices)\n\n def _get_single_item(self, index):\n\n start_ind, end_ind, pid, label, camid = self.seqset[index]\n\n imgseq = []\n flowseq = []\n for ind in range(start_ind, end_ind):\n fname = self.identities[pid][camid][ind]\n fpath_img = osp.join(self.root[0], fname)\n imgrgb = Image.open(fpath_img).convert('RGB')\n fpath_flow = osp.join(self.root[1], fname)\n flowrgb = Image.open(fpath_flow).convert('RGB')\n imgseq.append(imgrgb)\n flowseq.append(flowrgb)\n\n while len(imgseq) < self.seq_len:\n imgseq.append(imgrgb)\n flowseq.append(flowrgb)\n\n seq = [imgseq, flowseq]\n\n if self.transform is not None:\n seq = self.transform(seq)\n\n img_tensor = torch.stack(seq[0], 0)\n\n flow_tensor = torch.stack(seq[1], 0)\n\n return img_tensor, flow_tensor, label, camid" }, { "identifier": "SeqTestPreprocessor", "path": "datasets/seqpreprocessor.py", "snippet": "class SeqTestPreprocessor(object):\n\n def __init__(self, seqset, dataset, seq_len, transform=None):\n super(SeqTestPreprocessor, self).__init__()\n self.seqset = seqset\n self.identities = dataset.identities\n self.transform = transform\n self.seq_len = seq_len\n self.root = [dataset.images_dir]\n self.root.append(dataset.other_dir)\n\n def __len__(self):\n return len(self.seqset)\n\n def __getitem__(self, indices):\n if isinstance(indices, (tuple, list)):\n return [self._get_single_item(index) for index in indices]\n return self._get_single_item(indices)\n\n def _get_single_item(self, index):\n\n start_ind, end_ind, pid, label, camid = self.seqset[index]\n\n imgseq = []\n flowseq = []\n for ind in range(start_ind, end_ind):\n fname = self.identities[pid][camid][ind]\n fpath_img = osp.join(self.root[0], fname)\n imgrgb = Image.open(fpath_img).convert('RGB')\n fpath_flow = osp.join(self.root[1], fname)\n flowrgb = Image.open(fpath_flow).convert('RGB')\n imgseq.append(imgrgb)\n flowseq.append(flowrgb)\n\n while len(imgseq) < self.seq_len:\n imgseq.append(imgrgb)\n flowseq.append(flowrgb)\n\n seq = [imgseq, flowseq]\n\n if self.transform is not None:\n seq = self.transform(seq)\n\n img_tensor = torch.stack(seq[0], 0)\n\n if len(self.root) == 2:\n flow_tensor = torch.stack(seq[1], 0)\n else:\n flow_tensor = None\n\n return img_tensor, flow_tensor, pid, camid" }, { "identifier": "Mars", "path": "datasets/set/mars.py", "snippet": "class Mars(object):\n root = '/18640539002/dataset_cc/Mars/'\n train_name_path = osp.join(root, 'info/train_name.txt')\n test_name_path = osp.join(root, 'info/test_name.txt')\n track_train_info_path = osp.join(root, 'info/tracks_train_info.mat')\n track_test_info_path = osp.join(root, 'info/tracks_test_info.mat')\n query_IDX_path = osp.join(root, 'info/query_IDX.mat')\n split_train_json_path = osp.join(root, 'split_train.json')\n split_query_json_path = osp.join(root, 'split_query.json')\n split_gallery_json_path = osp.join(root, 'split_gallery.json')\n \n def __init__(self, root= '../data/Mars/', min_seq_len=0):\n self._check_before_run()\n\n train_names = self._get_names(self.train_name_path) # <class 'list'>: <Len: 509914> '0001C1T0001F001.jpg'\n test_names = self._get_names(self.test_name_path) # 
<class 'list'>: <Len: 681089> '00-1C1T0001F001.jpg'\n track_train = loadmat(self.track_train_info_path)[\n 'track_train_info'] # numpy.ndarray (8298, 4) [[1 16 1 1],[17 95 1 1] ...]\n track_test = loadmat(self.track_test_info_path)[\n 'track_test_info'] # numpy.ndarray (12180, 4) [[1 24 -1 1][25 34 -1 1]]\n \n query_IDX = loadmat(self.query_IDX_path)['query_IDX'].squeeze() # numpy.ndarray (1980,) [4130, 4138...]\n query_IDX -= 1 # index from 0 [4129,4137....]\n track_query = track_test[query_IDX, :] # 对应行的小段视频信息,[[171610 171649 2 1],[172214 172313 2 2]...]\n \n gallery_IDX = [i for i in range(track_test.shape[0]) if i not in query_IDX] # gallery = 10200\n track_gallery = track_test[gallery_IDX, :] # <class 'tuple'>: (12180, 4) [[1 24 -1 1][25 34 -1 1]...]\n\n train, num_train_tracklets, num_train_pids, num_train_imgs, num_train_cams, num_train_vids = \\\n self._process_data(train_names, track_train, home_dir='bbox_train', relabel=True,\n min_seq_len=min_seq_len, json_path=self.split_train_json_path)\n\n query, num_query_tracklets, num_query_pids, num_query_imgs, query_pid, query_camid = \\\n self._process_gallery_data(test_names, track_query, home_dir='bbox_test', relabel=False,\n min_seq_len=min_seq_len, json_path=self.split_query_json_path,)\n\n gallery, num_gallery_tracklets, num_gallery_pids, num_gallery_imgs, gallery_pid, gallery_camid = \\\n self._process_gallery_data(test_names, track_gallery, home_dir='bbox_test', relabel=False,\n min_seq_len=min_seq_len, json_path=self.split_gallery_json_path)\n\n num_imgs_per_tracklet = num_train_imgs + num_query_imgs + num_gallery_imgs\n min_num = np.min(num_imgs_per_tracklet)\n max_num = np.max(num_imgs_per_tracklet)\n avg_num = np.mean(num_imgs_per_tracklet)\n\n num_total_pids = num_train_pids + num_query_pids\n num_total_tracklets = num_train_tracklets + num_query_tracklets + num_gallery_tracklets\n\n print(\"=> MARS loaded\")\n print(\"Dataset statistics:\")\n print(\" ------------------------------\")\n print(\" subset | # ids | # tracklets\")\n print(\" ------------------------------\")\n print(\" train | {:5d} | {:8d}\".format(num_train_pids, num_train_tracklets))\n print(\" query | {:5d} | {:8d}\".format(num_query_pids, num_query_tracklets))\n print(\" gallery | {:5d} | {:8d}\".format(num_gallery_pids, num_gallery_tracklets))\n print(\" ------------------------------\")\n print(\" total | {:5d} | {:8d}\".format(num_total_pids, num_total_tracklets))\n print(\" number of images per tracklet: {} ~ {}, average {:.1f}\".format(min_num, max_num, avg_num))\n print(\" ------------------------------\")\n\n self.train = train\n self.query = query\n self.gallery = gallery\n\n self.num_train_pids = num_train_pids\n self.num_query_pids = num_query_pids\n self.num_gallery_pids = num_gallery_pids\n\n self.queryinfo = infostruct()\n self.queryinfo.pid = query_pid\n self.queryinfo.camid = query_camid\n self.queryinfo.tranum = num_query_imgs\n\n self.galleryinfo = infostruct()\n self.galleryinfo.pid = gallery_pid\n self.galleryinfo.camid = gallery_camid\n self.galleryinfo.tranum = num_gallery_imgs\n\n self.num_train_cams = num_train_cams\n self.num_train_vids = num_train_vids\n \n def _check_before_run(self):\n \"\"\"Check if all files are available before going deeper\"\"\"\n if not osp.exists(self.root):\n raise RuntimeError(\"'{}' is not available\".format(self.root))\n if not osp.exists(self.train_name_path):\n raise RuntimeError(\"'{}' is not available\".format(self.train_name_path))\n if not osp.exists(self.test_name_path):\n raise 
RuntimeError(\"'{}' is not available\".format(self.test_name_path))\n if not osp.exists(self.track_train_info_path):\n raise RuntimeError(\"'{}' is not available\".format(self.track_train_info_path))\n if not osp.exists(self.track_test_info_path):\n raise RuntimeError(\"'{}' is not available\".format(self.track_test_info_path))\n if not osp.exists(self.query_IDX_path):\n raise RuntimeError(\"'{}' is not available\".format(self.query_IDX_path))\n \n def _get_names(self, fpath):\n names = []\n with open(fpath, 'r') as f:\n for line in f:\n new_line = line.rstrip()\n names.append(new_line)\n return names\n \n def _process_data(self, names, meta_data, home_dir=None, relabel=False, min_seq_len=0, json_path=''):\n if osp.exists(json_path):\n print(\"=> {} generated before, awesome!\".format(json_path))\n split = read_json(json_path)\n return split['tracklets'], split['num_tracklets'], split['num_pids'], split['num_imgs_per_tracklet'], split['num_cams'], split['num_tracks']\n print(\"=> Automatically generating split (might take a while for the first time, have a coffe)\")\n assert home_dir in ['bbox_train', 'bbox_test']\n num_tracklets = meta_data.shape[0] # 8298 TODO: 要不要增加?\n pid_list = list(set(meta_data[:, 2].tolist())) # pid = 625 => [1 3 5 7 9...]\n num_pids = len(pid_list)\n\n if relabel:\n pid2label = {pid: label for label, pid in enumerate(pid_list)} # {1:0,3:1,5:2,...}\n tracklets = []\n num_imgs_per_tracklet = []\n cams = []\n \n for tracklet_idx in range(num_tracklets):\n data = meta_data[tracklet_idx, ...] # [1 16 1 1]\n start_index, end_index, pid, camid = data\n \n cams += [int(camid)]\n \n if pid == -1:\n continue # junk images are just ignored\n assert 1 <= camid <= 6\n if relabel:\n pid = pid2label[pid] # pid = 0\n camid -= 1\n # index starts from 0\n img_names = names[start_index - 1:end_index]\n # <class 'list'>:['0001C1T0001F001.jpg'.. 
'0001C1T0001F016.jpg']\n\n # make sure image names correspond to the same person\n pnames = [img_name[:4] for img_name in img_names] # pnames = ['0001','0001'...]\n assert len(set(pnames)) == 1, \"Error: a single tracklet contains different person images\"\n\n # make sure all images are captured under the same camera\n camnames = [img_name[5] for img_name in img_names] # camnames = ['1','1'...]\n assert len(set(camnames)) == 1, \"Error: images are captured under different cameras!\"\n\n # append image names with directory information\n # '/media/ying/0BDD17830BDD1783/ReIdDataset/Mars/bbox_train/0001/0001C1T0001F001.jpg'\n img_paths = [osp.join(self.root, home_dir, img_name[:4], img_name) for img_name in img_names] # list<16>\n # print(img_paths)\n \n if len(img_paths) >= min_seq_len:\n img_paths = tuple(img_paths)\n tracklets.append((img_paths, int(pid), int(camid), 1)) # (('.jpg','.jpg','每张图片的路径'), 0'行人id', 0'camid' trackid)\n num_imgs_per_tracklet.append(len(img_paths)) # [16,79,15...'每个小段视频包含的图片帧数目']\n\n num_tracklets = len(tracklets) # 8298\n\n cams = set(cams)\n num_cams = len(cams)\n\n print(\"Saving split to {}\".format(json_path))\n split_dict = {\n 'tracklets': tracklets,\n 'num_tracklets': num_tracklets,\n 'num_pids': num_pids,\n 'num_imgs_per_tracklet': num_imgs_per_tracklet,\n 'num_cams' : num_cams,\n 'num_tracks' : 1\n }\n write_json(split_dict, json_path)\n\n return tracklets, num_tracklets, num_pids, num_imgs_per_tracklet, num_cams, 1\n \n def _process_gallery_data(self, names, meta_data, home_dir=None, relabel=False, min_seq_len=0, json_path=''):\n if osp.exists(json_path):\n print(\"=> {} generated before, awesome!\".format(json_path))\n split = read_json(json_path)\n return split['tracklets'], split['num_tracklets'], split['num_pids'], split['num_imgs_per_tracklet'], split['pids'], split['camid']\n\n assert home_dir in ['bbox_train', 'bbox_test']\n num_tracklets = meta_data.shape[0] # 8298 TODO: 要不要增加?\n pid_list = list(set(meta_data[:, 2].tolist())) # pid = 625 => [1 3 5 7 9...]\n num_pids = len(pid_list) # 626 622\n\n if relabel:\n pid2label = {pid: label for label, pid in enumerate(pid_list)} # {1:0,3:1,5:2,...}\n tracklets = []\n num_imgs_per_tracklet = []\n gallery_pid = []\n gallery_camid = []\n\n for tracklet_idx in range(num_tracklets):\n data = meta_data[tracklet_idx, ...] # [1 16 1 1]\n start_index, end_index, pid, camid = data\n\n if pid == -1:\n continue # junk images are just ignored\n assert 1 <= camid <= 6\n if relabel:\n pid = pid2label[pid] # pid = 0\n camid -= 1\n # index starts from 0\n img_names = names[start_index - 1:end_index]\n # <class 'list'>:['0001C1T0001F001.jpg'.. 
'0001C1T0001F016.jpg']\n\n # make sure image names correspond to the same person\n pnames = [img_name[:4] for img_name in img_names] # pnames = ['0001','0001'...]\n assert len(set(pnames)) == 1, \"Error: a single tracklet contains different person images\"\n\n # make sure all images are captured under the same camera\n camnames = [img_name[5] for img_name in img_names] # camnames = ['1','1'...]\n assert len(set(camnames)) == 1, \"Error: images are captured under different cameras!\"\n\n # append image names with directory information\n # '/media/ying/0BDD17830BDD1783/ReIdDataset/Mars/bbox_train/0001/0001C1T0001F001.jpg'\n img_paths = [osp.join(self.root, home_dir, img_name[:4], img_name) for img_name in img_names] # list<16>\n if len(img_paths) >= min_seq_len:\n img_paths = tuple(img_paths)\n tracklets.append((img_paths, int(pid), int(camid), 1)) # (('.jpg','.jpg','每张图片的路径'), 0'行人id', 0'camid' )\n num_imgs_per_tracklet.append(len(img_paths)) # [16,79,15...'每个小段视频包含的图片帧数目']\n gallery_pid.append(int(pid))\n gallery_camid.append(int(camid))\n num_tracklets = len(tracklets) # 8298\n print(\"Saving split to {}\".format(json_path))\n split_dict = {\n 'tracklets': tracklets,\n 'num_tracklets': num_tracklets,\n 'num_pids': num_pids,\n 'num_imgs_per_tracklet': num_imgs_per_tracklet,\n 'pids': gallery_pid,\n 'camid': gallery_camid,\n }\n write_json(split_dict, json_path)\n return tracklets, num_tracklets, num_pids, num_imgs_per_tracklet, gallery_pid, gallery_camid" }, { "identifier": "iLIDSVIDSEQUENCE", "path": "datasets/set/ilidsvidsequence.py", "snippet": "class iLIDSVIDSEQUENCE(Datasequence):\n\n def __init__(self, root, split_id=0, seq_len=12, seq_srd=6, num_val=1, download=False):\n super(iLIDSVIDSEQUENCE, self).__init__(root, split_id=split_id)\n\n if download:\n self.download()\n\n if not self._check_integrity():\n self.imgextract()\n # --> load完后就有了train,val,和trainval,实际上最开始只有trainval,我们按照num_val\n self.load(seq_len, seq_srd, num_val)\n self.num_train_cams = 2\n self.num_train_vids = 1\n\n self.query, query_pid, query_camid, query_num = self._pluckseq_cam(self.identities, self.split['query'],\n seq_len, seq_srd, 0)\n self.queryinfo = infostruct()\n self.queryinfo.pid = query_pid\n self.queryinfo.camid = query_camid\n self.queryinfo.tranum = query_num\n\n self.gallery, gallery_pid, gallery_camid, gallery_num = self._pluckseq_cam(self.identities,\n self.split['gallery'],\n seq_len, seq_srd, 1)\n self.galleryinfo = infostruct()\n self.galleryinfo.pid = gallery_pid\n self.galleryinfo.camid = gallery_camid\n self.galleryinfo.tranum = gallery_num\n\n @property\n def other_dir(self):\n return osp.join(self.root, 'others')\n\n def download(self):\n\n if self._check_integrity():\n print(\"Files already downloaded and verified\")\n return\n\n raw_dir = osp.join(self.root, 'raw')\n mkdir_if_missing(raw_dir)\n\n fpath1 = osp.join(raw_dir, datasetname + '.tar')\n fpath2 = osp.join(raw_dir, flowname + '.tar')\n\n if osp.isfile(fpath1) and osp.isfile(fpath2):\n print(\"Using the download file:\" + fpath1 + \" \" + fpath2)\n else:\n print(\"Please firstly download the files\")\n raise RuntimeError(\"Downloaded file missing!\")\n\n def imgextract(self):\n\n raw_dir = osp.join(self.root, 'raw')\n exdir1 = osp.join(raw_dir, datasetname)\n exdir2 = osp.join(raw_dir, flowname)\n fpath1 = osp.join(raw_dir, datasetname + '.tar')\n fpath2 = osp.join(raw_dir, flowname + '.tar')\n\n if not osp.isdir(exdir1):\n print(\"Extracting tar file\")\n cwd = os.getcwd()\n tar = tarfile.open(fpath1)\n mkdir_if_missing(exdir1)\n 
os.chdir(exdir1)\n tar.extractall()\n tar.close()\n os.chdir(cwd)\n\n if not osp.isdir(exdir2):\n print(\"Extracting tar file\")\n cwd = os.getcwd()\n tar = tarfile.open(fpath2)\n mkdir_if_missing(exdir2)\n os.chdir(exdir2)\n tar.extractall()\n tar.close()\n os.chdir(cwd)\n\n # reorganzing the dataset\n # Format\n\n temp_images_dir = osp.join(self.root, 'temp_images')\n mkdir_if_missing(temp_images_dir)\n\n temp_others_dir = osp.join(self.root, 'temp_others')\n mkdir_if_missing(temp_others_dir)\n\n images_dir = osp.join(self.root, 'images')\n mkdir_if_missing(images_dir)\n\n others_dir = osp.join(self.root, 'others')\n mkdir_if_missing(others_dir)\n\n fpaths1 = sorted(glob(osp.join(exdir1, 'i-LIDS-VID/sequences', '*/*/*.png')))\n fpaths2 = sorted(glob(osp.join(exdir2, flowname, '*/*/*.png')))\n\n identities_imgraw = [[[] for _ in range(2)] for _ in range(319)]\n identities_otherraw = [[[] for _ in range(2)] for _ in range(319)]\n\n # image information\n for fpath in fpaths1:\n fname = osp.basename(fpath)\n fname_list = fname.split('_')\n cam_name = fname_list[0]\n pid_name = fname_list[1]\n cam = int(cam_name[-1])\n pid = int(pid_name[-3:])\n temp_fname = ('{:08d}_{:02d}_{:04d}.png'\n .format(pid, cam, len(identities_imgraw[pid - 1][cam - 1])))\n identities_imgraw[pid - 1][cam - 1].append(temp_fname)\n shutil.copy(fpath, osp.join(temp_images_dir, temp_fname))\n\n identities_temp = [x for x in identities_imgraw if x != [[], []]]\n identities_images = identities_temp\n\n for pid in range(len(identities_temp)):\n for cam in range(2):\n for img in range(len(identities_images[pid][cam])):\n temp_fname = identities_temp[pid][cam][img]\n fname = ('{:08d}_{:02d}_{:04d}.png'\n .format(pid, cam, img))\n identities_images[pid][cam][img] = fname\n shutil.copy(osp.join(temp_images_dir, temp_fname), osp.join(images_dir, fname))\n\n shutil.rmtree(temp_images_dir)\n\n # flow information\n\n for fpath in fpaths2:\n fname = osp.basename(fpath)\n fname_list = fname.split('_')\n cam_name = fname_list[0]\n pid_name = fname_list[1]\n cam = int(cam_name[-1])\n pid = int(pid_name[-3:])\n temp_fname = ('{:08d}_{:02d}_{:04d}.png'\n .format(pid, cam, len(identities_otherraw[pid - 1][cam - 1])))\n identities_otherraw[pid - 1][cam - 1].append(temp_fname)\n shutil.copy(fpath, osp.join(temp_others_dir, temp_fname))\n\n identities_temp = [x for x in identities_otherraw if x != [[], []]]\n identities_others = identities_temp\n\n for pid in range(len(identities_temp)):\n for cam in range(2):\n for img in range(len(identities_others[pid][cam])):\n temp_fname = identities_temp[pid][cam][img]\n fname = ('{:08d}_{:02d}_{:04d}.png'\n .format(pid, cam, img))\n identities_others[pid][cam][img] = fname\n shutil.copy(osp.join(temp_others_dir, temp_fname), osp.join(others_dir, fname))\n\n shutil.rmtree(temp_others_dir)\n\n meta = {'name': 'iLIDS-sequence', 'shot': 'sequence', 'num_cameras': 2,\n 'identities': identities_images}\n\n write_json(meta, osp.join(self.root, 'meta.json'))\n\n # Consider fixed training and testing split\n splitmat_name = osp.join(exdir1, 'train-test people splits', 'train_test_splits_ilidsvid.mat')\n data = sio.loadmat(splitmat_name)\n person_list = data['ls_set']\n num = len(identities_images)\n splits = []\n\n for i in range(10):\n pids = (person_list[i] - 1).tolist()\n trainval_pids = sorted(pids[:num // 2])\n test_pids = sorted(pids[num // 2:])\n split = {'trainval': trainval_pids,\n 'query': test_pids,\n 'gallery': test_pids}\n splits.append(split)\n write_json(splits, osp.join(self.root, 
'splits.json'))\n\n def _pluckseq_cam(self, identities, indices, seq_len, seq_str, camid):\n # --> query和gallery与 trainval不同的是\n # --> trainval是用来训练的,所以怎么处理都行\n # --> query和gallery是来模拟实际场景的,所以不能用那种重复采样的方法扩充两个数据集\n # --> 另外要求是不同镜头下的,所以加一个camid\n\n ret = []\n per_id = []\n cam_id = []\n tra_num = []\n\n for index, pid in enumerate(indices):\n pid_images = identities[pid]\n cam_images = pid_images[camid]\n seqall = len(cam_images)\n seq_inds = [(start_ind, start_ind + seq_len) for start_ind in range(0, seqall - seq_len, seq_str)]\n if not seq_inds:\n seq_inds = [(0, seqall)]\n for seq_ind in seq_inds:\n ret.append((seq_ind[0], seq_ind[1], pid, index, camid))\n per_id.append(pid)\n cam_id.append(camid)\n tra_num.append(len(seq_inds))\n return ret, per_id, cam_id, tra_num" }, { "identifier": "LSVID", "path": "datasets/set/lsvid.py", "snippet": "class LSVID(object):\n\n def __init__(self, root=None, sampling_step=48, *args, **kwargs):\n self._root = root\n self.train_name_path = osp.join(self._root, 'info/list_sequence/list_seq_train.txt')\n self.test_name_path = osp.join(self._root, 'info/list_sequence/list_seq_test.txt')\n self.query_IDX_path = osp.join(self._root, 'info/data/info_test.mat')\n\n self._check_before_run()\n\n # prepare meta data\n track_train = self._get_names(self.train_name_path)\n track_test = self._get_names(self.test_name_path)\n\n track_train = np.array(track_train)\n track_test = np.array(track_test)\n\n query_IDX = h5py.File(self.query_IDX_path, mode='r')['query'][0,:] # numpy.ndarray (1980,)\n query_IDX = np.array(query_IDX, dtype=int)\n\n query_IDX -= 1 # index from 0\n track_query = track_test[query_IDX, :]\n\n gallery_IDX = [i for i in range(track_test.shape[0]) if i not in query_IDX]\n track_gallery = track_test[gallery_IDX, :]\n\n self.split_train_dense_json_path = osp.join(self._root,'split_train_dense_{}.json'.format(sampling_step))\n self.split_train_json_path = osp.join(self._root, 'split_train.json')\n self.split_query_json_path = osp.join(self._root, 'split_query.json')\n self.split_gallery_json_path = osp.join(self._root, 'split_gallery.json')\n\n train, num_train_tracklets, num_train_pids, num_train_imgs, num_train_cams, num_train_vids = \\\n self._process_data(track_train, json_path=self.split_train_json_path, relabel=True)\n\n train_dense, num_train_tracklets_dense, num_train_pids_dense, num_train_imgs_dense, _, _ = \\\n self._process_data(track_train, json_path=self.split_train_dense_json_path, relabel=True, sampling_step=sampling_step)\n\n query, num_query_tracklets, num_query_pids, num_query_imgs, _, _ = \\\n self._process_data(track_query, json_path=self.split_query_json_path, relabel=False)\n\n gallery, num_gallery_tracklets, num_gallery_pids, num_gallery_imgs, _, _ = \\\n self._process_data(track_gallery, json_path=self.split_gallery_json_path, relabel=False)\n\n num_imgs_per_tracklet = num_train_imgs + num_gallery_imgs + num_query_imgs\n min_num = np.min(num_imgs_per_tracklet)\n max_num = np.max(num_imgs_per_tracklet)\n avg_num = np.mean(num_imgs_per_tracklet)\n\n num_total_pids = num_train_pids + num_gallery_pids\n num_total_tracklets = num_train_tracklets + num_gallery_tracklets + num_query_tracklets\n\n print(\"=> LS-VID loaded\")\n print(\"Dataset statistics:\")\n print(\" ------------------------------\")\n print(\" subset | # ids | # tracklets\")\n print(\" ------------------------------\")\n print(\" train | {:5d} | {:8d}\".format(num_train_pids, num_train_tracklets))\n if sampling_step != 0:\n print(\" train_d | {:5d} | 
{:8d}\".format(num_train_pids_dense, num_train_tracklets_dense))\n print(\" query | {:5d} | {:8d}\".format(num_query_pids, num_query_tracklets))\n print(\" gallery | {:5d} | {:8d}\".format(num_gallery_pids, num_gallery_tracklets))\n print(\" ------------------------------\")\n print(\" total | {:5d} | {:8d}\".format(num_total_pids, num_total_tracklets))\n print(\" number of images per tracklet: {} ~ {}, average {:.1f}\".format(min_num, max_num, avg_num))\n print(\" ------------------------------\")\n\n if sampling_step != 0:\n self.train = train_dense\n else:\n self.train = train\n self.query = query\n self.gallery = gallery\n\n self.num_train_pids = num_train_pids\n self.num_query_pids = num_query_pids\n self.num_gallery_pids = num_gallery_pids\n\n self.num_train_cams = num_train_cams\n self.num_train_vids = num_train_vids\n\n def _check_before_run(self):\n \"\"\"Check if all files are available before going deeper\"\"\"\n if not osp.exists(self._root):\n raise RuntimeError(\"'{}' is not available\".format(self._root))\n if not osp.exists(self.train_name_path):\n raise RuntimeError(\"'{}' is not available\".format(self.train_name_path))\n if not osp.exists(self.test_name_path):\n raise RuntimeError(\"'{}' is not available\".format(self.test_name_path))\n if not osp.exists(self.query_IDX_path):\n raise RuntimeError(\"'{}' is not available\".format(self.query_IDX_path))\n\n def _get_names(self, fpath):\n names = []\n with open(fpath, 'r') as f:\n for line in f:\n new_line = line.rstrip()\n basepath, pid = new_line.split(' ')\n names.append([basepath, int(pid)])\n return names\n\n def _process_data(self,\n meta_data,\n relabel=False,\n json_path=None,\n sampling_step=0):\n if osp.exists(json_path):\n split = read_json(json_path)\n return split['tracklets'], split['num_tracklets'], split['num_pids'], split['num_imgs_per_tracklet'], split['num_cams'], split['num_tracks']\n\n num_tracklets = meta_data.shape[0]\n pid_list = list(set(meta_data[:, 1].tolist()))\n num_pids = len(pid_list)\n\n if relabel: pid2label = {int(pid): label for label, pid in enumerate(pid_list)}\n tracklets = []\n num_imgs_per_tracklet = []\n cams = []\n\n for tracklet_idx in range(num_tracklets):\n tracklet_path = osp.join(self._root, meta_data[tracklet_idx, 0]) + '*'\n img_paths = glob.glob(tracklet_path) # avoid .DS_Store\n img_paths.sort()\n pid = int(meta_data[tracklet_idx, 1])\n _, _, camid, _ = osp.basename(img_paths[0]).split('_')[:4]\n cams += [int(camid)]\n camid = int(camid)\n\n if pid == -1: continue # junk images are just ignored\n assert 1 <= camid <= 15\n if relabel: pid = pid2label[pid]\n camid -= 1 # index starts from 0\n \n if sampling_step == 0:\n tracklets.append((img_paths, pid, camid, 1))\n else:\n num_sampling = len(img_paths) // sampling_step\n for idx in range(num_sampling):\n if idx == num_sampling - 1:\n tracklets.append((img_paths[idx * sampling_step:], pid, camid, 1))\n else:\n tracklets.append((img_paths[idx * sampling_step: (idx + 1) * sampling_step], pid, camid, 1))\n num_imgs_per_tracklet.append(len(img_paths))\n\n num_tracklets = len(tracklets)\n cams = set(cams)\n num_cams = len(cams)\n\n print(\"Saving split to {}\".format(json_path))\n split_dict = {'tracklets': tracklets, 'num_tracklets': num_tracklets, 'num_pids': num_pids,\n 'num_imgs_per_tracklet': num_imgs_per_tracklet, 'num_cams' : num_cams, 'num_tracks' : 1}\n write_json(split_dict, json_path)\n\n return tracklets, num_tracklets, num_pids, num_imgs_per_tracklet, num_cams, 1" } ]
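The VideoDataset snippet above implements restricted random sampling (RRS): each tracklet is split into seq_len equal chunks and one frame is drawn per chunk, randomly in 'rrs_train' mode and deterministically (the first frame) in 'rrs_test' mode. The toy sketch below reproduces just that indexing with made-up numbers (num=27 frames, S=8) to make the chunk layout concrete; it assumes num >= S, i.e. the padded branch of the original code.

import math
import numpy as np

num, S = 27, 8                                                        # toy tracklet length and seq_len
frame_indices = list(range(num))
inter_val = math.ceil(num / S)                                        # chunk size, here 4
strip = frame_indices + [frame_indices[-1]] * (inter_val * S - num)   # pad with the last frame to length 32
sample_clip = np.array([strip[inter_val * s:inter_val * (s + 1)] for s in range(S)])  # shape (8, 4)

# 'rrs_train': one random frame per chunk; 'rrs_test': always the first frame of each chunk.
train_idx = sample_clip[np.arange(S), np.random.choice(sample_clip.shape[1], S)]
test_idx = sample_clip[:, 0]
print(train_idx.shape, test_idx)                                      # (8,) [ 0  4  8 12 16 20 24 26]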
import torch
import utils.spatial_transforms as ST
import utils.temporal_transforms as TT
import utils.transforms as T
import utils.seqtransforms as SeqT
from torch.utils.data import DataLoader
from datasets.video_loader_xh import VideoDataset
from datasets.samplers import RandomIdentitySampler, RandomIdentitySamplerForSeq, RandomIdentitySamplerWYQ
from datasets.seqpreprocessor import SeqTrainPreprocessor, SeqTestPreprocessor
from datasets.set.mars import Mars
from datasets.set.ilidsvidsequence import iLIDSVIDSEQUENCE
from datasets.set.lsvid import LSVID
12,439
# from torchvision.transforms import InterpolationMode
# import torchvision.transforms as T
__factory = {
    'mars': Mars,
    'ilidsvidsequence': iLIDSVIDSEQUENCE,
    'lsvid': LSVID
}


def train_collate_fn(batch):
    """
    # collate_fn receives a list whose length is the batch size; each element is the result returned by __getitem__
    """
    imgs, pids, camids, viewids, _ = zip(*batch)
    pids = torch.tensor(pids, dtype=torch.int64)
    viewids = torch.tensor(viewids, dtype=torch.int64)
    camids = torch.tensor(camids, dtype=torch.int64)
    return torch.stack(imgs, dim=0), pids, camids, viewids,


def val_collate_fn(batch):
    imgs, pids, camids, viewids, img_paths = zip(*batch)
    viewids = torch.tensor(viewids, dtype=torch.int64)
    camids_batch = torch.tensor(camids, dtype=torch.int64)
    return torch.stack(imgs, dim=0), pids, camids, camids_batch, viewids, img_paths


def train_collate_fn_seq(batch):
    """
    # collate_fn receives a list whose length is the batch size; each element is the result returned by __getitem__
    """
    imgs, flows, pids, camids = zip(*batch)
    viewids = 1
    pids = torch.tensor(pids, dtype=torch.int64)
    viewids = torch.tensor(viewids, dtype=torch.int64)
    camids = torch.tensor(camids, dtype=torch.int64)
    return torch.stack(imgs, dim=0), pids, camids, viewids,


def val_collate_fn_seq(batch):
    imgs, flows, pids, camids = zip(*batch)
    viewids = 1
    img_paths = None
    viewids = torch.tensor(viewids, dtype=torch.int64)
    camids_batch = torch.tensor(camids, dtype=torch.int64)
    return torch.stack(imgs, dim=0), pids, camids, camids_batch, viewids, img_paths


def make_dataloader(cfg):
    split_id = cfg.DATASETS.SPLIT
    seq_srd = cfg.INPUT.SEQ_SRD
    seq_len = cfg.INPUT.SEQ_LEN
    num_workers = cfg.DATALOADER.NUM_WORKERS

    if cfg.DATASETS.NAMES != 'mars' and cfg.DATASETS.NAMES != 'duke' and cfg.DATASETS.NAMES != 'lsvid':
        dataset = __factory[cfg.DATASETS.NAMES](root=cfg.DATASETS.ROOT_DIR, split_id=split_id,
                                                seq_len=seq_len, seq_srd=seq_srd, num_val=1)
        num_classes = dataset.num_trainval_ids
        cam_num = dataset.num_train_cams
        view_num = dataset.num_train_vids
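make_dataloader dispatches through __factory; the MARS branch itself falls outside this excerpt, but the imported pieces are typically wired together roughly as sketched below. This is a hedged illustration, not the repository's code: the frame transform, image size, batch size and P/K settings are assumptions, and the small SeqToTensor helper stands in for the repo's utils.seqtransforms pipeline (VideoDataset applies its transform to a list of frame lists).

from torchvision import transforms as TV

class SeqToTensor:
    """Placeholder per-sequence transform: resize and tensorize every frame of every sub-sequence."""
    def __init__(self, size=(256, 128)):
        self.frame_tf = TV.Compose([TV.Resize(size), TV.ToTensor()])
    def __call__(self, seqs):
        return [[self.frame_tf(img) for img in seq] for seq in seqs]

dataset = Mars()                                   # expects the MARS info files at the paths set in the class
train_set = VideoDataset(dataset.train, seq_len=8, sample='rrs_train', transform=SeqToTensor())
train_loader = DataLoader(
    train_set,
    batch_size=16,                                 # P*K = 4 identities x 4 instances (assumed)
    sampler=RandomIdentitySampler(dataset.train, batch_size=16, num_instances=4),
    collate_fn=train_collate_fn,
    drop_last=True,
)
imgs, pids, camids, viewids = next(iter(train_loader))   # imgs: (16, 8, 3, 256, 128)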
train_set = SeqTrainPreprocessor(dataset.trainval, dataset, seq_len,
4
2023-12-11 04:03:46+00:00
16k
MarilynKeller/aitviewer-skel
examples/remote_amass.py
[ { "identifier": "CONFIG", "path": "aitviewer/configuration.py", "snippet": "CONFIG = Configuration()" }, { "identifier": "RemoteMeshes", "path": "aitviewer/remote/renderables/meshes.py", "snippet": "class RemoteMeshes(RemoteNode):\n MESSAGE_TYPE = Message.MESHES\n\n def __init__(self, viewer, vertices, faces, **kwargs):\n \"\"\"\n This initializer takes a RemoteViewer object and all other arguments are forwarded\n to the Meshes constructor on the remote Viewer.\n See the Meshes class for more information about parameters.\n\n :param viewer: a RemoteViewer object that will be used to send this node.\n :param vertices: Set of 3D coordinates as a np array of shape (N, V, 3) or (V, 3).\n \"\"\"\n super().__init__(\n viewer,\n vertices=vertices,\n faces=faces,\n **kwargs,\n )\n\n def add_frames(self, vertices):\n \"\"\"\n Add frames to the remote Meshes node by adding new vertices.\n\n :param vertices: Set of 3D coordinates as a np array of shape (N, V, 3) or (V, 3).\n \"\"\"\n return super().add_frames(vertices=vertices)\n\n def update_frames(self, vertices, frames):\n \"\"\"\n Update frames of the remote Meshes node by adding new vertices.\n\n :param vertices: Set of 3D coordinates as a np array of shape (N, V, 3) or (V, 3).\n :param frames: a list of integer frame indices of size N or a single integer frame index.\n \"\"\"\n return super().update_frames(vertices=vertices, frames=frames)" }, { "identifier": "RemoteSMPLSequence", "path": "aitviewer/remote/renderables/smpl.py", "snippet": "class RemoteSMPLSequence(RemoteNode):\n MESSAGE_TYPE = Message.SMPL\n\n def __init__(\n self,\n viewer,\n poses_body,\n **kwargs,\n ):\n \"\"\"\n This initializer takes a RemoteViewer object and all other arguments are forwarded\n to the SMPLLayer or SMPLSequence classes.\n The keyword arguments \"model_type\", \"gender\" and \"num_betas\" are forwarded to SMPLLayer,\n while the remaining arguments are forwarded to SMPLSequence.\n\n :param viewer: a RemoteViewer object that will be used to send this node.\n :param poses_body: An np array of shape (N, N_JOINTS*3) containing the pose parameters of the\n body, i.e. without hands or face parameters.\n \"\"\"\n super().__init__(\n viewer,\n poses_body=poses_body,\n **kwargs,\n )\n\n def add_frames(self, poses_body, poses_root=None, trans=None, betas=None):\n \"\"\"\n Add frames to the remote SMPLSequence node by adding body poses.\n\n :param poses_body: An np array of shape (N, N_JOINTS*3) or (N_JOINTS) containing the\n pose parameters of the body, i.e. without hands or face parameters.\n :param poses_root: An optional np array of shape (N, 3) or (3) containing the global root orientation.\n :param betas: An optional np array of shape (N, N_BETAS) or (N_BETAS) containing the shape parameters.\n :param trans: An optional np array of shape (N, 3) or (3) containing a global translation that is\n applied to all joints and vertices.\n\n \"\"\"\n return super().add_frames(poses_body=poses_body, poses_root=poses_root, trans=trans, betas=betas)\n\n def update_frames(self, poses_body, frames, poses_root=None, trans=None, betas=None):\n \"\"\"\n Update frames of the remote SMPLSequence node by updating body poses.\n\n :param poses_body: An np array of shape (N, N_JOINTS*3) or (N_JOINTS) containing the\n pose parameters of the body, i.e. 
without hands or face parameters.\n :param poses_root: An optional np array of shape (N, 3) or (3) containing the global root orientation.\n :param betas: An optional np array of shape (N, N_BETAS) or (N_BETAS) containing the shape parameters.\n :param trans: An optional np array of shape (N, 3) or (3) containing a global translation that is\n applied to all joints and vertices.\n \"\"\"\n\n return super().update_frames(\n poses_body=poses_body,\n frames=frames,\n poses_root=poses_root,\n trans=trans,\n betas=betas,\n )" }, { "identifier": "RemoteViewer", "path": "aitviewer/remote/viewer.py", "snippet": "class RemoteViewer:\n def __init__(self, host=\"localhost\", port=8417, timeout=10, verbose=True):\n \"\"\"\n Initializer.\n :param host: the IP address of a host to connect to as a string.\n :param port: the TCP port to connect to.\n :param timeout: a timeout in seconds for attempting to connect to the viewer.\n :param verbose: if True print info messages.\n \"\"\"\n url = f\"ws://{host}:{port}\"\n\n if verbose:\n print(f\"Connecting to remote viewer at {url}\")\n\n self.timeout = timeout\n self.connected = False\n\n # Semaphore used to wait for the connection to be setup by the client thread.\n self.semaphore = threading.Semaphore(0)\n\n # Create a thread for running the websocket client async loop.\n self.loop = asyncio.new_event_loop()\n self.thread = threading.Thread(target=self._entry, args=(url,), daemon=True)\n self.thread.start()\n\n # Wait for the connection to be setup.\n self.semaphore.acquire()\n if verbose:\n if self.connected:\n print(\"Connected\")\n else:\n print(f\"Failed to connect\")\n\n self.process: subprocess.Popen = None\n\n def __enter__(self):\n return self\n\n def __exit__(self, *args):\n self.close_connection()\n\n @classmethod\n def create_new_process(cls, args=None, **kwargs):\n \"\"\"\n Open a Viewer in a new process and return a RemoteViewer connected to it.\n\n :param args: This parameter can be used to specify an argument or\n a list of arguments that is used to create the new process.\n e.g: args = [\"path/to/script.py\", \"arg1\", \"arg2\"] will invoke the following command:\n python path/to/script.py arg1 arg2\n \"\"\"\n # If host is None create a new viewer in a separate process.\n if args is None:\n popen_args = [\"python\", \"-m\", \"aitviewer.server\"]\n else:\n if isinstance(args, list):\n popen_args = [\"python\"] + args\n else:\n popen_args = [\"python\", str(args)]\n\n # Create the viewer process.\n process = subprocess.Popen(\n popen_args,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n )\n\n # Create a remote viewer connected to the child process.\n v = cls(**kwargs)\n v.process = process\n\n return v\n\n def _entry(self, url):\n # Entry point of the client thread.\n\n asyncio.set_event_loop(self.loop)\n self.loop.run_until_complete(self._async_entry(url))\n\n async def _async_entry(self, url):\n # Async entry point of the client thread.\n\n # Attempt to connect until 'self.timeout' seconds passed.\n start_time = self.loop.time()\n try:\n while self.loop.time() < start_time + self.timeout:\n try:\n self.websocket = await websockets.connect(url, max_size=None)\n self.connected = True\n break\n except Exception as e:\n pass\n finally:\n # Release the semaphore to let the main thread continue after\n # attempting to connect. 
The main thread will read the\n # self.connected variable to know if we succeded at connecting.\n self.semaphore.release()\n\n # Exit the client thread if we failed to connect.\n if not self.connected:\n return\n\n # Create a queue for incoming messages to the main thread.\n self.recv_queue = queue.Queue()\n\n # Message loop.\n try:\n # This loop is exited whenever the connection is dropped\n # which causes and exception to be raised.\n async for message in self.websocket:\n data = pickle.loads(message)\n # Equeue data for the main thread to process.\n self.recv_queue.put_nowait(data)\n except Exception as e:\n print(f\"Message loop exception: {e}\")\n\n # Mark the connection as closed.\n self.connected = False\n\n def get_message(self, block=True):\n \"\"\"\n Returns the next message received by the remote viewer.\n\n :param block: if True this function blocks until a message is received, otherwise it returns immediately.\n\n :return: if block is True returns the next message or None if the connection has been closed.\n if block is False returns the next message or None if there are no messages.\n \"\"\"\n if self.connected:\n if block:\n while self.connected:\n try:\n return self.recv_queue.get(timeout=0.1)\n except queue.Empty:\n pass\n else:\n if not self.recv_queue.empty():\n return self.recv_queue.get_nowait()\n\n return None\n\n def process_messages(self, handler: Callable[[\"RemoteViewer\", object], None], block=True):\n \"\"\"\n Processes messages in a loop calling 'handler' for each message.\n\n :param block: if True this function blocks until the connection is closed, otherwise it returns\n after all messages received so far have been processed.\n\n :return: if block is True always returns False when the connection has been closed.\n if block is False returns True if the connection is still open or False if the connection\n has been closed.\n \"\"\"\n while True:\n msg = self.get_message(block)\n if msg is None:\n if block:\n return False\n else:\n return self.connected\n handler(self, msg)\n\n async def _async_send(self, data):\n await self.websocket.send(data)\n\n def send(self, data):\n try:\n if self.connected:\n # Send a message by adding a send coroutine to the thread's loop and wait for it to complete.\n asyncio.run_coroutine_threadsafe(self._async_send(data), self.loop).result()\n except Exception as e:\n print(f\"Send exception: {e}\")\n\n def send_message(self, type, uid=None, *args, **kwargs):\n \"\"\"\n Send a message to the viewer. 
See Viewer.process_message()\n for information about how these parameters are interpreted\n by the viewer.\n \"\"\"\n msg = make_message(type, uid, args, kwargs)\n data = pickle.dumps(msg)\n self.send(data)\n\n def set_frame(self, frame: int):\n \"\"\"\n Set the current active frame of the remote viewer.\n\n :param frame: an integer representing the id of the frame.\n \"\"\"\n self.send_message(Message.SET_FRAME, None, frame)\n\n def next_frame(self):\n \"\"\"Set the current active frame of the remote viewer to the next frame\"\"\"\n self.send_message(Message.NEXT_FRAME)\n\n def previous_frame(self):\n \"\"\"Set the current active frame of the remote viewer to the previous frame\"\"\"\n self.send_message(Message.PREVIOUS_FRAME)\n\n async def _async_close(self):\n await self.websocket.close()\n\n def close_connection(self):\n \"\"\"Close the connection with the remote viewer.\"\"\"\n if self.connected:\n asyncio.run_coroutine_threadsafe(self._async_close(), self.loop).result()\n\n # Wait for the client thread to exit.\n self.thread.join()\n\n def wait_process(self, print_viewer_output=True):\n \"\"\"\n If the viewer was created locally in a separate process wait for it\n to exit and optionally print the standard output of the remote viewer.\n\n :param print_viewer_output: if True print the output of the remote viewer.\n \"\"\"\n self.close_connection()\n if self.process is not None:\n self.process.wait()\n if print_viewer_output:\n print(\"\\nRemote viewer output:\")\n print(self.process.stdout.read().decode())" }, { "identifier": "SMPLSequence", "path": "aitviewer/renderables/smpl.py", "snippet": "class SMPLSequence(Node):\n \"\"\"\n Represents a temporal sequence of SMPL poses. Can be loaded from disk or initialized from memory.\n \"\"\"\n\n def __init__(\n self,\n poses_body,\n smpl_layer,\n poses_root=None,\n betas=None,\n trans=None,\n poses_left_hand=None,\n poses_right_hand=None,\n device=None,\n dtype=None,\n include_root=True,\n normalize_root=False,\n is_rigged=True,\n show_joint_angles=False,\n z_up=False,\n post_fk_func=None,\n icon=\"\\u0093\",\n **kwargs,\n ):\n \"\"\"\n Initializer.\n :param poses_body: An array (numpy ar pytorch) of shape (F, N_JOINTS*3) containing the pose parameters of the\n body, i.e. without hands or face parameters.\n :param smpl_layer: The SMPL layer that maps parameters to joint positions and/or dense surfaces.\n :param poses_root: An array (numpy or pytorch) of shape (F, 3) containing the global root orientation.\n :param betas: An array (numpy or pytorch) of shape (N_BETAS, ) containing the shape parameters.\n :param trans: An array (numpy or pytorch) of shape (F, 3) containing a global translation that is applied to\n all joints and vertices.\n :param device: The pytorch device for computations.\n :param dtype: The pytorch data type.\n :param include_root: Whether or not to include root information. If False, no root translation and no root\n rotation is applied.\n :param normalize_root: Whether or not to normalize the root. If True, the global root translation in the first\n frame is zero and the global root orientation is the identity.\n :param is_rigged: Whether or not to display the joints as a skeleton.\n :param show_joint_angles: Whether or not the coordinate frames at the joints should be visualized.\n :param z_up: Whether or not the input data assumes Z is up. 
If so, the data will be rotated such that Y is up.\n :param post_fk_func: User specified postprocessing function that is called after evaluating the SMPL model,\n the function signature must be: def post_fk_func(self, vertices, joints, current_frame_only),\n and it must return new values for vertices and joints with the same shapes.\n Shapes are:\n if current_frame_only is False: vertices (F, V, 3) and joints (F, N_JOINTS, 3)\n if current_frame_only is True: vertices (1, V, 3) and joints (1, N_JOINTS, 3)\n :param kwargs: Remaining arguments for rendering.\n \"\"\"\n assert len(poses_body.shape) == 2\n\n # Set model icon\n if smpl_layer.model_type == \"mano\":\n icon = \"\\u0092\"\n elif smpl_layer.model_type == \"flame\":\n icon = \"\\u0091\"\n\n if device is None:\n device = C.device\n if dtype is None:\n dtype = C.f_precision\n\n super(SMPLSequence, self).__init__(n_frames=poses_body.shape[0], icon=icon, gui_material=False, **kwargs)\n\n self.smpl_layer = smpl_layer\n self.post_fk_func = post_fk_func\n self.dtype = dtype\n self.device = device\n\n self.poses_body = to_torch(poses_body, dtype=dtype, device=device)\n self.poses_left_hand = to_torch(poses_left_hand, dtype=dtype, device=device)\n self.poses_right_hand = to_torch(poses_right_hand, dtype=dtype, device=device)\n\n poses_root = poses_root if poses_root is not None else torch.zeros([len(poses_body), 3])\n betas = betas if betas is not None else torch.zeros([1, self.smpl_layer.num_betas])\n trans = trans if trans is not None else torch.zeros([len(poses_body), 3])\n\n self.poses_root = to_torch(poses_root, dtype=dtype, device=device)\n self.betas = to_torch(betas, dtype=dtype, device=device)\n self.trans = to_torch(trans, dtype=dtype, device=device)\n\n if len(self.betas.shape) == 1:\n self.betas = self.betas.unsqueeze(0)\n\n self._include_root = include_root\n self._normalize_root = normalize_root\n self._show_joint_angles = show_joint_angles\n self._is_rigged = is_rigged or show_joint_angles\n self._render_kwargs = kwargs\n self._z_up = z_up\n\n if not self._include_root:\n self.poses_root = torch.zeros_like(self.poses_root)\n self.trans = torch.zeros_like(self.trans)\n\n if self._normalize_root:\n root_ori = aa2rot(self.poses_root)\n first_root_ori = torch.inverse(root_ori[0:1])\n root_ori = torch.matmul(first_root_ori, root_ori)\n self.poses_root = rot2aa(root_ori)\n\n trans = torch.matmul(first_root_ori.unsqueeze(0), self.trans.unsqueeze(-1)).squeeze()\n self.trans = trans - trans[0:1]\n\n # Edit mode\n self.gui_modes.update({\"edit\": {\"title\": \" Edit\", \"fn\": self.gui_mode_edit, \"icon\": \"\\u0081\"}})\n\n self._edit_joint = None\n self._edit_pose = None\n self._edit_pose_dirty = False\n self._edit_local_axes = True\n\n # Nodes\n self.vertices, self.joints, self.faces, self.skeleton = self.fk()\n\n if self._is_rigged:\n self.skeleton_seq = Skeletons(\n self.joints,\n self.skeleton,\n gui_affine=False,\n color=(1.0, 177 / 255, 1 / 255, 1.0),\n name=\"Skeleton\",\n )\n self._add_node(self.skeleton_seq)\n\n # First convert the relative joint angles to global joint angles in rotation matrix form.\n if self.smpl_layer.model_type != \"flame\":\n global_oris = local_to_global(\n torch.cat([self.poses_root, self.poses_body], dim=-1),\n self.skeleton[:, 0],\n output_format=\"rotmat\",\n )\n global_oris = c2c(global_oris.reshape((self.n_frames, -1, 3, 3)))\n else:\n global_oris = np.tile(np.eye(3), self.joints.shape[:-1])[np.newaxis]\n\n if self._z_up and not C.z_up:\n self.rotation = np.matmul(np.array([[1, 0, 0], [0, 0, 1], 
[0, -1, 0]]), self.rotation)\n\n self.rbs = RigidBodies(self.joints, global_oris, length=0.1, gui_affine=False, name=\"Joint Angles\")\n self._add_node(self.rbs, enabled=self._show_joint_angles)\n\n self.mesh_seq = Meshes(\n self.vertices,\n self.faces,\n is_selectable=False,\n gui_affine=False,\n color=kwargs.get(\"color\", (160 / 255, 160 / 255, 160 / 255, 1.0)),\n name=\"Mesh\",\n )\n self._add_node(self.mesh_seq)\n\n # Save view mode state to restore when exiting edit mode.\n self._view_mode_color = self.mesh_seq.color\n self._view_mode_joint_angles = self._show_joint_angles\n\n @classmethod\n def from_amass(\n cls,\n npz_data_path,\n smpl_layer=None,\n start_frame=None,\n end_frame=None,\n log=True,\n fps_out=None,\n z_up=True,\n **kwargs,\n ):\n \"\"\"Load a sequence downloaded from the AMASS website.\"\"\"\n\n body_data = np.load(npz_data_path)\n if smpl_layer is None:\n smpl_layer = SMPLLayer(model_type=\"smplh\", gender=body_data[\"gender\"].item(), device=C.device)\n\n if log:\n print(\"Data keys available: {}\".format(list(body_data.keys())))\n print(\"{:>6d} poses of size {:>4d}.\".format(body_data[\"poses\"].shape[0], body_data[\"poses\"].shape[1]))\n print(\"{:>6d} trans of size {:>4d}.\".format(body_data[\"trans\"].shape[0], body_data[\"trans\"].shape[1]))\n print(\"{:>6d} shape of size {:>4d}.\".format(1, body_data[\"betas\"].shape[0]))\n print(\"Gender {}\".format(body_data[\"gender\"]))\n print(\"FPS {}\".format(body_data[\"mocap_framerate\"]))\n\n sf = start_frame or 0\n ef = end_frame or body_data[\"poses\"].shape[0]\n poses = body_data[\"poses\"][sf:ef]\n trans = body_data[\"trans\"][sf:ef]\n\n if fps_out is not None:\n fps_in = body_data[\"mocap_framerate\"].tolist()\n if fps_in != fps_out:\n ps = np.reshape(poses, [poses.shape[0], -1, 3])\n ps_new = resample_rotations(ps, fps_in, fps_out)\n poses = np.reshape(ps_new, [-1, poses.shape[1]])\n trans = resample_positions(trans, fps_in, fps_out)\n\n i_root_end = 3\n i_body_end = i_root_end + smpl_layer.bm.NUM_BODY_JOINTS * 3\n i_left_hand_end = i_body_end + smpl_layer.bm.NUM_HAND_JOINTS * 3\n i_right_hand_end = i_left_hand_end + smpl_layer.bm.NUM_HAND_JOINTS * 3\n\n return cls(\n poses_body=poses[:, i_root_end:i_body_end],\n poses_root=poses[:, :i_root_end],\n poses_left_hand=poses[:, i_body_end:i_left_hand_end],\n poses_right_hand=poses[:, i_left_hand_end:i_right_hand_end],\n smpl_layer=smpl_layer,\n betas=body_data[\"betas\"][np.newaxis],\n trans=trans,\n z_up=z_up,\n **kwargs,\n )\n\n @classmethod\n def from_3dpw(cls, pkl_data_path, **kwargs):\n \"\"\"Load a 3DPW sequence which might contain multiple people.\"\"\"\n with open(pkl_data_path, \"rb\") as p:\n body_data = pkl.load(p, encoding=\"latin1\")\n num_people = len(body_data[\"poses\"])\n\n name = kwargs.get(\"name\", \"3DPW\")\n\n seqs = []\n for i in range(num_people):\n gender = body_data[\"genders\"][i]\n smpl_layer = SMPLLayer(\n model_type=\"smpl\",\n gender=\"female\" if gender == \"f\" else \"male\",\n device=C.device,\n num_betas=10,\n )\n\n # Extract the 30 Hz data that is already aligned with the image data.\n poses = body_data[\"poses\"][i]\n trans = body_data[\"trans\"][i]\n betas = body_data[\"betas\"][i]\n\n if len(betas.shape) == 1:\n betas = betas[np.newaxis]\n\n poses_body = poses[:, 3:]\n poses_root = poses[:, :3]\n trans_root = trans\n\n kwargs[\"name\"] = name + \" S{}\".format(i)\n seq = cls(\n poses_body=poses_body,\n poses_root=poses_root,\n trans=trans_root,\n smpl_layer=smpl_layer,\n betas=betas,\n **kwargs,\n )\n seqs.append(seq)\n\n # 
Load camera poses.\n camera_data = {\n \"intrinsics\": body_data[\"cam_intrinsics\"],\n \"extrinsics\": body_data[\"cam_poses\"],\n \"campose_valid\": body_data[\"campose_valid\"],\n }\n\n return seqs, camera_data\n\n @classmethod\n def t_pose(cls, smpl_layer=None, betas=None, frames=1, **kwargs):\n \"\"\"Creates a SMPL sequence whose single frame is a SMPL mesh in T-Pose.\"\"\"\n\n if smpl_layer is None:\n smpl_layer = SMPLLayer(model_type=\"smplh\", gender=\"neutral\")\n\n poses = np.zeros([frames, smpl_layer.bm.NUM_BODY_JOINTS * 3]) # including hands and global root\n return cls(poses, smpl_layer, betas=betas, **kwargs)\n\n @classmethod\n def from_npz(cls, file: Union[IO, str], smpl_layer: SMPLLayer = None, **kwargs):\n \"\"\"Creates a SMPL sequence from a .npz file exported through the 'export' function.\"\"\"\n if smpl_layer is None:\n smpl_layer = SMPLLayer(model_type=\"smplh\", gender=\"neutral\")\n\n data = np.load(file)\n\n return cls(\n smpl_layer=smpl_layer,\n poses_body=data[\"poses_body\"],\n poses_root=data[\"poses_root\"],\n betas=data[\"betas\"],\n trans=data[\"trans\"],\n **kwargs,\n )\n\n def export_to_npz(self, file: Union[IO, str]):\n np.savez(\n file,\n poses_body=c2c(self.poses_body),\n poses_root=c2c(self.poses_root),\n betas=c2c(self.betas),\n trans=c2c(self.trans),\n )\n\n @property\n def color(self):\n return self.mesh_seq.color\n\n @color.setter\n def color(self, color):\n self.mesh_seq.color = color\n\n @property\n def bounds(self):\n return self.mesh_seq.bounds\n\n @property\n def current_bounds(self):\n return self.mesh_seq.current_bounds\n\n @property\n def vertex_normals(self):\n return self.mesh_seq.vertex_normals\n\n @property\n def poses(self):\n return torch.cat((self.poses_root, self.poses_body), dim=-1)\n\n @property\n def _edit_mode(self):\n return self.selected_mode == \"edit\"\n\n def fk(self, current_frame_only=False):\n \"\"\"Get joints and/or vertices from the poses.\"\"\"\n if current_frame_only:\n # Use current frame data.\n if self._edit_mode:\n poses_root = self._edit_pose[:3][None, :]\n poses_body = self._edit_pose[3:][None, :]\n else:\n poses_body = self.poses_body[self.current_frame_id][None, :]\n poses_root = self.poses_root[self.current_frame_id][None, :]\n\n poses_left_hand = (\n None if self.poses_left_hand is None else self.poses_left_hand[self.current_frame_id][None, :]\n )\n poses_right_hand = (\n None if self.poses_right_hand is None else self.poses_right_hand[self.current_frame_id][None, :]\n )\n trans = self.trans[self.current_frame_id][None, :]\n\n if self.betas.shape[0] == self.n_frames:\n betas = self.betas[self.current_frame_id][None, :]\n else:\n betas = self.betas\n else:\n # Use the whole sequence.\n if self._edit_mode:\n poses_root = self.poses_root.clone()\n poses_body = self.poses_body.clone()\n\n poses_root[self.current_frame_id] = self._edit_pose[:3]\n poses_body[self.current_frame_id] = self._edit_pose[3:]\n else:\n poses_body = self.poses_body\n poses_root = self.poses_root\n\n poses_left_hand = self.poses_left_hand\n poses_right_hand = self.poses_right_hand\n trans = self.trans\n betas = self.betas\n\n verts, joints = self.smpl_layer(\n poses_root=poses_root,\n poses_body=poses_body,\n poses_left_hand=poses_left_hand,\n poses_right_hand=poses_right_hand,\n betas=betas,\n trans=trans,\n )\n\n # Apply post_fk_func if specified.\n if self.post_fk_func:\n verts, joints = self.post_fk_func(self, verts, joints, current_frame_only)\n\n skeleton = self.smpl_layer.skeletons()[\"body\"].T\n faces = 
self.smpl_layer.bm.faces.astype(np.int64)\n joints = joints[:, : skeleton.shape[0]]\n\n if current_frame_only:\n return c2c(verts)[0], c2c(joints)[0], c2c(faces), c2c(skeleton)\n else:\n return c2c(verts), c2c(joints), c2c(faces), c2c(skeleton)\n\n def interpolate(self, frame_ids):\n \"\"\"\n Replace the frames at the given frame IDs via an interpolation of its neighbors. Only the body pose as well\n as the root pose and translation are interpolated.\n :param frame_ids: A list of frame ids to be interpolated.\n \"\"\"\n ids = np.unique(frame_ids)\n all_ids = np.arange(self.n_frames)\n mask_avail = np.ones(self.n_frames, dtype=np.bool)\n mask_avail[ids] = False\n\n # Interpolate poses.\n all_poses = torch.cat([self.poses_root, self.poses_body], dim=-1)\n ps = np.reshape(all_poses.cpu().numpy(), (self.n_frames, -1, 3))\n ps_interp = interpolate_rotations(ps[mask_avail], all_ids[mask_avail], ids)\n all_poses[ids] = torch.from_numpy(ps_interp.reshape(len(ids), -1)).to(\n dtype=self.betas.dtype, device=self.betas.device\n )\n self.poses_root = all_poses[:, :3]\n self.poses_body = all_poses[:, 3:]\n\n # Interpolate global translation.\n ts = self.trans.cpu().numpy()\n ts_interp = interpolate_positions(ts[mask_avail], all_ids[mask_avail], ids)\n self.trans[ids] = torch.from_numpy(ts_interp).to(dtype=self.betas.dtype, device=self.betas.device)\n\n self.redraw()\n\n @hooked\n def on_before_frame_update(self):\n if self._edit_mode and self._edit_pose_dirty:\n self._edit_pose = self.poses[self.current_frame_id].clone()\n self.redraw(current_frame_only=True)\n self._edit_pose_dirty = False\n\n @hooked\n def on_frame_update(self):\n if self.edit_mode:\n self._edit_pose = self.poses[self.current_frame_id].clone()\n self._edit_pose_dirty = False\n\n def redraw(self, **kwargs):\n current_frame_only = kwargs.get(\"current_frame_only\", False)\n\n # Use the edited pose if in edit mode.\n vertices, joints, self.faces, self.skeleton = self.fk(current_frame_only)\n\n if current_frame_only:\n self.vertices[self.current_frame_id] = vertices\n self.joints[self.current_frame_id] = joints\n\n if self._is_rigged:\n self.skeleton_seq.current_joint_positions = joints\n\n # Use current frame data.\n if self._edit_mode:\n pose = self._edit_pose\n else:\n pose = torch.cat(\n [\n self.poses_root[self.current_frame_id],\n self.poses_body[self.current_frame_id],\n ],\n dim=-1,\n )\n\n # Update rigid bodies.\n if self.smpl_layer.model_type != \"flame\":\n global_oris = local_to_global(pose, self.skeleton[:, 0], output_format=\"rotmat\")\n global_oris = global_oris.reshape((-1, 3, 3))\n self.rbs.current_rb_ori = c2c(global_oris)\n self.rbs.current_rb_pos = self.joints[self.current_frame_id]\n\n # Update mesh.\n self.mesh_seq.current_vertices = vertices\n else:\n self.vertices = vertices\n self.joints = joints\n\n # Update skeleton.\n if self._is_rigged:\n self.skeleton_seq.joint_positions = self.joints\n\n # Extract poses including the edited pose.\n if self._edit_mode:\n poses_root = self.poses_root.clone()\n poses_body = self.poses_body.clone()\n\n poses_root[self.current_frame_id] = self._edit_pose[:3]\n poses_body[self.current_frame_id] = self._edit_pose[3:]\n else:\n poses_body = self.poses_body\n poses_root = self.poses_root\n\n # Update rigid bodies.\n if self.smpl_layer.model_type != \"flame\":\n global_oris = local_to_global(\n torch.cat([poses_root, poses_body], dim=-1),\n self.skeleton[:, 0],\n output_format=\"rotmat\",\n )\n global_oris = global_oris.reshape((self.n_frames, -1, 3, 3))\n self.rbs.rb_ori = 
c2c(global_oris)\n self.rbs.rb_pos = self.joints\n\n # Update mesh\n self.mesh_seq.vertices = vertices\n\n super().redraw(**kwargs)\n\n @property\n def edit_mode(self):\n return self._edit_mode\n\n @property\n def selected_mode(self):\n return self._selected_mode\n\n @selected_mode.setter\n def selected_mode(self, selected_mode):\n if self._selected_mode == selected_mode:\n return\n self._selected_mode = selected_mode\n\n if self.selected_mode == \"edit\":\n self.rbs.enabled = True\n self.rbs.is_selectable = False\n self._edit_pose = self.poses[self.current_frame_id].clone()\n\n # Disable picking for the mesh\n self.mesh_seq.backface_fragmap = True\n self.rbs.color = (1, 0, 0.5, 1.0)\n self._view_mode_color = self.mesh_seq.color\n self.mesh_seq.color = (\n *self._view_mode_color[:3],\n min(self._view_mode_color[3], 0.5),\n )\n else:\n self.mesh_seq.backface_fragmap = False\n self.mesh_seq.color = self._view_mode_color\n\n self.rbs.color = (0, 1, 0.5, 1.0)\n self.rbs.enabled = self._view_mode_joint_angles\n self.rbs.is_selectable = True\n\n self.redraw(current_frame_only=True)\n\n def _gui_joint(self, imgui, j, tree=None):\n name = \"unknown\"\n if self.smpl_layer.model_type == \"smplh\":\n if j < len(SMPLH_JOINT_NAMES):\n name = SMPLH_JOINT_NAMES[j]\n else:\n if j < len(JOINT_NAMES):\n name = JOINT_NAMES[j]\n\n if tree:\n e = imgui.tree_node(f\"{j} - {name}\")\n else:\n e = True\n imgui.text(f\"{j} - {name}\")\n\n if e:\n # Euler angles sliders.\n aa = self._edit_pose[j * 3 : (j + 1) * 3].cpu().numpy()\n euler = aa2euler_numpy(aa, degrees=True)\n\n _, self._edit_local_axes = imgui.checkbox(\"Local axes\", self._edit_local_axes)\n\n # If we are editing local axes generate an empty slider on top\n # of the euler angle sliders to capture the input of the slider\n # without modifying the euler angle values.\n if self._edit_local_axes:\n # Get the current draw position.\n pos = imgui.get_cursor_position()\n\n # Make the next widget transparent.\n imgui.push_style_var(imgui.STYLE_ALPHA, 0.0)\n u, new_euler = imgui.drag_float3(f\"\", 0, 0, 0, 0.003, format=\"\")\n imgui.pop_style_var()\n\n if u:\n base = Rotation.from_rotvec(aa)\n for i in range(3):\n delta = new_euler[i]\n if delta == 0:\n continue\n\n # Get the world coordinates of the current axis from the\n # respective column of the rotation matrix.\n axis = Rotation.as_matrix(base)[:, i]\n\n # Create a rotation of 'delta[i]' radians around the axis.\n rot = Rotation.from_rotvec(axis * delta)\n\n # Rotate the current joint and convert back to axis angle.\n aa = Rotation.as_rotvec(rot * base)\n\n self._edit_pose[j * 3 : (j + 1) * 3] = torch.from_numpy(aa)\n self._edit_pose_dirty = True\n self.redraw(current_frame_only=True)\n\n # Reset the draw position so that the next slider is drawn on top of this.\n imgui.set_cursor_pos(pos)\n\n name = \"Local XYZ\" if self._edit_local_axes else \"Euler XYZ\"\n u, euler = imgui.drag_float3(f\"{name}##joint{j}\", *euler, 0.1, format=\"%.3f\")\n if not self._edit_local_axes and u:\n aa = euler2aa_numpy(np.array(euler), degrees=True)\n self._edit_pose[j * 3 : (j + 1) * 3] = torch.from_numpy(aa)\n self._edit_pose_dirty = True\n self.redraw(current_frame_only=True)\n\n if tree:\n for c in tree.get(j, []):\n self._gui_joint(imgui, c, tree)\n imgui.tree_pop()\n\n def gui_mode_edit(self, imgui):\n skel = self.smpl_layer.skeletons()[\"body\"].cpu().numpy()\n\n tree = {}\n for i in range(skel.shape[1]):\n if skel[0, i] != -1:\n tree.setdefault(skel[0, i], []).append(skel[1, i])\n\n if not tree:\n return\n\n if 
self._edit_joint is None:\n self._gui_joint(imgui, 0, tree)\n else:\n self._gui_joint(imgui, self._edit_joint)\n\n if imgui.button(\"Apply\"):\n self.poses_root[self.current_frame_id] = self._edit_pose[:3]\n self.poses_body[self.current_frame_id] = self._edit_pose[3:]\n self._edit_pose_dirty = False\n self.redraw(current_frame_only=True)\n imgui.same_line()\n if imgui.button(\"Apply to all\"):\n edit_rots = Rotation.from_rotvec(np.reshape(self._edit_pose.cpu().numpy(), (-1, 3)))\n base_rots = Rotation.from_rotvec(np.reshape(self.poses[self.current_frame_id].cpu().numpy(), (-1, 3)))\n relative = edit_rots * base_rots.inv()\n for i in range(self.n_frames):\n root = Rotation.from_rotvec(np.reshape(self.poses_root[i].cpu().numpy(), (-1, 3)))\n self.poses_root[i] = torch.from_numpy((relative[0] * root).as_rotvec().flatten())\n\n body = Rotation.from_rotvec(np.reshape(self.poses_body[i].cpu().numpy(), (-1, 3)))\n self.poses_body[i] = torch.from_numpy((relative[1:] * body).as_rotvec().flatten())\n self._edit_pose_dirty = False\n self.redraw()\n imgui.same_line()\n if imgui.button(\"Reset\"):\n self._edit_pose = self.poses[self.current_frame_id]\n self._edit_pose_dirty = False\n self.redraw(current_frame_only=True)\n\n def gui_io(self, imgui):\n if imgui.button(\"Export sequence to NPZ\"):\n dir = os.path.join(C.export_dir, \"SMPL\")\n os.makedirs(dir, exist_ok=True)\n path = os.path.join(dir, self.name + \".npz\")\n self.export_to_npz(path)\n print(f'Exported SMPL sequence to \"{path}\"')\n\n def gui_context_menu(self, imgui, x: int, y: int):\n if self.edit_mode and self._edit_joint is not None:\n self._gui_joint(imgui, self._edit_joint)\n else:\n if imgui.radio_button(\"View mode\", not self.edit_mode):\n self.selected_mode = \"view\"\n imgui.close_current_popup()\n if imgui.radio_button(\"Edit mode\", self.edit_mode):\n self.selected_mode = \"edit\"\n imgui.close_current_popup()\n\n imgui.spacing()\n imgui.separator()\n imgui.spacing()\n super().gui_context_menu(imgui, x, y)\n\n def on_selection(self, node, instance_id, tri_id):\n if self.edit_mode:\n # Index of the joint that is currently being edited.\n if node != self.mesh_seq:\n self._edit_joint = instance_id\n self.rbs.color_one(self._edit_joint, (0.3, 0.4, 1, 1))\n else:\n self._edit_joint = None\n # Reset color of all spheres to the default color\n self.rbs.color = self.rbs.color\n\n def render_outline(self, *args, **kwargs):\n # Only render outline of the mesh, skipping skeleton and rigid bodies.\n self.mesh_seq.render_outline(*args, **kwargs)\n\n def add_frames(self, poses_body, poses_root=None, trans=None, betas=None):\n # Append poses_body.\n if len(poses_body.shape) == 1:\n poses_body = poses_body[np.newaxis]\n self.poses_body = torch.cat((self.poses_body, to_torch(poses_body, self.dtype, self.device)))\n\n # Append poses_root or zeros.\n if poses_root is None:\n poses_root = torch.zeros([len(poses_body), 3])\n elif len(poses_root.shape) == 1:\n poses_root = poses_root[np.newaxis]\n self.poses_root = torch.cat((self.poses_root, to_torch(poses_root, self.dtype, self.device)))\n\n # Append trans or zeros.\n if trans is None:\n trans = torch.zeros([len(poses_body), 3])\n elif len(trans.shape) == 1:\n trans = trans[np.newaxis]\n self.trans = torch.cat((self.trans, to_torch(trans, self.dtype, self.device)))\n\n # Append betas or zeros .\n if betas is None:\n # If we have only 1 frame of betas we don't need to append zeros, as the first\n # frame of betas will be broadcasted to all frames.\n if betas.shape[0] > 1:\n self.betas = 
torch.cat(\n (\n self.betas,\n to_torch(\n torch.zeros([1, self.smpl_layer.num_betas]),\n self.dtype,\n self.device,\n ),\n )\n )\n else:\n if len(betas.shape) == 1:\n betas = betas[np.newaxis]\n self.betas = torch.cat((self.betas, to_torch(betas, self.dtype, self.device)))\n\n self.n_frames = len(self.poses_body)\n self.redraw()\n\n def update_frames(self, poses_body, frames, poses_root=None, trans=None, betas=None):\n self.poses_body[frames] = to_torch(poses_body, self.dtype, self.device)\n if poses_root is not None:\n self.poses_root[frames] = to_torch(poses_root, self.dtype, self.device)\n if trans is not None:\n self.trans[frames] = to_torch(trans, self.dtype, self.device)\n if betas is not None:\n self.betas[frames] = to_torch(betas, self.dtype, self.device)\n self.redraw()\n\n def remove_frames(self, frames):\n frames_to_keep = torch.from_numpy(np.setdiff1d(np.arange(self.n_frames), frames)).to(\n dtype=torch.long, device=self.device\n )\n\n self.poses_body = self.poses_body[frames_to_keep]\n self.poses_root = self.poses_root[frames_to_keep]\n self.trans = self.trans[frames_to_keep]\n if self.betas.shape != 1:\n self.betas = self.betas[frames_to_keep]\n\n self.n_frames = len(self.poses_body)\n self.redraw()" } ]
import os
import numpy as np
from pathlib import Path
from random import random, shuffle
from aitviewer.configuration import CONFIG as C
from aitviewer.remote.renderables.meshes import RemoteMeshes
from aitviewer.remote.renderables.smpl import RemoteSMPLSequence
from aitviewer.remote.viewer import RemoteViewer
from aitviewer.renderables.smpl import SMPLSequence
11,027
# Copyright (C) 2023 ETH Zurich, Manuel Kaufmann, Velko Vechev, Dario Mylonopoulos

# Select subset of AMASS dataset to load.
a_seqs = list(Path(os.path.join(C.datasets.amass, "Dfaust")).rglob("*poses.npz"))
shuffle(a_seqs)

# Create (NxN) Grid.
N = 5
x = np.linspace(-5, 5, N)
z = np.linspace(-5, 5, N)
xv, zv = np.meshgrid(x, z)
xv = xv.reshape(N * N)
zv = zv.reshape(N * N)
yv = np.zeros(xv.shape[0]) + 0.6
positions = np.vstack((xv, yv, zv)).T

# Create Remote Viewer.
v: RemoteViewer = RemoteViewer.create_new_process()

for pos, seq in zip(positions, a_seqs):
# Copyright (C) 2023 ETH Zurich, Manuel Kaufmann, Velko Vechev, Dario Mylonopoulos

# Select subset of AMASS dataset to load.
a_seqs = list(Path(os.path.join(C.datasets.amass, "Dfaust")).rglob("*poses.npz"))
shuffle(a_seqs)

# Create (NxN) Grid.
N = 5
x = np.linspace(-5, 5, N)
z = np.linspace(-5, 5, N)
xv, zv = np.meshgrid(x, z)
xv = xv.reshape(N * N)
zv = zv.reshape(N * N)
yv = np.zeros(xv.shape[0]) + 0.6
positions = np.vstack((xv, yv, zv)).T

# Create Remote Viewer.
v: RemoteViewer = RemoteViewer.create_new_process()

for pos, seq in zip(positions, a_seqs):
local_smpl = SMPLSequence.from_amass(npz_data_path=seq, fps_out=60.0, end_frame=200, log=False)
4
2023-12-07 16:13:50+00:00
16k
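For reference, the code above is this record's completion context: it stops at the opening of the for-loop, and the single target line that follows it (local_smpl = SMPLSequence.from_amass(...)) is drawn from the SMPLSequence snippet in the record's context list (index 4). The sketch below shows how that loop could plausibly continue; only the SMPLSequence.from_amass(...) line is given by the record, while the RemoteSMPLSequence call and the per-sequence grid offset are assumptions based on the classes listed in the context, not part of the record itself.

for pos, seq in zip(positions, a_seqs):
    # Given by the record: load each AMASS sequence locally.
    local_smpl = SMPLSequence.from_amass(npz_data_path=seq, fps_out=60.0, end_frame=200, log=False)

    # Assumed continuation: forward the pose data to the remote viewer and
    # offset each sequence by its grid position so the meshes do not overlap.
    RemoteSMPLSequence(
        v,
        poses_body=local_smpl.poses_body.cpu().numpy(),
        poses_root=local_smpl.poses_root.cpu().numpy(),
        trans=local_smpl.trans.cpu().numpy() + pos,
        betas=local_smpl.betas.cpu().numpy(),
    )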
nexB/dejacode
workflow/views.py
[ { "identifier": "get_preserved_filters", "path": "dje/utils.py", "snippet": "def get_preserved_filters(request, model, parameter_name=\"_list_filters\"):\n \"\"\"\n Return the preserved filters querystring.\n Forked from django.contrib.admin.options.ModelAdmin\n \"\"\"\n match = request.resolver_match\n\n if match:\n opts = model._meta\n current_url = f\"{match.app_name}:{match.url_name}\"\n list_url = f\"{opts.app_label}:{opts.model_name}_list\"\n if current_url == list_url: # Craft the filter from URL\n preserved_filters = request.GET.urlencode()\n else: # Load the filter from the request\n preserved_filters = request.GET.get(parameter_name)\n\n if preserved_filters:\n return urlencode({parameter_name: preserved_filters})\n return \"\"" }, { "identifier": "group_by", "path": "dje/utils.py", "snippet": "def group_by(queryset, field_name, values=None, count_on=None, distinct=False):\n from django.db.models import Count\n\n values = values or [field_name]\n count_on = count_on or field_name\n\n return queryset.values(*values).order_by().annotate(count=Count(count_on, distinct=distinct))" }, { "identifier": "DataspacedFilterView", "path": "dje/views.py", "snippet": "class DataspacedFilterView(\n DataspaceScopeMixin,\n GetDataspaceMixin,\n HasPermissionMixin,\n TableHeaderMixin,\n PreviousNextPaginationMixin,\n FilterView,\n):\n template_name = \"object_list_base.html\"\n template_list_table = None\n paginate_by = settings.PAGINATE_BY or 100\n # Required if `show_previous_and_next_object_links` enabled on the\n # details view.\n put_results_in_session = False\n group_name_version = False\n strict = False\n\n def get_filterset_kwargs(self, filterset_class):\n \"\"\"\n Add the dataspace in the filterset kwargs.\n\n Deletes the page_kwarg from the data if present,\n so the current pagination value is not included in the filters.\n \"\"\"\n kwargs = super().get_filterset_kwargs(filterset_class)\n\n if self.page_kwarg in self.request.GET:\n data = self.request.GET.copy()\n del data[\"page\"]\n kwargs.update({\"data\": data})\n\n kwargs.update({\"dataspace\": self.dataspace})\n return kwargs\n\n def get_queryset(self):\n \"\"\"Scope the QuerySet with the request User Dataspace.\"\"\"\n return super().get_queryset().scope(self.dataspace)\n\n def get_extra_add_urls(self):\n extra_add_urls = []\n opts = self.model._meta\n\n with suppress(NoReverseMatch):\n import_url = reverse(f\"admin:{opts.app_label}_{opts.model_name}_import\")\n extra_add_urls.append((f\"Import {opts.verbose_name_plural}\", import_url))\n\n return extra_add_urls\n\n def get_context_data(self, **kwargs):\n context_data = super().get_context_data(**kwargs)\n\n if self.put_results_in_session:\n session_key = build_session_key(self.model._meta.verbose_name)\n object_ids = [int(obj.id) for obj in context_data[\"object_list\"]]\n self.request.session[session_key] = object_ids\n\n if self.group_name_version:\n if not self.request.GET.get(\"sort\", None):\n name_version_groups = group_by_name_version(context_data[\"object_list\"])\n else:\n name_version_groups = [[obj] for obj in context_data[\"object_list\"]]\n\n context_data.update(\n {\n \"name_version_groups\": name_version_groups,\n \"is_grouping_active\": bool(\n [1 for group in name_version_groups if len(group) > 1]\n ),\n }\n )\n\n opts = self.model._meta\n\n add_url = None\n with suppress(NoReverseMatch):\n add_url = reverse(f\"{opts.app_label}:{opts.model_name}_add\")\n\n context_data.update(\n {\n \"opts\": opts,\n \"add_url\": add_url,\n \"extra_add_urls\": 
self.get_extra_add_urls(),\n \"preserved_filters\": get_preserved_filters(self.request, self.model),\n # Required for compatibility with navbar_header.html\n \"search_query\": self.request.GET.get(\"q\", \"\"),\n \"template_list_table\": self.template_list_table,\n }\n )\n\n return context_data" }, { "identifier": "RequestFilterSet", "path": "workflow/filters.py", "snippet": "class RequestFilterSet(DataspacedFilterSet):\n related_only = [\n \"status\",\n \"request_template\",\n \"requester\",\n \"assignee\",\n \"priority\",\n ]\n q = SearchFilter(\n label=_(\"Search\"),\n search_fields=[\n \"title\",\n \"notes\",\n \"product_context__name\",\n \"product_context__version\",\n \"content_object_repr\",\n \"serialized_data\",\n ],\n )\n sort = DefaultOrderingFilter(\n label=_(\"Sort\"),\n empty_label=\"Recent activity (default)\",\n choices=(\n (\"-created_date\", \"Newest\"),\n (\"priority\", \"Priority\"),\n (\"status\", \"Status\"),\n (\"request_template\", \"Form\"),\n (\"requester\", \"Requester\"),\n (\"assignee\", \"Assignee\"),\n ),\n widget=DropDownAsListWidget,\n )\n following = FollowedByMeFilter(label=_(\"Following\"))\n\n class Meta:\n model = Request\n fields = (\n \"q\",\n \"status\",\n \"request_template\",\n \"requester\",\n \"assignee\",\n \"priority\",\n \"following\",\n )\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.filters[\"request_template\"].extra[\"to_field_name\"] = \"uuid\"\n self.filters[\"request_template\"].label = _(\"Form\")\n self.filters[\"requester\"].extra[\"to_field_name\"] = \"username\"\n self.filters[\"assignee\"].extra[\"to_field_name\"] = \"username\"\n self.filters[\"priority\"].extra[\"to_field_name\"] = \"label\"\n\n self.filters[\"request_template\"].extra[\"widget\"] = DropDownAsListWidget(label=\"Form\")\n for filter_name in [\"status\", \"requester\", \"assignee\", \"priority\"]:\n self.filters[filter_name].extra[\"widget\"] = DropDownAsListWidget()" }, { "identifier": "RequestAttachmentForm", "path": "workflow/forms.py", "snippet": "class RequestAttachmentForm(forms.ModelForm):\n SUBMIT_VALUE = _(\"Upload\")\n # `max_upload_size` is not provided, the MAX_UPLOAD_SIZE settings value is used.\n file = SmartFileField()\n\n class Meta:\n model = RequestAttachment\n fields = [\"file\"]\n\n def __init__(self, *args, **kwargs):\n self.user = kwargs.pop(\"user\")\n if not self.user:\n raise AttributeError(\"User is required.\")\n\n self.request_instance = kwargs.pop(\"request_instance\")\n if not self.request_instance:\n raise AttributeError(\"A Request instance is required.\")\n\n super().__init__(*args, **kwargs)\n\n @property\n def helper(self):\n helper = FormHelper()\n helper.form_method = \"post\"\n helper.form_id = \"request-attachment-form\"\n helper.form_show_labels = False\n helper.form_class = \"no-margin\"\n helper.attrs = {\n \"autocomplete\": \"off\",\n }\n helper.layout = Layout(\n \"file\",\n Submit(\"submit\", self.SUBMIT_VALUE, css_class=\"btn-success\"),\n )\n return helper\n\n def save(self, *args, **kwargs):\n # Do not change those values on edition\n if not self.instance.id:\n self.instance.dataspace = self.user.dataspace\n self.instance.uploader = self.user\n self.instance.request = self.request_instance\n\n return super().save(*args, **kwargs)" }, { "identifier": "RequestForm", "path": "workflow/forms.py", "snippet": "class RequestForm(forms.ModelForm):\n object_id = forms.CharField(\n widget=forms.HiddenInput,\n required=False,\n )\n applies_to = forms.CharField(\n label=_(\"Applies 
to\"),\n required=False,\n widget=AutocompleteInput(display_link=False),\n help_text=_(\n \"Identify the application object associated with this request. \"\n \"This can be a component, package, license, or product depending on \"\n \"the type of request.\"\n ),\n )\n add_object_to_product = forms.BooleanField(\n required=False,\n initial=True,\n help_text=_(\n 'Assign the object defined in \"Applies to\" to the Product defined '\n 'in \"Product context\" on creation of this Request.'\n ),\n )\n\n class Meta:\n model = Request\n fields = [\n \"title\",\n \"applies_to\",\n \"add_object_to_product\",\n \"product_context\",\n \"assignee\",\n \"priority\",\n \"status\",\n \"notes\",\n \"cc_emails\",\n ]\n widgets = {\n \"notes\": forms.Textarea(attrs={\"rows\": 2}),\n }\n\n def __init__(self, *args, **kwargs):\n self.user = kwargs.pop(\"user\")\n if not self.user:\n raise AttributeError(\"User is required.\")\n\n request_template = kwargs.pop(\"request_template\")\n if not request_template:\n raise AttributeError(\"RequestTemplate is required.\")\n self.request_template = request_template\n\n # Validate the given object_id, this Set self.content_object\n object_id = kwargs.get(\"initial\", {}).get(\"object_id\")\n if object_id:\n if not self.set_content_object(object_id):\n del kwargs[\"initial\"][\"object_id\"]\n\n super().__init__(*args, **kwargs)\n\n self.is_addition = not self.instance.id\n\n has_change_permission = all(\n [\n self.user.is_staff,\n self.user.has_perm(\"workflow.change_request\"),\n ]\n )\n if self.is_addition or not has_change_permission:\n del self.fields[\"status\"]\n del self.fields[\"notes\"]\n\n content_object = self.instance.content_object or getattr(self, \"content_object\", None)\n\n if request_template.include_applies_to:\n api_url = reverse(f\"api_v2:{request_template.content_type.model}-list\")\n self.fields[\"applies_to\"].widget.attrs.update({\"data-api_url\": api_url})\n self.set_applies_to_view_link(content_object)\n if content_object:\n self.fields[\"applies_to\"].initial = str(content_object)\n # Force text to avoid str() vs. 
int() equality issue in field.has_changed()\n self.fields[\"object_id\"].initial = str(content_object.id)\n else:\n del self.fields[\"applies_to\"]\n\n model_name = self.request_template.content_type.model\n include_add_object_to_product = all(\n [\n self.is_addition,\n request_template.include_applies_to,\n request_template.include_product,\n model_name in [\"component\", \"package\"],\n self.user.has_perm(f\"product_portfolio.add_product{model_name}\"),\n ]\n )\n if not include_add_object_to_product:\n del self.fields[\"add_object_to_product\"]\n\n if request_template.include_product:\n self.fields[\"product_context\"].queryset = Product.objects.get_queryset(self.user)\n else:\n del self.fields[\"product_context\"]\n\n assignee_field = self.fields[\"assignee\"]\n # The assignee requirement is done in the `self.clean()` method.\n # We do not enforce it on the HTML side since it can be automatically\n # set to self on \"Saving Draft\"\n assignee_field.required = False\n assignee_field.queryset = assignee_field.queryset.scope(self.user.dataspace)\n if request_template.default_assignee:\n assignee_field.initial = request_template.default_assignee\n\n priority_field = self.fields[\"priority\"]\n priority_field.queryset = priority_field.queryset.scope(self.user.dataspace)\n\n self.questions = request_template.questions.all()\n self.add_question_fields()\n\n def set_applies_to_view_link(self, content_object=None):\n if content_object:\n absolute_url = content_object.get_absolute_url()\n style = \"\"\n else:\n absolute_url = \"\"\n style = \"display: none;\"\n\n initial_label = self.fields[\"applies_to\"].label\n view_object_link_template = (\n '{} <a href=\"{}\" style=\"{}\" id=\"id_applies_to_link\" target=\"_blank\" '\n ' title=\"View object\" data-bs-toggle=\"tooltip\" aria-label=\"View object\">'\n '<i class=\"fas fa-external-link-alt ms-1\"></i>'\n \"</a>\"\n )\n view_object_link = format_html(\n view_object_link_template, initial_label, absolute_url, style\n )\n self.fields[\"applies_to\"].label = view_object_link\n\n @property\n def helper(self):\n helper = FormHelper()\n helper.form_tag = False\n helper.form_method = \"post\"\n helper.form_id = \"workflow-request-form\"\n\n question_fields = [\n Field(field) for field in self.fields.keys() if field not in self._meta.fields\n ]\n\n helper.layout = Layout(\n Fieldset(\n None,\n \"title\",\n *question_fields,\n ),\n HTML(\"<hr>\"),\n Fieldset(\n None,\n Button(\"cancel\", _(\"Cancel\"), css_class=\"btn-secondary\"),\n Div(\n submit_as_private,\n save_draft if self.is_addition else None,\n StrictSubmit(\"submit\", _(\"Submit\"), css_class=\"btn-success\"),\n css_class=\"float-end\",\n ),\n ),\n )\n\n return helper\n\n @property\n def helper_right_side(self):\n helper = FormHelper()\n helper.form_tag = False\n\n requester_html = \"\"\n if self.instance.id:\n requester = self.instance.requester\n created_date = naturaltime(self.instance.created_date)\n requester_html = (\n f\"<hr>\"\n f'<div class=\"small-label mb-3\">'\n f\"Created by <strong>{requester}</strong> {created_date}\"\n f\"</div>\"\n )\n\n request_fields = [\n Field(field, css_class=\"form-control-sm\")\n for field in self._meta.fields\n if field in self.fields.keys() and field != \"title\"\n ]\n\n helper.layout = Layout(\n Fieldset(\n None,\n *request_fields,\n HTML(requester_html),\n css_class=\"right-side\",\n )\n )\n\n return helper\n\n def add_question_fields(self):\n \"\"\"\n Create fields on this form instance from the list of questions assigned\n to the RequestTemplate.\n 
\"\"\"\n initial_data_as_dict = self.instance.get_serialized_data() if self.instance.id else {}\n\n for question in self.questions:\n extra = None\n if question.input_type == \"TextField\":\n question.input_type = \"CharField\"\n extra = {\"widget\": forms.Textarea(attrs={\"rows\": 2})}\n if question.input_type == \"BooleanField\":\n # Using a Select on purpose to make the Yes/No choice more clear\n question.input_type = \"ChoiceField\"\n choices = (\n (1, \"Yes\"),\n (0, \"No\"),\n )\n extra = {\n \"choices\": choices,\n \"widget\": forms.Select,\n }\n if question.input_type == \"DateField\":\n extra = {\n \"widget\": DatePicker,\n \"error_messages\": {\n \"invalid\": _(\"Enter a valid date: YYYY-MM-DD.\"),\n },\n }\n\n field_class = getattr(fields, question.input_type, None)\n if not field_class:\n continue\n\n field_args = {\n \"label\": question.label,\n \"required\": question.is_required,\n \"help_text\": question.help_text,\n }\n\n if extra:\n field_args.update(extra)\n\n value = initial_data_as_dict.get(question.label)\n if value:\n # Set proper type to avoid `field.has_changed` failure to compare\n if question.input_type == \"DateField\":\n value = parse_date(value)\n field_args.update({\"initial\": value})\n\n key = f\"field_{question.position}\"\n self.fields[key] = field_class(**field_args)\n\n def set_content_object(self, object_id):\n \"\"\"\n If an object_id is given when GETing the view, the validity of this id\n is checked on Form initialization.\n It's also validated on Form submission, using the usual clean_ methods.\n \"\"\"\n if not object_id or not self.request_template.include_applies_to:\n return\n\n content_type = self.request_template.content_type\n pk_field = \"uuid\" if len(object_id) == 36 else \"id\"\n filters = {\n \"dataspace\": self.user.dataspace,\n pk_field: object_id,\n }\n\n try:\n self.content_object = content_type.get_object_for_this_type(**filters)\n except ObjectDoesNotExist:\n # Instance with this id does not exists or not in the user dataspace\n return\n return object_id\n\n def clean_object_id(self):\n object_id = self.cleaned_data.get(\"object_id\")\n\n if not object_id or not self.request_template.include_applies_to:\n return\n\n if not self.set_content_object(object_id):\n self.add_error(\"applies_to\", \"Invalid value.\")\n raise forms.ValidationError(\"Invalid value.\")\n\n return object_id\n\n def clean(self):\n cleaned_data = super().clean()\n\n assignee = self.cleaned_data.get(\"assignee\")\n is_draft = \"save_draft\" in self.data\n if not assignee and not is_draft:\n self.add_error(\"assignee\", \"This field is required.\")\n\n applies_to = self.cleaned_data.get(\"applies_to\")\n object_id = self.cleaned_data.get(\"object_id\")\n content_object = getattr(self, \"content_object\", \"\")\n\n conditions = [\n applies_to and not object_id,\n applies_to and applies_to != str(content_object),\n ]\n\n if any(conditions):\n self.add_error(\"applies_to\", \"Invalid value.\")\n\n permission_error_msg = \"{} does not have the permission to view {}\"\n product_context = self.cleaned_data.get(\"product_context\")\n if assignee and product_context and not assignee.has_perm(\"view_product\", product_context):\n self.add_error(\"assignee\", permission_error_msg.format(assignee, product_context))\n\n if assignee and content_object:\n manager = content_object.__class__._default_manager\n if is_secured(manager) and not assignee.has_perm(\"view_product\", content_object):\n self.add_error(\"assignee\", permission_error_msg.format(assignee, 
content_object))\n\n return cleaned_data\n\n def save(self, *args, **kwargs):\n content_object = getattr(self, \"content_object\", None)\n # Set content_object rather than object_id so instance.content_object\n # is available in further processing (required in notifications)\n if content_object:\n self.instance.content_object = self.content_object\n else: # Remove the content_object\n self.instance.object_id = None\n\n serialized_data = {}\n for q in self.questions:\n cleaned_value = self.cleaned_data.get(f\"field_{q.position}\")\n # str Convert the datetime object in a string\n # None is stored as empty string rather than str(None)\n value = str(cleaned_value) if cleaned_value is not None else \"\"\n serialized_data[q.label] = value\n self.instance.serialized_data = json.dumps(serialized_data)\n\n # Protect those fields value on edition\n if not self.instance.id:\n self.instance.requester = self.user\n self.instance.dataspace = self.user.dataspace\n self.instance.request_template = self.request_template\n else:\n self.instance.last_modified_by = self.user\n\n if \"submit_as_private\" in self.data:\n self.instance.is_private = True\n\n if \"save_draft\" in self.data:\n self.instance.assignee = self.user\n self.instance.status = Request.Status.DRAFT\n\n instance = super().save(*args, **kwargs)\n\n product = self.cleaned_data.get(\"product_context\")\n model_name = self.request_template.content_type.model\n do_assign_objects = all(\n [\n self.is_addition,\n content_object,\n self.cleaned_data.get(\"add_object_to_product\"),\n product and product.can_be_changed_by(self.user),\n model_name in [\"component\", \"package\"],\n self.user.has_perm(f\"product_portfolio.add_product{model_name}\"),\n ]\n )\n if do_assign_objects:\n product.assign_objects([content_object], self.user)\n\n return instance" }, { "identifier": "Request", "path": "workflow/models.py", "snippet": "class Request(HistoryDateFieldsMixin, DataspacedModel):\n request_template = models.ForeignKey(\n to=\"workflow.RequestTemplate\",\n related_name=\"requests\",\n on_delete=models.PROTECT,\n editable=False,\n )\n\n class Status(models.TextChoices):\n OPEN = \"open\", _(\"Open\")\n CLOSED = \"closed\", _(\"Closed\")\n DRAFT = \"draft\", _(\"Draft\")\n\n status = models.CharField(\n max_length=10,\n choices=Status.choices,\n default=Status.OPEN,\n db_index=True,\n help_text=_(\n 'Status of the request. \"Draft\" indicates that the request is not '\n \"yet ready for action, pending further details from the requestor. \"\n '\"Open\" indicates that the assignee has not finished the requested '\n \"actions, and also that comments from all interested parties are \"\n 'welcome. \"Closed\" indicates that no further actions or comments '\n \"are needed or expected.\"\n ),\n )\n\n is_private = models.BooleanField(\n default=False,\n db_index=True,\n help_text=_(\n \"When checked, the details of this request are visible only\"\n \" to the original requester and to request reviewers, and \"\n \"other users only see a limited summary. 
As an \"\n \"administrator, you can check or un-check this indicator to\"\n \" make a request private or public.\"\n ),\n )\n\n notes = models.TextField(\n blank=True,\n help_text=_(\n \"Notes from one or more request reviewers regarding \"\n \"research, issues, and conclusions related to the \"\n \"request.\"\n ),\n )\n\n requester = models.ForeignKey(\n to=settings.AUTH_USER_MODEL,\n on_delete=models.PROTECT,\n related_name=\"request_as_requester\",\n editable=False,\n help_text=_(\"Creator of the request.\"),\n )\n\n assignee = models.ForeignKey(\n to=settings.AUTH_USER_MODEL,\n on_delete=models.SET_NULL,\n related_name=\"request_as_assignee\",\n limit_choices_to={\"is_staff\": True, \"is_active\": True},\n null=True,\n blank=True,\n help_text=_(\n \"The application user currently assigned to review the \"\n \"request and take appropriate action.\"\n ),\n )\n\n product_context = models.ForeignKey(\n to=\"product_portfolio.Product\",\n on_delete=models.SET_NULL,\n null=True,\n blank=True,\n # Bypass the validation in ForeignKey.validate()\n # Required since we do not have control over the QuerySet in that method.\n parent_link=True,\n help_text=_(\"Identify the Product impacted by your Request.\"),\n )\n\n serialized_data = models.TextField(\n blank=True,\n help_text=_(\n \"Optional data provided by the User making the request. \"\n \"Can be used by an Admin to pre-fill a form. Stored as \"\n \"JSON format.\"\n ),\n )\n\n content_type = models.ForeignKey(\n to=ContentType,\n on_delete=models.PROTECT,\n limit_choices_to=CONTENT_TYPES,\n help_text=_(\n \"Stores the type of the object requested. Supported types \"\n \"are Component, Package, License and Product\"\n ),\n )\n\n object_id = models.PositiveIntegerField(\n null=True,\n blank=True,\n db_index=True,\n help_text=_(\n \"ID of the object attached to this request. This is used \"\n \"in combination with the content_type for the \"\n \"content_object field.\"\n ),\n )\n\n # No need to be explicit about the content_type abd object_id field names as\n # we are using the default ones.\n content_object = GenericForeignKey()\n\n content_object_repr = models.CharField(\n max_length=1000,\n blank=True,\n help_text=_(\n \"String representation of the attached content_object if any. 
\"\n \"This is useful for search purposes and not intended for display.\"\n ),\n )\n\n priority = models.ForeignKey(\n to=\"workflow.Priority\",\n on_delete=models.PROTECT,\n null=True,\n blank=True,\n help_text=_(\n \"The priority is intended to provide team members with a guideline \"\n \"for selecting and assigning requests for additional action, based on the \"\n \"criticality of the request.\"\n ),\n )\n\n title = models.CharField(\n max_length=255,\n db_index=True,\n help_text=_(\"The Request Title is a concise statement of the Request purpose and content.\"),\n )\n\n cc_emails = ArrayField(\n base_field=models.EmailField(),\n null=True,\n blank=True,\n help_text=_(\n \"You can provide a comma-separated list of email addresses to publish email \"\n \"notifications to any users that should be aware of the progress of this request.\"\n ),\n )\n\n last_modified_by = LastModifiedByField()\n\n objects = DataspacedManager.from_queryset(RequestQuerySet)()\n\n class Meta:\n ordering = [\"-last_modified_date\"]\n unique_together = (\"dataspace\", \"uuid\")\n\n def __str__(self):\n return f\"#{self.pk}\"\n\n def save(self, *args, **kwargs):\n \"\"\"Add the `update_request_count` logic on the related `content_object`.\"\"\"\n self.content_type = self.request_template.content_type\n\n # Store the repr of the content_object for search purposes.\n if self.object_id:\n # Bypass the broken GenericForeignKey.__get__ introduced in\n # https://github.com/django/django/commit/cc4cb95\n try:\n self.content_object = self.content_type.get_object_for_this_type(\n id=self.object_id,\n )\n except ObjectDoesNotExist:\n pass\n else:\n self.content_object_repr = str(self.content_object)\n\n # `previous_object_id` logic is only required on edition.\n previous_object_id = None\n is_addition = self.pk\n if is_addition:\n previous_object_id = self.__class__.objects.get(pk=self.pk).object_id\n\n super().save(*args, **kwargs)\n\n # Need to be post-save so the current Request exists in the DB before the count()\n if self.content_object and not self.is_draft:\n self.content_object.update_request_count()\n\n # The `content_object` was changed or removed, we need to update the `request_count`\n # of the previous object instance too. 
Warning: The previous object may not exist anymore.\n if previous_object_id and previous_object_id != self.object_id:\n try:\n previous_object = self.content_type.get_object_for_this_type(id=previous_object_id)\n except ObjectDoesNotExist:\n return\n previous_object.update_request_count()\n\n def get_absolute_url(self):\n return reverse(\"workflow:request_details\", args=[self.uuid])\n\n @property\n def details_url(self):\n return self.get_absolute_url()\n\n def get_serialized_data(self):\n if not self.serialized_data:\n return {}\n\n try:\n serialized_data = json.loads(self.serialized_data)\n except (ValueError, TypeError):\n return {}\n\n if not isinstance(serialized_data, dict):\n return {}\n\n return serialized_data\n\n def get_serialized_data_as_list(self):\n \"\"\"Return a python iterable from the serialized_data field.\"\"\"\n serialized_data = self.get_serialized_data()\n if not serialized_data:\n return []\n\n return [\n {\n \"label\": question.label,\n \"input_type\": question.input_type,\n \"value\": serialized_data.get(question.label),\n }\n for question in self.request_template.questions.all()\n ]\n\n def get_serialized_data_as_html(self, html_template=\"{label}: {value}\", separator=\"<br>\"):\n \"\"\"Return a HTML content of the serialized_data.\"\"\"\n serialized_data = []\n for data in self.get_serialized_data_as_list():\n try:\n value = data[\"value\"]\n if data[\"input_type\"] == \"BooleanField\":\n value = \"Yes\" if bool(data.get(\"value\")) == 1 else \"No\"\n line = str(html_template).format(label=data[\"label\"], value=escape(value))\n except KeyError:\n return 'Error in the \"Serialized data\" value.'\n else:\n serialized_data.append(line)\n\n return format_html(separator.join(serialized_data))\n\n @property\n def serialized_data_html(self):\n return self.get_serialized_data_as_html()\n\n @property\n def is_open(self):\n return self.status == self.Status.OPEN\n\n @property\n def is_closed(self):\n return self.status == self.Status.CLOSED\n\n @property\n def is_draft(self):\n return self.status == self.Status.DRAFT\n\n def has_details_permission(self, user):\n \"\"\"\n Private Requests are not available to regular user unless he is the\n requester or is an administrator.\n \"\"\"\n return user == self.requester or user.is_staff or not self.is_private\n\n def has_edit_permission(self, user):\n \"\"\"\n Only the requester or an administrator can edit a Request,\n unless the Request is closed already.\n \"\"\"\n return (user == self.requester or user.is_staff) and not self.is_closed\n\n def has_close_permission(self, user):\n \"\"\"Only the requester can close a Request if not closed already.\"\"\"\n return user == self.requester and not self.is_closed\n\n def get_involved_users(self, exclude=None):\n \"\"\"\n Return the set of users involved is the Requests:\n - requestor\n - assignee\n - edited by (multiple)\n - commented by (multiple)\n \"\"\"\n users = {\n self.requester,\n *(event.user for event in self.events.all()),\n *(comment.user for comment in self.comments.all()),\n }\n\n # The assignee is now required on the RequestForm but not on the Request model.\n # Keeping this condition for compatibility with old Request instances.\n if self.assignee:\n users.add(self.assignee)\n\n if exclude:\n users.discard(exclude)\n\n return users\n\n def serialize_hook(self, hook):\n if \"hooks.slack.com\" in hook.target:\n return request_slack_payload(self, created=\"added\" in hook.event)\n\n from workflow.api import RequestSerializer\n\n serializer = RequestSerializer(self, 
context={\"request\": None})\n\n return {\n \"hook\": hook.dict(),\n \"data\": serializer.data,\n }" }, { "identifier": "RequestAttachment", "path": "workflow/models.py", "snippet": "class RequestAttachment(HistoryDateFieldsMixin, DataspacedModel):\n request = models.ForeignKey(\n to=\"workflow.Request\",\n on_delete=models.CASCADE,\n related_name=\"attachments\",\n )\n\n file = models.FileField(\n upload_to=generate_attachment_path,\n # Assuming the prefix is roughly 100 chars,\n # we need ~256 chars available for the actual filename.\n max_length=350,\n )\n\n uploader = models.ForeignKey(\n to=settings.AUTH_USER_MODEL,\n on_delete=models.PROTECT,\n editable=False,\n )\n\n class Meta:\n ordering = [\"created_date\"]\n unique_together = (\"dataspace\", \"uuid\")\n\n def __str__(self):\n return self.filename\n\n @cached_property\n def filename(self):\n return os.path.basename(self.file.name)\n\n def exists(self):\n \"\"\"Return True if the file exists on the filesystem.\"\"\"\n return self.file.storage.exists(self.file.name)\n\n def has_delete_permission(self, user):\n \"\"\"\n Only the Uploader or an administrator with the proper permissions\n can delete an Attachment.\n \"\"\"\n return user == self.uploader or (\n user.is_staff and user.has_perm(\"workflow.delete_requestattachment\")\n )" }, { "identifier": "RequestComment", "path": "workflow/models.py", "snippet": "class RequestComment(AbstractRequestEvent):\n request = models.ForeignKey(\n to=\"workflow.Request\",\n on_delete=models.CASCADE,\n related_name=\"comments\",\n )\n\n class Meta:\n ordering = [\"created_date\"]\n unique_together = (\"dataspace\", \"uuid\")\n\n def __str__(self):\n return f\"{self.user.username}: {self.text[:50]}...\"\n\n def has_delete_permission(self, user):\n \"\"\"\n Only the Commenter or an administrator with the proper permissions\n can delete a Comment.\n \"\"\"\n return user == self.user or (\n user.is_staff and user.has_perm(\"workflow.delete_requestcomment\")\n )\n\n def as_html(self):\n \"\"\"\n Convert user provided commented content into HTML using markdown.\n The URLs are converted into links using the bleach Linkify feature.\n The HTML code is sanitized using bleach to prevent XSS attacks.\n The clean needs to be applied to the Markdown’s output, not the input.\n\n See https://michelf.ca/blog/2010/markdown-and-xss/ for details.\n\n See also the chapter about safe mode in\n https://python-markdown.github.io/change_log/release-3.0/\n \"\"\"\n unsafe_html = markdown.markdown(\n text=self.text,\n extensions=[\"markdown.extensions.nl2br\"],\n )\n\n # Using `Cleaner()` with the 1LinkifyFilter1 to clean and linkify in one pass.\n # See https://bleach.readthedocs.io/en/latest/linkify.html notes\n cleaner = Cleaner(\n tags=markdown_tags,\n attributes=markdown_attrs,\n filters=[LinkifyFilter],\n )\n html = cleaner.clean(unsafe_html)\n\n return format_html(html)\n\n def serialize_hook(self, hook):\n if \"hooks.slack.com\" in hook.target:\n return request_comment_slack_payload(self)\n\n from workflow.api import RequestCommentSerializer\n from workflow.api import RequestSerializer\n\n comment_serializer = RequestCommentSerializer(self, context={\"request\": None})\n request_serializer = RequestSerializer(self.request, context={\"request\": None})\n\n data = comment_serializer.data\n data[\"request\"] = request_serializer.data\n\n return {\n \"hook\": hook.dict(),\n \"data\": data,\n }" }, { "identifier": "RequestEvent", "path": "workflow/models.py", "snippet": "class RequestEvent(AbstractRequestEvent):\n 
request = models.ForeignKey(\n to=\"workflow.Request\",\n on_delete=models.CASCADE,\n related_name=\"events\",\n )\n\n EDIT = 1\n ATTACHMENT = 2\n CLOSED = 3\n\n EVENT_TYPE_CHOICES = (\n (EDIT, \"Edition\"),\n (ATTACHMENT, \"Attachment\"),\n (CLOSED, \"Closed\"),\n )\n\n event_type = models.IntegerField(\n choices=EVENT_TYPE_CHOICES,\n )\n\n class Meta:\n ordering = [\"created_date\"]\n unique_together = (\"dataspace\", \"uuid\")\n\n def __str__(self):\n return f\"{self.get_event_type_display()} by {self.user.username}\"" }, { "identifier": "RequestTemplate", "path": "workflow/models.py", "snippet": "class RequestTemplate(HistoryFieldsMixin, DataspacedModel):\n \"\"\"\n WARNING: Modifying the schema of this model will require data migration\n (next to the usual schema migration).\n \"\"\"\n\n name = models.CharField(\n max_length=100,\n help_text=_(\"Unique name of the template.\"),\n )\n\n description = models.TextField(\n verbose_name=_(\"Request header text\"),\n help_text=_(\n \"Provide a title and/or general instructions to the Requestor about this \"\n \"Request form.\"\n ),\n )\n\n content_type = models.ForeignKey(\n to=ContentType,\n on_delete=models.PROTECT,\n verbose_name=_(\"object type\"),\n limit_choices_to=CONTENT_TYPES,\n help_text=_(\"You can define one Request Template for each application object.\"),\n )\n\n is_active = models.BooleanField(\n default=False,\n db_index=True,\n help_text=_(\n \"Enable this to set the current form active. \"\n \"Only one Form can be active per content type.\"\n ),\n )\n\n include_applies_to = models.BooleanField(\n default=True,\n help_text=_(\n 'Enable this to present an \"Applies to\" field to a requester creating a '\n \"request based on this template, or anyone subsequently editing that request. \"\n 'Disable it for a request that does not need an \"Applies to\" reference.'\n ),\n )\n\n include_product = models.BooleanField(\n default=False,\n help_text=_(\n \"Enable this to present a Product choice to a requester using this template. 
\"\n \"Disable it for a request that does not need a Product context.\"\n ),\n )\n\n default_assignee = models.ForeignKey(\n to=settings.AUTH_USER_MODEL,\n limit_choices_to={\"is_staff\": True, \"is_active\": True},\n on_delete=models.SET_NULL,\n null=True,\n blank=True,\n serialize=False,\n help_text=_(\n \"Optionally specify the application user that should be the first to review \"\n \"a request using this template, and should receive an email when the request \"\n \"is submitted.\"\n ),\n )\n\n objects = DataspacedManager.from_queryset(RequestTemplateQuerySet)()\n\n class Meta:\n unique_together = ((\"dataspace\", \"name\"), (\"dataspace\", \"uuid\"))\n ordering = [\"name\"]\n\n def __str__(self):\n return self.name\n\n def get_absolute_url(self):\n return reverse(\"workflow:request_add\", args=[self.uuid])\n\n @staticmethod\n def get_extra_relational_fields():\n return [\"questions\"]\n\n def create_request(self, **kwargs):\n if \"assignee\" not in kwargs and self.default_assignee:\n kwargs[\"assignee\"] = self.default_assignee\n\n return Request.objects.create(\n request_template=self,\n content_type=self.content_type,\n dataspace=self.dataspace,\n **kwargs,\n )" }, { "identifier": "send_request_comment_notification", "path": "workflow/notification.py", "snippet": "def send_request_comment_notification(comment, closed=False):\n \"\"\"\n Send an email notification following the addition of a comment on a Request.\n An email is sent to the users involved in the Request except for the\n user responsible for this action, regardless of their email_notification flag.\n \"\"\"\n req = comment.request\n\n content_object_verbose = \"\"\n if req.content_object:\n content_object_verbose = f\" for {req.content_object}\"\n\n data = {\n \"comment\": comment,\n \"req\": req,\n \"action\": \"closed\" if closed else \"commented\",\n \"content_object_verbose\": content_object_verbose,\n \"site_url\": settings.SITE_URL.rstrip(\"/\"),\n }\n\n subject = (\n \"Request {req}{content_object_verbose} \" \"{action} by {comment.user} in \" \"{req.dataspace}\"\n ).format(**data)\n\n body = render_to_string(\"workflow/comment_created_email.txt\", data)\n\n recipients = req.get_involved_users()\n\n emails = get_recipient_emails(recipients, req.cc_emails)\n send_mail_task.delay(subject, body, settings.DEFAULT_FROM_EMAIL, emails)\n\n # Remove the `comment.user` from the internal notification recipients\n recipients.discard(comment.user)\n\n internal_notification.send(\n sender=comment.user,\n verb=\"commented on Request\",\n action_object=req,\n recipient=list(recipients),\n description=comment.text,\n )" }, { "identifier": "send_request_notification", "path": "workflow/notification.py", "snippet": "def send_request_notification(req, created, extra=None):\n \"\"\"\n Send an email notification following a Request creation or edition.\n An email is sent to the users involved in the Request, based on their\n `workflow_email_notification` flag.\n \"\"\"\n content_object_verbose = f\" for {req.content_object}\" if req.content_object else \"\"\n action_user = req.requester if created else req.last_modified_by\n\n data = {\n \"req\": req,\n \"content_object_verbose\": content_object_verbose,\n \"action\": \"submitted\" if created else \"updated\",\n \"action_user\": action_user,\n \"site_url\": settings.SITE_URL.rstrip(\"/\"),\n }\n\n subject = (\n \"Request {req}{content_object_verbose} {action} \" \"by {action_user} in {req.dataspace}\"\n ).format(**data)\n\n template = \"request_created_email.txt\" if created else 
\"request_updated_email.txt\"\n body = render_to_string(f\"workflow/{template}\", data)\n\n recipients = req.get_involved_users()\n\n emails = get_recipient_emails(recipients, req.cc_emails)\n send_mail_task.delay(subject, body, settings.DEFAULT_FROM_EMAIL, emails)\n\n # Remove the `action_user` from the internal notification recipients\n recipients.discard(action_user)\n\n internal_notification.send(\n sender=action_user,\n verb=\"{} Request\".format(\"submitted\" if created else \"updated\"),\n action_object=req,\n recipient=list(recipients),\n description=extra.get(\"description\") if extra else \"\",\n )" } ]
from itertools import groupby
from operator import attrgetter
from urllib.parse import quote_plus
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.http import FileResponse
from django.http import Http404
from django.shortcuts import get_object_or_404
from django.shortcuts import redirect
from django.shortcuts import render
from django.utils.html import format_html
from dje.utils import get_preserved_filters
from dje.utils import group_by
from dje.views import DataspacedFilterView
from workflow.filters import RequestFilterSet
from workflow.forms import RequestAttachmentForm
from workflow.forms import RequestForm
from workflow.models import Request
from workflow.models import RequestAttachment
from workflow.models import RequestComment
from workflow.models import RequestEvent
from workflow.models import RequestTemplate
from workflow.notification import send_request_comment_notification
from workflow.notification import send_request_notification
11,372
    )

    form = RequestForm(
        request.POST or None,
        user=request.user,
        request_template=request_template,
        initial={"object_id": request.GET.get("content_object_id")},
    )

    if form.is_valid():
        instance = form.save()
        if instance.is_draft:
            msg = "Your request was saved as a draft and self-assigned to you."
        else:
            send_request_notification(instance, created=True)
            msg = (
                f"Your request was successfully submitted as {instance} with an "
                f"email notification to the assignee, and a copy to you.\n"
                f"You can open your Request at any time to add Attachments and/or "
                f"Comments."
            )

        msg += (
            f"\n"
            f'<a href="{request_template.get_absolute_url()}">'
            f'Add a new "{request_template.name}" Request'
            f"</a>"
        )
        messages.success(request, format_html(msg))
        return redirect(instance.get_absolute_url())

    return render(request, "workflow/request_form.html", {"form": form})


@login_required
def request_edit_view(request, request_uuid):
    """Edit a Request."""
    qs = Request.objects.for_edit_view(request.user)
    request_instance = get_object_or_404(qs, uuid=request_uuid, dataspace=request.user.dataspace)
    request_template = request_instance.request_template

    has_change_permission = request.user.has_perm("workflow.change_request")
    has_edit_permission = request_instance.has_edit_permission(request.user)
    if not has_edit_permission and not has_change_permission:
        raise Http404("No match for the given query.")

    form = RequestForm(
        request.POST or None,
        user=request.user,
        request_template=request_template,
        instance=request_instance,
    )

    if form.is_valid() and form.has_changed():
        instance = form.save()

        updated_labels = []
        for field_name in form.changed_data:
            if field_name == "applies_to":
                updated_labels.append("Applies to")
            # `object_id` is already referenced with `applies_to`
            elif field_name != "object_id":
                label = str(form.fields.get(field_name).label)
                updated_labels.append(label)
        updated_labels = ", ".join(updated_labels)

        if instance.is_draft:
            msg = "Your request was updated as a draft and self-assigned to you."
        else:
            msg = (
                f"Your request was successfully edited as {instance} with "
                f"an email notification to the requester and the assignee."
            )
            extra = {"description": f"Updated: {updated_labels}."}
            send_request_notification(instance, created=False, extra=extra)
            request_instance.events.create(
                user=request.user,
                text=f"Request edited. Updated: {updated_labels}.",
                event_type=RequestEvent.EDIT,
                dataspace=request.user.dataspace,
            )

        msg += (
            f"\n"
            f'<a href="{request_template.get_absolute_url()}">'
            f'Add a new "{request_template.name}" Request'
            f"</a>"
        )
        messages.success(request, format_html(msg))
        return redirect(request_instance)
    elif not form.has_changed():
        messages.warning(request, "No fields changed.")
        return redirect(request_instance)

    return render(
        request, "workflow/request_form.html", {"form": form, "request_instance": request_instance}
    )


def get_productrelation_review_status_summary(product):
    """Return the count of Product relationships for each review_status as links."""
    product_url = product.get_absolute_url()
    tab = "inventory"

    querysets = {
        "catalog": product.productcomponents.catalogs(),
        "custom": product.productcomponents.customs(),
        "package": product.productpackages.all(),
    }

    status_summary = {}
    for object_type, queryset in querysets.items():
        links = []
#
# Copyright (c) nexB Inc. and others. All rights reserved.
# DejaCode is a trademark of nexB Inc.
# SPDX-License-Identifier: AGPL-3.0-only
# See https://github.com/nexB/dejacode for support or download.
# See https://aboutcode.org for more information about AboutCode FOSS projects.
#


class RequestListView(
    LoginRequiredMixin,
    DataspacedFilterView,
):
    """Display a list of current Request objects."""

    model = Request
    filterset_class = RequestFilterSet
    template_name = "workflow/request_list.html"
    template_list_table = "workflow/includes/request_list_table.html"
    paginate_by = 50

    def get_queryset(self):
        """
        Scope the QuerySet to the current user dataspace.
        Instances with is_private=True are included in this QuerySet but those
        will not be displayed unless the user is the requester or a superuser.
        """
        return (
            super()
            .get_queryset()
            .for_list_view(user=self.request.user)
            .order_by("-last_modified_date")
        )

    def get_context_data(self, **kwargs):
        context_data = super().get_context_data(**kwargs)

        # order_by content_type matter for proper following groupby
        request_templates_qs = (
            RequestTemplate.objects.scope(self.request.user.dataspace)
            .actives()
            .select_related("content_type")
            .order_by("content_type", "name")
        )
        grouped = groupby(request_templates_qs, attrgetter("content_type"))

        # Converting into a list in the view as Django templates does not handle
        # well generators.
        request_templates_grouped = [
            (content_type, list(request_templates))
            for content_type, request_templates in grouped
        ]

        context_data.update(
            {
                "request_templates_grouped": request_templates_grouped,
            }
        )

        return context_data


@login_required
def request_add_view(request, template_uuid):
    """Form based on a RequestTemplate, to submit a new Request."""
    request_template = get_object_or_404(
        RequestTemplate, uuid=template_uuid, dataspace=request.user.dataspace
    )

    form = RequestForm(
        request.POST or None,
        user=request.user,
        request_template=request_template,
        initial={"object_id": request.GET.get("content_object_id")},
    )

    if form.is_valid():
        instance = form.save()
        if instance.is_draft:
            msg = "Your request was saved as a draft and self-assigned to you."
        else:
            send_request_notification(instance, created=True)
            msg = (
                f"Your request was successfully submitted as {instance} with an "
                f"email notification to the assignee, and a copy to you.\n"
                f"You can open your Request at any time to add Attachments and/or "
                f"Comments."
            )

        msg += (
            f"\n"
            f'<a href="{request_template.get_absolute_url()}">'
            f'Add a new "{request_template.name}" Request'
            f"</a>"
        )
        messages.success(request, format_html(msg))
        return redirect(instance.get_absolute_url())

    return render(request, "workflow/request_form.html", {"form": form})


@login_required
def request_edit_view(request, request_uuid):
    """Edit a Request."""
    qs = Request.objects.for_edit_view(request.user)
    request_instance = get_object_or_404(qs, uuid=request_uuid, dataspace=request.user.dataspace)
    request_template = request_instance.request_template

    has_change_permission = request.user.has_perm("workflow.change_request")
    has_edit_permission = request_instance.has_edit_permission(request.user)
    if not has_edit_permission and not has_change_permission:
        raise Http404("No match for the given query.")

    form = RequestForm(
        request.POST or None,
        user=request.user,
        request_template=request_template,
        instance=request_instance,
    )

    if form.is_valid() and form.has_changed():
        instance = form.save()

        updated_labels = []
        for field_name in form.changed_data:
            if field_name == "applies_to":
                updated_labels.append("Applies to")
            # `object_id` is already referenced with `applies_to`
            elif field_name != "object_id":
                label = str(form.fields.get(field_name).label)
                updated_labels.append(label)
        updated_labels = ", ".join(updated_labels)

        if instance.is_draft:
            msg = "Your request was updated as a draft and self-assigned to you."
        else:
            msg = (
                f"Your request was successfully edited as {instance} with "
                f"an email notification to the requester and the assignee."
            )
            extra = {"description": f"Updated: {updated_labels}."}
            send_request_notification(instance, created=False, extra=extra)
            request_instance.events.create(
                user=request.user,
                text=f"Request edited. Updated: {updated_labels}.",
                event_type=RequestEvent.EDIT,
                dataspace=request.user.dataspace,
            )

        msg += (
            f"\n"
            f'<a href="{request_template.get_absolute_url()}">'
            f'Add a new "{request_template.name}" Request'
            f"</a>"
        )
        messages.success(request, format_html(msg))
        return redirect(request_instance)
    elif not form.has_changed():
        messages.warning(request, "No fields changed.")
        return redirect(request_instance)

    return render(
        request, "workflow/request_form.html", {"form": form, "request_instance": request_instance}
    )


def get_productrelation_review_status_summary(product):
    """Return the count of Product relationships for each review_status as links."""
    product_url = product.get_absolute_url()
    tab = "inventory"

    querysets = {
        "catalog": product.productcomponents.catalogs(),
        "custom": product.productcomponents.customs(),
        "package": product.productpackages.all(),
    }

    status_summary = {}
    for object_type, queryset in querysets.items():
        links = []
for data in group_by(queryset, field_name="review_status", values=["review_status__label"]):
1
2023-12-07 16:57:42+00:00
16k
wusize/CLIM
src/open_clip/factory.py
[ { "identifier": "OPENAI_DATASET_MEAN", "path": "src/open_clip/constants.py", "snippet": "OPENAI_DATASET_MEAN = (0.48145466, 0.4578275, 0.40821073)" }, { "identifier": "OPENAI_DATASET_STD", "path": "src/open_clip/constants.py", "snippet": "OPENAI_DATASET_STD = (0.26862954, 0.26130258, 0.27577711)" }, { "identifier": "CLIP", "path": "src/open_clip/model.py", "snippet": "class CLIP(nn.Module):\n output_dict: torch.jit.Final[bool]\n\n def __init__(\n self,\n embed_dim: int,\n vision_cfg: CLIPVisionCfg,\n text_cfg: CLIPTextCfg,\n quick_gelu: bool = False,\n cast_dtype: Optional[torch.dtype] = None,\n output_dict: bool = False,\n freeze_text=True,\n ):\n assert freeze_text, 'For now we must freeze text'\n super().__init__()\n self.output_dict = output_dict\n self.visual = _build_vision_tower(embed_dim, vision_cfg, quick_gelu, cast_dtype)\n\n text = _build_text_tower(embed_dim, text_cfg, quick_gelu, cast_dtype)\n if freeze_text:\n print(f'Freeze text encoder parameters', flush=True)\n for param in text.parameters():\n param.requires_grad = False\n text.eval()\n self.transformer = text.transformer\n self.vocab_size = text.vocab_size\n self.embed_dim = embed_dim\n self.token_embedding = text.token_embedding\n self.positional_embedding = text.positional_embedding\n self.ln_final = text.ln_final\n self.text_projection = text.text_projection\n self.register_buffer('attn_mask', text.attn_mask, persistent=False)\n\n self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))\n\n def lock_image_tower(self, unlocked_groups=0, freeze_bn_stats=False, **kwargs):\n self.visual.lock(unlocked_groups=unlocked_groups, freeze_bn_stats=freeze_bn_stats)\n\n @torch.jit.ignore\n def set_grad_checkpointing(self, enable=True):\n self.visual.set_grad_checkpointing(enable)\n self.transformer.grad_checkpointing = enable\n\n def encode_image(self, image, normalize: bool = False):\n features = self.visual(image)\n return F.normalize(features, dim=-1) if normalize else features\n\n def encode_dense(self, image, normalize: bool = False, keep_shape=False):\n features = self.visual.encode_dense(image, keep_shape=keep_shape)\n if normalize:\n if keep_shape:\n features = F.normalize(features, dim=1)\n else:\n features = F.normalize(features, dim=-1)\n return features\n\n def encode_pseudo_boxes(self, image, normed_boxes, normalize: bool = False,\n extract_type='v1'):\n features = self.visual.extract_roi_features(image, normed_boxes,\n extract_type=extract_type)\n if normalize:\n features = F.normalize(features, dim=-1)\n return features\n\n def _pool_masks(self, image, masks, normalize, mask_attn=False):\n if mask_attn:\n mask_pooled = self.visual.mask_attn_pool(image, masks)\n else:\n mask_pooled = self.visual.mask_pool(image, masks)\n if normalize:\n mask_pooled = F.normalize(mask_pooled, dim=-1)\n return mask_pooled\n\n def _pool_masks_v3(self, image, masks, normalize):\n mask_pooled_v1, x_dense = self.visual.mask_attn_pool(image, masks, return_dense=True)\n x_dense = F.normalize(x_dense, dim=-1).flatten(1, 2) # bs, h*w, c\n x_dense = torch.repeat_interleave(\n x_dense, torch.tensor([len(m) for m in masks], device=x_dense.device), dim=0)\n masks = torch.cat(masks).float().flatten(-2, -1) # bs, h*w\n mask_pooled_v2 = (x_dense * masks.unsqueeze(-1)).sum(1) / masks.sum(1, keepdim=True)\n if normalize:\n mask_pooled_v1 = F.normalize(mask_pooled_v1, dim=-1)\n mask_pooled_v2 = F.normalize(mask_pooled_v2, dim=-1)\n return mask_pooled_v1, mask_pooled_v2\n\n def encode_masks(self, image, masks, normalize=True, 
mask_attn=False):\n return self._pool_masks(image, masks, normalize, mask_attn)\n\n def encode_text(self, text, normalize: bool = False):\n cast_dtype = self.transformer.get_cast_dtype()\n\n x = self.token_embedding(text).to(cast_dtype) # [batch_size, n_ctx, d_model]\n\n x = x + self.positional_embedding.to(cast_dtype)\n x = x.permute(1, 0, 2) # NLD -> LND\n x = self.transformer(x, attn_mask=self.attn_mask)\n x = x.permute(1, 0, 2) # LND -> NLD\n x = self.ln_final(x) # [batch_size, n_ctx, transformer.width]\n # take features from the eot embedding (eot_token is the highest number in each sequence)\n x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection\n return F.normalize(x, dim=-1) if normalize else x\n\n def forward(self, image, text=None):\n image_features = self.encode_image(image, normalize=True)\n if text is None:\n text_features = None\n else:\n text_features = self.encode_text(text, normalize=True)\n if self.output_dict:\n return {\n \"image_features\": image_features,\n \"text_features\": text_features,\n \"logit_scale\": self.logit_scale.exp()\n }\n return image_features, text_features, self.logit_scale.exp()\n\n def train(self, mode: bool = True):\n if not isinstance(mode, bool):\n raise ValueError(\"training mode is expected to be boolean\")\n self.training = mode\n for name, module in self.named_children():\n if name == 'visual':\n if mode:\n logging.info(f'========Set module {name} as train mode========')\n else:\n logging.info(f'========Set module {name} as eval mode========')\n module.train(mode)\n else:\n logging.info(f'========Set module {name} as eval mode========')\n module.train(mode=False)\n return self" }, { "identifier": "CustomTextCLIP", "path": "src/open_clip/model.py", "snippet": "class CustomTextCLIP(nn.Module):\n output_dict: torch.jit.Final[bool]\n\n def __init__(\n self,\n embed_dim: int,\n vision_cfg: CLIPVisionCfg,\n text_cfg: CLIPTextCfg,\n quick_gelu: bool = False,\n cast_dtype: Optional[torch.dtype] = None,\n output_dict: bool = False,\n ):\n super().__init__()\n self.output_dict = output_dict\n self.visual = _build_vision_tower(embed_dim, vision_cfg, quick_gelu, cast_dtype)\n self.text = _build_text_tower(embed_dim, text_cfg, quick_gelu, cast_dtype)\n self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))\n\n def lock_image_tower(self, unlocked_groups=0, freeze_bn_stats=False):\n # lock image tower as per LiT - https://arxiv.org/abs/2111.07991\n self.visual.lock(unlocked_groups=unlocked_groups, freeze_bn_stats=freeze_bn_stats)\n\n def lock_text_tower(self, unlocked_layers: int = 0, freeze_layer_norm: bool = True):\n self.text.lock(unlocked_layers, freeze_layer_norm)\n\n @torch.jit.ignore\n def set_grad_checkpointing(self, enable=True):\n self.visual.set_grad_checkpointing(enable)\n self.text.set_grad_checkpointing(enable)\n\n def encode_pseudo_boxes(self, image, normed_boxes, normalize: bool = False):\n features = self.visual.extract_roi_features(image, normed_boxes)\n return F.normalize(features, dim=-1) if normalize else features\n\n def encode_image(self, image, normalize: bool = False):\n features = self.visual(image)\n return F.normalize(features, dim=-1) if normalize else features\n\n def encode_text(self, text, normalize: bool = False):\n features = self.text(text)\n return F.normalize(features, dim=-1) if normalize else features\n\n def forward(self, image, text):\n image_features = self.encode_image(image, normalize=True)\n if text is None:\n text_features = None\n else:\n text_features = 
self.encode_text(text, normalize=True)\n if self.output_dict:\n return {\n \"image_features\": image_features,\n \"text_features\": text_features,\n \"logit_scale\": self.logit_scale.exp()\n }\n return image_features, text_features, self.logit_scale.exp()" }, { "identifier": "convert_weights_to_lp", "path": "src/open_clip/model.py", "snippet": "def convert_weights_to_lp(model: nn.Module, dtype=torch.float16):\n \"\"\"Convert applicable model parameters to low-precision (bf16 or fp16)\"\"\"\n\n def _convert_weights(l):\n if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)):\n l.weight.data = l.weight.data.to(dtype)\n if l.bias is not None:\n l.bias.data = l.bias.data.to(dtype)\n\n if isinstance(l, (nn.MultiheadAttention, Attention)):\n for attr in [*[f\"{s}_proj_weight\" for s in [\"in\", \"q\", \"k\", \"v\"]], \"in_proj_bias\", \"bias_k\", \"bias_v\"]:\n tensor = getattr(l, attr)\n if tensor is not None:\n tensor.data = tensor.data.to(dtype)\n\n for name in [\"text_projection\", \"proj\"]:\n if hasattr(l, name):\n attr = getattr(l, name)\n if attr is not None:\n attr.data = attr.data.to(dtype)\n\n model.apply(_convert_weights)" }, { "identifier": "convert_to_custom_text_state_dict", "path": "src/open_clip/model.py", "snippet": "def convert_to_custom_text_state_dict(state_dict: dict):\n if 'text_projection' in state_dict:\n # old format state_dict, move text tower -> .text\n new_state_dict = {}\n for k, v in state_dict.items():\n if any(k.startswith(p) for p in (\n 'text_projection',\n 'positional_embedding',\n 'token_embedding',\n 'transformer',\n 'ln_final',\n )):\n k = 'text.' + k\n new_state_dict[k] = v\n return new_state_dict\n return state_dict" }, { "identifier": "resize_pos_embed", "path": "src/open_clip/model.py", "snippet": "def resize_pos_embed(state_dict, model, interpolation: str = 'bicubic', antialias: bool = True):\n # Rescale the grid of position embeddings when loading from state_dict\n old_pos_embed = state_dict.get('visual.positional_embedding', None)\n if old_pos_embed is None or not hasattr(model.visual, 'grid_size'):\n return\n grid_size = to_2tuple(model.visual.grid_size)\n extra_tokens = 1 # FIXME detect different token configs (ie no class token, or more)\n new_seq_len = grid_size[0] * grid_size[1] + extra_tokens\n if new_seq_len == old_pos_embed.shape[0]:\n return\n\n if extra_tokens:\n pos_emb_tok, pos_emb_img = old_pos_embed[:extra_tokens], old_pos_embed[extra_tokens:]\n else:\n pos_emb_tok, pos_emb_img = None, old_pos_embed\n old_grid_size = to_2tuple(int(math.sqrt(len(pos_emb_img))))\n\n logging.info('Resizing position embedding grid-size from %s to %s', old_grid_size, grid_size)\n pos_emb_img = pos_emb_img.reshape(1, old_grid_size[0], old_grid_size[1], -1).permute(0, 3, 1, 2)\n pos_emb_img = F.interpolate(\n pos_emb_img,\n size=grid_size,\n mode=interpolation,\n antialias=antialias,\n align_corners=False,\n )\n pos_emb_img = pos_emb_img.permute(0, 2, 3, 1).reshape(1, grid_size[0] * grid_size[1], -1)[0]\n if pos_emb_tok is not None:\n new_pos_embed = torch.cat([pos_emb_tok, pos_emb_img], dim=0)\n else:\n new_pos_embed = pos_emb_img\n state_dict['visual.positional_embedding'] = new_pos_embed" }, { "identifier": "get_cast_dtype", "path": "src/open_clip/model.py", "snippet": "def get_cast_dtype(precision: str):\n cast_dtype = None\n if precision == 'bf16':\n cast_dtype = torch.bfloat16\n elif precision == 'fp16':\n cast_dtype = torch.float16\n return cast_dtype" }, { "identifier": "CoCa", "path": "src/open_clip/coca_model.py", "snippet": "class CoCa(nn.Module):\n 
def __init__(\n self,\n embed_dim,\n multimodal_cfg: MultimodalCfg,\n text_cfg: CLIPTextCfg,\n vision_cfg: CLIPVisionCfg,\n quick_gelu: bool = False,\n cast_dtype: Optional[torch.dtype] = None,\n pad_id: int = 0,\n ):\n super().__init__()\n multimodal_cfg = MultimodalCfg(**multimodal_cfg) if isinstance(multimodal_cfg, dict) else multimodal_cfg\n text_cfg = CLIPTextCfg(**text_cfg) if isinstance(text_cfg, dict) else text_cfg\n vision_cfg = CLIPVisionCfg(**vision_cfg) if isinstance(vision_cfg, dict) else vision_cfg\n\n self.text = _build_text_tower(\n embed_dim=embed_dim,\n text_cfg=text_cfg,\n quick_gelu=quick_gelu,\n cast_dtype=cast_dtype,\n )\n\n vocab_size = (\n text_cfg.vocab_size # for hf models\n if hasattr(text_cfg, \"hf_model_name\") and text_cfg.hf_model_name is not None\n else text_cfg.vocab_size\n )\n\n self.visual = _build_vision_tower(\n embed_dim=embed_dim,\n vision_cfg=vision_cfg,\n quick_gelu=quick_gelu,\n cast_dtype=cast_dtype,\n )\n\n self.text_decoder = _build_text_decoder_tower(\n vocab_size,\n multimodal_cfg=multimodal_cfg,\n quick_gelu=quick_gelu,\n cast_dtype=cast_dtype,\n )\n\n self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))\n self.pad_id = pad_id\n\n @torch.jit.ignore\n def set_grad_checkpointing(self, enable=True):\n self.visual.set_grad_checkpointing(enable)\n self.text.set_grad_checkpointing(enable)\n self.text_decoder.set_grad_checkpointing(enable)\n\n def _encode_image(self, images, normalize=True):\n image_latent, tokens_embs = self.visual(images)\n image_latent = F.normalize(image_latent, dim=-1) if normalize else image_latent\n return image_latent, tokens_embs\n\n def _encode_text(self, text, normalize=True, embed_cls=True):\n text = text[:, :-1] if embed_cls else text # make space for CLS token\n text_latent, token_emb = self.text(text)\n text_latent = F.normalize(text_latent, dim=-1) if normalize else text_latent\n return text_latent, token_emb\n\n def encode_image(self, images, normalize=True):\n image_latent, _ = self._encode_image(images, normalize=normalize)\n return image_latent\n\n def encode_text(self, text, normalize=True, embed_cls=True):\n text_latent, _ = self._encode_text(text, normalize=normalize, embed_cls=embed_cls)\n return text_latent\n\n def forward(self, image, text, embed_cls=True, image_latent=None, image_embs=None):\n text_latent, token_embs = self._encode_text(text, embed_cls=embed_cls)\n if image_latent is None or image_embs is None:\n image_latent, image_embs = self._encode_image(image)\n\n # TODO: add assertion to avoid bugs?\n labels = text[:, -token_embs.shape[1]:]\n\n logits = self.text_decoder(image_embs, token_embs)\n return {\n \"image_features\": image_latent,\n \"text_features\": text_latent,\n \"logits\": logits,\n \"labels\": labels,\n \"logit_scale\": self.logit_scale.exp()\n }\n\n def generate(\n self,\n image,\n text=None,\n seq_len=30,\n max_seq_len=77,\n temperature=1.,\n generation_type=\"beam_search\",\n top_p=0.1, # keep tokens in the 1 - top_p quantile\n top_k=1, # keeps the top_k most probable tokens\n pad_token_id=None,\n eos_token_id=None,\n sot_token_id=None,\n num_beams=6,\n num_beam_groups=3,\n min_seq_len=5,\n stopping_criteria=None,\n repetition_penalty=1.0,\n fixed_output_length=False # if True output.shape == (batch_size, seq_len)\n ):\n # taking many ideas and components from HuggingFace GenerationMixin\n # https://huggingface.co/docs/transformers/main/en/main_classes/text_generation\n assert _has_transformers, \"Please install transformers for generate functionality. 
`pip install transformers`.\"\n assert seq_len > min_seq_len, \"seq_len must be larger than min_seq_len\"\n\n with torch.no_grad():\n sot_token_id = 49406 if sot_token_id is None else sot_token_id\n eos_token_id = 49407 if eos_token_id is None else eos_token_id\n pad_token_id = self.pad_id if pad_token_id is None else pad_token_id\n logit_processor = LogitsProcessorList(\n [\n MinLengthLogitsProcessor(min_seq_len, eos_token_id),\n RepetitionPenaltyLogitsProcessor(repetition_penalty),\n ]\n )\n\n if stopping_criteria is None:\n stopping_criteria = [MaxLengthCriteria(max_length=seq_len)]\n\n stopping_criteria = StoppingCriteriaList(\n stopping_criteria\n )\n\n device = image.device\n\n if generation_type == \"beam_search\":\n output = self._generate_beamsearch(\n image_inputs = image,\n pad_token_id=pad_token_id,\n eos_token_id=eos_token_id,\n sot_token_id=sot_token_id,\n num_beams=num_beams,\n num_beam_groups=num_beam_groups,\n min_seq_len=min_seq_len,\n stopping_criteria=stopping_criteria,\n logit_processor=logit_processor,\n )\n if fixed_output_length and output.shape[1] < seq_len:\n return torch.cat(\n (output, torch.ones(output.shape[0], seq_len-output.shape[1], device=device, dtype=output.dtype) * self.pad_id),\n dim=1\n )\n return output\n\n elif generation_type == \"top_p\":\n logit_warper = GENERATION_TYPES[generation_type](top_p)\n elif generation_type == \"top_k\":\n logit_warper = GENERATION_TYPES[generation_type](top_k)\n else:\n raise ValueError(\n f\"generation_type has to be one of \"\n f\"{'| ' + ' | '.join(list(GENERATION_TYPES.keys())) + ' |'}.\"\n )\n\n image_latent, image_embs = self._encode_image(image)\n\n if text is None:\n text = torch.ones((image.shape[0], 1), device=device, dtype=torch.long) * sot_token_id\n\n was_training = self.training\n num_dims = len(text.shape)\n\n if num_dims == 1:\n text = text[None, :]\n\n cur_len = text.shape[1]\n self.eval()\n out = text\n\n while True:\n x = out[:, -max_seq_len:]\n cur_len = x.shape[1]\n logits = self(image, x, image_latent=image_latent, image_embs=image_embs, embed_cls=False)[\"logits\"][:, -1]\n mask = (out[:, -1] == eos_token_id) | (out[:, -1] == pad_token_id)\n sample = torch.ones((out.shape[0], 1), device=device, dtype=torch.long) * pad_token_id\n\n if mask.all():\n if not fixed_output_length:\n break\n else:\n logits = logits[~mask, :]\n filtered_logits = logit_processor(x[~mask, :], logits)\n filtered_logits = logit_warper(x[~mask, :], filtered_logits)\n probs = F.softmax(filtered_logits / temperature, dim=-1)\n\n if (cur_len + 1 == seq_len):\n sample[~mask, :] = torch.ones((sum(~mask), 1), device=device, dtype=torch.long) * eos_token_id\n else:\n sample[~mask, :] = torch.multinomial(probs, 1)\n\n out = torch.cat((out, sample), dim=-1)\n\n cur_len += 1\n\n if stopping_criteria(out, None):\n break\n\n if num_dims == 1:\n out = out.squeeze(0)\n\n self.train(was_training)\n return out\n\n def _generate_beamsearch(\n self,\n image_inputs,\n pad_token_id=None,\n eos_token_id=None,\n sot_token_id=None,\n num_beams=6,\n num_beam_groups=3,\n min_seq_len=5,\n stopping_criteria=None,\n logit_processor=None,\n logit_warper=None,\n ):\n device = image_inputs.device\n batch_size = image_inputs.shape[0]\n image_inputs = torch.repeat_interleave(image_inputs, num_beams, dim=0)\n image_latent, image_embs = self._encode_image(image_inputs)\n\n input_ids = torch.ones((batch_size * num_beams, 1), device=device, dtype=torch.long)\n input_ids = input_ids * sot_token_id\n beam_scorer = BeamSearchScorer(\n batch_size=batch_size,\n 
num_beams=num_beams,\n device=device,\n num_beam_groups=num_beam_groups,\n )\n # instantiate logits processors\n logits_processor = (\n LogitsProcessorList([MinLengthLogitsProcessor(min_seq_len, eos_token_id=eos_token_id)])\n if logit_processor is None\n else logit_processor\n )\n\n batch_size = len(beam_scorer._beam_hyps)\n num_beams = beam_scorer.num_beams\n num_beam_groups = beam_scorer.num_beam_groups\n num_sub_beams = num_beams // num_beam_groups\n batch_beam_size, cur_len = input_ids.shape\n beam_indices = None\n\n if num_beams * batch_size != batch_beam_size:\n raise ValueError(\n f\"Batch dimension of `input_ids` should be {num_beams * batch_size}, but is {batch_beam_size}.\"\n )\n\n beam_scores = torch.full((batch_size, num_beams), -1e9, dtype=torch.float, device=device)\n # initialise score of first beam of each group with 0 and the rest with 1e-9. This ensures that the beams in\n # the same group don't produce same tokens everytime.\n beam_scores[:, ::num_sub_beams] = 0\n beam_scores = beam_scores.view((batch_size * num_beams,))\n\n while True:\n\n # predicted tokens in cur_len step\n current_tokens = torch.zeros(batch_size * num_beams, dtype=input_ids.dtype, device=device)\n\n # indices which will form the beams in the next time step\n reordering_indices = torch.zeros(batch_size * num_beams, dtype=torch.long, device=device)\n\n # do one decoder step on all beams of all sentences in batch\n model_inputs = prepare_inputs_for_generation(input_ids=input_ids, image_inputs=image_inputs)\n outputs = self(\n model_inputs['images'],\n model_inputs['text'],\n embed_cls=False,\n image_latent=image_latent,\n image_embs=image_embs\n )\n\n for beam_group_idx in range(num_beam_groups):\n group_start_idx = beam_group_idx * num_sub_beams\n group_end_idx = min(group_start_idx + num_sub_beams, num_beams)\n group_size = group_end_idx - group_start_idx\n\n # indices of beams of current group among all sentences in batch\n batch_group_indices = []\n\n for batch_idx in range(batch_size):\n batch_group_indices.extend(\n [batch_idx * num_beams + idx for idx in range(group_start_idx, group_end_idx)]\n )\n group_input_ids = input_ids[batch_group_indices]\n\n # select outputs of beams of currentg group only\n next_token_logits = outputs['logits'][batch_group_indices, -1, :]\n vocab_size = next_token_logits.shape[-1]\n\n next_token_scores_processed = logits_processor(\n group_input_ids, next_token_logits, current_tokens=current_tokens, beam_group_idx=beam_group_idx\n )\n next_token_scores = next_token_scores_processed + beam_scores[batch_group_indices].unsqueeze(-1)\n next_token_scores = next_token_scores.expand_as(next_token_scores_processed)\n\n # reshape for beam search\n next_token_scores = next_token_scores.view(batch_size, group_size * vocab_size)\n\n next_token_scores, next_tokens = torch.topk(\n next_token_scores, 2 * group_size, dim=1, largest=True, sorted=True\n )\n\n next_indices = torch.div(next_tokens, vocab_size, rounding_mode=\"floor\")\n next_tokens = next_tokens % vocab_size\n\n # stateless\n process_beam_indices = sum(beam_indices, ()) if beam_indices is not None else None\n beam_outputs = beam_scorer.process(\n group_input_ids,\n next_token_scores,\n next_tokens,\n next_indices,\n pad_token_id=pad_token_id,\n eos_token_id=eos_token_id,\n beam_indices=process_beam_indices,\n )\n beam_scores[batch_group_indices] = beam_outputs[\"next_beam_scores\"]\n beam_next_tokens = beam_outputs[\"next_beam_tokens\"]\n beam_idx = beam_outputs[\"next_beam_indices\"]\n\n input_ids[batch_group_indices] = 
group_input_ids[beam_idx]\n group_input_ids = torch.cat([group_input_ids[beam_idx, :], beam_next_tokens.unsqueeze(-1)], dim=-1)\n current_tokens[batch_group_indices] = group_input_ids[:, -1]\n\n # (beam_idx // group_size) -> batch_idx\n # (beam_idx % group_size) -> offset of idx inside the group\n reordering_indices[batch_group_indices] = (\n num_beams * torch.div(beam_idx, group_size, rounding_mode=\"floor\") + group_start_idx + (beam_idx % group_size)\n )\n\n input_ids = torch.cat([input_ids, current_tokens.unsqueeze(-1)], dim=-1)\n\n # increase cur_len\n cur_len = cur_len + 1\n if beam_scorer.is_done or stopping_criteria(input_ids, None):\n break\n\n final_beam_indices = sum(beam_indices, ()) if beam_indices is not None else None\n sequence_outputs = beam_scorer.finalize(\n input_ids,\n beam_scores,\n next_tokens,\n next_indices,\n pad_token_id=pad_token_id,\n eos_token_id=eos_token_id,\n max_length=stopping_criteria.max_length,\n beam_indices=final_beam_indices,\n )\n return sequence_outputs['sequences']" }, { "identifier": "ClipLoss", "path": "src/open_clip/loss.py", "snippet": "class ClipLoss(nn.Module):\n\n def __init__(\n self,\n local_loss=False,\n gather_with_grad=False,\n cache_labels=False,\n rank=0,\n world_size=1,\n use_horovod=False,\n ):\n super().__init__()\n self.local_loss = local_loss\n self.gather_with_grad = gather_with_grad\n self.cache_labels = cache_labels\n self.rank = rank\n self.world_size = world_size\n self.use_horovod = use_horovod\n\n # cache state\n self.prev_num_logits = 0\n self.labels = {}\n\n def get_ground_truth(self, device, num_logits) -> torch.Tensor:\n # calculated ground-truth and cache if enabled\n if self.prev_num_logits != num_logits or device not in self.labels:\n labels = torch.arange(num_logits, device=device, dtype=torch.long)\n if self.world_size > 1 and self.local_loss:\n labels = labels + num_logits * self.rank\n if self.cache_labels:\n self.labels[device] = labels\n self.prev_num_logits = num_logits\n else:\n labels = self.labels[device]\n return labels\n\n def get_logits(self, image_features, text_features, logit_scale):\n if self.world_size > 1:\n all_image_features, all_text_features = gather_features(\n image_features, text_features,\n self.local_loss, self.gather_with_grad, self.rank, self.world_size, self.use_horovod)\n\n if self.local_loss:\n logits_per_image = logit_scale * image_features @ all_text_features.T\n logits_per_text = logit_scale * text_features @ all_image_features.T\n else:\n logits_per_image = logit_scale * all_image_features @ all_text_features.T\n logits_per_text = logits_per_image.T\n else:\n logits_per_image = logit_scale * image_features @ text_features.T\n logits_per_text = logit_scale * text_features @ image_features.T\n \n return logits_per_image, logits_per_text\n\n def forward(self, image_features, text_features, logit_scale, output_dict=False):\n device = image_features.device\n logits_per_image, logits_per_text = self.get_logits(image_features, text_features, logit_scale)\n\n labels = self.get_ground_truth(device, logits_per_image.shape[0])\n\n total_loss = (\n F.cross_entropy(logits_per_image, labels) +\n F.cross_entropy(logits_per_text, labels)\n ) / 2\n\n return {\"contrastive_loss\": total_loss} if output_dict else total_loss" }, { "identifier": "DistillClipLoss", "path": "src/open_clip/loss.py", "snippet": "class DistillClipLoss(ClipLoss):\n\n def dist_loss(self, teacher_logits, student_logits):\n loss = F.kl_div(student_logits.log_softmax(dim=1),\n teacher_logits.softmax(dim=1), 
reduction='batchmean')\n return loss\n # return -(teacher_logits.softmax(dim=1) * student_logits.log_softmax(dim=1)).sum(dim=1).mean(dim=0)\n\n def forward(\n self,\n image_features,\n text_features,\n logit_scale,\n dist_image_features,\n dist_text_features,\n dist_logit_scale,\n output_dict=False,\n ):\n logits_per_image, logits_per_text = \\\n self.get_logits(image_features, text_features, logit_scale)\n\n dist_logits_per_image, dist_logits_per_text = \\\n self.get_logits(dist_image_features, dist_text_features, dist_logit_scale)\n\n labels = self.get_ground_truth(image_features.device, logits_per_image.shape[0])\n\n contrastive_loss = (\n F.cross_entropy(logits_per_image, labels) +\n F.cross_entropy(logits_per_text, labels)\n ) / 2\n\n distill_loss = (\n self.dist_loss(dist_logits_per_image, logits_per_image) +\n self.dist_loss(dist_logits_per_text, logits_per_text)\n ) / 2\n\n if output_dict:\n return {\"contrastive_loss\": contrastive_loss, \"loss_kl\": distill_loss}\n\n return contrastive_loss, distill_loss" }, { "identifier": "CoCaLoss", "path": "src/open_clip/loss.py", "snippet": "class CoCaLoss(ClipLoss):\n def __init__(\n self,\n caption_loss_weight,\n clip_loss_weight,\n pad_id=0, # pad_token for open_clip custom tokenizer\n local_loss=False,\n gather_with_grad=False,\n cache_labels=False,\n rank=0,\n world_size=1,\n use_horovod=False,\n ):\n super().__init__(\n local_loss=local_loss,\n gather_with_grad=gather_with_grad,\n cache_labels=cache_labels,\n rank=rank,\n world_size=world_size,\n use_horovod=use_horovod\n )\n\n self.clip_loss_weight = clip_loss_weight\n self.caption_loss_weight = caption_loss_weight\n self.caption_loss = nn.CrossEntropyLoss(ignore_index=pad_id)\n\n def forward(self, image_features, text_features, logits, labels, logit_scale, output_dict=False):\n clip_loss = super().forward(image_features, text_features, logit_scale)\n clip_loss = self.clip_loss_weight * clip_loss\n\n caption_loss = self.caption_loss(\n logits.permute(0, 2, 1),\n labels,\n )\n caption_loss = caption_loss * self.caption_loss_weight\n\n if output_dict:\n return {\"contrastive_loss\": clip_loss, \"caption_loss\": caption_loss}\n\n return clip_loss, caption_loss" }, { "identifier": "load_openai_model", "path": "src/open_clip/openai.py", "snippet": "def load_openai_model(\n name: str,\n precision: Optional[str] = None,\n device: Optional[Union[str, torch.device]] = None,\n jit: bool = True,\n cache_dir: Optional[str] = None,\n):\n \"\"\"Load a CLIP model\n\n Parameters\n ----------\n name : str\n A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict\n precision: str\n Model precision, if None defaults to 'fp32' if device == 'cpu' else 'fp16'.\n device : Union[str, torch.device]\n The device to put the loaded model\n jit : bool\n Whether to load the optimized JIT model (default) or more hackable non-JIT model.\n cache_dir : Optional[str]\n The directory to cache the downloaded model weights\n\n Returns\n -------\n model : torch.nn.Module\n The CLIP model\n preprocess : Callable[[PIL.Image], torch.Tensor]\n A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input\n \"\"\"\n if device is None:\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n if precision is None:\n precision = 'fp32' if device == 'cpu' else 'fp16'\n\n if get_pretrained_url(name, 'openai'):\n model_path = download_pretrained_from_url(get_pretrained_url(name, 'openai'), cache_dir=cache_dir)\n elif 
os.path.isfile(name):\n model_path = name\n else:\n raise RuntimeError(f\"Model {name} not found; available models = {list_openai_models()}\")\n\n try:\n # loading JIT archive\n model = torch.jit.load(model_path, map_location=device if jit else \"cpu\").eval()\n state_dict = None\n except RuntimeError:\n # loading saved state dict\n if jit:\n warnings.warn(f\"File {model_path} is not a JIT archive. Loading as a state dict instead\")\n jit = False\n state_dict = torch.load(model_path, map_location=\"cpu\")\n\n if not jit:\n # Build a non-jit model from the OpenAI jitted model state dict\n cast_dtype = get_cast_dtype(precision)\n try:\n model = build_model_from_openai_state_dict(state_dict or model.state_dict(), cast_dtype=cast_dtype)\n except KeyError:\n sd = {k[7:]: v for k, v in state_dict[\"state_dict\"].items()}\n model = build_model_from_openai_state_dict(sd, cast_dtype=cast_dtype)\n\n # model from OpenAI state dict is in manually cast fp16 mode, must be converted for AMP/fp32/bf16 use\n model = model.to(device)\n if precision.startswith('amp') or precision == 'fp32':\n model.float()\n elif precision == 'bf16':\n convert_weights_to_lp(model, dtype=torch.bfloat16)\n\n return model\n\n # patch the device names\n device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[])\n device_node = [n for n in device_holder.graph.findAllNodes(\"prim::Constant\") if \"Device\" in repr(n)][-1]\n\n def patch_device(module):\n try:\n graphs = [module.graph] if hasattr(module, \"graph\") else []\n except RuntimeError:\n graphs = []\n\n if hasattr(module, \"forward1\"):\n graphs.append(module.forward1.graph)\n\n for graph in graphs:\n for node in graph.findAllNodes(\"prim::Constant\"):\n if \"value\" in node.attributeNames() and str(node[\"value\"]).startswith(\"cuda\"):\n node.copyAttributes(device_node)\n\n model.apply(patch_device)\n patch_device(model.encode_image)\n patch_device(model.encode_text)\n\n # patch dtype to float32 (typically for CPU)\n if precision == 'fp32':\n float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[])\n float_input = list(float_holder.graph.findNode(\"aten::to\").inputs())[1]\n float_node = float_input.node()\n\n def patch_float(module):\n try:\n graphs = [module.graph] if hasattr(module, \"graph\") else []\n except RuntimeError:\n graphs = []\n\n if hasattr(module, \"forward1\"):\n graphs.append(module.forward1.graph)\n\n for graph in graphs:\n for node in graph.findAllNodes(\"aten::to\"):\n inputs = list(node.inputs())\n for i in [1, 2]: # dtype can be the second or third argument to aten::to()\n if inputs[i].node()[\"value\"] == 5:\n inputs[i].node().copyAttributes(float_node)\n\n model.apply(patch_float)\n patch_float(model.encode_image)\n patch_float(model.encode_text)\n model.float()\n\n # ensure image_size attr available at consistent location for both jit and non-jit\n model.visual.image_size = model.input_resolution.item()\n return model" }, { "identifier": "is_pretrained_cfg", "path": "src/open_clip/pretrained.py", "snippet": "def is_pretrained_cfg(model: str, tag: str):\n if model not in _PRETRAINED:\n return False\n return _clean_tag(tag) in _PRETRAINED[model]" }, { "identifier": "get_pretrained_cfg", "path": "src/open_clip/pretrained.py", "snippet": "def get_pretrained_cfg(model: str, tag: str):\n if model not in _PRETRAINED:\n return {}\n model_pretrained = _PRETRAINED[model]\n return model_pretrained.get(_clean_tag(tag), {})" }, { "identifier": "download_pretrained", "path": 
"src/open_clip/pretrained.py", "snippet": "def download_pretrained(\n cfg: Dict,\n force_hf_hub: bool = False,\n cache_dir: Union[str, None] = None,\n):\n target = ''\n if not cfg:\n return target\n\n download_url = cfg.get('url', '')\n download_hf_hub = cfg.get('hf_hub', '')\n if download_hf_hub and force_hf_hub:\n # use HF hub even if url exists\n download_url = ''\n\n if download_url:\n target = download_pretrained_from_url(download_url, cache_dir=cache_dir)\n elif download_hf_hub:\n has_hf_hub(True)\n # we assume the hf_hub entries in pretrained config combine model_id + filename in\n # 'org/model_name/filename.pt' form. To specify just the model id w/o filename and\n # use 'open_clip_pytorch_model.bin' default, there must be a trailing slash 'org/model_name/'.\n model_id, filename = os.path.split(download_hf_hub)\n if filename:\n target = download_pretrained_from_hf(model_id, filename=filename, cache_dir=cache_dir)\n else:\n target = download_pretrained_from_hf(model_id, cache_dir=cache_dir)\n\n return target" }, { "identifier": "list_pretrained_tags_by_model", "path": "src/open_clip/pretrained.py", "snippet": "def list_pretrained_tags_by_model(model: str):\n \"\"\" return all pretrain tags for the specified model architecture \"\"\"\n tags = []\n if model in _PRETRAINED:\n tags.extend(_PRETRAINED[model].keys())\n return tags" }, { "identifier": "download_pretrained_from_hf", "path": "src/open_clip/pretrained.py", "snippet": "def download_pretrained_from_hf(\n model_id: str,\n filename: str = 'open_clip_pytorch_model.bin',\n revision=None,\n cache_dir: Union[str, None] = None,\n):\n has_hf_hub(True)\n cached_file = hf_hub_download(model_id, filename, revision=revision, cache_dir=cache_dir)\n return cached_file" }, { "identifier": "image_transform", "path": "src/open_clip/transform.py", "snippet": "def image_transform(\n image_size: int,\n is_train: bool,\n mean: Optional[Tuple[float, ...]] = None,\n std: Optional[Tuple[float, ...]] = None,\n resize_longest_max: bool = False,\n fill_color: int = 0,\n aug_cfg: Optional[Union[Dict[str, Any], AugmentationCfg]] = None,\n):\n mean = mean or OPENAI_DATASET_MEAN\n if not isinstance(mean, (list, tuple)):\n mean = (mean,) * 3\n\n std = std or OPENAI_DATASET_STD\n if not isinstance(std, (list, tuple)):\n std = (std,) * 3\n\n if isinstance(image_size, (list, tuple)) and image_size[0] == image_size[1]:\n # for square size, pass size as int so that Resize() uses aspect preserving shortest edge\n image_size = image_size[0]\n\n if isinstance(aug_cfg, dict):\n aug_cfg = AugmentationCfg(**aug_cfg)\n else:\n aug_cfg = aug_cfg or AugmentationCfg()\n normalize = Normalize(mean=mean, std=std)\n if is_train:\n aug_cfg_dict = {k: v for k, v in asdict(aug_cfg).items() if v is not None}\n use_timm = aug_cfg_dict.pop('use_timm', False)\n if use_timm:\n from timm.data import create_transform # timm can still be optional\n if isinstance(image_size, (tuple, list)):\n assert len(image_size) >= 2\n input_size = (3,) + image_size[-2:]\n else:\n input_size = (3, image_size, image_size)\n # by default, timm aug randomly alternates bicubic & bilinear for better robustness at inference time\n aug_cfg_dict.setdefault('interpolation', 'random')\n aug_cfg_dict.setdefault('color_jitter', None) # disable by default\n train_transform = create_transform(\n input_size=input_size,\n is_training=True,\n hflip=0.,\n mean=mean,\n std=std,\n re_mode='pixel',\n **aug_cfg_dict,\n )\n else:\n train_transform = Compose([\n RandomResizedCrop(\n image_size,\n 
scale=aug_cfg_dict.pop('scale'),\n interpolation=InterpolationMode.BICUBIC,\n ),\n _convert_to_rgb,\n ToTensor(),\n normalize,\n ])\n if aug_cfg_dict:\n warnings.warn(f'Unused augmentation cfg items, specify `use_timm` to use ({list(aug_cfg_dict.keys())}).')\n return train_transform\n else:\n if resize_longest_max:\n transforms = [\n ResizeMaxSize(image_size, fill=fill_color)\n ]\n else:\n transforms = [\n Resize(image_size, interpolation=InterpolationMode.BICUBIC),\n CenterCrop(image_size),\n ]\n transforms.extend([\n _convert_to_rgb,\n ToTensor(),\n normalize,\n ])\n return Compose(transforms)" }, { "identifier": "AugmentationCfg", "path": "src/open_clip/transform.py", "snippet": "class AugmentationCfg:\n scale: Tuple[float, float] = (0.9, 1.0)\n ratio: Optional[Tuple[float, float]] = None\n color_jitter: Optional[Union[float, Tuple[float, float, float]]] = None\n interpolation: Optional[str] = None\n re_prob: Optional[float] = None\n re_count: Optional[int] = None\n use_timm: bool = False" }, { "identifier": "det_image_transform", "path": "src/open_clip/transform.py", "snippet": "def det_image_transform(\n image_size: int,\n is_train: bool,\n mean: Optional[Tuple[float, ...]] = None,\n std: Optional[Tuple[float, ...]] = None,\n fill_color: int = 0,\n aug_cfg: Optional[Union[Dict[str, Any], AugmentationCfg]] = None,\n):\n mean = mean or OPENAI_DATASET_MEAN\n if not isinstance(mean, (list, tuple)):\n mean = (mean,) * 3\n\n std = std or OPENAI_DATASET_STD\n if not isinstance(std, (list, tuple)):\n std = (std,) * 3\n\n if isinstance(image_size, (list, tuple)) and image_size[0] == image_size[1]:\n # for square size, pass size as int so that Resize() uses aspect preserving shortest edge\n image_size = image_size[0]\n\n normalize = Normalize(mean=mean, std=std)\n if is_train:\n raise NotImplementedError\n else:\n transforms = [\n ResizeLongest(image_size, fill=fill_color),\n _convert_to_rgb,\n ToTensor(),\n normalize,\n ]\n return Compose(transforms)" }, { "identifier": "HFTokenizer", "path": "src/open_clip/tokenizer.py", "snippet": "class HFTokenizer:\n \"\"\"HuggingFace tokenizer wrapper\"\"\"\n\n def __init__(self, tokenizer_name: str):\n from transformers import AutoTokenizer\n self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)\n\n def save_pretrained(self, dest):\n self.tokenizer.save_pretrained(dest)\n\n def __call__(self, texts: Union[str, List[str]], context_length: int = 77) -> torch.Tensor:\n # same cleaning as for default tokenizer, except lowercasing\n # adding lower (for case-sensitive tokenizers) will make it more robust but less sensitive to nuance\n if isinstance(texts, str):\n texts = [texts]\n texts = [whitespace_clean(basic_clean(text)) for text in texts]\n input_ids = self.tokenizer(\n texts,\n return_tensors='pt',\n max_length=context_length,\n padding='max_length',\n truncation=True,\n ).input_ids\n return input_ids" }, { "identifier": "tokenize", "path": "src/open_clip/tokenizer.py", "snippet": "def tokenize(texts: Union[str, List[str]], context_length: int = 77) -> torch.LongTensor:\n \"\"\"\n Returns the tokenized representation of given input string(s)\n\n Parameters\n ----------\n texts : Union[str, List[str]]\n An input string or a list of input strings to tokenize\n context_length : int\n The context length to use; all CLIP models use 77 as the context length\n\n Returns\n -------\n A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length]\n \"\"\"\n if isinstance(texts, str):\n texts = [texts]\n\n 
sot_token = _tokenizer.encoder[\"<start_of_text>\"]\n eot_token = _tokenizer.encoder[\"<end_of_text>\"]\n all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts]\n result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)\n\n for i, tokens in enumerate(all_tokens):\n if len(tokens) > context_length:\n tokens = tokens[:context_length] # Truncate\n tokens[-1] = eot_token\n result[i, :len(tokens)] = torch.tensor(tokens)\n\n return result" } ]
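For orientation, the context entries above end with open_clip's `tokenize` helper and the `image_transform` factory. A minimal sketch of how the two are usually combined for eval-time preprocessing follows; the image path is a hypothetical placeholder and the import paths assume the code is installed as the `open_clip` package, so treat this as illustration rather than part of the record:

    from PIL import Image
    from open_clip.tokenizer import tokenize
    from open_clip.transform import image_transform

    # Eval-time transform: shortest-edge resize, 224 px center crop, OpenAI mean/std normalization.
    preprocess = image_transform(224, is_train=False)
    image = preprocess(Image.open("example.jpg")).unsqueeze(0)   # hypothetical path -> tensor [1, 3, 224, 224]
    text = tokenize(["a photo of a cat", "a photo of a dog"])    # LongTensor of shape [2, 77]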
import json import logging import os import pathlib import re import torch from copy import deepcopy from pathlib import Path from typing import Any, Dict, Optional, Tuple, Union from .constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD from .model import CLIP, CustomTextCLIP, convert_weights_to_lp, convert_to_custom_text_state_dict,\ resize_pos_embed, get_cast_dtype from .coca_model import CoCa from .loss import ClipLoss, DistillClipLoss, CoCaLoss from .openai import load_openai_model from .pretrained import is_pretrained_cfg, get_pretrained_cfg, \ download_pretrained, list_pretrained_tags_by_model, download_pretrained_from_hf from .transform import image_transform, AugmentationCfg, det_image_transform from .tokenizer import HFTokenizer, tokenize from open_clip import eva_clip from open_clip import eva_clip
13,828
cache_dir=cache_dir, ) # to always output dict even if it is clip if output_dict and hasattr(model, "output_dict"): model.output_dict = True else: model_cfg = model_cfg or get_model_config(model_name) if model_cfg is not None: logging.info(f'Loaded {model_name} model config.') else: logging.error(f'Model config for {model_name} not found; available models {list_models()}.') raise RuntimeError(f'Model config for {model_name} not found.') if force_quick_gelu: # override for use of QuickGELU on non-OpenAI transformer models model_cfg["quick_gelu"] = True if force_patch_dropout is not None: # override the default patch dropout value model_cfg["vision_cfg"]["patch_dropout"] = force_patch_dropout if force_image_size is not None: # override model config's image size model_cfg["vision_cfg"]["image_size"] = force_image_size if pretrained_image: if 'timm_model_name' in model_cfg.get('vision_cfg', {}): # pretrained weight loading for timm models set via vision_cfg model_cfg['vision_cfg']['timm_model_pretrained'] = True else: assert False, 'pretrained image towers currently only supported for timm models' cast_dtype = get_cast_dtype(precision) is_hf_model = 'hf_model_name' in model_cfg.get('text_cfg', {}) custom_text = model_cfg.pop('custom_text', False) or force_custom_text or is_hf_model if custom_text: if is_hf_model: model_cfg['text_cfg']['hf_model_pretrained'] = pretrained_hf if "coca" in model_name: model = CoCa(**model_cfg, cast_dtype=cast_dtype) else: model = CustomTextCLIP(**model_cfg, cast_dtype=cast_dtype) else: model = CLIP(**model_cfg, cast_dtype=cast_dtype) pretrained_loaded = False if pretrained: checkpoint_path = '' pretrained_cfg = get_pretrained_cfg(model_name, pretrained) if pretrained_cfg: checkpoint_path = download_pretrained(pretrained_cfg, cache_dir=cache_dir) elif os.path.exists(pretrained): checkpoint_path = pretrained if checkpoint_path: print(f'Loading pretrained {model_name} weights ({pretrained}).', flush=True) logging.info(f'Loading pretrained {model_name} weights ({pretrained}).') load_checkpoint(model, checkpoint_path) else: error_str = ( f'Pretrained weights ({pretrained}) not found for model {model_name}.' 
f'Available pretrained tags ({list_pretrained_tags_by_model(model_name)}.') logging.warning(error_str) raise RuntimeError(error_str) pretrained_loaded = True elif has_hf_hub_prefix: logging.info(f'Loading pretrained {model_name} weights ({pretrained}).') load_checkpoint(model, checkpoint_path) pretrained_loaded = True if require_pretrained and not pretrained_loaded: # callers of create_model_from_pretrained always expect pretrained weights raise RuntimeError( f'Pretrained weights were required for (model: {model_name}, pretrained: {pretrained}) but not loaded.') model.to(device=device) if precision in ("fp16", "bf16"): convert_weights_to_lp(model, dtype=torch.bfloat16 if precision == 'bf16' else torch.float16) # set image / mean metadata from pretrained_cfg if available, or use default model.visual.image_mean = pretrained_cfg.get('mean', None) or OPENAI_DATASET_MEAN model.visual.image_std = pretrained_cfg.get('std', None) or OPENAI_DATASET_STD # to always output dict even if it is clip if output_dict and hasattr(model, "output_dict"): model.output_dict = True if jit: model = torch.jit.script(model) return model def create_loss(args): return ClipLoss( local_loss=True, gather_with_grad=True, # use gather with grad cache_labels=True, rank=args.rank, world_size=args.world_size, use_horovod=args.horovod, ) def create_model_and_transforms( model_name: str, pretrained: Optional[str] = None, precision: str = 'fp32', device: Union[str, torch.device] = 'cpu', jit: bool = False, force_quick_gelu: bool = False, force_custom_text: bool = False, force_patch_dropout: Optional[float] = None, force_image_size: Optional[Union[int, Tuple[int, int]]] = None, pretrained_image: bool = False, pretrained_hf: bool = True, image_mean: Optional[Tuple[float, ...]] = None, image_std: Optional[Tuple[float, ...]] = None,
HF_HUB_PREFIX = 'hf-hub:' _MODEL_CONFIG_PATHS = [Path(__file__).parent / f"model_configs/"] _MODEL_CONFIGS = {} # directory (model_name: config) of model architecture configs def _natural_key(string_): return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_.lower())] def _rescan_model_configs(): global _MODEL_CONFIGS config_ext = ('.json',) config_files = [] for config_path in _MODEL_CONFIG_PATHS: if config_path.is_file() and config_path.suffix in config_ext: config_files.append(config_path) elif config_path.is_dir(): for ext in config_ext: config_files.extend(config_path.glob(f'*{ext}')) for cf in config_files: with open(cf, 'r') as f: model_cfg = json.load(f) if all(a in model_cfg for a in ('embed_dim', 'vision_cfg', 'text_cfg')): _MODEL_CONFIGS[cf.stem] = model_cfg _MODEL_CONFIGS = {k: v for k, v in sorted(_MODEL_CONFIGS.items(), key=lambda x: _natural_key(x[0]))} _rescan_model_configs() # initial populate of model config registry def list_models(): """ enumerate available model architectures based on config files """ return list(_MODEL_CONFIGS.keys()) def add_model_config(path): """ add model config path or file and update registry """ if not isinstance(path, Path): path = Path(path) _MODEL_CONFIG_PATHS.append(path) _rescan_model_configs() def get_model_config(model_name): if model_name in _MODEL_CONFIGS: return deepcopy(_MODEL_CONFIGS[model_name]) else: return None def get_tokenizer(model_name): if 'EVA' in model_name: return eva_clip.get_tokenizer(model_name) if model_name.startswith(HF_HUB_PREFIX): tokenizer = HFTokenizer(model_name[len(HF_HUB_PREFIX):]) else: config = get_model_config(model_name) tokenizer = HFTokenizer( config['text_cfg']['hf_tokenizer_name']) if 'hf_tokenizer_name' in config['text_cfg'] else tokenize return tokenizer def load_state_dict(checkpoint_path: str, map_location='cpu'): checkpoint = torch.load(checkpoint_path, map_location=map_location) if isinstance(checkpoint, dict) and 'state_dict' in checkpoint: state_dict = checkpoint['state_dict'] else: state_dict = checkpoint if next(iter(state_dict.items()))[0].startswith('module'): state_dict = {k[7:]: v for k, v in state_dict.items()} return state_dict def load_checkpoint(model, checkpoint_path, strict=True): state_dict = load_state_dict(checkpoint_path) # detect old format and make compatible with new format if 'positional_embedding' in state_dict and not hasattr(model, 'positional_embedding'): state_dict = convert_to_custom_text_state_dict(state_dict) resize_pos_embed(state_dict, model) incompatible_keys = model.load_state_dict(state_dict, strict=strict) return incompatible_keys def create_model( model_name: str, pretrained: Optional[str] = None, precision: str = 'fp32', device: Union[str, torch.device] = 'cpu', jit: bool = False, force_quick_gelu: bool = False, force_custom_text: bool = False, force_patch_dropout: Optional[float] = None, force_image_size: Optional[Union[int, Tuple[int, int]]] = None, pretrained_image: bool = False, pretrained_hf: bool = True, cache_dir: Optional[str] = None, output_dict: Optional[bool] = None, require_pretrained: bool = False, ): has_hf_hub_prefix = model_name.startswith(HF_HUB_PREFIX) if has_hf_hub_prefix: model_id = model_name[len(HF_HUB_PREFIX):] checkpoint_path = download_pretrained_from_hf(model_id, cache_dir=cache_dir) config_path = download_pretrained_from_hf(model_id, filename='open_clip_config.json', cache_dir=cache_dir) with open(config_path, 'r', encoding='utf-8') as f: config = json.load(f) pretrained_cfg = config['preprocess_cfg'] model_cfg = 
config['model_cfg'] else: model_name = model_name.replace('/', '-') # for callers using old naming with / in ViT names checkpoint_path = None pretrained_cfg = {} model_cfg = None if isinstance(device, str): device = torch.device(device) if pretrained == 'eva': return eva_clip.create_model(model_name=model_name, pretrained=cache_dir, force_custom_clip=True, precision=precision, device=device,) if pretrained and pretrained.lower() == 'openai': logging.info(f'Loading pretrained {model_name} from OpenAI.') model = load_openai_model( model_name, precision=precision, device=device, jit=jit, cache_dir=cache_dir, ) # to always output dict even if it is clip if output_dict and hasattr(model, "output_dict"): model.output_dict = True else: model_cfg = model_cfg or get_model_config(model_name) if model_cfg is not None: logging.info(f'Loaded {model_name} model config.') else: logging.error(f'Model config for {model_name} not found; available models {list_models()}.') raise RuntimeError(f'Model config for {model_name} not found.') if force_quick_gelu: # override for use of QuickGELU on non-OpenAI transformer models model_cfg["quick_gelu"] = True if force_patch_dropout is not None: # override the default patch dropout value model_cfg["vision_cfg"]["patch_dropout"] = force_patch_dropout if force_image_size is not None: # override model config's image size model_cfg["vision_cfg"]["image_size"] = force_image_size if pretrained_image: if 'timm_model_name' in model_cfg.get('vision_cfg', {}): # pretrained weight loading for timm models set via vision_cfg model_cfg['vision_cfg']['timm_model_pretrained'] = True else: assert False, 'pretrained image towers currently only supported for timm models' cast_dtype = get_cast_dtype(precision) is_hf_model = 'hf_model_name' in model_cfg.get('text_cfg', {}) custom_text = model_cfg.pop('custom_text', False) or force_custom_text or is_hf_model if custom_text: if is_hf_model: model_cfg['text_cfg']['hf_model_pretrained'] = pretrained_hf if "coca" in model_name: model = CoCa(**model_cfg, cast_dtype=cast_dtype) else: model = CustomTextCLIP(**model_cfg, cast_dtype=cast_dtype) else: model = CLIP(**model_cfg, cast_dtype=cast_dtype) pretrained_loaded = False if pretrained: checkpoint_path = '' pretrained_cfg = get_pretrained_cfg(model_name, pretrained) if pretrained_cfg: checkpoint_path = download_pretrained(pretrained_cfg, cache_dir=cache_dir) elif os.path.exists(pretrained): checkpoint_path = pretrained if checkpoint_path: print(f'Loading pretrained {model_name} weights ({pretrained}).', flush=True) logging.info(f'Loading pretrained {model_name} weights ({pretrained}).') load_checkpoint(model, checkpoint_path) else: error_str = ( f'Pretrained weights ({pretrained}) not found for model {model_name}.' 
f'Available pretrained tags ({list_pretrained_tags_by_model(model_name)}.') logging.warning(error_str) raise RuntimeError(error_str) pretrained_loaded = True elif has_hf_hub_prefix: logging.info(f'Loading pretrained {model_name} weights ({pretrained}).') load_checkpoint(model, checkpoint_path) pretrained_loaded = True if require_pretrained and not pretrained_loaded: # callers of create_model_from_pretrained always expect pretrained weights raise RuntimeError( f'Pretrained weights were required for (model: {model_name}, pretrained: {pretrained}) but not loaded.') model.to(device=device) if precision in ("fp16", "bf16"): convert_weights_to_lp(model, dtype=torch.bfloat16 if precision == 'bf16' else torch.float16) # set image / mean metadata from pretrained_cfg if available, or use default model.visual.image_mean = pretrained_cfg.get('mean', None) or OPENAI_DATASET_MEAN model.visual.image_std = pretrained_cfg.get('std', None) or OPENAI_DATASET_STD # to always output dict even if it is clip if output_dict and hasattr(model, "output_dict"): model.output_dict = True if jit: model = torch.jit.script(model) return model def create_loss(args): return ClipLoss( local_loss=True, gather_with_grad=True, # use gather with grad cache_labels=True, rank=args.rank, world_size=args.world_size, use_horovod=args.horovod, ) def create_model_and_transforms( model_name: str, pretrained: Optional[str] = None, precision: str = 'fp32', device: Union[str, torch.device] = 'cpu', jit: bool = False, force_quick_gelu: bool = False, force_custom_text: bool = False, force_patch_dropout: Optional[float] = None, force_image_size: Optional[Union[int, Tuple[int, int]]] = None, pretrained_image: bool = False, pretrained_hf: bool = True, image_mean: Optional[Tuple[float, ...]] = None, image_std: Optional[Tuple[float, ...]] = None,
aug_cfg: Optional[Union[Dict[str, Any], AugmentationCfg]] = None,
19
2023-12-09 05:43:08+00:00
16k
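Putting the factory code from this first record together, a typical end-to-end call might look like the sketch below. The model name and pretrained tag are hypothetical placeholders for whatever the config registry and pretrained table in this fork actually contain, so this is an assumption-laden sketch rather than the repo's documented API:

    import torch
    import open_clip
    from PIL import Image

    # create_model_and_transforms() builds the CLIP model plus train/eval preprocessing;
    # get_tokenizer() returns either an HF tokenizer or the default `tokenize` callable.
    model, _, preprocess = open_clip.create_model_and_transforms("ViT-B-32", pretrained="laion2b_s34b_b79k")
    tokenizer = open_clip.get_tokenizer("ViT-B-32")
    model.eval()

    with torch.no_grad():
        image_features = model.encode_image(preprocess(Image.open("cat.jpg")).unsqueeze(0))  # hypothetical image
        text_features = model.encode_text(tokenizer(["a photo of a cat", "a photo of a dog"]))
        image_features = image_features / image_features.norm(dim=-1, keepdim=True)
        text_features = text_features / text_features.norm(dim=-1, keepdim=True)
        probs = (100.0 * image_features @ text_features.T).softmax(dim=-1)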
moonshot-admin/moonshot
third-party/tqdm-4.66.1/tqdm/auto.py
[ { "identifier": "TqdmExperimentalWarning", "path": "third-party/tqdm-4.66.1/tqdm/std.py", "snippet": "class TqdmExperimentalWarning(TqdmWarning, FutureWarning):\n \"\"\"beta feature, unstable API and behaviour\"\"\"\n pass" }, { "identifier": "tqdm", "path": "third-party/tqdm-4.66.1/tqdm/asyncio.py", "snippet": "class tqdm_asyncio(std_tqdm):\n def __init__(self, iterable=None, *args, **kwargs):\n def __aiter__(self):\n async def __anext__(self):\n def send(self, *args, **kwargs):\n def as_completed(cls, fs, *, loop=None, timeout=None, total=None, **tqdm_kwargs):\n async def gather(cls, *fs, loop=None, timeout=None, total=None, **tqdm_kwargs):\n async def wrap_awaitable(i, f):\ndef tarange(*args, **kwargs):" }, { "identifier": "tqdm", "path": "third-party/tqdm-4.66.1/tqdm/std.py", "snippet": "class tqdm(Comparable):\n \"\"\"\n Decorate an iterable object, returning an iterator which acts exactly\n like the original iterable, but prints a dynamically updating\n progressbar every time a value is requested.\n\n Parameters\n ----------\n iterable : iterable, optional\n Iterable to decorate with a progressbar.\n Leave blank to manually manage the updates.\n desc : str, optional\n Prefix for the progressbar.\n total : int or float, optional\n The number of expected iterations. If unspecified,\n len(iterable) is used if possible. If float(\"inf\") or as a last\n resort, only basic progress statistics are displayed\n (no ETA, no progressbar).\n If `gui` is True and this parameter needs subsequent updating,\n specify an initial arbitrary large positive number,\n e.g. 9e9.\n leave : bool, optional\n If [default: True], keeps all traces of the progressbar\n upon termination of iteration.\n If `None`, will leave only if `position` is `0`.\n file : `io.TextIOWrapper` or `io.StringIO`, optional\n Specifies where to output the progress messages\n (default: sys.stderr). Uses `file.write(str)` and `file.flush()`\n methods. For encoding, see `write_bytes`.\n ncols : int, optional\n The width of the entire output message. If specified,\n dynamically resizes the progressbar to stay within this bound.\n If unspecified, attempts to use environment width. The\n fallback is a meter width of 10 and no limit for the counter and\n statistics. If 0, will not print any meter (only stats).\n mininterval : float, optional\n Minimum progress display update interval [default: 0.1] seconds.\n maxinterval : float, optional\n Maximum progress display update interval [default: 10] seconds.\n Automatically adjusts `miniters` to correspond to `mininterval`\n after long display update lag. Only works if `dynamic_miniters`\n or monitor thread is enabled.\n miniters : int or float, optional\n Minimum progress display update interval, in iterations.\n If 0 and `dynamic_miniters`, will automatically adjust to equal\n `mininterval` (more CPU efficient, good for tight loops).\n If > 0, will skip display of specified number of iterations.\n Tweak this and `mininterval` to get very efficient loops.\n If your progress is erratic with both fast and slow iterations\n (network, skipping items, etc) you should set miniters=1.\n ascii : bool or str, optional\n If unspecified or False, use unicode (smooth blocks) to fill\n the meter. The fallback is to use ASCII characters \" 123456789#\".\n disable : bool, optional\n Whether to disable the entire progressbar wrapper\n [default: False]. 
If set to None, disable on non-TTY.\n unit : str, optional\n String that will be used to define the unit of each iteration\n [default: it].\n unit_scale : bool or int or float, optional\n If 1 or True, the number of iterations will be reduced/scaled\n automatically and a metric prefix following the\n International System of Units standard will be added\n (kilo, mega, etc.) [default: False]. If any other non-zero\n number, will scale `total` and `n`.\n dynamic_ncols : bool, optional\n If set, constantly alters `ncols` and `nrows` to the\n environment (allowing for window resizes) [default: False].\n smoothing : float, optional\n Exponential moving average smoothing factor for speed estimates\n (ignored in GUI mode). Ranges from 0 (average speed) to 1\n (current/instantaneous speed) [default: 0.3].\n bar_format : str, optional\n Specify a custom bar string formatting. May impact performance.\n [default: '{l_bar}{bar}{r_bar}'], where\n l_bar='{desc}: {percentage:3.0f}%|' and\n r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, '\n '{rate_fmt}{postfix}]'\n Possible vars: l_bar, bar, r_bar, n, n_fmt, total, total_fmt,\n percentage, elapsed, elapsed_s, ncols, nrows, desc, unit,\n rate, rate_fmt, rate_noinv, rate_noinv_fmt,\n rate_inv, rate_inv_fmt, postfix, unit_divisor,\n remaining, remaining_s, eta.\n Note that a trailing \": \" is automatically removed after {desc}\n if the latter is empty.\n initial : int or float, optional\n The initial counter value. Useful when restarting a progress\n bar [default: 0]. If using float, consider specifying `{n:.3f}`\n or similar in `bar_format`, or specifying `unit_scale`.\n position : int, optional\n Specify the line offset to print this bar (starting from 0)\n Automatic if unspecified.\n Useful to manage multiple bars at once (eg, from threads).\n postfix : dict or *, optional\n Specify additional stats to display at the end of the bar.\n Calls `set_postfix(**postfix)` if possible (dict).\n unit_divisor : float, optional\n [default: 1000], ignored unless `unit_scale` is True.\n write_bytes : bool, optional\n Whether to write bytes. If (default: False) will write unicode.\n lock_args : tuple, optional\n Passed to `refresh` for intermediate output\n (initialisation, iterating, and updating).\n nrows : int, optional\n The screen height. If specified, hides nested bars outside this\n bound. If unspecified, attempts to use environment height.\n The fallback is 20.\n colour : str, optional\n Bar colour (e.g. 'green', '#00ff00').\n delay : float, optional\n Don't display until [default: 0] seconds have elapsed.\n gui : bool, optional\n WARNING: internal parameter - do not use.\n Use tqdm.gui.tqdm(...) instead. 
If set, will attempt to use\n matplotlib animations for a graphical output [default: False].\n\n Returns\n -------\n out : decorated iterator.\n \"\"\"\n\n monitor_interval = 10 # set to 0 to disable the thread\n monitor = None\n _instances = WeakSet()\n\n @staticmethod\n def format_sizeof(num, suffix='', divisor=1000):\n \"\"\"\n Formats a number (greater than unity) with SI Order of Magnitude\n prefixes.\n\n Parameters\n ----------\n num : float\n Number ( >= 1) to format.\n suffix : str, optional\n Post-postfix [default: ''].\n divisor : float, optional\n Divisor between prefixes [default: 1000].\n\n Returns\n -------\n out : str\n Number with Order of Magnitude SI unit postfix.\n \"\"\"\n for unit in ['', 'k', 'M', 'G', 'T', 'P', 'E', 'Z']:\n if abs(num) < 999.5:\n if abs(num) < 99.95:\n if abs(num) < 9.995:\n return '{0:1.2f}'.format(num) + unit + suffix\n return '{0:2.1f}'.format(num) + unit + suffix\n return '{0:3.0f}'.format(num) + unit + suffix\n num /= divisor\n return '{0:3.1f}Y'.format(num) + suffix\n\n @staticmethod\n def format_interval(t):\n \"\"\"\n Formats a number of seconds as a clock time, [H:]MM:SS\n\n Parameters\n ----------\n t : int\n Number of seconds.\n\n Returns\n -------\n out : str\n [H:]MM:SS\n \"\"\"\n mins, s = divmod(int(t), 60)\n h, m = divmod(mins, 60)\n if h:\n return '{0:d}:{1:02d}:{2:02d}'.format(h, m, s)\n else:\n return '{0:02d}:{1:02d}'.format(m, s)\n\n @staticmethod\n def format_num(n):\n \"\"\"\n Intelligent scientific notation (.3g).\n\n Parameters\n ----------\n n : int or float or Numeric\n A Number.\n\n Returns\n -------\n out : str\n Formatted number.\n \"\"\"\n f = '{0:.3g}'.format(n).replace('+0', '+').replace('-0', '-')\n n = str(n)\n return f if len(f) < len(n) else n\n\n @staticmethod\n def status_printer(file):\n \"\"\"\n Manage the printing and in-place updating of a line of characters.\n Note that if the string is longer than a line, then in-place\n updating may not work (it will print a new line at each refresh).\n \"\"\"\n fp = file\n fp_flush = getattr(fp, 'flush', lambda: None) # pragma: no cover\n if fp in (sys.stderr, sys.stdout):\n getattr(sys.stderr, 'flush', lambda: None)()\n getattr(sys.stdout, 'flush', lambda: None)()\n\n def fp_write(s):\n fp.write(str(s))\n fp_flush()\n\n last_len = [0]\n\n def print_status(s):\n len_s = disp_len(s)\n fp_write('\\r' + s + (' ' * max(last_len[0] - len_s, 0)))\n last_len[0] = len_s\n\n return print_status\n\n @staticmethod\n def format_meter(n, total, elapsed, ncols=None, prefix='', ascii=False, unit='it',\n unit_scale=False, rate=None, bar_format=None, postfix=None,\n unit_divisor=1000, initial=0, colour=None, **extra_kwargs):\n \"\"\"\n Return a string-based progress bar given some parameters\n\n Parameters\n ----------\n n : int or float\n Number of finished iterations.\n total : int or float\n The expected total number of iterations. If meaningless (None),\n only basic progress statistics are displayed (no ETA).\n elapsed : float\n Number of seconds passed since start.\n ncols : int, optional\n The width of the entire output message. If specified,\n dynamically resizes `{bar}` to stay within this bound\n [default: None]. If `0`, will not print any bar (only stats).\n The fallback is `{bar:10}`.\n prefix : str, optional\n Prefix message (included in total width) [default: ''].\n Use as {desc} in bar_format string.\n ascii : bool, optional or str, optional\n If not set, use unicode (smooth blocks) to fill the meter\n [default: False]. 
The fallback is to use ASCII characters\n \" 123456789#\".\n unit : str, optional\n The iteration unit [default: 'it'].\n unit_scale : bool or int or float, optional\n If 1 or True, the number of iterations will be printed with an\n appropriate SI metric prefix (k = 10^3, M = 10^6, etc.)\n [default: False]. If any other non-zero number, will scale\n `total` and `n`.\n rate : float, optional\n Manual override for iteration rate.\n If [default: None], uses n/elapsed.\n bar_format : str, optional\n Specify a custom bar string formatting. May impact performance.\n [default: '{l_bar}{bar}{r_bar}'], where\n l_bar='{desc}: {percentage:3.0f}%|' and\n r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, '\n '{rate_fmt}{postfix}]'\n Possible vars: l_bar, bar, r_bar, n, n_fmt, total, total_fmt,\n percentage, elapsed, elapsed_s, ncols, nrows, desc, unit,\n rate, rate_fmt, rate_noinv, rate_noinv_fmt,\n rate_inv, rate_inv_fmt, postfix, unit_divisor,\n remaining, remaining_s, eta.\n Note that a trailing \": \" is automatically removed after {desc}\n if the latter is empty.\n postfix : *, optional\n Similar to `prefix`, but placed at the end\n (e.g. for additional stats).\n Note: postfix is usually a string (not a dict) for this method,\n and will if possible be set to postfix = ', ' + postfix.\n However other types are supported (#382).\n unit_divisor : float, optional\n [default: 1000], ignored unless `unit_scale` is True.\n initial : int or float, optional\n The initial counter value [default: 0].\n colour : str, optional\n Bar colour (e.g. 'green', '#00ff00').\n\n Returns\n -------\n out : Formatted meter and stats, ready to display.\n \"\"\"\n\n # sanity check: total\n if total and n >= (total + 0.5): # allow float imprecision (#849)\n total = None\n\n # apply custom scale if necessary\n if unit_scale and unit_scale not in (True, 1):\n if total:\n total *= unit_scale\n n *= unit_scale\n if rate:\n rate *= unit_scale # by default rate = self.avg_dn / self.avg_dt\n unit_scale = False\n\n elapsed_str = tqdm.format_interval(elapsed)\n\n # if unspecified, attempt to use rate = average speed\n # (we allow manual override since predicting time is an arcane art)\n if rate is None and elapsed:\n rate = (n - initial) / elapsed\n inv_rate = 1 / rate if rate else None\n format_sizeof = tqdm.format_sizeof\n rate_noinv_fmt = ((format_sizeof(rate) if unit_scale else\n '{0:5.2f}'.format(rate)) if rate else '?') + unit + '/s'\n rate_inv_fmt = (\n (format_sizeof(inv_rate) if unit_scale else '{0:5.2f}'.format(inv_rate))\n if inv_rate else '?') + 's/' + unit\n rate_fmt = rate_inv_fmt if inv_rate and inv_rate > 1 else rate_noinv_fmt\n\n if unit_scale:\n n_fmt = format_sizeof(n, divisor=unit_divisor)\n total_fmt = format_sizeof(total, divisor=unit_divisor) if total is not None else '?'\n else:\n n_fmt = str(n)\n total_fmt = str(total) if total is not None else '?'\n\n try:\n postfix = ', ' + postfix if postfix else ''\n except TypeError:\n pass\n\n remaining = (total - n) / rate if rate and total else 0\n remaining_str = tqdm.format_interval(remaining) if rate else '?'\n try:\n eta_dt = (datetime.now() + timedelta(seconds=remaining)\n if rate and total else datetime.utcfromtimestamp(0))\n except OverflowError:\n eta_dt = datetime.max\n\n # format the stats displayed to the left and right sides of the bar\n if prefix:\n # old prefix setup work around\n bool_prefix_colon_already = (prefix[-2:] == \": \")\n l_bar = prefix if bool_prefix_colon_already else prefix + \": \"\n else:\n l_bar = ''\n\n r_bar = f'| 
{n_fmt}/{total_fmt} [{elapsed_str}<{remaining_str}, {rate_fmt}{postfix}]'\n\n # Custom bar formatting\n # Populate a dict with all available progress indicators\n format_dict = {\n # slight extension of self.format_dict\n 'n': n, 'n_fmt': n_fmt, 'total': total, 'total_fmt': total_fmt,\n 'elapsed': elapsed_str, 'elapsed_s': elapsed,\n 'ncols': ncols, 'desc': prefix or '', 'unit': unit,\n 'rate': inv_rate if inv_rate and inv_rate > 1 else rate,\n 'rate_fmt': rate_fmt, 'rate_noinv': rate,\n 'rate_noinv_fmt': rate_noinv_fmt, 'rate_inv': inv_rate,\n 'rate_inv_fmt': rate_inv_fmt,\n 'postfix': postfix, 'unit_divisor': unit_divisor,\n 'colour': colour,\n # plus more useful definitions\n 'remaining': remaining_str, 'remaining_s': remaining,\n 'l_bar': l_bar, 'r_bar': r_bar, 'eta': eta_dt,\n **extra_kwargs}\n\n # total is known: we can predict some stats\n if total:\n # fractional and percentage progress\n frac = n / total\n percentage = frac * 100\n\n l_bar += '{0:3.0f}%|'.format(percentage)\n\n if ncols == 0:\n return l_bar[:-1] + r_bar[1:]\n\n format_dict.update(l_bar=l_bar)\n if bar_format:\n format_dict.update(percentage=percentage)\n\n # auto-remove colon for empty `{desc}`\n if not prefix:\n bar_format = bar_format.replace(\"{desc}: \", '')\n else:\n bar_format = \"{l_bar}{bar}{r_bar}\"\n\n full_bar = FormatReplace()\n nobar = bar_format.format(bar=full_bar, **format_dict)\n if not full_bar.format_called:\n return nobar # no `{bar}`; nothing else to do\n\n # Formatting progress bar space available for bar's display\n full_bar = Bar(frac,\n max(1, ncols - disp_len(nobar)) if ncols else 10,\n charset=Bar.ASCII if ascii is True else ascii or Bar.UTF,\n colour=colour)\n if not _is_ascii(full_bar.charset) and _is_ascii(bar_format):\n bar_format = str(bar_format)\n res = bar_format.format(bar=full_bar, **format_dict)\n return disp_trim(res, ncols) if ncols else res\n\n elif bar_format:\n # user-specified bar_format but no total\n l_bar += '|'\n format_dict.update(l_bar=l_bar, percentage=0)\n full_bar = FormatReplace()\n nobar = bar_format.format(bar=full_bar, **format_dict)\n if not full_bar.format_called:\n return nobar\n full_bar = Bar(0,\n max(1, ncols - disp_len(nobar)) if ncols else 10,\n charset=Bar.BLANK, colour=colour)\n res = bar_format.format(bar=full_bar, **format_dict)\n return disp_trim(res, ncols) if ncols else res\n else:\n # no total: no progressbar, ETA, just progress stats\n return (f'{(prefix + \": \") if prefix else \"\"}'\n f'{n_fmt}{unit} [{elapsed_str}, {rate_fmt}{postfix}]')\n\n def __new__(cls, *_, **__):\n instance = object.__new__(cls)\n with cls.get_lock(): # also constructs lock if non-existent\n cls._instances.add(instance)\n # create monitoring thread\n if cls.monitor_interval and (cls.monitor is None\n or not cls.monitor.report()):\n try:\n cls.monitor = TMonitor(cls, cls.monitor_interval)\n except Exception as e: # pragma: nocover\n warn(\"tqdm:disabling monitor support\"\n \" (monitor_interval = 0) due to:\\n\" + str(e),\n TqdmMonitorWarning, stacklevel=2)\n cls.monitor_interval = 0\n return instance\n\n @classmethod\n def _get_free_pos(cls, instance=None):\n \"\"\"Skips specified instance.\"\"\"\n positions = {abs(inst.pos) for inst in cls._instances\n if inst is not instance and hasattr(inst, \"pos\")}\n return min(set(range(len(positions) + 1)).difference(positions))\n\n @classmethod\n def _decr_instances(cls, instance):\n \"\"\"\n Remove from list and reposition another unfixed bar\n to fill the new gap.\n\n This means that by default (where all nested bars are 
unfixed),\n order is not maintained but screen flicker/blank space is minimised.\n (tqdm<=4.44.1 moved ALL subsequent unfixed bars up.)\n \"\"\"\n with cls._lock:\n try:\n cls._instances.remove(instance)\n except KeyError:\n # if not instance.gui: # pragma: no cover\n # raise\n pass # py2: maybe magically removed already\n # else:\n if not instance.gui:\n last = (instance.nrows or 20) - 1\n # find unfixed (`pos >= 0`) overflow (`pos >= nrows - 1`)\n instances = list(filter(\n lambda i: hasattr(i, \"pos\") and last <= i.pos,\n cls._instances))\n # set first found to current `pos`\n if instances:\n inst = min(instances, key=lambda i: i.pos)\n inst.clear(nolock=True)\n inst.pos = abs(instance.pos)\n\n @classmethod\n def write(cls, s, file=None, end=\"\\n\", nolock=False):\n \"\"\"Print a message via tqdm (without overlap with bars).\"\"\"\n fp = file if file is not None else sys.stdout\n with cls.external_write_mode(file=file, nolock=nolock):\n # Write the message\n fp.write(s)\n fp.write(end)\n\n @classmethod\n @contextmanager\n def external_write_mode(cls, file=None, nolock=False):\n \"\"\"\n Disable tqdm within context and refresh tqdm when exits.\n Useful when writing to standard output stream\n \"\"\"\n fp = file if file is not None else sys.stdout\n\n try:\n if not nolock:\n cls.get_lock().acquire()\n # Clear all bars\n inst_cleared = []\n for inst in getattr(cls, '_instances', []):\n # Clear instance if in the target output file\n # or if write output + tqdm output are both either\n # sys.stdout or sys.stderr (because both are mixed in terminal)\n if hasattr(inst, \"start_t\") and (inst.fp == fp or all(\n f in (sys.stdout, sys.stderr) for f in (fp, inst.fp))):\n inst.clear(nolock=True)\n inst_cleared.append(inst)\n yield\n # Force refresh display of bars we cleared\n for inst in inst_cleared:\n inst.refresh(nolock=True)\n finally:\n if not nolock:\n cls._lock.release()\n\n @classmethod\n def set_lock(cls, lock):\n \"\"\"Set the global lock.\"\"\"\n cls._lock = lock\n\n @classmethod\n def get_lock(cls):\n \"\"\"Get the global lock. 
Construct it if it does not exist.\"\"\"\n if not hasattr(cls, '_lock'):\n cls._lock = TqdmDefaultWriteLock()\n return cls._lock\n\n @classmethod\n def pandas(cls, **tqdm_kwargs):\n \"\"\"\n Registers the current `tqdm` class with\n pandas.core.\n ( frame.DataFrame\n | series.Series\n | groupby.(generic.)DataFrameGroupBy\n | groupby.(generic.)SeriesGroupBy\n ).progress_apply\n\n A new instance will be created every time `progress_apply` is called,\n and each instance will automatically `close()` upon completion.\n\n Parameters\n ----------\n tqdm_kwargs : arguments for the tqdm instance\n\n Examples\n --------\n >>> import pandas as pd\n >>> import numpy as np\n >>> from tqdm import tqdm\n >>> from tqdm.gui import tqdm as tqdm_gui\n >>>\n >>> df = pd.DataFrame(np.random.randint(0, 100, (100000, 6)))\n >>> tqdm.pandas(ncols=50) # can use tqdm_gui, optional kwargs, etc\n >>> # Now you can use `progress_apply` instead of `apply`\n >>> df.groupby(0).progress_apply(lambda x: x**2)\n\n References\n ----------\n <https://stackoverflow.com/questions/18603270/\\\n progress-indicator-during-pandas-operations-python>\n \"\"\"\n from warnings import catch_warnings, simplefilter\n\n from pandas.core.frame import DataFrame\n from pandas.core.series import Series\n try:\n with catch_warnings():\n simplefilter(\"ignore\", category=FutureWarning)\n from pandas import Panel\n except ImportError: # pandas>=1.2.0\n Panel = None\n Rolling, Expanding = None, None\n try: # pandas>=1.0.0\n from pandas.core.window.rolling import _Rolling_and_Expanding\n except ImportError:\n try: # pandas>=0.18.0\n from pandas.core.window import _Rolling_and_Expanding\n except ImportError: # pandas>=1.2.0\n try: # pandas>=1.2.0\n from pandas.core.window.expanding import Expanding\n from pandas.core.window.rolling import Rolling\n _Rolling_and_Expanding = Rolling, Expanding\n except ImportError: # pragma: no cover\n _Rolling_and_Expanding = None\n try: # pandas>=0.25.0\n from pandas.core.groupby.generic import SeriesGroupBy # , NDFrameGroupBy\n from pandas.core.groupby.generic import DataFrameGroupBy\n except ImportError: # pragma: no cover\n try: # pandas>=0.23.0\n from pandas.core.groupby.groupby import DataFrameGroupBy, SeriesGroupBy\n except ImportError:\n from pandas.core.groupby import DataFrameGroupBy, SeriesGroupBy\n try: # pandas>=0.23.0\n from pandas.core.groupby.groupby import GroupBy\n except ImportError: # pragma: no cover\n from pandas.core.groupby import GroupBy\n\n try: # pandas>=0.23.0\n from pandas.core.groupby.groupby import PanelGroupBy\n except ImportError:\n try:\n from pandas.core.groupby import PanelGroupBy\n except ImportError: # pandas>=0.25.0\n PanelGroupBy = None\n\n tqdm_kwargs = tqdm_kwargs.copy()\n deprecated_t = [tqdm_kwargs.pop('deprecated_t', None)]\n\n def inner_generator(df_function='apply'):\n def inner(df, func, *args, **kwargs):\n \"\"\"\n Parameters\n ----------\n df : (DataFrame|Series)[GroupBy]\n Data (may be grouped).\n func : function\n To be applied on the (grouped) data.\n **kwargs : optional\n Transmitted to `df.apply()`.\n \"\"\"\n\n # Precompute total iterations\n total = tqdm_kwargs.pop(\"total\", getattr(df, 'ngroups', None))\n if total is None: # not grouped\n if df_function == 'applymap':\n total = df.size\n elif isinstance(df, Series):\n total = len(df)\n elif (_Rolling_and_Expanding is None or\n not isinstance(df, _Rolling_and_Expanding)):\n # DataFrame or Panel\n axis = kwargs.get('axis', 0)\n if axis == 'index':\n axis = 0\n elif axis == 'columns':\n axis = 1\n # when axis=0, 
total is shape[axis1]\n total = df.size // df.shape[axis]\n\n # Init bar\n if deprecated_t[0] is not None:\n t = deprecated_t[0]\n deprecated_t[0] = None\n else:\n t = cls(total=total, **tqdm_kwargs)\n\n if len(args) > 0:\n # *args intentionally not supported (see #244, #299)\n TqdmDeprecationWarning(\n \"Except func, normal arguments are intentionally\" +\n \" not supported by\" +\n \" `(DataFrame|Series|GroupBy).progress_apply`.\" +\n \" Use keyword arguments instead.\",\n fp_write=getattr(t.fp, 'write', sys.stderr.write))\n\n try: # pandas>=1.3.0\n from pandas.core.common import is_builtin_func\n except ImportError:\n is_builtin_func = df._is_builtin_func\n try:\n func = is_builtin_func(func)\n except TypeError:\n pass\n\n # Define bar updating wrapper\n def wrapper(*args, **kwargs):\n # update tbar correctly\n # it seems `pandas apply` calls `func` twice\n # on the first column/row to decide whether it can\n # take a fast or slow code path; so stop when t.total==t.n\n t.update(n=1 if not t.total or t.n < t.total else 0)\n return func(*args, **kwargs)\n\n # Apply the provided function (in **kwargs)\n # on the df using our wrapper (which provides bar updating)\n try:\n return getattr(df, df_function)(wrapper, **kwargs)\n finally:\n t.close()\n\n return inner\n\n # Monkeypatch pandas to provide easy methods\n # Enable custom tqdm progress in pandas!\n Series.progress_apply = inner_generator()\n SeriesGroupBy.progress_apply = inner_generator()\n Series.progress_map = inner_generator('map')\n SeriesGroupBy.progress_map = inner_generator('map')\n\n DataFrame.progress_apply = inner_generator()\n DataFrameGroupBy.progress_apply = inner_generator()\n DataFrame.progress_applymap = inner_generator('applymap')\n\n if Panel is not None:\n Panel.progress_apply = inner_generator()\n if PanelGroupBy is not None:\n PanelGroupBy.progress_apply = inner_generator()\n\n GroupBy.progress_apply = inner_generator()\n GroupBy.progress_aggregate = inner_generator('aggregate')\n GroupBy.progress_transform = inner_generator('transform')\n\n if Rolling is not None and Expanding is not None:\n Rolling.progress_apply = inner_generator()\n Expanding.progress_apply = inner_generator()\n elif _Rolling_and_Expanding is not None:\n _Rolling_and_Expanding.progress_apply = inner_generator()\n\n # override defaults via env vars\n @envwrap(\"TQDM_\", is_method=True, types={'total': float, 'ncols': int, 'miniters': float,\n 'position': int, 'nrows': int})\n def __init__(self, iterable=None, desc=None, total=None, leave=True, file=None,\n ncols=None, mininterval=0.1, maxinterval=10.0, miniters=None,\n ascii=None, disable=False, unit='it', unit_scale=False,\n dynamic_ncols=False, smoothing=0.3, bar_format=None, initial=0,\n position=None, postfix=None, unit_divisor=1000, write_bytes=False,\n lock_args=None, nrows=None, colour=None, delay=0.0, gui=False,\n **kwargs):\n \"\"\"see tqdm.tqdm for arguments\"\"\"\n if file is None:\n file = sys.stderr\n\n if write_bytes:\n # Despite coercing unicode into bytes, py2 sys.std* streams\n # should have bytes written to them.\n file = SimpleTextIOWrapper(\n file, encoding=getattr(file, 'encoding', None) or 'utf-8')\n\n file = DisableOnWriteError(file, tqdm_instance=self)\n\n if disable is None and hasattr(file, \"isatty\") and not file.isatty():\n disable = True\n\n if total is None and iterable is not None:\n try:\n total = len(iterable)\n except (TypeError, AttributeError):\n total = None\n if total == float(\"inf\"):\n # Infinite iterations, behave same as unknown\n total = None\n\n if 
disable:\n self.iterable = iterable\n self.disable = disable\n with self._lock:\n self.pos = self._get_free_pos(self)\n self._instances.remove(self)\n self.n = initial\n self.total = total\n self.leave = leave\n return\n\n if kwargs:\n self.disable = True\n with self._lock:\n self.pos = self._get_free_pos(self)\n self._instances.remove(self)\n raise (\n TqdmDeprecationWarning(\n \"`nested` is deprecated and automated.\\n\"\n \"Use `position` instead for manual control.\\n\",\n fp_write=getattr(file, 'write', sys.stderr.write))\n if \"nested\" in kwargs else\n TqdmKeyError(\"Unknown argument(s): \" + str(kwargs)))\n\n # Preprocess the arguments\n if (\n (ncols is None or nrows is None) and (file in (sys.stderr, sys.stdout))\n ) or dynamic_ncols: # pragma: no cover\n if dynamic_ncols:\n dynamic_ncols = _screen_shape_wrapper()\n if dynamic_ncols:\n ncols, nrows = dynamic_ncols(file)\n else:\n _dynamic_ncols = _screen_shape_wrapper()\n if _dynamic_ncols:\n _ncols, _nrows = _dynamic_ncols(file)\n if ncols is None:\n ncols = _ncols\n if nrows is None:\n nrows = _nrows\n\n if miniters is None:\n miniters = 0\n dynamic_miniters = True\n else:\n dynamic_miniters = False\n\n if mininterval is None:\n mininterval = 0\n\n if maxinterval is None:\n maxinterval = 0\n\n if ascii is None:\n ascii = not _supports_unicode(file)\n\n if bar_format and ascii is not True and not _is_ascii(ascii):\n # Convert bar format into unicode since terminal uses unicode\n bar_format = str(bar_format)\n\n if smoothing is None:\n smoothing = 0\n\n # Store the arguments\n self.iterable = iterable\n self.desc = desc or ''\n self.total = total\n self.leave = leave\n self.fp = file\n self.ncols = ncols\n self.nrows = nrows\n self.mininterval = mininterval\n self.maxinterval = maxinterval\n self.miniters = miniters\n self.dynamic_miniters = dynamic_miniters\n self.ascii = ascii\n self.disable = disable\n self.unit = unit\n self.unit_scale = unit_scale\n self.unit_divisor = unit_divisor\n self.initial = initial\n self.lock_args = lock_args\n self.delay = delay\n self.gui = gui\n self.dynamic_ncols = dynamic_ncols\n self.smoothing = smoothing\n self._ema_dn = EMA(smoothing)\n self._ema_dt = EMA(smoothing)\n self._ema_miniters = EMA(smoothing)\n self.bar_format = bar_format\n self.postfix = None\n self.colour = colour\n self._time = time\n if postfix:\n try:\n self.set_postfix(refresh=False, **postfix)\n except TypeError:\n self.postfix = postfix\n\n # Init the iterations counters\n self.last_print_n = initial\n self.n = initial\n\n # if nested, at initial sp() call we replace '\\r' by '\\n' to\n # not overwrite the outer progress bar\n with self._lock:\n # mark fixed positions as negative\n self.pos = self._get_free_pos(self) if position is None else -position\n\n if not gui:\n # Initialize the screen printer\n self.sp = self.status_printer(self.fp)\n if delay <= 0:\n self.refresh(lock_args=self.lock_args)\n\n # Init the time counter\n self.last_print_t = self._time()\n # NB: Avoid race conditions by setting start_t at the very end of init\n self.start_t = self.last_print_t\n\n def __bool__(self):\n if self.total is not None:\n return self.total > 0\n if self.iterable is None:\n raise TypeError('bool() undefined when iterable == total == None')\n return bool(self.iterable)\n\n def __len__(self):\n return (\n self.total if self.iterable is None\n else self.iterable.shape[0] if hasattr(self.iterable, \"shape\")\n else len(self.iterable) if hasattr(self.iterable, \"__len__\")\n else self.iterable.__length_hint__() if 
hasattr(self.iterable, \"__length_hint__\")\n else getattr(self, \"total\", None))\n\n def __reversed__(self):\n try:\n orig = self.iterable\n except AttributeError:\n raise TypeError(\"'tqdm' object is not reversible\")\n else:\n self.iterable = reversed(self.iterable)\n return self.__iter__()\n finally:\n self.iterable = orig\n\n def __contains__(self, item):\n contains = getattr(self.iterable, '__contains__', None)\n return contains(item) if contains is not None else item in self.__iter__()\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n try:\n self.close()\n except AttributeError:\n # maybe eager thread cleanup upon external error\n if (exc_type, exc_value, traceback) == (None, None, None):\n raise\n warn(\"AttributeError ignored\", TqdmWarning, stacklevel=2)\n\n def __del__(self):\n self.close()\n\n def __str__(self):\n return self.format_meter(**self.format_dict)\n\n @property\n def _comparable(self):\n return abs(getattr(self, \"pos\", 1 << 31))\n\n def __hash__(self):\n return id(self)\n\n def __iter__(self):\n \"\"\"Backward-compatibility to use: for x in tqdm(iterable)\"\"\"\n\n # Inlining instance variables as locals (speed optimisation)\n iterable = self.iterable\n\n # If the bar is disabled, then just walk the iterable\n # (note: keep this check outside the loop for performance)\n if self.disable:\n for obj in iterable:\n yield obj\n return\n\n mininterval = self.mininterval\n last_print_t = self.last_print_t\n last_print_n = self.last_print_n\n min_start_t = self.start_t + self.delay\n n = self.n\n time = self._time\n\n try:\n for obj in iterable:\n yield obj\n # Update and possibly print the progressbar.\n # Note: does not call self.update(1) for speed optimisation.\n n += 1\n\n if n - last_print_n >= self.miniters:\n cur_t = time()\n dt = cur_t - last_print_t\n if dt >= mininterval and cur_t >= min_start_t:\n self.update(n - last_print_n)\n last_print_n = self.last_print_n\n last_print_t = self.last_print_t\n finally:\n self.n = n\n self.close()\n\n def update(self, n=1):\n \"\"\"\n Manually update the progress bar, useful for streams\n such as reading files.\n E.g.:\n >>> t = tqdm(total=filesize) # Initialise\n >>> for current_buffer in stream:\n ... ...\n ... t.update(len(current_buffer))\n >>> t.close()\n The last line is highly recommended, but possibly not necessary if\n `t.update()` will be called in such a way that `filesize` will be\n exactly reached and printed.\n\n Parameters\n ----------\n n : int or float, optional\n Increment to add to the internal counter of iterations\n [default: 1]. 
If using float, consider specifying `{n:.3f}`\n or similar in `bar_format`, or specifying `unit_scale`.\n\n Returns\n -------\n out : bool or None\n True if a `display()` was triggered.\n \"\"\"\n if self.disable:\n return\n\n if n < 0:\n self.last_print_n += n # for auto-refresh logic to work\n self.n += n\n\n # check counter first to reduce calls to time()\n if self.n - self.last_print_n >= self.miniters:\n cur_t = self._time()\n dt = cur_t - self.last_print_t\n if dt >= self.mininterval and cur_t >= self.start_t + self.delay:\n cur_t = self._time()\n dn = self.n - self.last_print_n # >= n\n if self.smoothing and dt and dn:\n # EMA (not just overall average)\n self._ema_dn(dn)\n self._ema_dt(dt)\n self.refresh(lock_args=self.lock_args)\n if self.dynamic_miniters:\n # If no `miniters` was specified, adjust automatically to the\n # maximum iteration rate seen so far between two prints.\n # e.g.: After running `tqdm.update(5)`, subsequent\n # calls to `tqdm.update()` will only cause an update after\n # at least 5 more iterations.\n if self.maxinterval and dt >= self.maxinterval:\n self.miniters = dn * (self.mininterval or self.maxinterval) / dt\n elif self.smoothing:\n # EMA miniters update\n self.miniters = self._ema_miniters(\n dn * (self.mininterval / dt if self.mininterval and dt\n else 1))\n else:\n # max iters between two prints\n self.miniters = max(self.miniters, dn)\n\n # Store old values for next call\n self.last_print_n = self.n\n self.last_print_t = cur_t\n return True\n\n def close(self):\n \"\"\"Cleanup and (if leave=False) close the progressbar.\"\"\"\n if self.disable:\n return\n\n # Prevent multiple closures\n self.disable = True\n\n # decrement instance pos and remove from internal set\n pos = abs(self.pos)\n self._decr_instances(self)\n\n if self.last_print_t < self.start_t + self.delay:\n # haven't ever displayed; nothing to clear\n return\n\n # GUI mode\n if getattr(self, 'sp', None) is None:\n return\n\n # annoyingly, _supports_unicode isn't good enough\n def fp_write(s):\n self.fp.write(str(s))\n\n try:\n fp_write('')\n except ValueError as e:\n if 'closed' in str(e):\n return\n raise # pragma: no cover\n\n leave = pos == 0 if self.leave is None else self.leave\n\n with self._lock:\n if leave:\n # stats for overall rate (no weighted average)\n self._ema_dt = lambda: None\n self.display(pos=0)\n fp_write('\\n')\n else:\n # clear previous display\n if self.display(msg='', pos=pos) and not pos:\n fp_write('\\r')\n\n def clear(self, nolock=False):\n \"\"\"Clear current bar display.\"\"\"\n if self.disable:\n return\n\n if not nolock:\n self._lock.acquire()\n pos = abs(self.pos)\n if pos < (self.nrows or 20):\n self.moveto(pos)\n self.sp('')\n self.fp.write('\\r') # place cursor back at the beginning of line\n self.moveto(-pos)\n if not nolock:\n self._lock.release()\n\n def refresh(self, nolock=False, lock_args=None):\n \"\"\"\n Force refresh the display of this bar.\n\n Parameters\n ----------\n nolock : bool, optional\n If `True`, does not lock.\n If [default: `False`]: calls `acquire()` on internal lock.\n lock_args : tuple, optional\n Passed to internal lock's `acquire()`.\n If specified, will only `display()` if `acquire()` returns `True`.\n \"\"\"\n if self.disable:\n return\n\n if not nolock:\n if lock_args:\n if not self._lock.acquire(*lock_args):\n return False\n else:\n self._lock.acquire()\n self.display()\n if not nolock:\n self._lock.release()\n return True\n\n def unpause(self):\n \"\"\"Restart tqdm timer from last print time.\"\"\"\n if self.disable:\n 
return\n cur_t = self._time()\n self.start_t += cur_t - self.last_print_t\n self.last_print_t = cur_t\n\n def reset(self, total=None):\n \"\"\"\n Resets to 0 iterations for repeated use.\n\n Consider combining with `leave=True`.\n\n Parameters\n ----------\n total : int or float, optional. Total to use for the new bar.\n \"\"\"\n self.n = 0\n if total is not None:\n self.total = total\n if self.disable:\n return\n self.last_print_n = 0\n self.last_print_t = self.start_t = self._time()\n self._ema_dn = EMA(self.smoothing)\n self._ema_dt = EMA(self.smoothing)\n self._ema_miniters = EMA(self.smoothing)\n self.refresh()\n\n def set_description(self, desc=None, refresh=True):\n \"\"\"\n Set/modify description of the progress bar.\n\n Parameters\n ----------\n desc : str, optional\n refresh : bool, optional\n Forces refresh [default: True].\n \"\"\"\n self.desc = desc + ': ' if desc else ''\n if refresh:\n self.refresh()\n\n def set_description_str(self, desc=None, refresh=True):\n \"\"\"Set/modify description without ': ' appended.\"\"\"\n self.desc = desc or ''\n if refresh:\n self.refresh()\n\n def set_postfix(self, ordered_dict=None, refresh=True, **kwargs):\n \"\"\"\n Set/modify postfix (additional stats)\n with automatic formatting based on datatype.\n\n Parameters\n ----------\n ordered_dict : dict or OrderedDict, optional\n refresh : bool, optional\n Forces refresh [default: True].\n kwargs : dict, optional\n \"\"\"\n # Sort in alphabetical order to be more deterministic\n postfix = OrderedDict([] if ordered_dict is None else ordered_dict)\n for key in sorted(kwargs.keys()):\n postfix[key] = kwargs[key]\n # Preprocess stats according to datatype\n for key in postfix.keys():\n # Number: limit the length of the string\n if isinstance(postfix[key], Number):\n postfix[key] = self.format_num(postfix[key])\n # Else for any other type, try to get the string conversion\n elif not isinstance(postfix[key], str):\n postfix[key] = str(postfix[key])\n # Else if it's a string, don't need to preprocess anything\n # Stitch together to get the final postfix\n self.postfix = ', '.join(key + '=' + postfix[key].strip()\n for key in postfix.keys())\n if refresh:\n self.refresh()\n\n def set_postfix_str(self, s='', refresh=True):\n \"\"\"\n Postfix without dictionary expansion, similar to prefix handling.\n \"\"\"\n self.postfix = str(s)\n if refresh:\n self.refresh()\n\n def moveto(self, n):\n # TODO: private method\n self.fp.write('\\n' * n + _term_move_up() * -n)\n getattr(self.fp, 'flush', lambda: None)()\n\n @property\n def format_dict(self):\n \"\"\"Public API for read-only member access.\"\"\"\n if self.disable and not hasattr(self, 'unit'):\n return defaultdict(lambda: None, {\n 'n': self.n, 'total': self.total, 'elapsed': 0, 'unit': 'it'})\n if self.dynamic_ncols:\n self.ncols, self.nrows = self.dynamic_ncols(self.fp)\n return {\n 'n': self.n, 'total': self.total,\n 'elapsed': self._time() - self.start_t if hasattr(self, 'start_t') else 0,\n 'ncols': self.ncols, 'nrows': self.nrows, 'prefix': self.desc,\n 'ascii': self.ascii, 'unit': self.unit, 'unit_scale': self.unit_scale,\n 'rate': self._ema_dn() / self._ema_dt() if self._ema_dt() else None,\n 'bar_format': self.bar_format, 'postfix': self.postfix,\n 'unit_divisor': self.unit_divisor, 'initial': self.initial,\n 'colour': self.colour}\n\n def display(self, msg=None, pos=None):\n \"\"\"\n Use `self.sp` to display `msg` in the specified `pos`.\n\n Consider overloading this function when inheriting to use e.g.:\n 
`self.some_frontend(**self.format_dict)` instead of `self.sp`.\n\n Parameters\n ----------\n msg : str, optional. What to display (default: `repr(self)`).\n pos : int, optional. Position to `moveto`\n (default: `abs(self.pos)`).\n \"\"\"\n if pos is None:\n pos = abs(self.pos)\n\n nrows = self.nrows or 20\n if pos >= nrows - 1:\n if pos >= nrows:\n return False\n if msg or msg is None: # override at `nrows - 1`\n msg = \" ... (more hidden) ...\"\n\n if not hasattr(self, \"sp\"):\n raise TqdmDeprecationWarning(\n \"Please use `tqdm.gui.tqdm(...)`\"\n \" instead of `tqdm(..., gui=True)`\\n\",\n fp_write=getattr(self.fp, 'write', sys.stderr.write))\n\n if pos:\n self.moveto(pos)\n self.sp(self.__str__() if msg is None else msg)\n if pos:\n self.moveto(-pos)\n return True\n\n @classmethod\n @contextmanager\n def wrapattr(cls, stream, method, total=None, bytes=True, **tqdm_kwargs):\n \"\"\"\n stream : file-like object.\n method : str, \"read\" or \"write\". The result of `read()` and\n the first argument of `write()` should have a `len()`.\n\n >>> with tqdm.wrapattr(file_obj, \"read\", total=file_obj.size) as fobj:\n ... while True:\n ... chunk = fobj.read(chunk_size)\n ... if not chunk:\n ... break\n \"\"\"\n with cls(total=total, **tqdm_kwargs) as t:\n if bytes:\n t.unit = \"B\"\n t.unit_scale = True\n t.unit_divisor = 1024\n yield CallbackIOWrapper(t.update, stream, method)" } ]
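The `tqdm.asyncio` entry in the context above only shows the class skeleton, so a brief sketch of how its `gather` classmethod is normally driven may help; the coroutine below is a made-up placeholder, not code from the record:

    import asyncio
    from tqdm.asyncio import tqdm as tqdm_asyncio

    async def work(i):
        # stand-in for real asynchronous work
        await asyncio.sleep(0.01)
        return i * i

    async def main():
        # gather() mirrors asyncio.gather() but updates a progress bar as tasks finish,
        # returning results in the original submission order.
        results = await tqdm_asyncio.gather(*(work(i) for i in range(100)), desc="tasks")
        print(sum(results))

    asyncio.run(main())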
import warnings from .std import TqdmExperimentalWarning from .autonotebook import tqdm as notebook_tqdm from .asyncio import tqdm as asyncio_tqdm from .std import tqdm as std_tqdm
12,691
""" Enables multiple commonly used features. Method resolution order: - `tqdm.autonotebook` without import warnings - `tqdm.asyncio` - `tqdm.std` base class Usage: >>> from tqdm.auto import trange, tqdm >>> for i in trange(10): ... ... """ with warnings.catch_warnings(): warnings.simplefilter("ignore", category=TqdmExperimentalWarning)
""" Enables multiple commonly used features. Method resolution order: - `tqdm.autonotebook` without import warnings - `tqdm.asyncio` - `tqdm.std` base class Usage: >>> from tqdm.auto import trange, tqdm >>> for i in trange(10): ... ... """ with warnings.catch_warnings(): warnings.simplefilter("ignore", category=TqdmExperimentalWarning)
if notebook_tqdm != std_tqdm:
0
2023-12-14 07:43:03+00:00
16k
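For context, the `auto` module this second record is built around is normally consumed as shown below; the loop bodies and file names are illustrative assumptions only:

    from tqdm.auto import tqdm, trange

    # tqdm.auto resolves to the notebook widget bar under Jupyter and to tqdm.std otherwise,
    # so the same progress-bar code works in both environments.
    total = 0
    for i in trange(1000, desc="summing"):
        total += i

    with tqdm(total=3, unit="file") as bar:
        for name in ("a.txt", "b.txt", "c.txt"):   # hypothetical file names
            bar.set_postfix(file=name)
            bar.update(1)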
LkPrtctrd/BSL-V53
Heart/Logic/LogicLaserMessageFactory.py
[ { "identifier": "ClientHelloMessage", "path": "Heart/Packets/Client/Authentification/ClientHelloMessage.py", "snippet": "class ClientHelloMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n pass\n\n def decode(self):\n fields = {}\n fields[\"Protocol\"] = self.readInt()\n fields[\"KeyVersion\"] = self.readInt()\n fields[\"MajorVersion\"] = self.readInt()\n fields[\"MinorVersion\"] = self.readInt()\n fields[\"Build\"] = self.readInt()\n fields[\"ContentHash\"] = self.readString()\n fields[\"DeviceType\"] = self.readInt()\n fields[\"AppStore\"] = self.readInt()\n super().decode(fields)\n return fields\n\n def execute(message, calling_instance, fields, cryptoInit):\n fields[\"Socket\"] = calling_instance.client\n Messaging.sendMessage(20100, fields, cryptoInit)\n\n def getMessageType(self):\n return 10100\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "LoginMessage", "path": "Heart/Packets/Client/Authentification/LoginMessage.py", "snippet": "class LoginMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n pass\n\n def decode(self):\n fields = {}\n fields[\"AccountID\"] = self.readLong()\n fields[\"PassToken\"] = self.readString()\n fields[\"ClientMajor\"] = self.readInt()\n fields[\"ClientMinor\"] = self.readInt()\n fields[\"ClientBuild\"] = self.readInt()\n fields[\"ResourceSha\"] = self.readString()\n fields[\"Device\"] = self.readString()\n fields[\"PreferredLanguage\"] = self.readDataReference()\n fields[\"PreferredDeviceLanguage\"] = self.readString()\n fields[\"OSVersion\"] = self.readString()\n fields[\"isAndroid\"] = self.readBoolean()\n fields[\"IMEI\"] = self.readString()\n fields[\"AndroidID\"] = self.readString()\n fields[\"isAdvertisingEnabled\"] = self.readBoolean()\n fields[\"AppleIFV\"] = self.readString()\n fields[\"RndKey\"] = self.readInt()\n fields[\"AppStore\"] = self.readVInt()\n fields[\"ClientVersion\"] = self.readString()\n fields[\"TencentOpenId\"] = self.readString()\n fields[\"TencentToken\"] = self.readString()\n fields[\"TencentPlatform\"] = self.readVInt()\n fields[\"DeviceVerifierResponse\"] = self.readString()\n fields[\"AppLicensingSignature\"] = self.readString()\n fields[\"DeviceVerifierResponse\"] = self.readString()\n super().decode(fields)\n return fields\n\n def execute(message, calling_instance, fields, cryptoInit):\n if fields[\"ClientMajor\"]==53:\n calling_instance.player.ClientVersion = f'{str(fields[\"ClientMajor\"])}.{str(fields[\"ClientBuild\"])}.{str(fields[\"ClientMinor\"])}'\n fields[\"Socket\"] = calling_instance.client\n db_instance = DatabaseHandler()\n if db_instance.playerExist(fields[\"PassToken\"], fields[\"AccountID\"]):\n player_data = json.loads(db_instance.getPlayerEntry(fields[\"AccountID\"])[2])\n db_instance.loadAccount(calling_instance.player, fields[\"AccountID\"])\n else:\n db_instance.createAccount(calling_instance.player.getDataTemplate(fields[\"AccountID\"][0], fields[\"AccountID\"][1], fields[\"PassToken\"]))\n ClientsManager.AddPlayer(calling_instance.player.ID, calling_instance.client)\n Messaging.sendMessage(20104, fields, cryptoInit, calling_instance.player)\n Messaging.sendMessage(24101, fields, cryptoInit, calling_instance.player)\n Messaging.sendMessage(24399, fields, cryptoInit, calling_instance.player)\n\n def getMessageType(self):\n return 10101\n\n def getMessageVersion(self):\n 
return self.messageVersion" }, { "identifier": "AskForBattleEndMessage", "path": "Heart/Packets/Client/Battle/AskForBattleEndMessage.py", "snippet": "class AskForBattleEndMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n pass\n\n def decode(self):\n fields = {}\n fields[\"Unk1\"] = self.readVInt()\n fields[\"Result\"] = self.readVInt()\n fields[\"Rank\"] = self.readVInt()\n fields[\"MapID\"] = self.readDataReference()\n fields[\"HeroesCount\"] = self.readVInt()\n fields[\"Heroes\"] = []\n for i in range(fields[\"HeroesCount\"]): fields[\"Heroes\"].append({\"Brawler\": {\"ID\": self.readDataReference(), \"SkinID\": self.readDataReference()}, \"Team\": self.readVInt(), \"IsPlayer\": self.readBoolean(), \"PlayerName\": self.readString()})\n super().decode(fields)\n return fields\n\n def execute(message, calling_instance, fields, cryptoInit):\n fields[\"Socket\"] = calling_instance.client\n Messaging.sendMessage(23456, fields, cryptoInit, calling_instance.player)\n\n def getMessageType(self):\n return 14110\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "ChangeAvatarNameMessage", "path": "Heart/Packets/Client/Home/ChangeAvatarNameMessage.py", "snippet": "class ChangeAvatarNameMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n self.writeString(fields[\"Name\"])\n self.writeBoolean(fields[\"NameSetByUser\"])\n\n def decode(self):\n fields = {}\n fields[\"Name\"] = self.readString()\n fields[\"NameSetByUser\"] = self.readBoolean()\n super().decode(fields)\n return fields\n\n def execute(message, calling_instance, fields, cryptoInit):\n db_instance = DatabaseHandler()\n playerData = db_instance.getPlayer(calling_instance.player.ID)\n playerData[\"Name\"] = fields[\"Name\"]\n playerData[\"Registered\"] = True\n db_instance.updatePlayerData(playerData, calling_instance)\n fields[\"Socket\"] = calling_instance.client\n fields[\"Command\"] = {\"ID\": 201}\n Messaging.sendMessage(24111, fields, cryptoInit, calling_instance.player)\n\n def getMessageType(self):\n return 10212\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "EndClientTurnMessage", "path": "Heart/Packets/Client/Home/EndClientTurnMessage.py", "snippet": "class EndClientTurnMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n pass\n\n def decode(self):\n fields = {}\n self.readBoolean()\n fields[\"Tick\"] = self.readVInt()\n fields[\"Checksum\"] = self.readVInt()\n fields[\"CommandsCount\"] = self.readVInt()\n super().decode(fields)\n fields[\"Commands\"] = []\n for i in range(fields[\"CommandsCount\"]):\n fields[\"Commands\"].append({\"ID\": self.readVInt()})\n if LogicCommandManager.commandExist(fields[\"Commands\"][i][\"ID\"]):\n command = LogicCommandManager.createCommand(fields[\"Commands\"][i][\"ID\"])\n print(\"Command\", LogicCommandManager.getCommandsName(fields[\"Commands\"][i][\"ID\"]))\n if command is not None:\n fields[\"Commands\"][i][\"Fields\"] = command.decode(self)\n fields[\"Commands\"][i][\"Instance\"] = command\n return fields\n\n def execute(message, calling_instance, fields, cryptoInit):\n fields[\"Socket\"] = calling_instance.client\n for command in fields[\"Commands\"]:\n if \"Instance\" not in command.keys():\n return\n\n if hasattr(command[\"Instance\"], 
'execute'):\n command[\"Instance\"].execute(calling_instance, command[\"Fields\"], cryptoInit)\n if command[\"ID\"] == 519:\n Messaging.sendMessage(24104, {\"Socket\": calling_instance.client, \"ServerChecksum\": 0, \"ClientChecksum\": 0, \"Tick\": 0}, cryptoInit)\n\n def getMessageType(self):\n return 14102\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "GoHomeFromOfflinePractiseMessage", "path": "Heart/Packets/Client/Home/GoHomeFromOfflinePractiseMessage.py", "snippet": "class GoHomeFromOfflinePractiseMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n pass\n\n def decode(self):\n fields = {}\n self.readBoolean()\n return fields\n\n def execute(message, calling_instance, fields, cryptoInit):\n fields[\"Socket\"] = calling_instance.client\n Messaging.sendMessage(24101, fields, cryptoInit, calling_instance.player)\n\n def getMessageType(self):\n return 14109\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "GoHomeMessage", "path": "Heart/Packets/Client/Home/GoHomeMessage.py", "snippet": "class GoHomeMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n pass\n\n def decode(self):\n fields = {}\n self.readBoolean()\n return fields\n\n def execute(message, calling_instance, fields, cryptoInit):\n fields[\"Socket\"] = calling_instance.client\n Messaging.sendMessage(24101, fields, cryptoInit, calling_instance.player)\n\n def getMessageType(self):\n return 17750\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "GetPlayerProfileMessage", "path": "Heart/Packets/Client/Home/GetPlayerProfileMessage.py", "snippet": "class GetPlayerProfileMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n pass\n\n def decode(self):\n fields = {}\n fields[\"BattleInfoBoolean\"] = self.readBoolean()\n if fields[\"BattleInfoBoolean\"]:\n fields[\"unk1\"] = self.readVInt()\n fields[\"AnotherID\"] = self.readLong()\n fields[\"unk2\"] = self.readVInt()\n for i in self.readVInt():\n fields[\"CsvID\"] = self.readDataReference()\n fields[\"unk3\"] = self.readVInt()\n fields[\"unk4\"] = self.readVInt()\n fields[\"unk5\"] = self.readVInt()\n fields[\"unk6\"] = self.readVInt()\n fields[\"PlayerName\"] = self.readString()\n fields[\"unk7\"] = self.readVInt()\n fields[\"Thumbnail\"] = self.readVInt()\n fields[\"NameColor\"] = self.readVInt()\n fields[\"unk10\"] = self.readVInt()\n fields[\"unk11\"] = self.readVInt()\n fields[\"PlayerHighID\"] = self.readInt()\n fields[\"PlayerLowID\"] = self.readInt()\n super().decode(fields)\n\n\n return fields\n\n def execute(message, calling_instance, fields, cryptoInit):\n fields[\"Socket\"] = calling_instance.client\n Messaging.sendMessage(24113, fields, cryptoInit, calling_instance.player)\n\n def getMessageType(self):\n return 15081\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "AskForAllianceDataMessage", "path": "Heart/Packets/Client/Home/AskForAllianceDataMessage.py", "snippet": "class AskForAllianceDataMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n pass\n\n def decode(self):\n fields = {}\n fields[\"id\"] = self.readVLong()\n fields[\"isInAlliance\"] = 
self.readBoolean()\n if fields[\"isInAlliance\"] == True:\n fields[\"anotherIDHigh\"] = self.readVInt()\n fields[\"anotherIDLow\"] = self.readVInt()\n super().decode(fields)\n\n return fields\n\n def execute(message, calling_instance, fields, cryptoInit):\n fields[\"Socket\"] = calling_instance.client\n Messaging.sendMessage(24301, fields, cryptoInit, calling_instance.player)\n\n def getMessageType(self):\n return 14302\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "KeepAliveMessage", "path": "Heart/Packets/Client/Socket/KeepAliveMessage.py", "snippet": "class KeepAliveMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n pass\n\n def decode(self):\n return {}\n\n def execute(message, calling_instance, fields, cryptoInit):\n fields[\"Socket\"] = calling_instance.client\n Messaging.sendMessage(20108, fields, cryptoInit)\n\n def getMessageType(self):\n return 10108\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "LoginFailedMessage", "path": "Heart/Packets/Server/Authentification/LoginFailedMessage.py", "snippet": "class LoginFailedMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n self.writeInt(fields['ErrorID'])\n self.writeString(fields['FingerprintData'])\n self.writeString()\n self.writeString(fields['ContentURL'])\n self.writeString()\n self.writeString(fields['Message'])\n self.writeInt(0)\n self.writeBoolean(False)\n self.writeInt(0)\n self.writeInt(0)\n self.writeInt(0)\n self.writeInt(0)\n self.writeString()\n self.writeInt(0)\n self.writeBoolean(True)\n self.writeBoolean(True)\n self.writeString()\n self.writeVInt(0)\n self.writeString()\n self.writeBoolean(False)\n\n def decode(self):\n fields = {}\n fields[\"ErrorCode\"] = self.readInt()\n fields[\"ResourceFingerprintData\"] = self.readString()\n fields[\"RedirectDomain\"] = self.readString()\n fields[\"ContentURL\"] = self.readString()\n fields[\"UpdateURL\"] = self.readString()\n fields[\"Reason\"] = self.readString()\n fields[\"SecondsUntilMaintenanceEnd\"] = self.readInt()\n fields[\"ShowContactSupportForBan\"] = self.readBoolean()\n fields[\"CompressedFingerprintData\"] = self.readBytesWithoutLength()\n fields[\"ContentURLListCount\"] = self.readInt()\n fields[\"ContentURLList\"] = []\n for i in range(fields[\"ContentURLListCount\"]):\n fields[\"ContentURLList\"].append(self.readString())\n fields[\"KunlunAppStore\"] = self.readInt()\n fields[\"MaintenanceType\"] = self.readInt()\n fields[\"HelpshiftFaqId\"] = self.readString()\n fields[\"Tier\"] = self.readInt()\n fields[\"Unk1\"] = self.readBoolean()\n fields[\"Unk2\"] = self.readBoolean()\n fields[\"Unk3\"] = self.readString()\n fields[\"Unk4\"] = self.readVInt()\n fields[\"Unk5\"] = self.readString()\n fields[\"OptionalTargetedAccountIdState\"] = self.readBoolean()\n if fields[\"OptionalTargetedAccountIdState\"] == True:\n fields[\"OptionalTargetedAccountId\"] = self.readLong()\n super().decode(fields)\n return fields\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 20103\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "LoginOkMessage", "path": "Heart/Packets/Server/Authentification/LoginOkMessage.py", "snippet": "class LoginOkMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n 
self.messageVersion = 1\n\n def encode(self, fields, player):\n self.writeLong(player.ID[0], player.ID[1])\n self.writeLong(player.ID[0], player.ID[1])\n self.writeString(player.Token)\n self.writeString()\n self.writeString()\n self.writeInt(53)\n self.writeInt(176)\n self.writeInt(1)\n self.writeString(\"dev\")\n self.writeInt(0)\n self.writeInt(0)\n self.writeInt(0)\n self.writeString()\n self.writeString()\n self.writeString()\n self.writeInt(0)\n self.writeString()\n self.writeString(\"RU\")\n self.writeString()\n self.writeInt(0)\n self.writeString()\n self.writeInt(2)\n self.writeString('https://game-assets.brawlstarsgame.com')\n self.writeString('http://a678dbc1c015a893c9fd-4e8cc3b1ad3a3c940c504815caefa967.r87.cf2.rackcdn.com')\n self.writeInt(2)\n self.writeString('https://event-assets.brawlstars.com')\n self.writeString('https://24b999e6da07674e22b0-8209975788a0f2469e68e84405ae4fcf.ssl.cf2.rackcdn.com/event-assets')\n self.writeVInt(0)\n self.writeCompressedString(b'')\n self.writeBoolean(True)\n self.writeBoolean(False)\n self.writeString()\n self.writeString()\n self.writeString()\n self.writeString('https://play.google.com/store/apps/details?id=com.supercell.brawlstars')\n self.writeString()\n self.writeBoolean(False)\n\n self.writeBoolean(False)\n if False:\n self.writeString()\n\n self.writeBoolean(False)\n if False:\n self.writeString()\n\n self.writeBoolean(False)\n if False:\n self.writeString()\n\n self.writeBoolean(False)\n if False:\n self.writeString()\n\n\n def decode(self):\n fields = {}\n fields[\"AccountID\"] = self.readLong()\n fields[\"HomeID\"] = self.readLong()\n fields[\"PassToken\"] = self.readString()\n fields[\"FacebookID\"] = self.readString()\n fields[\"GamecenterID\"] = self.readString()\n fields[\"ServerMajorVersion\"] = self.readInt()\n fields[\"ContentVersion\"] = self.readInt()\n fields[\"ServerBuild\"] = self.readInt()\n fields[\"ServerEnvironment\"] = self.readString()\n fields[\"SessionCount\"] = self.readInt()\n fields[\"PlayTimeSeconds\"] = self.readInt()\n fields[\"DaysSinceStartedPlaying\"] = self.readInt()\n fields[\"FacebookAppID\"] = self.readString()\n fields[\"ServerTime\"] = self.readString()\n fields[\"AccountCreatedDate\"] = self.readString()\n fields[\"StartupCooldownSeconds\"] = self.readInt()\n fields[\"GoogleServiceID\"] = self.readString()\n fields[\"LoginCountry\"] = self.readString()\n fields[\"KunlunID\"] = self.readString()\n fields[\"Tier\"] = self.readInt()\n fields[\"TencentID\"] = self.readString()\n\n ContentUrlCount = self.readInt()\n fields[\"GameAssetsUrls\"] = []\n for i in range(ContentUrlCount):\n fields[\"GameAssetsUrls\"].append(self.readString())\n\n EventUrlCount = self.readInt()\n fields[\"EventAssetsUrls\"] = []\n for i in range(EventUrlCount):\n fields[\"EventAssetsUrls\"].append(self.readString())\n\n fields[\"SecondsUntilAccountDeletion\"] = self.readVInt()\n fields[\"SupercellIDToken\"] = self.readCompressedString()\n fields[\"IsSupercellIDLogoutAllDevicesAllowed\"] = self.readBoolean()\n fields[\"isSupercellIDEligible\"] = self.readBoolean()\n fields[\"LineID\"] = self.readString()\n fields[\"SessionID\"] = self.readString()\n fields[\"KakaoID\"] = self.readString()\n fields[\"UpdateURL\"] = self.readString()\n fields[\"YoozooPayNotifyUrl\"] = self.readString()\n fields[\"UnbotifyEnabled\"] = self.readBoolean()\n\n Unknown1 = self.readBoolean()\n fields[\"Unknown1\"] = Unknown1\n if Unknown1:\n fields[\"Unknown2\"] = self.readString()\n\n Unknown3 = self.readBoolean()\n fields[\"Unknown3\"] = Unknown1\n 
if Unknown3:\n fields[\"Unknown4\"] = self.readString()\n\n Unknown5 = self.readBoolean()\n fields[\"Unknown5\"] = Unknown1\n if Unknown5:\n fields[\"Unknown6\"] = self.readString()\n\n Unknown7 = self.readBoolean()\n fields[\"Unknown7\"] = Unknown1\n if Unknown7:\n fields[\"Unknown8\"] = self.readString()\n super().decode(fields)\n return fields\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 20104\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "OutOfSyncMessage", "path": "Heart/Packets/Server/Authentification/OutOfSyncMessage.py", "snippet": "class OutOfSyncMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n self.writeVInt(fields[\"ServerChecksum\"])\n self.writeVInt(fields[\"ClientChecksum\"])\n self.writeVInt(fields[\"Tick\"])\n\n def decode(self):\n fields = {}\n fields[\"ServerChecksum\"] = self.readVInt()\n fields[\"ClientChecksum\"] = self.readVInt()\n fields[\"Tick\"] = self.readVInt()\n super().decode(fields)\n return fields\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 24104\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "ServerHelloMessage", "path": "Heart/Packets/Server/Authentification/ServerHelloMessage.py", "snippet": "class ServerHelloMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n self.writeBytes(urandom(24), 24)\n\n def decode(self):\n fields = {}\n fields[\"Random\"] = self.readBytesWithoutLength()\n super().decode(fields)\n return fields\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 20100\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "BattleEndMessage", "path": "Heart/Packets/Server/Battle/BattleEndMessage.py", "snippet": "class BattleEndMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields, player):\n self.writeLong(0, 0) # Battle UUID High\n self.writeLong(0, 0) # Battle UUID Low\n self.writeVInt(2) # Battle End Game Mode (gametype)\n self.writeVInt(fields[\"Rank\"]) # Result (Victory/Defeat/Draw/Rank Score)\n self.writeVInt(0) # Tokens Gained (Gained Keys)\n self.writeVInt(0) # Trophies Result (Metascore change)\n self.writeVInt(0) # Power Play Points Gained (Pro League Points)\n self.writeVInt(0) # Doubled Tokens (Double Keys)\n self.writeVInt(0) # Double Token Event (Double Event Keys)\n self.writeVInt(0) # Token Doubler Remaining (Double Keys Remaining)\n self.writeVInt(0) # game Lenght In Seconds\n self.writeVInt(0) # Epic Win Power Play Points Gained (op Win Points)\n self.writeVInt(0) # Championship Level Reached (CC Wins)\n self.writeBoolean(False)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeBoolean(True)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeVInt(-1)\n self.writeBoolean(False)\n\n self.writeVInt(fields[\"HeroesCount\"])\n for heroEntry in fields[\"Heroes\"]:\n self.writeBoolean(heroEntry[\"IsPlayer\"])\n 
self.writeBoolean(bool(heroEntry[\"Team\"]))\n self.writeBoolean(bool(heroEntry[\"Team\"]))\n self.writeByte(1)\n for i in range(1):\n self.writeDataReference(heroEntry[\"Brawler\"][\"ID\"][0], heroEntry[\"Brawler\"][\"ID\"][1])\n self.writeByte(1)\n for i in range(1):\n if (heroEntry[\"Brawler\"][\"SkinID\"] is None):\n self.writeVInt(0)\n else:\n self.writeDataReference(heroEntry[\"Brawler\"][\"SkinID\"][0], heroEntry[\"Brawler\"][\"SkinID\"][1])\n self.writeByte(1)\n for i in range(1):\n self.writeVInt(1250)\n self.writeByte(1)\n for i in range(1):\n self.writeVInt(11)\n self.writeByte(1)\n for i in range(1):\n self.writeVInt(0)\n\n self.writeVInt(0)\n self.writeVInt(0)\n\n self.writeBoolean(heroEntry[\"IsPlayer\"])\n if heroEntry[\"IsPlayer\"]:\n self.writeLong(player.ID[0], player.ID[1])\n self.writeString(heroEntry[\"PlayerName\"])\n self.writeVInt(100)\n self.writeVInt(28000000)\n self.writeVInt(43000000)\n self.writeVInt(-2)\n if heroEntry[\"IsPlayer\"]:\n self.writeBoolean(True)\n self.writeVLong(5, 4181497)\n self.writeString('haccer club')\n self.writeDataReference(8, 16)\n else:\n self.writeBoolean(False)\n\n self.writeInt8(1)\n self.writeVInt(5978)\n self.writeInt8(1)\n self.writeVInt(0)\n\n self.writeInt16(5)\n self.writeInt16(3)\n self.writeInt(27328)\n self.writeInt(25659)\n\n self.writeDataReference(0)\n\n self.writeVInt(0)\n self.writeVInt(1)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False) # 0x0\n self.writeBoolean(False) # 0x0\n self.writeBoolean(False) # 0x0\n self.writeBoolean(False) # 0x0\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False) # 0x0\n self.writeVInt(0)\n self.writeBoolean(False) # 0x0\n self.writeVInt(0)\n self.writeBoolean(False) # 0x0\n self.writeBoolean(False) # 0x0\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False) # 0x0\n self.writeBoolean(False) # 0x0\n self.writeBoolean(False) # 0x0\n\n def decode(self):\n fields = {}\n return {}\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 23456\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "AvailableServerCommandMessage", "path": "Heart/Packets/Server/Home/AvailableServerCommandMessage.py", "snippet": "class AvailableServerCommandMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields, player):\n self.writeVInt(fields[\"Command\"][\"ID\"])\n command = LogicCommandManager.createCommand(fields[\"Command\"][\"ID\"], self.messagePayload)\n self.messagePayload = command.encode(fields)\n\n def decode(self):\n return {}\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 24111\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "LobbyInfoMessage", "path": "Heart/Packets/Server/Home/LobbyInfoMessage.py", "snippet": "class LobbyInfoMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields, player):\n self.writeVInt(ClientsManager.GetCount())\n self.writeString(f\"\"\"Version: 
{player.ClientVersion}\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\"\"\")\n self.writeVInt(0) # count event\n self.writeVInt(0) # new timer in v51\n\n def decode(self):\n fields = {}\n fields[\"PlayerCount\"] = self.readVInt()\n fields[\"Text\"] = self.readString()\n fields[\"Unk1\"] = self.readVInt()\n super().decode(fields)\n return {}\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 23457\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "OwnHomeDataMessage", "path": "Heart/Packets/Server/Home/OwnHomeDataMessage.py", "snippet": "class OwnHomeDataMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields, player):\n self.writeVInt(1688816070)\n self.writeVInt(1191532375)\n self.writeVInt(2023189)\n self.writeVInt(73530)\n\n self.writeVInt(player.Trophies)\n self.writeVInt(player.HighestTrophies)\n self.writeVInt(player.HighestTrophies) \n self.writeVInt(player.TrophyRoadTier)\n self.writeVInt(player.Experience)\n self.writeDataReference(28, player.Thumbnail)\n self.writeDataReference(43, player.Namecolor)\n\n self.writeVInt(26)\n for x in range(26):\n self.writeVInt(x)\n\n self.writeVInt(0)\n\n self.writeVInt(0)\n\n self.writeVInt(0)\n \n self.writeVInt(len(player.OwnedSkins))\n for x in player.OwnedSkins:\n self.writeDataReference(29, x)\n\n self.writeVInt(0)\n\n self.writeVInt(0)\n\n self.writeVInt(0)\n self.writeVInt(player.HighestTrophies)\n self.writeVInt(0)\n self.writeVInt(2)\n self.writeBoolean(True)\n self.writeVInt(0)\n self.writeVInt(115)\n self.writeVInt(335442)\n self.writeVInt(1001442)\n self.writeVInt(5778642) \n\n self.writeVInt(120)\n self.writeVInt(200)\n self.writeVInt(0)\n\n self.writeBoolean(True)\n self.writeVInt(2)\n self.writeVInt(2)\n self.writeVInt(2)\n self.writeVInt(0)\n self.writeVInt(0)\n\n self.writeVInt(1) # Shop Offers\n\n self.writeVInt(1) # RewardCount\n\n self.writeVInt(38) # ItemType\n self.writeVInt(1337) # Amount\n self.writeDataReference(0) # CsvID\n self.writeVInt(0) # SkinID\n\n self.writeVInt(0) # Currency(0-Gems, 1-Gold, 3-StarpoInts)\n self.writeVInt(0) # Cost\n self.writeVInt(0) # Time\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False) # Daily Offer\n self.writeVInt(0) # Old price\n self.writeString('Offer') # Text\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeString(\"offer_bgr_xmas23\") # Background\n self.writeVInt(0)\n self.writeBoolean(False) # This purchase is already being processed\n self.writeVInt(0) # Type Benefit\n self.writeVInt(0) # Benefit\n self.writeString()\n self.writeBoolean(False) # One time offer\n self.writeBoolean(False) # Claimed\n self.writeDataReference(0)\n self.writeDataReference(0)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n \n self.writeVInt(20)\n 
self.writeVInt(1428)\n\n self.writeVInt(0)\n\n self.writeVInt(1)\n self.writeVInt(30)\n\n self.writeByte(1) # count brawlers selected\n self.writeDataReference(16, player.SelectedBrawlers[0]) # selected brawler\n self.writeString(player.Region) # location\n self.writeString(player.ContentCreator) # supported creator\n\n self.writeVInt(6) \n self.writeVInt(1) \n self.writeVInt(9) \n self.writeVInt(1) \n self.writeVInt(22) \n self.writeVInt(3) \n self.writeVInt(25) \n self.writeVInt(1) \n self.writeVInt(24) \n self.writeVInt(0)\n self.writeVInt(15)\n self.writeVInt(32447)\n self.writeVInt(28)\n\n\n self.writeVInt(0)\n\n self.writeVInt(1)\n for season in range(1):\n self.writeVInt(22-1)\n self.writeVInt(40000)\n self.writeBoolean(True)\n self.writeVInt(0)\n self.writeBoolean(False)\n self.writeBoolean(True)\n self.writeInt(0)\n self.writeInt(0)\n self.writeInt(0)\n self.writeInt(0)\n self.writeBoolean(True)\n self.writeInt(0)\n self.writeInt(0)\n self.writeInt(0)\n self.writeInt(0)\n self.writeBoolean(True)\n self.writeBoolean(True)\n self.writeInt(0)\n self.writeInt(0)\n self.writeInt(0)\n self.writeInt(0)\n\n self.writeVInt(0)\n\n self.writeBoolean(True)\n self.writeVInt(0)\n self.writeVInt(1)\n self.writeVInt(2)\n self.writeVInt(0) \n\n self.writeBoolean(True) # Vanity items\n self.writeVInt(len(player.OwnedThumbnails)+len(player.OwnedPins))\n for x in player.OwnedThumbnails:\n self.writeVInt(28)\n self.writeVInt(x)\n self.writeVInt(0)\n for x in player.OwnedPins:\n self.writeVInt(52)\n self.writeVInt(x)\n self.writeVInt(0)\n\n\n self.writeBoolean(False) # Power league season data\n\n self.writeInt(0)\n self.writeVInt(0)\n self.writeVInt(16)\n self.writeVInt(76)\n self.writeBoolean(False)\n self.writeVInt(0)\n self.writeVInt(0)\n\n self.writeVInt(2023189)\n\n self.writeVInt(35) # event slot id\n self.writeVInt(1)\n self.writeVInt(2)\n self.writeVInt(3)\n self.writeVInt(4)\n self.writeVInt(5)\n self.writeVInt(6)\n self.writeVInt(7)\n self.writeVInt(8)\n self.writeVInt(9)\n self.writeVInt(10)\n self.writeVInt(11)\n self.writeVInt(12)\n self.writeVInt(13) \n self.writeVInt(14)\n self.writeVInt(15)\n self.writeVInt(16)\n self.writeVInt(17)\n self.writeVInt(18) \n self.writeVInt(19)\n self.writeVInt(20)\n self.writeVInt(21) \n self.writeVInt(22)\n self.writeVInt(23)\n self.writeVInt(24)\n self.writeVInt(25)\n self.writeVInt(26)\n self.writeVInt(27)\n self.writeVInt(28)\n self.writeVInt(29)\n self.writeVInt(30)\n self.writeVInt(31)\n self.writeVInt(32)\n self.writeVInt(33)\n self.writeVInt(34)\n self.writeVInt(35)\n\n self.writeVInt(1)\n\n self.writeVInt(4)\n self.writeVInt(7)\n self.writeVInt(1)\n self.writeVInt(0)\n self.writeVInt(72292)\n self.writeVInt(10) \n self.writeDataReference(15, 21) # map id\n self.writeVInt(-1)\n self.writeVInt(2)\n self.writeString(\"\")\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False) # MapMaker map structure array\n self.writeVInt(0)\n self.writeBoolean(False) # Power League array entry\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeVInt(-1)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeVInt(-1)\n self.writeVInt(0) \n self.writeVInt(0) \n self.writeVInt(0) \n self.writeBoolean(False) \n\n self.writeVInt(0)\n \n ByteStreamHelper.encodeIntList(self, [20, 35, 75, 140, 290, 480, 800, 1250, 1875, 2800])\n ByteStreamHelper.encodeIntList(self, [30, 80, 170, 360]) # Shop 
Coins Price\n ByteStreamHelper.encodeIntList(self, [300, 880, 2040, 4680]) # Shop Coins Amount\n\n self.writeVInt(0) \n\n self.writeVInt(1)\n self.writeVInt(41000086) # theme\n self.writeVInt(1)\n\n self.writeVInt(0) \n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n\n self.writeVInt(2)\n self.writeVInt(1)\n self.writeVInt(2)\n self.writeVInt(2)\n self.writeVInt(1)\n self.writeVInt(-1)\n self.writeVInt(2)\n self.writeVInt(1)\n self.writeVInt(4)\n\n ByteStreamHelper.encodeIntList(self, [0, 29, 79, 169, 349, 699])\n ByteStreamHelper.encodeIntList(self, [0, 160, 450, 500, 1250, 2500])\n\n self.writeLong(0, 1) # Player ID\n\n self.writeVInt(0) # Notification factory\n \n self.writeVInt(1)\n self.writeBoolean(False)\n self.writeVInt(0)\n self.writeVInt(0) \n self.writeVInt(0)\n self.writeBoolean(False) # Login Calendar\n self.writeVInt(0)\n self.writeBoolean(True) # Starr Road\n for i in range(7):\n self.writeVInt(0)\n\n self.writeVInt(0) # Mastery\n\n #BattleCard\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeBoolean(False)\n\n self.writeVInt(0) #Brawler's BattleCards\n\n self.writeVInt(5)\n for i in range(5):\n self.writeDataReference(80, i)\n self.writeVInt(-1)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeInt(0)\n self.writeVInt(0) \n self.writeVInt(0)\n self.writeVInt(86400*24)\n self.writeVInt(0)\n self.writeVInt(0)\n\n self.writeBoolean(False)\n\n # end LogicClientHome\n\n self.writeVLong(player.ID[0], player.ID[1])\n self.writeVLong(player.ID[0], player.ID[1])\n self.writeVLong(player.ID[0], player.ID[1])\n self.writeStringReference(player.Name)\n self.writeBoolean(player.Registered)\n self.writeInt(-1)\n\n self.writeVInt(17)\n unlocked_brawler = [i['CardID'] for x,i in player.OwnedBrawlers.items()]\n self.writeVInt(len(unlocked_brawler) + 2)\n for x in unlocked_brawler:\n self.writeDataReference(23, x)\n self.writeVInt(-1)\n self.writeVInt(1)\n\n self.writeDataReference(5, 8)\n self.writeVInt(-1)\n self.writeVInt(player.Coins)\n\n self.writeDataReference(5, 23)\n self.writeVInt(-1)\n self.writeVInt(player.Blings)\n\n self.writeVInt(len(player.OwnedBrawlers)) # HeroScore\n for x,i in player.OwnedBrawlers.items():\n self.writeDataReference(16, x)\n self.writeVInt(-1)\n self.writeVInt(i[\"Trophies\"])\n\n self.writeVInt(len(player.OwnedBrawlers)) # HeroHighScore\n for x,i in player.OwnedBrawlers.items():\n self.writeDataReference(16, x)\n self.writeVInt(-1)\n self.writeVInt(i[\"HighestTrophies\"])\n\n self.writeVInt(0) # Array\n\n self.writeVInt(0) # HeroPower\n \n self.writeVInt(len(player.OwnedBrawlers)) # HeroLevel\n for x,i in player.OwnedBrawlers.items():\n self.writeDataReference(16, x)\n self.writeVInt(-1)\n self.writeVInt(i[\"PowerLevel\"]-1)\n\n self.writeVInt(0) # hero star power gadget and hypercharge\n\n self.writeVInt(len(player.OwnedBrawlers)) # HeroSeenState\n for x,i in player.OwnedBrawlers.items():\n self.writeDataReference(16, x)\n self.writeVInt(-1)\n self.writeVInt(2)\n\n self.writeVInt(0) # Array\n self.writeVInt(0) # Array\n self.writeVInt(0) # Array\n self.writeVInt(0) # Array\n self.writeVInt(0) # Array\n self.writeVInt(0) # Array\n self.writeVInt(0) # Array\n self.writeVInt(0) # Array\n self.writeVInt(0) # Array\n\n self.writeVInt(player.Gems) # Diamonds\n self.writeVInt(player.Gems) # Free Diamonds\n self.writeVInt(10) # Player Level\n self.writeVInt(100)\n self.writeVInt(0) # 
CumulativePurchasedDiamonds or Avatar User Level Tier | 10000 < Level Tier = 3 | 1000 < Level Tier = 2 | 0 < Level Tier = 1\n self.writeVInt(100) # Battle Count\n self.writeVInt(10) # WinCount\n self.writeVInt(80) # LoseCount\n self.writeVInt(50) # WinLooseStreak\n self.writeVInt(20) # NpcWinCount\n self.writeVInt(0) # NpcLoseCount\n self.writeVInt(2) # TutorialState | shouldGoToFirstTutorialBattle = State == 0\n self.writeVInt(12)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeString()\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(1)\n\n def decode(self):\n fields = {}\n return fields\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 24101\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "KeepAliveServerMessage", "path": "Heart/Packets/Server/Socket/KeepAliveServerMessage.py", "snippet": "class KeepAliveServerMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n pass\n\n def decode(self):\n return {}\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 20108\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "PlayerProfileMessage", "path": "Heart/Packets/Server/Home/PlayerProfileMessage.py", "snippet": "class PlayerProfileMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields, player):\n self.writeVLong(fields[\"PlayerHighID\"], fields[\"PlayerLowID\"])\n self.writeDataReference(16,11) # \n self.writeVInt(70)\n for i in range(70):\n self.writeDataReference(16, i)\n self.writeDataReference(0)\n self.writeVInt(500) # trophies\n self.writeVInt(1250) # highestTrophies\n self.writeVInt(11) #power level\n \n self.writeVInt(18)\n\n self.writeVInt(1) \n self.writeVInt(1) # 3v3 victories\n\n self.writeVInt(2)\n self.writeVInt(528859) # total exp\n\n self.writeVInt(3)\n self.writeVInt(3) # current trophies\n\n self.writeVInt(4)\n self.writeVInt(4) # highest trophies\n\n self.writeVInt(5) \n self.writeVInt(5) # unlocked brawler?\n\n self.writeVInt(8)\n self.writeVInt(6) # solo victories\n\n self.writeVInt(11) \n self.writeVInt(7) # duo victories\n\n self.writeVInt(9) \n self.writeVInt(8) # highest level robo rumble\n\n self.writeVInt(12) \n self.writeVInt(9) # highest level boss fight\n\n self.writeVInt(13)\n self.writeVInt(10) # highest power league points\n\n self.writeVInt(14)\n self.writeVInt(11) # some power league stuff\n\n self.writeVInt(15)\n self.writeVInt(12) # most challenge win\n\n self.writeVInt(16) #highest level city rampage\n self.writeVInt(13)\n\n self.writeVInt(18) #highest solo power league rank\n self.writeVInt(14)\n\n self.writeVInt(17) #highest team power league rank\n self.writeVInt(15)\n\n self.writeVInt(19) # highest Club league rank\n self.writeVInt(16)\n\n self.writeVInt(20) # number fame\n self.writeVInt(1000)\n\n self.writeVInt(21)\n self.writeVInt(502052) #v50\n\n self.writeString(player.Name) #PlayerInfo\n self.writeVInt(100)\n self.writeVInt(28000000 + player.Thumbnail)\n self.writeVInt(43000000 + player.Namecolor)\n self.writeVInt(14)\n\n self.writeBoolean(True)\n self.writeVInt(300)\n\n self.writeString(\"hello world\")\n self.writeVInt(100)\n self.writeVInt(200)\n self.writeDataReference(29, 558)\n self.writeDataReference(0)\n self.writeDataReference(0)\n self.writeDataReference(0)\n 
self.writeDataReference(0)\n\n self.writeBoolean(True) #alliance\n self.writeLong(0,1) #alliance ID\n self.writeString(\"haccers\") #alliance name\n self.writeDataReference(8,1) # alliance icon\n self.writeVInt(1) # type\n self.writeVInt(1) # member count\n self.writeVInt(10000) # total trophies\n self.writeVInt(1) # minimum trophies to enter\n self.writeDataReference(0)\n self.writeString(\"RU\") #location\n self.writeVInt(4) # unknown\n self.writeBoolean(True) #is Family friendly\n self.writeVInt(0)\n \n\n self.writeDataReference(25, 1) #alliance role\n self.writeVInt(16)\n\n def decode(self):\n pass\n # fields = {}\n # fields[\"PlayerCount\"] = self.readVInt()\n # fields[\"Text\"] = self.readString()\n # fields[\"Unk1\"] = self.readVInt()\n # super().decode(fields)\n return {}\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 24113\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "MyAllianceMessage", "path": "Heart/Packets/Server/Home/MyAllianceMessage.py", "snippet": "class MyAllianceMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields, player):\n self.writeVInt(1) # Online people in alliance\n self.writeBoolean(True) # isInAlliance\n self.writeDataReference(25, 4)\n self.writeLong(0, 1) # alliance ID\n self.writeString(player.ContentCreator) # alliance name\n self.writeDataReference(8, 37) # alliance icon\n self.writeVInt(3) # type\n self.writeVInt(1) # member count\n self.writeVInt(9500) # total trophies\n self.writeVInt(1) # minimum trophies to enter\n self.writeVInt(0) # 0\n self.writeString('RU') # location\n self.writeVInt(3) # unknown\n self.writeBoolean(True) # isFamilyFriendly\n self.writeVInt(0)\n\n def decode(self):\n fields = {}\n super().decode(fields)\n return {}\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 24399\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "AllianceDataMessage", "path": "Heart/Packets/Server/Home/AllianceDataMessage.py", "snippet": "class AllianceDataMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields, player):\n self.writeBoolean(True)\n\n self.writeLong(0, 1) # alliance ID\n self.writeString(player.ContentCreator) # alliance name\n self.writeDataReference(8, 37) # alliance icon\n self.writeVInt(1) # type\n self.writeVInt(1) # member count\n self.writeVInt(player.Trophies) # total trophies\n self.writeVInt(0) # minimum trophies to enter\n self.writeVInt(0) # 0\n self.writeString('RU') # location\n self.writeVInt(1) # people online\n self.writeBoolean(True) # isFamilyFriendly\n self.writeVInt(0)\n\n self.writeString(\"this is the hacciest club in the world\")\n\n self.writeVInt(1) # member count\n self.writeLong(player.ID[0], player.ID[1]) # player ID\n self.writeVInt(2) # role\n self.writeVInt(player.Trophies) # trophies\n self.writeVInt(0) # status: 0=offline 2=online\n self.writeVInt(1) # last connected time seconds ?\n highestPowerLeagueRank = 2\n self.writeVInt(highestPowerLeagueRank)\n if highestPowerLeagueRank != 0:\n self.writeVInt(2) #solo\n self.writeVInt(1) #duo\n self.writeBoolean(False) # boolean always false?\n\n self.writeString(player.Name) # player name\n self.writeVInt(100) # VInt always 100\n self.writeVInt(28000000 + player.Thumbnail) # thumbnail\n self.writeVInt(43000000 + 
player.Namecolor) # name color\n self.writeVInt(46000000 + player.Namecolor)\n\n self.writeVInt(-1) # most people have it -1 but some with something\n self.writeBoolean(False) # whats this ? only 2/30 people have it true in my club\n week = 58 # week 58 of club league as of 2023/07/05, this number is 0 if you just arrived in the club\n self.writeVInt(week)\n if week != 0: # club league week number?\n self.writeVInt(3) # day\n self.writeVInt(18) # total club trophies earned\n self.writeVInt(0) # event day club trophies earned\n self.writeVInt(8) # total tickets used\n self.writeVInt(0) # event day tickets used\n self.writeVInt(6) # event day max tickets\n self.writeVInt(6) # event day tickets left\n self.writeVInt(0) # event day player ranking\n self.writeBoolean(True) # everyone have it to true\n self.writeVInt(200) # player experience lvl but why tf it doesn't show for some people\n\n def decode(self):\n fields = {}\n super().decode(fields)\n return {}\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 24301\n\n def getMessageVersion(self):\n return self.messageVersion" } ]
from Heart.Packets.Client.Authentification.ClientHelloMessage import ClientHelloMessage from Heart.Packets.Client.Authentification.LoginMessage import LoginMessage from Heart.Packets.Client.Battle.AskForBattleEndMessage import AskForBattleEndMessage from Heart.Packets.Client.Home.ChangeAvatarNameMessage import ChangeAvatarNameMessage from Heart.Packets.Client.Home.EndClientTurnMessage import EndClientTurnMessage from Heart.Packets.Client.Home.GoHomeFromOfflinePractiseMessage import GoHomeFromOfflinePractiseMessage from Heart.Packets.Client.Home.GoHomeMessage import GoHomeMessage from Heart.Packets.Client.Home.GetPlayerProfileMessage import GetPlayerProfileMessage from Heart.Packets.Client.Home.AskForAllianceDataMessage import AskForAllianceDataMessage from Heart.Packets.Client.Socket.KeepAliveMessage import KeepAliveMessage from Heart.Packets.Server.Authentification.LoginFailedMessage import LoginFailedMessage from Heart.Packets.Server.Authentification.LoginOkMessage import LoginOkMessage from Heart.Packets.Server.Authentification.OutOfSyncMessage import OutOfSyncMessage from Heart.Packets.Server.Authentification.ServerHelloMessage import ServerHelloMessage from Heart.Packets.Server.Battle.BattleEndMessage import BattleEndMessage from Heart.Packets.Server.Home.AvailableServerCommandMessage import AvailableServerCommandMessage from Heart.Packets.Server.Home.LobbyInfoMessage import LobbyInfoMessage from Heart.Packets.Server.Home.OwnHomeDataMessage import OwnHomeDataMessage from Heart.Packets.Server.Socket.KeepAliveServerMessage import KeepAliveServerMessage from Heart.Packets.Server.Home.PlayerProfileMessage import PlayerProfileMessage from Heart.Packets.Server.Home.MyAllianceMessage import MyAllianceMessage from Heart.Packets.Server.Home.AllianceDataMessage import AllianceDataMessage
13,986
class LogicLaserMessageFactory: messagesList = { 10055: 'AskPlayerJWTokenMessage', 10099: 'ClientCryptoErrorMessage', 10100: ClientHelloMessage, 10101: LoginMessage, 10102: 'LoginUsingSessionMessage', 10103: 'CreateAccountMessage', 10107: 'ClientCapabilitiesMessage',
class LogicLaserMessageFactory: messagesList = { 10055: 'AskPlayerJWTokenMessage', 10099: 'ClientCryptoErrorMessage', 10100: ClientHelloMessage, 10101: LoginMessage, 10102: 'LoginUsingSessionMessage', 10103: 'CreateAccountMessage', 10107: 'ClientCapabilitiesMessage',
10108: KeepAliveMessage,
9
2023-12-14 18:57:56+00:00
16k
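The record above is a next-line prediction example: cropped_code stops inside the messagesList dictionary of LogicLaserMessageFactory, next_line supplies the missing entry (10108: KeepAliveMessage,), and gold_snippet_index 9 points at the KeepAliveMessage snippet in the context list. To make the dispatch pattern behind this record easier to follow, here is a minimal Python sketch of an ID-to-class message factory; the simplified PiranhaMessage base class and the create_message helper are hypothetical stand-ins written for illustration, not the repository's actual implementation.

# Minimal, self-contained sketch of the ID -> handler-class dispatch that the
# record's cropped_code builds in `messagesList`. Everything below is a
# simplified, hypothetical stand-in for illustration, not the actual classes
# from Heart/Packets.

class PiranhaMessage:
    """Stand-in base class; the real one wraps a readable/writable byte stream."""

    def __init__(self, message_data: bytes):
        self.message_data = message_data

    def decode(self) -> dict:
        return {}

    def getMessageType(self) -> int:
        raise NotImplementedError


class ClientHelloMessage(PiranhaMessage):
    def getMessageType(self) -> int:
        return 10100


class KeepAliveMessage(PiranhaMessage):
    def getMessageType(self) -> int:
        return 10108


class LogicLaserMessageFactory:
    # Entries still given as strings mark packets that are known but not yet
    # implemented, mirroring the convention visible in the record.
    messagesList = {
        10100: ClientHelloMessage,
        10101: 'LoginMessage',
        10108: KeepAliveMessage,  # the record's gold next_line entry
    }

    @classmethod
    def create_message(cls, message_type: int, message_data: bytes):
        """Return an instance of the handler registered for message_type, or None."""
        handler = cls.messagesList.get(message_type)
        if isinstance(handler, type) and issubclass(handler, PiranhaMessage):
            return handler(message_data)
        return None


if __name__ == "__main__":
    msg = LogicLaserMessageFactory.create_message(10108, b"")
    print(type(msg).__name__, msg.getMessageType())  # -> KeepAliveMessage 10108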
pan-x-c/EE-LLM
megatron/core/models/gpt/gpt_layer_specs.py
[ { "identifier": "get_bias_dropout_add", "path": "megatron/core/fusions/fused_bias_dropout.py", "snippet": "def get_bias_dropout_add(training, fused):\n if fused:\n # jit scripting for a nn.module (with dropout) is not\n # triggering the fusion kernel. For now, we use two\n # different nn.functional routines to account for varying\n # dropout semantics during training and inference phases.\n if training:\n return bias_dropout_add_fused_train\n else:\n return bias_dropout_add_fused_inference\n else:\n return bias_dropout_add_unfused(training)" }, { "identifier": "FusedLayerNorm", "path": "megatron/core/fusions/fused_layer_norm.py", "snippet": "class FusedLayerNorm(torch.nn.Module):\n def __init__(\n self,\n hidden_size,\n eps=1e-5,\n persist_layer_norm=True,\n sequence_parallel=False,\n zero_centered_gamma=False,\n normalization=\"LayerNorm\",\n ):\n super().__init__()\n\n self.zero_centered_gamma = zero_centered_gamma\n self.normalization = normalization\n assert normalization == \"LayerNorm\", '({}) is not supported in ' 'FusedLayerNorm'.format(\n normalization\n )\n\n # List of hiddens sizes supported in the persistent layer norm kernel\n # If the hidden size is not supported, fall back to the non-persistent\n # kernel.\n persist_ln_hidden_sizes = [\n 1024,\n 1536,\n 2048,\n 2304,\n 3072,\n 3840,\n 4096,\n 5120,\n 6144,\n 8192,\n 10240,\n 12288,\n 12800,\n 15360,\n 16384,\n 18432,\n 20480,\n 24576,\n 25600,\n 30720,\n 32768,\n 40960,\n 49152,\n 65536,\n ]\n if hidden_size not in persist_ln_hidden_sizes or not HAVE_PERSIST_LAYER_NORM:\n persist_layer_norm = False\n\n if not persist_layer_norm and not HAVE_FUSED_LAYER_NORM:\n # TODO: Add pytorch only layer norm\n raise ValueError(f'Apex must currently be installed to use megatron core.')\n\n if isinstance(hidden_size, numbers.Integral):\n hidden_size = (hidden_size,)\n self.hidden_size = torch.Size(hidden_size)\n self.eps = eps\n self.weight = Parameter(torch.Tensor(*hidden_size))\n self.bias = Parameter(torch.Tensor(*hidden_size))\n self.reset_parameters()\n self.persist_layer_norm = persist_layer_norm\n self.sequence_parallel = sequence_parallel\n\n # set sequence parallelism flag on weight and bias parameters\n setattr(self.weight, 'sequence_parallel', self.sequence_parallel)\n setattr(self.bias, 'sequence_parallel', self.sequence_parallel)\n\n def reset_parameters(self):\n\n if self.zero_centered_gamma:\n init.zeros_(self.weight)\n init.zeros_(self.bias)\n else:\n init.ones_(self.weight)\n init.zeros_(self.bias)\n\n def forward(self, input):\n\n weight = self.weight + 1 if self.zero_centered_gamma else self.weight\n\n if self.persist_layer_norm:\n output = FastLayerNormFN.apply(input, weight, self.bias, self.eps)\n\n # Apex's fast layer norm function outputs a 'view' tensor (i.e., has\n # a populated '_base' field). This will result in schedule.py's\n # deallocate_output_tensor() throwing an error, so a viewless tensor is\n # created to prevent this.\n output = make_viewless_tensor(\n inp=output, requires_grad=input.requires_grad, keep_graph=True\n )\n\n else:\n output = FusedLayerNormAffineFunction.apply(\n input, weight, self.bias, self.hidden_size, self.eps\n )\n\n return output" }, { "identifier": "ColumnParallelLinear", "path": "megatron/core/tensor_parallel/layers.py", "snippet": "class ColumnParallelLinear(torch.nn.Module):\n \"\"\"Linear layer with column parallelism.\n\n The linear layer is defined as Y = XA + b. 
A is parallelized along\n its second dimension as A = [A_1, ..., A_p].\n\n Arguments:\n input_size: first dimension of matrix A.\n output_size: second dimension of matrix A.\n\n Keyword Arguments\n bias: If true, add bias\n gather_output: If true, call all-gather on output and make Y available\n to all GPUs, otherwise, every GPU will have its output\n which is Y_i = XA_i\n init_method: method to initialize weights. Note that bias is always set\n to zero.\n stride: For the strided linear layers.\n keep_master_weight_for_test: This was added for testing and should be\n set to False. It returns the master weights\n used for initialization.\n skip_bias_add: If True, do not add the bias term, instead\n return it to be added by the caller. This\n enables performance optimations where bias can\n be fused with other elementwise operations.\n skip_weight_param_allocation: If True, weight parameter is not allocated and must be passed\n as a keyword argument `weight` during the forward pass. Note\n that this does not affect bias, which will be allocated if\n bias is True. Defaults to False.\n is_expert: If True, the layer is treated as an MoE expert layer.\n config: ModelParallelConfig object\n\n \"\"\"\n\n def __init__(\n self,\n input_size,\n output_size,\n *,\n config: ModelParallelConfig,\n init_method: Callable,\n bias=True,\n gather_output=False,\n stride=1,\n keep_master_weight_for_test=False,\n skip_bias_add=False,\n skip_weight_param_allocation: bool = False,\n is_expert: bool = False,\n ):\n super(ColumnParallelLinear, self).__init__()\n\n # Keep input parameters\n self.input_size = input_size\n self.output_size = output_size\n self.gather_output = gather_output\n # Divide the weight matrix along the last dimension.\n world_size = get_tensor_model_parallel_world_size()\n self.output_size_per_partition = divide(output_size, world_size)\n self.skip_bias_add = skip_bias_add\n self.is_expert = is_expert\n self.expert_parallel = config.expert_model_parallel_size > 1\n self.config = config\n\n # Parameters.\n # Note: torch.nn.functional.linear performs XA^T + b and as a result\n # we allocate the transpose.\n # Initialize weight.\n if not skip_weight_param_allocation:\n if config.use_cpu_initialization:\n self.weight = Parameter(\n torch.empty(\n self.output_size_per_partition, self.input_size, dtype=config.params_dtype\n )\n )\n if config.perform_initialization:\n self.master_weight = _initialize_affine_weight_cpu(\n self.weight,\n self.output_size,\n self.input_size,\n self.output_size_per_partition,\n 0,\n init_method,\n stride=stride,\n return_master_weight=keep_master_weight_for_test,\n )\n else:\n self.weight = Parameter(\n torch.empty(\n self.output_size_per_partition,\n self.input_size,\n device=torch.cuda.current_device(),\n dtype=config.params_dtype,\n )\n )\n if config.perform_initialization:\n _initialize_affine_weight_gpu(\n self.weight,\n init_method,\n partition_dim=0,\n stride=stride,\n expert_parallel=(self.is_expert and self.expert_parallel),\n )\n\n setattr(self.weight, 'allreduce', not (self.is_expert and self.expert_parallel))\n else:\n self.weight = None\n\n if bias:\n if config.use_cpu_initialization:\n self.bias = Parameter(\n torch.empty(self.output_size_per_partition, dtype=config.params_dtype)\n )\n else:\n self.bias = Parameter(\n torch.empty(\n self.output_size_per_partition,\n device=torch.cuda.current_device(),\n dtype=config.params_dtype,\n )\n )\n set_tensor_model_parallel_attributes(self.bias, True, 0, stride)\n if config.perform_initialization:\n # Always 
initialize bias to zero.\n with torch.no_grad():\n self.bias.zero_()\n setattr(self.bias, 'allreduce', not (self.is_expert and self.expert_parallel))\n else:\n self.register_parameter('bias', None)\n\n self.async_tensor_model_parallel_allreduce = (\n config.async_tensor_model_parallel_allreduce and world_size > 1\n )\n\n self.sequence_parallel = config.sequence_parallel\n if self.sequence_parallel and world_size <= 1:\n warnings.warn(\n f\"`sequence_parallel` is set to `True`, but tensor model parallel size is {world_size}. \"\n f\"Disabling sequence parallel.\"\n )\n self.sequence_parallel = False\n\n if config.gradient_accumulation_fusion and not _grad_accum_fusion_available:\n raise RuntimeError(\n \"ColumnParallelLinear was called with gradient_accumulation_fusion set \"\n \"to True but the custom CUDA extension fused_weight_gradient_mlp_cuda \"\n \"module is not found. To use gradient_accumulation_fusion you must \"\n \"install APEX with --cpp_ext and --cuda_ext. For example: \"\n \"pip install --global-option=\\\"--cpp_ext\\\" --global-option=\\\"--cuda_ext .\\\" \"\n \"Note that the extension requires CUDA>=11. Otherwise, you must turn off \"\n \"gradient accumulation fusion.\"\n )\n self.gradient_accumulation_fusion = config.gradient_accumulation_fusion\n\n if self.async_tensor_model_parallel_allreduce and self.sequence_parallel:\n raise RuntimeError(\n \"`async_tensor_model_parallel_allreduce` and `sequence_parallel` \"\n \"cannot be enabled at the same time.\"\n )\n\n self._forward_impl = linear_with_grad_accumulation_and_async_allreduce\n self.explicit_expert_comm = self.is_expert and (\n self.sequence_parallel or self.expert_parallel\n )\n\n def forward(self, input_: torch.Tensor, weight: Optional[torch.Tensor] = None):\n \"\"\"Forward of ColumnParallelLinear\n\n Args:\n input_: 3D tensor whose order of dimension is [sequence, batch, hidden]\n\n weight (optional): weight tensor to use, compulsory when\n skip_weight_param_allocation is True.\n\n Returns:\n - output\n - bias\n\n \"\"\"\n if weight is None:\n if self.weight is None:\n raise RuntimeError(\n \"weight was not supplied to ColumnParallelLinear forward pass \"\n \"and skip_weight_param_allocation is True.\"\n )\n weight = self.weight\n else:\n # Check the weight passed in is the correct shape\n expected_shape = (self.output_size_per_partition, self.input_size)\n if weight.shape != expected_shape:\n raise RuntimeError(\n f\"supplied weight's shape is {tuple(weight.shape)}, \"\n f\"not {expected_shape} as expected\"\n )\n\n bias = self.bias if not self.skip_bias_add else None\n\n if (\n self.async_tensor_model_parallel_allreduce\n or self.sequence_parallel\n or self.explicit_expert_comm\n ):\n input_parallel = input_\n else:\n input_parallel = copy_to_tensor_model_parallel_region(input_)\n\n # Matrix multiply.\n if not weight.requires_grad:\n self._forward_impl = linear_with_frozen_weight\n else:\n self._forward_impl = linear_with_grad_accumulation_and_async_allreduce\n output_parallel = self._forward_impl(\n input=input_parallel,\n weight=weight,\n bias=bias,\n gradient_accumulation_fusion=self.gradient_accumulation_fusion,\n async_grad_allreduce=False\n if self.explicit_expert_comm\n else self.async_tensor_model_parallel_allreduce,\n sequence_parallel=False if self.explicit_expert_comm else self.sequence_parallel,\n )\n if self.gather_output:\n # All-gather across the partitions.\n assert not self.sequence_parallel\n output = gather_from_tensor_model_parallel_region(output_parallel)\n else:\n output = output_parallel\n 
output_bias = self.bias if self.skip_bias_add else None\n return output, output_bias" }, { "identifier": "RowParallelLinear", "path": "megatron/core/tensor_parallel/layers.py", "snippet": "class RowParallelLinear(torch.nn.Module):\n \"\"\"Linear layer with row parallelism.\n\n The linear layer is defined as Y = XA + b. A is parallelized along\n its first dimension and X along its second dimension as:\n - -\n | A_1 |\n | . |\n A = | . | X = [X_1, ..., X_p]\n | . |\n | A_p |\n - -\n Arguments:\n input_size: first dimension of matrix A.\n output_size: second dimension of matrix A.\n\n Keyword Arguments:\n bias: If true, add bias. Note that bias is not parallelized.\n input_is_parallel: If true, we assume that the input is already\n split across the GPUs and we do not split\n again.\n init_method: method to initialize weights. Note that bias is always set\n to zero.\n stride: For the strided linear layers.\n keep_master_weight_for_test: This was added for testing and should be\n set to False. It returns the master weights\n used for initialization.\n skip_bias_add: If True, do not add the bias term, instead\n return it to be added by the caller. This\n enables performance optimations where bias can\n be fused with other elementwise operations.\n is_expert: If True, the layer is treated as an MoE expert layer\n config: ModelParallelConfig object\n\n \"\"\"\n\n def __init__(\n self,\n input_size: int,\n output_size: int,\n *,\n config: ModelParallelConfig,\n init_method: Callable,\n bias: bool = True,\n input_is_parallel: bool = False,\n stride: int = 1,\n keep_master_weight_for_test: bool = False,\n skip_bias_add: bool = False,\n is_expert: bool = False,\n ):\n super(RowParallelLinear, self).__init__()\n\n # Keep input parameters\n self.input_size = input_size\n self.output_size = output_size\n self.input_is_parallel = input_is_parallel\n # Divide the weight matrix along the last dimension.\n world_size = get_tensor_model_parallel_world_size()\n self.input_size_per_partition = divide(input_size, world_size)\n self.skip_bias_add = skip_bias_add\n self.config = config\n self.is_expert = is_expert\n self.expert_parallel = config.expert_model_parallel_size > 1\n self.gradient_accumulation_fusion = config.gradient_accumulation_fusion\n self.sequence_parallel = config.sequence_parallel\n if self.sequence_parallel and not self.input_is_parallel:\n raise RuntimeError(\"To enable `sequence_parallel`, `input_is_parallel` must be `True`\")\n\n # Parameters.\n # Note: torch.nn.functional.linear performs XA^T + b and as a result\n # we allocate the transpose.\n # Initialize weight.\n if config.use_cpu_initialization:\n self.weight = Parameter(\n torch.empty(\n self.output_size, self.input_size_per_partition, dtype=config.params_dtype\n )\n )\n if config.perform_initialization:\n self.master_weight = _initialize_affine_weight_cpu(\n self.weight,\n self.output_size,\n self.input_size,\n self.input_size_per_partition,\n 1,\n init_method,\n stride=stride,\n return_master_weight=keep_master_weight_for_test,\n params_dtype=config.params_dtype,\n )\n else:\n self.weight = Parameter(\n torch.empty(\n self.output_size,\n self.input_size_per_partition,\n device=torch.cuda.current_device(),\n dtype=config.params_dtype,\n )\n )\n if config.perform_initialization:\n _initialize_affine_weight_gpu(\n self.weight,\n init_method,\n partition_dim=1,\n stride=stride,\n expert_parallel=(self.is_expert and self.expert_parallel),\n )\n setattr(self.weight, 'allreduce', not (self.is_expert and self.expert_parallel))\n\n if 
bias:\n if config.use_cpu_initialization:\n self.bias = Parameter(torch.empty(self.output_size, dtype=config.params_dtype))\n else:\n self.bias = Parameter(\n torch.empty(\n self.output_size,\n device=torch.cuda.current_device(),\n dtype=config.params_dtype,\n )\n )\n\n if config.perform_initialization:\n # Always initialize bias to zero.\n with torch.no_grad():\n self.bias.zero_()\n setattr(self.bias, 'allreduce', not (self.is_expert and self.expert_parallel))\n setattr(self.bias, 'sequence_parallel', self.sequence_parallel)\n else:\n self.register_parameter('bias', None)\n\n self._forward_impl = linear_with_grad_accumulation_and_async_allreduce\n self.explicit_expert_comm = self.is_expert and (\n self.sequence_parallel or self.expert_parallel\n )\n\n def forward(self, input_):\n \"\"\"Forward of RowParallelLinear\n\n Args:\n input_: 3D tensor whose order of dimension is [sequence, batch, hidden]\n\n Returns:\n - output\n - bias\n \"\"\"\n # Set up backprop all-reduce.\n if self.input_is_parallel:\n input_parallel = input_\n else:\n assert not self.sequence_parallel\n input_parallel = scatter_to_tensor_model_parallel_region(input_)\n # Matrix multiply.\n if not self.weight.requires_grad:\n self._forward_impl = linear_with_frozen_weight\n else:\n self._forward_impl = linear_with_grad_accumulation_and_async_allreduce\n output_parallel = self._forward_impl(\n input=input_parallel,\n weight=self.weight,\n bias=None,\n gradient_accumulation_fusion=self.gradient_accumulation_fusion,\n async_grad_allreduce=False,\n sequence_parallel=False,\n )\n\n # All-reduce across all the partitions.\n if self.explicit_expert_comm:\n assert self.skip_bias_add\n output_ = output_parallel\n elif self.sequence_parallel:\n output_ = reduce_scatter_to_sequence_parallel_region(output_parallel)\n else:\n output_ = reduce_from_tensor_model_parallel_region(output_parallel)\n if not self.skip_bias_add:\n output = (output_ + self.bias) if self.bias is not None else output_\n output_bias = None\n else:\n output = output_\n output_bias = self.bias\n return output, output_bias" }, { "identifier": "SelfAttention", "path": "megatron/core/transformer/attention.py", "snippet": "class SelfAttention(Attention):\n \"\"\"Self-attention layer class\n\n Self-attention layer takes input with size [s, b, h]\n and returns output of the same size.\n \"\"\"\n\n def __init__(\n self,\n config: TransformerConfig,\n submodules: SelfAttentionSubmodules,\n layer_number: int = 1,\n attn_mask_type=AttnMaskType.padding,\n **kwargs,\n ):\n super().__init__(\n config=config,\n submodules=submodules,\n layer_number=layer_number,\n attn_mask_type=attn_mask_type,\n **kwargs,\n )\n\n self.linear_qkv = build_module(\n submodules.linear_qkv,\n self.config.hidden_size,\n self.query_projection_size + 2 * self.kv_projection_size,\n config=self.config,\n init_method=self.config.init_method,\n bias=self.config.add_bias_linear,\n skip_bias_add=False,\n )\n\n def get_query_key_value_tensors(self, hidden_states, key_value_states=None):\n \"\"\"\n Derives `query`, `key` and `value` tensors from `hidden_states`.\n \"\"\"\n # Attention heads [sq, b, h] --> [sq, b, ng * (np/ng + 2) * hn)]\n mixed_qkv, _ = self.linear_qkv(hidden_states)\n\n # [sq, b, hp] --> [sq, b, ng, (np/ng + 2) * hn]\n new_tensor_shape = mixed_qkv.size()[:-1] + (\n self.num_query_groups_per_partition,\n (\n (self.num_attention_heads_per_partition // self.num_query_groups_per_partition + 2)\n * self.hidden_size_per_attention_head\n ),\n )\n mixed_qkv = mixed_qkv.view(*new_tensor_shape)\n\n # [sq, 
b, ng, (np/ng + 2) * hn] --> [sq, b, ng, np/ng * hn], [sq, b, ng, hn], [sq, b, ng, hn]\n (query, key, value) = torch.split(\n mixed_qkv,\n [\n (\n self.num_attention_heads_per_partition\n // self.num_query_groups_per_partition\n * self.hidden_size_per_attention_head\n ),\n self.hidden_size_per_attention_head,\n self.hidden_size_per_attention_head,\n ],\n dim=3,\n )\n # [sq, b, ng, np/ng * hn] -> [sq, b, np, hn]\n query = query.reshape(query.size(0), query.size(1), -1, self.hidden_size_per_attention_head)\n\n return query, key, value" }, { "identifier": "SelfAttentionSubmodules", "path": "megatron/core/transformer/attention.py", "snippet": "class SelfAttentionSubmodules:\n linear_qkv: Union[ModuleSpec, type] = None\n dot_product_attention: Union[ModuleSpec, type] = None\n linear_proj: Union[ModuleSpec, type] = None" }, { "identifier": "TEDotProductAttention", "path": "megatron/core/transformer/custom_layers/transformer_engine.py", "snippet": "class TEDotProductAttention(te.pytorch.DotProductAttention):\n \"\"\"\n Wrapper for the Transformer-Engine's `DotProductAttention` layer that also\n has \"flash attention\" enabled.\n\n Note that if Megatron's parallel_state has not been initialized yet, the\n tp_group and cp_group passed to TE will be None and must be set later\n via set_tensor_parallel_group() and set_context_parallel_group().\n \"\"\"\n\n cp_stream: torch.cuda.Stream = None\n\n def __init__(\n self,\n config: TransformerConfig,\n layer_number: int = 1,\n attn_mask_type: AttnMaskType = AttnMaskType.padding,\n **kwargs\n ):\n self.config = config\n\n # Only Transformer-Engine version > 0.13.0 supports context parallelism\n te_version = packaging.version.Version(version(\"transformer-engine\"))\n if te_version > packaging.version.Version(\"0.13.0\"):\n if getattr(TEDotProductAttention, \"cp_stream\") is None:\n TEDotProductAttention.cp_stream = torch.cuda.Stream()\n kwargs[\"cp_group\"] = get_context_parallel_group(check_initialized=False)\n kwargs[\"cp_global_ranks\"] = get_context_parallel_global_ranks(check_initialized=False)\n kwargs[\"cp_stream\"] = TEDotProductAttention.cp_stream\n else:\n assert (\n self.config.context_parallel_size == 1\n ), \"Only Transformer-Engine version > 0.13.0 supports context parallelism\"\n\n super().__init__(\n num_attention_heads=self.config.num_attention_heads,\n kv_channels=self.config.kv_channels,\n attention_dropout=self.config.attention_dropout,\n layer_number=layer_number,\n attn_mask_type=attn_mask_type.name,\n sequence_parallel=self.config.sequence_parallel,\n tp_size=self.config.tensor_model_parallel_size,\n get_rng_state_tracker=get_cuda_rng_tracker,\n tp_group=get_tensor_model_parallel_group(check_initialized=False),\n **kwargs,\n )" }, { "identifier": "TELayerNormColumnParallelLinear", "path": "megatron/core/transformer/custom_layers/transformer_engine.py", "snippet": "class TELayerNormColumnParallelLinear(te.pytorch.LayerNormLinear):\n \"\"\"\n Wrapper for the Transformer-Engine's `LayerNormLinear` layer that combines\n layernorm and linear layers\n \"\"\"\n\n def __init__(\n self,\n input_size: int,\n output_size: int,\n config: TransformerConfig,\n init_method: Callable,\n bias: bool,\n skip_bias_add: bool,\n **kwargs\n ):\n self.config = config\n # TE returns a zero length Tensor when bias=False and\n # return_bias=True, but we prefer None. So in that case we\n # tell TE to not return the bias, and return None\n # ourselves. 
This way our forward always returns two values\n # and we don't have to deal with the zero length Tensor.\n self.te_return_bias = skip_bias_add and bias\n\n # Only Transformer-Engine version >= 0.11.0 supports `RMSNorm`\n te_version = packaging.version.Version(version(\"transformer-engine\"))\n if te_version >= packaging.version.Version(\"0.11.0\"):\n kwargs[\"normalization\"] = self.config.normalization\n\n super().__init__(\n in_features=input_size,\n out_features=output_size,\n bias=bias,\n sequence_parallel=self.config.sequence_parallel,\n fuse_wgrad_accumulation=self.config.gradient_accumulation_fusion,\n tp_group=get_tensor_model_parallel_group(check_initialized=False),\n tp_size=self.config.tensor_model_parallel_size,\n get_rng_state_tracker=get_cuda_rng_tracker,\n init_method=init_method,\n params_dtype=self.config.params_dtype,\n parallel_mode=\"column\",\n return_bias=self.te_return_bias,\n **_get_extra_te_kwargs(config),\n )\n\n def forward(self, x):\n out = super().forward(x)\n\n # TE only returns a tuple when return_bias is True, otherwise\n # it returns a single Tensor, we always want to return two\n # values regardless of the arguments.\n if self.te_return_bias:\n return out\n return out, None" }, { "identifier": "TERowParallelLinear", "path": "megatron/core/transformer/custom_layers/transformer_engine.py", "snippet": "class TERowParallelLinear(TELinear):\n \"\"\"\n Wrapper for the Transformer-Engine's `Linear` layer but specialized similar\n to megatron's `RowParallelLinear` layer.\n \"\"\"\n\n def __init__(self, input_size: int, output_size: int, config: TransformerConfig, **kwargs):\n self.config = config\n super().__init__(\n input_size=input_size,\n output_size=output_size,\n config=self.config,\n parallel_mode=\"row\",\n **kwargs,\n )" }, { "identifier": "DotProductAttention", "path": "megatron/core/transformer/dot_product_attention.py", "snippet": "class DotProductAttention(MegatronModule):\n \"\"\"\n Region where selective activation recomputation is applied.\n This region is memory intensive but less compute intensive which\n makes activation checkpointing more efficient for LLMs (20B+).\n See Reducing Activation Recomputation in Large Transformer Models: https://arxiv.org/abs/2205.05198 for more details.\n\n We use the following notation:\n h: hidden size\n n: number of attention heads\n p: number of tensor model parallel partitions\n b: batch size\n s: sequence length\n \"\"\"\n\n def __init__(\n self, config: TransformerConfig, layer_number: int = 1, attn_mask_type=AttnMaskType.padding\n ):\n super().__init__(config=config)\n\n self.config: TransformerConfig = config\n\n assert (\n self.config.context_parallel_size == 1\n ), \"Context parallelism is only supported by TEDotProductAttention!\"\n\n self.layer_number = max(1, layer_number)\n self.attn_mask_type = attn_mask_type\n\n projection_size = self.config.kv_channels * config.num_attention_heads\n\n # Per attention head and per partition values.\n world_size = parallel_state.get_tensor_model_parallel_world_size()\n self.hidden_size_per_partition = divide(projection_size, world_size)\n self.hidden_size_per_attention_head = divide(projection_size, config.num_attention_heads)\n self.num_attention_heads_per_partition = divide(config.num_attention_heads, world_size)\n\n coeff = None\n self.norm_factor = math.sqrt(self.hidden_size_per_attention_head)\n if self.config.apply_query_key_layer_scaling:\n coeff = self.layer_number\n self.norm_factor *= coeff\n\n self.scale_mask_softmax = FusedScaleMaskSoftmax(\n 
input_in_fp16=self.config.fp16,\n input_in_bf16=self.config.bf16,\n attn_mask_type=self.attn_mask_type,\n scaled_masked_softmax_fusion=self.config.masked_softmax_fusion,\n mask_func=attention_mask_func,\n softmax_in_fp32=self.config.attention_softmax_in_fp32,\n scale=coeff,\n )\n\n # Dropout. Note that for a single iteration, this layer will generate\n # different outputs on different number of parallel partitions but\n # on average it should not be partition dependent.\n self.attention_dropout = torch.nn.Dropout(self.config.attention_dropout)\n\n def forward(\n self, query_layer: Tensor, key_layer: Tensor, value_layer: Tensor, attention_mask: Tensor\n ):\n\n # ===================================\n # Raw attention scores. [b, n/p, s, s]\n # ===================================\n\n # [b, np, sq, sk]\n output_size = (\n query_layer.size(1),\n query_layer.size(2),\n query_layer.size(0),\n key_layer.size(0),\n )\n\n # [sq, b, np, hn] -> [sq, b * np, hn]\n # This will be a simple view when doing normal attention, but in group query attention\n # the key and value tensors are repeated to match the queries so you can't use simple strides\n # to extract the queries.\n query_layer = query_layer.reshape(output_size[2], output_size[0] * output_size[1], -1)\n # [sk, b, np, hn] -> [sk, b * np, hn]\n key_layer = key_layer.view(output_size[3], output_size[0] * output_size[1], -1)\n\n # preallocting input tensor: [b * np, sq, sk]\n matmul_input_buffer = parallel_state.get_global_memory_buffer().get_tensor(\n (output_size[0] * output_size[1], output_size[2], output_size[3]),\n query_layer.dtype,\n \"mpu\",\n )\n\n # Raw attention scores. [b * np, sq, sk]\n matmul_result = torch.baddbmm(\n matmul_input_buffer,\n query_layer.transpose(0, 1), # [b * np, sq, hn]\n key_layer.transpose(0, 1).transpose(1, 2), # [b * np, hn, sk]\n beta=0.0,\n alpha=(1.0 / self.norm_factor),\n )\n\n # change view to [b, np, sq, sk]\n attention_scores = matmul_result.view(*output_size)\n\n # ===========================\n # Attention probs and dropout\n # ===========================\n\n # attention scores and attention mask [b, np, sq, sk]\n attention_probs: Tensor = self.scale_mask_softmax(attention_scores, attention_mask)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n\n if not self.config.sequence_parallel:\n with tensor_parallel.get_cuda_rng_tracker().fork():\n attention_probs = self.attention_dropout(attention_probs)\n else:\n attention_probs = self.attention_dropout(attention_probs)\n\n # =========================\n # Context layer. 
[sq, b, hp]\n # =========================\n\n # value_layer -> context layer.\n # [sk, b, np, hn] --> [b, np, sq, hn]\n\n # context layer shape: [b, np, sq, hn]\n output_size = (\n value_layer.size(1),\n value_layer.size(2),\n query_layer.size(0),\n value_layer.size(3),\n )\n\n # change view [sk, b * np, hn]\n value_layer = value_layer.view(value_layer.size(0), output_size[0] * output_size[1], -1)\n\n # change view [b * np, sq, sk]\n attention_probs = attention_probs.view(output_size[0] * output_size[1], output_size[2], -1)\n\n # matmul: [b * np, sq, hn]\n context_layer = torch.bmm(attention_probs, value_layer.transpose(0, 1))\n\n # change view [b, np, sq, hn]\n context_layer = context_layer.view(*output_size)\n\n # [b, np, sq, hn] --> [sq, b, np, hn]\n context_layer = context_layer.permute(2, 0, 1, 3).contiguous()\n\n # [sq, b, np, hn] --> [sq, b, hp]\n new_context_layer_shape = context_layer.size()[:-2] + (self.hidden_size_per_partition,)\n context_layer = context_layer.view(*new_context_layer_shape)\n\n return context_layer" }, { "identifier": "AttnMaskType", "path": "megatron/core/transformer/enums.py", "snippet": "class AttnMaskType(enum.Enum):\n padding = 1\n causal = 2" }, { "identifier": "MLP", "path": "megatron/core/transformer/mlp.py", "snippet": "class MLP(MegatronModule):\n \"\"\"\n MLP will take the input with h hidden state, project it to 4*h\n hidden dimension, perform nonlinear transformation, and project the\n state back into h hidden dimension.\n\n\n Returns an output and a bias to be added to the output.\n If config.add_bias_linear is False, the bias returned is None.\n\n We use the following notation:\n h: hidden size\n p: number of tensor model parallel partitions\n b: batch size\n s: sequence length\n \"\"\"\n\n def __init__(\n self, config: TransformerConfig, submodules: MLPSubmodules, is_expert: bool = False\n ):\n super().__init__(config=config)\n\n self.config: TransformerConfig = config\n\n # If this is a gated linear unit we double the output width, see https://arxiv.org/pdf/2002.05202.pdf\n ffn_hidden_size = self.config.ffn_hidden_size\n if self.config.gated_linear_unit:\n ffn_hidden_size *= 2\n\n self.linear_fc1 = build_module(\n submodules.linear_fc1,\n self.config.hidden_size,\n ffn_hidden_size,\n config=self.config,\n init_method=self.config.init_method,\n gather_output=False,\n bias=self.config.add_bias_linear,\n skip_bias_add=True,\n is_expert=is_expert,\n )\n\n if self.config.gated_linear_unit:\n\n def glu(x):\n x = torch.chunk(x, 2, dim=-1)\n return self.config.activation_func(x[0]) * x[1]\n\n self.activation_func = glu\n else:\n self.activation_func = self.config.activation_func\n\n self.linear_fc2 = build_module(\n submodules.linear_fc2,\n self.config.ffn_hidden_size,\n self.config.hidden_size,\n config=self.config,\n init_method=self.config.output_layer_init_method,\n bias=self.config.add_bias_linear,\n input_is_parallel=True,\n skip_bias_add=True,\n is_expert=is_expert,\n )\n\n def forward(self, hidden_states):\n\n # [s, b, 4 * h/p]\n intermediate_parallel, bias_parallel = self.linear_fc1(hidden_states)\n\n if self.config.bias_gelu_fusion:\n assert self.config.add_bias_linear is True\n assert self.activation_func == F.gelu\n intermediate_parallel = bias_gelu_impl(intermediate_parallel, bias_parallel)\n else:\n if bias_parallel is not None:\n intermediate_parallel = intermediate_parallel + bias_parallel\n intermediate_parallel = self.activation_func(intermediate_parallel)\n\n # [s, b, h]\n output, output_bias = 
self.linear_fc2(intermediate_parallel)\n\n return output, output_bias" }, { "identifier": "MLPSubmodules", "path": "megatron/core/transformer/mlp.py", "snippet": "class MLPSubmodules:\n linear_fc1: Union[ModuleSpec, type] = None\n linear_fc2: Union[ModuleSpec, type] = None" }, { "identifier": "ModuleSpec", "path": "megatron/core/transformer/spec_utils.py", "snippet": "class ModuleSpec:\n \"\"\"This is a Module Specification dataclass.\n\n Specification defines the location of the module (to import dynamically)\n or the imported module itself. It also defines the params that need to be\n passed to initialize the module.\n\n Args:\n module (Union[Tuple, type]): A tuple describing the location of the\n module class e.g. `(module.location, ModuleClass)` or the imported\n module class itself e.g. `ModuleClass` (which is already imported\n using `from module.location import ModuleClass`).\n params (dict): A dictionary of params that need to be passed while init.\n\n \"\"\"\n\n module: Union[Tuple, type]\n params: dict = field(default_factory=lambda: {})\n submodules: type = None" }, { "identifier": "SwitchMLP", "path": "megatron/core/transformer/switch_mlp.py", "snippet": "class SwitchMLP(MegatronModule):\n \"\"\"\n Top-1 Mixture of Experts Layer. Routes input to one of N MLP \"experts\"\n Curently supports Sinkhorn based expert routing.\n \"\"\"\n\n def __init__(self, config: TransformerConfig, submodules: MLPSubmodules):\n super().__init__(config=config)\n\n self.config: TransformerConfig = config\n\n self.router = torch.nn.Linear(self.config.hidden_size, self.config.num_moe_experts)\n self.add_bias = config.add_bias_linear\n self.sequence_parallel = config.sequence_parallel\n self.route_algo = sinkhorn\n self.router_activation = torch.sigmoid\n self.expert_parallel_size = parallel_state.get_expert_model_parallel_world_size()\n\n assert self.config.num_moe_experts % self.expert_parallel_size == 0\n self.num_local_experts = self.config.num_moe_experts // self.expert_parallel_size\n local_expert_indices_offset = (\n parallel_state.get_expert_model_parallel_rank() * self.num_local_experts\n )\n self.local_expert_indices = [\n local_expert_indices_offset + i for i in range(self.num_local_experts)\n ]\n\n self.local_experts = torch.nn.ModuleList()\n for _ in range(self.num_local_experts):\n expert = MLP(self.config, submodules, is_expert=True)\n self.local_experts.append(expert)\n\n def gather_indices(self, local_indices):\n \"\"\" Gather tensors and concatenate along the first dimension.\"\"\"\n group = get_tensor_and_expert_parallel_group()\n world_size = torch.distributed.get_world_size(group=group)\n # Bypass the function if we are using only 1 GPU.\n if world_size == 1:\n return local_indices\n\n dim_size = list(local_indices.size())\n dim_size[0] = dim_size[0] * world_size\n\n # TODO pre allocate memory\n output = torch.empty(\n dim_size, dtype=local_indices.dtype, device=torch.cuda.current_device()\n )\n torch.distributed._all_gather_base(output, local_indices.contiguous(), group=group)\n return output\n\n def forward(self, hidden_states):\n hidden_shape = hidden_states.shape\n route = self.router(hidden_states)\n route = route.view(-1, self.config.num_moe_experts)\n\n if self.training:\n with torch.no_grad():\n norm_route = self.route_algo(\n route.detach().to(dtype=torch.float32)\n ) # explicit fp32 conversion for stability\n _, max_ind = torch.max(norm_route, dim=1)\n route = self.router_activation(route)\n max_prob = route[torch.arange(route.size(0)), max_ind]\n else:\n route = 
self.router_activation(route)\n max_prob, max_ind = torch.max(route, dim=1)\n\n max_prob = torch.unsqueeze(max_prob, 1)\n hidden_states = hidden_states.view(-1, hidden_shape[-1])\n\n if self.sequence_parallel or (self.expert_parallel_size > 1):\n global_hidden_states = tensor_parallel.gather_from_sequence_parallel_region_to_moe(\n hidden_states\n )\n global_indices = self.gather_indices(max_ind)\n else:\n global_hidden_states = hidden_states\n global_indices = max_ind\n\n output_total = torch.zeros_like(global_hidden_states)\n if self.add_bias:\n output_bias_total = torch.zeros_like(global_hidden_states)\n\n for expert_num, expert in enumerate(self.local_experts):\n local_expert_index = self.local_expert_indices[expert_num]\n local_indices = (global_indices == local_expert_index).nonzero()\n hidden = global_hidden_states[local_indices, :]\n output, output_bias = expert(hidden)\n\n output_total[local_indices, :] = output\n if self.add_bias:\n output_bias = output_bias.expand_as(output)\n output_bias_total[local_indices, :] = output_bias\n\n if self.sequence_parallel or (self.expert_parallel_size > 1):\n output_total = tensor_parallel.reduce_scatter_to_sequence_parallel_region_from_moe(\n output_total\n )\n if self.add_bias:\n output_bias_total = tensor_parallel.reduce_scatter_to_sequence_parallel_region_from_moe(\n output_bias_total\n )\n # bias is duplicated across tensor parallelism ranks;\n # reduce scatter reduces bias across tensor parallel_ranks\n output_bias_total = (\n output_bias_total / parallel_state.get_tensor_model_parallel_world_size()\n )\n\n output_total = output_total * max_prob\n output_total = output_total.view(hidden_shape)\n if self.add_bias:\n output_bias_total = output_bias_total * max_prob\n output_bias_total = output_bias_total.view(hidden_shape)\n else:\n output_bias_total = None\n\n return output_total, output_bias_total" }, { "identifier": "TransformerLayer", "path": "megatron/core/transformer/transformer_layer.py", "snippet": "class TransformerLayer(MegatronModule):\n \"\"\"A single transformer layer.\n\n Transformer layer takes input with size [s, b, h] and returns an\n output of the same size.\n \"\"\"\n\n def __init__(\n self,\n config: TransformerConfig,\n submodules: TransformerLayerSubmodules,\n layer_number: int = 1,\n self_attn_mask_type=AttnMaskType.padding,\n ):\n super().__init__(config=config)\n self.config: TransformerConfig = config\n\n self.layer_number = layer_number + self._get_layer_offset()\n\n self.self_attn_mask_type = self_attn_mask_type\n\n ## [Module 1: Input Layernorm] Optional Layernorm on the input data\n # TODO: add pytorch only layernorm\n self.input_layernorm = build_module(\n submodules.input_layernorm,\n hidden_size=self.config.hidden_size,\n eps=self.config.layernorm_epsilon,\n persist_layer_norm=self.config.persist_layer_norm,\n sequence_parallel=self.config.sequence_parallel,\n zero_centered_gamma=self.config.layernorm_zero_centered_gamma,\n normalization=self.config.normalization,\n )\n\n ## [Module 2: SelfAttention]\n self.self_attention = build_module(\n submodules.self_attention, config=self.config, layer_number=layer_number,\n )\n\n ## [Module 3: BiasDropoutFusion]\n self.self_attn_bda = build_module(submodules.self_attn_bda)\n\n ## [Module 4: Post SelfAttention] Optional Layernorm after self-attn\n self.pre_cross_attn_layernorm = build_module(\n submodules.pre_cross_attn_layernorm,\n hidden_size=self.config.hidden_size,\n eps=self.config.layernorm_epsilon,\n persist_layer_norm=self.config.persist_layer_norm,\n 
sequence_parallel=self.config.sequence_parallel,\n zero_centered_gamma=self.config.layernorm_zero_centered_gamma,\n normalization=self.config.normalization,\n )\n\n ## [Module 5: CrossAttention]\n self.cross_attention = build_module(\n submodules.cross_attention, config=self.config, layer_number=layer_number,\n )\n\n ## [Module 6: BiasDropoutFusion]\n self.cross_attn_bda = build_module(submodules.cross_attn_bda)\n\n ## [Module 7: Post Cross Attention] Optional Layernorm after cross-attn\n self.pre_mlp_layernorm = build_module(\n submodules.pre_mlp_layernorm,\n hidden_size=self.config.hidden_size,\n eps=self.config.layernorm_epsilon,\n persist_layer_norm=self.config.persist_layer_norm,\n sequence_parallel=self.config.sequence_parallel,\n zero_centered_gamma=self.config.layernorm_zero_centered_gamma,\n normalization=self.config.normalization,\n )\n\n ## [Module 8: MLP block]\n # TODO how to set the gpt_layer_spec.py when we have moe_frequency > 1,\n # where MLP and SwitchMLP both appear alternately?\n self.mlp = build_module(submodules.mlp, config=self.config)\n\n ## [Module 9: BiasDropoutFusion]\n self.mlp_bda = build_module(submodules.mlp_bda)\n\n # @jcasper how should we handle nvfuser?\n # Set bias+dropout+add fusion grad_enable execution handler.\n # TORCH_MAJOR = int(torch.__version__.split('.')[0])\n # TORCH_MINOR = int(torch.__version__.split('.')[1])\n # use_nvfuser = TORCH_MAJOR > 1 or (TORCH_MAJOR == 1 and TORCH_MINOR >= 10)\n # self.bias_dropout_add_exec_handler = nullcontext if use_nvfuser else torch.enable_grad\n self.bias_dropout_add_exec_handler = torch.enable_grad\n\n def _get_layer_offset(self):\n\n pipeline_rank = parallel_state.get_pipeline_model_parallel_rank()\n\n num_layers_per_pipeline_rank = (\n self.config.num_layers // parallel_state.get_pipeline_model_parallel_world_size()\n )\n\n if parallel_state.get_virtual_pipeline_model_parallel_world_size() is not None:\n vp_rank = parallel_state.get_virtual_pipeline_model_parallel_rank()\n vp_size = parallel_state.get_virtual_pipeline_model_parallel_world_size()\n\n total_num_layers = self.config.num_layers\n num_layers_per_virtual_rank = num_layers_per_pipeline_rank // vp_size\n total_virtual_chunks = total_num_layers // vp_size\n offset = vp_rank * total_virtual_chunks + (pipeline_rank * num_layers_per_virtual_rank)\n\n else:\n # Each stage gets a contiguous set of layers.\n if parallel_state.get_pipeline_model_parallel_world_size() > 1:\n offset = pipeline_rank * num_layers_per_pipeline_rank\n else:\n offset = 0\n\n return offset\n\n def forward(\n self,\n hidden_states,\n attention_mask,\n context=None,\n context_mask=None,\n inference_params=None,\n rotary_pos_emb=None,\n ):\n # hidden_states: [s, b, h]\n\n # Residual connection.\n residual = hidden_states\n\n # Optional Input Layer norm\n input_layernorm_output = self.input_layernorm(hidden_states)\n\n # Self attention.\n attention_output_with_bias = self.self_attention(\n input_layernorm_output,\n attention_mask=attention_mask,\n inference_params=inference_params,\n rotary_pos_emb=rotary_pos_emb,\n )\n\n # TODO: could we move `bias_dropout_add_exec_handler` itself\n # inside the module provided in the `bias_dropout_add_spec` module?\n with self.bias_dropout_add_exec_handler():\n hidden_states = self.self_attn_bda(self.training, self.config.bias_dropout_fusion)(\n attention_output_with_bias, residual, self.config.hidden_dropout\n )\n\n # Residual connection.\n residual = hidden_states\n\n # Optional Layer norm after self-attention\n pre_cross_attn_layernorm_output = 
self.pre_cross_attn_layernorm(hidden_states)\n\n # Cross attention.\n attention_output_with_bias = self.cross_attention(\n pre_cross_attn_layernorm_output,\n attention_mask=attention_mask,\n context=context,\n inference_params=inference_params,\n )\n\n # TODO: could we move `bias_dropout_add_exec_handler` itself\n # inside the module provided in the `bias_dropout_add_spec` module?\n with self.bias_dropout_add_exec_handler():\n hidden_states = self.cross_attn_bda(self.training, self.config.bias_dropout_fusion)(\n attention_output_with_bias, residual, self.config.hidden_dropout\n )\n\n # Residual connection.\n residual = hidden_states\n\n # Optional Layer norm post the cross-attention.\n pre_mlp_layernorm_output = self.pre_mlp_layernorm(hidden_states)\n\n # MLP.\n mlp_output_with_bias = self.mlp(pre_mlp_layernorm_output)\n\n # TODO: could we move `bias_dropout_add_exec_handler` itself\n # inside the module provided in the `bias_dropout_add_spec` module?\n with self.bias_dropout_add_exec_handler():\n hidden_states = self.mlp_bda(self.training, self.config.bias_dropout_fusion)(\n mlp_output_with_bias, residual, self.config.hidden_dropout\n )\n\n # Jit compiled function creates 'view' tensor. This tensor\n # potentially gets saved in the MPU checkpoint function context,\n # which rejects view tensors. While making a viewless tensor here\n # won't result in memory savings (like the data loader, or\n # p2p_communication), it serves to document the origin of this\n # 'view' tensor.\n output = make_viewless_tensor(\n inp=hidden_states, requires_grad=hidden_states.requires_grad, keep_graph=True\n )\n\n return output\n\n def sharded_state_dict(self, prefix=''):\n\n # state_dict = self.state_dict(prefix=prefix, keep_vars=True)\n state_dict = self.state_dict(keep_vars=True)\n\n tensor_parallel_layers_axis_map = {\n 'self_attention.linear_qkv.weight': 0,\n 'self_attention.linear_qkv.bias': 0,\n 'self_attention.linear_proj.weight': 1,\n 'mlp.linear_fc1.weight': 0,\n 'mlp.linear_fc1.bias': 0,\n 'mlp.linear_fc2.weight': 1,\n }\n\n offset = self._get_layer_offset()\n num_layers = self.config.num_layers\n\n sharded_state_dict = {}\n\n for layer_name in state_dict.keys():\n tensor = state_dict[layer_name]\n global_layer_offset = self.layer_number - 1 # self.layer_number starts at 1\n layer_key = f'{prefix}{global_layer_offset - offset}.{layer_name}' # module list index in TransformerBlock\n sharded_offsets = [(0, global_layer_offset, num_layers)] # PP sharding\n\n if layer_name in tensor_parallel_layers_axis_map:\n tp_axis = tensor_parallel_layers_axis_map[layer_name]\n # TP sharding\n sharded_offsets.append(\n [\n tp_axis + 1, # +1 for PP dimension\n parallel_state.get_tensor_model_parallel_rank(),\n parallel_state.get_tensor_model_parallel_world_size(),\n ]\n )\n replica_id = parallel_state.get_data_parallel_rank()\n else:\n replica_id = (\n parallel_state.get_data_parallel_rank()\n * parallel_state.get_data_parallel_world_size()\n + parallel_state.get_tensor_model_parallel_rank()\n )\n\n if layer_name.endswith('._extra_state'):\n sharded_state_dict[layer_key] = ShardedObject(\n f'{prefix}{layer_name}',\n tensor,\n (num_layers,),\n (global_layer_offset,),\n replica_id,\n )\n\n else:\n sharded_state_dict[layer_key] = ShardedTensor.from_rank_offsets(\n f'{prefix}{layer_name}',\n tensor,\n *sharded_offsets,\n replica_id=replica_id,\n prepend_axis_num=1, # for PP sharding\n )\n\n return sharded_state_dict" }, { "identifier": "TransformerLayerSubmodules", "path": "megatron/core/transformer/transformer_layer.py", 
"snippet": "class TransformerLayerSubmodules:\n input_layernorm: Union[ModuleSpec, type] = IdentityOp\n self_attention: Union[ModuleSpec, type] = IdentityOp\n self_attn_bda: Union[ModuleSpec, type] = IdentityFuncOp\n\n pre_cross_attn_layernorm: Union[ModuleSpec, type] = IdentityOp\n cross_attention: Union[ModuleSpec, type] = IdentityOp\n cross_attn_bda: Union[ModuleSpec, type] = IdentityFuncOp\n\n pre_mlp_layernorm: Union[ModuleSpec, type] = IdentityOp\n mlp: Union[ModuleSpec, type] = IdentityOp\n mlp_bda: Union[ModuleSpec, type] = IdentityFuncOp" } ]
from megatron.core.fusions.fused_bias_dropout import get_bias_dropout_add from megatron.core.fusions.fused_layer_norm import FusedLayerNorm from megatron.core.tensor_parallel.layers import ColumnParallelLinear, RowParallelLinear from megatron.core.transformer.attention import SelfAttention, SelfAttentionSubmodules from megatron.core.transformer.custom_layers.transformer_engine import ( TEDotProductAttention, TELayerNormColumnParallelLinear, TERowParallelLinear, ) from megatron.core.transformer.dot_product_attention import DotProductAttention from megatron.core.transformer.enums import AttnMaskType from megatron.core.transformer.mlp import MLP, MLPSubmodules from megatron.core.transformer.spec_utils import ModuleSpec from megatron.core.transformer.switch_mlp import SwitchMLP from megatron.core.transformer.transformer_layer import TransformerLayer, TransformerLayerSubmodules
13,362
# Use this spec to use lower level Transformer Engine modules (required for fp8 training) gpt_layer_with_transformer_engine_spec = ModuleSpec( module=TransformerLayer, submodules=TransformerLayerSubmodules( self_attention=ModuleSpec( module=SelfAttention, params={"attn_mask_type": AttnMaskType.causal}, submodules=SelfAttentionSubmodules( linear_qkv=TELayerNormColumnParallelLinear, dot_product_attention=TEDotProductAttention, linear_proj=TERowParallelLinear, ), ), self_attn_bda=get_bias_dropout_add, mlp=ModuleSpec( module=MLP, submodules=MLPSubmodules( linear_fc1=TELayerNormColumnParallelLinear, linear_fc2=TERowParallelLinear, ), ), mlp_bda=get_bias_dropout_add, ), ) # Use this spec for an implementation using only modules in megatron core gpt_layer_local_spec = ModuleSpec( module=TransformerLayer, submodules=TransformerLayerSubmodules( input_layernorm=FusedLayerNorm, self_attention=ModuleSpec( module=SelfAttention, params={"attn_mask_type": AttnMaskType.causal}, submodules=SelfAttentionSubmodules( linear_qkv=ColumnParallelLinear, dot_product_attention=DotProductAttention, linear_proj=RowParallelLinear, ), ), self_attn_bda=get_bias_dropout_add, pre_mlp_layernorm=FusedLayerNorm, mlp=ModuleSpec( module=MLP, submodules=MLPSubmodules( linear_fc1=ColumnParallelLinear, linear_fc2=RowParallelLinear, ), ), mlp_bda=get_bias_dropout_add, ), ) # Use this spec to use lower level Transformer Engine modules and SwitchMLP based MoE gpt_layer_with_transformer_engine_spec_moe = ModuleSpec( module=TransformerLayer, submodules=TransformerLayerSubmodules( self_attention=ModuleSpec( module=SelfAttention, params={"attn_mask_type": AttnMaskType.causal}, submodules=SelfAttentionSubmodules( linear_qkv=TELayerNormColumnParallelLinear, dot_product_attention=TEDotProductAttention, linear_proj=TERowParallelLinear, ), ), self_attn_bda=get_bias_dropout_add, pre_mlp_layernorm=FusedLayerNorm, mlp=ModuleSpec(
# Use this spec to use lower level Transformer Engine modules (required for fp8 training) gpt_layer_with_transformer_engine_spec = ModuleSpec( module=TransformerLayer, submodules=TransformerLayerSubmodules( self_attention=ModuleSpec( module=SelfAttention, params={"attn_mask_type": AttnMaskType.causal}, submodules=SelfAttentionSubmodules( linear_qkv=TELayerNormColumnParallelLinear, dot_product_attention=TEDotProductAttention, linear_proj=TERowParallelLinear, ), ), self_attn_bda=get_bias_dropout_add, mlp=ModuleSpec( module=MLP, submodules=MLPSubmodules( linear_fc1=TELayerNormColumnParallelLinear, linear_fc2=TERowParallelLinear, ), ), mlp_bda=get_bias_dropout_add, ), ) # Use this spec for an implementation using only modules in megatron core gpt_layer_local_spec = ModuleSpec( module=TransformerLayer, submodules=TransformerLayerSubmodules( input_layernorm=FusedLayerNorm, self_attention=ModuleSpec( module=SelfAttention, params={"attn_mask_type": AttnMaskType.causal}, submodules=SelfAttentionSubmodules( linear_qkv=ColumnParallelLinear, dot_product_attention=DotProductAttention, linear_proj=RowParallelLinear, ), ), self_attn_bda=get_bias_dropout_add, pre_mlp_layernorm=FusedLayerNorm, mlp=ModuleSpec( module=MLP, submodules=MLPSubmodules( linear_fc1=ColumnParallelLinear, linear_fc2=RowParallelLinear, ), ), mlp_bda=get_bias_dropout_add, ), ) # Use this spec to use lower level Transformer Engine modules and SwitchMLP based MoE gpt_layer_with_transformer_engine_spec_moe = ModuleSpec( module=TransformerLayer, submodules=TransformerLayerSubmodules( self_attention=ModuleSpec( module=SelfAttention, params={"attn_mask_type": AttnMaskType.causal}, submodules=SelfAttentionSubmodules( linear_qkv=TELayerNormColumnParallelLinear, dot_product_attention=TEDotProductAttention, linear_proj=TERowParallelLinear, ), ), self_attn_bda=get_bias_dropout_add, pre_mlp_layernorm=FusedLayerNorm, mlp=ModuleSpec(
module=SwitchMLP, # MOE
14
2023-12-07 08:29:38+00:00
16k
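The sketch below is not part of the record above; it is only a minimal illustration, under assumptions, of how one of the layer specs from that record's cropped code could be turned into a concrete layer. The TransformerConfig field values are placeholders, the import path for gpt_layer_local_spec is assumed, and torch.distributed plus Megatron's model-parallel state (and a CUDA device) are assumed to be initialized before the constructor runs.

# Illustrative sketch only (not from the record above). Assumes torch.distributed
# and Megatron's model-parallel groups are already initialized and a GPU is available.
from megatron.core.transformer.transformer_config import TransformerConfig
from megatron.core.transformer.transformer_layer import TransformerLayer
# Assumed path: this is the module shown in the record's cropped_code above.
from megatron.core.models.gpt.gpt_layer_specs import gpt_layer_local_spec

# Placeholder hyperparameters; real values come from the model's training config.
config = TransformerConfig(
    num_layers=12,
    hidden_size=768,
    num_attention_heads=12,
)

# gpt_layer_local_spec.submodules is a TransformerLayerSubmodules instance, which
# is exactly what the TransformerLayer constructor in the record's context expects.
layer = TransformerLayer(
    config=config,
    submodules=gpt_layer_local_spec.submodules,
    layer_number=1,
)

# layer.forward(hidden_states, attention_mask) then consumes a
# [sequence, batch, hidden] tensor and returns one of the same shape.

Passing spec.submodules directly mirrors how TransformerLayer's __init__ builds each piece via build_module, as shown in the context snippet of this record.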
tommy-xq/SA2VP
vit_train_swin.py
[ { "identifier": "create_optimizer", "path": "optim_factory.py", "snippet": "def create_optimizer(args, model, get_num_layer=None, get_layer_scale=None, filter_bias_and_bn=True, skip_list=None):\n opt_lower = args.opt.lower()\n weight_decay = args.weight_decay\n if weight_decay and filter_bias_and_bn:\n skip = {}\n if skip_list is not None:\n skip = skip_list\n elif hasattr(model, 'no_weight_decay'):\n skip = model.no_weight_decay()\n parameters = get_parameter_groups(model, weight_decay, skip, get_num_layer, get_layer_scale)\n weight_decay = 0.\n else:\n parameters = model.parameters()\n\n if 'fused' in opt_lower:\n assert has_apex and torch.cuda.is_available(), 'APEX and CUDA required for fused optimizers'\n\n opt_args = dict(lr=args.lr, weight_decay=weight_decay)\n if hasattr(args, 'opt_eps') and args.opt_eps is not None:\n opt_args['eps'] = args.opt_eps\n if hasattr(args, 'opt_betas') and args.opt_betas is not None:\n opt_args['betas'] = args.opt_betas\n\n opt_split = opt_lower.split('_')\n opt_lower = opt_split[-1]\n if opt_lower == 'sgd' or opt_lower == 'nesterov':\n opt_args.pop('eps', None)\n optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=True, **opt_args)\n elif opt_lower == 'momentum':\n opt_args.pop('eps', None)\n optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=False, **opt_args)\n elif opt_lower == 'adam':\n optimizer = optim.Adam(parameters, **opt_args)\n elif opt_lower == 'adamw':\n optimizer = optim.AdamW(parameters, **opt_args)\n elif opt_lower == 'nadam':\n optimizer = Nadam(parameters, **opt_args)\n elif opt_lower == 'radam':\n optimizer = RAdam(parameters, **opt_args)\n elif opt_lower == 'adamp':\n optimizer = AdamP(parameters, wd_ratio=0.01, nesterov=True, **opt_args)\n elif opt_lower == 'sgdp':\n optimizer = SGDP(parameters, momentum=args.momentum, nesterov=True, **opt_args)\n elif opt_lower == 'adadelta':\n optimizer = optim.Adadelta(parameters, **opt_args)\n elif opt_lower == 'adafactor':\n if not args.lr:\n opt_args['lr'] = None\n optimizer = Adafactor(parameters, **opt_args)\n elif opt_lower == 'adahessian':\n optimizer = Adahessian(parameters, **opt_args)\n elif opt_lower == 'rmsprop':\n optimizer = optim.RMSprop(parameters, alpha=0.9, momentum=args.momentum, **opt_args)\n elif opt_lower == 'rmsproptf':\n optimizer = RMSpropTF(parameters, alpha=0.9, momentum=args.momentum, **opt_args)\n # elif opt_lower == 'novograd':\n # optimizer = NovoGrad(parameters, **opt_args)\n # elif opt_lower == 'nvnovograd':\n # optimizer = NvNovoGrad(parameters, **opt_args)\n elif opt_lower == 'fusedsgd':\n opt_args.pop('eps', None)\n optimizer = FusedSGD(parameters, momentum=args.momentum, nesterov=True, **opt_args)\n elif opt_lower == 'fusedmomentum':\n opt_args.pop('eps', None)\n optimizer = FusedSGD(parameters, momentum=args.momentum, nesterov=False, **opt_args)\n elif opt_lower == 'fusedadam':\n optimizer = FusedAdam(parameters, adam_w_mode=False, **opt_args)\n elif opt_lower == 'fusedadamw':\n optimizer = FusedAdam(parameters, adam_w_mode=True, **opt_args)\n elif opt_lower == 'fusedlamb':\n optimizer = FusedLAMB(parameters, **opt_args)\n elif opt_lower == 'fusednovograd':\n opt_args.setdefault('betas', (0.95, 0.98))\n optimizer = FusedNovoGrad(parameters, **opt_args)\n else:\n assert False and \"Invalid optimizer\"\n raise ValueError\n\n if len(opt_split) > 1:\n if opt_split[0] == 'lookahead':\n optimizer = Lookahead(optimizer)\n\n return optimizer" }, { "identifier": "get_parameter_groups", "path": "optim_factory.py", "snippet": "def 
get_parameter_groups(model, weight_decay=1e-5, skip_list=(), get_num_layer=None, get_layer_scale=None):\n parameter_group_names = {}\n parameter_group_vars = {}\n\n for name, param in model.named_parameters():\n if not param.requires_grad:\n continue # frozen weights\n if len(param.shape) == 1 or name.endswith(\".bias\") or name in skip_list:\n group_name = \"no_decay\"\n this_weight_decay = 0.\n else:\n group_name = \"decay\"\n this_weight_decay = weight_decay\n if get_num_layer is not None:\n layer_id = get_num_layer(name)\n group_name = \"layer_%d_%s\" % (layer_id, group_name)\n else:\n layer_id = None\n\n if group_name not in parameter_group_names:\n if get_layer_scale is not None:\n scale = get_layer_scale(layer_id)\n else:\n scale = 1.\n\n parameter_group_names[group_name] = {\n \"weight_decay\": this_weight_decay,\n \"params\": [],\n \"lr_scale\": scale\n }\n parameter_group_vars[group_name] = {\n \"weight_decay\": this_weight_decay,\n \"params\": [],\n \"lr_scale\": scale\n }\n\n parameter_group_vars[group_name][\"params\"].append(param)\n parameter_group_names[group_name][\"params\"].append(name)\n print(\"Param groups = %s\" % json.dumps(parameter_group_names, indent=2))\n return list(parameter_group_vars.values())" }, { "identifier": "LayerDecayValueAssigner", "path": "optim_factory.py", "snippet": "class LayerDecayValueAssigner(object):\n def __init__(self, values):\n self.values = values\n\n def get_scale(self, layer_id):\n return self.values[layer_id]\n\n def get_layer_id(self, var_name):\n return get_num_layer_for_vit(var_name, len(self.values))" }, { "identifier": "build_dataset", "path": "datasets.py", "snippet": "def build_dataset(is_train, args):\n # must choose one\n transform = build_transform_vtab(is_train, args)\n # transform = build_transform_fgvc(is_train, args)\n \n prefix_fgvc = './data/fgvc' # replace yours, sample:'./data/fgvc'\n prefix_vtab = './data/vtab-1k' # replace yours, sample:'./data/vtab-1k'\n \n if args.data_set == 'CIFAR_ori':\n dataset = datasets.CIFAR100(args.data_path, train=is_train, transform=transform)\n nb_classes = 100\n elif args.data_set == 'IMNET':\n root = os.path.join(args.data_path, 'train' if is_train else 'test')\n dataset = datasets.ImageFolder(root, transform=transform)\n nb_classes = 1000\n elif args.data_set == \"image_folder\":\n root = args.data_path if is_train else args.eval_data_path\n dataset = ImageFolder(root, transform=transform)\n nb_classes = args.nb_classes\n assert len(dataset.class_to_idx) == nb_classes\n elif args.data_set == 'CUB':\n if is_train:\n dataset = FGVC_cub(root=prefix_fgvc+'/CUB_200_2011', my_mode=args.my_mode, train=True, transform=transform)\n else:\n dataset = FGVC_cub(root=prefix_fgvc+'/CUB_200_2011', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 200\n elif args.data_set == 'DOG':\n if is_train:\n dataset = FGVC_dog(root=prefix_fgvc+'/dogs', my_mode=args.my_mode, train=True, transform=transform)\n else:\n dataset = FGVC_dog(root=prefix_fgvc+'/dogs', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 120\n elif args.data_set == 'FLOWER':\n if is_train:\n dataset = FGVC_flower(root=prefix_fgvc+'/OxfordFlower', my_mode=args.my_mode, train=True, transform=transform)\n else:\n dataset = FGVC_flower(root=prefix_fgvc+'/OxfordFlower', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 102\n elif args.data_set == 'CAR':\n if is_train:\n dataset = FGVC_car(root=prefix_fgvc+'/cars', my_mode=args.my_mode, train=True, transform=transform)\n else:\n 
dataset = FGVC_car(root=prefix_fgvc+'/cars', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 196\n elif args.data_set == 'BIRD':\n if is_train:\n dataset = FGVC_bird(root=prefix_fgvc+'/nabirds', my_mode=args.my_mode, train=True, transform=transform)\n else:\n dataset = FGVC_bird(root=prefix_fgvc+'/nabirds', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 555\n elif args.data_set == 'CAL101':\n if is_train:\n dataset = VTAB(root=prefix_vtab+'/caltech101', my_mode=args.my_mode, train=True, transform=transform) # VTAB_attnmap\n else:\n dataset = VTAB(root=prefix_vtab+'/caltech101', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 102\n elif args.data_set == 'CIFAR':\n if is_train:\n dataset = VTAB(root=prefix_vtab+'/cifar', my_mode=args.my_mode, train=True, transform=transform)\n else:\n dataset = VTAB(root=prefix_vtab+'/cifar', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 100\n elif args.data_set == 'PATCH_CAMELYON':\n if is_train:\n dataset = VTAB(root=prefix_vtab+'/patch_camelyon', my_mode=args.my_mode, train=True, transform=transform)\n else:\n dataset = VTAB(root=prefix_vtab+'/patch_camelyon', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 2\n elif args.data_set == 'EUROSAT':\n if is_train:\n dataset = VTAB(root=prefix_vtab+'/eurosat', my_mode=args.my_mode, train=True, transform=transform)\n else:\n dataset = VTAB(root=prefix_vtab+'/eurosat', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 10\n elif args.data_set == 'DMLAB':\n if is_train:\n dataset = VTAB(root=prefix_vtab+'/dmlab', my_mode=args.my_mode, train=True, transform=transform)\n else:\n dataset = VTAB(root=prefix_vtab+'/dmlab', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 6\n elif args.data_set == 'CLEVR_COUNT':\n if is_train:\n dataset = VTAB(root=prefix_vtab+'/clevr_count', my_mode=args.my_mode, train=True, transform=transform)\n else:\n dataset = VTAB(root=prefix_vtab+'/clevr_count', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 8\n elif args.data_set == 'DTD':\n if is_train:\n dataset = VTAB(root=prefix_vtab+'/dtd', my_mode=args.my_mode, train=True, transform=transform)\n else:\n dataset = VTAB(root=prefix_vtab+'/dtd', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 47\n elif args.data_set == 'FLOWER_S':\n if is_train:\n dataset = VTAB(root=prefix_vtab+'/oxford_flowers102', my_mode=args.my_mode, train=True, transform=transform)\n else:\n dataset = VTAB(root=prefix_vtab+'/oxford_flowers102', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 102\n elif args.data_set == 'PET':\n if is_train:\n dataset = VTAB(root=prefix_vtab+'/oxford_iiit_pet', my_mode=args.my_mode, train=True, transform=transform)\n else:\n dataset = VTAB(root=prefix_vtab+'/oxford_iiit_pet', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 37\n elif args.data_set == 'SVHN_S':\n if is_train:\n dataset = VTAB(root=prefix_vtab+'/svhn', my_mode=args.my_mode, train=True, transform=transform)\n else:\n dataset = VTAB(root=prefix_vtab+'/svhn', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 10\n elif args.data_set == 'SUN':\n if is_train:\n dataset = VTAB(root=prefix_vtab+'/sun397', my_mode=args.my_mode, train=True, transform=transform)\n else:\n dataset = VTAB(root=prefix_vtab+'/sun397', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 397\n elif args.data_set == 
'Resisc45':\n if is_train:\n dataset = VTAB(root=prefix_vtab+'/resisc45', my_mode=args.my_mode, train=True, transform=transform)\n else:\n dataset = VTAB(root=prefix_vtab+'/resisc45', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 45\n elif args.data_set == 'Retinopathy':\n if is_train:\n dataset = VTAB(root=prefix_vtab+'/diabetic_retinopathy', my_mode=args.my_mode, train=True, transform=transform)\n else:\n dataset = VTAB(root=prefix_vtab+'/diabetic_retinopathy', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 5\n elif args.data_set == 'CLEVR_DISTANCE':\n if is_train:\n dataset = VTAB(root=prefix_vtab+'/clevr_dist', my_mode=args.my_mode, train=True, transform=transform)\n else:\n dataset = VTAB(root=prefix_vtab+'/clevr_dist', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 6\n elif args.data_set == 'KITTI_DISTANCE':\n if is_train:\n dataset = VTAB(root=prefix_vtab+'/kitti', my_mode=args.my_mode, train=True, transform=transform)\n else:\n dataset = VTAB(root=prefix_vtab+'/kitti', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 4\n elif args.data_set == 'DS_LOC':\n if is_train:\n dataset = VTAB(root=prefix_vtab+'/dsprites_loc', my_mode=args.my_mode, train=True, transform=transform)\n else:\n dataset = VTAB(root=prefix_vtab+'/dsprites_loc', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 16\n elif args.data_set == 'DS_ORI':\n if is_train:\n dataset = VTAB(root=prefix_vtab+'/dsprites_ori', my_mode=args.my_mode, train=True, transform=transform)\n else:\n dataset = VTAB(root=prefix_vtab+'/dsprites_ori', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 16\n elif args.data_set == 'SN_AZI':\n if is_train:\n dataset = VTAB(root=prefix_vtab+'/smallnorb_azi', my_mode=args.my_mode, train=True, transform=transform)\n else:\n dataset = VTAB(root=prefix_vtab+'/smallnorb_azi', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 18\n elif args.data_set == 'SN_ELE':\n if is_train:\n dataset = VTAB(root=prefix_vtab+'/smallnorb_ele', my_mode=args.my_mode, train=True, transform=transform)\n else:\n dataset = VTAB(root=prefix_vtab+'/smallnorb_ele', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 9\n elif args.data_set == 'DTD_DAM':\n if is_train:\n dataset = DTD(root='/data/damvp_data/cal_all/dtd', split=\"train\", transform=transform) # note: remember to change data path.\n else:\n dataset = DTD(root='/data/damvp_data/cal_all/dtd', split=\"test\", transform=transform) # note: use 'val' to find best and then 'test'. 
when training, use 'val'.\n nb_classes = 47\n elif args.data_set == 'GTSRB_DAM':\n if is_train:\n dataset = GTSRB(root='/data/damvp_data/cal_all', split=\"train\", transform=transform)\n else:\n dataset = GTSRB(root='/data/damvp_data/cal_all', split=\"test\", transform=transform)\n nb_classes = 43\n elif args.data_set == 'FOOD_DAM':\n if is_train:\n dataset = Food101(root='/data/data', split=\"train\", transform=transform)\n else:\n dataset = Food101(root='/data/data', split=\"test\", transform=transform)\n nb_classes = 101\n elif args.data_set == 'CIFAR10_DAM':\n if is_train:\n dataset = CIFAR10(root='/data/damvp_data/cal_all', split=\"train\", transform=transform)\n else:\n dataset = CIFAR10(root='/data/damvp_data/cal_all', split=\"val\", transform=transform)\n nb_classes = 10\n elif args.data_set == 'CIFAR100_DAM':\n if is_train:\n dataset = CIFAR100(root='/data/damvp_data/cal_all', split=\"train\", transform=transform)\n else:\n dataset = CIFAR100(root='/data/damvp_data/cal_all', split=\"test\", transform=transform)\n nb_classes = 100\n elif args.data_set == 'SVHN_DAM':\n if is_train:\n dataset = SVHN(root='/data/damvp_data/cal_all/svhn', split=\"train\", transform=transform)\n else:\n dataset = SVHN(root='/data/damvp_data/cal_all/svhn', split=\"test\", transform=transform)\n nb_classes = 10\n else:\n raise NotImplementedError()\n assert nb_classes == args.nb_classes\n print(\"Number of the class = %d\" % args.nb_classes)\n\n return dataset, nb_classes" }, { "identifier": "build_beit_pretraining_dataset", "path": "datasets.py", "snippet": "def build_beit_pretraining_dataset(args):\n transform = DataAugmentationForBEiT(args)\n print(\"Data Aug = %s\" % str(transform))\n return ImageFolder(args.data_path, transform=transform)" }, { "identifier": "build_beit_pretraining_dataset_val", "path": "datasets.py", "snippet": "def build_beit_pretraining_dataset_val(args):\n transform = DataAugmentationForBEiT_val(args)\n return ImageFolder('/data/fgvc_deal/cub/test', transform=transform)" }, { "identifier": "train_one_epoch", "path": "engine_for_train.py", "snippet": "def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module,\n data_loader: Iterable, optimizer: torch.optim.Optimizer,\n device: torch.device, epoch: int, loss_scaler, max_norm: float = 0,\n model_ema: Optional[ModelEma] = None, mixup_fn: Optional[Mixup] = None, log_writer=None,\n start_steps=None, lr_schedule_values=None, wd_schedule_values=None,\n num_training_steps_per_epoch=None, update_freq=None):\n model.train(True)\n metric_logger = utils.MetricLogger(delimiter=\" \")\n metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))\n metric_logger.add_meter('min_lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))\n header = 'Epoch: [{}]'.format(epoch)\n print_freq = 10\n\n if loss_scaler is None:\n model.zero_grad()\n model.micro_steps = 0\n else:\n optimizer.zero_grad()\n\n for data_iter_step, (samples, targets) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):\n step = data_iter_step // update_freq\n if step >= num_training_steps_per_epoch:\n continue\n it = start_steps + step # global training iteration\n # Update LR & WD for the first acc\n if lr_schedule_values is not None or wd_schedule_values is not None and data_iter_step % update_freq == 0:\n for i, param_group in enumerate(optimizer.param_groups):\n if lr_schedule_values is not None:\n param_group[\"lr\"] = lr_schedule_values[it] * param_group[\"lr_scale\"]\n if wd_schedule_values is not None and 
param_group[\"weight_decay\"] > 0:\n param_group[\"weight_decay\"] = wd_schedule_values[it]\n # print(samples)\n samples = samples.to(device, non_blocking=True)\n # images = images.to(device, non_blocking=True)\n targets = targets.to(device, non_blocking=True)\n\n if mixup_fn is not None:\n samples, targets = mixup_fn(samples, targets)\n\n if loss_scaler is None:\n samples = samples.half()\n loss, output = train_class_batch(\n model, samples, targets, criterion)# criterion_2, device\n else:\n with torch.cuda.amp.autocast():\n loss, output = train_class_batch(\n model, samples, targets, criterion)# criterion_2\n\n loss_value = loss.item()\n\n if not math.isfinite(loss_value):\n print(\"Loss is {}, stopping training\".format(loss_value))\n sys.exit(1)\n\n if loss_scaler is None:\n loss /= update_freq\n model.backward(loss)\n model.step()\n\n if (data_iter_step + 1) % update_freq == 0:\n # model.zero_grad()\n # Deepspeed will call step() & model.zero_grad() automatic\n if model_ema is not None:\n model_ema.update(model)\n grad_norm = None\n loss_scale_value = get_loss_scale_for_deepspeed(model)\n else:\n # this attribute is added by timm on one optimizer (adahessian)\n is_second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order\n loss /= update_freq\n grad_norm = loss_scaler(loss, optimizer, clip_grad=max_norm,\n parameters=model.parameters(), create_graph=is_second_order,\n update_grad=(data_iter_step + 1) % update_freq == 0)\n if (data_iter_step + 1) % update_freq == 0:\n optimizer.zero_grad()\n if model_ema is not None:\n model_ema.update(model)\n loss_scale_value = loss_scaler.state_dict()[\"scale\"]\n\n torch.cuda.synchronize()\n\n if mixup_fn is None:\n class_acc = (output.max(-1)[-1] == targets).float().mean()\n else:\n class_acc = None\n metric_logger.update(loss=loss_value)\n metric_logger.update(class_acc=class_acc)\n metric_logger.update(loss_scale=loss_scale_value)\n min_lr = 10.\n max_lr = 0.\n for group in optimizer.param_groups:\n min_lr = min(min_lr, group[\"lr\"])\n max_lr = max(max_lr, group[\"lr\"])\n\n metric_logger.update(lr=max_lr)\n metric_logger.update(min_lr=min_lr)\n weight_decay_value = None\n for group in optimizer.param_groups:\n if group[\"weight_decay\"] > 0:\n weight_decay_value = group[\"weight_decay\"]\n metric_logger.update(weight_decay=weight_decay_value)\n metric_logger.update(grad_norm=grad_norm)\n\n if log_writer is not None:\n log_writer.update(loss=loss_value, head=\"loss\")\n log_writer.update(class_acc=class_acc, head=\"loss\")\n log_writer.update(loss_scale=loss_scale_value, head=\"opt\")\n log_writer.update(lr=max_lr, head=\"opt\")\n log_writer.update(min_lr=min_lr, head=\"opt\")\n log_writer.update(weight_decay=weight_decay_value, head=\"opt\")\n log_writer.update(grad_norm=grad_norm, head=\"opt\")\n\n log_writer.set_step()\n\n # gather the stats from all processes\n metric_logger.synchronize_between_processes()\n print(\"Averaged stats:\", metric_logger)\n return {k: meter.global_avg for k, meter in metric_logger.meters.items()}" }, { "identifier": "evaluate", "path": "engine_for_train.py", "snippet": "@torch.no_grad()\ndef evaluate(data_loader, model, device):\n criterion = torch.nn.CrossEntropyLoss()\n\n metric_logger = utils.MetricLogger(delimiter=\" \")\n header = 'Test:'\n\n # switch to evaluation mode\n model.eval()\n \n for batch in metric_logger.log_every(data_loader, 10, header):\n # samples, images = bs\n images = batch[0]\n target = batch[-1]\n images = images.to(device, non_blocking=True)\n # samples = 
samples.to(device, non_blocking=True)\n target = target.to(device, non_blocking=True)\n \n # compute output\n \n with torch.cuda.amp.autocast():\n output, prompt = model(images)\n loss = 0.8*criterion(output, target)+0.2*criterion(prompt, target)\n \n # acc1, acc5 = accuracy(output, target, topk=(1, 5))\n acc1 = accuracy(output, target, topk=(1, 5))[0]\n\n batch_size = target.shape[0]\n metric_logger.update(loss=loss.item())\n metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)\n \n # gather the stats from all processes\n metric_logger.synchronize_between_processes()\n \"\"\"\n print('* Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f} loss {losses.global_avg:.3f}'\n .format(top1=metric_logger.acc1, top5=metric_logger.acc5, losses=metric_logger.loss))\n \"\"\"\n print('* Acc@1 {top1.global_avg:.3f} loss {losses.global_avg:.3f}'\n .format(top1=metric_logger.acc1, losses=metric_logger.loss))\n\n return {k: meter.global_avg for k, meter in metric_logger.meters.items()}" }, { "identifier": "NativeScalerWithGradNormCount", "path": "utils.py", "snippet": "class NativeScalerWithGradNormCount:\n state_dict_key = \"amp_scaler\"\n\n def __init__(self):\n self._scaler = torch.cuda.amp.GradScaler()\n\n def __call__(self, loss, optimizer, clip_grad=None, parameters=None, create_graph=False, update_grad=True):\n self._scaler.scale(loss).backward(create_graph=create_graph)\n if update_grad:\n if clip_grad is not None:\n assert parameters is not None\n self._scaler.unscale_(optimizer) # unscale the gradients of optimizer's assigned params in-place\n norm = torch.nn.utils.clip_grad_norm_(parameters, clip_grad)\n else:\n self._scaler.unscale_(optimizer)\n norm = get_grad_norm_(parameters)\n self._scaler.step(optimizer)\n self._scaler.update()\n else:\n norm = None\n return norm\n\n def state_dict(self):\n return self._scaler.state_dict()\n\n def load_state_dict(self, state_dict):\n self._scaler.load_state_dict(state_dict)" }, { "identifier": "_build_swin_model", "path": "vpt_main/src/models/build_swin_backbone.py", "snippet": "def _build_swin_model(model_type, crop_size, model_root):\n if model_type == \"swint_imagenet\":\n model = SwinTransformer(\n img_size=crop_size,\n embed_dim=96,\n depths=[2, 2, 6, 2],\n num_heads=[3, 6, 12, 24],\n window_size=7,\n drop_path_rate=0.2,\n num_classes=-1, # setting to a negative value will make head as identity\n )\n embed_dim = 96\n num_layers = 4\n elif model_type == \"swint_imagenet_ssl\":\n model = SwinTransformer(\n img_size=crop_size,\n embed_dim=96,\n depths=[2, 2, 6, 2],\n num_heads=[3, 6, 12, 24],\n window_size=7,\n drop_path_rate=0.2,\n num_classes=-1,\n )\n embed_dim = 96\n num_layers = 4\n\n elif model_type == \"swins_imagenet\":\n model = SwinTransformer(\n img_size=crop_size,\n embed_dim=96,\n depths=[2, 2, 18, 2],\n num_heads=[3, 6, 12, 24],\n window_size=7,\n drop_path_rate=0.3,\n num_classes=-1,\n )\n embed_dim = 96\n num_layers = 4\n elif model_type == \"swinb_imagenet_224\":\n model = SwinTransformer(\n img_size=crop_size,\n embed_dim=128,\n depths=[2, 2, 18, 2],\n num_heads=[4, 8, 16, 32],\n window_size=7,\n drop_path_rate=0.5,\n num_classes=-1,\n )\n embed_dim = 128\n num_layers = 4\n elif model_type == \"swinb_imagenet_384\":\n model = SwinTransformer(\n img_size=384,\n embed_dim=128,\n depths=[2, 2, 18, 2],\n num_heads=[4, 8, 16, 32],\n window_size=12,\n drop_path_rate=0.5,\n num_classes=-1,\n )\n embed_dim = 128\n num_layers = 4\n\n elif model_type == \"swinb_imagenet22k_224\":\n model = SwinTransformer(\n img_size=crop_size,\n 
embed_dim=128,\n depths=[2, 2, 18, 2],\n num_heads=[4, 8, 16, 32],\n window_size=7,\n drop_path_rate=0.2, # try to from 0.5 -> 0, 0.1 is best on cifar.\n num_classes=-1,\n )\n embed_dim = 128\n num_layers = 4\n elif model_type == \"swinb_imagenet22k_384\":\n model = SwinTransformer(\n img_size=384,\n embed_dim=128,\n depths=[2, 2, 18, 2],\n num_heads=[4, 8, 16, 32],\n window_size=12,\n drop_path_rate=0.5,\n num_classes=-1,\n )\n embed_dim = 128\n num_layers = 4\n elif model_type == \"swinl_imagenet22k_224\":\n model = SwinTransformer(\n img_size=crop_size,\n embed_dim=192,\n depths=[2, 2, 18, 2],\n num_heads=[6, 12, 24, 48],\n window_size=7,\n drop_path_rate=0.5,\n num_classes=-1,\n )\n embed_dim = 192\n num_layers = 4\n\n feat_dim = int(embed_dim * 2 ** (num_layers - 1))\n # load checkpoint\n model_w = os.path.join(model_root, MODEL_ZOO[model_type])\n checkpoint = torch.load(model_w, map_location='cpu')\n state_dict = checkpoint['model']\n\n \"\"\"\n if crop_size == 448:\n for k in list(state_dict.keys()):\n if \"attn_mask\" not in k:\n # remove prefix\n state_dict[k] = state_dict[k]\n # delete renamed or unused k\n else:\n del state_dict[k]\n\n # rename some keys for ssl models\n if model_type.endswith(\"ssl\"):\n # rename moco pre-trained keys\n for k in list(state_dict.keys()):\n # retain only encoder_q up to before the embedding layer\n if k.startswith('encoder.'):\n # remove prefix\n state_dict[k[len(\"encoder.\"):]] = state_dict[k]\n # delete renamed or unused k\n del state_dict[k]\n \"\"\"\n\n model.load_state_dict(state_dict, strict=False)\n # load cross attention\n for name, param in model.named_parameters():\n name_list = name.split('.')\n if name_list[0]=='layers':\n if name_list[2]=='cross_attn':\n if name_list[4]=='attention_norm':\n load_name = name_list[0]+'.'+name_list[1]+'.blocks.'+name_list[3]+'.norm1.'+name_list[5]\n param.requires_grad = False\n param.copy_(state_dict[load_name])\n elif name_list[4]=='attn':\n if name_list[5]=='qkv':\n load_name = name_list[0]+'.'+name_list[1]+'.blocks.'+name_list[3]+'.attn.qkv.'+name_list[6]\n param.requires_grad = False\n param.copy_(state_dict[load_name])\n elif name_list[5]=='to_out':\n load_name = name_list[0]+'.'+name_list[1]+'.blocks.'+name_list[3]+'.attn.proj.'+name_list[6]\n param.requires_grad = False\n param.copy_(state_dict[load_name])\n\n return model, feat_dim" } ]
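For orientation on the record that follows: its cropped code builds the optimizer with assigner = None, i.e. without layer-wise learning-rate decay. The sketch below is only an illustration, under assumptions, of how LayerDecayValueAssigner and create_optimizer from the context above would be wired together if layer decay were wanted; the helper name, the layer count, and the BEiT-style decay schedule are hypothetical, and args is assumed to be this script's argparse namespace (opt, lr, weight_decay, momentum, ...).

# Hypothetical helper, not part of the record: wires layer-wise lr decay into
# create_optimizer using the two classes shown in the context snippets above.
from optim_factory import create_optimizer, LayerDecayValueAssigner


def build_layer_decay_optimizer(args, model, num_layers=12, layer_decay=0.75):
    # One scale per layer id (patch embed, blocks, head), following the common
    # BEiT-style convention; this schedule is an assumption, not taken from the script.
    values = [layer_decay ** (num_layers + 1 - i) for i in range(num_layers + 2)]
    assigner = LayerDecayValueAssigner(values)
    # create_optimizer groups parameters via get_parameter_groups and attaches an
    # lr_scale to each group; the training loop multiplies that scale into the
    # per-step learning rate (see train_one_epoch in the context above).
    return create_optimizer(
        args,
        model,
        get_num_layer=assigner.get_layer_id,
        get_layer_scale=assigner.get_scale,
    )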
import argparse import datetime import numpy as np import time import torch import torch.nn as nn import torch.backends.cudnn as cudnn import json import os import utils import random import deepspeed from pathlib import Path from time import sleep from timm.data.mixup import Mixup from timm.models import create_model from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy from timm.utils import ModelEma from optim_factory import create_optimizer, get_parameter_groups, LayerDecayValueAssigner from datasets import build_dataset from datasets import build_beit_pretraining_dataset, build_beit_pretraining_dataset_val from engine_for_train import train_one_epoch, evaluate # engine for vit from utils import NativeScalerWithGradNormCount as NativeScaler from scipy import interpolate from timm.models.layers import trunc_normal_ from functools import partial from vpt_main.src.models.build_swin_backbone import _build_swin_model # choose model from deepspeed import DeepSpeedConfig
11430
data_loader_val = None mixup_fn = None mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None if mixup_active: print("Mixup is activated!") mixup_fn = Mixup( mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax, prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode, label_smoothing=args.smoothing, num_classes=args.nb_classes) model = Dual_model(args) n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad) frozen_parameters = sum(p.numel() for p in model.parameters() if not p.requires_grad) total_parameters = sum(p.numel() for p in model.parameters()) print('------------------------------') for name, param in model.named_parameters(): print(name, param.requires_grad) print('------------------------------') model.to(device) model_ema = None if args.model_ema: # Important to create EMA model after cuda(), DP wrapper, and AMP but before SyncBN and DDP wrapper model_ema = ModelEma( model, decay=args.model_ema_decay, device='cpu' if args.model_ema_force_cpu else '', resume='') print("Using EMA with decay = %.8f" % args.model_ema_decay) model_without_ddp = model # print("Model = %s" % str(model_without_ddp)) total_batch_size = args.batch_size * args.update_freq * utils.get_world_size() num_training_steps_per_epoch = len(dataset_train) // total_batch_size print("LR = %.8f" % args.lr) print("Batch size = %d" % total_batch_size) print("Update frequent = %d" % args.update_freq) print("Number of training examples = %d" % len(dataset_train)) print("Number of training training per epoch = %d" % num_training_steps_per_epoch) assigner = None if assigner is not None: print("Assigned values = %s" % str(assigner.values)) skip_weight_decay_list = None if args.enable_deepspeed: loss_scaler = None optimizer_params = get_parameter_groups( model, args.weight_decay, skip_weight_decay_list, assigner.get_layer_id if assigner is not None else None, assigner.get_scale if assigner is not None else None) model, optimizer, _, _ = ds_init( args=args, model=model, model_parameters=optimizer_params, dist_init_required=not args.distributed, ) print("model.gradient_accumulation_steps() = %d" % model.gradient_accumulation_steps()) assert model.gradient_accumulation_steps() == args.update_freq else: if args.distributed: model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=True) model_without_ddp = model.module optimizer = create_optimizer( args, model_without_ddp, skip_list=skip_weight_decay_list, get_num_layer=assigner.get_layer_id if assigner is not None else None, get_layer_scale=assigner.get_scale if assigner is not None else None) loss_scaler = NativeScaler() print("Use step level LR scheduler!") lr_schedule_values = utils.cosine_scheduler( args.lr, args.min_lr, args.epochs, num_training_steps_per_epoch, warmup_epochs=args.warmup_epochs, warmup_steps=args.warmup_steps, ) if args.weight_decay_end is None: args.weight_decay_end = args.weight_decay wd_schedule_values = utils.cosine_scheduler( args.weight_decay, args.weight_decay_end, args.epochs, num_training_steps_per_epoch) print("Max WD = %.7f, Min WD = %.7f" % (max(wd_schedule_values), min(wd_schedule_values))) if mixup_fn is not None: # smoothing is handled with mixup label transform criterion = SoftTargetCrossEntropy() elif args.smoothing > 0.: criterion = LabelSmoothingCrossEntropy(smoothing=args.smoothing) else: criterion = torch.nn.CrossEntropyLoss() print("criterion = %s" % str(criterion)) utils.auto_load_model( 
args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer, loss_scaler=loss_scaler, model_ema=model_ema) if args.eval: test_stats = evaluate(data_loader_val, model, device) print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%") exit(0) # show parameters print('number of learnable params:', n_parameters) print('rate of tuned/total(*100): %.2f' % (float(n_parameters)/(86743224+n_parameters)*100)+'%')#total_parameters print(f"Start training for {args.epochs} epochs") start_time = time.time() max_accuracy = 0.0 for epoch in range(args.start_epoch, args.epochs): if args.distributed: data_loader_train.sampler.set_epoch(epoch) if log_writer is not None: log_writer.set_step(epoch * num_training_steps_per_epoch * args.update_freq)
# -------------------------------------------------------- # SA2VP: Spatially Aligned-and-Adapted Visual Prompt code # reference: # BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254) # Github source: https://github.com/microsoft/unilm/tree/master/beit # Based on timm # https://github.com/rwightman/pytorch-image-models/tree/master/timm # --------------------------------------------------------' #os.environ['CUDA_DEVICE_ORDER']='PCI_BUS_ID' #os.environ['CUDA_VISIBLE_DEVICES']='0' class Dual_model(nn.Module): def __init__(self, args): super(Dual_model, self).__init__() self.vit_base, feat_dim = _build_swin_model('swinb_imagenet22k_224', 224, './backbone_ckpt') # where to save pre-trained model ./backbone_ckpt for k, p in self.vit_base.named_parameters(): name_list = k.split('.') print(name_list) if name_list[1] == 'deep_ppt' or name_list[1] == 'proj_ppt': p.requires_grad = True elif name_list[1] == '2': if name_list[2] == 'cross_attn': if name_list[4] == 'ffn' or name_list[4] == 'ffn_norm': p.requires_grad = True else: p.requires_grad = False else: p.requires_grad = False else: p.requires_grad = False self.class_head = nn.Linear(1024, args.nb_classes, bias=True) trunc_normal_(self.class_head.weight, std=0.02) def forward(self, x): x, p = self.vit_base.forward_features(x) # B*768 return self.class_head(x), self.class_head(p) def get_args(): parser = argparse.ArgumentParser('SA2VP script for image classification', add_help=False) parser.add_argument('--batch_size', default=64, type=int) parser.add_argument('--epochs', default=30, type=int) parser.add_argument('--update_freq', default=1, type=int) parser.add_argument('--save_ckpt_freq', default=50, type=int) parser.add_argument("--discrete_vae_weight_path", type=str) parser.add_argument("--discrete_vae_type", type=str, default="dall-e") # Model parameters parser.add_argument('--model', default='beit_base_patch16_224', type=str, metavar='MODEL', help='Name of model to train') parser.add_argument('--rel_pos_bias', action='store_true') parser.add_argument('--disable_rel_pos_bias', action='store_false', dest='rel_pos_bias') parser.set_defaults(rel_pos_bias=False) parser.add_argument('--abs_pos_emb', action='store_true') parser.set_defaults(abs_pos_emb=True) parser.add_argument('--layer_scale_init_value', default=0.1, type=float, help="0.1 for base, 1e-5 for large. 
set 0 to disable layer scale") parser.add_argument('--input_size', default=224, type=int, help='images input size') parser.add_argument('--second_input_size', default=112, type=int, help='images input size for discrete vae') parser.add_argument('--drop', type=float, default=0.0, metavar='PCT', help='Dropout rate (default: 0.)') parser.add_argument('--attn_drop_rate', type=float, default=0.0, metavar='PCT', help='Attention dropout rate (default: 0.)') parser.add_argument('--drop_path', type=float, default=0.1, metavar='PCT', help='Drop path rate (default: 0.1)') parser.add_argument('--disable_eval_during_finetuning', action='store_true', default=False) parser.add_argument('--model_ema', action='store_true', default=False) parser.add_argument('--model_ema_decay', type=float, default=0.9999, help='') parser.add_argument('--model_ema_force_cpu', action='store_true', default=False, help='') # Optimizer parameters parser.add_argument('--opt', default='adamw', type=str, metavar='OPTIMIZER', help='Optimizer (default: "adamw"') parser.add_argument('--opt_eps', default=1e-8, type=float, metavar='EPSILON', help='Optimizer Epsilon (default: 1e-8)') parser.add_argument('--opt_betas', default=None, type=float, nargs='+', metavar='BETA', help='Optimizer Betas (default: None, use opt default)') parser.add_argument('--clip_grad', type=float, default=None, metavar='NORM', help='Clip gradient norm (default: None, no clipping)') parser.add_argument('--momentum', type=float, default=0.9, metavar='M', help='SGD momentum (default: 0.9)') parser.add_argument('--weight_decay', type=float, default=0.05, help='weight decay (default: 0.05)') parser.add_argument('--weight_decay_end', type=float, default=None, help="""Final value of the weight decay. We use a cosine schedule for WD and using a larger decay by the end of training improves performance for ViTs.""") parser.add_argument('--lr', type=float, default=5e-4, metavar='LR', help='learning rate (default: 5e-4)') parser.add_argument('--layer_decay', type=float, default=0.9) parser.add_argument('--warmup_lr', type=float, default=1e-6, metavar='LR', help='warmup learning rate (default: 1e-6)') parser.add_argument('--min_lr', type=float, default=1e-6, metavar='LR', help='lower lr bound for cyclic schedulers that hit 0 (1e-5)') parser.add_argument('--warmup_epochs', type=int, default=5, metavar='N', help='epochs to warmup LR, if scheduler supports') parser.add_argument('--warmup_steps', type=int, default=-1, metavar='N', help='num of steps to warmup LR, will overload warmup_epochs if set > 0') # Augmentation parameters parser.add_argument('--color_jitter', type=float, default=0.4, metavar='PCT', help='Color jitter factor (default: 0.4)') parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME', help='Use AutoAugment policy. "v0" or "original". 
" + "(default: rand-m9-mstd0.5-inc1)'), parser.add_argument('--smoothing', type=float, default=0, help='Label smoothing (default: 0)') parser.add_argument('--train_interpolation', type=str, default='bicubic', help='Training interpolation (random, bilinear, bicubic default: "bicubic")') parser.add_argument('--second_interpolation', type=str, default='lanczos', help='Interpolation for discrete vae (random, bilinear, bicubic default: "lanczos")') # Evaluation parameters parser.add_argument('--crop_pct', type=float, default=None) # * Random Erase params parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT', help='Random erase prob (default: 0.25)') parser.add_argument('--remode', type=str, default='pixel', help='Random erase mode (default: "pixel")') parser.add_argument('--recount', type=int, default=1, help='Random erase count (default: 1)') parser.add_argument('--resplit', action='store_true', default=False, help='Do not random erase first (clean) augmentation split') # * Mixup params parser.add_argument('--mixup', type=float, default=0, help='mixup alpha, mixup enabled if > 0.') parser.add_argument('--cutmix', type=float, default=0, help='cutmix alpha, cutmix enabled if > 0.') parser.add_argument('--cutmix_minmax', type=float, nargs='+', default=None, help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)') parser.add_argument('--mixup_prob', type=float, default=1.0, help='Probability of performing mixup or cutmix when either/both is enabled') parser.add_argument('--mixup_switch_prob', type=float, default=0.5, help='Probability of switching to cutmix when both mixup and cutmix enabled') parser.add_argument('--mixup_mode', type=str, default='batch', help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"') # * Finetuning params parser.add_argument('--finetune', default='', help='finetune from checkpoint') parser.add_argument('--model_key', default='model|module', type=str) parser.add_argument('--model_prefix', default='', type=str) parser.add_argument('--init_scale', default=0.001, type=float) parser.add_argument('--use_mean_pooling', action='store_true') parser.set_defaults(use_mean_pooling=True) parser.add_argument('--use_cls', action='store_false', dest='use_mean_pooling') parser.add_argument('--disable_weight_decay_on_rel_pos_bias', action='store_true', default=False) # Dataset parameters parser.add_argument('--data_path', default='/datasets01/imagenet_full_size/061417/', type=str, help='dataset path') parser.add_argument('--my_mode', default='train_val', type=str, help='my mode to train or test') parser.add_argument('--eval_data_path', default=None, type=str, help='dataset path for evaluation') parser.add_argument('--nb_classes', default=0, type=int, help='number of the classification types') parser.add_argument('--imagenet_default_mean_and_std', default=False, action='store_true') parser.add_argument('--data_set', default='CUB', choices=['CIFAR', 'IMNET', 'image_folder', 'CUB', 'DOG', 'FLOWER', 'CAR', 'BIRD', 'CAL101', 'DMLAB','EUROSAT','PATCH_CAMELYON','CLEVR_COUNT','CIFAR100','FOOD101','SVHN','DTD','FLOWER_S','PET','SVHN_S','SUN','Resisc45','Retinopathy','CLEVR_DISTANCE','KITTI_DISTANCE','DS_LOC','DS_ORI','SN_AZI','SN_ELE', 'DTD_DAM', 'GTSRB_DAM', 'FOOD_DAM', 'CIFAR10_DAM', 'CIFAR100_DAM', 'SVHN_DAM'], type=str, help='ImageNet dataset path') parser.add_argument('--output_dir', default='', help='path where to save, empty for no saving') parser.add_argument('--log_dir', default=None, help='path where to tensorboard log') 
parser.add_argument('--device', default='cuda', help='device to use for training / testing') parser.add_argument('--seed', default=0, type=int) parser.add_argument('--resume', default='', help='resume from checkpoint') parser.add_argument('--auto_resume', action='store_true') parser.add_argument('--no_auto_resume', action='store_false', dest='auto_resume') parser.set_defaults(auto_resume=True) parser.add_argument('--save_ckpt', action='store_true') parser.add_argument('--no_save_ckpt', action='store_false', dest='save_ckpt') parser.set_defaults(save_ckpt=True) parser.add_argument('--start_epoch', default=0, type=int, metavar='N', help='start epoch') parser.add_argument('--eval', action='store_true', help='Perform evaluation only') parser.add_argument('--dist_eval', action='store_true', default=False, help='Enabling distributed evaluation') parser.add_argument('--num_workers', default=10, type=int) parser.add_argument('--pin_mem', action='store_true', help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.') parser.add_argument('--no_pin_mem', action='store_false', dest='pin_mem') parser.set_defaults(pin_mem=True) # distributed training parameters parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes') parser.add_argument('--local_rank', default=-1, type=int) parser.add_argument('--dist_on_itp', action='store_true') parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training') parser.add_argument('--enable_deepspeed', action='store_true', default=False) known_args, _ = parser.parse_known_args() if known_args.enable_deepspeed: try: parser = deepspeed.add_config_arguments(parser) ds_init = deepspeed.initialize except: print("Please 'pip install deepspeed==0.4.0'") exit(0) else: ds_init = None return parser.parse_args(), ds_init def main(args, ds_init): utils.init_distributed_mode(args) if ds_init is not None: utils.create_ds_config(args) print(args) device = torch.device(args.device) seed = 42 torch.manual_seed(seed) np.random.seed(seed) cudnn.benchmark = True dataset_train, args.nb_classes = build_dataset(is_train=True, args=args) if args.disable_eval_during_finetuning: dataset_val = None else: dataset_val, _ = build_dataset(is_train=False, args=args) print("Calculation of training examples = %d" % len(dataset_train)) print("Calculation of other examples = %d" % len(dataset_val)) if True: # args.distributed: num_tasks = utils.get_world_size() global_rank = utils.get_rank() sampler_train = torch.utils.data.DistributedSampler( dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True ) print("Sampler_train = %s" % str(sampler_train)) if args.dist_eval: if len(dataset_val) % num_tasks != 0: print('Warning: Enabling distributed evaluation with an eval dataset not divisible by process number. 
' 'This will slightly alter validation results as extra duplicate entries are added to achieve ' 'equal num of samples per-process.') sampler_val = torch.utils.data.DistributedSampler( dataset_val, num_replicas=num_tasks, rank=global_rank, shuffle=False) else: sampler_val = torch.utils.data.SequentialSampler(dataset_val) else: sampler_train = torch.utils.data.RandomSampler(dataset_train) sampler_val = torch.utils.data.SequentialSampler(dataset_val) if global_rank == 0 and args.log_dir is not None: os.makedirs(args.log_dir, exist_ok=True) log_writer = utils.TensorboardLogger(log_dir=args.log_dir) else: log_writer = None data_loader_train = torch.utils.data.DataLoader( dataset_train, sampler=sampler_train, batch_size=args.batch_size, num_workers=args.num_workers, pin_memory=args.pin_mem, drop_last=True, ) if dataset_val is not None: data_loader_val = torch.utils.data.DataLoader( dataset_val, sampler=sampler_val, batch_size=int(4*args.batch_size), num_workers=args.num_workers, pin_memory=args.pin_mem, drop_last=False ) else: data_loader_val = None mixup_fn = None mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None if mixup_active: print("Mixup is activated!") mixup_fn = Mixup( mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax, prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode, label_smoothing=args.smoothing, num_classes=args.nb_classes) model = Dual_model(args) n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad) frozen_parameters = sum(p.numel() for p in model.parameters() if not p.requires_grad) total_parameters = sum(p.numel() for p in model.parameters()) print('------------------------------') for name, param in model.named_parameters(): print(name, param.requires_grad) print('------------------------------') model.to(device) model_ema = None if args.model_ema: # Important to create EMA model after cuda(), DP wrapper, and AMP but before SyncBN and DDP wrapper model_ema = ModelEma( model, decay=args.model_ema_decay, device='cpu' if args.model_ema_force_cpu else '', resume='') print("Using EMA with decay = %.8f" % args.model_ema_decay) model_without_ddp = model # print("Model = %s" % str(model_without_ddp)) total_batch_size = args.batch_size * args.update_freq * utils.get_world_size() num_training_steps_per_epoch = len(dataset_train) // total_batch_size print("LR = %.8f" % args.lr) print("Batch size = %d" % total_batch_size) print("Update frequent = %d" % args.update_freq) print("Number of training examples = %d" % len(dataset_train)) print("Number of training training per epoch = %d" % num_training_steps_per_epoch) assigner = None if assigner is not None: print("Assigned values = %s" % str(assigner.values)) skip_weight_decay_list = None if args.enable_deepspeed: loss_scaler = None optimizer_params = get_parameter_groups( model, args.weight_decay, skip_weight_decay_list, assigner.get_layer_id if assigner is not None else None, assigner.get_scale if assigner is not None else None) model, optimizer, _, _ = ds_init( args=args, model=model, model_parameters=optimizer_params, dist_init_required=not args.distributed, ) print("model.gradient_accumulation_steps() = %d" % model.gradient_accumulation_steps()) assert model.gradient_accumulation_steps() == args.update_freq else: if args.distributed: model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=True) model_without_ddp = model.module optimizer = create_optimizer( args, 
model_without_ddp, skip_list=skip_weight_decay_list, get_num_layer=assigner.get_layer_id if assigner is not None else None, get_layer_scale=assigner.get_scale if assigner is not None else None) loss_scaler = NativeScaler() print("Use step level LR scheduler!") lr_schedule_values = utils.cosine_scheduler( args.lr, args.min_lr, args.epochs, num_training_steps_per_epoch, warmup_epochs=args.warmup_epochs, warmup_steps=args.warmup_steps, ) if args.weight_decay_end is None: args.weight_decay_end = args.weight_decay wd_schedule_values = utils.cosine_scheduler( args.weight_decay, args.weight_decay_end, args.epochs, num_training_steps_per_epoch) print("Max WD = %.7f, Min WD = %.7f" % (max(wd_schedule_values), min(wd_schedule_values))) if mixup_fn is not None: # smoothing is handled with mixup label transform criterion = SoftTargetCrossEntropy() elif args.smoothing > 0.: criterion = LabelSmoothingCrossEntropy(smoothing=args.smoothing) else: criterion = torch.nn.CrossEntropyLoss() print("criterion = %s" % str(criterion)) utils.auto_load_model( args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer, loss_scaler=loss_scaler, model_ema=model_ema) if args.eval: test_stats = evaluate(data_loader_val, model, device) print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%") exit(0) # show parameters print('number of learnable params:', n_parameters) print('rate of tuned/total(*100): %.2f' % (float(n_parameters)/(86743224+n_parameters)*100)+'%')#total_parameters print(f"Start training for {args.epochs} epochs") start_time = time.time() max_accuracy = 0.0 for epoch in range(args.start_epoch, args.epochs): if args.distributed: data_loader_train.sampler.set_epoch(epoch) if log_writer is not None: log_writer.set_step(epoch * num_training_steps_per_epoch * args.update_freq)
train_stats = train_one_epoch(
6
2023-12-12 13:19:17+00:00
16k
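The sample above trains a Dual_model whose forward pass returns two class predictions (one from the backbone features and one from the prompt path), combined in the loss with fixed 0.8/0.2 weights. A minimal, self-contained sketch of that weighted two-head loss, using a small stand-in module with made-up dimensions rather than the repository's Swin backbone:

import torch
import torch.nn as nn

class TwoHeadStub(nn.Module):
    # Stand-in for the sample's Dual_model: shared features feeding two linear heads.
    def __init__(self, dim=16, num_classes=10):
        super().__init__()
        self.trunk = nn.Linear(dim, dim)
        self.class_head = nn.Linear(dim, num_classes)
        self.prompt_head = nn.Linear(dim, num_classes)

    def forward(self, x):
        feat = torch.relu(self.trunk(x))
        return self.class_head(feat), self.prompt_head(feat)

model = TwoHeadStub()
criterion = nn.CrossEntropyLoss()
images = torch.randn(4, 16)            # made-up batch of 4 feature vectors
target = torch.randint(0, 10, (4,))    # made-up integer class labels

output, prompt = model(images)
# Same fixed weighting as in the sample: 0.8 for the main head, 0.2 for the prompt head.
loss = 0.8 * criterion(output, target) + 0.2 * criterion(prompt, target)
loss.backward()
print(float(loss))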
lumina-test/lumina
lumina/e2e_test/test_gbn.py
[ { "identifier": "get_qp_info_list", "path": "lumina/analyzer/main.py", "snippet": "def get_qp_info_list(switch_msg_snapshot):\n \"\"\" Get the list of QP info from the switch message snapshot\n\n Args:\n switch_msg_snapshot (str): The path to the switch message snapshot\n\n Returns:\n list of dict: The list of queue pair (QP) information if successful or None otherwise.\n The list of QP information is in the following format:\n [{'psn_rcv': initial packet sequence number from the receiver qp,\n 'psn_snd': initial packet sequence number from the sender qp,\n 'qpn_rcv': receiver qp number,\n 'qpn_snd': sender qp number,\n 'ip_rcv' : receiver IP\n 'ip_snd' : sender IP}]\n \"\"\"\n try:\n with open(switch_msg_snapshot, 'r') as stream:\n qp_info_list = yaml.safe_load(stream)\n except:\n logging.error(\"Read switch message snapshot %s error.\" % switch_msg_snapshot)\n return None\n\n logging.info(\"Read switch message snapshot %s.\" % switch_msg_snapshot)\n return qp_info_list" }, { "identifier": "Orchestrator", "path": "lumina/orchestrator/main.py", "snippet": "class Orchestrator:\n \"\"\" Class to manage the experiment \"\"\"\n def __init__(self, config_file):\n \"\"\" Constructor for Orchestrator class\n\n Args:\n config_file (str): path to the yaml (config) file.\n The file contains configs for switch, requester, responder, traffic, etc.\n\n Returns:\n N/A\n \"\"\"\n with open(config_file, \"r\") as stream:\n conf = yaml.safe_load(stream)\n try:\n local_workspace = conf['local-workspace']\n result_path = conf['result-path']\n switch_conf = conf['switch']\n requester_conf = conf['requester']\n responder_conf = conf['responder']\n requester_mirror_conf = conf['requester-mirror']\n responder_mirror_conf = conf['responder-mirror']\n traffic_conf = conf['traffic']\n rewrite_udp_dst_port = conf['rewrite-udp-dst-port']\n num_repeats = conf['num-repeats']\n agg_pcap_filename = conf['aggregate-pcap-filename']\n except KeyError as e:\n print(\"Config file %s has a bad yaml format (key error: %s)\" % (config_file, e))\n sys.exit(-1)\n\n switch_conf['rewrite-udp-dst-port'] = rewrite_udp_dst_port\n requester_mirror_conf['pkt-dump-conf']['rewrite-udp-dst-port'] = rewrite_udp_dst_port\n responder_mirror_conf['pkt-dump-conf']['rewrite-udp-dst-port'] = rewrite_udp_dst_port\n\n self.local_workspace = local_workspace\n self.result_path = result_path\n self.traffic_conf = traffic_conf\n self.num_repeats = num_repeats\n self.switch = switch.Switch(switch_conf)\n self.requester = host.RDMAHost(requester_conf)\n self.responder = host.RDMAHost(responder_conf)\n self.requester_mirror = host.MirrorHost(requester_mirror_conf)\n self.responder_mirror = host.MirrorHost(responder_mirror_conf)\n self.aggregate_pcap_filename = agg_pcap_filename\n\n cmd = \"mkdir -p %s\" % self.result_path\n subprocess.call(cmd, shell = True)\n\n def rm_old_files(self):\n \"\"\" Remove result files left by previous experiments \"\"\"\n old_iter_id = 0\n old_iter_result_path = os.path.join(self.result_path, str(old_iter_id))\n\n while os.path.exists(old_iter_result_path) and not os.path.isfile(old_iter_result_path):\n cmd = \"rm -rf %s\" % (old_iter_result_path)\n subprocess.call(cmd, shell=True)\n\n old_iter_id += 1\n old_iter_result_path = os.path.join(self.result_path, str(old_iter_id))\n\n def get_requester_ip_list(self):\n \"\"\" Return the list of requester IP addresses (without prefix length info) \"\"\"\n return [x.split('/')[0] for x in self.requester.conf['nic']['ip-list']]\n\n def get_responder_ip_list(self):\n \"\"\" Return the 
list of responder IP addresses (without prefix length info) \"\"\"\n return [x.split('/')[0] for x in self.responder.conf['nic']['ip-list']]\n\n def get_num_repeats(self):\n \"\"\" Return the number of experiment repeats \"\"\"\n return self.num_repeats\n\n def sync_and_compile(self):\n \"\"\" Syncronize and compile the code on all the hosts\n\n Returns:\n bool: True if the code is synced and compiled successfully, False otherwise\n \"\"\"\n logging.info(\"Sync and compile the code\")\n\n ## Sync and compile the switch code\n ret = self.switch.sync_and_compile(self.local_workspace,\n switch.SWITCH_PROG_DIR_NAME,\n switch.SWITCH_PROG_FILE_NAME)\n if ret == False:\n logging.error(\"Failed to sync and compile the switch code\")\n return False\n\n ## Sync and compile the traffic generator code\n rdma_verb = self.traffic_conf['rdma-verb'].strip().lower()\n if rdma_verb not in host.VALID_IB_VERB_LIST_LOWER:\n logging.error(\"Invalid RDMA verb: %s\" % rdma_verb)\n return False\n\n ret = self.requester.sync_and_compile(local_workspace=self.local_workspace,\n prog_dir_name=self.requester.traffic_gen_dir_name(),\n prog_file_name=self.requester.traffic_gen_client_name(rdma_verb))\n if ret == False:\n logging.error(\"Failed to sync and compile the traffic generator code on requester\")\n return False\n\n ret = self.responder.sync_and_compile(local_workspace=self.local_workspace,\n prog_dir_name=self.requester.traffic_gen_dir_name(),\n prog_file_name=self.requester.traffic_gen_server_name(rdma_verb))\n if ret == False:\n logging.error(\"Failed to sync and compile the traffic generator code on responder\")\n return False\n\n ret = self.requester.sync(local_workspace=self.local_workspace,\n prog_dir_name=host.DUMP_COUNTER_DIR_NAME)\n if ret == False:\n logging.error(\"Failed to sync the dump counter code on requester\")\n return False\n\n ret = self.responder.sync(local_workspace=self.local_workspace,\n prog_dir_name=host.DUMP_COUNTER_DIR_NAME)\n if ret == False:\n logging.error(\"Failed to sync the dump counter code on responder\")\n return False\n\n ## Sync and compile the packet capture code\n ret = self.requester_mirror.sync_and_compile(local_workspace=self.local_workspace,\n prog_dir_name=host.PKT_CAPTURE_DIR_NAME,\n prog_file_name=host.PKT_CAPTURE_FILE_NAME)\n if ret == False:\n logging.error(\"Failed to sync and compile the packet capture code on requester_mirror\")\n return False\n\n ret = self.responder_mirror.sync_and_compile(local_workspace=self.local_workspace,\n prog_dir_name=host.PKT_CAPTURE_DIR_NAME,\n prog_file_name=host.PKT_CAPTURE_FILE_NAME)\n if ret == False:\n logging.error(\"Failed to sync and compile the packet capture code on responder_mirror\")\n return False\n\n return True\n\n def generate_switch_table_config(self):\n \"\"\" Generate the switch configuration, including:\n 1. Forward table\n 2. Mirror table\n 3. ARP table\n 4. 
Traffic table, including the events to inject\n\n Returns:\n bool: True if the switch configuration is generated successfully, False otherwise\n \"\"\"\n requester_nic_conf = self.requester.conf['nic']\n responder_nic_conf = self.responder.conf['nic']\n requester_mirror_nic_conf = self.requester_mirror.conf['nic']\n responder_mirror_nic_conf = self.responder_mirror.conf['nic']\n\n ## Set up forward table entries\n self.switch.conf['forward-table'] = []\n try:\n for nic_conf, host_type in zip([requester_nic_conf, responder_nic_conf, \\\n requester_mirror_nic_conf, responder_mirror_nic_conf],\n ['requester', 'responder', 'requester_mirror', 'responder_mirror']):\n forward_table_entry = {'dst-mac': nic_conf['mac'],\n 'eg-port': nic_conf['switch-port'],\n 'host': host_type}\n self.switch.conf['forward-table'].append(forward_table_entry)\n except:\n logging.error(\"Failed to set forward table\")\n return False\n\n ## Set up mirror table entries, use ingress_to_egress\n try:\n requester_mirror_entry = {'direction': 'ingress_to_egress',\n 'src-port': requester_nic_conf['switch-port'],\n 'dst-port': requester_mirror_nic_conf['switch-port']}\n\n responder_mirror_entry = {'direction': 'ingress_to_egress',\n 'src-port': responder_nic_conf['switch-port'],\n 'dst-port': responder_mirror_nic_conf['switch-port']}\n self.switch.conf['mirror-table'] = [requester_mirror_entry, responder_mirror_entry]\n except:\n logging.error(\"Failed to set mirror table\")\n return False\n\n requester_mac = requester_nic_conf['mac']\n responder_mac = responder_nic_conf['mac']\n requester_ip_list = requester_nic_conf['ip-list']\n responder_ip_list = responder_nic_conf['ip-list']\n ## Set up arp table entries\n arp_entries = []\n try:\n for dst_ip_list, dst_mac in zip([requester_ip_list, responder_ip_list],\n [requester_mac, responder_mac]):\n for dst_ip_subnet in dst_ip_list:\n dst_ip = dst_ip_subnet.split('/')[0]\n arp_entries.append({'dst-ip': dst_ip, 'dst-mac': dst_mac})\n self.switch.conf['arp-table'] = arp_entries\n except:\n logging.error(\"Failed to set ARP table\")\n return False\n\n ## Generate the events of each iteration for switch config\n per_iter_event_list = self.traffic_conf['data-pkt-events']\n msg_size = self.traffic_conf['message-size']\n mtu = self.traffic_conf['mtu']\n num_msgs_per_qp = self.traffic_conf['num-msgs-per-qp']\n num_pkts_per_msg = int(math.ceil(msg_size / mtu))\n self.switch.conf['traffic'] = {}\n self.switch.conf['traffic']['num-msgs-per-qp'] = num_msgs_per_qp\n self.switch.conf['traffic']['num-pkts-per-msg'] = num_pkts_per_msg\n self.switch.conf['traffic']['data-pkt-events'] = []\n\n if per_iter_event_list is None or len(per_iter_event_list) == 0:\n ## No events at all\n return True\n\n for i in range(num_msgs_per_qp):\n for per_iter_event in per_iter_event_list:\n global_event = copy.deepcopy(per_iter_event)\n\n ## This event is applied to all the packets of the message. 
We need to expand it!\n if str(global_event['psn']).lower() == 'all':\n for psn in range(num_pkts_per_msg):\n global_event['psn'] = psn + i * num_pkts_per_msg\n self.switch.conf['traffic']['data-pkt-events'].append(copy.deepcopy(global_event))\n else:\n global_event['psn'] += i * num_pkts_per_msg\n self.switch.conf['traffic']['data-pkt-events'].append(copy.deepcopy(global_event))\n\n return True\n\n def ping_mesh(self):\n \"\"\" Ping all the IP addresses between requester and responder to check the connectivity\n\n Returns:\n bool: True if all the IP addresses can be pinged successfully, False otherwise\n \"\"\"\n for requester_ip_subnet in self.requester.conf['nic']['ip-list']:\n requester_ip = requester_ip_subnet.split('/')[0]\n command = \"ping \" + requester_ip + \" -c 5 -i 0.2\"\n ret_val, err_info, exit_status = self.responder.execute_command(command)\n if exit_status != 0:\n logging.error(\"Failed to ping ip \" + requester_ip)\n logging.error(\"[Command return info]: %s %s\" % (', '.join(ret_val), ', '.join(err_info)))\n return False\n\n for responder_ip_subnet in self.responder.conf['nic']['ip-list']:\n responder_ip = responder_ip_subnet.split('/')[0]\n command = \"ping \" + responder_ip + \" -c 5 -i 0.2\"\n ret_val, err_info, exit_status = self.requester.execute_command(command)\n if exit_status != 0:\n logging.error(\"Failed to ping ip \" + responder_ip)\n logging.error(\"[Command return info]: %s %s\" % (ret_val, err_info))\n return False\n\n logging.info(\"Successfully pinged all the IP addresses between requester and responder\")\n return True\n\n def generate_switch_config_file(self):\n \"\"\" Generate the switch configuration file and copy it to the switch\n\n Returns:\n bool: True if the switch configuration file is generated and copied successfully, False otherwise\n \"\"\"\n ## Get the mac address for all the hosts\n self.requester.get_mac_address()\n self.responder.get_mac_address()\n self.requester_mirror.get_mac_address()\n self.responder_mirror.get_mac_address()\n\n ## Generate config for Match-Action table in switch\n if self.generate_switch_table_config() == False:\n logging.error(\"Failed to generate switch table configuration\")\n return False\n\n ## Dump the switch configuration into a file, and copy it to the switch\n if self.switch.dump_controller_config(self.local_workspace) == False:\n logging.error(\"Failed to dump switch config\")\n return False\n\n return True\n\n def __is_valid_traffc(self):\n \"\"\" Check if the traffic configuration is valid, including:\n 1. The tx-depth should be 1 or > 1\n 2. 
If tx-depth > 1, then we can only inject ECN marking events\n\n Returns:\n bool: True if the traffic configuration is valid, False otherwise\n \"\"\"\n try:\n data_pkt_events = self.traffic_conf['data-pkt-events']\n tx_depth = self.traffic_conf['tx-depth']\n\n if tx_depth == 1:\n return True\n elif tx_depth <= 0:\n return False\n\n for event in data_pkt_events:\n if event['type'] != 'ecn':\n logging.error(\"Cannot inject %s event when tx depth = %d\" % (event['type'], tx_depth))\n return False\n except:\n logging.error(\"Failed to parse traffic configuration\")\n return False\n\n return True\n\n def run_experiment(self):\n \"\"\" Run the experiment\n\n Returns:\n bool: True if the experiment is completed successfully, False otherwise\n \"\"\"\n\n ## Check if traffic configuration is valid\n if self.__is_valid_traffc() == False:\n logging.error(\"Invalid traffic configuration\")\n return False\n\n ## Run switch program\n if self.switch.run_switch() == False:\n logging.error(\"Failed to run switch\")\n return False\n\n ## Sleep for 1 second to make sure control plane is listenning (for client message)\n time.sleep(1)\n\n ## Configure the servers\n if self.requester.config_traffic_gen() == False:\n logging.error(\"Failed to config RDMA requester\")\n return False\n\n if self.responder.config_traffic_gen() == False:\n logging.error(\"Failed to config RDMA responder\")\n return False\n\n if self.requester_mirror.config_packet_capture() == False:\n logging.error(\"Failed to config packet capture on requester mirror\")\n return False\n\n if self.responder_mirror.config_packet_capture() == False:\n logging.error(\"Failed to config packet capture on responder mirror\")\n return False\n\n ## Check the connectivity through pingmesh (try 5 rounds)\n num_tries = 0\n pingmesh_ret = False\n\n while num_tries < 5:\n pingmesh_ret = self.ping_mesh()\n if pingmesh_ret == True:\n break\n num_tries += 1\n time.sleep(1)\n\n if pingmesh_ret == False:\n logging.error(\"Failed to ping all the IP addresses between requester and responder\")\n return False\n\n ## Launch packet capture for both side\n ## Prerequisite: config hugepage and igb_uio if needed\n if self.requester_mirror.run_packet_capture() == False:\n logging.error(\"Failed to run packet capture on requester mirror\")\n return False\n\n if self.responder_mirror.run_packet_capture() == False:\n logging.error(\"Failed to run packet capture on responder mirror\")\n return False\n\n time.sleep(3)\n\n ## Dump the counters before running\n if self.requester.dump_counters(host.REQ_START_COUNTER_FILE_NAME) == False:\n logging.error(\"Failed to dump counters on requester before running\")\n return False\n\n if self.responder.dump_counters(host.RSP_START_COUNTER_FILE_NAME) == False:\n logging.error(\"Failed to dump counters on responder before running\")\n return False\n\n ## Launch RDMA server first\n run_server_ret = self.responder.run_traffic_gen_server(self.traffic_conf)\n if run_server_ret == False:\n logging.error(\"Failed to run RDMA server\")\n return False\n\n time.sleep(2)\n\n ## Launch RDMA client\n try:\n destination_ip_subnet = self.responder.conf['nic']['ip-list'][0]\n destination_ip = destination_ip_subnet.split('/')[0]\n except:\n logging.error(\"Failed to get destination IP\")\n return False\n\n run_client_ret = self.requester.run_traffic_gen_client(traffic_conf=self.traffic_conf,\n destination_ip=destination_ip,\n controller_ip=self.switch.conf['control-ip'],\n controller_listen_port=self.switch.conf['listen-port'])\n if run_client_ret == 
False:\n logging.error(\"Failed to run RDMA client\")\n return False\n\n if self.switch.dump_results() == False:\n logging.error(\"Failed to dump results from switch\")\n return False\n\n if self.requester.dump_counters(host.REQ_FINISH_COUNTER_FILE_NAME) == False:\n logging.error(\"Failed to dump counters on requester after running\")\n return False\n\n if self.responder.dump_counters(host.RSP_FINISH_COUNTER_FILE_NAME) == False:\n logging.error(\"Failed to dump counters on responder after running\")\n return False\n\n logging.info(\"Experiment completed successfully\")\n return True\n\n def clean_up(self):\n \"\"\" Clean up the environment after the experiment\n\n Returns:\n bool: True if the clean up is completed successfully, False otherwise\n \"\"\"\n logging.info(\"Start cleaning up the environment\")\n\n if self.switch.clean_up() == False:\n logging.error(\"Failed to clean up switch\")\n return False\n\n if self.requester.clean_up() == False:\n logging.error(\"Failed to clean up requester\")\n return False\n\n if self.responder.clean_up() == False:\n logging.error(\"Failed to clean up responder\")\n return False\n\n if self.requester_mirror.clean_up() == False:\n logging.error(\"Failed to clean up requester mirror\")\n return False\n\n if self.responder_mirror.clean_up() == False:\n logging.error(\"Failed to clean up responder mirror\")\n return False\n\n return True\n\n def fetch_results(self, iter_id=0):\n \"\"\" Fetch the results of iteration 'iter_id', including:\n 1. Switch table entries and counters\n 2. Packet trace (pcap file)\n 3. Configs and end-to-end results from RDMA hosts\n\n Args:\n iter_id (int, optional): iteration ID, defaults to 0\n\n Returns:\n bool: True if the result collection is completed successfully, False otherwise\n \"\"\"\n ## Make the results dir if it does not exist\n iter_result_path = os.path.join(self.result_path, str(iter_id))\n cmd = \"mkdir -p %s\" % iter_result_path\n try:\n subprocess.call(cmd, shell=True)\n except:\n logging.error(\"Failed to create result directory %s\" % iter_result_path)\n return False\n\n if self.switch.fetch_results(iter_result_path) == False:\n logging.error(\"Failed to fetch results from switch\")\n return False\n\n if self.requester_mirror.fetch_results(iter_result_path) == False:\n logging.error(\"Failed to fetch results from requester mirror\")\n return False\n\n if self.responder_mirror.fetch_results(iter_result_path) == False:\n logging.error(\"Failed to fetch results from responder mirror\")\n return False\n\n if self.requester.fetch_results(iter_result_path) == False:\n logging.error(\"Failed to fetch results from requester\")\n return False\n\n if self.responder.fetch_results(iter_result_path) == False:\n logging.error(\"Failed to fetch results from responder\")\n return False\n\n logging.info(\"Finished fetching results for iteration %d\" % iter_id)\n return True\n\n def merge_traces(self, iter_id=0):\n iter_pcap_dir_path = os.path.join(self.result_path, str(iter_id), host.PCAP_RESULT_DIR)\n src_pcap_file_list = [os.path.join(iter_pcap_dir_path,\n self.requester_mirror.conf['pkt-dump-conf']['dump-filename']),\n os.path.join(iter_pcap_dir_path,\n self.responder_mirror.conf['pkt-dump-conf']['dump-filename'])]\n target_pcap_path = os.path.join(self.result_path,\n str(iter_id),\n host.PCAP_RESULT_DIR,\n self.aggregate_pcap_filename)\n packet_list = pcap_process.merge_pcaps(src_pcap_file_list)\n if packet_list is None:\n logging.error(\"Failed to merge pcap files for iteration %d\" % iter_id)\n return False\n\n if 
pcap_process.dump_pkts_to_pcap(target_pcap_path, packet_list) == False:\n logging.error(\"Failed to dump packets to pcap file %s\" % target_pcap_path)\n return False\n\n logging.info(\"Successfully merged pcap files for iteration %d\" % iter_id)\n\n def check_integrity(self, iter_id=0):\n ## Check if the collected packet trace passes integrity check\n pcap_path = os.path.join(self.result_path,\n str(iter_id),\n host.PCAP_RESULT_DIR,\n self.aggregate_pcap_filename)\n packet_list = get_packet_list(pcap_path)\n packet_list.sort(key=lambda x:x.get_switch_seqnum())\n logging.info(\"Packet trace sorted by switch sequence number.\")\n\n switch_state_snapshot = os.path.join(self.result_path,\n str(iter_id),\n switch.SWITCH_RESULT_DIR,\n switch.SWITCH_STATE_SNAPSHOT)\n port_map = {'requester': self.requester.conf['nic']['switch-port'],\n 'responder': self.responder.conf['nic']['switch-port'],\n 'requester-mirror': self.requester_mirror.conf['nic']['switch-port'],\n 'responder-mirror': self.responder_mirror.conf['nic']['switch-port']}\n switch_counter = SwitchCounter(switch_state_snapshot, port_map)\n\n integrity_checker = IntegrityCheck(packet_list=packet_list,\n switch_counter=switch_counter,\n requester_ip_list=self.get_requester_ip_list(),\n responder_ip_list=self.get_responder_ip_list())\n\n if integrity_checker.check() == True:\n logging.info(\"Integrity check passed\")\n return True\n else:\n logging.info(\"Integrity check failed\")\n return False" }, { "identifier": "SwitchCounter", "path": "lumina/analyzer/counter/switch_counter.py", "snippet": "class SwitchCounter:\n \"\"\" Class to parse switch counter files\n\n Attributes:\n _counter (dict of dict): the switch counters with the following format:\n {'requester': {'ingress': counter_value, 'egress': counter_value},\n 'responder': {'ingress': counter_value, 'egress': counter_value},\n 'requester-mirror': {'ingress': counter_value, 'egress': counter_value},\n 'responder-mirror': {'ingress': counter_value, 'egress': counter_value}}\n \"\"\"\n def __init__(self, snapshot_filename, port_map):\n \"\"\" Constructor\n\n Args:\n snapshot_filename (str): the file where switch dumps its counters\n port_map (dict): the mapping between port name and port number\n\n Returns:\n N/A\n \"\"\"\n with open(snapshot_filename, \"r\") as stream:\n conf = yaml.safe_load(stream)\n try:\n ingress_counters = conf['counter']['ingress']\n egress_counters = conf['counter']['egress']\n except:\n print(\"Bad yaml format in %s\" % snapshot_filename)\n sys.exit(-1)\n\n requester_port = port_map['requester']\n responder_port = port_map['responder']\n requester_mirror_port = port_map['requester-mirror']\n responder_mirror_port = port_map['responder-mirror']\n\n self._counter = {'requester' : {'ingress':0, 'egress': 0},\n 'responder' : {'ingress':0, 'egress': 0},\n 'requester-mirror' : {'ingress':0, 'egress': 0},\n 'responder-mirror' : {'ingress':0, 'egress': 0}}\n try:\n self._counter['requester']['ingress'] = ingress_counters[requester_port]\n self._counter['responder']['ingress'] = ingress_counters[responder_port]\n self._counter['requester-mirror']['ingress'] = ingress_counters[requester_mirror_port]\n self._counter['responder-mirror']['ingress'] = ingress_counters[responder_mirror_port]\n\n self._counter['requester']['egress'] = egress_counters[requester_port]\n self._counter['responder']['egress'] = egress_counters[responder_port]\n self._counter['requester-mirror']['egress'] = egress_counters[requester_mirror_port]\n self._counter['responder-mirror']['egress'] = 
egress_counters[responder_mirror_port]\n\n except:\n print(\"Port number not exist in the switch snapshot\")\n sys.exit(-1)\n\n def get_counter(self):\n \"\"\" Return the switch counters (dict of dict) \"\"\"\n return self._counter" }, { "identifier": "MLNXHostCounter", "path": "lumina/analyzer/counter/host_counter.py", "snippet": "class MLNXHostCounter(HostCounter):\n \"\"\" Class to parse MLNX host counter files \"\"\"\n def __init__(self, counter_start_filename, counter_finish_filename):\n \"\"\" Constructor\n\n Args:\n counter_start_filename (str): the file where host dumps its counters at the start phase\n counter_finish_filename (str): the file where host dumps its counters at the finish phase\n\n Returns:\n N/A\n \"\"\"\n super().__init__(counter_start_filename, counter_finish_filename)\n\n def get_port_rcv_packets(self):\n \"\"\" Return the number of received packets \"\"\"\n return self._counter['port-counters']['port_rcv_packets']\n\n def get_port_xmit_packets(self):\n \"\"\" Return the number of transmitted packets \"\"\"\n return self._counter['port-counters']['port_xmit_packets']\n\n def get_num_packet_seq_err(self):\n \"\"\" Return the number of received NAK sequence error packets \"\"\"\n return self._counter['hw-counters']['packet_seq_err']\n\n def get_num_out_of_sequence(self):\n \"\"\" Return the number of out-of-sequence packets received \"\"\"\n return self._counter['hw-counters']['out_of_sequence']\n\n def get_num_dup_requests(self):\n \"\"\" Return the number of duplicate requests \"\"\"\n return self._counter['hw-counters']['duplicate_request']\n\n def implied_nak_seq_err(self):\n \"\"\" Return the number of READ requests implying sequence errors \"\"\"\n return self._counter['hw-counters']['implied_nak_seq_err']\n\n def get_num_cnp_sent(self):\n \"\"\" Return the number of congestion notification packets sent by notification point \"\"\"\n return self._counter['hw-counters']['np_cnp_sent']\n\n def get_num_ecn_marked_packets(self):\n \"\"\" Return the number of ECN marked RoCEv2 packets received by notification point \"\"\"\n return self._counter['hw-counters']['np_ecn_marked_roce_packets']\n\n def get_num_cnp_handled(self):\n \"\"\" Return the number of congestion notification packets handled by reaction point \"\"\"\n return self._counter['hw-counters']['rp_cnp_handled']\n\n def get_num_icrc_errors(self):\n \"\"\" Return the number of RoCE packets with ICRC errors received \"\"\"\n return self._counter['hw-counters']['rx_icrc_encapsulated']\n\n def get_num_timeout_err(self):\n \"\"\" Return the number of times QP's ack timer expired for RC, XRC, DCT QPs at the sender side \"\"\"\n return self._counter['hw-counters']['local_ack_timeout_err']\n\n def get_num_discards_dict_tx(self):\n \"\"\" Return the number of TX discarded packets (dict)\"\"\"\n discards_dict_tx = {}\n for x in self._counter['ethtool-counters'].keys():\n if 'discard' in x and 'tx' in x:\n discards_dict_tx[x] = self._counter['ethtool-counters'][x]\n return discards_dict_tx\n\n def get_num_discards_dict_rx(self):\n \"\"\" Return the number of RX discarded packets (dict) \"\"\"\n discards_dict_rx = {}\n for x in self._counter['ethtool-counters'].keys():\n if 'discard' in x and 'rx' in x:\n discards_dict_rx[x] = self._counter['ethtool-counters'][x]\n return discards_dict_rx" }, { "identifier": "IntelHostCounter", "path": "lumina/analyzer/counter/host_counter.py", "snippet": "class IntelHostCounter(HostCounter):\n \"\"\" Class to parse Intel host counter files \"\"\"\n def __init__(self, 
counter_start_filename, counter_finish_filename):\n \"\"\" Constructor\n\n Args:\n counter_start_filename (str): the file where host dumps its counters at the start phase\n counter_finish_filename (str): the file where host dumps its counters at the finish phase\n\n Returns:\n N/A\n \"\"\"\n super().__init__(counter_start_filename, counter_finish_filename)\n\n def get_num_cnp_sent(self):\n \"\"\" Return the number of congestion notification packets sent by notification point \"\"\"\n return self._counter['hw-counters']['cnpSent']\n\n def get_num_ecn_marked_packets(self):\n \"\"\" Return the number of ECN marked RoCEv2 packets received by notification point \"\"\"\n return self._counter['hw-counters']['RxECNMrkd']\n\n def get_num_cnp_handled(self):\n \"\"\" Return the number of congestion notification packets handled by reaction point \"\"\"\n return self._counter['hw-counters']['cnpHandled']\n\n def get_num_discards_dict(self):\n \"\"\" Return the number of discarded packets (dict) \"\"\"\n discards_dict= {}\n for x in self._counter['hw-counters'].keys():\n if 'discard' in x:\n discards_dict[x] = self._counter['hw-counters'][x]\n return discards_dict" }, { "identifier": "get_packet_list", "path": "lumina/analyzer/pcap_processor/pcap_process.py", "snippet": "def get_packet_list(pcap_file):\n \"\"\" Read a pcap file and return a list of packets\n\n Args:\n pcap_file (str): The pcap file to read\n\n Returns:\n list: The list of packets if successful, empty list otherwise\n\n Raises:\n IOError: If the pcap file cannot be opened for reading\n Exception: If the pcap file cannot be read\n \"\"\"\n packet_list = []\n try:\n with open(pcap_file, 'rb') as file_read:\n pcap = dpkt.pcap.Reader(file_read)\n for packet in pcap:\n packet_list.append(roce_packet.RRoCEPacket(packet))\n except IOError:\n logging.error(\"Unable to open pcap file %s. 
Please check your filename.\" % pcap_file)\n raise IOError\n\n except:\n logging.error(\"Failed to read pcap file %s.\" % pcap_file)\n raise Exception\n\n logging.info(\"Successfully read %d packets from %s.\" % (len(packet_list), pcap_file))\n return packet_list" }, { "identifier": "LatencyMeasure", "path": "lumina/analyzer/measurer/latency_measure.py", "snippet": "class LatencyMeasure:\n \"\"\" Class to measure the latency between packets for some events,\n e.g., NACK latency, Retransmission latency, CNP latency\n\n Attributes:\n packet_list (list of RRoCEPacket objects): list of packets\n qp_info_list (list of dict): list of QP info with the following format:\n [{'psn_rcv': initial packet sequence number from the receiver qp,\n 'psn_snd': initial packet sequence number from the sender qp,\n 'qpn_rcv': receiver qp number,\n 'qpn_snd': sender qp number,\n 'ip_rcv' : receiver IP\n 'ip_snd' : sender IP}]\n is_read (bool): if the QPs use RDMA read verb\n \"\"\"\n def __init__(self, packet_list, qp_info_list, is_read=False):\n \"\"\" Constructor\n\n Args:\n packet_list (list of RRoCEPacket objects): list of packets\n qp_info_list (list of dict): list of QP info with the following format:\n [{'psn_rcv': initial packet sequence number from the receiver qp,\n 'psn_snd': initial packet sequence number from the sender qp,\n 'qpn_rcv': receiver qp number,\n 'qpn_snd': sender qp number,\n 'ip_rcv' : receiver IP\n 'ip_snd' : sender IP}]\n is_read (bool): if the QPs use RDMA read verb (default: False)\n\n Returns:\n N/A\n \"\"\"\n self.packet_list = packet_list\n self.qp_info_list = qp_info_list\n self.is_read = is_read\n\n def get_peer_qp_info(self, dest_qpn, dest_ip):\n \"\"\" Get the info of the peer QP (qpn, ip) of a given qp (qpn, ip)\n\n Args:\n dest_qpn (int): destination QP number\n dest_ip (str): destination IP\n\n Returns:\n int: peer QP number (None if not found)\n str: peer IP (None if not found)\n \"\"\"\n for qp_info in self.qp_info_list:\n if qp_info['qpn_snd'] == dest_qpn and qp_info['ip_snd'] == dest_ip:\n return qp_info['qpn_rcv'], qp_info['ip_rcv']\n elif qp_info['qpn_rcv'] == dest_qpn and qp_info['ip_rcv'] == dest_ip:\n return qp_info['qpn_snd'], qp_info['ip_snd']\n\n return None, None\n\n def get_bit_error_pkts(self, relative_dest_qpn=None):\n \"\"\" Get the packets marked with bit error flag\n\n Args:\n relative_dest_qpn (int): the relative destination QP number (None if not specified)\n\n Returns:\n list of RRoCEPacket objects: the list of packets marked with bit error flag\n \"\"\"\n error_pkt_list = []\n\n if relative_dest_qpn != None:\n dest_qpn = self.qp_info_list[relative_dest_qpn]['qpn_rcv']\n dest_ip = self.qp_info_list[relative_dest_qpn]['ip_rcv']\n\n for packet in self.packet_list:\n if packet.is_bit_error() == False:\n continue\n\n if relative_dest_qpn == None or \\\n (packet.get_roce_dest_qp() == dest_qpn and packet.get_dst_ip() == dest_ip):\n error_pkt_list.append(packet)\n\n return error_pkt_list\n\n def get_dropped_pkts(self, relative_dest_qpn=None):\n \"\"\" Get the packets marked with drop flag\n\n Args:\n relative_dest_qpn (int): the relative destination QP number (None if not specified)\n\n Returns:\n list of RRoCEPacket objects: the list of packets marked with drop flag\n \"\"\"\n dropped_pkt_list = []\n\n if relative_dest_qpn != None:\n dest_qpn = self.qp_info_list[relative_dest_qpn]['qpn_rcv']\n dest_ip = self.qp_info_list[relative_dest_qpn]['ip_rcv']\n\n for packet in self.packet_list:\n if packet.is_dropped() == False:\n continue\n\n if relative_dest_qpn 
== None or \\\n (packet.get_roce_dest_qp() == dest_qpn and packet.get_dst_ip() == dest_ip):\n dropped_pkt_list.append(packet)\n\n return dropped_pkt_list\n\n def get_ecn_pkts(self):\n \"\"\" Get the packets marked with ECN\n\n Returns:\n list of RRoCEPacket objects: the list of packets marked with ECN\n \"\"\"\n ecn_pkt_list = []\n\n for packet in self.packet_list:\n if packet.is_ecn():\n ecn_pkt_list.append(packet)\n\n return ecn_pkt_list\n\n def get_cnp_pkts(self):\n \"\"\" Get the congestion notification packets\n\n Returns:\n list of RRoCEPacket objects: the list of congestion notification packets\n \"\"\"\n cnp_pkt_list = []\n\n for packet in self.packet_list:\n if packet.is_cnp():\n cnp_pkt_list.append(packet)\n\n return cnp_pkt_list\n\n def get_undelivered_pkts(self, relative_dest_qpn = None):\n \"\"\" Get the undelivered packets (dropped or marked with bit error)\n\n Args:\n relative_dest_qpn (int): the relative destination QP number (None if not specified)\n\n Returns:\n list of RRoCEPacket objects: the list of undelivered packets\n \"\"\"\n undelivered_pkt_list = []\n\n if relative_dest_qpn != None:\n dest_qpn = self.qp_info_list[relative_dest_qpn]['qpn_rcv']\n dest_ip = self.qp_info_list[relative_dest_qpn]['ip_rcv']\n\n for packet in self.packet_list:\n if packet.is_delivered() == True:\n continue\n\n if relative_dest_qpn == None or \\\n (packet.get_roce_dest_qp() == dest_qpn and packet.get_dst_ip() == dest_ip):\n undelivered_pkt_list.append(packet)\n\n return undelivered_pkt_list\n\n def get_nack(self, undelivered_pkt):\n \"\"\" Given an undelivered packet, return the NACK packet that triggers its retransmission.\n If there's no NACK packet found for the undelivered packet, return None.\n Note that for RDMA READ, NACK is essentially a READ request packet that triggers retransmission\n\n Args:\n undelivered_pkt (RRoCEPacket object): the undelivered packet\n\n Returns:\n RRoCEPacket object: the NACK packet that triggers the retransmission of the undelivered packet\n (None if not found)\n \"\"\"\n undelivered_pkt_dest_qpn = undelivered_pkt.get_roce_dest_qp()\n undelivered_pkt_dst_ip = undelivered_pkt.get_dst_ip()\n undelivered_pkt_psn = undelivered_pkt.get_roce_pkt_seq()\n undelivered_pkt_switch_seqnum = undelivered_pkt.get_switch_seqnum()\n matched_dest_qpn, matched_dst_ip = self.get_peer_qp_info(undelivered_pkt_dest_qpn, undelivered_pkt_dst_ip)\n\n if matched_dest_qpn == None or matched_dst_ip == None:\n logging.error(\"QP info of the undelivered packet not found in qp_info_list dumped by switch\")\n return None\n\n for packet in self.packet_list:\n if self.is_same_roce_data_pkt(packet, undelivered_pkt) and \\\n packet.get_switch_seqnum() > undelivered_pkt_switch_seqnum:\n return None\n\n if ((self.is_read and packet.is_roce_read_req()) or packet.is_roce_nack()) and \\\n packet.get_dst_ip() == matched_dst_ip and \\\n packet.get_roce_dest_qp() == matched_dest_qpn and \\\n packet.get_roce_pkt_seq() == undelivered_pkt_psn and \\\n packet.get_switch_seqnum() > undelivered_pkt_switch_seqnum:\n ## We return the first packet appears after the undelivered packet and matches the undelivered packet\n return packet\n\n return None\n\n def get_qp_first_nack_before_retrans(self, undelivered_pkt):\n \"\"\" For an undelivered packet, return the first NACK packet on its QP between it and its retransmission.\n If there's no NACK packet found before the retransmission, return None.\n Note that for RDMA READ, NACK is essentially a READ request packet\n\n Args:\n undelivered_pkt (RRoCEPacket 
object): the undelivered packet\n\n Returns:\n RRoCEPacket object: the first NACK packet on the QP between the undelivered packet and its retransmission\n (None if not found)\n \"\"\"\n undelivered_pkt_dest_qpn = undelivered_pkt.get_roce_dest_qp()\n undelivered_pkt_dst_ip = undelivered_pkt.get_dst_ip()\n undelivered_pkt_psn = undelivered_pkt.get_roce_pkt_seq()\n undelivered_pkt_switch_seqnum = undelivered_pkt.get_switch_seqnum()\n matched_dest_qpn, matched_dst_ip = self.get_peer_qp_info(undelivered_pkt_dest_qpn, undelivered_pkt_dst_ip)\n\n if matched_dest_qpn == None or matched_dst_ip == None:\n logging.error(\"QP info of the undelivered packet not found in qp_info_list dumped by switch\")\n return None\n\n for packet in self.packet_list:\n if self.is_same_roce_data_pkt(packet, undelivered_pkt) and \\\n packet.get_switch_seqnum() > undelivered_pkt_switch_seqnum:\n return None\n\n if ((self.is_read and packet.is_roce_read_req()) or packet.is_roce_nack()) and \\\n packet.get_dst_ip() == matched_dst_ip and \\\n packet.get_roce_dest_qp() == matched_dest_qpn and \\\n packet.get_roce_pkt_seq() <= undelivered_pkt_psn and \\\n packet.get_switch_seqnum() > undelivered_pkt_switch_seqnum:\n return packet\n\n return None\n\n def get_qp_next_delivered_pkt(self, current_pkt):\n \"\"\" For a packet, return the next delivered packet on the same QP.\n\n Args:\n current_pkt (RRoCEPacket object): the current packet\n\n Returns:\n RRoCEPacket object: the next delivered packet on the same QP (None if not found)\n \"\"\"\n switch_seqnum = current_pkt.get_switch_seqnum()\n\n for packet in self.packet_list:\n if self.is_same_qp_roce_data_pkt(packet, current_pkt) and \\\n packet.get_switch_seqnum() > switch_seqnum and \\\n packet.is_delivered():\n return packet\n\n return None\n\n def get_retransmit_pkt(self, undelivered_pkt):\n \"\"\" Given an undelivered packet, return its retransmission packet.\n\n Args:\n undelivered_pkt (RRoCEPacket object): the undelivered packet\n\n Returns:\n RRoCEPacket object: the retransmission packet of the undelivered packet (None if not found)\n \"\"\"\n undelivered_pkt_switch_seqnum = undelivered_pkt.get_switch_seqnum()\n\n for packet in self.packet_list:\n if self.is_same_roce_data_pkt(packet, undelivered_pkt) and \\\n packet.get_switch_seqnum() > undelivered_pkt_switch_seqnum:\n ## We return the first packet appears after the undelivered packet and matches the undelivered packet\n return packet\n\n return None\n\n def get_latency_between_pkts(self, packet_alpha, packet_beta):\n \"\"\" Return the time of packet_beta - time of packet_alpha in seconds\n\n Args:\n packet_alpha (RRoCEPacket object): the first packet\n packet_beta (RRoCEPacket object): the second packet\n\n Returns:\n float: the time difference between two packets in seconds\n \"\"\"\n return packet_beta.get_switch_timestamp() - packet_alpha.get_switch_timestamp()\n\n def is_same_roce_data_pkt(self, packet_alpha, packet_beta):\n \"\"\" Return if two packets are the same RoCE data packet (same src ip, dst ip, dest qp, and psn)\n\n Args:\n packet_alpha (RRoCEPacket object): the first packet\n packet_beta (RRoCEPacket object): the second packet\n\n Returns:\n bool: True if two packets are the same RoCE data packet, False otherwise\n \"\"\"\n return packet_alpha.get_src_ip() == packet_beta.get_src_ip() and \\\n packet_alpha.get_dst_ip() == packet_beta.get_dst_ip() and \\\n packet_alpha.get_roce_dest_qp() == packet_beta.get_roce_dest_qp() and \\\n packet_alpha.get_roce_pkt_seq() == packet_beta.get_roce_pkt_seq()\n\n def 
is_same_qp_roce_data_pkt(self, packet_alpha, packet_beta):\n \"\"\" Return if two packets are RoCE data packets on the same QP (same src ip, dst ip, and dest qp)\n\n Args:\n packet_alpha (RRoCEPacket object): the first packet\n packet_beta (RRoCEPacket object): the second packet\n\n Returns:\n bool: True if two packets are RoCE data packets on the same QP, False otherwise\n \"\"\"\n return packet_alpha.get_src_ip() == packet_beta.get_src_ip() and \\\n packet_alpha.get_dst_ip() == packet_beta.get_dst_ip() and \\\n packet_alpha.get_roce_dest_qp() == packet_beta.get_roce_dest_qp()\n\n def get_qp_next_delivered_pkt_latency(self, pkt):\n \"\"\" Get the latency between 'pkt' and next 'delivered' packet on the same QP\n\n Args:\n pkt (RRoCEPacket object): the packet\n\n Returns:\n float: the latency between 'pkt' and next 'delivered' packet on the same QP\n (None if not found)\n \"\"\"\n\n next_pkt = self.get_qp_next_delivered_pkt(pkt)\n if next_pkt is None:\n return None\n\n return self.get_latency_between_pkts(pkt, next_pkt)\n\n def get_nack_gen_latency(self, undelivered_pkt):\n \"\"\" For an undelivered packet, return the NACK generation latency, i.e., the duration from the detection of\n the undelivered packet to the generation of the NACK packet that triggers its retransmission.\n\n Args:\n undelivered_pkt (RRoCEPacket object): the undelivered packet\n\n Returns:\n float: the NACK generation latency for the undelivered packet (None if not found)\n \"\"\"\n nack_pkt = self.get_nack(undelivered_pkt)\n if nack_pkt == None:\n return None\n\n # NACK should be triggered by the next delivered packet on the same QP\n next_delivered_pkt = self.get_qp_next_delivered_pkt(undelivered_pkt)\n if self.is_same_roce_data_pkt(next_delivered_pkt, undelivered_pkt):\n # We should never reach here\n return None\n\n nack_gen_latency = self.get_latency_between_pkts(next_delivered_pkt, nack_pkt)\n return nack_gen_latency\n\n def get_nack_resp_latency(self, undelivered_pkt):\n \"\"\" For an undelivered packet, return the NACK response latency, i.e., the duration from the generation of\n the NACK packet to the retransmission of this undelivered packet.\n\n Args:\n undelivered_pkt (RRoCEPacket object): the undelivered packet\n\n Returns:\n float: the NACK response latency for the undelivered packet (None if not found)\n \"\"\"\n nack_pkt = self.get_nack(undelivered_pkt)\n if nack_pkt == None:\n return None\n\n retransmit_pkt = self.get_retransmit_pkt(undelivered_pkt)\n if retransmit_pkt == None:\n return None\n\n nack_resp_latency = self.get_latency_between_pkts(nack_pkt, retransmit_pkt)\n return nack_resp_latency\n\n def get_retransmit_latency(self, undelivered_pkt):\n \"\"\" For an undelivered packet, return the retransmission latency, i.e., the duration from the packet\n to its retransmission.\n\n Args:\n undelivered_pkt (RRoCEPacket object): the undelivered packet\n\n Returns:\n float: the retransmission latency for the undelivered packet (None if not found)\n \"\"\"\n retransmit_pkt = self.get_retransmit_pkt(undelivered_pkt)\n if retransmit_pkt == None:\n return None\n\n retransmit_latency = self.get_latency_between_pkts(undelivered_pkt, retransmit_pkt)\n return retransmit_latency\n\n def get_nack_gen_latency_list(self, relative_dest_qpn=None):\n \"\"\" Return a list of NACK generation latency for all undelivered packets with relative_dest_qpn\n\n Args:\n relative_dest_qpn (int): the relative destination QP number (None if not specified)\n\n Returns:\n list of float: a list of NACK generation latency for all 
undelivered packets with relative_dest_qpn\n \"\"\"\n undelivered_pkts = self.get_undelivered_pkts(relative_dest_qpn)\n nack_latency_list = []\n\n for undelivered_pkt in undelivered_pkts:\n nack_pkt = self.get_nack(undelivered_pkt)\n if nack_pkt == None:\n nack_latency_list.append(None)\n else:\n nack_latency = self.get_latency_between_pkts(undelivered_pkt, nack_pkt)\n nack_latency_list.append(nack_latency)\n\n return nack_latency_list\n\n def get_retransmit_latency_list(self, relative_dest_qpn):\n \"\"\" Return a list of retransmission latency for all undelivered packets with relative_dest_qpn\n\n Args:\n relative_dest_qpn (int): the relative destination QP number (None if not specified)\n\n Returns:\n list of float: a list of retransmission latency for all undelivered packets with relative_dest_qpn\n \"\"\"\n undelivered_pkts = self.get_undelivered_pkts(relative_dest_qpn)\n retransmit_latency_list = []\n\n for undelivered_pkt in undelivered_pkts:\n retransmit_pkt = self.get_retransmit_pkt(undelivered_pkt)\n if retransmit_pkt == None:\n retransmit_latency_list.append(None)\n else:\n retransmit_latency = self.get_latency_between_pkts(undelivered_pkt, retransmit_pkt)\n retransmit_latency_list.append(retransmit_latency)\n\n return retransmit_latency_list" }, { "identifier": "config_stream_handler", "path": "lumina/utils/config_loggers.py", "snippet": "def config_stream_handler(logger):\n \"\"\" Configure stream handler\n\n Args:\n logger (logging.Logger): Logger object\n\n Returns:\n N/A\n \"\"\"\n logger.setLevel(logging.INFO)\n console = logging.StreamHandler()\n console.setLevel(logging.INFO)\n console.setFormatter(logging.Formatter('%(name)-18s: %(levelname)-8s %(message)s'))\n logger.addHandler(console)" }, { "identifier": "config_file_handler", "path": "lumina/utils/config_loggers.py", "snippet": "def config_file_handler(logger, log_file, no_format=False):\n \"\"\" Configure file handler\n\n Args:\n logger (logging.Logger): Logger object\n log_file (str): Log file path\n no_format (bool): If True, do not format log messages (default: False)\n\n Returns:\n N/A\n \"\"\"\n logger.setLevel(logging.INFO)\n file_handler = logging.FileHandler(log_file, mode=\"w\")\n if no_format == False:\n file_handler.setFormatter(logging.Formatter('%(name)-18s: %(levelname)-8s %(message)s'))\n file_handler.setLevel(logging.INFO)\n logger.addHandler(file_handler)" }, { "identifier": "TRIGGER_OOS", "path": "lumina/analyzer/packet_parser/roce_packet.py", "snippet": "TRIGGER_OOS = 1" }, { "identifier": "TRIGGER_TIMEOUT", "path": "lumina/analyzer/packet_parser/roce_packet.py", "snippet": "TRIGGER_TIMEOUT = 2" } ]
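For orientation, the LatencyMeasure snippet above exposes per-packet helpers (get_undelivered_pkts, get_retransmit_latency, get_nack_gen_latency). The following is a minimal usage sketch, not taken from the repository; the capture file name and the QP info values are placeholders.

import logging

from lumina.analyzer.pcap_processor.pcap_process import get_packet_list
from lumina.analyzer.measurer.latency_measure import LatencyMeasure

logging.basicConfig(level=logging.INFO)

packet_list = get_packet_list("example.pcap")    # placeholder pcap path
qp_info_list = [{'psn_rcv': 0, 'psn_snd': 0,     # placeholder QP info, format per the docstring
                 'qpn_rcv': 17, 'qpn_snd': 18,
                 'ip_rcv': '192.168.0.2', 'ip_snd': '192.168.0.1'}]

measure = LatencyMeasure(packet_list, qp_info_list, is_read=False)
for pkt in measure.get_undelivered_pkts(relative_dest_qpn=0):
    # Report how long each undelivered packet took to be retransmitted,
    # and how long the NACK that triggered the retransmission took to appear.
    logging.info("psn=%d retransmit_latency=%s nack_gen_latency=%s",
                 pkt.get_roce_pkt_seq(),
                 measure.get_retransmit_latency(pkt),
                 measure.get_nack_gen_latency(pkt))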
import argparse, os, math, glob, logging, time
import lumina.analyzer.checker.integrity_check as integrity_check
import lumina.analyzer.checker.host_check as host_check
import lumina.analyzer.checker.gbn_check as gbn_check
import lumina.analyzer.checker.read_gbn_check as read_gbn_check
import lumina.orchestrator.host as host
import lumina.orchestrator.switch as switch
from lumina.analyzer.main import get_qp_info_list
from lumina.orchestrator.main import Orchestrator
from lumina.analyzer.counter.switch_counter import SwitchCounter
from lumina.analyzer.counter.host_counter import MLNXHostCounter, IntelHostCounter
from lumina.analyzer.pcap_processor.pcap_process import get_packet_list
from lumina.analyzer.measurer.latency_measure import LatencyMeasure
from lumina.utils.config_loggers import config_stream_handler, config_file_handler
from lumina.analyzer.packet_parser.roce_packet import TRIGGER_OOS, TRIGGER_TIMEOUT
12,865
## All logs will be logged into file LOG_FILENAME
LOG_FILENAME = "test_gbn.log"
## Results (checkers and measurements) will also be dumped into file RESULT_FILENAME
RESULT_FILENAME = "result.log"
## Max # of retries for each experiment iteration
MAX_NB_EXP_RETRIES = 3

def setup_root_logger(orchestrator):
    """ Setup the root logger for the test

    Args:
        orchestrator (Orchestrator object): Orchestrator object that contains all the configurations

    Returns:
        N/A
    """
    root_logger = logging.getLogger()
    root_logger.handlers.clear()
    config_stream_handler(root_logger)
## All logs will be logged into file LOG_FILENAME
LOG_FILENAME = "test_gbn.log"
## Results (checkers and measurements) will also be dumped into file RESULT_FILENAME
RESULT_FILENAME = "result.log"
## Max # of retries for each experiment iteration
MAX_NB_EXP_RETRIES = 3

def setup_root_logger(orchestrator):
    """ Setup the root logger for the test

    Args:
        orchestrator (Orchestrator object): Orchestrator object that contains all the configurations

    Returns:
        N/A
    """
    root_logger = logging.getLogger()
    root_logger.handlers.clear()
    config_stream_handler(root_logger)
config_file_handler(logger=root_logger,
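The next_line above starts a call to config_file_handler, whose signature in the config_loggers snippet is (logger, log_file, no_format=False). A hedged guess at how the call might be completed follows; the log_file value is an assumption, not the repository's actual continuation.

    config_file_handler(logger=root_logger,
                        log_file=LOG_FILENAME,  # assumed: reuse the constant defined above
                        no_format=False)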
8
2023-12-09 08:21:14+00:00
16k
boweniac/autogan
autogan/agents/universal_agent.py
[ { "identifier": "AgentSwitch", "path": "autogan/agents/agent_switch.py", "snippet": "class AgentSwitch:\n def __init__(\n self,\n organizational_structure: List,\n task_tag: Optional[str] = \"/task\",\n opening_speaker: Optional[any] = None,\n default_agent_config: Optional[Dict] = None,\n default_super_rich: Optional[str] = None,\n default_stream_mode: Optional[bool] = None,\n response_func: Optional[ResponseFuncType]\n = default_response_func,\n ):\n \"\"\"All messages sent by agents need to be forwarded through the AgentSwitch object.\n 所有 agent 发送的消息,都需要通过 AgentSwitch 对象进行转发。\n\n **Forwarding:**\n 转发:\n\n The AgentSwitch object determines who to forward the message to based on the agent name after the @ symbol in the message.\n AgentSwitch 对象通过消息中 @ 符号后的 agent name 来判断将消息转发给谁。\n\n **Conversation domain:**\n 会话域:\n\n In each round of dialogue, the agent does not need to use all historical conversation records as its context.\n 每轮对话 agent 无需将所有的历史会话记录作为其上下文。\n\n The agent's conversation domain is based on the task. that is, the context of each round of dialogue for the agent only focuses on the historical conversation records of the current task.\n agent 的会话域以任务为基础。即 agent 每轮对话的上下文仅聚焦于当前任务的历史会话记录。\n\n **Task:**\n 任务:\n\n The AgentSwitch object determines whether the content of the message is a task through the task tag in the message.\n AgentSwitch 对象通过消息中的 task tag,来判断消息的内容是否是一个任务。\n\n If it is a task, the AgentSwitch object will call the receiver's new_task method.\n 如果是任务,AgentSwitch 对象会调用接收方的 new_task 方法。\n\n The default task tag is /task, which can be modified through the task_tag parameter when initializing the AgentSwitch object.\n task tag 默认为 /task,该值可在初始化 AgentSwitch 对象时,通过 task_tag 参数修改。\n\n **Organizational structure:**\n 组织架构:\n\n A multidimensional list containing agent objects.\n 一个包含 agent 对象的多维列表。\n\n Each list is equivalent to a department, and the first agent in the list is the leader of the department.\n 每个列表相当于一个部门,列表中的第一个 agent 为部门的 leader。\n\n Each agent can communicate with other agents in the current department and the leader of the subordinate department to complete tasks together.\n 每个 agent 可与当前部门的其他 agent 以及下级部门的 leader 沟通,协作完成任务。\n\n Note: There cannot be agents with the same name in the organizational structure.\n 注意:组织架构中不能有相同名称的 agent。\n\n :param organizational_structure: A multidimensional list containing agent objects.\n 一个包含 agent 对象的多维列表。\n :param opening_speaker_name: The name of the human agent invited to publish the first task.\n 被邀请发布第一个任务的人工 agent 名称。\n :param task_tag: Publish tasks to other agents by adding task_tag to the message.\n 通过在消息中添加 task_tag 来向其他 agent 发布任务。\n \"\"\"\n self.task_tag = task_tag\n self._default_agent_config = default_agent_config\n self._default_super_rich = default_super_rich\n self._default_stream_mode = default_stream_mode\n self._response_func = response_func\n self._agents = {} # key: agent name value: agent object\n\n self._init_agents(organizational_structure)\n self._init_agents_workmates(organizational_structure)\n if opening_speaker:\n self._inviting_to_speak(opening_speaker)\n\n def _init_agents(self, agent_list: list):\n for item in agent_list:\n if isinstance(item, list):\n self._init_agents(item)\n elif isinstance(item, str):\n continue\n else:\n self._agents[item.name] = item\n if item.agent_config is None and self._default_agent_config is not None:\n item.set_agent_config(self._default_agent_config)\n if item.super_rich is None and self._default_super_rich is not None:\n item.super_rich = 
self._default_super_rich\n if item.stream_mode is None:\n if self._default_stream_mode is None or self._default_stream_mode:\n item.stream_mode = True\n else:\n item.stream_mode = False\n if self._response_func:\n item.response_func = self._response_func\n\n def _init_agents_workmates(self, agent_list: list):\n \"\"\"Arrange for each agent to communicate with other agents according to the organizational structure.\n 根据组织架构,为每个 agent 安排可以与其沟通的其他 agent\n\n An agent should not exist in multiple departments.\n agent 不应存在于多个部门中\n\n :param agent_list: Organizational structure\n 组织架构\n \"\"\"\n if isinstance(agent_list[0], str):\n # The current list is workflow mode\n l = len(agent_list)\n\n for index, main_agent in enumerate(agent_list):\n # Skip the first element\n if index == 0:\n continue\n\n workmates = \"\"\n\n if index == l - 1:\n # If this is the last element\n name = \"\\\\\"\n elif isinstance(agent_list[index + 1], list):\n # If the next element is a list\n name = agent_list[index + 1][0].name\n duty = agent_list[index + 1][0].duty\n workmates = f\"\"\"\n{name} : {duty}\"\"\"\n else:\n # If the next element is agent\n name = agent_list[index + 1].name\n duty = agent_list[index + 1].duty\n workmates = f\"\"\"\n{name} : {duty}\"\"\"\n\n if isinstance(main_agent, list):\n # If the current element is a list\n self._init_agents_workmates(main_agent)\n if not main_agent[0].pipeline or main_agent[0].pipeline == \"\\\\\":\n main_agent[0].workmates += workmates\n main_agent[0].pipeline = name\n else:\n # If the current element is agent\n if not main_agent.pipeline or main_agent.pipeline == \"\\\\\":\n main_agent.workmates += workmates\n main_agent.pipeline = name\n else:\n # The current list is non-workflow mode.\n for main_agent in agent_list:\n workmates = \"\"\n\n if isinstance(main_agent, list):\n # If the current element is a list\n self._init_agents_workmates(main_agent)\n\n # If the current element is a workflow list, no hierarchical relationship is established.\n if isinstance(main_agent[0], str):\n continue\n\n # Establish a leveling relationship between current department leaders\n for agent in agent_list:\n if isinstance(agent, list):\n # If other elements are lists\n\n if isinstance(agent[0], str):\n if agent[0] == \"F\":\n # If it is a workflow\n\n # Determine whether the second element is a list.\n if isinstance(agent[1], list):\n name = agent[1][0].name\n duty = agent[1][0].duty\n else:\n name = agent[1].name\n duty = agent[1].duty\n else:\n # Skip other types of workflow\n continue\n else:\n # If it is a department\n if agent[0].name != main_agent[0].name and agent[0].duty is not None:\n name = agent[0].name\n duty = agent[0].duty\n else:\n # Skip departments that duplicate the current department\n continue\n else:\n # If other elements are agent\n name = agent.name\n duty = agent.duty\n workmates += f\"\"\"\n{name} : {duty}\"\"\"\n main_agent[0].workmates += workmates\n else:\n # If the current element is agent\n\n # Establish a level relationship of the current agent\n for agent in agent_list:\n if isinstance(agent, list):\n # If other elements are lists\n\n # Determine whether it is a department or a workflow\n if isinstance(agent[0], str):\n if agent[0] == \"F\":\n # If it is a workflow\n\n # Determine whether the second element is a list.\n if isinstance(agent[1], list):\n name = agent[1][0].name\n duty = agent[1][0].duty\n else:\n name = agent[1].name\n duty = agent[1].duty\n else:\n # Skip other types of workflow\n continue\n else:\n # If it is a department\n name = 
agent[0].name\n duty = agent[0].duty\n else:\n # If other elements are agent\n if agent.name != main_agent.name and agent.duty is not None:\n name = agent.name\n duty = agent.duty\n else:\n # Skip the duplicate agent with the current agent\n continue\n workmates += f\"\"\"\n{name} : {duty}\"\"\"\n main_agent.workmates += workmates\n\n def _inviting_to_speak(self, invited_speaker):\n \"\"\"Invite the human agent to publish the first task\n 邀请人工 agent 发布第一个任务\n\n :param invited_speaker_name: The name of the human agent\n 人工 agent 名称。\n \"\"\"\n if invited_speaker.name not in self._agents:\n print(\"agent does not exist\")\n return\n new_task_id = self.create_time_based_uuid()\n invited_speaker.receive(self, new_task_id, \"system\", \"Please enter\", 2)\n\n def handle_and_forward(self, task_id: str, pusher_name: str, content: str,\n completion_tokens: Optional[int]):\n \"\"\"Handle messages and forward to other agent.\n 处理消息并转发给其他代理\n\n **Forwarding:**\n 转发:\n Determines who to forward the message to based on the agent name after the @ symbol in the message.\n 通过消息中 @ 符号后的 agent name 来判断将消息转发给谁。\n\n **Task:**\n 任务:\n Determines whether the content of the message is a task through the task tag in the message.\n 通过消息中的 task tag,来判断消息的内容是否是一个任务。\n\n If it is a task, will call the receiver's new_task method.\n 如果是任务,对象会调用接收方的 new_task 方法。\n\n **Conversation domain control:**\n 会话域控制:\n Translate the task id of the pusher into the task id of the receiver to connect the context.\n 将推送方的任务 id,转换为接收方的任务 id,以衔接上下文。\n\n - If the pusher is the task publisher, it is necessary to convert the task id of the pusher into the sub-task id of the receiver.\n - 如推送方为任务发布者,则需要将推送方的任务 id 转换为接收方的子任务 id。\n\n - If the pusher is executing the task published by the receiver, it is necessary to convert the task id of the pusher into the parent task id of the receiver.\n - 如推送方正在执行接收方发布的任务,则需要将推送方的任务 id 转换为接收方的上级任务 id。\n\n :param task_id: pusher task id.\n :param pusher_name: pusher_name.\n :param content: message content.\n :param completion_tokens: message content tokens.\n \"\"\"\n # Get pusher object.\n pusher = self._agents[pusher_name]\n\n # Recognize the recipient's name.\n match = re.findall(r'@(\\w+)', content)\n\n if match:\n if match[0] not in self._agents:\n # Handling the case of incorrect recipient name.\n warn = f\"@{pusher_name} {match[0]} not exist, do not @{match[0]} again, Also please do not attempt to converse with me, this is just a system message.\"\n self._response_func(\"system\", \"system\", \"\", False, 0, warn, 0, None)\n pusher.receive(self, task_id, \"system\", warn, 12)\n\n # Get receiver object.\n receiver = self._agents[match[0]]\n if re.search(fr'@\\w+ {self.task_tag}', content):\n # Generate a new task id.\n new_task_id = self.create_time_based_uuid()\n\n # Establish a relationship between the push task and the receiver task.\n pusher.sub_to_main_task_id[new_task_id] = task_id\n receiver.main_to_sub_task_id[task_id] = new_task_id\n # Create a new task.\n receiver.new_task(self, new_task_id, pusher_name, content, completion_tokens)\n else:\n switch_task_id = task_id\n if receiver.main_to_sub_task_id and task_id in receiver.main_to_sub_task_id:\n # Translate the session ID of the pusher into the sub-session ID of the receiver.\n switch_task_id = receiver.main_to_sub_task_id[task_id]\n if receiver.main_to_sub_task_id and task_id in receiver.sub_to_main_task_id:\n # Translate the session id of the sender into the superior session id of the receiver.\n switch_task_id = 
receiver.sub_to_main_task_id[task_id]\n if switch_task_id == task_id:\n # If no subtasks of the task from the pusher are found, a prompt is needed to create the task first.\n # Generate a new task id.\n new_task_id = self.create_time_based_uuid()\n\n # Establish a relationship between the push task and the receiver task.\n pusher.sub_to_main_task_id[new_task_id] = task_id\n receiver.main_to_sub_task_id[task_id] = new_task_id\n # Create a new task.\n content = content.replace(f\"@{match[0]} \", f\"@{match[0]} {self.task_tag} \")\n receiver.new_task(self, new_task_id, pusher_name, content, completion_tokens)\n else:\n receiver.receive(self, switch_task_id, pusher_name, content, completion_tokens)\n else:\n # Handling the situation where the recipient is not recognized.\n if pusher.pipeline != \"\\\\\":\n warn = f\"@{pusher_name} Any reply must start with @ + recipient's name, Also please do not attempt to converse with me, this is just a system message.\"\n self._response_func(\"system\", \"system\", \"\", False, 0, warn, 0, None)\n pusher.receive(self, task_id, \"system\", warn, 12)\n\n @staticmethod\n def create_time_based_uuid():\n # 获取当前时间的时间戳\n timestamp = time.time()\n\n # 创建一个基于时间戳的UUID\n return uuid.uuid5(uuid.NAMESPACE_DNS, str(timestamp))" }, { "identifier": "compressed_messages", "path": "autogan/utils/compressed_messages_utils.py", "snippet": "def compressed_messages(messages: List[Dict], focus: str, summary_model_config: LLMConfig, agent_name: str,\n response_func: ResponseFuncType, stream_mode: Optional[bool] = None,\n safe_size: Optional[int] = 4096) -> tuple[Optional[list], Optional[list], Optional[int]]:\n \"\"\"Compress Conversation Context\n 压缩会话上下文\n\n The content to be compressed is divided into: recent original conversation content, and distant content that needs to be compressed.\n 待压缩的会话内容会被分为:近期的原始会话内容、远期需要压缩的会话内容。\n\n When compressing distant conversation records, attention is focused on the 'focus'\n 在压缩远期会话记录时,会将注意力集中于 focus\n\n **Recent Original Conversation Content:**\n 近期原始会话内容:\n\n First, traverse the 'messages' in reverse order, extract the recent conversation records, until the cumulative tokens of the conversation records exceed 50% of the 'safe_size'\n 先反向遍历 messages,提取近期的会话记录,直至会话记录的累计 tokens 超过 safe_size 的 50%\n\n If the tokens of the first recent conversation record exceed 50% of the 'safe_size', then directly extract the first recent conversation record\n 如近期第一条会话记录的 tokens 就超过了 safe_size 的 50% 则直接提取近期第一条会话记录\n\n **Distant Compressed Conversation Content:**\n 远期压缩会话内容:\n\n The remaining conversation records will be compressed as distant conversation records. The size after compression is expected to be within the range of ('safe_size' - cumulative original conversation tokens)\n 剩余的会话记录将作为远期会话记录进行压缩,压缩后的大小被期望保持在 (safe_size - 累计原始会话 tokens) 范围之内\n\n If the value of 'safe_size' - cumulative original conversation tokens is less than 0, then the size after compression is expected to be 1024 tokens\n 如 safe_size - 累计原始会话 tokens 的值小于 0 则压缩后的大小被期望保持在 1024 tokens\n\n Note: The compression process does not treat messages from the 'system' role specially, and they should be excluded from 'messages'.\n 注意:压缩过程并未对 system 角色的消息进行特殊处理,应将其排除在 messages 之外。\n\n :param messages: The conversation content to be compressed, excluding 'system message' and 'focus message'. 
It should include 'role', 'content', 'tokens' fields.\n 待压缩的会话内容,应排除掉 system message 和 focus message。需包含 'role','content','tokens' 字段。\n :param focus: The focus direction when compressing distant conversation records\n 压缩远期会话记录时的专注方向\n :param summary_model_config: The LLM model configuration used to compress distant conversation records\n 用于压缩远期会话记录的 LLM 模型配置\n :param agent_name:\n :param response_func: Used to return results to the interface or terminal.\n 用于向接口或终端返回结果\n :param stream_mode:\n :param safe_size: 'max_messages_tokens' of 'agent main model' minus the tokens of 'system message' and 'focus message'. When 'safe_size' is less than 0, it will be forcibly defined as 1024\n agent main model 的 max_messages_tokens 减去 system message 和 focus message 的 tokens,当 safe_size 小于 0 时,将被强制定义为 1024\n\n :return:\n --conversation_messages: The compressed conversation records, the difference from 'request_messages' is that the 'tokens' field of each message is retained\n 压缩后的会话记录,与 request_messages 的区别是保留了每条消息的 tokens 字段\n --request_messages: The message content requested to 'llm', removed the 'tokens' field of each message\n 用于向 llm 请求的消息内容,去掉了每条消息的 tokens 字段\n --total_tokens: The total tokens after compression\n 压缩后的整体tokens\n \"\"\"\n conversation_messages = []\n request_messages = []\n total_tokens = 0\n\n if len(messages) == 0:\n return None, None, None\n\n if safe_size < 0:\n safe_size = 1024\n # Reverse traverse the message to extract recent original conversation content.\n i = 0\n for message in reversed(messages):\n tokens = message[\"tokens\"]\n if total_tokens + tokens > int(safe_size * 0.5) and i != 0:\n break\n message_copy = message.copy()\n message_copy.pop('tokens', None)\n conversation_messages.insert(0, message)\n request_messages.insert(0, message_copy)\n total_tokens += tokens\n i -= 1\n # Compress the remaining messages as distant conversation records.\n if len(messages) > (i * -1):\n compressed_size = safe_size - total_tokens\n if compressed_size <= 0:\n compressed_size = 1024\n\n # 压缩剩余 messages\n content, tokens = generate_messages_summary(messages[:i], focus, summary_model_config, compressed_size, agent_name, response_func, stream_mode)\n\n if content:\n conversation_messages.insert(\n 0,\n {'role': 'assistant', 'content': f'Earlier historical conversation records: {content}',\n 'tokens': tokens}\n )\n request_messages.insert(\n 0,\n {'role': 'assistant', 'content': f'Earlier historical conversation records: {content}'}\n )\n total_tokens += tokens\n if conversation_messages and request_messages:\n return conversation_messages, request_messages, total_tokens\n else:\n return None, None, None" }, { "identifier": "compressed_text_universal", "path": "autogan/utils/compressed_text_utils.py", "snippet": "def compressed_text_universal(text: str, summary_model_config: LLMConfig, agent_name: str,\n response_func: ResponseFuncType, stream_mode: Optional[bool] = None,\n focus: Optional[str] = None, safe_size: Optional[int] = None) \\\n -> tuple[Optional[str], Optional[int]]:\n \"\"\"Compress the text, generating either a regular summary or a cue summary.\n 压缩文本,可生成普通摘要或线索摘要。\n\n First, the long text is sliced, and then a summary is generated for each slice.\n 首先将长文本切片,然后逐切片的生成摘要。\n\n If the value of the focus parameter is not None, then the attention will be focused on the focus area while generating the summary.\n 如 focus 参数的值不为 None 则在生成摘要时注意力集中于 focus。\n\n If the value of the safe_size parameter is not None and the length of the initial compression result exceeds the safe_size, 
the summary will be further compressed, with the compressed size expected to stay within the range of the safe_size.\n 如 safe_size 参数的值不为 None 且初次压缩结果长度超过 safe_size,则会对摘要进一步压缩,压缩后的大小被期望保持在 safe_size 范围之内。\n\n :param text: Text to be compressed.\n 待压缩的文本。\n :param summary_model_config: LLM configuration used for text compression.\n 用于压缩文本的 LLM 配置。\n :param agent_name:\n :param response_func: Used to return results to the interface or terminal.\n 用于向接口或终端返回结果\n :param stream_mode:\n :param focus: The focus direction when compressing text.\n 压缩文本时的专注方向。\n :param safe_size: The target size of the text after compression, if not provided there is no limit.\n 文本压缩后的目标尺寸,如果为空则不做限制。\n\n :return:\n --compressed_text: The text after compression.\n 压缩后的文本。\n --total_tokens: Total tokens after compression.\n 压缩后的整体tokens。\n \"\"\"\n\n compressed_text = \"\"\n total_tokens = 0\n\n split_texts = split_text(text, summary_model_config.max_messages_tokens, summary_model_config.model)\n\n for st in split_texts:\n if focus:\n content, tokens = generate_text_clues(st, focus, summary_model_config, agent_name, response_func,\n stream_mode)\n else:\n content, tokens = generate_text_summary(st, summary_model_config, agent_name, response_func, stream_mode)\n\n if content:\n compressed_text += content + \"\\n\"\n total_tokens += tokens\n\n if compressed_text:\n if safe_size and safe_size < total_tokens:\n return compressed_text_into_safe_size(compressed_text, safe_size, summary_model_config, agent_name,\n response_func, stream_mode)\n else:\n return compressed_text, total_tokens\n else:\n return None, None" }, { "identifier": "AgentConfig", "path": "autogan/oai/config_utils.py", "snippet": "class AgentConfig:\n \"\"\"The agent configuration includes:\n agent 配置包括:\n\n - main_model: The LLM configuration of the agent's main body.\n agent 主体的 LLM 配置。\n\n - summary_model: The LLM configuration used for compressing context and generating text summaries.\n 用于压缩上下文以及生成文本摘要的 LLM 配置。\n\n - request_interval_time: The interval time of LLM requests.\n LLM 请求间隔时间。\n\n - request_timeout:The timeout of LLM requests.\n LLM 请求超时时间。\n\n - max_retries: The maximum number of retries for LLM requests.\n LLM 请求最大重试次数。\n \"\"\"\n\n def __init__(\n self,\n config: Dict,\n ):\n model_filter = config[\"main_model\"].get(\"model_filter\", \"\")\n # main model config\n self._main_model_api_key_list = ConfigList(config[\"main_model\"][\"api_key_list\"], model_filter)\n self._main_model_max_messages_tokens = config[\"main_model\"][\"max_messages_tokens\"]\n\n # summary model config\n if \"summary_model\" in config:\n model_filter = config[\"summary_model\"].get(\"model_filter\", \"\")\n self._summary_model_api_key_list = ConfigList(config[\"summary_model\"][\"api_key_list\"], model_filter)\n self._summary_model_max_messages_tokens = config[\"summary_model\"][\"max_messages_tokens\"]\n else:\n # Use the main_model configuration when the summary_model configuration is empty.\n self._summary_model_api_key_list = self._main_model_api_key_list\n self._summary_model_max_messages_tokens = self._main_model_max_messages_tokens\n\n self._request_interval_time = config[\"request_interval_time\"]\n self._request_timeout = config[\"request_timeout\"]\n self._max_retries = config[\"max_retries\"]\n\n @property\n def main_model_config(self):\n return LLMConfig(\n self._main_model_api_key_list,\n self._main_model_max_messages_tokens,\n self._request_interval_time,\n self._request_timeout,\n self._max_retries\n )\n\n @property\n def 
summary_model_config(self):\n return LLMConfig(\n self._summary_model_api_key_list,\n self._summary_model_max_messages_tokens,\n self._request_interval_time,\n self._request_timeout,\n self._max_retries\n )" }, { "identifier": "count_text_tokens", "path": "autogan/oai/count_tokens_utils.py", "snippet": "def count_text_tokens(text: str, model: Optional[str] = \"gpt-3.5-turbo\") -> int:\n \"\"\"Calculate the tokens of the text.\n\n :param text: The text to be tokenized\n :param model: Calculate tokens for a specific model. If the model is not listed, it will default to calculating the number of tokens based on the gpt-3.5-turbo standard.\n\n :return: tokens\n \"\"\"\n\n if not text:\n return 0\n\n model_list = ['gpt-4', 'gpt-3.5-turbo-16k', 'gpt-3.5-turbo']\n if model not in model_list:\n model = \"gpt-3.5-turbo\"\n\n try:\n encoding = tiktoken.encoding_for_model(model)\n num_tokens = len(encoding.encode(text))\n except Exception as e:\n print(e)\n num_tokens = 0\n\n return num_tokens" }, { "identifier": "generate_chat_completion", "path": "autogan/oai/generate_utils.py", "snippet": "def generate_chat_completion(llm_config: LLMConfig, messages: List, agent_name: str, gen: str,\n response_func: ResponseFuncType, stream_mode: Optional[bool] = None)\\\n -> tuple[Optional[str], Optional[int]]:\n \"\"\"Call the LLM interface\n\n Currently, only the chatgpt model of openai (including azure) is adapted.\n\n :param llm_config: LLM configuration.\n :param messages:\n :param agent_name:\n :param gen: Used to distinguish agent replies, deep thoughts, context compression, general summaries, clue summaries\n - main: agent replies\n - idea: deep thoughts\n - messages_summary: context compression\n - text_summary: general summaries\n - clue_summary: clue summaries\n :param response_func: Used to return results to the interface or terminal.\n :param stream_mode:\n \"\"\"\n\n # When a certain configuration in the configuration list fails to request,\n # continue to try the next configuration until all configurations in the list are attempted.\n loop = llm_config.len_of_api_key_list\n for i in range(loop):\n time.sleep(llm_config.request_interval_time)\n api_key = llm_config.next_api_key\n try:\n completion_content = \"\"\n completion_tokens = 0\n index = 1\n for message in chat_completions(messages, api_key, llm_config.request_timeout,\n llm_config.max_retries, stream_mode):\n content = \"\"\n if stream_mode:\n if (message and \"choices\" in message and \"delta\" in message[\"choices\"][0]\n and \"content\" in message[\"choices\"][0][\"delta\"]\n and message[\"choices\"][0][\"delta\"][\"content\"]):\n content = message[\"choices\"][0][\"delta\"][\"content\"]\n completion_content += content\n else:\n if (message and \"choices\" in message and \"message\" in message[\"choices\"][0]\n and \"content\" in message[\"choices\"][0][\"message\"]\n and message[\"choices\"][0][\"message\"][\"content\"]):\n content = message[\"choices\"][0][\"message\"][\"content\"]\n completion_content = content\n if message and \"usage\" in message and \"completion_tokens\" in message[\"usage\"]:\n completion_tokens = message[\"usage\"][\"completion_tokens\"]\n response_func(agent_name, gen, api_key[\"model\"], stream_mode, index, content, completion_tokens, message)\n if content:\n index += 1\n\n if completion_content:\n if completion_tokens == 0:\n completion_tokens = count_text_tokens(completion_content, api_key['model'])\n return completion_content, completion_tokens\n else:\n raise ValueError(\"The return value is empty.\")\n 
except Exception as e:\n if i == loop - 1:\n print(f\"generate_chat_completion Exception: {e}\")\n return None, None" }, { "identifier": "environment_info", "path": "autogan/utils/environment_utils.py", "snippet": "def environment_info() -> str:\n \"\"\"Current environment information\n\n :return: --current_time: Y.m.d H:M:S week:%w\n \"\"\"\n info = f'current time: {get_time()}'\n\n return info" }, { "identifier": "default_response_func", "path": "autogan/utils/response.py", "snippet": "def default_response_func(agent_name: str, gen: str, model: str, stream_mode: bool, index: int,\n content: Optional[str], tokens: Optional[int], response: any):\n \"\"\"default response function\n 默认响应函数提供终端打印支持\n The default response function provides terminal printing support.\n\n :param agent_name:\n :param gen: Used to distinguish agent replies, deep thoughts, context compression, general summaries, clue summaries\n 用于区分 agent 回复、深思、压缩上下文、普通摘要、线索摘要\n - main: agent replies\n - idea: deep thoughts\n - messages_summary: context compression\n - text_summary: general summaries\n - clue_summary: clue summaries\n - system:\n - tool:\n - tool_call:\n :param model:\n :param stream_mode:\n :param index: response sequence\n :param content: completion content\n 生成内容\n :param tokens: completion tokens\n 生成内容的 tokens\n :param response: Respond to raw data\n 响应原始数据\n :return:\n \"\"\"\n if stream_mode:\n end = \"\"\n else:\n end = \"\\n\"\n\n if content:\n if gen == \"main\":\n if index == 1:\n print(f\"\\n{agent_name}: \", end=end)\n print(content, end=end)\n elif gen == \"idea\" or gen == \"tool_call\":\n if index == 1:\n print(\n colored(\n f\"\\n{agent_name}: \",\n \"cyan\",\n ),\n end=end,\n flush=True,\n )\n print(\n colored(\n content,\n \"cyan\",\n ),\n end=end,\n flush=True,\n )\n elif gen == \"system\":\n print(\n colored(\n f\"\\n{agent_name}: {content}\",\n \"red\",\n ),\n end=end,\n flush=True,\n )\n elif gen == \"tool\":\n print(\n colored(\n f\"\\n{agent_name}: {content}\",\n \"blue\",\n ),\n end=end,\n flush=True,\n )\n elif gen == \"search\":\n print(\n colored(\n f\"\\nurl: {content}\",\n \"cyan\",\n ),\n end=end,\n flush=True,\n )" } ]
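The AgentSwitch snippet above routes every reply by the "@name" prefix and treats messages containing the task tag as new tasks. Below is a minimal, self-contained sketch of that parsing convention; the message text and agent name are illustrative only.

import re

task_tag = "/task"
content = "@Coder /task Please implement the parser."

recipients = re.findall(r'@(\w+)', content)                        # -> ['Coder']
is_new_task = re.search(fr'@\w+ {task_tag}', content) is not None  # -> True
print(recipients[0], is_new_task)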
import re
from collections import defaultdict
from typing import Optional, Dict, Any
from autogan.agents.agent_switch import AgentSwitch
from autogan.utils.compressed_messages_utils import compressed_messages
from autogan.utils.compressed_text_utils import compressed_text_universal
from autogan.oai.config_utils import AgentConfig
from autogan.oai.count_tokens_utils import count_text_tokens
from autogan.oai.generate_utils import generate_chat_completion
from autogan.utils.environment_utils import environment_info
from autogan.utils.response import default_response_func
from termcolor import colored
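For reference, a sketch of the agent_config dict consumed by the AgentConfig class from the context snippets. The top-level keys follow that class; the api_key_list entry fields ("model", "api_key") are assumptions inferred from how generate_utils reads them, and the key value is a placeholder.

agent_config = {
    "main_model": {
        "api_key_list": [{"model": "gpt-3.5-turbo", "api_key": "sk-..."}],  # assumed entry format
        "max_messages_tokens": 4096,
    },
    "request_interval_time": 1,
    "request_timeout": 120,
    "max_retries": 3,
}

config = AgentConfig(agent_config)
llm_config = config.main_model_config  # summary_model falls back to main_model when omitted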
11,425
content = re.sub(r'^@\S+\s+', '', content).strip() if self._use_tool and not content.startswith("@"): content, completion_tokens = self.tool_function(task_id, content, completion_tokens) # Assign recipients for the results generated by the tool_function. if not content.startswith("@"): if (task_id in self._conversation_focus and "task_issuer" in self._conversation_focus[task_id]): receiver = self._conversation_focus[task_id]['task_issuer'] else: receiver = sender_name content = f"@{receiver} " + content self.response_func(self.name, "tool", "", False, 0, content, completion_tokens, None) self._push_to_switch(switch, task_id, content, completion_tokens) except SystemExit: print("The task is finished.") except Exception as e: print(f"e :{e}") if self._use_tool == "only": self._push_to_switch(switch, task_id, f"@{sender_name} Generate error, Trying again", 4) else: self._re_push_to_switch(switch, task_id, hold_content, hold_completion_tokens, sender_name) def _base_generate_reply(self, switch: AgentSwitch, task_id: str, gen: str) -> tuple[Optional[str], Optional[int]]: """Use the main LLM to generate responses. Before generating a response, the historical conversation records within the current task scope, excluding system_message and focus_message, will be compressed first. :param switch: AgentSwitch Object :param task_id: Task id :return: --content: Generate content --tokens: Generate content tokens """ system_message, focus_message, total_tokens = self._base_message(switch, task_id) # Calculate the target size of context compression. safe_size = self.agent_config.main_model_config.max_messages_tokens - total_tokens # Compress the historical conversation records. request_messages, total_tokens = self._chat_messages_safe_size(task_id, safe_size) request_messages.insert(0, system_message) if focus_message: request_messages.insert(0, focus_message) return generate_chat_completion(self.agent_config.main_model_config, request_messages, self.name, gen, self.response_func, self.stream_mode) def _super_rich_generate_reply(self, switch: AgentSwitch, task_id: str) -> tuple[Optional[str], Optional[int]]: """Use the main LLM to generate responses. Before generating a response, the historical conversation records within the current task scope, excluding system_message and focus_message, will be compressed first. :param switch: AgentSwitch Object :param task_id: Task id :return: --content: Generate content --tokens: Generate content tokens """ system_message, focus_message, total_tokens = self._base_message(switch, task_id) # Calculate the target size of context compression. safe_size = self.agent_config.main_model_config.max_messages_tokens - total_tokens # Compress the historical conversation records. 
request_messages, total_tokens = self._chat_messages_safe_size(task_id, safe_size) if focus_message: request_messages.insert(0, focus_message) index = 0 ideas = defaultdict(str) while True: message, is_end = self._super_rich_message(switch, task_id, ideas, index) if is_end: gen = "main" else: gen = "idea" print( colored( f"\n\n>>>>>>>> {message[0]}:", "cyan", ), flush=True, ) if message[1]["role"] == "system": messages = request_messages.copy() messages.append(message[1]) content, token = generate_chat_completion(self.agent_config.main_model_config, messages, self.name, gen, self.response_func, self.stream_mode) ideas[message[0]] = content tokens = token else: content, token = generate_chat_completion(self.agent_config.main_model_config, [message[1]], self.name, gen, self.response_func, self.stream_mode) ideas[message[0]] = content tokens = token if is_end: break else: index += 1 return content, tokens def _push_to_switch(self, switch: AgentSwitch, task_id: str, content: str, completion_tokens: int): content = content.replace(f"@{self.name} ", "") self._conversation_messages[task_id].append( {'role': 'assistant', 'content': content, 'tokens': completion_tokens}) switch.handle_and_forward(task_id, self.name, content, completion_tokens) def _chat_messages_safe_size(self, task_id: str, safe_size: int) \ -> tuple[list, int]: """Compress the historical session records within the current task scope (excluding system_message and focus_message) :param task_id: Task id :param safe_size: The max_messages_tokens of the main LLM configuration :return: --request_messages: It is used for the message content requested to LLM, with the tokens field of each message removed. –-total_tokens: The overall tokens after compression. """ if task_id in self._conversation_messages and self._conversation_messages[task_id]:
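The cropped_code above stops inside _chat_messages_safe_size. One plausible completion is sketched below, built on the compressed_messages helper documented in the context snippets; it is a hedged sketch, not the repository's actual method body.

    def _chat_messages_safe_size(self, task_id: str, safe_size: int) -> tuple[list, int]:
        if task_id in self._conversation_messages and self._conversation_messages[task_id]:
            conversation_messages, request_messages, total_tokens = compressed_messages(
                self._conversation_messages[task_id],
                self._conversation_focus[task_id]['task_content'],
                self.agent_config.summary_model_config,
                self.name,
                self.response_func,
                self.stream_mode,
                safe_size,
            )
            if request_messages:
                # Keep the compressed history (with per-message token counts) for later rounds.
                self._conversation_messages[task_id] = conversation_messages
                return request_messages, total_tokens
        return [], 0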
try: except ImportError: def colored(x, *args, **kwargs): return x class UniversalAgent: def __init__( self, name: str, agent_config: Optional[Dict] = None, duty: Optional[str] = None, work_flow: Optional[str] = None, use_tool: Optional[str] = None, # only | join super_rich: Optional[str] = None, # auto | on | off stream_mode: Optional[bool] = None, ): """Agent base class Each agent can communicate with other agents in the current department and the leader of the subordinate department to complete tasks together. 每个 agent 可与当前部门的其他 agent 以及下级部门的 leader 沟通,协作完成任务。 To provide functions beyond the modeling capabilities for the agent, you can override the tool_function method. 想要为 agent 提供模型能力之外的功能,可以通过重写 tool_function 方法来实现。 :param name: The agent name should be unique in the organizational structure. agent name 在组织架构中应当是唯一的。 :param agent_config: The agent configuration includes: agent 配置包括: - main_model: The LLM configuration of the agent's main body. agent 主体的 LLM 配置。 - summary_model: The LLM configuration used for compressing context and generating text summaries. 用于压缩上下文以及生成文本摘要的 LLM 配置。 - request_interval_time: The interval time of LLM requests. LLM 请求间隔时间。 - request_timeout:The timeout of LLM requests. LLM 请求超时时间。 - max_retries: The maximum number of retries for LLM requests. LLM 请求最大重试次数。 :param duty: Used to explain one's job responsibilities to other agents. 用于向其他 agent 说明自己的工作职责。 :param work_flow: Defines the workflow of the agent. 定义 agent 的工作流程。 :param use_tool: Defines the mode of the agent using the tool_function: 定义 agent 使用 tool_function 的模式: - None: means not using the tool function. 不使用工具函数。 - only: Do not use the LLM, only use the tool function to generate results. 不使用 LLM,仅使用工具函数生成结果。 - join: The content generated by the LLM will be used as the input parameter for the tool_function. LLM 生成的内容将作为 tool_function 的输入参数 :param super_rich: Whether to enable the deep thought function. When enabled, it uses a set of analysis processes to refine the output of the agent. However, this can increase the number of tokens used, so it is not recommended for use with the gpt-4 model. The name "super_rich" is a reminder that using this function with gpt-4 can be expensive, even more so than Elon Musk's earning speed. 是否开启深思功能,开启后会使用一套分析流程来收敛 agent 的输出结果,但这样做会增加 tokens 的消耗,因此不建议在gpt-4模型下使用。 之所以这个参数叫 super_rich ,是为了提醒用户,如果在 gpt-4 下使用,其花钱的速度可能会超过马斯克赚钱的速度。 - auto: Disable for GPT-4, enable for other models 在 gpt-4下禁用,其他模型开启 - on: Always enabled 始终开启 - off: Always disabled 始终关闭 :param stream_mode: Whether to enable the stream_mode 定义 agent 的工作流程。 """ self.name = name self.agent_config = AgentConfig(agent_config) if agent_config else None self.duty = duty self.super_rich = super_rich # auto | on | off self.stream_mode = stream_mode self.response_func = default_response_func # Used to return results to the interface or terminal. self.workmates = "" # relevant personnel's name and duty self.pipeline = "" # In a linear workflow, this is the next person to communicate with. # Translate the session ID of the pusher into the sub-session ID of the receiver. self.sub_to_main_task_id = defaultdict(str) # Translate the session id of the sender into the superior session id of the receiver. 
self.main_to_sub_task_id = defaultdict(str) self._work_flow = work_flow self._use_tool = use_tool # only | join self._conversation_messages = defaultdict(list) # key: task id,value: Conversation history self._conversation_focus = defaultdict(Dict) # key: task id,value: {"task_issuer": "", "task_content": ""} def set_agent_config(self, agent_config: Dict): self.agent_config = AgentConfig(agent_config) def new_task(self, switch: AgentSwitch, task_id: str, sender_name: str, content: str, completion_tokens: int): """Accept tasks posted by other agent. :param switch: AgentSwitch object :param task_id: New task id :param sender_name: Task Issuer's Name :param content: Task content :param completion_tokens: Task content tokens """ # Avoid excessively long task content if (self._use_tool != "only" and completion_tokens > self.agent_config.main_model_config.max_messages_tokens * 0.5): self._push_to_switch(switch, task_id, "The task is too long", 5) # Cache task information to maintain focus during task execution task_content = content.replace(f"@{self.name}", "please help me") task_content = task_content.replace(f"{switch.task_tag}", "") self._conversation_focus[task_id] = {'task_issuer': sender_name, 'task_content': task_content} # Start the generation process self._generate_process(switch, task_id, sender_name, content, completion_tokens) def receive(self, switch: AgentSwitch, task_id: str, sender_name: str, content: str, completion_tokens: int): """Receive messages sent by other agents (excluding new task requests) :param switch: AgentSwitch object :param task_id: Task id :param sender_name: Name of the agent sending the message :param content: Message content :param completion_tokens: Message content tokens """ if self._use_tool != "only": safe_size = self.agent_config.main_model_config.max_messages_tokens if completion_tokens > safe_size: # 如消息内容过长,则对其进行压缩 compressed_text, total_tokens = compressed_text_universal( content, self.agent_config.summary_model_config, self.name, self.response_func, self.stream_mode, self._conversation_focus[task_id]['task_content'], safe_size) if compressed_text: content = compressed_text completion_tokens = total_tokens # Press the message into the session record of the current task self._conversation_messages[task_id].append( {'role': 'user', 'content': content, 'tokens': completion_tokens}) # Start the generation process self._generate_process(switch, task_id, sender_name, content, completion_tokens) def tool_function(self, task_id: str, param: Optional[str] = None, tokens: Optional[int] = None) -> tuple[str, int]: """When the value of the use_tool parameter is 'only' or 'join', please override this method. :return: --content: Generate content --tokens: Generate content tokens """ pass def _base_message(self, switch: AgentSwitch, task_id: str) \ -> tuple[dict[str, str], Optional[dict[str, Any]], int]: """This is the paradigm message required for each round of dialogue. 每轮对话都需要的范式消息 :param switch: AgentSwitch object :param task_id: Task id :return: -- system_message: Used to clarify its own workflow to the agent and where the agent can seek help. 用于向 agent 阐明自身工作流程,以及可以向哪些 agent 寻求帮助。 -- focus_message: Used to maintain focus during task execution, including who is currently executing the task and what the content of the task is. It will not be forgotten or compressed with the increase of dialogue rounds. 用于在任务执行过程中保持专注力,包括当前正在执行谁发布的任务、任务的内容是什么。不会随会话轮次的增多而被遗忘或压缩。 -- total_tokens: The overall tokens of the content of the system_message and the focus_message. 
system_message 以及 focus_message 内容的整体 tokens。 """ total_tokens = 0 info = environment_info() # Assemble system message system_prompt = f"""Now your name is {self.name}, you are an assistant who will not give up easily when you encounter difficulties Environment information: {info}""" if self._work_flow: system_prompt += f""" Your work flow is:: {self._work_flow}""" if self.workmates: system_prompt += f""" The following professionals can help you accomplish the task: {self.workmates}""" if self._use_tool is None: system_prompt += f""" Please follow these guidelines when replying to any content: 1. Be aware that if you do not @recipient at the beginning, the system will give an error. 2. When asking for help, you need to first post a task, the method is: @recipient {switch.task_tag} task content. 3. The recipient does not have any dialogue records before the task begins, nor can they see your conversations with others. 4. Do not suggest the recipient to communicate with others. 5. Do not explain to the initiator of the task what you are going to do. 6. In the reply, do not converse with two recipients at the same time. """ total_tokens += 37 system_message = {'role': 'system', 'content': system_prompt} if task_id in self._conversation_focus and self._conversation_focus[task_id]: # Assemble focus message focus_prompt = f"""current task content: task issuer: {self._conversation_focus[task_id]['task_issuer']} task content: {self._conversation_focus[task_id]['task_content']}""" if self._use_tool is None: if self.pipeline and self.pipeline != "\\": focus_prompt += f""" When you have the result of the task, please @{self.pipeline} {switch.task_tag} and reply to the execution result, He'll know what to do next""" else: focus_prompt += f""" When you have the result of the task, please @{self._conversation_focus[task_id]['task_issuer']} and reply to the execution result""" total_tokens += count_text_tokens(focus_prompt) focus_message = {'role': 'user', 'content': focus_prompt} else: focus_message = None return system_message, focus_message, total_tokens def _super_rich_message(self, switch: AgentSwitch, task_id: str, ideas: dict, index: int)\ -> tuple[list[str, dict], bool]: """Thought prompts, with new content requested at each level 深思提示词,每层请求新的内容 :param switch: AgentSwitch object :param task_id: Task id :param ideas: Results generated :param index: Current thinking depth :return: -- message_list: Thought prompts list -- tag: -- message: Thought prompts -- is_end: """ messages = [] task_issuer = "" if self.pipeline and self.pipeline != "\\": task_issuer += f"{self.pipeline} : When there is no more work to be done, Submit the results to me." else: task_issuer += f"{self._conversation_focus[task_id]['task_issuer']} : When there is no more work to be done, Submit the results to me." total_tokens = 0 info = f""" reference workflow: {environment_info()}""" workmates = "" if self.workmates: workmates = f""" relevant personnel's name and duty: {self.workmates} {task_issuer}""" workflow = "" if self._work_flow: workflow = f""" {self._work_flow}""" repetitive_prompt = f"""The above is a group chat record, assuming you are {self.name}, please do the following analysis: Step 1: Understand your overall workflow (No need to output): workflow:{workflow} Step 2: Analyze whether {self.name} is repeating a task in the workflow or encountering difficulties (No need to output). Step 3: output your analysis results If yes, please give advice on how to stop repeating from the perspective of {self.name}. 
If not, please reply one word 'None'.""" messages.append(["Observe whether the previous conversation fell into a cycle", {'role': 'system', 'content': repetitive_prompt}]) debug_prompt = f"""The above is a group chat record, please do the following analysis: Step 1: Understand your overall workflow, Including the execution conditions and objectives for each step (No need to output): workflow:{workflow} Step 2: Analyze whether there are unresolved errors in the previous conversation (No need to output). Step 3: Analyze If there are unresolved errors, Think about what the root cause of these errors is (No need to output). Step 4: Analyze If there are unresolved errors, From {self.name}'s perspective, how should you solve it next? (No need to output) Step 5: output your analysis results, including the following content: whether there are unresolved errors in the previous conversation: If there are unresolved errors, What errors in the dialogue: If there are unresolved errors, The root cause of the error: If there are unresolved errors, How to solve it next: Note: There's no need to output the specific dialogue content, just output the analysis results.""" messages.append(["Reflect on whether there are any errors in the previous dialogue process", {'role': 'system', 'content': debug_prompt}]) planning_prompt = f"""The above is a group chat record, assuming you are {self.name}, please do the following analysis: Step 1: Understand your overall workflow (No need to output): workflow:{workflow} Step 2: Analyze which item to execute or continue to execute in the workflow (No need to output). Step 3: Understand the specific errors that have occurred in the current conversation (No need to output). Are you stuck in a deadlock: {ideas["Observe whether the previous conversation fell into a cycle"]} {ideas["Reflect on whether there are any errors in the previous dialogue process"]} Step 4: Understand some rules (No need to output). 1. When asking for help, you need to first post a task, 2. The recipient does not have any dialogue records before the task begins, nor can they see your conversations with others. 2. Don't let the other party to communicate with others. 3. In your plan, there should be no content about apologizing to others or what you are going to do. Step 5: output your analysis results, including the following content: Do you need to create a task: In the next round of conversation, the specific work you need to do is(Please explain in detail and Ignore the work that has been completed.): all the details that need to be taken into consideration, including recommended methods or tools, etc: Note: There's no need to output the specific dialogue content, just output the analysis results. """ messages.append(["Think about what to do next", {'role': 'system', 'content': planning_prompt}]) communicate_prompt = f"""your name is {self.name}, please do the following analysis: Step 1: Understand your work plan (No need to output): {ideas["Think about what to do next"]} Step 2: Get to know your colleagues, including what they can and cannot do (No need to output): {workmates} {self._conversation_focus[task_id]['task_issuer']} : "" Step 3: Analyze who is the most relevant colleague to the first step of next round of conversation the specific work you need to do, note that you can only choose one person (No need to output). 
Step 4: output your analysis results, including the following content: who is the most relevant colleague to the first step of your plan: What are the requirements when the other party receives messages: What can the other party do: What the other party cannot do: Note: please provide the correct names of relevant personnel, Don't provide names that don't exist.""" messages.append(["Think about who to communicate with next", {'role': 'user', 'content': communicate_prompt}]) reply_prompt = f"""The above is a group chat record, assuming you are {self.name}, Please strictly follow the contents of the guidelines below to generate your response, note do not communicate with others or perform other tasks: {info} Step 1: Clarify who you will be communicating with (No need to output): {ideas["Think about who to communicate with next"]} Step 2: Specify the task you are going to carry out (No need to output): {ideas["Think about what to do next"]} Step 3: Understand some response rules (No need to output). 1. Please do not mention the second person in your reply content. 2. When you need to post a task, the method is: @recipient {switch.task_tag} task content. Step 4: Please follow the content of the previous step, From {self.name}'s perspective, Output your response in the format below: @who you will be communicating with + Reply content""" messages.append(["Generate reply content", {'role': 'system', 'content': reply_prompt}]) if index == len(messages) - 1: return messages[index], True else: return messages[index], False def _generate_process(self, switch: AgentSwitch, task_id: str, sender_name: str, content: str, completion_tokens: int): """Generate process If the value of the use_tool parameter is None, only the main LLM is used to generate a response. 如果 use_tool 参数的值为 None,则仅使用主体 LLM 生成回复。 If the value of the use_tool parameter is 'only', the main LLM is skipped and the tool_function is used directly to generate a response. 如果 use_tool 参数的值为 only,则跳过主体 LLM 直接使用 tool_function 生成回复。 If the value of the use_tool parameter is 'join', the main LLM is first used to generate content, and then the generated content is used as the input parameter for tool_function. 如果 use_tool 参数的值为 join,则先使用主体 LLM 生成内容,然后将生成的内容作为 tool_function 的输入参数。 """ hold_content = content hold_completion_tokens = completion_tokens try: if self._use_tool != "only": if self._use_tool == "join": print( colored( f"\n\n>>>>>>>> tool call:", "cyan", ), flush=True, ) content, completion_tokens = self._base_generate_reply(switch, task_id, "tool_call") else: if self.super_rich == "on": content, completion_tokens = self._super_rich_generate_reply(switch, task_id) elif (self.super_rich == "auto" or self.super_rich is None) and "gpt-4" not in self.agent_config.main_model_config.model: content, completion_tokens = self._super_rich_generate_reply(switch, task_id) else: content, completion_tokens = self._base_generate_reply(switch, task_id, "main") if content is None: raise ValueError("Failed to generate content.") else: content = re.sub(r'^@\S+\s+', '', content).strip() if self._use_tool and not content.startswith("@"): content, completion_tokens = self.tool_function(task_id, content, completion_tokens) # Assign recipients for the results generated by the tool_function. 
if not content.startswith("@"): if (task_id in self._conversation_focus and "task_issuer" in self._conversation_focus[task_id]): receiver = self._conversation_focus[task_id]['task_issuer'] else: receiver = sender_name content = f"@{receiver} " + content self.response_func(self.name, "tool", "", False, 0, content, completion_tokens, None) self._push_to_switch(switch, task_id, content, completion_tokens) except SystemExit: print("The task is finished.") except Exception as e: print(f"e :{e}") if self._use_tool == "only": self._push_to_switch(switch, task_id, f"@{sender_name} Generate error, Trying again", 4) else: self._re_push_to_switch(switch, task_id, hold_content, hold_completion_tokens, sender_name) def _base_generate_reply(self, switch: AgentSwitch, task_id: str, gen: str) -> tuple[Optional[str], Optional[int]]: """Use the main LLM to generate responses. Before generating a response, the historical conversation records within the current task scope, excluding system_message and focus_message, will be compressed first. :param switch: AgentSwitch Object :param task_id: Task id :return: --content: Generate content --tokens: Generate content tokens """ system_message, focus_message, total_tokens = self._base_message(switch, task_id) # Calculate the target size of context compression. safe_size = self.agent_config.main_model_config.max_messages_tokens - total_tokens # Compress the historical conversation records. request_messages, total_tokens = self._chat_messages_safe_size(task_id, safe_size) request_messages.insert(0, system_message) if focus_message: request_messages.insert(0, focus_message) return generate_chat_completion(self.agent_config.main_model_config, request_messages, self.name, gen, self.response_func, self.stream_mode) def _super_rich_generate_reply(self, switch: AgentSwitch, task_id: str) -> tuple[Optional[str], Optional[int]]: """Use the main LLM to generate responses. Before generating a response, the historical conversation records within the current task scope, excluding system_message and focus_message, will be compressed first. :param switch: AgentSwitch Object :param task_id: Task id :return: --content: Generate content --tokens: Generate content tokens """ system_message, focus_message, total_tokens = self._base_message(switch, task_id) # Calculate the target size of context compression. safe_size = self.agent_config.main_model_config.max_messages_tokens - total_tokens # Compress the historical conversation records. 
request_messages, total_tokens = self._chat_messages_safe_size(task_id, safe_size) if focus_message: request_messages.insert(0, focus_message) index = 0 ideas = defaultdict(str) while True: message, is_end = self._super_rich_message(switch, task_id, ideas, index) if is_end: gen = "main" else: gen = "idea" print( colored( f"\n\n>>>>>>>> {message[0]}:", "cyan", ), flush=True, ) if message[1]["role"] == "system": messages = request_messages.copy() messages.append(message[1]) content, token = generate_chat_completion(self.agent_config.main_model_config, messages, self.name, gen, self.response_func, self.stream_mode) ideas[message[0]] = content tokens = token else: content, token = generate_chat_completion(self.agent_config.main_model_config, [message[1]], self.name, gen, self.response_func, self.stream_mode) ideas[message[0]] = content tokens = token if is_end: break else: index += 1 return content, tokens def _push_to_switch(self, switch: AgentSwitch, task_id: str, content: str, completion_tokens: int): content = content.replace(f"@{self.name} ", "") self._conversation_messages[task_id].append( {'role': 'assistant', 'content': content, 'tokens': completion_tokens}) switch.handle_and_forward(task_id, self.name, content, completion_tokens) def _chat_messages_safe_size(self, task_id: str, safe_size: int) \ -> tuple[list, int]: """Compress the historical session records within the current task scope (excluding system_message and focus_message) :param task_id: Task id :param safe_size: The max_messages_tokens of the main LLM configuration :return: --request_messages: It is used for the message content requested to LLM, with the tokens field of each message removed. –-total_tokens: The overall tokens after compression. """ if task_id in self._conversation_messages and self._conversation_messages[task_id]:
conversation_messages, request_messages, total_tokens = compressed_messages(
1
2023-12-06 03:24:34+00:00
16k
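The first record above stops right where `_chat_messages_safe_size` delegates to a `compressed_messages` helper whose body is not part of this excerpt. As a rough, non-authoritative sketch of the budget-driven truncation that method describes (keep the newest messages whose summed `tokens` fit within `safe_size`, then strip the `tokens` field before the request), something like the following would do; the function name and the toy history are hypothetical, and only the message shape with a `tokens` field is taken from the record.

# Minimal sketch only -- not the repository's compressed_messages implementation.
# Assumes messages shaped like the record above: {'role', 'content', 'tokens'}.
def compress_to_safe_size(messages: list[dict], safe_size: int) -> tuple[list[dict], int]:
    kept, total = [], 0
    for msg in reversed(messages):              # walk from newest to oldest
        cost = msg.get("tokens", 0)
        if kept and total + cost > safe_size:   # budget exhausted: drop older history
            break
        kept.append(msg)
        total += cost
    kept.reverse()                              # restore chronological order
    request_messages = [{"role": m["role"], "content": m["content"]} for m in kept]
    return request_messages, total

# A 60-token budget keeps only the two most recent messages (50 tokens in total).
history = [
    {"role": "user", "content": "first task", "tokens": 40},
    {"role": "assistant", "content": "result A", "tokens": 30},
    {"role": "user", "content": "follow-up", "tokens": 20},
]
print(compress_to_safe_size(history, safe_size=60))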
Deltares/imod-python
imod/msw/model.py
[ { "identifier": "CouplerMapping", "path": "imod/msw/coupler_mapping.py", "snippet": "class CouplerMapping(MetaSwapPackage):\n \"\"\"\n This contains the data to connect MODFLOW 6 cells to MetaSWAP svats.\n\n This class is responsible for the file `mod2svat.inp`. It also includes\n connection to wells.\n\n Parameters\n ----------\n modflow_dis: StructuredDiscretization\n Modflow 6 structured discretization\n well: WellDisStructured (optional)\n If given, this parameter describes sprinkling of SVAT units from MODFLOW\n cells.\n \"\"\"\n\n _file_name = \"mod2svat.inp\"\n _metadata_dict = {\n \"mod_id\": VariableMetaData(10, 1, 9999999, int),\n \"free\": VariableMetaData(2, None, None, str),\n \"svat\": VariableMetaData(10, 1, 9999999, int),\n \"layer\": VariableMetaData(5, 0, 9999, int),\n }\n\n _with_subunit = (\"mod_id\",)\n _without_subunit = ()\n _to_fill = (\"free\",)\n\n def __init__(\n self,\n modflow_dis: mf6.StructuredDiscretization,\n well: mf6.WellDisStructured = None,\n ):\n super().__init__()\n\n self.well = well\n # Test if equal or larger than 1, to ignore idomain == -1 as well. Don't\n # assign to self.dataset, as grid extent might differ from svat when\n # MetaSWAP only covers part of the Modflow grid domain.\n self.idomain_active = modflow_dis[\"idomain\"] >= 1\n\n def _create_mod_id_rch(self, svat):\n \"\"\"\n Create modflow indices for the recharge layer, which is where\n infiltration will take place.\n \"\"\"\n self.dataset[\"mod_id\"] = xr.full_like(svat, fill_value=0, dtype=np.int64)\n n_subunit = svat[\"subunit\"].size\n idomain_top_active = self.idomain_active.sel(layer=1, drop=True)\n\n n_mod_top = idomain_top_active.sum()\n\n # idomain does not have a subunit dimension, so tile for n_subunits\n mod_id_1d = np.tile(np.arange(1, n_mod_top + 1), (n_subunit, 1))\n\n self.dataset[\"mod_id\"].values[:, idomain_top_active.values] = mod_id_1d\n\n def _render(self, file, index, svat):\n self._create_mod_id_rch(svat)\n # package check only possible after calling _create_mod_id_rch\n self._pkgcheck()\n\n data_dict = {\"svat\": svat.values.ravel()[index]}\n\n data_dict[\"layer\"] = np.full_like(data_dict[\"svat\"], 1)\n\n for var in self._with_subunit:\n data_dict[var] = self._index_da(self.dataset[var], index)\n\n # Get well values\n if self.well:\n mod_id_well, svat_well, layer_well = self._create_well_id(svat)\n data_dict[\"mod_id\"] = np.append(mod_id_well, data_dict[\"mod_id\"])\n data_dict[\"svat\"] = np.append(svat_well, data_dict[\"svat\"])\n data_dict[\"layer\"] = np.append(layer_well, data_dict[\"layer\"])\n\n for var in self._to_fill:\n data_dict[var] = \"\"\n\n dataframe = pd.DataFrame(\n data=data_dict, columns=list(self._metadata_dict.keys())\n )\n\n self._check_range(dataframe)\n\n return self.write_dataframe_fixed_width(file, dataframe)\n\n def _create_well_id(self, svat):\n \"\"\"\n Get modflow indices, svats, and layer number for the wells\n \"\"\"\n n_subunit = svat[\"subunit\"].size\n\n # Convert to Python's 0-based index\n well_row = self.well[\"row\"] - 1\n well_column = self.well[\"column\"] - 1\n well_layer = self.well[\"layer\"] - 1\n\n n_mod = self.idomain_active.sum()\n mod_id = xr.full_like(self.idomain_active, 0, dtype=np.int64)\n mod_id.values[self.idomain_active.values] = np.arange(1, n_mod + 1)\n\n well_mod_id = mod_id[well_layer, well_row, well_column]\n well_mod_id = np.tile(well_mod_id, (n_subunit, 1))\n\n well_svat = svat.values[:, well_row, well_column]\n\n well_active = well_svat != 0\n\n well_svat_1d = well_svat[well_active]\n 
well_mod_id_1d = well_mod_id[well_active]\n\n # Tile well_layers for each subunit\n layer = np.tile(well_layer + 1, (n_subunit, 1))\n layer_1d = layer[well_active]\n\n return (well_mod_id_1d, well_svat_1d, layer_1d)" }, { "identifier": "GridData", "path": "imod/msw/grid_data.py", "snippet": "class GridData(MetaSwapPackage):\n \"\"\"\n This contains the grid data of MetaSWAP.\n\n This class is responsible for the file `area_svat.inp`\n\n Parameters\n ----------\n area: array of floats (xr.DataArray)\n Describes the area of SVAT units. This array must have a subunit coordinate\n to describe different landuses.\n landuse: array of integers (xr.DataArray)\n Describes the landuse type of SVAT units.\n This array must have a subunit coordinate.\n rootzone_depth: array of floats (xr.DataArray)\n Describes the rootzone depth of SVAT units.\n This array must have a subunit coordinate to describe different landuses.\n surface_elevation: array of floats (xr.DataArray)\n Describes the surface elevation of SVAT units.\n This array must not have a subunit coordinate.\n soil_physical_unit: array of integers (xr.DataArray)\n Describes the physical parameters of SVAT units.\n These parameters will be looked up in a table according to the given integers.\n This array must not have a subunit coordinate.\n active: array of bools (xr.DataArray)\n Describes whether SVAT units are active or not.\n This array must not have a subunit coordinate.\n \"\"\"\n\n _file_name = \"area_svat.inp\"\n _metadata_dict = {\n \"svat\": VariableMetaData(10, 1, 99999999, int),\n \"area\": VariableMetaData(10, 0.0, 999999.0, float),\n \"surface_elevation\": VariableMetaData(8, -9999.0, 9999.0, float),\n \"temp\": VariableMetaData(8, None, None, str),\n \"soil_physical_unit\": VariableMetaData(6, 1, 999999, int),\n \"soil_physical_unit_string\": VariableMetaData(16, None, None, str),\n \"landuse\": VariableMetaData(6, 1, 999999, int),\n \"rootzone_depth\": VariableMetaData(8, 0.0, 10.0, float),\n }\n _with_subunit = (\"area\", \"landuse\", \"rootzone_depth\")\n _without_subunit = (\"surface_elevation\", \"soil_physical_unit\")\n _to_fill = (\"soil_physical_unit_string\", \"temp\")\n\n def __init__(\n self,\n area: xr.DataArray,\n landuse: xr.DataArray,\n rootzone_depth: xr.DataArray,\n surface_elevation: xr.DataArray,\n soil_physical_unit: xr.DataArray,\n active: xr.DataArray,\n ):\n super().__init__()\n self.dataset[\"area\"] = area\n self.dataset[\"landuse\"] = landuse\n self.dataset[\"rootzone_depth\"] = rootzone_depth\n self.dataset[\"surface_elevation\"] = surface_elevation\n self.dataset[\"soil_physical_unit\"] = soil_physical_unit\n self.dataset[\"active\"] = active\n\n self._pkgcheck()\n\n def generate_index_array(self):\n \"\"\"\n Generate index arrays to be used on other packages\n \"\"\"\n area = self.dataset[\"area\"]\n active = self.dataset[\"active\"]\n\n isactive = area.where(active).notnull()\n\n index = isactive.values.ravel()\n\n svat = xr.full_like(area, fill_value=0, dtype=np.int64).rename(\"svat\")\n svat.values[isactive.values] = np.arange(1, index.sum() + 1)\n\n return index, svat\n\n def _pkgcheck(self):\n super()._pkgcheck()\n\n dx, _, _, dy, _, _ = spatial_reference(self.dataset)\n\n if (not np.isscalar(dx)) or (not np.isscalar(dy)):\n raise ValueError(\"MetaSWAP only supports equidistant grids\")\n\n active = self.dataset[\"active\"]\n\n cell_area = active.astype(float) * dx * abs(dy)\n total_area = self.dataset[\"area\"].sum(dim=\"subunit\")\n\n # Apparently all regional models intentionally provided area 
grids\n # smaller than cell area, to allow surface waters as workaround.\n unequal_area = (total_area > cell_area).values[active.values]\n\n if np.any(unequal_area):\n raise ValueError(\n \"Provided area grid with total areas larger than cell area\"\n )" }, { "identifier": "IdfMapping", "path": "imod/msw/idf_mapping.py", "snippet": "class IdfMapping(MetaSwapPackage):\n \"\"\"\n Describes svat location in the IDF grid.\n\n Note that MetaSWAP can only write equidistant grids.\n \"\"\"\n\n _file_name = \"idf_svat.inp\"\n _metadata_dict = {\n \"svat\": VariableMetaData(10, 1, 9999999, int),\n \"rows\": VariableMetaData(10, 1, 9999999, int),\n \"columns\": VariableMetaData(10, 1, 9999999, int),\n \"y_grid\": VariableMetaData(15, 0.0, 9999999.0, float),\n \"x_grid\": VariableMetaData(15, 0.0, 9999999.0, float),\n }\n\n _with_subunit = ()\n _without_subunit = (\"rows\", \"columns\", \"y_grid\", \"x_grid\")\n _to_fill = ()\n\n # NOTE that it is stated in the IO manual: \"The x- and y-coordinates should\n # increase with increasing col, row.\" But the example works with decreasing\n # y-coordinates.\n\n def __init__(self, area, nodata):\n super().__init__()\n\n self.dataset[\"area\"] = area\n self.dataset[\"nodata\"] = nodata\n\n nrow = self.dataset.coords[\"y\"].size\n ncol = self.dataset.coords[\"x\"].size\n\n y_index = xr.DataArray(\n np.arange(1, nrow + 1), coords={\"y\": self.dataset.coords[\"y\"]}, dims=(\"y\",)\n )\n x_index = xr.DataArray(\n np.arange(1, ncol + 1), coords={\"x\": self.dataset.coords[\"x\"]}, dims=(\"x\",)\n )\n rows, columns = xr.broadcast(y_index, x_index)\n\n self.dataset[\"rows\"] = rows\n self.dataset[\"columns\"] = columns\n\n y_grid, x_grid = xr.broadcast(self.dataset[\"y\"], self.dataset[\"x\"])\n\n self.dataset[\"x_grid\"] = x_grid\n self.dataset[\"y_grid\"] = y_grid\n\n def get_output_settings(self):\n grid = self.dataset[\"area\"]\n dx, xmin, _, dy, ymin, _ = spatial_reference(grid)\n ncol = grid[\"x\"].size\n nrow = grid[\"y\"].size\n\n # If non-equidistant, spatial_reference returned a 1d array instead of\n # float\n if (not np.isscalar(dx)) or (not np.isscalar(dy)):\n raise ValueError(\"MetaSWAP only supports equidistant grids\")\n\n nodata = self.dataset[\"nodata\"].values\n\n return dict(\n simgro_opt=-1,\n idf_per=1,\n idf_dx=dx,\n idf_dy=np.abs(dy),\n idf_ncol=ncol,\n idf_nrow=nrow,\n idf_xmin=xmin,\n idf_ymin=ymin,\n idf_nodata=nodata,\n )" }, { "identifier": "Infiltration", "path": "imod/msw/infiltration.py", "snippet": "class Infiltration(MetaSwapPackage):\n \"\"\"\n This contains the infiltration data.\n\n This class is responsible for the file `infi_svat.inp`\n\n Parameters\n ----------\n infiltration_capacity: array of floats (xr.DataArray)\n Describes the infiltration capacity of SVAT units. This array must have\n a subunit coordinate to describe different land uses.\n downward_resistance: array of floats (xr.DataArray)\n Describes the downward resisitance of SVAT units. Set to -9999.0 to make\n MetaSWAP ignore this resistance. This array must not have a subunit\n coordinate.\n upward_resistance: array of floats (xr.DataArray)\n Describes the upward resistance of SVAT units. Set to -9999.0 to make\n MetaSWAP ignore this resistance. This array must not have a subunit\n coordinate.\n bottom_resistance: array of floats (xr.DataArray)\n Describes the infiltration capacity of SVAT units. Set to -9999.0 to\n make MetaSWAP ignore this resistance. 
This array must not have a subunit\n coordinate.\n extra_storage_coefficient: array of floats (xr.DataArray)\n Extra storage coefficient of phreatic layer. This array must not have a\n subunit coordinate.\n active: array of bools (xr.DataArray)\n Describes whether SVAT units are active or not. This array must not have\n a subunit coordinate.\n \"\"\"\n\n _file_name = \"infi_svat.inp\"\n _metadata_dict = {\n \"svat\": VariableMetaData(10, 1, 99999999, int),\n \"infiltration_capacity\": VariableMetaData(8, 0.0, 1000.0, float),\n \"downward_resistance\": VariableMetaData(8, -9999.0, 999999.0, float),\n \"upward_resistance\": VariableMetaData(8, -9999.0, 999999.0, float),\n \"bottom_resistance\": VariableMetaData(8, -9999.0, 999999.0, float),\n \"extra_storage_coefficient\": VariableMetaData(8, 0.01, 1.0, float),\n }\n\n _with_subunit = (\"infiltration_capacity\",)\n _without_subunit = (\n \"downward_resistance\",\n \"upward_resistance\",\n \"bottom_resistance\",\n \"extra_storage_coefficient\",\n )\n _to_fill = ()\n\n def __init__(\n self,\n infiltration_capacity: xr.DataArray,\n downward_resistance: xr.DataArray,\n upward_resistance: xr.DataArray,\n bottom_resistance: xr.DataArray,\n extra_storage_coefficient: xr.DataArray,\n ):\n super().__init__()\n self.dataset[\"infiltration_capacity\"] = infiltration_capacity\n self.dataset[\"downward_resistance\"] = downward_resistance\n self.dataset[\"upward_resistance\"] = upward_resistance\n self.dataset[\"bottom_resistance\"] = bottom_resistance\n self.dataset[\"extra_storage_coefficient\"] = extra_storage_coefficient\n\n self._pkgcheck()" }, { "identifier": "InitialConditionsEquilibrium", "path": "imod/msw/initial_conditions.py", "snippet": "class InitialConditionsEquilibrium(MetaSwapPackage):\n \"\"\"\n Use an equilibrium profile to initialize the model.\n\n This class is responsible for the file `init_svat.inp`\n \"\"\"\n\n _file_name = \"init_svat.inp\"\n _option = \"Equilibrium\"\n _metadata_dict = {}\n\n def __init__(self):\n super().__init__()\n\n def _render(self, file, *args):\n file.write(self._option + \"\\n\")" }, { "identifier": "InitialConditionsPercolation", "path": "imod/msw/initial_conditions.py", "snippet": "class InitialConditionsPercolation(MetaSwapPackage):\n \"\"\"\n The precipitation intensity at the starting time (iybg, tdbg in\n PARA_SIM.INP) is used for initializing the percolation flux in the profiles.\n This type of initialization is normally done separately from the actual run,\n using a specially prepared meteo-input file. 
After letting the model reach\n near equilibrium by letting it run for a number of years, the saved state is\n used for the initialization of subsequent runs.\n\n This class is responsible for the file `init_svat.inp`\n \"\"\"\n\n _file_name = \"init_svat.inp\"\n _option = \"MeteoInputP\"\n _metadata_dict = {}\n\n def __init__(self):\n super().__init__()\n\n def _render(self, file, *args):\n file.write(self._option + \"\\n\")" }, { "identifier": "InitialConditionsRootzonePressureHead", "path": "imod/msw/initial_conditions.py", "snippet": "class InitialConditionsRootzonePressureHead(MetaSwapPackage):\n \"\"\"\n Use the pF-value of the root zone pressure head as initial condition.\n\n This class is responsible for the file `init_svat.inp`\n\n Parameters\n ----------\n initial_pF: float\n Initial pF value to be used for all soil columns.\n \"\"\"\n\n _file_name = \"init_svat.inp\"\n _option = \"Rootzone_pF\"\n _metadata_dict = {\n \"initial_pF\": VariableMetaData(6, 0.0, 6.0, float),\n }\n\n def __init__(self, initial_pF=2.2):\n super().__init__()\n self.dataset[\"initial_pF\"] = initial_pF\n\n def _render(self, file, *args):\n file.write(self._option + \"\\n\")\n\n dataframe = self.dataset.assign_coords(index=[0]).to_dataframe()\n\n self.write_dataframe_fixed_width(file, dataframe)" }, { "identifier": "InitialConditionsSavedState", "path": "imod/msw/initial_conditions.py", "snippet": "class InitialConditionsSavedState(MetaSwapPackage):\n \"\"\"\n Use saved state of a previous MetaSWAP run as initial condition.\n\n This class is responsible for the file `init_svat.inp`\n\n Parameters\n ----------\n saved_state: Path or str\n Path to a previously saved state. This file will be copied to\n init_svat.inp.\n\n \"\"\"\n\n _file_name = \"init_svat.inp\"\n _option = \"Saved_State\"\n _metadata_dict = {}\n\n def __init__(self, saved_state):\n super().__init__()\n self.saved_state = saved_state\n\n def write(self, directory, *args):\n directory = pathlib.Path(directory)\n filename = directory / self._file_name\n\n shutil.copyfile(self.saved_state, filename)" }, { "identifier": "LanduseOptions", "path": "imod/msw/landuse.py", "snippet": "class LanduseOptions(MetaSwapPackage):\n \"\"\"\n Land use options. This object is responsible for luse_svat.inp\n\n Parameters\n ----------\n landuse_name: array of strings (xr.DataArray)\n Names of land use\n vegetation_index: array of integers (xr.DataArray)\n Vegetation indices\n jarvis_o2_stress: array of floats (xr.DataArray)\n Jarvis parameter for oxygen stress\n jarvis_drought_stress: array of floats (xr.DataArray)\n Jarvis parameter for drought stress\n feddes_p1: array of floats (xr.DataArray)\n p1 (m) in Feddes function for transpiration reduction\n feddes_p2: array of floats (xr.DataArray)\n p2 (m) in Feddes function for transpiration reduction\n feddes_p3h: array of floats (xr.DataArray)\n p3h (m) in Feddes function for transpiration reduction\n feddes_p3l: array of floats (xr.DataArray)\n p3l (m) in Feddes function for transpiration reduction\n feddes_p4: array of floats (xr.DataArray)\n p4 (m) in Feddes function for transpiration reduction\n feddes_t3h: array of floats (xr.DataArray)\n t3h (mm/d) in Feddes function for transpiration reduction\n feddes_t3l: array of floats (xr.DataArray)\n t3l (mm/d) in Feddes function for transpiration reduction\n threshold_sprinkling: array of floats (xr.DataArray)\n If <0, pressure head (m) at which sprinkling begins. 
If >0 drought\n stress at which sprinkling begins.\n fraction_evaporated_sprinkling: array of floats (xr.DataArray)\n Fraction evaporated sprinkling water\n gift: array of floats (xr.DataArray)\n Gift (mm) during rotational period\n gift_duration: array of floats (xr.DataArray)\n Gift duration (d)\n rotational_period: array of floats (xr.DataArray)\n Rotational period (d)\n start_sprinkling_season: array of floats (xr.DataArray)\n Day of year at which sprinkling season starts (d)\n end_sprinkling_season: array of floats (xr.DataArray)\n Day of year at which sprinkling season ends (d)\n interception_option: array of integers (xr.DataAray)\n Choose interception model. 0=Rutter, 1=Von Hoyningen. NOTE: option\n 2=GASH, but this is not supported by MetaSWAP v8.1.0.3 and lower\n interception_capacity_per_LAI: array of floats (xr.DataArray)\n Interception capacity (mm/LAI) will be set for both Rutter and Von\n Hoyningen.\n interception_intercept: array of floats (xr.DataArray)\n Intercept of the interception evaporation curve. Pun unintended.\n\n Notes\n -----\n No Penman-Monteith is supported in iMOD Python, so albedo, rsc, rsw, rsoil,\n kdif, and kdir cannot be specified. (We might create a seperate object for\n this if there is a demand for it.)\n\n The GASH model (interception_option = 2) and salt stress parameters Maas &\n Hoffman are not supported by MetaSWAP at the time of writing this class. So\n these are not supported.\n \"\"\"\n\n _metadata_dict = {\n \"landuse_index\": VariableMetaData(6, 1, 999, int),\n \"landuse_name\": VariableMetaData(20, None, None, str),\n \"vegetation_index\": VariableMetaData(6, 0.0, 1e6, int),\n # Jarvis stress\n # Columns 33-35 and 36-38, but both F6?\n \"jarvis_o2_stress\": VariableMetaData(3, 0.0, 1e6, float),\n \"jarvis_drought_stress\": VariableMetaData(3, 0.0, 1e6, float),\n # Feddes transpiration function\n \"feddes_p1\": VariableMetaData(8, -160.0, 100.0, float),\n \"feddes_p2\": VariableMetaData(8, -160.0, 100.0, float),\n \"feddes_p3h\": VariableMetaData(8, -160.0, 0.0, float),\n \"feddes_p3l\": VariableMetaData(8, -160.0, 0.0, float),\n \"feddes_p4\": VariableMetaData(8, -160.0, 0.0, float),\n \"feddes_t3h\": VariableMetaData(8, 0.1, 10.0, float),\n \"feddes_t3l\": VariableMetaData(8, 0.1, 10.0, float),\n # Sprinkling\n \"threshold_sprinkling\": VariableMetaData(8, -160.0, 1.0, float),\n \"fraction_evaporated_sprinkling\": VariableMetaData(8, 0.0, 1.0, float),\n \"gift\": VariableMetaData(8, 1.0, 1000.0, float),\n \"gift_duration\": VariableMetaData(8, 0.01, 1000.0, float),\n \"rotational_period\": VariableMetaData(6, 1.0, 366.0, float),\n \"start_sprinkling_season\": VariableMetaData(6, 0.0, 366.0, float),\n \"end_sprinkling_season\": VariableMetaData(6, 0.0, 366.0, float),\n # Penman-Monteith: not supported\n \"albedo\": VariableMetaData(8, None, None, str),\n \"rsc\": VariableMetaData(8, None, None, str),\n \"rsw\": VariableMetaData(8, None, None, str),\n \"rsoil\": VariableMetaData(8, None, None, str),\n \"kdif\": VariableMetaData(8, None, None, str),\n \"kdir\": VariableMetaData(8, None, None, str),\n # Interception\n \"interception_option\": VariableMetaData(6, 0, 2, int),\n \"interception_capacity_per_LAI_Rutter\": VariableMetaData(8, 0.0, 10.0, float),\n \"interception_intercept\": VariableMetaData(8, 0.0, 1.0, float),\n \"interception_capacity_per_LAI_VonHoyningen\": VariableMetaData(\n 8, 0.0, 10.0, float\n ),\n # Gash interception: not supported\n \"pfree\": VariableMetaData(8, None, None, str),\n \"pstem\": VariableMetaData(8, None, 
None, str),\n \"scanopy\": VariableMetaData(8, None, None, str),\n \"avprec\": VariableMetaData(8, None, None, str),\n \"avevap\": VariableMetaData(8, None, None, str),\n # Maas-Hoffman: not supported\n \"saltmax\": VariableMetaData(8, None, None, str),\n \"saltslope\": VariableMetaData(8, None, None, str),\n }\n\n _file_name = \"luse_svat.inp\"\n\n def __init__(\n self,\n landuse_name,\n vegetation_index,\n jarvis_o2_stress,\n jarvis_drought_stress,\n feddes_p1,\n feddes_p2,\n feddes_p3h,\n feddes_p3l,\n feddes_p4,\n feddes_t3h,\n feddes_t3l,\n threshold_sprinkling,\n fraction_evaporated_sprinkling,\n gift,\n gift_duration,\n rotational_period,\n start_sprinkling_season,\n end_sprinkling_season,\n interception_option,\n interception_capacity_per_LAI,\n interception_intercept,\n ):\n super().__init__()\n self.dataset[\"landuse_name\"] = landuse_name\n self.dataset[\"vegetation_index\"] = vegetation_index\n self.dataset[\"jarvis_o2_stress\"] = jarvis_o2_stress\n self.dataset[\"jarvis_drought_stress\"] = jarvis_drought_stress\n self.dataset[\"feddes_p1\"] = feddes_p1\n self.dataset[\"feddes_p2\"] = feddes_p2\n self.dataset[\"feddes_p3h\"] = feddes_p3h\n self.dataset[\"feddes_p3l\"] = feddes_p3l\n self.dataset[\"feddes_p4\"] = feddes_p4\n self.dataset[\"feddes_t3h\"] = feddes_t3h\n self.dataset[\"feddes_t3l\"] = feddes_t3l\n self.dataset[\"threshold_sprinkling\"] = threshold_sprinkling\n self.dataset[\"fraction_evaporated_sprinkling\"] = fraction_evaporated_sprinkling\n self.dataset[\"gift\"] = gift\n self.dataset[\"gift_duration\"] = gift_duration\n self.dataset[\"rotational_period\"] = rotational_period\n self.dataset[\"start_sprinkling_season\"] = start_sprinkling_season\n self.dataset[\"end_sprinkling_season\"] = end_sprinkling_season\n self.dataset[\"interception_option\"] = interception_option\n self.dataset[\n \"interception_capacity_per_LAI_Rutter\"\n ] = interception_capacity_per_LAI\n self.dataset[\n \"interception_capacity_per_LAI_VonHoyningen\"\n ] = interception_capacity_per_LAI\n self.dataset[\"interception_intercept\"] = interception_intercept\n\n self._pkgcheck()\n\n def _render(self, file, *args):\n dataframe = self.dataset.to_dataframe(\n dim_order=(\"landuse_index\",)\n ).reset_index()\n\n self._check_range(dataframe)\n\n # Find missing columns\n missing_keys = set(self._metadata_dict.keys()) ^ set(dataframe.columns)\n\n # Add missing columns\n for key in missing_keys:\n dataframe[key] = \"\"\n\n # Reorder columns to _metadata_dict order\n dataframe = dataframe[list(self._metadata_dict.keys())]\n\n return self.write_dataframe_fixed_width(file, dataframe)\n\n def _pkgcheck(self):\n dims = self.dataset.dims\n dims_expected = (\"landuse_index\",)\n if len(set(dims) - set(dims_expected)) > 0:\n raise ValueError(\n f\"Please provide DataArrays with dimensions {dims_expected}\"\n )" }, { "identifier": "MeteoGrid", "path": "imod/msw/meteo_grid.py", "snippet": "class MeteoGrid(MetaSwapPackage):\n \"\"\"\n This contains the meteorological grid data. Grids are written to ESRI ASCII\n files. The meteorological data requires a time coordinate. Next to a\n MeteoGrid instance, instances of PrecipitationMapping and\n EvapotranspirationMapping are required as well to specify meteorological\n information to MetaSWAP.\n\n This class is responsible for `mete_grid.inp`.\n\n Parameters\n ----------\n precipitation: array of floats (xr.DataArray)\n Contains the precipitation grids in mm/d. 
A time coordinate is required.\n evapotranspiration: array of floats (xr.DataArray)\n Contains the evapotranspiration grids in mm/d. A time coordinate is\n required.\n \"\"\"\n\n _file_name = \"mete_grid.inp\"\n _meteo_dirname = \"meteo_grids\"\n\n def __init__(self, precipitation: xr.DataArray, evapotranspiration: xr.DataArray):\n super().__init__()\n\n self.dataset[\"precipitation\"] = precipitation\n self.dataset[\"evapotranspiration\"] = evapotranspiration\n\n self._pkgcheck()\n\n def write_free_format_file(self, path: Union[str, Path], dataframe: pd.DataFrame):\n \"\"\"\n Write free format file. The mete_grid.inp file is free format.\n \"\"\"\n\n columns = list(self.dataset.data_vars)\n\n dataframe.loc[:, columns] = '\"' + dataframe[columns] + '\"'\n # Add required columns, which we will not use.\n # These are only used when WOFOST is used\n # TODO: Add support for temperature to allow WOFOST support\n wofost_columns = [\n \"minimum_day_temperature\",\n \"maximum_day_temperature\",\n \"mean_temperature\",\n ]\n dataframe.loc[:, wofost_columns] = '\"NoValue\"'\n\n self.check_string_lengths(dataframe)\n\n dataframe.to_csv(\n path, header=False, quoting=csv.QUOTE_NONE, float_format=\"%.4f\", index=False\n )\n\n def _compose_filename(\n self, d: dict, directory: Path, pattern: Optional[str] = None\n ):\n \"\"\"\n Construct a filename, following the iMOD conventions.\n\n\n Parameters\n ----------\n d : dict\n dict of parts (time, layer) for filename.\n pattern : string or re.pattern\n Format to create pattern for.\n\n Returns\n -------\n str\n Absolute path.\n\n \"\"\"\n return str(directory / util.compose(d, pattern))\n\n def _is_grid(self, varname: str):\n coords = self.dataset[varname].coords\n\n if \"y\" not in coords and \"x\" not in coords:\n return False\n else:\n return True\n\n def _compose_dataframe(self, times: np.array):\n dataframe = pd.DataFrame(index=times)\n\n year, time_since_start_year = to_metaswap_timeformat(times)\n\n dataframe[\"time_since_start_year\"] = time_since_start_year\n dataframe[\"year\"] = year\n\n # Data dir is always relative to model dir, so don't use model directory\n # here\n data_dir = Path(\".\") / self._meteo_dirname\n\n for varname in self.dataset.data_vars:\n # If grid, we have to add the filename of the .asc to be written\n if self._is_grid(varname):\n dataframe[varname] = [\n self._compose_filename(\n dict(time=time, name=varname, extension=\".asc\"),\n directory=data_dir,\n )\n for time in times\n ]\n else:\n dataframe[varname] = self.dataset[varname].values.astype(str)\n\n return dataframe\n\n def check_string_lengths(self, dataframe: pd.DataFrame):\n \"\"\"\n Check if strings lengths do not exceed 256 characters.\n With absolute paths this might be an issue.\n \"\"\"\n\n # Because two quote marks are added later.\n character_limit = 254\n\n columns = list(self.dataset.data_vars)\n\n str_too_long = [\n np.any(dataframe[varname].str.len() > character_limit)\n for varname in columns\n ]\n\n if any(str_too_long):\n indexes_true = np.where(str_too_long)[0]\n too_long_columns = list(np.array(columns)[indexes_true])\n raise ValueError(\n f\"Encountered strings longer than 256 characters in columns: {too_long_columns}\"\n )\n\n def write(self, directory: Union[str, Path], *args):\n \"\"\"\n Write mete_grid.inp and accompanying ASCII grid files.\n\n Parameters\n ----------\n directory: str or Path\n directory to write file in.\n \"\"\"\n\n directory = Path(directory)\n\n times = self.dataset[\"time\"].values\n\n dataframe = 
self._compose_dataframe(times)\n self.write_free_format_file(directory / self._file_name, dataframe)\n\n # Write grid data to ESRI ASCII files\n for varname in self.dataset.data_vars:\n if self._is_grid(varname):\n path = (directory / self._meteo_dirname / varname).with_suffix(\".asc\")\n imod.rasterio.save(path, self.dataset[varname], nodata=-9999.0)\n\n def _pkgcheck(self):\n for varname in self.dataset.data_vars:\n coords = self.dataset[varname].coords\n if \"time\" not in coords:\n raise ValueError(f\"No 'time' coordinate included in {varname}\")\n\n allowed_dims = [\"time\", \"y\", \"x\"]\n\n excess_dims = set(self.dataset[varname].dims) - set(allowed_dims)\n if len(excess_dims) > 0:\n raise ValueError(\n f\"Received excess dims {excess_dims} in {self.__class__} for \"\n f\"{varname}, please provide data with {allowed_dims}\"\n )" }, { "identifier": "EvapotranspirationMapping", "path": "imod/msw/meteo_mapping.py", "snippet": "class EvapotranspirationMapping(MeteoMapping):\n \"\"\"\n This contains the data to connect evapotranspiration grid cells to MetaSWAP\n svats. The evapotranspiration grid does not have to be equal to the metaswap\n grid: connections between the evapotranspiration cells to svats will be\n established using a nearest neighbour lookup.\n\n This class is responsible for the file `svat2etrefgrid.inp`.\n\n Parameters\n ----------\n evapotransporation: array of floats (xr.DataArray)\n Describes the evapotransporation data. The extend of the grid must be\n larger than the MetaSvap grid. The data must also be coarser than the\n MetaSvap grid.\n \"\"\"\n\n _file_name = \"svat2etrefgrid.inp\"\n _metadata_dict = {\n \"svat\": VariableMetaData(10, None, None, int),\n \"row\": VariableMetaData(10, None, None, int),\n \"column\": VariableMetaData(10, None, None, int),\n }\n\n def __init__(\n self,\n evapotranspiration: xr.DataArray,\n ):\n super().__init__()\n self.meteo = evapotranspiration" }, { "identifier": "PrecipitationMapping", "path": "imod/msw/meteo_mapping.py", "snippet": "class PrecipitationMapping(MeteoMapping):\n \"\"\"\n This contains the data to connect precipitation grid cells to MetaSWAP\n svats. The precipitation grid does not have to be equal to the metaswap\n grid: connections between the precipitation cells to svats will be\n established using a nearest neighbour lookup.\n\n This class is responsible for the file `svat2precgrid.inp`.\n\n Parameters\n ----------\n precipitation: array of floats (xr.DataArray)\n Describes the precipitation data. The extend of the grid must be larger\n than the MetaSvap grid. The data must also be coarser than the MetaSvap\n grid.\n \"\"\"\n\n _file_name = \"svat2precgrid.inp\"\n _metadata_dict = {\n \"svat\": VariableMetaData(10, None, None, int),\n \"row\": VariableMetaData(10, None, None, int),\n \"column\": VariableMetaData(10, None, None, int),\n }\n\n def __init__(\n self,\n precipitation: xr.DataArray,\n ):\n super().__init__()\n self.meteo = precipitation" }, { "identifier": "TimeOutputControl", "path": "imod/msw/output_control.py", "snippet": "class TimeOutputControl(MetaSwapPackage):\n \"\"\"\n Specify the accumulation periods which will be used to write output. 
For\n example, say the model computes on a daily timestep, but timesteps two days\n apart are specified, the summed fluxes of each two days are written by\n MetaSWAP.\n\n Parameters\n ----------\n time: xr.DataArray\n Timesteps at which to write output.\n \"\"\"\n\n _file_name = \"tiop_sim.inp\"\n _settings = {}\n _metadata_dict = {\n \"time_since_start_year\": VariableMetaData(15, 0.0, 366.0, float),\n \"year\": VariableMetaData(6, 1, 9999, int),\n \"option\": VariableMetaData(6, 1, 7, int),\n }\n\n def __init__(self, time):\n super().__init__()\n\n self.dataset[\"times\"] = time\n\n def _render(self, file, *args):\n year, time_since_start_year = to_metaswap_timeformat(self.dataset[\"times\"])\n\n dataframe = pd.DataFrame(\n data=dict(time_since_start_year=time_since_start_year, year=year)\n )\n\n dataframe[\"time_since_start_year\"] += 1\n dataframe[\"option\"] = 7\n\n self._check_range(dataframe)\n\n return self.write_dataframe_fixed_width(file, dataframe)" }, { "identifier": "MetaSwapPackage", "path": "imod/msw/pkgbase.py", "snippet": "class MetaSwapPackage(abc.ABC):\n \"\"\"\n MetaSwapPackage is used to share methods for Metaswap packages.\n\n It is not meant to be used directly, only to inherit from, to implement new\n packages.\n \"\"\"\n\n __slots__ = \"_pkg_id\"\n\n def __init__(self):\n self.dataset = xr.Dataset()\n\n def __getitem__(self, key):\n return self.dataset.__getitem__(key)\n\n def __setitem__(self, key, value):\n self.dataset.__setitem__(key, value)\n\n def isel(self):\n raise NotImplementedError(\n f\"Selection on packages not yet supported. \"\n f\"To make a selection on the xr.Dataset, \"\n f\"call {self._pkg_id}.dataset.isel instead. \"\n f\"You can create a new package with a selection by calling: \"\n f\"{__class__.__name__}(**{self._pkg_id}.dataset.isel(**selection))\"\n )\n\n def sel(self):\n raise NotImplementedError(\n f\"Selection on packages not yet supported. \"\n f\"To make a selection on the xr.Dataset, \"\n f\"call {self._pkg_id}.dataset.sel instead. \"\n f\"You can create a new package with a selection by calling: \"\n f\"{__class__.__name__}(**{self._pkg_id}.dataset.sel(**selection))\"\n )\n\n def write(self, directory: Union[str, Path], index: np.ndarray, svat: xr.DataArray):\n \"\"\"\n Write MetaSWAP package to its corresponding fixed format file. This has\n the `.inp` extension.\n \"\"\"\n directory = Path(directory)\n\n filename = directory / self._file_name\n with open(filename, \"w\") as f:\n self._render(f, index, svat)\n\n def _check_range(self, dataframe):\n \"\"\"\n Check if provided data does not exceeds MetaSWAPs ranges. 
These ranges\n are specified in the ``self._metadata_dict`` for each variable.\n \"\"\"\n for varname in dataframe:\n min_value = self._metadata_dict[varname].min_value\n max_value = self._metadata_dict[varname].max_value\n if (dataframe[varname] < min_value).any() or (\n dataframe[varname] > max_value\n ).any():\n raise ValueError(\n f\"{varname}: not all values are within range ({min_value}-{max_value}).\"\n )\n\n def write_dataframe_fixed_width(self, file, dataframe):\n \"\"\"Write dataframe to fixed format file.\"\"\"\n for row in dataframe.itertuples():\n for index, metadata in enumerate(self._metadata_dict.values()):\n content = format_fixed_width(row[index + 1], metadata)\n file.write(content)\n file.write(\"\\n\")\n\n def _index_da(self, da, index):\n \"\"\"\n Helper method that converts a DataArray to a 1d numpy array, and\n consequently applies boolean indexing.\n \"\"\"\n return da.values.ravel()[index]\n\n def _render(self, file, index, svat):\n \"\"\"\n Collect to be written data in a DataFrame and call\n ``self.write_dataframe_fixed_width``\n \"\"\"\n data_dict = {\"svat\": svat.values.ravel()[index]}\n\n subunit = svat.coords[\"subunit\"]\n\n for var in self._with_subunit:\n data_dict[var] = self._index_da(self.dataset[var], index)\n\n for var in self._without_subunit:\n da = self.dataset[var].expand_dims(subunit=subunit)\n data_dict[var] = self._index_da(da, index)\n\n for var in self._to_fill:\n data_dict[var] = \"\"\n\n dataframe = pd.DataFrame(\n data=data_dict, columns=list(self._metadata_dict.keys())\n )\n\n self._check_range(dataframe)\n\n return self.write_dataframe_fixed_width(file, dataframe)\n\n def _pkgcheck(self):\n \"\"\"\n Method to do package checks. The base class version checks if provided\n data has a subunit coordinate or not.\n \"\"\"\n for var in self._with_subunit:\n if \"subunit\" not in self.dataset[var].coords:\n raise ValueError(\n f\"Variable '{var}' in {self.__class__} should contain \"\n \"'subunit' coordinate\"\n )\n for var in self._without_subunit:\n if \"subunit\" in self.dataset[var].coords:\n raise ValueError(\n f\"Variable '{var}' in {self.__class__} should not \"\n \"contain 'subunit' coordinate\"\n )" }, { "identifier": "to_metaswap_timeformat", "path": "imod/msw/timeutil.py", "snippet": "def to_metaswap_timeformat(times):\n \"\"\"\n Convert times to MetaSWAP's own time format, which consists of a year as\n integer and the number of days since the start of the year as float.\n\n Returns\n -------\n tuple\n Consists of the year as integer and the number of days since the\n start of the year as float.\n\n \"\"\"\n\n # TODO: Also support cftime\n times = pd.DatetimeIndex(times)\n\n year = times.year\n\n # MetaSWAP requires a days since start year\n days_since_start_year = times.day_of_year.astype(np.float64) - 1.0\n # Behind the decimal is the time since start day\n time_since_start_day = times.hour / 24 + times.minute / 1440 + times.second / 86400\n\n time_since_start_year = days_since_start_year + time_since_start_day\n\n return year, time_since_start_year" }, { "identifier": "AnnualCropFactors", "path": "imod/msw/vegetation.py", "snippet": "class AnnualCropFactors(MetaSwapPackage):\n \"\"\"\n For each vegetation type specify a yearly trend in vegetation factors and\n interception characteristics. These are used if WOFOST is not used.\n\n This class is responsible for the file `fact_svat.inp`.\n\n Parameters\n ----------\n soil_cover: array of floats (xr.DataArray)\n Soil cover in m2/m2. 
Must have a \"vegetation_index\" and \"day_of_year\" a\n coordinates.\n leaf_area_index: array of floats (xr.DataArray)\n Leaf area index in m2/m2. Must have a \"vegetation_index\" and\n \"day_of_year\" a coordinates.\n interception_capacity: array of floats (xr.DataArray)\n Interception capacity in m3/m2. Must have a \"vegetation_index\" and\n \"day_of_year\" a coordinates.\n vegetation_factor: array of floats (xr.DataArray)\n Vegetation factor. Must have a \"vegetation_index\" and \"day_of_year\" a\n coordinates.\n interception_factor: array of floats (xr.DataArray)\n Interception evaporation factor. Must have a \"vegetation_index\" and\n \"day_of_year\" a coordinates.\n bare_soil_factor: array of floats (xr.DataArray)\n Bare soil evaporation factor. Must have a \"vegetation_index\" and\n \"day_of_year\" a coordinates.\n ponding_factor: array of floats (xr.DataArray)\n Ponding factor. Must have a \"vegetation_index\" and \"day_of_year\" a\n coordinates.\n \"\"\"\n\n _file_name = \"fact_svat.inp\"\n _metadata_dict = {\n \"vegetation_index\": VariableMetaData(6, 0, 999, int),\n \"day_of_year\": VariableMetaData(6, 1, 366, int),\n \"soil_cover\": VariableMetaData(8, 0.0, 1.0, float),\n \"leaf_area_index\": VariableMetaData(8, 0.0, 10.0, float),\n \"interception_capacity\": VariableMetaData(8, 0.0, 0.1, float),\n # io manual: min value vegetation_factor = 0.1, but example file has 0.\n # and works\n \"vegetation_factor\": VariableMetaData(8, 0.0, 10.0, float),\n \"interception_factor\": VariableMetaData(8, 0.01, 10.0, float),\n \"bare_soil_factor\": VariableMetaData(8, 0.01, 10.0, float),\n \"ponding_factor\": VariableMetaData(8, 0.01, 10.0, float),\n }\n\n def __init__(\n self,\n soil_cover: xr.DataArray,\n leaf_area_index: xr.DataArray,\n interception_capacity: xr.DataArray,\n vegetation_factor: xr.DataArray,\n interception_factor: xr.DataArray,\n bare_soil_factor: xr.DataArray,\n ponding_factor: xr.DataArray,\n ):\n super().__init__()\n self.dataset[\"soil_cover\"] = soil_cover\n self.dataset[\"leaf_area_index\"] = leaf_area_index\n self.dataset[\"interception_capacity\"] = interception_capacity\n self.dataset[\"vegetation_factor\"] = vegetation_factor\n self.dataset[\"interception_factor\"] = interception_factor\n self.dataset[\"bare_soil_factor\"] = bare_soil_factor\n self.dataset[\"ponding_factor\"] = ponding_factor\n\n self._pkgcheck()\n\n def _render(self, file, *args):\n dataframe = self.dataset.to_dataframe(\n dim_order=(\"vegetation_index\", \"day_of_year\")\n ).reset_index()\n\n self._check_range(dataframe)\n\n return self.write_dataframe_fixed_width(file, dataframe)\n\n def _pkgcheck(self):\n dims = self.dataset.dims\n dims_expected = (\"day_of_year\", \"vegetation_index\")\n if len(set(dims) - set(dims_expected)) > 0:\n raise ValueError(\n f\"Please provide DataArrays with dimensions {dims_expected}\"\n )\n\n day_of_year = self.dataset.coords[\"day_of_year\"].values\n if not np.all(day_of_year == np.arange(1, 367)):\n raise ValueError(r\"Not all days of the year included in data.\")" } ]
import collections
import jinja2
import numpy as np
from copy import copy
from pathlib import Path
from typing import Union
from imod.msw.coupler_mapping import CouplerMapping
from imod.msw.grid_data import GridData
from imod.msw.idf_mapping import IdfMapping
from imod.msw.infiltration import Infiltration
from imod.msw.initial_conditions import (
    InitialConditionsEquilibrium,
    InitialConditionsPercolation,
    InitialConditionsRootzonePressureHead,
    InitialConditionsSavedState,
)
from imod.msw.landuse import LanduseOptions
from imod.msw.meteo_grid import MeteoGrid
from imod.msw.meteo_mapping import EvapotranspirationMapping, PrecipitationMapping
from imod.msw.output_control import TimeOutputControl
from imod.msw.pkgbase import MetaSwapPackage
from imod.msw.timeutil import to_metaswap_timeformat
from imod.msw.vegetation import AnnualCropFactors
13,279
class Model(collections.UserDict): def __setitem__(self, key, value): # TODO: Add packagecheck super().__setitem__(key, value) def update(self, *args, **kwargs): for k, v in dict(*args, **kwargs).items(): self[k] = v class MetaSwapModel(Model): """ Contains data and writes consistent model input files Parameters ---------- unsaturated_database: Path-like or str Path to the MetaSWAP soil physical database folder. """ _pkg_id = "model" _file_name = "para_sim.inp" _template = jinja2.Template( "{%for setting, value in settings.items()%}" "{{setting}} = {{value}}\n" "{%endfor%}" ) def __init__(self, unsaturated_database): super().__init__() self.simulation_settings = copy(DEFAULT_SETTINGS) self.simulation_settings[ "unsa_svat_path" ] = self._render_unsaturated_database_path(unsaturated_database) def _render_unsaturated_database_path(self, unsaturated_database): # Force to Path object unsaturated_database = Path(unsaturated_database) # Render to string for MetaSWAP if unsaturated_database.is_absolute(): return f'"{unsaturated_database}\\"' else: # TODO: Test if this is how MetaSWAP accepts relative paths return f'"${unsaturated_database}\\"' def _check_required_packages(self): pkg_types_included = {type(pkg) for pkg in self.values()} missing_packages = set(REQUIRED_PACKAGES) - pkg_types_included if len(missing_packages) > 0: raise ValueError( f"Missing the following required packages: {missing_packages}" ) initial_condition_set = pkg_types_included & set(INITIAL_CONDITIONS_PACKAGES) if len(initial_condition_set) < 1: raise ValueError( "Missing InitialCondition package, assign one of " f"{INITIAL_CONDITIONS_PACKAGES}" ) elif len(initial_condition_set) > 1: raise ValueError( "Multiple InitialConditions assigned, choose one of " f"{initial_condition_set}" ) def _check_landuse_indices_in_lookup_options(self): grid_key = self._get_pkg_key(GridData) landuse_options_key = self._get_pkg_key(LanduseOptions) indices_in_grid = set(self[grid_key]["landuse"].values.ravel()) indices_in_options = set( self[landuse_options_key].dataset.coords["landuse_index"].values ) missing_indices = indices_in_grid - indices_in_options if len(missing_indices) > 0: raise ValueError( "Found the following landuse indices in GridData which " f"were not in LanduseOptions: {missing_indices}" ) def _check_vegetation_indices_in_annual_crop_factors(self): landuse_options_key = self._get_pkg_key(LanduseOptions) annual_crop_factors_key = self._get_pkg_key(AnnualCropFactors) indices_in_options = set( np.unique(self[landuse_options_key]["vegetation_index"]) ) indices_in_crop_factors = set( self[annual_crop_factors_key].dataset.coords["vegetation_index"].values ) missing_indices = indices_in_options - indices_in_crop_factors if len(missing_indices) > 0: raise ValueError( "Found the following vegetation indices in LanduseOptions " f"which were not in AnnualCropGrowth: {missing_indices}" ) def _get_starttime(self): """ Loop over all packages to get the minimum time. MetaSWAP requires a starttime in its simulation settings (para_sim.inp) """ starttimes = [] for pkgname in self: ds = self[pkgname].dataset if "time" in ds.coords: starttimes.append(ds["time"].min().values) starttime = min(starttimes)
REQUIRED_PACKAGES = ( GridData, CouplerMapping, Infiltration, LanduseOptions, MeteoGrid, EvapotranspirationMapping, PrecipitationMapping, IdfMapping, TimeOutputControl, AnnualCropFactors, ) INITIAL_CONDITIONS_PACKAGES = ( InitialConditionsEquilibrium, InitialConditionsPercolation, InitialConditionsRootzonePressureHead, InitialConditionsSavedState, ) DEFAULT_SETTINGS = dict( vegetation_mdl=1, evapotranspiration_mdl=1, saltstress_mdl=0, surfacewater_mdl=0, infilimsat_opt=0, netcdf_per=0, postmsw_opt=0, dtgw=1.0, dtsw=1.0, ipstep=2, nxlvage_dim=366, co2=404.32, fact_beta2=1.0, rcsoil=0.15, iterur1=3, iterur2=5, tdbgsm=91.0, tdedsm=270.0, clocktime=0, ) class Model(collections.UserDict): def __setitem__(self, key, value): # TODO: Add packagecheck super().__setitem__(key, value) def update(self, *args, **kwargs): for k, v in dict(*args, **kwargs).items(): self[k] = v class MetaSwapModel(Model): """ Contains data and writes consistent model input files Parameters ---------- unsaturated_database: Path-like or str Path to the MetaSWAP soil physical database folder. """ _pkg_id = "model" _file_name = "para_sim.inp" _template = jinja2.Template( "{%for setting, value in settings.items()%}" "{{setting}} = {{value}}\n" "{%endfor%}" ) def __init__(self, unsaturated_database): super().__init__() self.simulation_settings = copy(DEFAULT_SETTINGS) self.simulation_settings[ "unsa_svat_path" ] = self._render_unsaturated_database_path(unsaturated_database) def _render_unsaturated_database_path(self, unsaturated_database): # Force to Path object unsaturated_database = Path(unsaturated_database) # Render to string for MetaSWAP if unsaturated_database.is_absolute(): return f'"{unsaturated_database}\\"' else: # TODO: Test if this is how MetaSWAP accepts relative paths return f'"${unsaturated_database}\\"' def _check_required_packages(self): pkg_types_included = {type(pkg) for pkg in self.values()} missing_packages = set(REQUIRED_PACKAGES) - pkg_types_included if len(missing_packages) > 0: raise ValueError( f"Missing the following required packages: {missing_packages}" ) initial_condition_set = pkg_types_included & set(INITIAL_CONDITIONS_PACKAGES) if len(initial_condition_set) < 1: raise ValueError( "Missing InitialCondition package, assign one of " f"{INITIAL_CONDITIONS_PACKAGES}" ) elif len(initial_condition_set) > 1: raise ValueError( "Multiple InitialConditions assigned, choose one of " f"{initial_condition_set}" ) def _check_landuse_indices_in_lookup_options(self): grid_key = self._get_pkg_key(GridData) landuse_options_key = self._get_pkg_key(LanduseOptions) indices_in_grid = set(self[grid_key]["landuse"].values.ravel()) indices_in_options = set( self[landuse_options_key].dataset.coords["landuse_index"].values ) missing_indices = indices_in_grid - indices_in_options if len(missing_indices) > 0: raise ValueError( "Found the following landuse indices in GridData which " f"were not in LanduseOptions: {missing_indices}" ) def _check_vegetation_indices_in_annual_crop_factors(self): landuse_options_key = self._get_pkg_key(LanduseOptions) annual_crop_factors_key = self._get_pkg_key(AnnualCropFactors) indices_in_options = set( np.unique(self[landuse_options_key]["vegetation_index"]) ) indices_in_crop_factors = set( self[annual_crop_factors_key].dataset.coords["vegetation_index"].values ) missing_indices = indices_in_options - indices_in_crop_factors if len(missing_indices) > 0: raise ValueError( "Found the following vegetation indices in LanduseOptions " f"which were not in AnnualCropGrowth: {missing_indices}" ) def 
_get_starttime(self): """ Loop over all packages to get the minimum time. MetaSWAP requires a starttime in its simulation settings (para_sim.inp) """ starttimes = [] for pkgname in self: ds = self[pkgname].dataset if "time" in ds.coords: starttimes.append(ds["time"].min().values) starttime = min(starttimes)
year, time_since_start_year = to_metaswap_timeformat([starttime])
14
2023-12-08 13:57:59+00:00
16k
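For the record above, the completion target calls `to_metaswap_timeformat`, whose source is quoted in full in the context: MetaSWAP expects each timestamp as an integer year plus a float counting days (with the time of day behind the decimal point) since the start of that year, which is what `_get_starttime` needs for `para_sim.inp`. A self-contained sketch of the same conversion, with an invented example timestamp, shows what the split looks like:

# Sketch of the conversion implemented by imod.msw.timeutil.to_metaswap_timeformat
# (quoted in the context above); the example timestamp is invented.
import numpy as np
import pandas as pd

def to_metaswap_timeformat(times):
    times = pd.DatetimeIndex(times)
    year = times.year
    # Days since the start of the year, 0.0 for 1 January ...
    days_since_start_year = times.day_of_year.astype(np.float64) - 1.0
    # ... plus the time of day as a fraction of a day.
    time_since_start_day = times.hour / 24 + times.minute / 1440 + times.second / 86400
    return year, days_since_start_year + time_since_start_day

year, time_since_start_year = to_metaswap_timeformat(["2023-01-02 12:00:00"])
print(year[0], time_since_start_year[0])  # 2023 1.5 -> noon on 2 January 2023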
camenduru/MotionDirector-hf
MotionDirector_train.py
[ { "identifier": "UNet3DConditionModel", "path": "models/unet_3d_condition.py", "snippet": "class UNet3DConditionModel(ModelMixin, ConfigMixin):\n r\"\"\"\n UNet3DConditionModel is a conditional 2D UNet model that takes in a noisy sample, conditional state, and a timestep\n and returns sample shaped output.\n\n This model inherits from [`ModelMixin`]. Check the superclass documentation for the generic methods the library\n implements for all the models (such as downloading or saving, etc.)\n\n Parameters:\n sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`):\n Height and width of input/output sample.\n in_channels (`int`, *optional*, defaults to 4): The number of channels in the input sample.\n out_channels (`int`, *optional*, defaults to 4): The number of channels in the output.\n down_block_types (`Tuple[str]`, *optional*, defaults to `(\"CrossAttnDownBlock2D\", \"CrossAttnDownBlock2D\", \"CrossAttnDownBlock2D\", \"DownBlock2D\")`):\n The tuple of downsample blocks to use.\n up_block_types (`Tuple[str]`, *optional*, defaults to `(\"UpBlock2D\", \"CrossAttnUpBlock2D\", \"CrossAttnUpBlock2D\", \"CrossAttnUpBlock2D\",)`):\n The tuple of upsample blocks to use.\n block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`):\n The tuple of output channels for each block.\n layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block.\n downsample_padding (`int`, *optional*, defaults to 1): The padding to use for the downsampling convolution.\n mid_block_scale_factor (`float`, *optional*, defaults to 1.0): The scale factor to use for the mid block.\n act_fn (`str`, *optional*, defaults to `\"silu\"`): The activation function to use.\n norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization.\n If `None`, it will skip the normalization and activation layers in post-processing\n norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization.\n cross_attention_dim (`int`, *optional*, defaults to 1280): The dimension of the cross attention features.\n attention_head_dim (`int`, *optional*, defaults to 8): The dimension of the attention heads.\n \"\"\"\n\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optional[int] = None,\n in_channels: int = 4,\n out_channels: int = 4,\n down_block_types: Tuple[str] = (\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"DownBlock3D\",\n ),\n up_block_types: Tuple[str] = (\"UpBlock3D\", \"CrossAttnUpBlock3D\", \"CrossAttnUpBlock3D\", \"CrossAttnUpBlock3D\"),\n block_out_channels: Tuple[int] = (320, 640, 1280, 1280),\n layers_per_block: int = 2,\n downsample_padding: int = 1,\n mid_block_scale_factor: float = 1,\n act_fn: str = \"silu\",\n norm_num_groups: Optional[int] = 32,\n norm_eps: float = 1e-5,\n cross_attention_dim: int = 1024,\n attention_head_dim: Union[int, Tuple[int]] = 64,\n ):\n super().__init__()\n\n self.sample_size = sample_size\n self.gradient_checkpointing = False\n # Check inputs\n if len(down_block_types) != len(up_block_types):\n raise ValueError(\n f\"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}.\"\n )\n\n if len(block_out_channels) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. 
`down_block_types`: {down_block_types}.\"\n )\n\n if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}.\"\n )\n\n # input\n conv_in_kernel = 3\n conv_out_kernel = 3\n conv_in_padding = (conv_in_kernel - 1) // 2\n self.conv_in = nn.Conv2d(\n in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding\n )\n\n # time\n time_embed_dim = block_out_channels[0] * 4\n self.time_proj = Timesteps(block_out_channels[0], True, 0)\n timestep_input_dim = block_out_channels[0]\n\n self.time_embedding = TimestepEmbedding(\n timestep_input_dim,\n time_embed_dim,\n act_fn=act_fn,\n )\n\n self.transformer_in = TransformerTemporalModel(\n num_attention_heads=8,\n attention_head_dim=attention_head_dim,\n in_channels=block_out_channels[0],\n num_layers=1,\n )\n\n # class embedding\n self.down_blocks = nn.ModuleList([])\n self.up_blocks = nn.ModuleList([])\n\n if isinstance(attention_head_dim, int):\n attention_head_dim = (attention_head_dim,) * len(down_block_types)\n\n # down\n output_channel = block_out_channels[0]\n for i, down_block_type in enumerate(down_block_types):\n input_channel = output_channel\n output_channel = block_out_channels[i]\n is_final_block = i == len(block_out_channels) - 1\n\n down_block = get_down_block(\n down_block_type,\n num_layers=layers_per_block,\n in_channels=input_channel,\n out_channels=output_channel,\n temb_channels=time_embed_dim,\n add_downsample=not is_final_block,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[i],\n downsample_padding=downsample_padding,\n dual_cross_attention=False,\n )\n self.down_blocks.append(down_block)\n\n # mid\n self.mid_block = UNetMidBlock3DCrossAttn(\n in_channels=block_out_channels[-1],\n temb_channels=time_embed_dim,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n output_scale_factor=mid_block_scale_factor,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[-1],\n resnet_groups=norm_num_groups,\n dual_cross_attention=False,\n )\n\n # count how many layers upsample the images\n self.num_upsamplers = 0\n\n # up\n reversed_block_out_channels = list(reversed(block_out_channels))\n reversed_attention_head_dim = list(reversed(attention_head_dim))\n\n output_channel = reversed_block_out_channels[0]\n for i, up_block_type in enumerate(up_block_types):\n is_final_block = i == len(block_out_channels) - 1\n\n prev_output_channel = output_channel\n output_channel = reversed_block_out_channels[i]\n input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]\n\n # add upsample block for all BUT final layer\n if not is_final_block:\n add_upsample = True\n self.num_upsamplers += 1\n else:\n add_upsample = False\n\n up_block = get_up_block(\n up_block_type,\n num_layers=layers_per_block + 1,\n in_channels=input_channel,\n out_channels=output_channel,\n prev_output_channel=prev_output_channel,\n temb_channels=time_embed_dim,\n add_upsample=add_upsample,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=reversed_attention_head_dim[i],\n dual_cross_attention=False,\n )\n self.up_blocks.append(up_block)\n prev_output_channel = 
output_channel\n\n # out\n if norm_num_groups is not None:\n self.conv_norm_out = nn.GroupNorm(\n num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps\n )\n self.conv_act = nn.SiLU()\n else:\n self.conv_norm_out = None\n self.conv_act = None\n\n conv_out_padding = (conv_out_kernel - 1) // 2\n self.conv_out = nn.Conv2d(\n block_out_channels[0], out_channels, kernel_size=conv_out_kernel, padding=conv_out_padding\n )\n\n def set_attention_slice(self, slice_size):\n r\"\"\"\n Enable sliced attention computation.\n\n When this option is enabled, the attention module will split the input tensor in slices, to compute attention\n in several steps. This is useful to save some memory in exchange for a small speed decrease.\n\n Args:\n slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `\"auto\"`):\n When `\"auto\"`, halves the input to the attention heads, so attention will be computed in two steps. If\n `\"max\"`, maxium amount of memory will be saved by running only one slice at a time. If a number is\n provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`\n must be a multiple of `slice_size`.\n \"\"\"\n sliceable_head_dims = []\n\n def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):\n if hasattr(module, \"set_attention_slice\"):\n sliceable_head_dims.append(module.sliceable_head_dim)\n\n for child in module.children():\n fn_recursive_retrieve_slicable_dims(child)\n\n # retrieve number of attention layers\n for module in self.children():\n fn_recursive_retrieve_slicable_dims(module)\n\n num_slicable_layers = len(sliceable_head_dims)\n\n if slice_size == \"auto\":\n # half the attention head size is usually a good trade-off between\n # speed and memory\n slice_size = [dim // 2 for dim in sliceable_head_dims]\n elif slice_size == \"max\":\n # make smallest slice possible\n slice_size = num_slicable_layers * [1]\n\n slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size\n\n if len(slice_size) != len(sliceable_head_dims):\n raise ValueError(\n f\"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different\"\n f\" attention layers. 
Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.\"\n )\n\n for i in range(len(slice_size)):\n size = slice_size[i]\n dim = sliceable_head_dims[i]\n if size is not None and size > dim:\n raise ValueError(f\"size {size} has to be smaller or equal to {dim}.\")\n\n # Recursively walk through all the children.\n # Any children which exposes the set_attention_slice method\n # gets the message\n def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):\n if hasattr(module, \"set_attention_slice\"):\n module.set_attention_slice(slice_size.pop())\n\n for child in module.children():\n fn_recursive_set_attention_slice(child, slice_size)\n\n reversed_slice_size = list(reversed(slice_size))\n for module in self.children():\n fn_recursive_set_attention_slice(module, reversed_slice_size)\n\n def _set_gradient_checkpointing(self, value=False):\n self.gradient_checkpointing = value\n self.mid_block.gradient_checkpointing = value\n for module in self.down_blocks + self.up_blocks:\n if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D, CrossAttnUpBlock3D, UpBlock3D)):\n module.gradient_checkpointing = value\n\n def forward(\n self,\n sample: torch.FloatTensor,\n timestep: Union[torch.Tensor, float, int],\n encoder_hidden_states: torch.Tensor,\n class_labels: Optional[torch.Tensor] = None,\n timestep_cond: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,\n mid_block_additional_residual: Optional[torch.Tensor] = None,\n return_dict: bool = True,\n ) -> Union[UNet3DConditionOutput, Tuple]:\n r\"\"\"\n Args:\n sample (`torch.FloatTensor`): (batch, num_frames, channel, height, width) noisy inputs tensor\n timestep (`torch.FloatTensor` or `float` or `int`): (batch) timesteps\n encoder_hidden_states (`torch.FloatTensor`): (batch, sequence_length, feature_dim) encoder hidden states\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`models.unet_2d_condition.UNet3DConditionOutput`] instead of a plain tuple.\n cross_attention_kwargs (`dict`, *optional*):\n A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under\n `self.processor` in\n [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).\n\n Returns:\n [`~models.unet_2d_condition.UNet3DConditionOutput`] or `tuple`:\n [`~models.unet_2d_condition.UNet3DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. 
When\n returning a tuple, the first element is the sample tensor.\n \"\"\"\n # By default samples have to be AT least a multiple of the overall upsampling factor.\n # The overall upsampling factor is equal to 2 ** (# num of upsampling layears).\n # However, the upsampling interpolation output size can be forced to fit any upsampling size\n # on the fly if necessary.\n default_overall_up_factor = 2**self.num_upsamplers\n\n # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`\n forward_upsample_size = False\n upsample_size = None\n\n if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):\n logger.info(\"Forward upsample size to force interpolation output size.\")\n forward_upsample_size = True\n\n # prepare attention_mask\n if attention_mask is not None:\n attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0\n attention_mask = attention_mask.unsqueeze(1)\n\n # 1. time\n timesteps = timestep\n if not torch.is_tensor(timesteps):\n # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can\n # This would be a good case for the `match` statement (Python 3.10+)\n is_mps = sample.device.type == \"mps\"\n if isinstance(timestep, float):\n dtype = torch.float32 if is_mps else torch.float64\n else:\n dtype = torch.int32 if is_mps else torch.int64\n timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)\n elif len(timesteps.shape) == 0:\n timesteps = timesteps[None].to(sample.device)\n\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\n num_frames = sample.shape[2]\n timesteps = timesteps.expand(sample.shape[0])\n\n t_emb = self.time_proj(timesteps)\n\n # timesteps does not contain any weights and will always return f32 tensors\n # but time_embedding might actually be running in fp16. so we need to cast here.\n # there might be better ways to encapsulate this.\n t_emb = t_emb.to(dtype=self.dtype)\n\n emb = self.time_embedding(t_emb, timestep_cond)\n emb = emb.repeat_interleave(repeats=num_frames, dim=0)\n encoder_hidden_states = encoder_hidden_states.repeat_interleave(repeats=num_frames, dim=0)\n\n # 2. pre-process\n sample = sample.permute(0, 2, 1, 3, 4).reshape((sample.shape[0] * num_frames, -1) + sample.shape[3:])\n sample = self.conv_in(sample)\n \n if num_frames > 1:\n if self.gradient_checkpointing:\n sample = transformer_g_c(self.transformer_in, sample, num_frames)\n else:\n sample = self.transformer_in(sample, num_frames=num_frames).sample\n\n # 3. down\n down_block_res_samples = (sample,)\n for downsample_block in self.down_blocks:\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\n sample, res_samples = downsample_block(\n hidden_states=sample,\n temb=emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n num_frames=num_frames,\n cross_attention_kwargs=cross_attention_kwargs,\n )\n else:\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb, num_frames=num_frames)\n\n down_block_res_samples += res_samples\n\n if down_block_additional_residuals is not None:\n new_down_block_res_samples = ()\n\n for down_block_res_sample, down_block_additional_residual in zip(\n down_block_res_samples, down_block_additional_residuals\n ):\n down_block_res_sample = down_block_res_sample + down_block_additional_residual\n new_down_block_res_samples += (down_block_res_sample,)\n\n down_block_res_samples = new_down_block_res_samples\n\n # 4. 
mid\n if self.mid_block is not None:\n sample = self.mid_block(\n sample,\n emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n num_frames=num_frames,\n cross_attention_kwargs=cross_attention_kwargs,\n )\n\n if mid_block_additional_residual is not None:\n sample = sample + mid_block_additional_residual\n\n # 5. up\n for i, upsample_block in enumerate(self.up_blocks):\n is_final_block = i == len(self.up_blocks) - 1\n\n res_samples = down_block_res_samples[-len(upsample_block.resnets) :]\n down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]\n\n # if we have not reached the final block and need to forward the\n # upsample size, we do it here\n if not is_final_block and forward_upsample_size:\n upsample_size = down_block_res_samples[-1].shape[2:]\n\n if hasattr(upsample_block, \"has_cross_attention\") and upsample_block.has_cross_attention:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n encoder_hidden_states=encoder_hidden_states,\n upsample_size=upsample_size,\n attention_mask=attention_mask,\n num_frames=num_frames,\n cross_attention_kwargs=cross_attention_kwargs,\n )\n else:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n upsample_size=upsample_size,\n num_frames=num_frames,\n )\n\n # 6. post-process\n if self.conv_norm_out:\n sample = self.conv_norm_out(sample)\n sample = self.conv_act(sample)\n\n sample = self.conv_out(sample)\n\n # reshape to (batch, channel, framerate, width, height)\n sample = sample[None, :].reshape((-1, num_frames) + sample.shape[1:]).permute(0, 2, 1, 3, 4)\n\n if not return_dict:\n return (sample,)\n\n return UNet3DConditionOutput(sample=sample)" }, { "identifier": "VideoJsonDataset", "path": "utils/dataset.py", "snippet": "class VideoJsonDataset(Dataset):\n def __init__(\n self,\n tokenizer = None,\n width: int = 256,\n height: int = 256,\n n_sample_frames: int = 4,\n sample_start_idx: int = 1,\n frame_step: int = 1,\n json_path: str =\"\",\n json_data = None,\n vid_data_key: str = \"video_path\",\n preprocessed: bool = False,\n use_bucketing: bool = False,\n **kwargs\n ):\n self.vid_types = (\".mp4\", \".avi\", \".mov\", \".webm\", \".flv\", \".mjpeg\")\n self.use_bucketing = use_bucketing\n self.tokenizer = tokenizer\n self.preprocessed = preprocessed\n \n self.vid_data_key = vid_data_key\n self.train_data = self.load_from_json(json_path, json_data)\n\n self.width = width\n self.height = height\n\n self.n_sample_frames = n_sample_frames\n self.sample_start_idx = sample_start_idx\n self.frame_step = frame_step\n\n def build_json(self, json_data):\n extended_data = []\n for data in json_data['data']:\n for nested_data in data['data']:\n self.build_json_dict(\n data, \n nested_data, \n extended_data\n )\n json_data = extended_data\n return json_data\n\n def build_json_dict(self, data, nested_data, extended_data):\n clip_path = nested_data['clip_path'] if 'clip_path' in nested_data else None\n \n extended_data.append({\n self.vid_data_key: data[self.vid_data_key],\n 'frame_index': nested_data['frame_index'],\n 'prompt': nested_data['prompt'],\n 'clip_path': clip_path\n })\n \n def load_from_json(self, path, json_data):\n try:\n with open(path) as jpath:\n print(f\"Loading JSON from {path}\")\n json_data = json.load(jpath)\n\n return self.build_json(json_data)\n\n except:\n self.train_data = []\n print(\"Non-existant JSON path. 
Skipping.\")\n \n def validate_json(self, base_path, path):\n return os.path.exists(f\"{base_path}/{path}\")\n\n def get_frame_range(self, vr):\n return get_video_frames(\n vr, \n self.sample_start_idx, \n self.frame_step, \n self.n_sample_frames\n )\n \n def get_vid_idx(self, vr, vid_data=None):\n frames = self.n_sample_frames\n\n if vid_data is not None:\n idx = vid_data['frame_index']\n else:\n idx = self.sample_start_idx\n\n return idx\n\n def get_frame_buckets(self, vr):\n _, h, w = vr[0].shape \n width, height = sensible_buckets(self.width, self.height, h, w)\n # width, height = self.width, self.height\n resize = T.transforms.Resize((height, width), antialias=True)\n\n return resize\n\n def get_frame_batch(self, vr, resize=None):\n frame_range = self.get_frame_range(vr)\n frames = vr.get_batch(frame_range)\n video = rearrange(frames, \"f h w c -> f c h w\")\n\n if resize is not None: video = resize(video)\n return video\n\n def process_video_wrapper(self, vid_path):\n video, vr = process_video(\n vid_path,\n self.use_bucketing,\n self.width, \n self.height, \n self.get_frame_buckets, \n self.get_frame_batch\n )\n \n return video, vr \n\n def train_data_batch(self, index):\n\n # If we are training on individual clips.\n if 'clip_path' in self.train_data[index] and \\\n self.train_data[index]['clip_path'] is not None:\n\n vid_data = self.train_data[index]\n\n clip_path = vid_data['clip_path']\n \n # Get video prompt\n prompt = vid_data['prompt']\n\n video, _ = self.process_video_wrapper(clip_path)\n\n prompt_ids = get_prompt_ids(prompt, self.tokenizer)\n\n return video, prompt, prompt_ids\n\n # Assign train data\n train_data = self.train_data[index]\n \n # Get the frame of the current index.\n self.sample_start_idx = train_data['frame_index']\n \n # Initialize resize\n resize = None\n\n video, vr = self.process_video_wrapper(train_data[self.vid_data_key])\n\n # Get video prompt\n prompt = train_data['prompt']\n vr.seek(0)\n\n prompt_ids = get_prompt_ids(prompt, self.tokenizer)\n\n return video, prompt, prompt_ids\n\n @staticmethod\n def __getname__(): return 'json'\n\n def __len__(self):\n if self.train_data is not None:\n return len(self.train_data)\n else: \n return 0\n\n def __getitem__(self, index):\n \n # Initialize variables\n video = None\n prompt = None\n prompt_ids = None\n\n # Use default JSON training\n if self.train_data is not None:\n video, prompt, prompt_ids = self.train_data_batch(index)\n\n example = {\n \"pixel_values\": (video / 127.5 - 1.0),\n \"prompt_ids\": prompt_ids[0],\n \"text_prompt\": prompt,\n 'dataset': self.__getname__()\n }\n\n return example" }, { "identifier": "SingleVideoDataset", "path": "utils/dataset.py", "snippet": "class SingleVideoDataset(Dataset):\n def __init__(\n self,\n tokenizer = None,\n width: int = 256,\n height: int = 256,\n n_sample_frames: int = 4,\n frame_step: int = 1,\n single_video_path: str = \"\",\n single_video_prompt: str = \"\",\n use_caption: bool = False,\n use_bucketing: bool = False,\n **kwargs\n ):\n self.tokenizer = tokenizer\n self.use_bucketing = use_bucketing\n self.frames = []\n self.index = 1\n\n self.vid_types = (\".mp4\", \".avi\", \".mov\", \".webm\", \".flv\", \".mjpeg\")\n self.n_sample_frames = n_sample_frames\n self.frame_step = frame_step\n\n self.single_video_path = single_video_path\n self.single_video_prompt = single_video_prompt\n\n self.width = width\n self.height = height\n def create_video_chunks(self):\n vr = decord.VideoReader(self.single_video_path)\n vr_range = range(0, len(vr), 
self.frame_step)\n\n self.frames = list(self.chunk(vr_range, self.n_sample_frames))\n return self.frames\n\n def chunk(self, it, size):\n it = iter(it)\n return iter(lambda: tuple(islice(it, size)), ())\n\n def get_frame_batch(self, vr, resize=None):\n index = self.index\n frames = vr.get_batch(self.frames[self.index])\n video = rearrange(frames, \"f h w c -> f c h w\")\n\n if resize is not None: video = resize(video)\n return video\n\n def get_frame_buckets(self, vr):\n _, h, w = vr[0].shape \n # width, height = sensible_buckets(self.width, self.height, h, w)\n width, height = self.width, self.height\n resize = T.transforms.Resize((height, width), antialias=True)\n\n return resize\n \n def process_video_wrapper(self, vid_path):\n video, vr = process_video(\n vid_path,\n self.use_bucketing,\n self.width, \n self.height, \n self.get_frame_buckets, \n self.get_frame_batch\n )\n \n return video, vr \n\n def single_video_batch(self, index):\n train_data = self.single_video_path\n self.index = index\n\n if train_data.endswith(self.vid_types):\n video, _ = self.process_video_wrapper(train_data)\n\n prompt = self.single_video_prompt\n prompt_ids = get_prompt_ids(prompt, self.tokenizer)\n\n return video, prompt, prompt_ids\n else:\n raise ValueError(f\"Single video is not a video type. Types: {self.vid_types}\")\n \n @staticmethod\n def __getname__(): return 'single_video'\n\n def __len__(self):\n \n return len(self.create_video_chunks())\n\n def __getitem__(self, index):\n\n video, prompt, prompt_ids = self.single_video_batch(index)\n\n example = {\n \"pixel_values\": (video / 127.5 - 1.0),\n \"prompt_ids\": prompt_ids[0],\n \"text_prompt\": prompt,\n 'dataset': self.__getname__()\n }\n\n return example" }, { "identifier": "ImageDataset", "path": "utils/dataset.py", "snippet": "class ImageDataset(Dataset):\n \n def __init__(\n self,\n tokenizer = None,\n width: int = 256,\n height: int = 256,\n base_width: int = 256,\n base_height: int = 256,\n use_caption: bool = False,\n image_dir: str = '',\n single_img_prompt: str = '',\n use_bucketing: bool = False,\n fallback_prompt: str = '',\n **kwargs\n ):\n self.tokenizer = tokenizer\n self.img_types = (\".png\", \".jpg\", \".jpeg\", '.bmp')\n self.use_bucketing = use_bucketing\n\n self.image_dir = self.get_images_list(image_dir)\n self.fallback_prompt = fallback_prompt\n\n self.use_caption = use_caption\n self.single_img_prompt = single_img_prompt\n\n self.width = width\n self.height = height\n\n def get_images_list(self, image_dir):\n if os.path.exists(image_dir):\n imgs = [x for x in os.listdir(image_dir) if x.endswith(self.img_types)]\n full_img_dir = []\n\n for img in imgs: \n full_img_dir.append(f\"{image_dir}/{img}\")\n\n return sorted(full_img_dir)\n\n return ['']\n\n def image_batch(self, index):\n train_data = self.image_dir[index]\n img = train_data\n\n try:\n img = torchvision.io.read_image(img, mode=torchvision.io.ImageReadMode.RGB)\n except:\n img = T.transforms.PILToTensor()(Image.open(img).convert(\"RGB\"))\n\n width = self.width\n height = self.height\n\n if self.use_bucketing:\n _, h, w = img.shape\n width, height = sensible_buckets(width, height, w, h)\n \n resize = T.transforms.Resize((height, width), antialias=True)\n\n img = resize(img) \n img = repeat(img, 'c h w -> f c h w', f=1)\n\n prompt = get_text_prompt(\n file_path=train_data,\n text_prompt=self.single_img_prompt,\n fallback_prompt=self.fallback_prompt,\n ext_types=self.img_types, \n use_caption=True\n )\n prompt_ids = get_prompt_ids(prompt, self.tokenizer)\n\n return img, 
prompt, prompt_ids\n\n @staticmethod\n def __getname__(): return 'image'\n \n def __len__(self):\n # Image directory\n if os.path.exists(self.image_dir[0]):\n return len(self.image_dir)\n else:\n return 0\n\n def __getitem__(self, index):\n img, prompt, prompt_ids = self.image_batch(index)\n example = {\n \"pixel_values\": (img / 127.5 - 1.0),\n \"prompt_ids\": prompt_ids[0],\n \"text_prompt\": prompt, \n 'dataset': self.__getname__()\n }\n\n return example" }, { "identifier": "VideoFolderDataset", "path": "utils/dataset.py", "snippet": "class VideoFolderDataset(Dataset):\n def __init__(\n self,\n tokenizer=None,\n width: int = 256,\n height: int = 256,\n n_sample_frames: int = 16,\n fps: int = 8,\n path: str = \"./data\",\n fallback_prompt: str = \"\",\n use_bucketing: bool = False,\n **kwargs\n ):\n self.tokenizer = tokenizer\n self.use_bucketing = use_bucketing\n\n self.fallback_prompt = fallback_prompt\n\n self.video_files = glob(f\"{path}/*.mp4\")\n\n self.width = width\n self.height = height\n\n self.n_sample_frames = n_sample_frames\n self.fps = fps\n\n def get_frame_buckets(self, vr):\n _, h, w = vr[0].shape \n width, height = sensible_buckets(self.width, self.height, h, w)\n # width, height = self.width, self.height\n resize = T.transforms.Resize((height, width), antialias=True)\n\n return resize\n\n def get_frame_batch(self, vr, resize=None):\n n_sample_frames = self.n_sample_frames\n native_fps = vr.get_avg_fps()\n \n every_nth_frame = max(1, round(native_fps / self.fps))\n every_nth_frame = min(len(vr), every_nth_frame)\n \n effective_length = len(vr) // every_nth_frame\n if effective_length < n_sample_frames:\n n_sample_frames = effective_length\n\n effective_idx = random.randint(0, (effective_length - n_sample_frames))\n idxs = every_nth_frame * np.arange(effective_idx, effective_idx + n_sample_frames)\n\n video = vr.get_batch(idxs)\n video = rearrange(video, \"f h w c -> f c h w\")\n\n if resize is not None: video = resize(video)\n return video, vr\n \n def process_video_wrapper(self, vid_path):\n video, vr = process_video(\n vid_path,\n self.use_bucketing,\n self.width, \n self.height, \n self.get_frame_buckets, \n self.get_frame_batch\n )\n return video, vr\n \n def get_prompt_ids(self, prompt):\n return self.tokenizer(\n prompt,\n truncation=True,\n padding=\"max_length\",\n max_length=self.tokenizer.model_max_length,\n return_tensors=\"pt\",\n ).input_ids\n\n @staticmethod\n def __getname__(): return 'folder'\n\n def __len__(self):\n return len(self.video_files)\n\n def __getitem__(self, index):\n\n video, _ = self.process_video_wrapper(self.video_files[index])\n\n prompt = self.fallback_prompt\n\n prompt_ids = self.get_prompt_ids(prompt)\n\n return {\"pixel_values\": (video[0] / 127.5 - 1.0), \"prompt_ids\": prompt_ids[0], \"text_prompt\": prompt, 'dataset': self.__getname__()}" }, { "identifier": "CachedDataset", "path": "utils/dataset.py", "snippet": "class CachedDataset(Dataset):\n def __init__(self,cache_dir: str = ''):\n self.cache_dir = cache_dir\n self.cached_data_list = self.get_files_list()\n\n def get_files_list(self):\n tensors_list = [f\"{self.cache_dir}/{x}\" for x in os.listdir(self.cache_dir) if x.endswith('.pt')]\n return sorted(tensors_list)\n\n def __len__(self):\n return len(self.cached_data_list)\n\n def __getitem__(self, index):\n cached_latent = torch.load(self.cached_data_list[index], map_location='cuda:0')\n return cached_latent" }, { "identifier": "LoraHandler", "path": "utils/lora_handler.py", "snippet": "class LoraHandler(object):\n def 
__init__(\n self, \n version: LORA_VERSIONS = LoraVersions.cloneofsimo, \n use_unet_lora: bool = False,\n use_text_lora: bool = False,\n save_for_webui: bool = False,\n only_for_webui: bool = False,\n lora_bias: str = 'none',\n unet_replace_modules: list = None,\n text_encoder_replace_modules: list = None\n ):\n self.version = version\n self.lora_loader = self.get_lora_func(func_type=LoraFuncTypes.loader)\n self.lora_injector = self.get_lora_func(func_type=LoraFuncTypes.injector)\n self.lora_bias = lora_bias\n self.use_unet_lora = use_unet_lora\n self.use_text_lora = use_text_lora\n self.save_for_webui = save_for_webui\n self.only_for_webui = only_for_webui\n self.unet_replace_modules = unet_replace_modules\n self.text_encoder_replace_modules = text_encoder_replace_modules\n self.use_lora = any([use_text_lora, use_unet_lora])\n\n def is_cloneofsimo_lora(self):\n return self.version == LoraVersions.cloneofsimo\n\n\n def get_lora_func(self, func_type: LORA_FUNC_TYPES = LoraFuncTypes.loader):\n\n if self.is_cloneofsimo_lora():\n\n if func_type == LoraFuncTypes.loader:\n return monkeypatch_or_replace_lora_extended\n\n if func_type == LoraFuncTypes.injector:\n return inject_trainable_lora_extended\n \n assert \"LoRA Version does not exist.\"\n\n def check_lora_ext(self, lora_file: str):\n return lora_file.endswith(tuple(LORA_FILE_TYPES))\n\n def get_lora_file_path(\n self, \n lora_path: str, \n model: Union[UNet3DConditionModel, CLIPTextModel]\n ):\n if os.path.exists(lora_path):\n lora_filenames = [fns for fns in os.listdir(lora_path)]\n is_lora = self.check_lora_ext(lora_path)\n\n is_unet = isinstance(model, UNet3DConditionModel)\n is_text = isinstance(model, CLIPTextModel)\n idx = 0 if is_unet else 1\n\n base_name = FILE_BASENAMES[idx]\n \n for lora_filename in lora_filenames:\n is_lora = self.check_lora_ext(lora_filename)\n if not is_lora:\n continue\n \n if base_name in lora_filename:\n return os.path.join(lora_path, lora_filename)\n\n return None\n\n def handle_lora_load(self, file_name:str, lora_loader_args: dict = None):\n self.lora_loader(**lora_loader_args)\n print(f\"Successfully loaded LoRA from: {file_name}\")\n \n def load_lora(self, model, lora_path: str = '', lora_loader_args: dict = None,):\n try:\n lora_file = self.get_lora_file_path(lora_path, model)\n\n if lora_file is not None:\n lora_loader_args.update({\"lora_path\": lora_file})\n self.handle_lora_load(lora_file, lora_loader_args)\n\n else:\n print(f\"Could not load LoRAs for {model.__class__.__name__}. 
Injecting new ones instead...\")\n\n except Exception as e:\n print(f\"An error occured while loading a LoRA file: {e}\")\n \n def get_lora_func_args(self, lora_path, use_lora, model, replace_modules, r, dropout, lora_bias, scale):\n return_dict = lora_args.copy()\n \n if self.is_cloneofsimo_lora():\n return_dict = filter_dict(return_dict, keys=CLONE_OF_SIMO_KEYS)\n return_dict.update({\n \"model\": model,\n \"loras\": self.get_lora_file_path(lora_path, model),\n \"target_replace_module\": replace_modules,\n \"r\": r,\n \"scale\": scale,\n \"dropout_p\": dropout,\n })\n\n return return_dict\n\n def do_lora_injection(\n self, \n model, \n replace_modules, \n bias='none',\n dropout=0,\n r=4,\n lora_loader_args=None,\n ): \n REPLACE_MODULES = replace_modules\n\n params = None\n negation = None\n is_injection_hybrid = False\n \n if self.is_cloneofsimo_lora():\n is_injection_hybrid = True\n injector_args = lora_loader_args\n\n params, negation = self.lora_injector(**injector_args) # inject_trainable_lora_extended\n for _up, _down in extract_lora_ups_down(\n model, \n target_replace_module=REPLACE_MODULES):\n\n if all(x is not None for x in [_up, _down]):\n print(f\"Lora successfully injected into {model.__class__.__name__}.\")\n\n break\n\n return params, negation, is_injection_hybrid\n\n return params, negation, is_injection_hybrid\n\n def add_lora_to_model(self, use_lora, model, replace_modules, dropout=0.0, lora_path='', r=16, scale=1.0):\n\n params = None\n negation = None\n\n lora_loader_args = self.get_lora_func_args(\n lora_path,\n use_lora,\n model,\n replace_modules,\n r,\n dropout,\n self.lora_bias,\n scale\n )\n\n if use_lora:\n params, negation, is_injection_hybrid = self.do_lora_injection(\n model, \n replace_modules, \n bias=self.lora_bias,\n lora_loader_args=lora_loader_args,\n dropout=dropout,\n r=r\n )\n\n if not is_injection_hybrid:\n self.load_lora(model, lora_path=lora_path, lora_loader_args=lora_loader_args)\n \n params = model if params is None else params\n return params, negation\n\n def save_cloneofsimo_lora(self, model, save_path, step, flag):\n \n def save_lora(model, name, condition, replace_modules, step, save_path, flag=None):\n if condition and replace_modules is not None:\n save_path = f\"{save_path}/{step}_{name}.pt\"\n save_lora_weight(model, save_path, replace_modules, flag)\n\n save_lora(\n model.unet, \n FILE_BASENAMES[0], \n self.use_unet_lora, \n self.unet_replace_modules, \n step,\n save_path,\n flag\n )\n save_lora(\n model.text_encoder, \n FILE_BASENAMES[1], \n self.use_text_lora, \n self.text_encoder_replace_modules, \n step, \n save_path,\n flag\n )\n\n # train_patch_pipe(model, self.use_unet_lora, self.use_text_lora)\n\n def save_lora_weights(self, model: None, save_path: str ='',step: str = '', flag=None):\n save_path = f\"{save_path}/lora\"\n os.makedirs(save_path, exist_ok=True)\n\n if self.is_cloneofsimo_lora():\n if any([self.save_for_webui, self.only_for_webui]):\n warnings.warn(\n \"\"\"\n You have 'save_for_webui' enabled, but are using cloneofsimo's LoRA implemention.\n Only 'stable_lora' is supported for saving to a compatible webui file.\n \"\"\"\n )\n self.save_cloneofsimo_lora(model, save_path, step, flag)" }, { "identifier": "extract_lora_child_module", "path": "utils/lora.py", "snippet": "def extract_lora_child_module(model, target_replace_module=DEFAULT_TARGET_REPLACE):\n\n loras = []\n\n for target_replace_module_i in target_replace_module:\n\n for _m, _n, _child_module in _find_modules(\n model,\n [target_replace_module_i],\n 
search_class=[LoraInjectedLinear, LoraInjectedConv2d, LoraInjectedConv3d],\n ):\n loras.append(_child_module)\n\n if len(loras) == 0:\n raise ValueError(\"No lora injected.\")\n\n return loras" }, { "identifier": "ddim_inversion", "path": "utils/ddim_utils.py", "snippet": "@torch.no_grad()\ndef ddim_inversion(pipeline, ddim_scheduler, video_latent, num_inv_steps, prompt=\"\"):\n ddim_latents = ddim_loop(pipeline, ddim_scheduler, video_latent, num_inv_steps, prompt)\n return ddim_latents" } ]
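The dataset classes quoted in the context above all return examples with the same keys (pixel_values, prompt_ids, text_prompt, dataset), while CachedDataset simply lists the .pt files under a cache directory and loads one saved latent per index. A minimal usage sketch of that cached path, assuming a hypothetical cache directory and a CUDA device (the quoted __getitem__ maps tensors to 'cuda:0'):

from torch.utils.data import DataLoader
from utils.dataset import CachedDataset

# Hypothetical cache directory; CachedDataset lists every '*.pt' file it finds there.
cached = CachedDataset(cache_dir="./cached_latents")
loader = DataLoader(cached, batch_size=1, shuffle=True)

for cached_latent in loader:
    # Each item is whatever was stored with torch.save during preprocessing,
    # already placed on 'cuda:0' by the dataset's __getitem__.
    pass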
import argparse
import datetime
import logging
import inspect
import math
import os
import random
import gc
import copy
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
import diffusers
import transformers
import imageio
import numpy as np
import itertools
import bitsandbytes as bnb
from typing import Dict, Optional, Tuple
from omegaconf import OmegaConf
from torchvision import transforms
from tqdm.auto import tqdm
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import set_seed
from models.unet_3d_condition import UNet3DConditionModel
from diffusers.models import AutoencoderKL
from diffusers import DDIMScheduler, TextToVideoSDPipeline
from diffusers.optimization import get_scheduler
from diffusers.utils.import_utils import is_xformers_available
from diffusers.models.attention_processor import AttnProcessor2_0, Attention
from diffusers.models.attention import BasicTransformerBlock
from transformers import CLIPTextModel, CLIPTokenizer
from transformers.models.clip.modeling_clip import CLIPEncoder
from utils.dataset import VideoJsonDataset, SingleVideoDataset, \
    ImageDataset, VideoFolderDataset, CachedDataset
from einops import rearrange, repeat
from utils.lora_handler import LoraHandler
from utils.lora import extract_lora_child_module
from utils.ddim_utils import ddim_inversion
from xformers.ops import MemoryEfficientAttentionFlashAttentionOp
11,373
already_printed_trainables = False

logger = get_logger(__name__, log_level="INFO")


def create_logging(logging, logger, accelerator):
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(accelerator.state, main_process_only=False)


def accelerate_set_verbose(accelerator):
    if accelerator.is_local_main_process:
        transformers.utils.logging.set_verbosity_warning()
        diffusers.utils.logging.set_verbosity_info()
    else:
        transformers.utils.logging.set_verbosity_error()
        diffusers.utils.logging.set_verbosity_error()


def get_train_dataset(dataset_types, train_data, tokenizer):
    train_datasets = []

    # Loop through all available datasets, get the name, then add to list of data to process.
for DataSet in [VideoJsonDataset, SingleVideoDataset, ImageDataset, VideoFolderDataset]:
3
2023-12-11 04:51:39+00:00
16k
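The cropped code above ends right after the comment inside get_train_dataset, and the recorded next line is the for-loop over the four dataset classes. A hypothetical continuation follows — a sketch based only on the __getname__() static methods visible in the quoted classes ('json', 'single_video', 'image', 'folder'), not the repository's actual implementation:

from utils.dataset import VideoJsonDataset, SingleVideoDataset, ImageDataset, VideoFolderDataset


def get_train_dataset(dataset_types, train_data, tokenizer):
    train_datasets = []

    # Loop through all available datasets, get the name, then add to list of data to process.
    for DataSet in [VideoJsonDataset, SingleVideoDataset, ImageDataset, VideoFolderDataset]:
        for dataset_type in dataset_types:
            # Each class reports its name via the static __getname__() method.
            if dataset_type == DataSet.__getname__():
                # Assumption: train_data is a dict of constructor kwargs (width, height, paths, ...).
                train_datasets.append(DataSet(**train_data, tokenizer=tokenizer))

    return train_datasets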
ZS-YANG/FemtoDet-v3
mmdet/models/dense_heads/atss_vlfusion_head.py
[ { "identifier": "MODELS", "path": "mmdet/registry.py", "snippet": "MODELS = Registry('model', parent=MMENGINE_MODELS, locations=['mmdet.models'])" }, { "identifier": "cat_boxes", "path": "mmdet/structures/bbox/transforms.py", "snippet": "def cat_boxes(data_list: List[Union[Tensor, BaseBoxes]],\n dim: int = 0) -> Union[Tensor, BaseBoxes]:\n \"\"\"Concatenate boxes with type of tensor or box type.\n\n Args:\n data_list (List[Union[Tensor, :obj:`BaseBoxes`]]): A list of tensors\n or box types need to be concatenated.\n dim (int): The dimension over which the box are concatenated.\n Defaults to 0.\n\n Returns:\n Union[Tensor, :obj`BaseBoxes`]: Concatenated results.\n \"\"\"\n if data_list and isinstance(data_list[0], BaseBoxes):\n return data_list[0].cat(data_list, dim=dim)\n else:\n return torch.cat(data_list, dim=dim)" }, { "identifier": "reduce_mean", "path": "mmdet/utils/dist_utils.py", "snippet": "def reduce_mean(tensor):\n \"\"\"\"Obtain the mean of tensor on different GPUs.\"\"\"\n if not (dist.is_available() and dist.is_initialized()):\n return tensor\n tensor = tensor.clone()\n dist.all_reduce(tensor.div_(dist.get_world_size()), op=dist.ReduceOp.SUM)\n return tensor" }, { "identifier": "InstanceList", "path": "mmdet/utils/typing_utils.py", "snippet": "" }, { "identifier": "filter_scores_and_topk", "path": "mmdet/models/utils/misc.py", "snippet": "def filter_scores_and_topk(scores, score_thr, topk, results=None):\n \"\"\"Filter results using score threshold and topk candidates.\n\n Args:\n scores (Tensor): The scores, shape (num_bboxes, K).\n score_thr (float): The score filter threshold.\n topk (int): The number of topk candidates.\n results (dict or list or Tensor, Optional): The results to\n which the filtering rule is to be applied. The shape\n of each item is (num_bboxes, N).\n\n Returns:\n tuple: Filtered results\n\n - scores (Tensor): The scores after being filtered, \\\n shape (num_bboxes_filtered, ).\n - labels (Tensor): The class labels, shape \\\n (num_bboxes_filtered, ).\n - anchor_idxs (Tensor): The anchor indexes, shape \\\n (num_bboxes_filtered, ).\n - filtered_results (dict or list or Tensor, Optional): \\\n The filtered results. The shape of each item is \\\n (num_bboxes_filtered, N).\n \"\"\"\n valid_mask = scores > score_thr\n scores = scores[valid_mask]\n valid_idxs = torch.nonzero(valid_mask)\n\n num_topk = min(topk, valid_idxs.size(0))\n # torch.sort is actually faster than .topk (at least on GPUs)\n scores, idxs = scores.sort(descending=True)\n scores = scores[:num_topk]\n topk_idxs = valid_idxs[idxs[:num_topk]]\n keep_idxs, labels = topk_idxs.unbind(dim=1)\n\n filtered_results = None\n if results is not None:\n if isinstance(results, dict):\n filtered_results = {k: v[keep_idxs] for k, v in results.items()}\n elif isinstance(results, list):\n filtered_results = [result[keep_idxs] for result in results]\n elif isinstance(results, torch.Tensor):\n filtered_results = results[keep_idxs]\n else:\n raise NotImplementedError(f'Only supports dict or list or Tensor, '\n f'but get {type(results)}.')\n return scores, labels, keep_idxs, filtered_results" }, { "identifier": "select_single_mlvl", "path": "mmdet/models/utils/misc.py", "snippet": "def select_single_mlvl(mlvl_tensors, batch_id, detach=True):\n \"\"\"Extract a multi-scale single image tensor from a multi-scale batch\n tensor based on batch index.\n\n Note: The default value of detach is True, because the proposal gradient\n needs to be detached during the training of the two-stage model. 
E.g\n Cascade Mask R-CNN.\n\n Args:\n mlvl_tensors (list[Tensor]): Batch tensor for all scale levels,\n each is a 4D-tensor.\n batch_id (int): Batch index.\n detach (bool): Whether detach gradient. Default True.\n\n Returns:\n list[Tensor]: Multi-scale single image tensor.\n \"\"\"\n assert isinstance(mlvl_tensors, (list, tuple))\n num_levels = len(mlvl_tensors)\n\n if detach:\n mlvl_tensor_list = [\n mlvl_tensors[i][batch_id].detach() for i in range(num_levels)\n ]\n else:\n mlvl_tensor_list = [\n mlvl_tensors[i][batch_id] for i in range(num_levels)\n ]\n return mlvl_tensor_list" }, { "identifier": "unpack_gt_instances", "path": "mmdet/models/utils/misc.py", "snippet": "def unpack_gt_instances(batch_data_samples: SampleList) -> tuple:\n \"\"\"Unpack ``gt_instances``, ``gt_instances_ignore`` and ``img_metas`` based\n on ``batch_data_samples``\n\n Args:\n batch_data_samples (List[:obj:`DetDataSample`]): The Data\n Samples. It usually includes information such as\n `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.\n\n Returns:\n tuple:\n\n - batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n gt_instance. It usually includes ``bboxes`` and ``labels``\n attributes.\n - batch_gt_instances_ignore (list[:obj:`InstanceData`]):\n Batch of gt_instances_ignore. It includes ``bboxes`` attribute\n data that is ignored during training and testing.\n Defaults to None.\n - batch_img_metas (list[dict]): Meta information of each image,\n e.g., image size, scaling factor, etc.\n \"\"\"\n batch_gt_instances = []\n batch_gt_instances_ignore = []\n batch_img_metas = []\n for data_sample in batch_data_samples:\n batch_img_metas.append(data_sample.metainfo)\n batch_gt_instances.append(data_sample.gt_instances)\n if 'ignored_instances' in data_sample:\n batch_gt_instances_ignore.append(data_sample.ignored_instances)\n else:\n batch_gt_instances_ignore.append(None)\n\n return batch_gt_instances, batch_gt_instances_ignore, batch_img_metas" }, { "identifier": "BertEncoderLayer", "path": "mmdet/models/utils/vlfuse_helper.py", "snippet": "class BertEncoderLayer(BertPreTrainedModel):\n \"\"\"A modified version of the `BertLayer` class from the\n `transformers.models.bert.modeling_bert` module.\n\n Args:\n config (:class:`~transformers.BertConfig`):\n The configuration object that\n contains various parameters for the model.\n clamp_min_for_underflow (bool, optional):\n Whether to clamp the minimum value of the hidden states\n to prevent underflow. Defaults to `False`.\n clamp_max_for_overflow (bool, optional):\n Whether to clamp the maximum value of the hidden states\n to prevent overflow. 
Defaults to `False`.\n \"\"\"\n\n def __init__(self,\n config: BertConfig,\n clamp_min_for_underflow: bool = False,\n clamp_max_for_overflow: bool = False):\n super().__init__(config)\n self.config = config\n self.chunk_size_feed_forward = config.chunk_size_feed_forward\n self.seq_len_dim = 1\n\n self.attention = BertAttention(config, clamp_min_for_underflow,\n clamp_max_for_overflow)\n self.intermediate = BertIntermediate(config)\n self.output = BertOutput(config)\n\n def forward(\n self, inputs: Dict[str, Dict[str, torch.Tensor]]\n ) -> Dict[str, Dict[str, torch.Tensor]]:\n \"\"\"Applies the BertEncoderLayer to the input features.\"\"\"\n language_dict_features = inputs['lang']\n hidden_states = language_dict_features['hidden']\n attention_mask = language_dict_features['masks']\n\n device = hidden_states.device\n input_shape = hidden_states.size()[:-1]\n extended_attention_mask = self.get_extended_attention_mask(\n attention_mask, input_shape, device)\n\n self_attention_outputs = self.attention(\n hidden_states,\n extended_attention_mask,\n None,\n output_attentions=False,\n past_key_value=None)\n attention_output = self_attention_outputs[0]\n outputs = self_attention_outputs[1:]\n layer_output = apply_chunking_to_forward(self.feed_forward_chunk,\n self.chunk_size_feed_forward,\n self.seq_len_dim,\n attention_output)\n outputs = (layer_output, ) + outputs\n hidden_states = outputs[0]\n\n language_dict_features['hidden'] = hidden_states\n\n features_dict = {\n 'visual': inputs['visual'],\n 'lang': language_dict_features\n }\n\n return features_dict\n\n def feed_forward_chunk(self, attention_output: Tensor) -> Tensor:\n \"\"\"Applies the intermediate and output layers of the BertEncoderLayer\n to a chunk of the input sequence.\"\"\"\n intermediate_output = self.intermediate(attention_output)\n layer_output = self.output(intermediate_output, attention_output)\n return layer_output" }, { "identifier": "VLFuse", "path": "mmdet/models/utils/vlfuse_helper.py", "snippet": "class VLFuse(nn.Module):\n \"\"\"Early Fusion Module.\n\n Args:\n v_dim (int): Dimension of visual features.\n l_dim (int): Dimension of language features.\n embed_dim (int): The embedding dimension for the attention operation.\n num_heads (int): Number of attention heads.\n dropout (float): Dropout probability.\n drop_path (float): Drop path probability.\n use_checkpoint (bool): Whether to use PyTorch's checkpoint function.\n \"\"\"\n\n def __init__(self,\n v_dim: int = 256,\n l_dim: int = 768,\n embed_dim: int = 2048,\n num_heads: int = 8,\n dropout: float = 0.1,\n drop_path: float = 0.0,\n use_checkpoint: bool = False):\n super().__init__()\n self.use_checkpoint = use_checkpoint\n self.b_attn = BiAttentionBlock(\n v_dim=v_dim,\n l_dim=l_dim,\n embed_dim=embed_dim,\n num_heads=num_heads,\n dropout=dropout,\n drop_path=drop_path,\n init_values=1.0 / 6.0)\n\n def forward(self, x: dict) -> dict:\n \"\"\"Forward pass of the VLFuse module.\"\"\"\n visual_features = x['visual']\n language_dict_features = x['lang']\n\n if self.use_checkpoint:\n # vf is mean visual_features\n # checkpoint does not allow complex data structures as input,\n # such as list, so we must split them.\n vf0, vf1, vf2, vf3, vf4, language_features = checkpoint.checkpoint(\n self.b_attn, *visual_features,\n language_dict_features['hidden'],\n language_dict_features['masks'])\n else:\n vf0, vf1, vf2, vf3, vf4, language_features = self.b_attn(\n *visual_features, language_dict_features['hidden'],\n language_dict_features['masks'])\n\n 
language_dict_features['hidden'] = language_features\n fused_language_dict_features = language_dict_features\n\n features_dict = {\n 'visual': [vf0, vf1, vf2, vf3, vf4],\n 'lang': fused_language_dict_features\n }\n\n return features_dict" }, { "identifier": "permute_and_flatten", "path": "mmdet/models/utils/vlfuse_helper.py", "snippet": "def permute_and_flatten(layer: Tensor, N: int, A: int, C: int, H: int,\n W: int) -> Tensor:\n \"\"\"Permute and then flatten a tensor,\n\n from size (N, A, C, H, W) to (N, H * W * A, C).\n\n Args:\n layer (Tensor): Tensor of shape (N, C, H, W).\n N (int): Batch size.\n A (int): Number of attention heads.\n C (int): Number of channels.\n H (int): Height of feature map.\n W (int): Width of feature map.\n\n Returns:\n Tensor: A Tensor of shape (N, H * W * A, C).\n \"\"\"\n layer = layer.view(N, A, C, H, W)\n layer = layer.permute(0, 3, 4, 1, 2)\n layer = layer.reshape(N, -1, C)\n return layer" }, { "identifier": "MAX_CLAMP_VALUE", "path": "mmdet/models/utils/vlfuse_helper.py", "snippet": "MAX_CLAMP_VALUE = 50000" }, { "identifier": "ATSSHead", "path": "mmdet/models/dense_heads/atss_head.py", "snippet": "class ATSSHead(AnchorHead):\n \"\"\"Detection Head of `ATSS <https://arxiv.org/abs/1912.02424>`_.\n\n ATSS head structure is similar with FCOS, however ATSS use anchor boxes\n and assign label by Adaptive Training Sample Selection instead max-iou.\n\n Args:\n num_classes (int): Number of categories excluding the background\n category.\n in_channels (int): Number of channels in the input feature map.\n pred_kernel_size (int): Kernel size of ``nn.Conv2d``\n stacked_convs (int): Number of stacking convs of the head.\n conv_cfg (:obj:`ConfigDict` or dict, optional): Config dict for\n convolution layer. Defaults to None.\n norm_cfg (:obj:`ConfigDict` or dict): Config dict for normalization\n layer. Defaults to ``dict(type='GN', num_groups=32,\n requires_grad=True)``.\n reg_decoded_bbox (bool): If true, the regression loss would be\n applied directly on decoded bounding boxes, converting both\n the predicted boxes and regression targets to absolute\n coordinates format. Defaults to False. 
It should be `True` when\n using `IoULoss`, `GIoULoss`, or `DIoULoss` in the bbox head.\n loss_centerness (:obj:`ConfigDict` or dict): Config of centerness loss.\n Defaults to ``dict(type='CrossEntropyLoss', use_sigmoid=True,\n loss_weight=1.0)``.\n init_cfg (:obj:`ConfigDict` or dict or list[dict] or\n list[:obj:`ConfigDict`]): Initialization config dict.\n \"\"\"\n\n def __init__(self,\n num_classes: int,\n in_channels: int,\n pred_kernel_size: int = 3,\n stacked_convs: int = 4,\n conv_cfg: OptConfigType = None,\n norm_cfg: ConfigType = dict(\n type='GN', num_groups=32, requires_grad=True),\n reg_decoded_bbox: bool = True,\n loss_centerness: ConfigType = dict(\n type='CrossEntropyLoss',\n use_sigmoid=True,\n loss_weight=1.0),\n init_cfg: MultiConfig = dict(\n type='Normal',\n layer='Conv2d',\n std=0.01,\n override=dict(\n type='Normal',\n name='atss_cls',\n std=0.01,\n bias_prob=0.01)),\n **kwargs) -> None:\n self.pred_kernel_size = pred_kernel_size\n self.stacked_convs = stacked_convs\n self.conv_cfg = conv_cfg\n self.norm_cfg = norm_cfg\n super().__init__(\n num_classes=num_classes,\n in_channels=in_channels,\n reg_decoded_bbox=reg_decoded_bbox,\n init_cfg=init_cfg,\n **kwargs)\n\n self.sampling = False\n self.loss_centerness = MODELS.build(loss_centerness)\n\n def _init_layers(self) -> None:\n \"\"\"Initialize layers of the head.\"\"\"\n self.relu = nn.ReLU(inplace=True)\n self.cls_convs = nn.ModuleList()\n self.reg_convs = nn.ModuleList()\n for i in range(self.stacked_convs):\n chn = self.in_channels if i == 0 else self.feat_channels\n self.cls_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg))\n self.reg_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg))\n pred_pad_size = self.pred_kernel_size // 2\n self.atss_cls = nn.Conv2d(\n self.feat_channels,\n self.num_anchors * self.cls_out_channels,\n self.pred_kernel_size,\n padding=pred_pad_size)\n self.atss_reg = nn.Conv2d(\n self.feat_channels,\n self.num_base_priors * 4,\n self.pred_kernel_size,\n padding=pred_pad_size)\n self.atss_centerness = nn.Conv2d(\n self.feat_channels,\n self.num_base_priors * 1,\n self.pred_kernel_size,\n padding=pred_pad_size)\n self.scales = nn.ModuleList(\n [Scale(1.0) for _ in self.prior_generator.strides])\n\n def forward(self, x: Tuple[Tensor]) -> Tuple[List[Tensor]]:\n \"\"\"Forward features from the upstream network.\n\n Args:\n x (tuple[Tensor]): Features from the upstream network, each is\n a 4D-tensor.\n\n Returns:\n tuple: Usually a tuple of classification scores and bbox prediction\n cls_scores (list[Tensor]): Classification scores for all scale\n levels, each is a 4D-tensor, the channels number is\n num_anchors * num_classes.\n bbox_preds (list[Tensor]): Box energies / deltas for all scale\n levels, each is a 4D-tensor, the channels number is\n num_anchors * 4.\n \"\"\"\n return multi_apply(self.forward_single, x, self.scales)\n\n def forward_single(self, x: Tensor, scale: Scale) -> Sequence[Tensor]:\n \"\"\"Forward feature of a single scale level.\n\n Args:\n x (Tensor): Features of a single scale level.\n scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize\n the bbox prediction.\n\n Returns:\n tuple:\n cls_score (Tensor): Cls scores for a single scale level\n the channels number is num_anchors * num_classes.\n bbox_pred (Tensor): Box energies / deltas for a single scale\n level, the channels number is 
num_anchors * 4.\n centerness (Tensor): Centerness for a single scale level, the\n channel number is (N, num_anchors * 1, H, W).\n \"\"\"\n cls_feat = x\n reg_feat = x\n for cls_conv in self.cls_convs:\n cls_feat = cls_conv(cls_feat)\n for reg_conv in self.reg_convs:\n reg_feat = reg_conv(reg_feat)\n cls_score = self.atss_cls(cls_feat)\n # we just follow atss, not apply exp in bbox_pred\n bbox_pred = scale(self.atss_reg(reg_feat)).float()\n centerness = self.atss_centerness(reg_feat)\n return cls_score, bbox_pred, centerness\n\n def loss_by_feat_single(self, anchors: Tensor, cls_score: Tensor,\n bbox_pred: Tensor, centerness: Tensor,\n labels: Tensor, label_weights: Tensor,\n bbox_targets: Tensor, avg_factor: float) -> dict:\n \"\"\"Calculate the loss of a single scale level based on the features\n extracted by the detection head.\n\n Args:\n cls_score (Tensor): Box scores for each scale level\n Has shape (N, num_anchors * num_classes, H, W).\n bbox_pred (Tensor): Box energies / deltas for each scale\n level with shape (N, num_anchors * 4, H, W).\n anchors (Tensor): Box reference for each scale level with shape\n (N, num_total_anchors, 4).\n labels (Tensor): Labels of each anchors with shape\n (N, num_total_anchors).\n label_weights (Tensor): Label weights of each anchor with shape\n (N, num_total_anchors)\n bbox_targets (Tensor): BBox regression targets of each anchor with\n shape (N, num_total_anchors, 4).\n avg_factor (float): Average factor that is used to average\n the loss. When using sampling method, avg_factor is usually\n the sum of positive and negative priors. When using\n `PseudoSampler`, `avg_factor` is usually equal to the number\n of positive priors.\n\n Returns:\n dict[str, Tensor]: A dictionary of loss components.\n \"\"\"\n\n anchors = anchors.reshape(-1, 4)\n cls_score = cls_score.permute(0, 2, 3, 1).reshape(\n -1, self.cls_out_channels).contiguous()\n bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)\n centerness = centerness.permute(0, 2, 3, 1).reshape(-1)\n bbox_targets = bbox_targets.reshape(-1, 4)\n labels = labels.reshape(-1)\n label_weights = label_weights.reshape(-1)\n\n # classification loss\n loss_cls = self.loss_cls(\n cls_score, labels, label_weights, avg_factor=avg_factor)\n\n # FG cat_id: [0, num_classes -1], BG cat_id: num_classes\n bg_class_ind = self.num_classes\n pos_inds = ((labels >= 0)\n & (labels < bg_class_ind)).nonzero().squeeze(1)\n\n if len(pos_inds) > 0:\n pos_bbox_targets = bbox_targets[pos_inds]\n pos_bbox_pred = bbox_pred[pos_inds]\n pos_anchors = anchors[pos_inds]\n pos_centerness = centerness[pos_inds]\n\n centerness_targets = self.centerness_target(\n pos_anchors, pos_bbox_targets)\n pos_decode_bbox_pred = self.bbox_coder.decode(\n pos_anchors, pos_bbox_pred)\n\n # regression loss\n loss_bbox = self.loss_bbox(\n pos_decode_bbox_pred,\n pos_bbox_targets,\n weight=centerness_targets,\n avg_factor=1.0)\n\n # centerness loss\n loss_centerness = self.loss_centerness(\n pos_centerness, centerness_targets, avg_factor=avg_factor)\n\n else:\n loss_bbox = bbox_pred.sum() * 0\n loss_centerness = centerness.sum() * 0\n centerness_targets = bbox_targets.new_tensor(0.)\n\n return loss_cls, loss_bbox, loss_centerness, centerness_targets.sum()\n\n def loss_by_feat(\n self,\n cls_scores: List[Tensor],\n bbox_preds: List[Tensor],\n centernesses: List[Tensor],\n batch_gt_instances: InstanceList,\n batch_img_metas: List[dict],\n batch_gt_instances_ignore: OptInstanceList = None) -> dict:\n \"\"\"Calculate the loss based on the features extracted by 
the detection\n head.\n\n Args:\n cls_scores (list[Tensor]): Box scores for each scale level\n Has shape (N, num_anchors * num_classes, H, W)\n bbox_preds (list[Tensor]): Box energies / deltas for each scale\n level with shape (N, num_anchors * 4, H, W)\n centernesses (list[Tensor]): Centerness for each scale\n level with shape (N, num_anchors * 1, H, W)\n batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n gt_instance. It usually includes ``bboxes`` and ``labels``\n attributes.\n batch_img_metas (list[dict]): Meta information of each image, e.g.,\n image size, scaling factor, etc.\n batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional):\n Batch of gt_instances_ignore. It includes ``bboxes`` attribute\n data that is ignored during training and testing.\n Defaults to None.\n\n Returns:\n dict[str, Tensor]: A dictionary of loss components.\n \"\"\"\n featmap_sizes = [featmap.size()[-2:] for featmap in bbox_preds]\n assert len(featmap_sizes) == self.prior_generator.num_levels\n\n device = cls_scores[0].device\n anchor_list, valid_flag_list = self.get_anchors(\n featmap_sizes, batch_img_metas, device=device)\n\n cls_reg_targets = self.get_targets(\n anchor_list,\n valid_flag_list,\n batch_gt_instances,\n batch_img_metas,\n batch_gt_instances_ignore=batch_gt_instances_ignore)\n\n (anchor_list, labels_list, label_weights_list, bbox_targets_list,\n bbox_weights_list, avg_factor) = cls_reg_targets\n avg_factor = reduce_mean(\n torch.tensor(avg_factor, dtype=torch.float, device=device)).item()\n\n losses_cls, losses_bbox, loss_centerness, \\\n bbox_avg_factor = multi_apply(\n self.loss_by_feat_single,\n anchor_list,\n cls_scores,\n bbox_preds,\n centernesses,\n labels_list,\n label_weights_list,\n bbox_targets_list,\n avg_factor=avg_factor)\n\n bbox_avg_factor = sum(bbox_avg_factor)\n bbox_avg_factor = reduce_mean(bbox_avg_factor).clamp_(min=1).item()\n losses_bbox = list(map(lambda x: x / bbox_avg_factor, losses_bbox))\n return dict(\n loss_cls=losses_cls,\n loss_bbox=losses_bbox,\n loss_centerness=loss_centerness)\n\n def centerness_target(self, anchors: Tensor, gts: Tensor) -> Tensor:\n \"\"\"Calculate the centerness between anchors and gts.\n\n Only calculate pos centerness targets, otherwise there may be nan.\n\n Args:\n anchors (Tensor): Anchors with shape (N, 4), \"xyxy\" format.\n gts (Tensor): Ground truth bboxes with shape (N, 4), \"xyxy\" format.\n\n Returns:\n Tensor: Centerness between anchors and gts.\n \"\"\"\n anchors_cx = (anchors[:, 2] + anchors[:, 0]) / 2\n anchors_cy = (anchors[:, 3] + anchors[:, 1]) / 2\n l_ = anchors_cx - gts[:, 0]\n t_ = anchors_cy - gts[:, 1]\n r_ = gts[:, 2] - anchors_cx\n b_ = gts[:, 3] - anchors_cy\n\n left_right = torch.stack([l_, r_], dim=1)\n top_bottom = torch.stack([t_, b_], dim=1)\n centerness = torch.sqrt(\n (left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) *\n (top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0]))\n assert not torch.isnan(centerness).any()\n return centerness\n\n def get_targets(self,\n anchor_list: List[List[Tensor]],\n valid_flag_list: List[List[Tensor]],\n batch_gt_instances: InstanceList,\n batch_img_metas: List[dict],\n batch_gt_instances_ignore: OptInstanceList = None,\n unmap_outputs: bool = True) -> tuple:\n \"\"\"Get targets for ATSS head.\n\n This method is almost the same as `AnchorHead.get_targets()`. 
Besides\n returning the targets as the parent method does, it also returns the\n anchors as the first element of the returned tuple.\n \"\"\"\n num_imgs = len(batch_img_metas)\n assert len(anchor_list) == len(valid_flag_list) == num_imgs\n\n # anchor number of multi levels\n num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]\n num_level_anchors_list = [num_level_anchors] * num_imgs\n\n # concat all level anchors and flags to a single tensor\n for i in range(num_imgs):\n assert len(anchor_list[i]) == len(valid_flag_list[i])\n anchor_list[i] = torch.cat(anchor_list[i])\n valid_flag_list[i] = torch.cat(valid_flag_list[i])\n\n # compute targets for each image\n if batch_gt_instances_ignore is None:\n batch_gt_instances_ignore = [None] * num_imgs\n (all_anchors, all_labels, all_label_weights, all_bbox_targets,\n all_bbox_weights, pos_inds_list, neg_inds_list,\n sampling_results_list) = multi_apply(\n self._get_targets_single,\n anchor_list,\n valid_flag_list,\n num_level_anchors_list,\n batch_gt_instances,\n batch_img_metas,\n batch_gt_instances_ignore,\n unmap_outputs=unmap_outputs)\n # Get `avg_factor` of all images, which calculate in `SamplingResult`.\n # When using sampling method, avg_factor is usually the sum of\n # positive and negative priors. When using `PseudoSampler`,\n # `avg_factor` is usually equal to the number of positive priors.\n avg_factor = sum(\n [results.avg_factor for results in sampling_results_list])\n # split targets to a list w.r.t. multiple levels\n anchors_list = images_to_levels(all_anchors, num_level_anchors)\n labels_list = images_to_levels(all_labels, num_level_anchors)\n label_weights_list = images_to_levels(all_label_weights,\n num_level_anchors)\n bbox_targets_list = images_to_levels(all_bbox_targets,\n num_level_anchors)\n bbox_weights_list = images_to_levels(all_bbox_weights,\n num_level_anchors)\n return (anchors_list, labels_list, label_weights_list,\n bbox_targets_list, bbox_weights_list, avg_factor)\n\n def _get_targets_single(self,\n flat_anchors: Tensor,\n valid_flags: Tensor,\n num_level_anchors: List[int],\n gt_instances: InstanceData,\n img_meta: dict,\n gt_instances_ignore: Optional[InstanceData] = None,\n unmap_outputs: bool = True) -> tuple:\n \"\"\"Compute regression, classification targets for anchors in a single\n image.\n\n Args:\n flat_anchors (Tensor): Multi-level anchors of the image, which are\n concatenated into a single tensor of shape (num_anchors ,4)\n valid_flags (Tensor): Multi level valid flags of the image,\n which are concatenated into a single tensor of\n shape (num_anchors,).\n num_level_anchors (List[int]): Number of anchors of each scale\n level.\n gt_instances (:obj:`InstanceData`): Ground truth of instance\n annotations. It usually includes ``bboxes`` and ``labels``\n attributes.\n img_meta (dict): Meta information for current image.\n gt_instances_ignore (:obj:`InstanceData`, optional): Instances\n to be ignored during training. 
It includes ``bboxes`` attribute\n data that is ignored during training and testing.\n Defaults to None.\n unmap_outputs (bool): Whether to map outputs back to the original\n set of anchors.\n\n Returns:\n tuple: N is the number of total anchors in the image.\n labels (Tensor): Labels of all anchors in the image with shape\n (N,).\n label_weights (Tensor): Label weights of all anchor in the\n image with shape (N,).\n bbox_targets (Tensor): BBox targets of all anchors in the\n image with shape (N, 4).\n bbox_weights (Tensor): BBox weights of all anchors in the\n image with shape (N, 4)\n pos_inds (Tensor): Indices of positive anchor with shape\n (num_pos,).\n neg_inds (Tensor): Indices of negative anchor with shape\n (num_neg,).\n sampling_result (:obj:`SamplingResult`): Sampling results.\n \"\"\"\n inside_flags = anchor_inside_flags(flat_anchors, valid_flags,\n img_meta['img_shape'][:2],\n self.train_cfg['allowed_border'])\n if not inside_flags.any():\n raise ValueError(\n 'There is no valid anchor inside the image boundary. Please '\n 'check the image size and anchor sizes, or set '\n '``allowed_border`` to -1 to skip the condition.')\n # assign gt and sample anchors\n anchors = flat_anchors[inside_flags, :]\n\n num_level_anchors_inside = self.get_num_level_anchors_inside(\n num_level_anchors, inside_flags)\n pred_instances = InstanceData(priors=anchors)\n assign_result = self.assigner.assign(pred_instances,\n num_level_anchors_inside,\n gt_instances, gt_instances_ignore)\n\n sampling_result = self.sampler.sample(assign_result, pred_instances,\n gt_instances)\n\n num_valid_anchors = anchors.shape[0]\n bbox_targets = torch.zeros_like(anchors)\n bbox_weights = torch.zeros_like(anchors)\n labels = anchors.new_full((num_valid_anchors, ),\n self.num_classes,\n dtype=torch.long)\n label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float)\n\n pos_inds = sampling_result.pos_inds\n neg_inds = sampling_result.neg_inds\n if len(pos_inds) > 0:\n if self.reg_decoded_bbox:\n pos_bbox_targets = sampling_result.pos_gt_bboxes\n else:\n pos_bbox_targets = self.bbox_coder.encode(\n sampling_result.pos_priors, sampling_result.pos_gt_bboxes)\n\n bbox_targets[pos_inds, :] = pos_bbox_targets\n bbox_weights[pos_inds, :] = 1.0\n\n labels[pos_inds] = sampling_result.pos_gt_labels\n if self.train_cfg['pos_weight'] <= 0:\n label_weights[pos_inds] = 1.0\n else:\n label_weights[pos_inds] = self.train_cfg['pos_weight']\n if len(neg_inds) > 0:\n label_weights[neg_inds] = 1.0\n\n # map up to original set of anchors\n if unmap_outputs:\n num_total_anchors = flat_anchors.size(0)\n anchors = unmap(anchors, num_total_anchors, inside_flags)\n labels = unmap(\n labels, num_total_anchors, inside_flags, fill=self.num_classes)\n label_weights = unmap(label_weights, num_total_anchors,\n inside_flags)\n bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags)\n bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags)\n\n return (anchors, labels, label_weights, bbox_targets, bbox_weights,\n pos_inds, neg_inds, sampling_result)\n\n def get_num_level_anchors_inside(self, num_level_anchors, inside_flags):\n \"\"\"Get the number of valid anchors in every level.\"\"\"\n\n split_inside_flags = torch.split(inside_flags, num_level_anchors)\n num_level_anchors_inside = [\n int(flags.sum()) for flags in split_inside_flags\n ]\n return num_level_anchors_inside" } ]
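The centerness_target snippet in the context above boils down to one formula: the target is the geometric mean of how evenly the anchor centre splits its matched ground-truth box horizontally and vertically, sqrt((min(l,r)/max(l,r)) * (min(t,b)/max(t,b))). A minimal standalone sketch of that computation, using hypothetical toy boxes rather than anything from the repository:

import torch

def centerness_target(anchors: torch.Tensor, gts: torch.Tensor) -> torch.Tensor:
    # anchors, gts: (N, 4) boxes in "xyxy" format; gts are the matched GT boxes
    cx = (anchors[:, 0] + anchors[:, 2]) / 2
    cy = (anchors[:, 1] + anchors[:, 3]) / 2
    l_, t_ = cx - gts[:, 0], cy - gts[:, 1]
    r_, b_ = gts[:, 2] - cx, gts[:, 3] - cy
    lr = torch.stack([l_, r_], dim=1)
    tb = torch.stack([t_, b_], dim=1)
    # 1.0 when the anchor centre coincides with the GT centre, smaller towards the border
    return torch.sqrt((lr.min(dim=1).values / lr.max(dim=1).values) *
                      (tb.min(dim=1).values / tb.max(dim=1).values))

# anchor centred at (5, 5) inside a 0..10 GT box is perfectly centred -> 1.0
print(centerness_target(torch.tensor([[4., 4., 6., 6.]]),
                        torch.tensor([[0., 0., 10., 10.]])))  # tensor([1.])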
import copy
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Callable, List, Optional, Sequence, Tuple, Union
from mmcv.cnn import Scale
from mmcv.ops.modulated_deform_conv import ModulatedDeformConv2d
from mmengine.config import ConfigDict
from mmengine.model import BaseModel
from mmengine.structures import InstanceData
from torch import Tensor
from transformers import BertConfig
from mmdet.registry import MODELS
from mmdet.structures.bbox import cat_boxes
from mmdet.utils import InstanceList, OptInstanceList, reduce_mean
from ..utils import (BertEncoderLayer, VLFuse, filter_scores_and_topk,
                     permute_and_flatten, select_single_mlvl,
                     unpack_gt_instances)
from ..utils.vlfuse_helper import MAX_CLAMP_VALUE
from .atss_head import ATSSHead
10,876
out_vis_feats = [self.relu(item) for item in out_vis_feats] features_dict = {'visual': out_vis_feats, 'lang': inputs['lang']} return features_dict class VLFusionModule(BaseModel): """Visual-lang Fusion Module.""" def __init__(self, in_channels: int, feat_channels: int, num_base_priors: int, early_fuse: bool = False, num_dyhead_blocks: int = 6, lang_model_name: str = 'bert-base-uncased', use_dyrelu: bool = True, use_dyfuse: bool = True, use_dcn: bool = True, use_checkpoint: bool = False, **kwargs) -> None: super().__init__(**kwargs) if BertConfig is None: raise RuntimeError( 'transformers is not installed, please install it by: ' 'pip install transformers.') self.in_channels = in_channels self.feat_channels = feat_channels self.num_base_priors = num_base_priors self.early_fuse = early_fuse self.num_dyhead_blocks = num_dyhead_blocks self.use_dyrelu = use_dyrelu self.use_dyfuse = use_dyfuse self.use_dcn = use_dcn self.use_checkpoint = use_checkpoint self.lang_cfg = BertConfig.from_pretrained(lang_model_name) self.lang_dim = self.lang_cfg.hidden_size self._init_layers() def _init_layers(self) -> None: """Initialize layers of the model.""" bias_value = -math.log((1 - 0.01) / 0.01) dyhead_tower = [] for i in range(self.num_dyhead_blocks): if self.early_fuse: # cross-modality fusion dyhead_tower.append(VLFuse(use_checkpoint=self.use_checkpoint)) # lang branch dyhead_tower.append( BertEncoderLayer( self.lang_cfg, clamp_min_for_underflow=True, clamp_max_for_overflow=True)) # vision branch dyhead_tower.append( DyConv( lambda i, o, s: Conv3x3Norm( i, o, s, use_dcn=self.use_dcn, norm_type=['gn', 16]), self.in_channels if i == 0 else self.feat_channels, self.feat_channels, use_dyrelu=(self.use_dyrelu and self.in_channels == self.feat_channels) if i == 0 else self.use_dyrelu, use_dyfuse=(self.use_dyfuse and self.in_channels == self.feat_channels) if i == 0 else self.use_dyfuse, use_dcn=(self.use_dcn and self.in_channels == self.feat_channels) if i == 0 else self.use_dcn, )) self.add_module('dyhead_tower', nn.Sequential(*dyhead_tower)) self.bbox_pred = nn.Conv2d( self.feat_channels, self.num_base_priors * 4, kernel_size=1) self.centerness = nn.Conv2d( self.feat_channels, self.num_base_priors * 1, kernel_size=1) self.dot_product_projection_text = nn.Linear( self.lang_dim, self.num_base_priors * self.feat_channels, bias=True) self.log_scale = nn.Parameter(torch.Tensor([0.0]), requires_grad=True) self.bias_lang = nn.Parameter( torch.zeros(self.lang_dim), requires_grad=True) self.bias0 = nn.Parameter( torch.Tensor([bias_value]), requires_grad=True) self.scales = nn.ModuleList([Scale(1.0) for _ in range(5)]) def forward(self, visual_feats: Tuple[Tensor], language_feats: dict) -> Tuple: feat_inputs = {'visual': visual_feats, 'lang': language_feats} dyhead_tower = self.dyhead_tower(feat_inputs) if self.early_fuse: embedding = dyhead_tower['lang']['hidden'] else: embedding = language_feats['embedded'] embedding = F.normalize(embedding, p=2, dim=-1) dot_product_proj_tokens = self.dot_product_projection_text(embedding / 2.0) dot_product_proj_tokens_bias = torch.matmul( embedding, self.bias_lang) + self.bias0 bbox_preds = [] centerness = [] cls_logits = [] for i, feature in enumerate(visual_feats): visual = dyhead_tower['visual'][i] B, C, H, W = visual.shape bbox_pred = self.scales[i](self.bbox_pred(visual)) bbox_preds.append(bbox_pred) centerness.append(self.centerness(visual))
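One detail worth noting in _init_layers above: bias_value = -math.log((1 - 0.01) / 0.01) is the usual focal-loss prior initialisation, chosen so that sigmoid(bias_value) is about 0.01 and every location starts out predicting background. A quick check of that identity (toy values only, independent of the model):

import math
import torch

pi = 0.01                                    # desired initial foreground probability
bias_value = -math.log((1 - pi) / pi)
# sigmoid(-log((1 - pi) / pi)) == pi, so every location starts out ~99% background
print(torch.sigmoid(torch.tensor(bias_value)))   # tensor(0.0100)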
# Copyright (c) OpenMMLab. All rights reserved. try: except ImportError: BertConfig = None def convert_grounding_to_cls_scores(logits: Tensor, positive_maps: List[dict]) -> Tensor: """Convert logits to class scores.""" assert len(positive_maps) == logits.shape[0] # batch size scores = torch.zeros(logits.shape[0], logits.shape[1], len(positive_maps[0])).to(logits.device) if positive_maps is not None: if all(x == positive_maps[0] for x in positive_maps): # only need to compute once positive_map = positive_maps[0] for label_j in positive_map: scores[:, :, label_j - 1] = logits[:, :, torch.LongTensor(positive_map[label_j] )].mean(-1) else: for i, positive_map in enumerate(positive_maps): for label_j in positive_map: scores[i, :, label_j - 1] = logits[ i, :, torch.LongTensor(positive_map[label_j])].mean(-1) return scores class Conv3x3Norm(nn.Module): """Conv3x3 and norm.""" def __init__(self, in_channels: int, out_channels: int, stride: int, groups: int = 1, use_dcn: bool = False, norm_type: Optional[Union[Sequence, str]] = None): super().__init__() if use_dcn: self.conv = ModulatedDeformConv2d( in_channels, out_channels, kernel_size=3, stride=stride, padding=1, groups=groups) else: self.conv = nn.Conv2d( in_channels, out_channels, kernel_size=3, stride=stride, padding=1, groups=groups) if isinstance(norm_type, Sequence): assert len(norm_type) == 2 assert norm_type[0] == 'gn' gn_group = norm_type[1] norm_type = norm_type[0] if norm_type == 'bn': bn_op = nn.BatchNorm2d(out_channels) elif norm_type == 'gn': bn_op = nn.GroupNorm( num_groups=gn_group, num_channels=out_channels) if norm_type is not None: self.bn = bn_op else: self.bn = None def forward(self, x, **kwargs): x = self.conv(x, **kwargs) if self.bn: x = self.bn(x) return x class DyReLU(nn.Module): """Dynamic ReLU.""" def __init__(self, in_channels: int, out_channels: int, expand_ratio: int = 4): super().__init__() self.avg_pool = nn.AdaptiveAvgPool2d(1) self.expand_ratio = expand_ratio self.out_channels = out_channels self.fc = nn.Sequential( nn.Linear(in_channels, in_channels // expand_ratio), nn.ReLU(inplace=True), nn.Linear(in_channels // expand_ratio, out_channels * self.expand_ratio), nn.Hardsigmoid(inplace=True)) def forward(self, x) -> Tensor: x_out = x b, c, h, w = x.size() x = self.avg_pool(x).view(b, c) x = self.fc(x).view(b, -1, 1, 1) a1, b1, a2, b2 = torch.split(x, self.out_channels, dim=1) a1 = (a1 - 0.5) * 2 + 1.0 a2 = (a2 - 0.5) * 2 b1 = b1 - 0.5 b2 = b2 - 0.5 out = torch.max(x_out * a1 + b1, x_out * a2 + b2) return out class DyConv(nn.Module): """Dynamic Convolution.""" def __init__(self, conv_func: Callable, in_channels: int, out_channels: int, use_dyfuse: bool = True, use_dyrelu: bool = False, use_dcn: bool = False): super().__init__() self.dyconvs = nn.ModuleList() self.dyconvs.append(conv_func(in_channels, out_channels, 1)) self.dyconvs.append(conv_func(in_channels, out_channels, 1)) self.dyconvs.append(conv_func(in_channels, out_channels, 2)) if use_dyfuse: self.attnconv = nn.Sequential( nn.AdaptiveAvgPool2d(1), nn.Conv2d(in_channels, 1, kernel_size=1), nn.ReLU(inplace=True)) self.h_sigmoid = nn.Hardsigmoid(inplace=True) else: self.attnconv = None if use_dyrelu: self.relu = DyReLU(in_channels, out_channels) else: self.relu = nn.ReLU() if use_dcn: self.offset = nn.Conv2d( in_channels, 27, kernel_size=3, stride=1, padding=1) else: self.offset = None self.init_weights() def init_weights(self): for m in self.dyconvs.modules(): if isinstance(m, nn.Conv2d): nn.init.normal_(m.weight.data, 0, 0.01) if m.bias is not None: 
m.bias.data.zero_() if self.attnconv is not None: for m in self.attnconv.modules(): if isinstance(m, nn.Conv2d): nn.init.normal_(m.weight.data, 0, 0.01) if m.bias is not None: m.bias.data.zero_() def forward(self, inputs: dict) -> dict: visual_feats = inputs['visual'] out_vis_feats = [] for level, feature in enumerate(visual_feats): offset_conv_args = {} if self.offset is not None: offset_mask = self.offset(feature) offset = offset_mask[:, :18, :, :] mask = offset_mask[:, 18:, :, :].sigmoid() offset_conv_args = dict(offset=offset, mask=mask) temp_feats = [self.dyconvs[1](feature, **offset_conv_args)] if level > 0: temp_feats.append(self.dyconvs[2](visual_feats[level - 1], **offset_conv_args)) if level < len(visual_feats) - 1: temp_feats.append( F.upsample_bilinear( self.dyconvs[0](visual_feats[level + 1], **offset_conv_args), size=[feature.size(2), feature.size(3)])) mean_feats = torch.mean( torch.stack(temp_feats), dim=0, keepdim=False) if self.attnconv is not None: attn_feat = [] res_feat = [] for feat in temp_feats: res_feat.append(feat) attn_feat.append(self.attnconv(feat)) res_feat = torch.stack(res_feat) spa_pyr_attn = self.h_sigmoid(torch.stack(attn_feat)) mean_feats = torch.mean( res_feat * spa_pyr_attn, dim=0, keepdim=False) out_vis_feats.append(mean_feats) out_vis_feats = [self.relu(item) for item in out_vis_feats] features_dict = {'visual': out_vis_feats, 'lang': inputs['lang']} return features_dict class VLFusionModule(BaseModel): """Visual-lang Fusion Module.""" def __init__(self, in_channels: int, feat_channels: int, num_base_priors: int, early_fuse: bool = False, num_dyhead_blocks: int = 6, lang_model_name: str = 'bert-base-uncased', use_dyrelu: bool = True, use_dyfuse: bool = True, use_dcn: bool = True, use_checkpoint: bool = False, **kwargs) -> None: super().__init__(**kwargs) if BertConfig is None: raise RuntimeError( 'transformers is not installed, please install it by: ' 'pip install transformers.') self.in_channels = in_channels self.feat_channels = feat_channels self.num_base_priors = num_base_priors self.early_fuse = early_fuse self.num_dyhead_blocks = num_dyhead_blocks self.use_dyrelu = use_dyrelu self.use_dyfuse = use_dyfuse self.use_dcn = use_dcn self.use_checkpoint = use_checkpoint self.lang_cfg = BertConfig.from_pretrained(lang_model_name) self.lang_dim = self.lang_cfg.hidden_size self._init_layers() def _init_layers(self) -> None: """Initialize layers of the model.""" bias_value = -math.log((1 - 0.01) / 0.01) dyhead_tower = [] for i in range(self.num_dyhead_blocks): if self.early_fuse: # cross-modality fusion dyhead_tower.append(VLFuse(use_checkpoint=self.use_checkpoint)) # lang branch dyhead_tower.append( BertEncoderLayer( self.lang_cfg, clamp_min_for_underflow=True, clamp_max_for_overflow=True)) # vision branch dyhead_tower.append( DyConv( lambda i, o, s: Conv3x3Norm( i, o, s, use_dcn=self.use_dcn, norm_type=['gn', 16]), self.in_channels if i == 0 else self.feat_channels, self.feat_channels, use_dyrelu=(self.use_dyrelu and self.in_channels == self.feat_channels) if i == 0 else self.use_dyrelu, use_dyfuse=(self.use_dyfuse and self.in_channels == self.feat_channels) if i == 0 else self.use_dyfuse, use_dcn=(self.use_dcn and self.in_channels == self.feat_channels) if i == 0 else self.use_dcn, )) self.add_module('dyhead_tower', nn.Sequential(*dyhead_tower)) self.bbox_pred = nn.Conv2d( self.feat_channels, self.num_base_priors * 4, kernel_size=1) self.centerness = nn.Conv2d( self.feat_channels, self.num_base_priors * 1, kernel_size=1) 
self.dot_product_projection_text = nn.Linear( self.lang_dim, self.num_base_priors * self.feat_channels, bias=True) self.log_scale = nn.Parameter(torch.Tensor([0.0]), requires_grad=True) self.bias_lang = nn.Parameter( torch.zeros(self.lang_dim), requires_grad=True) self.bias0 = nn.Parameter( torch.Tensor([bias_value]), requires_grad=True) self.scales = nn.ModuleList([Scale(1.0) for _ in range(5)]) def forward(self, visual_feats: Tuple[Tensor], language_feats: dict) -> Tuple: feat_inputs = {'visual': visual_feats, 'lang': language_feats} dyhead_tower = self.dyhead_tower(feat_inputs) if self.early_fuse: embedding = dyhead_tower['lang']['hidden'] else: embedding = language_feats['embedded'] embedding = F.normalize(embedding, p=2, dim=-1) dot_product_proj_tokens = self.dot_product_projection_text(embedding / 2.0) dot_product_proj_tokens_bias = torch.matmul( embedding, self.bias_lang) + self.bias0 bbox_preds = [] centerness = [] cls_logits = [] for i, feature in enumerate(visual_feats): visual = dyhead_tower['visual'][i] B, C, H, W = visual.shape bbox_pred = self.scales[i](self.bbox_pred(visual)) bbox_preds.append(bbox_pred) centerness.append(self.centerness(visual))
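The DyReLU module in the file above predicts two per-channel affine functions from a squeeze-and-excite branch and returns their pointwise maximum, max(x*a1 + b1, x*a2 + b2). When the predicted coefficients sit at the centre of their ranges (Hardsigmoid outputs of 0.5, i.e. a1 = 1 and a2 = b1 = b2 = 0), this reduces to a plain ReLU; the sketch below checks that limiting case with hand-set coefficients rather than learned ones:

import torch

x = torch.randn(2, 4, 8, 8)
# coefficients at the centre of their ranges (Hardsigmoid output 0.5 everywhere)
a1 = torch.ones(1, 4, 1, 1)     # (0.5 - 0.5) * 2 + 1.0 = 1.0
a2 = torch.zeros(1, 4, 1, 1)    # (0.5 - 0.5) * 2       = 0.0
b1 = torch.zeros(1, 4, 1, 1)    # 0.5 - 0.5             = 0.0
b2 = torch.zeros(1, 4, 1, 1)
out = torch.max(x * a1 + b1, x * a2 + b2)
assert torch.equal(out, torch.relu(x))      # max(x, 0) is exactly ReLU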
dot_product_proj_queries = permute_and_flatten(
9
2023-12-11 15:23:03+00:00
16k
merlresearch/PixPNet
pixpnet/protonets/prp/prp.py
[ { "identifier": "AdaptiveAvgPool2DWrapperFct", "path": "pixpnet/protonets/prp/lrp_general6.py", "snippet": "class AdaptiveAvgPool2DWrapperFct(torch.autograd.Function):\n \"\"\"\n We can implement our own custom autograd Functions by subclassing\n torch.autograd.Function and implementing the forward and backward passes\n which operate on Tensors.\n \"\"\"\n\n @staticmethod\n def forward(ctx, x, module, eps):\n \"\"\"\n In the forward pass we receive a Tensor containing the input and return\n a Tensor containing the output. ctx is a context object that can be used\n to stash information for backward computation. You can cache arbitrary\n objects for use in the backward pass using the ctx.save_for_backward method.\n \"\"\"\n\n def configvalues_totensorlist(module, device):\n\n propertynames = [\"output_size\"]\n values = []\n for attr in propertynames:\n v = getattr(module, attr)\n # convert it into tensor\n # has no treatment for booleans yet\n if isinstance(v, int):\n v = torch.tensor([v], dtype=torch.int32, device=device)\n elif isinstance(v, tuple):\n v = torch.tensor(v, dtype=torch.int32, device=device)\n else:\n print(\"v is neither int nor tuple. unexpected\")\n exit()\n values.append(v)\n return propertynames, values\n\n # stash module config params and trainable params\n propertynames, values = configvalues_totensorlist(module, x.device)\n epstensor = torch.tensor([eps], dtype=torch.float32, device=x.device)\n ctx.save_for_backward(x, epstensor, *values) # *values unpacks the list\n\n return module.forward(x)\n\n @staticmethod\n def backward(ctx, grad_output):\n \"\"\"\n In the backward pass we receive a Tensor containing the gradient of the loss\n with respect to the output, and we need to compute the gradient of the loss\n with respect to the input.\n \"\"\"\n\n input_, epstensor, *values = ctx.saved_tensors\n\n #######################################################################\n # reconstruct dictionary of config parameters\n def tensorlist_todict(values):\n propertynames = [\"output_size\"]\n # idea: paramsdict={ n: values[i]\n # for i,n in enumerate(propertynames) }\n # but needs to turn tensors to ints or tuples!\n paramsdict = {}\n for i, n in enumerate(propertynames):\n v = values[i]\n if v.numel == 1:\n paramsdict[n] = v.item() # to cpu?\n else:\n alist = v.tolist()\n if len(alist) == 1:\n paramsdict[n] = alist[0]\n else:\n paramsdict[n] = tuple(alist)\n return paramsdict\n\n #######################################################################\n paramsdict = tensorlist_todict(values)\n eps = epstensor.item()\n\n # class instantiation\n layerclass = torch.nn.AdaptiveAvgPool2d(**paramsdict)\n\n X = input_.clone().detach().requires_grad_(True)\n R = lrp_backward(_input=X, layer=layerclass, relevance_output=grad_output[0], eps0=eps, eps=eps)\n\n return R, None, None" }, { "identifier": "Conv2DBeta0WrapperFct", "path": "pixpnet/protonets/prp/lrp_general6.py", "snippet": "class Conv2DBeta0WrapperFct(torch.autograd.Function):\n \"\"\"\n We can implement our own custom autograd Functions by subclassing\n torch.autograd.Function and implementing the forward and backward passes\n which operate on Tensors.\n \"\"\"\n\n @staticmethod\n def forward(ctx, x, module, lrpignorebias):\n \"\"\"\n In the forward pass we receive a Tensor containing the input and return\n a Tensor containing the output. ctx is a context object that can be used\n to stash information for backward computation. 
You can cache arbitrary\n objects for use in the backward pass using the ctx.save_for_backward method.\n \"\"\"\n\n def configvalues_totensorlist(module):\n propertynames = [\"in_channels\", \"out_channels\", \"kernel_size\", \"stride\", \"padding\", \"dilation\", \"groups\"]\n values = []\n for attr in propertynames:\n v = getattr(module, attr)\n # convert it into tensor\n # has no treatment for booleans yet\n if isinstance(v, int):\n v = torch.tensor([v], dtype=torch.int32, device=module.weight.device)\n elif isinstance(v, tuple):\n ################\n ################\n # FAILMODE: if it is not a tuple of ints but e.g. a tuple of floats, or a tuple of a tuple\n\n v = torch.tensor(v, dtype=torch.int32, device=module.weight.device)\n else:\n print(\"v is neither int nor tuple. unexpected\")\n exit()\n values.append(v)\n return propertynames, values\n\n # stash module config params and trainable params\n propertynames, values = configvalues_totensorlist(module)\n\n if module.bias is None:\n bias = None\n else:\n bias = module.bias.data.clone()\n lrpignorebiastensor = torch.tensor([lrpignorebias], dtype=torch.bool, device=module.weight.device)\n ctx.save_for_backward(\n x, module.weight.data.clone(), bias, lrpignorebiastensor, *values\n ) # *values unpacks the list\n\n return module.forward(x)\n\n @staticmethod\n def backward(ctx, grad_output):\n \"\"\"\n In the backward pass we receive a Tensor containing the gradient of the loss\n with respect to the output, and we need to compute the gradient of the loss\n with respect to the input.\n \"\"\"\n\n input_, conv2dweight, conv2dbias, lrpignorebiastensor, *values = ctx.saved_tensors\n\n #######################################################################\n # reconstruct dictionary of config parameters\n def tensorlist_todict(values):\n propertynames = [\"in_channels\", \"out_channels\", \"kernel_size\", \"stride\", \"padding\", \"dilation\", \"groups\"]\n # but needs to turn tensors to ints or tuples!\n paramsdict = {}\n for i, n in enumerate(propertynames):\n v = values[i]\n if v.numel == 1:\n paramsdict[n] = v.item() # to cpu?\n else:\n alist = v.tolist()\n if len(alist) == 1:\n paramsdict[n] = alist[0]\n else:\n paramsdict[n] = tuple(alist)\n return paramsdict\n\n #######################################################################\n paramsdict = tensorlist_todict(values)\n\n if conv2dbias is None:\n module = nn.Conv2d(**paramsdict, bias=False)\n else:\n module = nn.Conv2d(**paramsdict, bias=True)\n module.bias = torch.nn.Parameter(conv2dbias)\n\n module.weight = torch.nn.Parameter(conv2dweight)\n\n pnconv = PosNegConv(module, ignorebias=lrpignorebiastensor.item())\n\n X = input_.clone().detach().requires_grad_(True)\n R = lrp_backward(_input=X, layer=pnconv, relevance_output=grad_output[0], eps0=1e-12, eps=0)\n\n return R, None, None" }, { "identifier": "CosineDistLRPClass", "path": "pixpnet/protonets/prp/lrp_general6.py", "snippet": "class CosineDistLRPClass(torch.autograd.Function):\n @staticmethod\n def forward(ctx, conv_features, model):\n ctx.save_for_backward(conv_features, model.prototype_vectors)\n if VERBOSE:\n print(\"cosine custom forward\")\n\n # An alternative distance metric used in TesNet. 
Alternative to\n # l2_convolution\n x = F.normalize(conv_features, p=2, dim=1)\n prototype_vectors = F.normalize(model.prototype_vectors, p=2, dim=1)\n similarities = F.conv2d(input=x, weight=prototype_vectors)\n # clip similarities in the range [-1, +1] (numerical error can\n # cause similarities to be outside this range)\n similarities = torch.clamp(similarities, -1, 1)\n distances = 1 - similarities # bounded [0, 2]\n\n similarities = torch.log((distances + 1) / (distances + model.epsilon))\n\n return similarities\n\n @staticmethod\n def backward(ctx, grad_output):\n \"\"\"\n In the backward pass we receive a Tensor containing the gradient of the\n loss with respect to the output, and we need to compute the gradient of\n the loss with respect to the input.\n \"\"\"\n if VERBOSE:\n print(\"cosine custom backward\")\n conv, prototypes = ctx.saved_tensors\n i = conv.shape[2]\n j = conv.shape[3]\n c = conv.shape[1]\n p = prototypes.shape[0]\n\n # Broadcast conv to Nxsize(conv) (No. of prototypes)\n conv = conv.repeat(p, 1, 1, 1) # NP x D x Hz x Wz\n prototype = prototypes.repeat(1, 1, i, j) # P x D x Hz x Wz\n\n conv = conv.squeeze() # think this does nothing\n\n cosine_dists = 1 - F.normalize(prototype, p=2, dim=1) * F.normalize(conv, p=2, dim=1)\n d = 1 / (cosine_dists**2 + 1e-12)\n\n denom = torch.sum(d, dim=1, keepdim=True) + 1e-12\n denom = denom.repeat(1, c, 1, 1) + 1e-12\n R = torch.div(d, denom)\n\n grad_output = grad_output.repeat(c, 1, 1, 1)\n grad_output = grad_output.permute(1, 0, 2, 3)\n\n R = R * grad_output\n\n R = torch.sum(R, dim=0)\n\n R = torch.unsqueeze(R, dim=0)\n\n return R, None, None" }, { "identifier": "EltwiseSumStacked2EpsWrapperFct", "path": "pixpnet/protonets/prp/lrp_general6.py", "snippet": "class EltwiseSumStacked2EpsWrapperFct(torch.autograd.Function):\n \"\"\"\n We can implement our own custom autograd Functions by subclassing\n torch.autograd.Function and implementing the forward and backward passes\n which operate on Tensors.\n \"\"\"\n\n @staticmethod\n def forward(ctx, stackedx, module, eps):\n epstensor = torch.tensor([eps], dtype=torch.float32, device=stackedx.device)\n ctx.save_for_backward(stackedx, epstensor)\n return module.forward(stackedx)\n\n @staticmethod\n def backward(ctx, grad_output):\n stackedx, epstensor = ctx.saved_tensors\n\n X = stackedx.clone().detach().requires_grad_(True)\n\n eps = epstensor.item()\n\n s2 = SumStacked2().to(X.device)\n Rtmp = lrp_backward(_input=X, layer=s2, relevance_output=grad_output[0], eps0=eps, eps=eps)\n\n return Rtmp, None, None" }, { "identifier": "L2LRPClass", "path": "pixpnet/protonets/prp/lrp_general6.py", "snippet": "class L2LRPClass(torch.autograd.Function):\n @staticmethod\n def forward(ctx, conv_features, model):\n # *values unpacks the list\n ctx.save_for_backward(conv_features, model.prototype_vectors)\n if VERBOSE:\n print(\"l2 custom forward\")\n x2 = conv_features**2\n x2_patch_sum = F.conv2d(input=x2, weight=model.ones)\n\n p2 = model.prototype_vectors**2\n p2 = torch.sum(p2, dim=(1, 2, 3))\n # p2 is a vector of shape (num_prototypes,)\n # then we reshape it to (num_prototypes, 1, 1)\n p2_reshape = p2.view(-1, 1, 1)\n\n xp = F.conv2d(input=conv_features, weight=model.prototype_vectors)\n intermediate_result = -2 * xp + p2_reshape # use broadcast\n # x2_patch_sum and intermediate_result are of the same shape\n distances = F.relu(x2_patch_sum + intermediate_result)\n\n similarities = torch.log((distances + 1) / (distances + model.epsilon))\n\n return similarities\n\n @staticmethod\n def 
backward(ctx, grad_output):\n \"\"\"\n In the backward pass we receive a Tensor containing the gradient of the\n loss with respect to the output, and we need to compute the gradient of\n the loss with respect to the input.\n \"\"\"\n if VERBOSE:\n print(\"l2 custom backward\")\n conv, prototypes = ctx.saved_tensors\n i = conv.shape[2]\n j = conv.shape[3]\n c = conv.shape[1]\n p = prototypes.shape[0]\n\n # Broadcast conv to Nxsize(conv) (No. of prototypes)\n conv = conv.repeat(p, 1, 1, 1)\n prototype = prototypes.repeat(1, 1, i, j)\n\n conv = conv.squeeze()\n\n l2 = (conv - prototype) ** 2\n d = 1 / (l2**2 + 1e-12)\n\n denom = torch.sum(d, dim=1, keepdim=True) + 1e-12\n denom = denom.repeat(1, c, 1, 1) + 1e-12\n R = torch.div(d, denom)\n\n grad_output = grad_output.repeat(c, 1, 1, 1)\n grad_output = grad_output.permute(1, 0, 2, 3)\n\n R = R * grad_output\n\n R = torch.sum(R, dim=0)\n\n R = torch.unsqueeze(R, dim=0)\n\n return R, None, None" }, { "identifier": "LinearLayerEpsWrapperFct", "path": "pixpnet/protonets/prp/lrp_general6.py", "snippet": "class LinearLayerEpsWrapperFct(torch.autograd.Function):\n \"\"\"\n We can implement our own custom autograd Functions by subclassing\n torch.autograd.Function and implementing the forward and backward passes\n which operate on Tensors.\n \"\"\"\n\n @staticmethod\n def forward(ctx, x, module, eps):\n \"\"\"\n In the forward pass we receive a Tensor containing the input and return\n a Tensor containing the output. ctx is a context object that can be used\n to stash information for backward computation. You can cache arbitrary\n objects for use in the backward pass using the ctx.save_for_backward\n method.\n \"\"\"\n\n def configvalues_totensorlist(module):\n\n propertynames = [\"in_features\", \"out_features\"]\n values = []\n for attr in propertynames:\n v = getattr(module, attr)\n # convert it into tensor\n # has no treatment for booleans yet\n if isinstance(v, int):\n v = torch.tensor([v], dtype=torch.int32, device=module.weight.device)\n elif isinstance(v, tuple):\n ################\n ################\n # FAILMODE: if it is not a tuple of ints but e.g. a tuple\n # of floats, or a tuple of a tuple\n\n v = torch.tensor(v, dtype=torch.int32, device=module.weight.device)\n else:\n print(\"v is neither int nor tuple. 
unexpected\")\n exit()\n values.append(v)\n return propertynames, values\n\n # stash module config params and trainable params\n propertynames, values = configvalues_totensorlist(module)\n epstensor = torch.tensor([eps], dtype=torch.float32, device=x.device)\n\n if module.bias is None:\n bias = None\n else:\n bias = module.bias.data.clone()\n ctx.save_for_backward(x, module.weight.data.clone(), bias, epstensor, *values) # *values unpacks the list\n\n return module.forward(x)\n\n @staticmethod\n def backward(ctx, grad_output):\n \"\"\"\n In the backward pass we receive a Tensor containing the gradient of the loss\n with respect to the output, and we need to compute the gradient of the loss\n with respect to the input.\n \"\"\"\n\n input_, weight, bias, epstensor, *values = ctx.saved_tensors\n\n #######################################################################\n # reconstruct dictionary of config parameters\n def tensorlist_todict(values):\n propertynames = [\"in_features\", \"out_features\"]\n # but needs to turn tensors to ints or tuples!\n paramsdict = {}\n for i, n in enumerate(propertynames):\n v = values[i]\n if v.numel == 1:\n paramsdict[n] = v.item() # to cpu?\n else:\n alist = v.tolist()\n if len(alist) == 1:\n paramsdict[n] = alist[0]\n else:\n paramsdict[n] = tuple(alist)\n return paramsdict\n\n #######################################################################\n paramsdict = tensorlist_todict(values)\n\n if bias is None:\n module = nn.Linear(**paramsdict, bias=False)\n else:\n module = nn.Linear(**paramsdict, bias=True)\n module.bias = torch.nn.Parameter(bias)\n\n module.weight = torch.nn.Parameter(weight)\n\n eps = epstensor.item()\n X = input_.clone().detach().requires_grad_(True)\n R = lrp_backward(_input=X, layer=module, relevance_output=grad_output[0], eps0=eps, eps=eps)\n\n return R, None, None" }, { "identifier": "MaxPool2DWrapperFct", "path": "pixpnet/protonets/prp/lrp_general6.py", "snippet": "class MaxPool2DWrapperFct(torch.autograd.Function):\n \"\"\"\n We can implement our own custom autograd Functions by subclassing\n torch.autograd.Function and implementing the forward and backward passes\n which operate on Tensors.\n \"\"\"\n\n @staticmethod\n def forward(ctx, x, module):\n \"\"\"\n In the forward pass we receive a Tensor containing the input and return\n a Tensor containing the output. ctx is a context object that can be used\n to stash information for backward computation. You can cache arbitrary\n objects for use in the backward pass using the ctx.save_for_backward\n method.\n \"\"\"\n\n def configvalues_totensorlist(module, device):\n\n propertynames = [\"kernel_size\", \"stride\", \"padding\", \"dilation\", \"return_indices\", \"ceil_mode\"]\n values = []\n for attr in propertynames:\n v = getattr(module, attr)\n # convert it into tensor\n # has no treatment for booleans yet\n if isinstance(v, bool):\n v = torch.tensor([v], dtype=torch.bool, device=device)\n elif isinstance(v, int):\n v = torch.tensor([v], dtype=torch.int32, device=device)\n elif isinstance(v, bool):\n\n v = torch.tensor([v], dtype=torch.int32, device=device)\n elif isinstance(v, tuple):\n ################\n ################\n # FAILMODE: if it is not a tuple of ints but e.g. a tuple\n # of floats, or a tuple of a tuple\n\n v = torch.tensor(v, dtype=torch.int32, device=device)\n else:\n print(\"v is neither int nor tuple. 
unexpected\")\n exit()\n values.append(v)\n return propertynames, values\n\n # stash module config params and trainable params\n propertynames, values = configvalues_totensorlist(module, x.device)\n ctx.save_for_backward(x, *values) # *values unpacks the list\n\n if VERBOSE:\n print(\"maxpool2d custom forward\")\n return module.forward(x)\n\n @staticmethod\n def backward(ctx, grad_output):\n \"\"\"\n In the backward pass we receive a Tensor containing the gradient of the loss\n with respect to the output, and we need to compute the gradient of the loss\n with respect to the input.\n \"\"\"\n\n input_, *values = ctx.saved_tensors\n\n #######################################################################\n # reconstruct dictionary of config parameters\n def tensorlist_todict(values):\n propertynames = [\"kernel_size\", \"stride\", \"padding\", \"dilation\", \"return_indices\", \"ceil_mode\"]\n # idea: paramsdict={ n: values[i]\n # for i,n in enumerate(propertynames) }\n # but needs to turn tensors to ints or tuples!\n paramsdict = {}\n for i, n in enumerate(propertynames):\n v = values[i]\n if v.numel == 1:\n paramsdict[n] = v.item() # to cpu?\n else:\n alist = v.tolist()\n if len(alist) == 1:\n paramsdict[n] = alist[0]\n else:\n paramsdict[n] = tuple(alist)\n return paramsdict\n\n paramsdict = tensorlist_todict(values)\n\n layerclass = torch.nn.MaxPool2d(**paramsdict)\n\n X = input_.clone().detach().requires_grad_(True)\n with torch.enable_grad():\n Z = layerclass.forward(X)\n relevance_output_data = grad_output[0].clone().detach().unsqueeze(0)\n Z.backward(relevance_output_data)\n R = X.grad\n\n return R, None" }, { "identifier": "ReluWrapperFct", "path": "pixpnet/protonets/prp/lrp_general6.py", "snippet": "class ReluWrapperFct(torch.autograd.Function):\n \"\"\"\n We can implement our own custom autograd Functions by subclassing\n torch.autograd.Function and implementing the forward and backward passes\n which operate on Tensors.\n \"\"\"\n\n @staticmethod\n def forward(ctx, x, module):\n # stash module config params and trainable params\n return module.forward(x)\n\n @staticmethod\n def backward(ctx, grad_output):\n return grad_output, None" }, { "identifier": "SigmoidWrapperFct", "path": "pixpnet/protonets/prp/lrp_general6.py", "snippet": "class SigmoidWrapperFct(torch.autograd.Function):\n \"\"\"\n We can implement our own custom autograd Functions by subclassing\n torch.autograd.Function and implementing the forward and backward passes\n which operate on Tensors.\n \"\"\"\n\n @staticmethod\n def forward(ctx, x, module):\n return module.forward(x)\n\n @staticmethod\n def backward(ctx, grad_output):\n return grad_output, None" }, { "identifier": "SumStacked2", "path": "pixpnet/protonets/prp/lrp_general6.py", "snippet": "class SumStacked2(nn.Module):\n def __init__(self):\n super(SumStacked2, self).__init__()\n\n @staticmethod\n def forward(x): # from X=torch.stack([X0, X1], dim=0)\n assert x.shape[0] == 2\n return torch.sum(x, dim=0)" }, { "identifier": "bnafterconv_overwrite_intoconv", "path": "pixpnet/protonets/prp/lrp_general6.py", "snippet": "def bnafterconv_overwrite_intoconv(conv, bn): # after visatt\n\n if VERBOSE:\n print(conv, bn)\n\n assert isinstance(bn, nn.BatchNorm2d)\n assert isinstance(conv, nn.Conv2d)\n\n s = (bn.running_var + bn.eps) ** 0.5\n w = bn.weight\n b = bn.bias\n m = bn.running_mean\n conv.weight = torch.nn.Parameter(conv.weight * (w / s).reshape(-1, 1, 1, 1))\n\n if conv.bias is None:\n conv.bias = torch.nn.Parameter((0 - m) * (w / s) + b)\n else:\n conv.bias 
= torch.nn.Parameter((conv.bias - m) * (w / s) + b)\n return conv" }, { "identifier": "get_lrpwrapperformodule", "path": "pixpnet/protonets/prp/lrp_general6.py", "snippet": "def get_lrpwrapperformodule(module, lrp_params, lrp_layer2method, thisis_inputconv_andiwant_zbeta=False):\n if isinstance(module, nn.ReLU):\n key = \"nn.ReLU\"\n if key not in lrp_layer2method:\n print(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n raise LRLookupNotFoundError(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n\n # default relu_wrapper_fct()\n autogradfunction = lrp_layer2method[key]()\n return ZeroparamWrapperClass(module, autogradfunction=autogradfunction)\n\n elif isinstance(module, nn.Sigmoid):\n key = \"nn.Sigmoid\"\n if key not in lrp_layer2method:\n print(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n raise LRLookupNotFoundError(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n\n # default relu_wrapper_fct()\n autogradfunction = lrp_layer2method[key]()\n return ZeroparamWrapperClass(module, autogradfunction=autogradfunction)\n\n elif isinstance(module, nn.BatchNorm2d):\n\n key = \"nn.BatchNorm2d\"\n if key not in lrp_layer2method:\n print(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n raise LRLookupNotFoundError(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n\n # default relu_wrapper_fct()\n autogradfunction = lrp_layer2method[key]()\n return ZeroparamWrapperClass(module, autogradfunction=autogradfunction)\n\n elif isinstance(module, nn.Linear):\n\n key = \"nn.Linear\"\n if key not in lrp_layer2method:\n print(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n raise LRLookupNotFoundError(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n\n # default linearlayer_eps_wrapper_fct()\n autogradfunction = lrp_layer2method[key]()\n return OneParamWrapperClass(module, autogradfunction=autogradfunction, parameter1=lrp_params[\"linear_eps\"])\n\n elif isinstance(module, nn.Conv2d):\n if thisis_inputconv_andiwant_zbeta:\n return Conv2DZBetaWrapperClass(module, lrp_params[\"conv2d_ignorebias\"])\n else:\n key = \"nn.Conv2d\"\n if key not in lrp_layer2method:\n print(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n raise LRLookupNotFoundError(\n \"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key\n )\n\n # default conv2d_beta0_wrapper_fct()\n autogradfunction = lrp_layer2method[key]()\n return OneParamWrapperClass(\n module, autogradfunction=autogradfunction, parameter1=lrp_params[\"conv2d_ignorebias\"]\n )\n\n elif isinstance(module, nn.AdaptiveAvgPool2d):\n\n key = \"nn.AdaptiveAvgPool2d\"\n if key not in lrp_layer2method:\n print(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n raise LRLookupNotFoundError(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n\n # default adaptiveavgpool2d_wrapper_fct()\n autogradfunction = lrp_layer2method[key]()\n return OneParamWrapperClass(module, autogradfunction=autogradfunction, parameter1=lrp_params[\"pooling_eps\"])\n\n elif isinstance(module, nn.AvgPool2d):\n\n key = \"nn.AvgPool2d\"\n if key not in lrp_layer2method:\n print(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n raise LRLookupNotFoundError(\"found no 
dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n\n # default adaptiveavgpool2d_wrapper_fct()\n autogradfunction = lrp_layer2method[key]()\n return OneParamWrapperClass(module, autogradfunction=autogradfunction, parameter1=lrp_params[\"pooling_eps\"])\n\n elif isinstance(module, nn.MaxPool2d):\n\n key = \"nn.MaxPool2d\"\n if key not in lrp_layer2method:\n print(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n raise LRLookupNotFoundError(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n\n # default maxpool2d_wrapper_fct()\n autogradfunction = lrp_layer2method[key]()\n return ZeroparamWrapperClass(module, autogradfunction=autogradfunction)\n\n elif isinstance(module, SumStacked2): # resnet specific\n\n key = \"sum_stacked2\"\n if key not in lrp_layer2method:\n print(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n raise LRLookupNotFoundError(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n\n # default eltwisesum_stacked2_eps_wrapper_fct()\n autogradfunction = lrp_layer2method[key]()\n return OneParamWrapperClass(module, autogradfunction=autogradfunction, parameter1=lrp_params[\"eltwise_eps\"])\n\n elif isinstance(module, ClampLayer): # densenet specific\n\n key = \"clamplayer\"\n if key not in lrp_layer2method:\n print(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n raise LRLookupNotFoundError(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n\n # default relu_wrapper_fct()\n autogradfunction = lrp_layer2method[key]()\n return ZeroparamWrapperClass(module, autogradfunction=autogradfunction)\n\n elif isinstance(module, TensorBiasedLinearLayer): # densenet specific\n\n key = \"tensorbiased_linearlayer\"\n if key not in lrp_layer2method:\n print(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n raise LRLookupNotFoundError(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n\n # default relu_wrapper_fct()\n autogradfunction = lrp_layer2method[key]()\n return OneParamWrapperClass(module, autogradfunction=autogradfunction, parameter1=lrp_params[\"linear_eps\"])\n\n elif isinstance(module, TensorBiasedConvLayer): # densenet specific\n\n key = \"tensorbiased_convlayer\"\n if key not in lrp_layer2method:\n print(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n raise LRLookupNotFoundError(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n\n # default relu_wrapper_fct()\n autogradfunction = lrp_layer2method[key]()\n return OneParamWrapperClass(\n module, autogradfunction=autogradfunction, parameter1=lrp_params[\"conv2d_ignorebias\"]\n )\n\n else:\n key = \"nn.MaxPool2d\"\n if key not in lrp_layer2method:\n print(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n raise LRLookupNotFoundError(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n\n # default maxpool2d_wrapper_fct()\n autogradfunction = lrp_layer2method[key]()\n return ZeroparamWrapperClass(module, autogradfunction=autogradfunction)\n print(\"found no lookup for this module:\", module)\n raise LRLookupNotFoundError(\"found no lookup for this module:\", module)" }, { "identifier": "resetbn", "path": "pixpnet/protonets/prp/lrp_general6.py", "snippet": "def resetbn(bn):\n assert isinstance(bn, 
nn.BatchNorm2d)\n\n bnc = copy.deepcopy(bn)\n bnc.reset_parameters()\n\n return bnc" }, { "identifier": "BasicBlock", "path": "pixpnet/protonets/prp/resnet_features.py", "snippet": "class BasicBlock(nn.Module):\n # class attribute\n expansion = 1\n num_layers = 2\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n # only conv with possibly not 1 stride\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = nn.BatchNorm2d(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = nn.BatchNorm2d(planes)\n\n # if stride is not 1 then self.downsample cannot be None\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n # the residual connection\n out += identity\n out = self.relu(out)\n\n return out\n\n def block_conv_info(self):\n block_kernel_sizes = [3, 3]\n block_strides = [self.stride, 1]\n block_paddings = [1, 1]\n\n return block_kernel_sizes, block_strides, block_paddings" }, { "identifier": "Bottleneck", "path": "pixpnet/protonets/prp/resnet_features.py", "snippet": "class Bottleneck(nn.Module):\n # class attribute\n expansion = 4\n num_layers = 3\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(Bottleneck, self).__init__()\n self.conv1 = conv1x1(inplanes, planes)\n self.bn1 = nn.BatchNorm2d(planes)\n # only conv with possibly not 1 stride\n self.conv2 = conv3x3(planes, planes, stride)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = conv1x1(planes, planes * self.expansion)\n self.bn3 = nn.BatchNorm2d(planes * self.expansion)\n self.relu = nn.ReLU(inplace=True)\n\n # if stride is not 1 then self.downsample cannot be None\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n out = self.relu(out)\n\n return out\n\n def block_conv_info(self):\n block_kernel_sizes = [1, 3, 1]\n block_strides = [1, self.stride, 1]\n block_paddings = [0, 1, 0]\n\n return block_kernel_sizes, block_strides, block_paddings" }, { "identifier": "ResNetFeatures", "path": "pixpnet/protonets/prp/resnet_features.py", "snippet": "class ResNetFeatures(nn.Module):\n \"\"\"\n the convolutional layers of ResNet\n the average pooling and final fully convolutional layer is removed\n \"\"\"\n\n def __init__(self, block, layers, num_classes=1000, zero_init_residual=False):\n super(ResNetFeatures, self).__init__()\n\n self.inplanes = 64\n\n # the first convolutional layer before the structured sequence of blocks\n # self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,\n # bias=False)\n self.conv1_no_act = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)\n self.bn1 = nn.BatchNorm2d(64)\n self.conv1 = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n # comes from the first conv and the following max pool\n self.kernel_sizes = [7, 3]\n self.strides = [2, 2]\n self.paddings = [3, 1]\n\n # the following layers, each layer is a sequence of blocks\n self.block = block\n self.layers = layers\n 
self.layer1 = self._make_layer(block=block, planes=64, num_blocks=self.layers[0])\n self.layer2 = self._make_layer(block=block, planes=128, num_blocks=self.layers[1], stride=2)\n self.layer3 = self._make_layer(block=block, planes=256, num_blocks=self.layers[2], stride=2)\n self.layer4 = self._make_layer(block=block, planes=512, num_blocks=self.layers[3], stride=2)\n\n # initialize the parameters\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode=\"fan_out\", nonlinearity=\"relu\")\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n # Zero-initialize the last BN in each residual branch,\n # so that the residual branch starts with zeros, and each residual\n # block behaves like an identity.\n # This improves the model by 0.2~0.3% according to\n # https://arxiv.org/abs/1706.02677\n if zero_init_residual:\n for m in self.modules():\n if isinstance(m, Bottleneck):\n nn.init.constant_(m.bn3.weight, 0)\n elif isinstance(m, BasicBlock):\n nn.init.constant_(m.bn2.weight, 0)\n\n def _make_layer(self, block, planes, num_blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n conv1x1(self.inplanes, planes * block.expansion, stride),\n nn.BatchNorm2d(planes * block.expansion),\n )\n\n layers = [block(self.inplanes, planes, stride, downsample)]\n # only the first block has downsample that is possibly not None\n\n self.inplanes = planes * block.expansion\n for _ in range(1, num_blocks):\n layers.append(block(self.inplanes, planes))\n\n # keep track of every block's conv size, stride size, and padding size\n for each_block in layers:\n block_kernel_sizes, block_strides, block_paddings = each_block.block_conv_info()\n self.kernel_sizes.extend(block_kernel_sizes)\n self.strides.extend(block_strides)\n self.paddings.extend(block_paddings)\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1_no_act(x)\n x = self.bn1(x)\n x = self.conv1(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n return x\n\n def conv_info(self):\n return self.kernel_sizes, self.strides, self.paddings\n\n def num_layers(self):\n \"\"\"\n the number of conv layers in the network, not counting the number\n of bypass layers\n \"\"\"\n return (\n self.block.num_layers * self.layers[0]\n + self.block.num_layers * self.layers[1]\n + self.block.num_layers * self.layers[2]\n + self.block.num_layers * self.layers[3]\n + 1\n )\n\n def __repr__(self):\n template = \"resnet{}_features\"\n return template.format(self.num_layers() + 1)" } ]
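The bnafterconv_overwrite_intoconv helper shown in the context above folds an eval-mode BatchNorm2d into the preceding convolution: w' = w * gamma / sqrt(var + eps) and b' = (b - mean) * gamma / sqrt(var + eps) + beta. A small numerical check of that identity on freshly constructed toy layers (shapes and statistics are made up for the test, not taken from the repository):

import torch
from torch import nn

torch.manual_seed(0)
conv = nn.Conv2d(3, 8, kernel_size=3, padding=1, bias=True)
bn = nn.BatchNorm2d(8).eval()
# give the BN non-trivial running statistics so the check is meaningful
bn.running_mean.uniform_(-1, 1)
bn.running_var.uniform_(0.5, 2.0)
bn.weight.data.uniform_(0.5, 1.5)
bn.bias.data.uniform_(-0.5, 0.5)

x = torch.randn(2, 3, 16, 16)
with torch.no_grad():
    ref = bn(conv(x))
    # fold the BN affine transform into the conv weights and bias
    s = (bn.running_var + bn.eps).sqrt()
    fused = nn.Conv2d(3, 8, kernel_size=3, padding=1, bias=True)
    fused.weight.copy_(conv.weight * (bn.weight / s).reshape(-1, 1, 1, 1))
    fused.bias.copy_((conv.bias - bn.running_mean) * (bn.weight / s) + bn.bias)
    assert torch.allclose(fused(x), ref, atol=1e-5)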
import copy
import torch
from collections import OrderedDict
from torch import nn
from torchvision import datasets
from pixpnet.protonets.prp.lrp_general6 import (
    AdaptiveAvgPool2DWrapperFct,
    Conv2DBeta0WrapperFct,
    CosineDistLRPClass,
    EltwiseSumStacked2EpsWrapperFct,
    L2LRPClass,
    LinearLayerEpsWrapperFct,
    MaxPool2DWrapperFct,
    ReluWrapperFct,
    SigmoidWrapperFct,
    SumStacked2,
    bnafterconv_overwrite_intoconv,
    get_lrpwrapperformodule,
    resetbn,
)
from pixpnet.protonets.prp.resnet_features import BasicBlock, Bottleneck, ResNetFeatures
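Several of the wrapper classes imported above (for example LinearLayerEpsWrapperFct and Conv2DBeta0WrapperFct in the context) delegate their backward passes to an lrp_backward helper whose body is not shown in this excerpt. Purely as orientation, the sketch below implements the generic textbook epsilon rule for a linear layer with the usual autograd trick (detach the ratio, take the gradient, multiply by the input); it is a stand-in under that assumption, not the repository's lrp_backward, and it simplifies the stabiliser to a plain +eps:

import torch
from torch import nn

def eps_lrp_linear(layer: nn.Linear, x: torch.Tensor,
                   relevance_out: torch.Tensor, eps: float = 1e-6) -> torch.Tensor:
    # Generic epsilon rule: R_in_i = x_i * sum_j w_ji * R_out_j / (z_j + eps)
    x = x.clone().detach().requires_grad_(True)
    z = layer(x)
    s = (relevance_out / (z + eps)).detach()
    (z * s).sum().backward()                 # x.grad is now W^T s
    return (x * x.grad).detach()

layer = nn.Linear(4, 3)
R_in = eps_lrp_linear(layer, torch.randn(1, 4), torch.randn(1, 3))
print(R_in.shape)                            # torch.Size([1, 4])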
12,326
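The L2LRPClass snippet in the context above (used by generate_prp_image in the code below) measures prototype similarity from squared L2 distances computed with the expansion ||x - p||^2 = sum(x^2) - 2*x.p + sum(p^2); the first term comes from convolving x^2 with model.ones, assumed here to be an all-ones kernel of the prototype shape. A minimal check of that identity on random toy tensors, independent of the repository's models:

import torch
import torch.nn.functional as F

torch.manual_seed(0)
x = torch.randn(1, 64, 7, 7)           # conv feature map
protos = torch.randn(10, 64, 1, 1)     # 1x1 prototype vectors
ones = torch.ones_like(protos)         # stand-in for model.ones (assumed all-ones kernel)

x2_patch_sum = F.conv2d(x * x, ones)                      # sum of x^2 under each prototype window
p2 = (protos ** 2).sum(dim=(1, 2, 3)).view(-1, 1, 1)      # sum of p^2, one value per prototype
xp = F.conv2d(x, protos)                                  # inner products x . p
dists = F.relu(x2_patch_sum - 2 * xp + p2)                # ||x - p||^2, clamped at zero

# agrees with the direct computation at an arbitrary location / prototype
d_direct = ((x[0, :, 2, 3] - protos[4, :, 0, 0]) ** 2).sum()
assert torch.allclose(dists[0, 4, 2, 3], d_direct, atol=1e-4)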
""" return _resnet_canonized("resnet101", BottleneckFused, [3, 4, 23, 3], **kwargs) class SumLRP(torch.autograd.Function): @staticmethod def forward(ctx, x): ctx.save_for_backward(x) # *values unpacks the list if VERBOSE: print("ctx.needs_input_grad", ctx.needs_input_grad) print("sum custom forward") return torch.sum(x, dim=(1, 2, 3)) @staticmethod def backward(ctx, grad_output): """ In the backward pass we receive a Tensor containing the gradient of the loss with respect to the output, and we need to compute the gradient of the loss with respect to the input. """ input_ = ctx.saved_tensors X = input_.clone().detach().requires_grad_(True) with torch.enable_grad(): Z = torch.sum(X, dim=(1, 2, 3)) relevance_output_data = grad_output[0].clone().detach().unsqueeze(0) R = relevance_output_data * X / Z return R, None def generate_prp_image(inputs, pno, model, config): model.train(False) inputs.requires_grad = True with torch.enable_grad(): conv_features = model.conv_features(inputs) if config.model.distance == "cosine": new_dist = CosineDistLRPClass.apply else: new_dist = L2LRPClass.apply similarities = new_dist(conv_features, model) # global max pooling min_distances = model.max_layer(similarities) min_distances = min_distances.view(-1, model.num_prototypes) # For individual prototype (min_distances[:, pno]).backward() rel = inputs.grad.data prp = imshow_im(rel.to("cpu")) return prp class ImageFolderWithPaths(datasets.ImageFolder): """Custom dataset that includes image file paths. Extends torchvision.datasets.ImageFolder """ # override the __getitem__ method. this is the method that dataloader calls def __getitem__(self, index): # this is what ImageFolder normally returns original_tuple = super(ImageFolderWithPaths, self).__getitem__(index) # the image file path path = self.imgs[index][0] # make a new tuple that includes original and the path tuple_with_path = original_tuple + (path,) return tuple_with_path def setbyname(obj, name, value): def iteratset(obj, components, value): if not hasattr(obj, components[0]): if VERBOSE: print(components[0]) return False elif len(components) == 1: setattr(obj, components[0], value) return True else: nextobj = getattr(obj, components[0]) return iteratset(nextobj, components[1:], value) components = name.split(".") success = iteratset(obj, components, value) print("success =", success, "name =", name, "obj =", str(obj)[:20], "value =", str(value)[:20]) return success base_architecture_to_features = { "resnet18": resnet18_canonized, "resnet34": resnet34_canonized, "resnet50": resnet50_canonized, "resnet101": resnet101_canonized, "resnet152": resnet152_canonized, } def prp_canonized_model(ppnet, config): device = ppnet.prototype_vectors.device base_arch = config.model.feature_extractor distance = config.model.distance model = base_architecture_to_features[base_arch](pretrained=False) model = model.to(device) lrp_params_def1 = { "conv2d_ignorebias": True, "eltwise_eps": 1e-6, "linear_eps": 1e-6, "pooling_eps": 1e-6, "use_zbeta": True, } lrp_layer2method = {
""" Copyright (c) 2022-2023 Mitsubishi Electric Research Laboratories (MERL) Copyright (c) 2022 Srishti Gautam, Marina Hohne, Robert Jenssen, Michael Kampffmeyer SPDX-License-Identifier: AGPL-3.0-or-later SPDX-License-Identifier: MIT """ def imshow_im(hm, q=100): hm = hm.squeeze().sum(dim=0).detach() return hm # partial replacement of BN, use own classes, no pretrained loading class TorchModuleNotFoundError(Exception): pass class BasicBlockFused(BasicBlock): expansion = 1 def __init__(self, inplanes, planes, stride=1, downsample=None): super(BasicBlockFused, self).__init__(inplanes, planes, stride, downsample) # own self.elt = SumStacked2() # eltwisesum2() def forward(self, x): identity = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) if self.downsample is not None: identity = self.downsample(x) out = self.elt(torch.stack([out, identity], dim=0)) # self.elt(out,identity) out = self.relu(out) return out class BottleneckFused(Bottleneck): # Bottleneck in torchvision places the stride for downsampling at 3x3 # convolution(self.conv2) while original implementation places the stride # at the first 1x1 convolution(self.conv1) according to "Deep residual # learning for image recognition"https://arxiv.org/abs/1512.03385. # This variant is also known as ResNet V1.5 and improves accuracy according # to # https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch. expansion = 4 def __init__(self, inplanes, planes, stride=1, downsample=None): super(BottleneckFused, self).__init__(inplanes, planes, stride, downsample) # own self.elt = SumStacked2() # eltwisesum2() def forward(self, x): identity = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) out = self.conv3(out) out = self.bn3(out) if self.downsample is not None: identity = self.downsample(x) out = self.elt(torch.stack([out, identity], dim=0)) # self.elt(out,identity) out = self.relu(out) return out VERBOSE = False class ResNetCanonized(ResNetFeatures): def __init__(self, block, layers, num_classes=1000, zero_init_residual=False): super(ResNetCanonized, self).__init__(block, layers, num_classes=1000, zero_init_residual=False) # runs in your current module to find the object layer3.1.conv2, and # replaces it by the object stored in value # (see success=iteratset(self,components,value) as initializer, # can be modified to run in another class when replacing that self) def setbyname(self, name, value): def iteratset(obj, components, value): if not hasattr(obj, components[0]): return False elif len(components) == 1: setattr(obj, components[0], value) return True else: nextobj = getattr(obj, components[0]) return iteratset(nextobj, components[1:], value) components = name.split(".") success = iteratset(self, components, value) if VERBOSE: print("success =", success, "name =", name, "obj = resnet", "value =", str(value)[:20]) return success def copyfromresnet(self, net, lrp_params, lrp_layer2method): # --copy linear # --copy conv2, while fusing bns # --reset bn # first conv, then bn, # means: when encounter bn, find the conv before -- implementation # dependent updated_layers_names = [] last_src_module_name = None last_src_module = None for src_module_name, src_module in net.named_modules(): if VERBOSE: print("at src_module_name", src_module_name) if src_module_name.startswith("module_dict."): src_module_name = src_module_name.split(".", 1)[1] if isinstance(src_module, nn.Linear): # copy linear layers 
if VERBOSE: print("is Linear") wrapped = get_lrpwrapperformodule(copy.deepcopy(src_module), lrp_params, lrp_layer2method) if VERBOSE: print(wrapped) if not self.setbyname(src_module_name, wrapped): raise TorchModuleNotFoundError( "could not find module " + src_module_name + " in target net to copy" ) updated_layers_names.append(src_module_name) # end of if if isinstance(src_module, nn.Conv2d): # store conv2d layers if VERBOSE: print("is Conv2d") last_src_module_name = src_module_name last_src_module = src_module # end of if if isinstance(src_module, nn.BatchNorm2d): # conv-bn chain if VERBOSE: print("is BatchNorm2d") if lrp_params["use_zbeta"] and (last_src_module_name == "conv1"): thisis_inputconv_andiwant_zbeta = True else: thisis_inputconv_andiwant_zbeta = False m = copy.deepcopy(last_src_module) m = bnafterconv_overwrite_intoconv(m, bn=src_module) # wrap conv wrapped = get_lrpwrapperformodule( m, lrp_params, lrp_layer2method, thisis_inputconv_andiwant_zbeta=(thisis_inputconv_andiwant_zbeta) ) if VERBOSE: print(wrapped) if not self.setbyname(last_src_module_name, wrapped): raise TorchModuleNotFoundError( "could not find module " + last_src_module_name + " in target net to copy" ) updated_layers_names.append(last_src_module_name) # wrap batchnorm wrapped = get_lrpwrapperformodule(resetbn(src_module), lrp_params, lrp_layer2method) if VERBOSE: print(wrapped) if not self.setbyname(src_module_name, wrapped): raise TorchModuleNotFoundError( "could not find module " + src_module_name + " in target net to copy" ) updated_layers_names.append(src_module_name) # end of if if VERBOSE: print("\n") # sum_stacked2 is present only in the targetclass, so must iterate here for target_module_name, target_module in self.named_modules(): if isinstance(target_module, (nn.ReLU, nn.AdaptiveAvgPool2d, nn.MaxPool2d)): wrapped = get_lrpwrapperformodule(target_module, lrp_params, lrp_layer2method) if VERBOSE: print(wrapped) if not self.setbyname(target_module_name, wrapped): raise TorchModuleNotFoundError( "could not find module " + src_module_name + " in target net to copy" ) updated_layers_names.append(target_module_name) if isinstance(target_module, SumStacked2): wrapped = get_lrpwrapperformodule(target_module, lrp_params, lrp_layer2method) if VERBOSE: print(wrapped) if not self.setbyname(target_module_name, wrapped): raise TorchModuleNotFoundError( "could not find module " + target_module_name + " in target net , impossible!" ) updated_layers_names.append(target_module_name) to_delete = [] for target_module_name, target_module in self.named_modules(): if target_module_name not in updated_layers_names: if not (target_module_name.endswith(".module") or target_module_name.endswith(".downsample")): if ( target_module_name and "." 
not in target_module_name and hasattr(net, "module_dict") and not hasattr(net.module_dict, target_module_name) ): print("Replacing", target_module_name, "with identity") to_delete.append(target_module_name) setattr(self, target_module_name, nn.Identity()) elif target_module_name.split(".", 1)[0] in to_delete: if VERBOSE: print(target_module_name, "part of to_delete") else: print("not updated:", target_module_name) else: if VERBOSE: print("updated:", target_module_name) class AddonCanonized(nn.Module): def __init__(self, in_channels=512, out_channels=128): super(AddonCanonized, self).__init__() self.addon = nn.Sequential( OrderedDict( ( ("conv1", nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1)), ("relu1", nn.ReLU()), ("conv_last", nn.Conv2d(in_channels=out_channels, out_channels=out_channels, kernel_size=1)), ("sigmoid", nn.Sigmoid()), ) ) ) def _addon_canonized(in_channels=512, out_channels=128, pretrained=False, progress=True, **kwargs): model = AddonCanonized(in_channels=in_channels, out_channels=out_channels) return model def _resnet_canonized(arch, block, layers, pretrained, progress, **kwargs): model = ResNetCanonized(block, layers, **kwargs) return model def resnet18_canonized(pretrained=False, progress=True, **kwargs): r"""ResNet-18 model from `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_ Args: pretrained (bool): If True, returns a model pretrained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr """ return _resnet_canonized("resnet18", BasicBlockFused, [2, 2, 2, 2], pretrained, progress, **kwargs) def resnet50_canonized(pretrained=False, progress=True, **kwargs): r"""ResNet-50 model from `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_ Args: pretrained (bool): If True, returns a model pretrained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr """ return _resnet_canonized("resnet50", BottleneckFused, [3, 4, 6, 3], pretrained, progress, **kwargs) def resnet34_canonized(pretrained=False, progress=True, **kwargs): r"""ResNet-18 model from `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_ Args: pretrained (bool): If True, returns a model pretrained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr """ return _resnet_canonized("resnet34", BasicBlockFused, [3, 4, 6, 3], **kwargs) def resnet152_canonized(pretrained=False, progress=True, **kwargs): r"""ResNet-18 model from `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_ Args: pretrained (bool): If True, returns a model pretrained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr """ return _resnet_canonized("resnet152", BottleneckFused, [3, 8, 36, 3], **kwargs) def resnet101_canonized(pretrained=False, progress=True, **kwargs): r"""ResNet-18 model from `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_ Args: pretrained (bool): If True, returns a model pretrained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr """ return _resnet_canonized("resnet101", BottleneckFused, [3, 4, 23, 3], **kwargs) class SumLRP(torch.autograd.Function): @staticmethod def forward(ctx, x): ctx.save_for_backward(x) # *values unpacks the list if VERBOSE: print("ctx.needs_input_grad", ctx.needs_input_grad) print("sum custom forward") return 
torch.sum(x, dim=(1, 2, 3)) @staticmethod def backward(ctx, grad_output): """ In the backward pass we receive a Tensor containing the gradient of the loss with respect to the output, and we need to compute the gradient of the loss with respect to the input. """ input_ = ctx.saved_tensors X = input_.clone().detach().requires_grad_(True) with torch.enable_grad(): Z = torch.sum(X, dim=(1, 2, 3)) relevance_output_data = grad_output[0].clone().detach().unsqueeze(0) R = relevance_output_data * X / Z return R, None def generate_prp_image(inputs, pno, model, config): model.train(False) inputs.requires_grad = True with torch.enable_grad(): conv_features = model.conv_features(inputs) if config.model.distance == "cosine": new_dist = CosineDistLRPClass.apply else: new_dist = L2LRPClass.apply similarities = new_dist(conv_features, model) # global max pooling min_distances = model.max_layer(similarities) min_distances = min_distances.view(-1, model.num_prototypes) # For individual prototype (min_distances[:, pno]).backward() rel = inputs.grad.data prp = imshow_im(rel.to("cpu")) return prp class ImageFolderWithPaths(datasets.ImageFolder): """Custom dataset that includes image file paths. Extends torchvision.datasets.ImageFolder """ # override the __getitem__ method. this is the method that dataloader calls def __getitem__(self, index): # this is what ImageFolder normally returns original_tuple = super(ImageFolderWithPaths, self).__getitem__(index) # the image file path path = self.imgs[index][0] # make a new tuple that includes original and the path tuple_with_path = original_tuple + (path,) return tuple_with_path def setbyname(obj, name, value): def iteratset(obj, components, value): if not hasattr(obj, components[0]): if VERBOSE: print(components[0]) return False elif len(components) == 1: setattr(obj, components[0], value) return True else: nextobj = getattr(obj, components[0]) return iteratset(nextobj, components[1:], value) components = name.split(".") success = iteratset(obj, components, value) print("success =", success, "name =", name, "obj =", str(obj)[:20], "value =", str(value)[:20]) return success base_architecture_to_features = { "resnet18": resnet18_canonized, "resnet34": resnet34_canonized, "resnet50": resnet50_canonized, "resnet101": resnet101_canonized, "resnet152": resnet152_canonized, } def prp_canonized_model(ppnet, config): device = ppnet.prototype_vectors.device base_arch = config.model.feature_extractor distance = config.model.distance model = base_architecture_to_features[base_arch](pretrained=False) model = model.to(device) lrp_params_def1 = { "conv2d_ignorebias": True, "eltwise_eps": 1e-6, "linear_eps": 1e-6, "pooling_eps": 1e-6, "use_zbeta": True, } lrp_layer2method = {
"nn.ReLU": ReluWrapperFct,
7
2023-12-06 23:49:31+00:00
16k
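The ResNet canonization record above fuses each BatchNorm2d into the preceding Conv2d via a helper (bnafterconv_overwrite_intoconv) whose implementation is not included in the excerpt. The sketch below shows the standard BatchNorm-folding arithmetic such a helper is presumed to apply; the function name and signature here are illustrative, only the folding formula itself is standard.

import copy

import torch
import torch.nn as nn


def fold_bn_into_conv(conv: nn.Conv2d, bn: nn.BatchNorm2d) -> nn.Conv2d:
    """Return a copy of `conv` whose weights absorb the following BatchNorm.

    Folding: w' = w * gamma / sqrt(var + eps)
             b' = (b - mean) * gamma / sqrt(var + eps) + beta
    """
    fused = copy.deepcopy(conv)
    # one scale factor per output channel
    scale = bn.weight / torch.sqrt(bn.running_var + bn.eps)
    fused.weight.data = conv.weight.data * scale.reshape(-1, 1, 1, 1)
    old_bias = conv.bias.data if conv.bias is not None else torch.zeros_like(bn.running_mean)
    fused.bias = nn.Parameter((old_bias - bn.running_mean) * scale + bn.bias)
    return fused

In the record itself, the BatchNorm module is afterwards passed through resetbn and wrapped separately, so the fused convolution carries the combined conv-BN computation.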
open-mmlab/PIA
animatediff/pipelines/i2v_pipeline.py
[ { "identifier": "InflatedConv3d", "path": "animatediff/models/resnet.py", "snippet": "class InflatedConv3d(nn.Conv2d):\n def forward(self, x):\n video_length = x.shape[2]\n\n x = rearrange(x, \"b c f h w -> (b f) c h w\")\n x = super().forward(x)\n x = rearrange(x, \"(b f) c h w -> b c f h w\", f=video_length)\n\n return x" }, { "identifier": "UNet3DConditionModel", "path": "animatediff/models/unet.py", "snippet": "class UNet3DConditionModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optional[int] = None,\n in_channels: int = 4,\n out_channels: int = 4,\n center_input_sample: bool = False,\n flip_sin_to_cos: bool = True,\n freq_shift: int = 0,\n down_block_types: Tuple[str] = (\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"DownBlock3D\",\n ),\n mid_block_type: str = \"UNetMidBlock3DCrossAttn\",\n up_block_types: Tuple[str] = (\n \"UpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\"\n ),\n only_cross_attention: Union[bool, Tuple[bool]] = False,\n block_out_channels: Tuple[int] = (320, 640, 1280, 1280),\n layers_per_block: int = 2,\n downsample_padding: int = 1,\n mid_block_scale_factor: float = 1,\n act_fn: str = \"silu\",\n norm_num_groups: int = 32,\n norm_eps: float = 1e-5,\n cross_attention_dim: int = 1280,\n attention_head_dim: Union[int, Tuple[int]] = 8,\n dual_cross_attention: bool = False,\n use_linear_projection: bool = False,\n class_embed_type: Optional[str] = None,\n num_class_embeds: Optional[int] = None,\n upcast_attention: bool = False,\n resnet_time_scale_shift: str = \"default\",\n\n # Additional\n use_motion_module = True,\n motion_module_resolutions = ( 1,2,4,8 ),\n motion_module_mid_block = False,\n motion_module_decoder_only = False,\n motion_module_type = None,\n motion_module_kwargs = {},\n unet_use_cross_frame_attention = None,\n unet_use_temporal_attention = None,\n\n ):\n super().__init__()\n\n self.sample_size = sample_size\n time_embed_dim = block_out_channels[0] * 4\n\n # Image to Video Conv\n # input\n self.conv_in = InflatedConv3d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1))\n\n # time\n self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)\n timestep_input_dim = block_out_channels[0]\n\n self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n\n # class embedding\n if class_embed_type is None and num_class_embeds is not None:\n self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)\n elif class_embed_type == \"timestep\":\n self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n elif class_embed_type == \"identity\":\n self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)\n else:\n self.class_embedding = None\n\n self.down_blocks = nn.ModuleList([])\n self.mid_block = None\n self.up_blocks = nn.ModuleList([])\n\n if isinstance(only_cross_attention, bool):\n only_cross_attention = [only_cross_attention] * len(down_block_types)\n\n if isinstance(attention_head_dim, int):\n attention_head_dim = (attention_head_dim,) * len(down_block_types)\n\n # down\n output_channel = block_out_channels[0]\n for i, down_block_type in enumerate(down_block_types):\n res = 2 ** i\n input_channel = output_channel\n output_channel = block_out_channels[i]\n is_final_block = i == len(block_out_channels) - 1\n\n down_block = get_down_block(\n down_block_type,\n 
num_layers=layers_per_block,\n in_channels=input_channel,\n out_channels=output_channel,\n temb_channels=time_embed_dim,\n add_downsample=not is_final_block,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[i],\n downsample_padding=downsample_padding,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n\n use_motion_module=use_motion_module and (res in motion_module_resolutions) and (not motion_module_decoder_only),\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n self.down_blocks.append(down_block)\n\n # mid\n if mid_block_type == \"UNetMidBlock3DCrossAttn\":\n self.mid_block = UNetMidBlock3DCrossAttn(\n in_channels=block_out_channels[-1],\n temb_channels=time_embed_dim,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n output_scale_factor=mid_block_scale_factor,\n resnet_time_scale_shift=resnet_time_scale_shift,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[-1],\n resnet_groups=norm_num_groups,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n\n use_motion_module=use_motion_module and motion_module_mid_block,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n else:\n raise ValueError(f\"unknown mid_block_type : {mid_block_type}\")\n\n # count how many layers upsample the videos\n self.num_upsamplers = 0\n\n # up\n reversed_block_out_channels = list(reversed(block_out_channels))\n reversed_attention_head_dim = list(reversed(attention_head_dim))\n only_cross_attention = list(reversed(only_cross_attention))\n output_channel = reversed_block_out_channels[0]\n for i, up_block_type in enumerate(up_block_types):\n res = 2 ** (3 - i)\n is_final_block = i == len(block_out_channels) - 1\n\n prev_output_channel = output_channel\n output_channel = reversed_block_out_channels[i]\n input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]\n\n # add upsample block for all BUT final layer\n if not is_final_block:\n add_upsample = True\n self.num_upsamplers += 1\n else:\n add_upsample = False\n\n up_block = get_up_block(\n up_block_type,\n num_layers=layers_per_block + 1,\n in_channels=input_channel,\n out_channels=output_channel,\n prev_output_channel=prev_output_channel,\n temb_channels=time_embed_dim,\n add_upsample=add_upsample,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=reversed_attention_head_dim[i],\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n\n use_motion_module=use_motion_module and (res in motion_module_resolutions),\n 
motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n self.up_blocks.append(up_block)\n prev_output_channel = output_channel\n\n # out\n self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps)\n self.conv_act = nn.SiLU()\n self.conv_out = InflatedConv3d(block_out_channels[0], out_channels, kernel_size=3, padding=1)\n\n @property\n def attn_processors(self) -> Dict[str, AttnProcessor]:\n r\"\"\"\n Returns:\n `dict` of attention processors: A dictionary containing all attention processors used in the model with\n indexed by its weight name.\n \"\"\"\n # set recursively\n processors = {}\n\n def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttnProcessor]):\n if hasattr(module, \"set_processor\"):\n processors[f\"{name}.processor\"] = module.processor\n\n for sub_name, child in module.named_children():\n fn_recursive_add_processors(f\"{name}.{sub_name}\", child, processors)\n\n return processors\n\n for name, module in self.named_children():\n fn_recursive_add_processors(name, module, processors)\n\n return processors\n\n def set_attn_processor(self, processor: Union[AttnProcessor, Dict[str, AttnProcessor]]):\n r\"\"\"\n Parameters:\n `processor (`dict` of `AttnProcessor` or `AttnProcessor`):\n The instantiated processor class or a dictionary of processor classes that will be set as the processor\n of **all** `CrossAttention` layers.\n In case `processor` is a dict, the key needs to define the path to the corresponding cross attention processor. This is strongly recommended when setting trainablae attention processors.:\n\n \"\"\"\n count = len(self.attn_processors.keys())\n\n if isinstance(processor, dict) and len(processor) != count:\n raise ValueError(\n f\"A dict of processors was passed, but the number of processors {len(processor)} does not match the\"\n f\" number of attention layers: {count}. Please make sure to pass {count} processor classes.\"\n )\n\n def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):\n if hasattr(module, \"set_processor\"):\n if not isinstance(processor, dict):\n print(f'Set {module}')\n module.set_processor(processor)\n else:\n print(f'Set {module}')\n module.set_processor(processor.pop(f\"{name}.processor\"))\n\n for sub_name, child in module.named_children():\n fn_recursive_attn_processor(f\"{name}.{sub_name}\", child, processor)\n\n for name, module in self.named_children():\n fn_recursive_attn_processor(name, module, processor)\n\n def set_attention_slice(self, slice_size):\n r\"\"\"\n Enable sliced attention computation.\n\n When this option is enabled, the attention module will split the input tensor in slices, to compute attention\n in several steps. This is useful to save some memory in exchange for a small speed decrease.\n\n Args:\n slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `\"auto\"`):\n When `\"auto\"`, halves the input to the attention heads, so attention will be computed in two steps. If\n `\"max\"`, maxium amount of memory will be saved by running only one slice at a time. If a number is\n provided, uses as many slices as `attention_head_dim // slice_size`. 
In this case, `attention_head_dim`\n must be a multiple of `slice_size`.\n \"\"\"\n sliceable_head_dims = []\n\n def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):\n if hasattr(module, \"set_attention_slice\"):\n sliceable_head_dims.append(module.sliceable_head_dim)\n\n for child in module.children():\n fn_recursive_retrieve_slicable_dims(child)\n\n # retrieve number of attention layers\n for module in self.children():\n fn_recursive_retrieve_slicable_dims(module)\n\n num_slicable_layers = len(sliceable_head_dims)\n\n if slice_size == \"auto\":\n # half the attention head size is usually a good trade-off between\n # speed and memory\n slice_size = [dim // 2 for dim in sliceable_head_dims]\n elif slice_size == \"max\":\n # make smallest slice possible\n slice_size = num_slicable_layers * [1]\n\n slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size\n\n if len(slice_size) != len(sliceable_head_dims):\n raise ValueError(\n f\"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different\"\n f\" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.\"\n )\n\n for i in range(len(slice_size)):\n size = slice_size[i]\n dim = sliceable_head_dims[i]\n if size is not None and size > dim:\n raise ValueError(f\"size {size} has to be smaller or equal to {dim}.\")\n\n # Recursively walk through all the children.\n # Any children which exposes the set_attention_slice method\n # gets the message\n def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):\n if hasattr(module, \"set_attention_slice\"):\n module.set_attention_slice(slice_size.pop())\n\n for child in module.children():\n fn_recursive_set_attention_slice(child, slice_size)\n\n reversed_slice_size = list(reversed(slice_size))\n for module in self.children():\n fn_recursive_set_attention_slice(module, reversed_slice_size)\n\n def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D, CrossAttnUpBlock3D, UpBlock3D)):\n module.gradient_checkpointing = value\n\n def forward(\n self,\n sample: torch.FloatTensor,\n mask_sample: torch.FloatTensor,\n masked_sample: torch.FloatTensor,\n timestep: Union[torch.Tensor, float, int],\n encoder_hidden_states: torch.Tensor,\n class_labels: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n image_embeds: Optional[torch.Tensor] = None,\n return_dict: bool = True,\n ) -> Union[UNet3DConditionOutput, Tuple]:\n r\"\"\"\n Args:\n sample (`torch.FloatTensor`): (batch, channel, height, width) noisy inputs tensor\n timestep (`torch.FloatTensor` or `float` or `int`): (batch) timesteps\n encoder_hidden_states (`torch.FloatTensor`): (batch, sequence_length, feature_dim) encoder hidden states\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple.\n\n Returns:\n [`~models.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:\n [`~models.unet_2d_condition.UNet2DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. 
When\n returning a tuple, the first element is the sample tensor.\n \"\"\"\n # image to video b c f h w\n sample = torch.cat([sample, mask_sample, masked_sample], dim=1).to(sample.device)\n\n # By default samples have to be AT least a multiple of the overall upsampling factor.\n # The overall upsampling factor is equal to 2 ** (# num of upsampling layears).\n # However, the upsampling interpolation output size can be forced to fit any upsampling size\n # on the fly if necessary.\n\n default_overall_up_factor = 2**self.num_upsamplers\n\n # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`\n forward_upsample_size = False\n upsample_size = None\n\n if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):\n logger.info(\"Forward upsample size to force interpolation output size.\")\n forward_upsample_size = True\n\n # prepare attention_mask\n if attention_mask is not None:\n attention_mask = (1 - attention_mask.to(sample.dtype)) * - 10000.0\n attention_mask = attention_mask.unsqueeze(1)\n\n # center input if necessary\n if self.config.center_input_sample:\n sample = 2 * sample - 1.0\n\n # time\n timesteps = timestep\n if not torch.is_tensor(timesteps):\n # This would be a good case for the `match` statement (Python 3.10+)\n is_mps = sample.device.type == \"mps\"\n if isinstance(timestep, float):\n dtype = torch.float32 if is_mps else torch.float64\n else:\n dtype = torch.int32 if is_mps else torch.int64\n timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)\n elif len(timesteps.shape) == 0:\n timesteps = timesteps[None].to(sample.device)\n\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\n timesteps = timesteps.expand(sample.shape[0])\n\n t_emb = self.time_proj(timesteps)\n\n # timesteps does not contain any weights and will always return f32 tensors\n # but time_embedding might actually be running in fp16. 
so we need to cast here.\n # there might be better ways to encapsulate this.\n t_emb = t_emb.to(dtype=self.dtype)\n emb = self.time_embedding(t_emb)\n\n if self.class_embedding is not None:\n if class_labels is None:\n raise ValueError(\"class_labels should be provided when num_class_embeds > 0\")\n\n if self.config.class_embed_type == \"timestep\":\n class_labels = self.time_proj(class_labels)\n\n class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)\n emb = emb + class_emb\n\n # prepare for ip-adapter\n if image_embeds is not None:\n image_embeds = self.encoder_hid_proj(\n image_embeds).to(encoder_hidden_states.dtype)\n encoder_hidden_states = torch.cat(\n [encoder_hidden_states, image_embeds], dim=1)\n\n # pre-process\n # b c f h w\n # 2 4 16 64 64\n sample = self.conv_in(sample)\n # down\n down_block_res_samples = (sample,)\n for downsample_block in self.down_blocks:\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\n sample, res_samples = downsample_block(\n hidden_states=sample,\n temb=emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n )\n else:\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb, encoder_hidden_states=encoder_hidden_states)\n down_block_res_samples += res_samples\n\n # mid\n sample = self.mid_block(\n sample, emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask\n )\n\n # up\n for i, upsample_block in enumerate(self.up_blocks):\n is_final_block = i == len(self.up_blocks) - 1\n\n res_samples = down_block_res_samples[-len(upsample_block.resnets) :]\n down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]\n\n # if we have not reached the final block and need to forward the\n # upsample size, we do it here\n if not is_final_block and forward_upsample_size:\n upsample_size = down_block_res_samples[-1].shape[2:]\n\n if hasattr(upsample_block, \"has_cross_attention\") and upsample_block.has_cross_attention:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n encoder_hidden_states=encoder_hidden_states,\n upsample_size=upsample_size,\n attention_mask=attention_mask,\n )\n else:\n sample = upsample_block(\n hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size, encoder_hidden_states=encoder_hidden_states,\n )\n\n # post-process\n sample = self.conv_norm_out(sample)\n sample = self.conv_act(sample)\n sample = self.conv_out(sample)\n\n if not return_dict:\n return (sample,)\n\n return UNet3DConditionOutput(sample=sample)\n\n @classmethod\n def from_pretrained_2d(cls, pretrained_model_path, subfolder=None, unet_additional_kwargs=None):\n if subfolder is not None:\n pretrained_model_path = os.path.join(pretrained_model_path, subfolder)\n print(f\"loaded temporal unet's pretrained weights from {pretrained_model_path} ...\")\n\n config_file = os.path.join(pretrained_model_path, 'config.json')\n if not os.path.isfile(config_file):\n raise RuntimeError(f\"{config_file} does not exist\")\n with open(config_file, \"r\") as f:\n config = json.load(f)\n config[\"_class_name\"] = cls.__name__\n config[\"down_block_types\"] = [\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"DownBlock3D\"\n ]\n config[\"up_block_types\"] = [\n \"UpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\"\n ]\n\n from diffusers.utils import WEIGHTS_NAME\n model = 
cls.from_config(config, **unet_additional_kwargs)\n model_file = os.path.join(pretrained_model_path, WEIGHTS_NAME)\n if not os.path.isfile(model_file):\n raise RuntimeError(f\"{model_file} does not exist\")\n state_dict = torch.load(model_file, map_location=\"cpu\")\n\n m, u = model.load_state_dict(state_dict, strict=False)\n print(f\"### missing keys: {len(m)}; \\n### unexpected keys: {len(u)};\")\n # print(f\"### missing keys:\\n{m}\\n### unexpected keys:\\n{u}\\n\")\n\n params = [p.numel() if \"temporal\" in n else 0 for n, p in model.named_parameters()]\n print(f\"### Temporal Module Parameters: {sum(params) / 1e6} M\")\n\n return model" }, { "identifier": "convert_ldm_clip_checkpoint", "path": "animatediff/utils/convert_from_ckpt.py", "snippet": "def convert_ldm_clip_checkpoint(checkpoint):\n keys = list(checkpoint.keys())\n\n text_model_dict = {}\n for key in keys:\n if key.startswith(\"cond_stage_model.transformer\"):\n text_model_dict[key[len(\"cond_stage_model.transformer.\") :]] = checkpoint[key]\n\n return text_model_dict" }, { "identifier": "convert_ldm_unet_checkpoint", "path": "animatediff/utils/convert_from_ckpt.py", "snippet": "def convert_ldm_unet_checkpoint(checkpoint, config, path=None, extract_ema=False, controlnet=False):\n \"\"\"\n Takes a state dict and a config, and returns a converted checkpoint.\n \"\"\"\n\n # extract state_dict for UNet\n unet_state_dict = {}\n keys = list(checkpoint.keys())\n\n if controlnet:\n unet_key = \"control_model.\"\n else:\n unet_key = \"model.diffusion_model.\"\n\n # at least a 100 parameters have to start with `model_ema` in order for the checkpoint to be EMA\n if sum(k.startswith(\"model_ema\") for k in keys) > 100 and extract_ema:\n print(f\"Checkpoint {path} has both EMA and non-EMA weights.\")\n print(\n \"In this conversion only the EMA weights are extracted. If you want to instead extract the non-EMA\"\n \" weights (useful to continue fine-tuning), please make sure to remove the `--extract_ema` flag.\"\n )\n for key in keys:\n if key.startswith(\"model.diffusion_model\"):\n flat_ema_key = \"model_ema.\" + \"\".join(key.split(\".\")[1:])\n unet_state_dict[key.replace(unet_key, \"\")] = checkpoint.pop(flat_ema_key)\n else:\n if sum(k.startswith(\"model_ema\") for k in keys) > 100:\n print(\n \"In this conversion only the non-EMA weights are extracted. 
If you want to instead extract the EMA\"\n \" weights (usually better for inference), please make sure to add the `--extract_ema` flag.\"\n )\n\n for key in keys:\n if key.startswith(unet_key):\n unet_state_dict[key.replace(unet_key, \"\")] = checkpoint.pop(key)\n\n new_checkpoint = {}\n\n new_checkpoint[\"time_embedding.linear_1.weight\"] = unet_state_dict[\"time_embed.0.weight\"]\n new_checkpoint[\"time_embedding.linear_1.bias\"] = unet_state_dict[\"time_embed.0.bias\"]\n new_checkpoint[\"time_embedding.linear_2.weight\"] = unet_state_dict[\"time_embed.2.weight\"]\n new_checkpoint[\"time_embedding.linear_2.bias\"] = unet_state_dict[\"time_embed.2.bias\"]\n\n if config[\"class_embed_type\"] is None:\n # No parameters to port\n ...\n elif config[\"class_embed_type\"] == \"timestep\" or config[\"class_embed_type\"] == \"projection\":\n new_checkpoint[\"class_embedding.linear_1.weight\"] = unet_state_dict[\"label_emb.0.0.weight\"]\n new_checkpoint[\"class_embedding.linear_1.bias\"] = unet_state_dict[\"label_emb.0.0.bias\"]\n new_checkpoint[\"class_embedding.linear_2.weight\"] = unet_state_dict[\"label_emb.0.2.weight\"]\n new_checkpoint[\"class_embedding.linear_2.bias\"] = unet_state_dict[\"label_emb.0.2.bias\"]\n else:\n raise NotImplementedError(f\"Not implemented `class_embed_type`: {config['class_embed_type']}\")\n\n new_checkpoint[\"conv_in.weight\"] = unet_state_dict[\"input_blocks.0.0.weight\"]\n new_checkpoint[\"conv_in.bias\"] = unet_state_dict[\"input_blocks.0.0.bias\"]\n\n if not controlnet:\n new_checkpoint[\"conv_norm_out.weight\"] = unet_state_dict[\"out.0.weight\"]\n new_checkpoint[\"conv_norm_out.bias\"] = unet_state_dict[\"out.0.bias\"]\n new_checkpoint[\"conv_out.weight\"] = unet_state_dict[\"out.2.weight\"]\n new_checkpoint[\"conv_out.bias\"] = unet_state_dict[\"out.2.bias\"]\n\n # Retrieves the keys for the input blocks only\n num_input_blocks = len({\".\".join(layer.split(\".\")[:2]) for layer in unet_state_dict if \"input_blocks\" in layer})\n input_blocks = {\n layer_id: [key for key in unet_state_dict if f\"input_blocks.{layer_id}\" in key]\n for layer_id in range(num_input_blocks)\n }\n\n # Retrieves the keys for the middle blocks only\n num_middle_blocks = len({\".\".join(layer.split(\".\")[:2]) for layer in unet_state_dict if \"middle_block\" in layer})\n middle_blocks = {\n layer_id: [key for key in unet_state_dict if f\"middle_block.{layer_id}\" in key]\n for layer_id in range(num_middle_blocks)\n }\n\n # Retrieves the keys for the output blocks only\n num_output_blocks = len({\".\".join(layer.split(\".\")[:2]) for layer in unet_state_dict if \"output_blocks\" in layer})\n output_blocks = {\n layer_id: [key for key in unet_state_dict if f\"output_blocks.{layer_id}\" in key]\n for layer_id in range(num_output_blocks)\n }\n\n for i in range(1, num_input_blocks):\n block_id = (i - 1) // (config[\"layers_per_block\"] + 1)\n layer_in_block_id = (i - 1) % (config[\"layers_per_block\"] + 1)\n\n resnets = [\n key for key in input_blocks[i] if f\"input_blocks.{i}.0\" in key and f\"input_blocks.{i}.0.op\" not in key\n ]\n attentions = [key for key in input_blocks[i] if f\"input_blocks.{i}.1\" in key]\n\n if f\"input_blocks.{i}.0.op.weight\" in unet_state_dict:\n new_checkpoint[f\"down_blocks.{block_id}.downsamplers.0.conv.weight\"] = unet_state_dict.pop(\n f\"input_blocks.{i}.0.op.weight\"\n )\n new_checkpoint[f\"down_blocks.{block_id}.downsamplers.0.conv.bias\"] = unet_state_dict.pop(\n f\"input_blocks.{i}.0.op.bias\"\n )\n\n paths = renew_resnet_paths(resnets)\n meta_path 
= {\"old\": f\"input_blocks.{i}.0\", \"new\": f\"down_blocks.{block_id}.resnets.{layer_in_block_id}\"}\n assign_to_checkpoint(\n paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config\n )\n\n if len(attentions):\n paths = renew_attention_paths(attentions)\n meta_path = {\"old\": f\"input_blocks.{i}.1\", \"new\": f\"down_blocks.{block_id}.attentions.{layer_in_block_id}\"}\n assign_to_checkpoint(\n paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config\n )\n\n resnet_0 = middle_blocks[0]\n attentions = middle_blocks[1]\n resnet_1 = middle_blocks[2]\n\n resnet_0_paths = renew_resnet_paths(resnet_0)\n assign_to_checkpoint(resnet_0_paths, new_checkpoint, unet_state_dict, config=config)\n\n resnet_1_paths = renew_resnet_paths(resnet_1)\n assign_to_checkpoint(resnet_1_paths, new_checkpoint, unet_state_dict, config=config)\n\n attentions_paths = renew_attention_paths(attentions)\n meta_path = {\"old\": \"middle_block.1\", \"new\": \"mid_block.attentions.0\"}\n assign_to_checkpoint(\n attentions_paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config\n )\n\n for i in range(num_output_blocks):\n block_id = i // (config[\"layers_per_block\"] + 1)\n layer_in_block_id = i % (config[\"layers_per_block\"] + 1)\n output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]\n output_block_list = {}\n\n for layer in output_block_layers:\n layer_id, layer_name = layer.split(\".\")[0], shave_segments(layer, 1)\n if layer_id in output_block_list:\n output_block_list[layer_id].append(layer_name)\n else:\n output_block_list[layer_id] = [layer_name]\n\n if len(output_block_list) > 1:\n resnets = [key for key in output_blocks[i] if f\"output_blocks.{i}.0\" in key]\n attentions = [key for key in output_blocks[i] if f\"output_blocks.{i}.1\" in key]\n\n resnet_0_paths = renew_resnet_paths(resnets)\n paths = renew_resnet_paths(resnets)\n\n meta_path = {\"old\": f\"output_blocks.{i}.0\", \"new\": f\"up_blocks.{block_id}.resnets.{layer_in_block_id}\"}\n assign_to_checkpoint(\n paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config\n )\n\n output_block_list = {k: sorted(v) for k, v in output_block_list.items()}\n if [\"conv.bias\", \"conv.weight\"] in output_block_list.values():\n index = list(output_block_list.values()).index([\"conv.bias\", \"conv.weight\"])\n new_checkpoint[f\"up_blocks.{block_id}.upsamplers.0.conv.weight\"] = unet_state_dict[\n f\"output_blocks.{i}.{index}.conv.weight\"\n ]\n new_checkpoint[f\"up_blocks.{block_id}.upsamplers.0.conv.bias\"] = unet_state_dict[\n f\"output_blocks.{i}.{index}.conv.bias\"\n ]\n\n # Clear attentions as they have been attributed above.\n if len(attentions) == 2:\n attentions = []\n\n if len(attentions):\n paths = renew_attention_paths(attentions)\n meta_path = {\n \"old\": f\"output_blocks.{i}.1\",\n \"new\": f\"up_blocks.{block_id}.attentions.{layer_in_block_id}\",\n }\n assign_to_checkpoint(\n paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config\n )\n else:\n resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1)\n for path in resnet_0_paths:\n old_path = \".\".join([\"output_blocks\", str(i), path[\"old\"]])\n new_path = \".\".join([\"up_blocks\", str(block_id), \"resnets\", str(layer_in_block_id), path[\"new\"]])\n\n new_checkpoint[new_path] = unet_state_dict[old_path]\n\n if controlnet:\n # conditioning embedding\n\n orig_index = 0\n\n 
new_checkpoint[\"controlnet_cond_embedding.conv_in.weight\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.weight\"\n )\n new_checkpoint[\"controlnet_cond_embedding.conv_in.bias\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.bias\"\n )\n\n orig_index += 2\n\n diffusers_index = 0\n\n while diffusers_index < 6:\n new_checkpoint[f\"controlnet_cond_embedding.blocks.{diffusers_index}.weight\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.weight\"\n )\n new_checkpoint[f\"controlnet_cond_embedding.blocks.{diffusers_index}.bias\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.bias\"\n )\n diffusers_index += 1\n orig_index += 2\n\n new_checkpoint[\"controlnet_cond_embedding.conv_out.weight\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.weight\"\n )\n new_checkpoint[\"controlnet_cond_embedding.conv_out.bias\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.bias\"\n )\n\n # down blocks\n for i in range(num_input_blocks):\n new_checkpoint[f\"controlnet_down_blocks.{i}.weight\"] = unet_state_dict.pop(f\"zero_convs.{i}.0.weight\")\n new_checkpoint[f\"controlnet_down_blocks.{i}.bias\"] = unet_state_dict.pop(f\"zero_convs.{i}.0.bias\")\n\n # mid block\n new_checkpoint[\"controlnet_mid_block.weight\"] = unet_state_dict.pop(\"middle_block_out.0.weight\")\n new_checkpoint[\"controlnet_mid_block.bias\"] = unet_state_dict.pop(\"middle_block_out.0.bias\")\n\n return new_checkpoint" }, { "identifier": "convert_ldm_vae_checkpoint", "path": "animatediff/utils/convert_from_ckpt.py", "snippet": "def convert_ldm_vae_checkpoint(checkpoint, config, only_decoder=False, only_encoder=False):\n # extract state dict for VAE\n vae_state_dict = {}\n vae_key = \"first_stage_model.\"\n keys = list(checkpoint.keys())\n for key in keys:\n if key.startswith(vae_key):\n vae_state_dict[key.replace(vae_key, \"\")] = checkpoint.get(key)\n\n new_checkpoint = {}\n\n new_checkpoint[\"encoder.conv_in.weight\"] = vae_state_dict[\"encoder.conv_in.weight\"]\n new_checkpoint[\"encoder.conv_in.bias\"] = vae_state_dict[\"encoder.conv_in.bias\"]\n new_checkpoint[\"encoder.conv_out.weight\"] = vae_state_dict[\"encoder.conv_out.weight\"]\n new_checkpoint[\"encoder.conv_out.bias\"] = vae_state_dict[\"encoder.conv_out.bias\"]\n new_checkpoint[\"encoder.conv_norm_out.weight\"] = vae_state_dict[\"encoder.norm_out.weight\"]\n new_checkpoint[\"encoder.conv_norm_out.bias\"] = vae_state_dict[\"encoder.norm_out.bias\"]\n\n new_checkpoint[\"decoder.conv_in.weight\"] = vae_state_dict[\"decoder.conv_in.weight\"]\n new_checkpoint[\"decoder.conv_in.bias\"] = vae_state_dict[\"decoder.conv_in.bias\"]\n new_checkpoint[\"decoder.conv_out.weight\"] = vae_state_dict[\"decoder.conv_out.weight\"]\n new_checkpoint[\"decoder.conv_out.bias\"] = vae_state_dict[\"decoder.conv_out.bias\"]\n new_checkpoint[\"decoder.conv_norm_out.weight\"] = vae_state_dict[\"decoder.norm_out.weight\"]\n new_checkpoint[\"decoder.conv_norm_out.bias\"] = vae_state_dict[\"decoder.norm_out.bias\"]\n\n new_checkpoint[\"quant_conv.weight\"] = vae_state_dict[\"quant_conv.weight\"]\n new_checkpoint[\"quant_conv.bias\"] = vae_state_dict[\"quant_conv.bias\"]\n new_checkpoint[\"post_quant_conv.weight\"] = vae_state_dict[\"post_quant_conv.weight\"]\n new_checkpoint[\"post_quant_conv.bias\"] = vae_state_dict[\"post_quant_conv.bias\"]\n\n # Retrieves the keys for the encoder down blocks only\n num_down_blocks = len({\".\".join(layer.split(\".\")[:3]) for layer in vae_state_dict if \"encoder.down\" in layer})\n down_blocks = {\n 
layer_id: [key for key in vae_state_dict if f\"down.{layer_id}\" in key] for layer_id in range(num_down_blocks)\n }\n\n # Retrieves the keys for the decoder up blocks only\n num_up_blocks = len({\".\".join(layer.split(\".\")[:3]) for layer in vae_state_dict if \"decoder.up\" in layer})\n up_blocks = {\n layer_id: [key for key in vae_state_dict if f\"up.{layer_id}\" in key] for layer_id in range(num_up_blocks)\n }\n\n for i in range(num_down_blocks):\n resnets = [key for key in down_blocks[i] if f\"down.{i}\" in key and f\"down.{i}.downsample\" not in key]\n\n if f\"encoder.down.{i}.downsample.conv.weight\" in vae_state_dict:\n new_checkpoint[f\"encoder.down_blocks.{i}.downsamplers.0.conv.weight\"] = vae_state_dict.pop(\n f\"encoder.down.{i}.downsample.conv.weight\"\n )\n new_checkpoint[f\"encoder.down_blocks.{i}.downsamplers.0.conv.bias\"] = vae_state_dict.pop(\n f\"encoder.down.{i}.downsample.conv.bias\"\n )\n\n paths = renew_vae_resnet_paths(resnets)\n meta_path = {\"old\": f\"down.{i}.block\", \"new\": f\"down_blocks.{i}.resnets\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)\n\n mid_resnets = [key for key in vae_state_dict if \"encoder.mid.block\" in key]\n num_mid_res_blocks = 2\n for i in range(1, num_mid_res_blocks + 1):\n resnets = [key for key in mid_resnets if f\"encoder.mid.block_{i}\" in key]\n\n paths = renew_vae_resnet_paths(resnets)\n meta_path = {\"old\": f\"mid.block_{i}\", \"new\": f\"mid_block.resnets.{i - 1}\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)\n\n mid_attentions = [key for key in vae_state_dict if \"encoder.mid.attn\" in key]\n paths = renew_vae_attention_paths(mid_attentions)\n meta_path = {\"old\": \"mid.attn_1\", \"new\": \"mid_block.attentions.0\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)\n conv_attn_to_linear(new_checkpoint)\n\n for i in range(num_up_blocks):\n block_id = num_up_blocks - 1 - i\n resnets = [\n key for key in up_blocks[block_id] if f\"up.{block_id}\" in key and f\"up.{block_id}.upsample\" not in key\n ]\n\n if f\"decoder.up.{block_id}.upsample.conv.weight\" in vae_state_dict:\n new_checkpoint[f\"decoder.up_blocks.{i}.upsamplers.0.conv.weight\"] = vae_state_dict[\n f\"decoder.up.{block_id}.upsample.conv.weight\"\n ]\n new_checkpoint[f\"decoder.up_blocks.{i}.upsamplers.0.conv.bias\"] = vae_state_dict[\n f\"decoder.up.{block_id}.upsample.conv.bias\"\n ]\n\n paths = renew_vae_resnet_paths(resnets)\n meta_path = {\"old\": f\"up.{block_id}.block\", \"new\": f\"up_blocks.{i}.resnets\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)\n\n mid_resnets = [key for key in vae_state_dict if \"decoder.mid.block\" in key]\n num_mid_res_blocks = 2\n for i in range(1, num_mid_res_blocks + 1):\n resnets = [key for key in mid_resnets if f\"decoder.mid.block_{i}\" in key]\n\n paths = renew_vae_resnet_paths(resnets)\n meta_path = {\"old\": f\"mid.block_{i}\", \"new\": f\"mid_block.resnets.{i - 1}\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)\n\n mid_attentions = [key for key in vae_state_dict if \"decoder.mid.attn\" in key]\n paths = renew_vae_attention_paths(mid_attentions)\n meta_path = {\"old\": \"mid.attn_1\", \"new\": \"mid_block.attentions.0\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, 
additional_replacements=[meta_path], config=config)\n conv_attn_to_linear(new_checkpoint)\n\n if only_decoder:\n new_checkpoint = {k: v for k, v in new_checkpoint.items() if k.startswith('decoder') or k.startswith('post_quant')}\n elif only_encoder:\n new_checkpoint = {k: v for k, v in new_checkpoint.items() if k.startswith('encoder') or k.startswith('quant')}\n\n return new_checkpoint" }, { "identifier": "convert_lora_model_level", "path": "animatediff/utils/convert_lora_safetensor_to_diffusers.py", "snippet": "def convert_lora_model_level(state_dict, unet, text_encoder=None, LORA_PREFIX_UNET=\"lora_unet\", LORA_PREFIX_TEXT_ENCODER=\"lora_te\", alpha=0.6):\n \"\"\"convert lora in model level instead of pipeline leval\n \"\"\"\n\n visited = []\n\n # directly update weight in diffusers model\n for key in state_dict:\n # it is suggested to print out the key, it usually will be something like below\n # \"lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight\"\n\n # as we have set the alpha beforehand, so just skip\n if \".alpha\" in key or key in visited:\n continue\n\n if \"text\" in key:\n layer_infos = key.split(\".\")[0].split(LORA_PREFIX_TEXT_ENCODER + \"_\")[-1].split(\"_\")\n assert text_encoder is not None, (\n 'text_encoder must be passed since lora contains text encoder layers')\n curr_layer = text_encoder\n else:\n layer_infos = key.split(\".\")[0].split(LORA_PREFIX_UNET + \"_\")[-1].split(\"_\")\n curr_layer = unet\n\n # find the target layer\n temp_name = layer_infos.pop(0)\n while len(layer_infos) > -1:\n try:\n curr_layer = curr_layer.__getattr__(temp_name)\n if len(layer_infos) > 0:\n temp_name = layer_infos.pop(0)\n elif len(layer_infos) == 0:\n break\n except Exception:\n if len(temp_name) > 0:\n temp_name += \"_\" + layer_infos.pop(0)\n else:\n temp_name = layer_infos.pop(0)\n\n pair_keys = []\n if \"lora_down\" in key:\n pair_keys.append(key.replace(\"lora_down\", \"lora_up\"))\n pair_keys.append(key)\n else:\n pair_keys.append(key)\n pair_keys.append(key.replace(\"lora_up\", \"lora_down\"))\n\n # update weight\n # NOTE: load lycon, meybe have bugs :(\n if 'conv_in' in pair_keys[0]:\n weight_up = state_dict[pair_keys[0]].to(torch.float32)\n weight_down = state_dict[pair_keys[1]].to(torch.float32)\n weight_up = weight_up.view(weight_up.size(0), -1)\n weight_down = weight_down.view(weight_down.size(0), -1)\n shape = [e for e in curr_layer.weight.data.shape]\n shape[1] = 4\n curr_layer.weight.data[:, :4, ...] 
+= alpha * (weight_up @ weight_down).view(*shape)\n elif 'conv' in pair_keys[0]:\n weight_up = state_dict[pair_keys[0]].to(torch.float32)\n weight_down = state_dict[pair_keys[1]].to(torch.float32)\n weight_up = weight_up.view(weight_up.size(0), -1)\n weight_down = weight_down.view(weight_down.size(0), -1)\n shape = [e for e in curr_layer.weight.data.shape]\n curr_layer.weight.data += alpha * (weight_up @ weight_down).view(*shape)\n elif len(state_dict[pair_keys[0]].shape) == 4:\n weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)\n weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)\n curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3).to(curr_layer.weight.data.device)\n else:\n weight_up = state_dict[pair_keys[0]].to(torch.float32)\n weight_down = state_dict[pair_keys[1]].to(torch.float32)\n curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).to(curr_layer.weight.data.device)\n\n # update visited list\n for item in pair_keys:\n visited.append(item)\n\n return unet, text_encoder" }, { "identifier": "prepare_mask_coef_by_statistics", "path": "animatediff/utils/util.py", "snippet": "def prepare_mask_coef_by_statistics(video_length: int, cond_frame: int, sim_range: int):\n assert video_length > 0, \\\n 'video_length should be greater than 0'\n\n assert video_length > cond_frame,\\\n 'video_length should be greater than cond_frame'\n\n range_list = RANGE_LIST\n\n assert sim_range < len(range_list),\\\n f'sim_range type{sim_range} not implemented'\n\n coef = range_list[sim_range]\n coef = coef + ([coef[-1]] * (video_length - len(coef)))\n\n order = [abs(i - cond_frame) for i in range(video_length)]\n coef = [coef[order[i]] for i in range(video_length)]\n\n return coef" } ]
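The convert_lora_model_level snippet in the context above folds each LoRA pair directly into the base weights as W += alpha * (up @ down), with extra reshaping for the conv and 4-D cases. A minimal standalone sketch of that core merge rule for a plain nn.Linear layer (the helper name and shapes here are illustrative, not part of the repository):

import torch
import torch.nn as nn


@torch.no_grad()
def merge_lora_pair(layer: nn.Linear,
                    lora_up: torch.Tensor,    # (out_features, rank)
                    lora_down: torch.Tensor,  # (rank, in_features)
                    alpha: float = 0.6) -> None:
    """Fold one LoRA pair into the base weight in place: W <- W + alpha * up @ down."""
    delta = alpha * (lora_up.to(torch.float32) @ lora_down.to(torch.float32))
    layer.weight += delta.to(layer.weight.dtype)


# usage sketch with a random rank-4 pair
linear = nn.Linear(320, 320)
merge_lora_pair(linear, torch.randn(320, 4), torch.randn(4, 320))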
import inspect
import os.path as osp
import numpy as np
import torch
from dataclasses import dataclass
from typing import Callable, List, Optional, Union
from diffusers.configuration_utils import FrozenDict
from diffusers.loaders import IPAdapterMixin, TextualInversionLoaderMixin
from diffusers.models import AutoencoderKL
from diffusers.pipelines import DiffusionPipeline
from diffusers.schedulers import (DDIMScheduler, DPMSolverMultistepScheduler,
                                  EulerAncestralDiscreteScheduler,
                                  EulerDiscreteScheduler, LMSDiscreteScheduler,
                                  PNDMScheduler)
from diffusers.utils import (BaseOutput, deprecate, is_accelerate_available,
                             logging)
from diffusers.utils.import_utils import is_xformers_available
from einops import rearrange
from omegaconf import OmegaConf
from packaging import version
from safetensors import safe_open
from tqdm import tqdm
from transformers import (CLIPImageProcessor, CLIPTextModel, CLIPTokenizer,
                          CLIPVisionModelWithProjection)
from animatediff.models.resnet import InflatedConv3d
from animatediff.models.unet import UNet3DConditionModel
from animatediff.utils.convert_from_ckpt import (convert_ldm_clip_checkpoint,
                                                 convert_ldm_unet_checkpoint,
                                                 convert_ldm_vae_checkpoint)
from animatediff.utils.convert_lora_safetensor_to_diffusers import \
    convert_lora_model_level
from animatediff.utils.util import prepare_mask_coef_by_statistics
from accelerate import cpu_offload

13661
new_config["steps_offset"] = 1 scheduler._internal_dict = FrozenDict(new_config) if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." " `clip_sample` should be set to False in the configuration file. Please make sure to update the" " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" ) deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["clip_sample"] = False scheduler._internal_dict = FrozenDict(new_config) is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( version.parse(unet.config._diffusers_version).base_version ) < version.parse("0.9.0.dev0") is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: deprecation_message = ( "The configuration file of the unet has set the default `sample_size` to smaller than" " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the" " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" " in the config might lead to incorrect results in future versions. If you have downloaded this" " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" " the `unet/config.json` file" ) deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(unet.config) new_config["sample_size"] = 64 unet._internal_dict = FrozenDict(new_config) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, image_encoder=image_encoder, feature_extractor=feature_extractor, scheduler=scheduler, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) # self.memory_format = memory_format self.use_ip_adapter = False @classmethod def build_pipeline(cls, base_cfg, base_model: str, unet_path: str, dreambooth_path: Optional[str] = None, lora_path: Optional[str] = None, lora_alpha: float = 0, vae_path: Optional[str] = None, ip_adapter_path: Optional[str] = None, ip_adapter_scale: float = 0.0, only_load_vae_decoder: bool = False, only_load_vae_encoder: bool = False) -> 'I2VPipeline': """Method to build pipeline in a faster way~ Args: base_cfg: The config to build model base_mode: The model id to initialize StableDiffusion unet_path: Path for i2v unet dreambooth_path: path for dreambooth model lora_path: path for lora model lora_alpha: value for lora scale only_load_vae_decoder: Only load VAE decoder from dreambooth / VAE ckpt and maitain encoder as original. 
""" # build unet unet = UNet3DConditionModel.from_pretrained_2d( base_model, subfolder="unet", unet_additional_kwargs=OmegaConf.to_container( base_cfg.unet_additional_kwargs)) old_weights = unet.conv_in.weight old_bias = unet.conv_in.bias new_conv1 = InflatedConv3d( 9, old_weights.shape[0], kernel_size=unet.conv_in.kernel_size, stride=unet.conv_in.stride, padding=unet.conv_in.padding, bias=True if old_bias is not None else False) param = torch.zeros((320,5,3,3),requires_grad=True) new_conv1.weight = torch.nn.Parameter(torch.cat((old_weights,param),dim=1)) if old_bias is not None: new_conv1.bias = old_bias unet.conv_in = new_conv1 unet.config["in_channels"] = 9 unet_ckpt = torch.load(unet_path, map_location='cpu') unet.load_state_dict(unet_ckpt, strict=False) # NOTE: only load temporal layers and condition module # for key, value in unet_ckpt.items(): # if 'motion' in key or 'conv_in' in key: # unet.state_dict()[key].copy_(value) # load vae, tokenizer, text encoder vae = AutoencoderKL.from_pretrained(base_model, subfolder="vae") tokenizer = CLIPTokenizer.from_pretrained(base_model, subfolder="tokenizer") text_encoder = CLIPTextModel.from_pretrained(base_model, subfolder="text_encoder") noise_scheduler = DDIMScheduler(**OmegaConf.to_container(base_cfg.noise_scheduler_kwargs)) if dreambooth_path: print(" >>> Begin loading DreamBooth >>>") base_model_state_dict = {} with safe_open(dreambooth_path, framework="pt", device="cpu") as f: for key in f.keys(): base_model_state_dict[key] = f.get_tensor(key) # load unet
# Adapted from https://github.com/showlab/Tune-A-Video/blob/main/tuneavideo/pipelines/pipeline_tuneavideo.py logger = logging.get_logger(__name__) # pylint: disable=invalid-name DEFAULT_N_PROMPT = ('wrong white balance, dark, sketches,worst quality,' 'low quality, deformed, distorted, disfigured, bad eyes, ' 'wrong lips,weird mouth, bad teeth, mutated hands and fingers, ' 'bad anatomy,wrong anatomy, amputation, extra limb, ' 'missing limb, floating,limbs, disconnected limbs, mutation, ' 'ugly, disgusting, bad_pictures, negative_hand-neg') @dataclass class AnimationPipelineOutput(BaseOutput): videos: Union[torch.Tensor, np.ndarray] class I2VPipeline(DiffusionPipeline, IPAdapterMixin, TextualInversionLoaderMixin): _optional_components = [] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet3DConditionModel, scheduler: Union[ DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, ], # memory_format: torch.memory_format, feature_extractor: CLIPImageProcessor = None, image_encoder: CLIPVisionModelWithProjection = None, ): super().__init__() if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " "to update the config accordingly as leaving `steps_offset` might led to incorrect results" " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" " file" ) deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["steps_offset"] = 1 scheduler._internal_dict = FrozenDict(new_config) if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." " `clip_sample` should be set to False in the configuration file. Please make sure to update the" " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" ) deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["clip_sample"] = False scheduler._internal_dict = FrozenDict(new_config) is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( version.parse(unet.config._diffusers_version).base_version ) < version.parse("0.9.0.dev0") is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: deprecation_message = ( "The configuration file of the unet has set the default `sample_size` to smaller than" " 64 which seems highly unlikely. 
If your checkpoint is a fine-tuned version of any of the" " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" " in the config might lead to incorrect results in future versions. If you have downloaded this" " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" " the `unet/config.json` file" ) deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(unet.config) new_config["sample_size"] = 64 unet._internal_dict = FrozenDict(new_config) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, image_encoder=image_encoder, feature_extractor=feature_extractor, scheduler=scheduler, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) # self.memory_format = memory_format self.use_ip_adapter = False @classmethod def build_pipeline(cls, base_cfg, base_model: str, unet_path: str, dreambooth_path: Optional[str] = None, lora_path: Optional[str] = None, lora_alpha: float = 0, vae_path: Optional[str] = None, ip_adapter_path: Optional[str] = None, ip_adapter_scale: float = 0.0, only_load_vae_decoder: bool = False, only_load_vae_encoder: bool = False) -> 'I2VPipeline': """Method to build pipeline in a faster way~ Args: base_cfg: The config to build model base_mode: The model id to initialize StableDiffusion unet_path: Path for i2v unet dreambooth_path: path for dreambooth model lora_path: path for lora model lora_alpha: value for lora scale only_load_vae_decoder: Only load VAE decoder from dreambooth / VAE ckpt and maitain encoder as original. """ # build unet unet = UNet3DConditionModel.from_pretrained_2d( base_model, subfolder="unet", unet_additional_kwargs=OmegaConf.to_container( base_cfg.unet_additional_kwargs)) old_weights = unet.conv_in.weight old_bias = unet.conv_in.bias new_conv1 = InflatedConv3d( 9, old_weights.shape[0], kernel_size=unet.conv_in.kernel_size, stride=unet.conv_in.stride, padding=unet.conv_in.padding, bias=True if old_bias is not None else False) param = torch.zeros((320,5,3,3),requires_grad=True) new_conv1.weight = torch.nn.Parameter(torch.cat((old_weights,param),dim=1)) if old_bias is not None: new_conv1.bias = old_bias unet.conv_in = new_conv1 unet.config["in_channels"] = 9 unet_ckpt = torch.load(unet_path, map_location='cpu') unet.load_state_dict(unet_ckpt, strict=False) # NOTE: only load temporal layers and condition module # for key, value in unet_ckpt.items(): # if 'motion' in key or 'conv_in' in key: # unet.state_dict()[key].copy_(value) # load vae, tokenizer, text encoder vae = AutoencoderKL.from_pretrained(base_model, subfolder="vae") tokenizer = CLIPTokenizer.from_pretrained(base_model, subfolder="tokenizer") text_encoder = CLIPTextModel.from_pretrained(base_model, subfolder="text_encoder") noise_scheduler = DDIMScheduler(**OmegaConf.to_container(base_cfg.noise_scheduler_kwargs)) if dreambooth_path: print(" >>> Begin loading DreamBooth >>>") base_model_state_dict = {} with safe_open(dreambooth_path, framework="pt", device="cpu") as f: for key in f.keys(): base_model_state_dict[key] = f.get_tensor(key) # load unet
converted_unet_checkpoint = convert_ldm_unet_checkpoint(base_model_state_dict, unet.config)
3
2023-12-21 03:29:34+00:00
16k
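The build_pipeline step in the record above widens the UNet's conv_in from its pretrained input width to 9 channels by concatenating a zero-initialized weight slice, so the original latent channels keep their pretrained behavior while the added conditioning channels start as a no-op. A minimal sketch of that inflation pattern on a plain nn.Conv2d follows (the helper name and shapes are illustrative, not part of the repository):

import torch
import torch.nn as nn

def inflate_conv_in(conv: nn.Conv2d, new_in_channels: int) -> nn.Conv2d:
    """Widen a conv layer's input channels, zero-initializing the added slice."""
    extra = new_in_channels - conv.in_channels
    assert extra > 0, "new_in_channels must exceed the current input width"
    new_conv = nn.Conv2d(
        new_in_channels,
        conv.out_channels,
        kernel_size=conv.kernel_size,
        stride=conv.stride,
        padding=conv.padding,
        bias=conv.bias is not None,
    )
    # Pretrained weights for the original channels, zeros for the new ones,
    # so the extra conditioning channels contribute nothing at initialization.
    pad = torch.zeros(conv.out_channels, extra, *conv.kernel_size)
    new_conv.weight = nn.Parameter(torch.cat([conv.weight.data, pad], dim=1))
    if conv.bias is not None:
        new_conv.bias = nn.Parameter(conv.bias.data)
    return new_conv

# Example: a 4-channel latent conv inflated to accept 9 input channels.
wide = inflate_conv_in(nn.Conv2d(4, 320, kernel_size=3, padding=1), 9)
print(wide.weight.shape)  # torch.Size([320, 9, 3, 3])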
xinghaochen/TinySAM
tinysam/hierarchical_mask_generator.py
[ { "identifier": "Sam", "path": "tinysam/modeling/sam.py", "snippet": "class Sam(nn.Module):\n mask_threshold: float = 0.0\n image_format: str = \"RGB\"\n\n def __init__(\n self,\n image_encoder: Union[ImageEncoderViT, TinyViT],\n prompt_encoder: PromptEncoder,\n mask_decoder: MaskDecoder,\n pixel_mean: List[float] = [123.675, 116.28, 103.53],\n pixel_std: List[float] = [58.395, 57.12, 57.375],\n ) -> None:\n \"\"\"\n SAM predicts object masks from an image and input prompts.\n\n Arguments:\n image_encoder (ImageEncoderViT): The backbone used to encode the\n image into image embeddings that allow for efficient mask prediction.\n prompt_encoder (PromptEncoder): Encodes various types of input prompts.\n mask_decoder (MaskDecoder): Predicts masks from the image embeddings\n and encoded prompts.\n pixel_mean (list(float)): Mean values for normalizing pixels in the input image.\n pixel_std (list(float)): Std values for normalizing pixels in the input image.\n \"\"\"\n super().__init__()\n self.image_encoder = image_encoder\n self.prompt_encoder = prompt_encoder\n self.mask_decoder = mask_decoder\n self.register_buffer(\"pixel_mean\", torch.Tensor(pixel_mean).view(-1, 1, 1), False)\n self.register_buffer(\"pixel_std\", torch.Tensor(pixel_std).view(-1, 1, 1), False)\n\n @property\n def device(self) -> Any:\n return self.pixel_mean.device\n\n @torch.no_grad()\n def forward(\n self,\n batched_input: List[Dict[str, Any]],\n multimask_output: bool,\n ) -> List[Dict[str, torch.Tensor]]:\n \"\"\"\n Predicts masks end-to-end from provided images and prompts.\n If prompts are not known in advance, using SamPredictor is\n recommended over calling the model directly.\n\n Arguments:\n batched_input (list(dict)): A list over input images, each a\n dictionary with the following keys. A prompt key can be\n excluded if it is not present.\n 'image': The image as a torch tensor in 3xHxW format,\n already transformed for input to the model.\n 'original_size': (tuple(int, int)) The original size of\n the image before transformation, as (H, W).\n 'point_coords': (torch.Tensor) Batched point prompts for\n this image, with shape BxNx2. Already transformed to the\n input frame of the model.\n 'point_labels': (torch.Tensor) Batched labels for point prompts,\n with shape BxN.\n 'boxes': (torch.Tensor) Batched box inputs, with shape Bx4.\n Already transformed to the input frame of the model.\n 'mask_inputs': (torch.Tensor) Batched mask inputs to the model,\n in the form Bx1xHxW.\n multimask_output (bool): Whether the model should predict multiple\n disambiguating masks, or return a single mask.\n\n Returns:\n (list(dict)): A list over input images, where each element is\n as dictionary with the following keys.\n 'masks': (torch.Tensor) Batched binary mask predictions,\n with shape BxCxHxW, where B is the number of input prompts,\n C is determined by multimask_output, and (H, W) is the\n original size of the image.\n 'iou_predictions': (torch.Tensor) The model's predictions\n of mask quality, in shape BxC.\n 'low_res_logits': (torch.Tensor) Low resolution logits with\n shape BxCxHxW, where H=W=256. 
Can be passed as mask input\n to subsequent iterations of prediction.\n \"\"\"\n input_images = torch.stack([self.preprocess(x[\"image\"]) for x in batched_input], dim=0)\n image_embeddings = self.image_encoder(input_images)\n\n outputs = []\n for image_record, curr_embedding in zip(batched_input, image_embeddings):\n if \"point_coords\" in image_record:\n points = (image_record[\"point_coords\"], image_record[\"point_labels\"])\n else:\n points = None\n sparse_embeddings, dense_embeddings = self.prompt_encoder(\n points=points,\n boxes=image_record.get(\"boxes\", None),\n masks=image_record.get(\"mask_inputs\", None),\n )\n low_res_masks, iou_predictions = self.mask_decoder(\n image_embeddings=curr_embedding.unsqueeze(0),\n image_pe=self.prompt_encoder.get_dense_pe(),\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n multimask_output=multimask_output,\n )\n masks = self.postprocess_masks(\n low_res_masks,\n input_size=image_record[\"image\"].shape[-2:],\n original_size=image_record[\"original_size\"],\n )\n masks = masks > self.mask_threshold\n outputs.append(\n {\n \"masks\": masks,\n \"iou_predictions\": iou_predictions,\n \"low_res_logits\": low_res_masks,\n }\n )\n return outputs\n\n def postprocess_masks(\n self,\n masks: torch.Tensor,\n input_size: Tuple[int, ...],\n original_size: Tuple[int, ...],\n ) -> torch.Tensor:\n \"\"\"\n Remove padding and upscale masks to the original image size.\n\n Arguments:\n masks (torch.Tensor): Batched masks from the mask_decoder,\n in BxCxHxW format.\n input_size (tuple(int, int)): The size of the image input to the\n model, in (H, W) format. Used to remove padding.\n original_size (tuple(int, int)): The original size of the image\n before resizing for input to the model, in (H, W) format.\n\n Returns:\n (torch.Tensor): Batched masks in BxCxHxW format, where (H, W)\n is given by original_size.\n \"\"\"\n masks = F.interpolate(\n masks,\n (self.image_encoder.img_size, self.image_encoder.img_size),\n mode=\"bilinear\",\n align_corners=False,\n )\n masks = masks[..., : input_size[0], : input_size[1]]\n masks = F.interpolate(masks, original_size, mode=\"bilinear\", align_corners=False)\n return masks\n\n def preprocess(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Normalize pixel values and pad to a square input.\"\"\"\n # Normalize colors\n x = (x - self.pixel_mean) / self.pixel_std\n\n # Pad\n h, w = x.shape[-2:]\n padh = self.image_encoder.img_size - h\n padw = self.image_encoder.img_size - w\n x = F.pad(x, (0, padw, 0, padh))\n return x" }, { "identifier": "SamPredictor", "path": "tinysam/predictor.py", "snippet": "class SamPredictor:\n def __init__(\n self,\n sam_model: Sam,\n ) -> None:\n \"\"\"\n Uses SAM to calculate the image embedding for an image, and then\n allow repeated, efficient mask prediction given prompts.\n\n Arguments:\n sam_model (Sam): The model to use for mask prediction.\n \"\"\"\n super().__init__()\n self.model = sam_model\n self.transform = ResizeLongestSide(sam_model.image_encoder.img_size)\n self.reset_image()\n\n def set_image(\n self,\n image: np.ndarray,\n image_format: str = \"RGB\",\n ) -> None:\n \"\"\"\n Calculates the image embeddings for the provided image, allowing\n masks to be predicted with the 'predict' method.\n\n Arguments:\n image (np.ndarray): The image for calculating masks. 
Expects an\n image in HWC uint8 format, with pixel values in [0, 255].\n image_format (str): The color format of the image, in ['RGB', 'BGR'].\n \"\"\"\n assert image_format in [\n \"RGB\",\n \"BGR\",\n ], f\"image_format must be in ['RGB', 'BGR'], is {image_format}.\"\n if image_format != self.model.image_format:\n image = image[..., ::-1]\n\n # Transform the image to the form expected by the model\n input_image = self.transform.apply_image(image)\n input_image_torch = torch.as_tensor(input_image, device=self.device)\n input_image_torch = input_image_torch.permute(2, 0, 1).contiguous()[None, :, :, :]\n\n self.set_torch_image(input_image_torch, image.shape[:2])\n\n @torch.no_grad()\n def set_torch_image(\n self,\n transformed_image: torch.Tensor,\n original_image_size: Tuple[int, ...],\n ) -> None:\n \"\"\"\n Calculates the image embeddings for the provided image, allowing\n masks to be predicted with the 'predict' method. Expects the input\n image to be already transformed to the format expected by the model.\n\n Arguments:\n transformed_image (torch.Tensor): The input image, with shape\n 1x3xHxW, which has been transformed with ResizeLongestSide.\n original_image_size (tuple(int, int)): The size of the image\n before transformation, in (H, W) format.\n \"\"\"\n assert (\n len(transformed_image.shape) == 4\n and transformed_image.shape[1] == 3\n and max(*transformed_image.shape[2:]) == self.model.image_encoder.img_size\n ), f\"set_torch_image input must be BCHW with long side {self.model.image_encoder.img_size}.\"\n self.reset_image()\n\n self.original_size = original_image_size\n self.input_size = tuple(transformed_image.shape[-2:])\n #import pdb; pdb.set_trace()\n input_image = self.model.preprocess(transformed_image)\n self.features = self.model.image_encoder(input_image)\n self.is_image_set = True\n\n def predict(\n self,\n point_coords: Optional[np.ndarray] = None,\n point_labels: Optional[np.ndarray] = None,\n box: Optional[np.ndarray] = None,\n mask_input: Optional[np.ndarray] = None,\n return_logits: bool = False,\n ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n \"\"\"\n Predict masks for the given input prompts, using the currently set image.\n\n Arguments:\n point_coords (np.ndarray or None): A Nx2 array of point prompts to the\n model. Each point is in (X,Y) in pixels.\n point_labels (np.ndarray or None): A length N array of labels for the\n point prompts. 1 indicates a foreground point and 0 indicates a\n background point.\n box (np.ndarray or None): A length 4 array given a box prompt to the\n model, in XYXY format.\n mask_input (np.ndarray): A low resolution mask input to the model, typically\n coming from a previous prediction iteration. Has form 1xHxW, where\n for SAM, H=W=256.\n return_logits (bool): If true, returns un-thresholded masks logits\n instead of a binary mask.\n\n Returns:\n (np.ndarray): The output masks in CxHxW format, where C is the\n number of masks, and (H, W) is the original image size.\n (np.ndarray): An array of length C containing the model's\n predictions for the quality of each mask.\n (np.ndarray): An array of shape CxHxW, where C is the number\n of masks and H=W=256. These low resolution logits can be passed to\n a subsequent iteration as mask input.\n \"\"\"\n if not self.is_image_set:\n raise RuntimeError(\"An image must be set with .set_image(...) 
before mask prediction.\")\n\n # Transform input prompts\n coords_torch, labels_torch, box_torch, mask_input_torch = None, None, None, None\n if point_coords is not None:\n assert (\n point_labels is not None\n ), \"point_labels must be supplied if point_coords is supplied.\"\n point_coords = self.transform.apply_coords(point_coords, self.original_size)\n coords_torch = torch.as_tensor(point_coords, dtype=torch.float, device=self.device)\n labels_torch = torch.as_tensor(point_labels, dtype=torch.int, device=self.device)\n coords_torch, labels_torch = coords_torch[None, :, :], labels_torch[None, :]\n if box is not None:\n box = self.transform.apply_boxes(box, self.original_size)\n box_torch = torch.as_tensor(box, dtype=torch.float, device=self.device)\n box_torch = box_torch[None, :]\n if mask_input is not None:\n mask_input_torch = torch.as_tensor(mask_input, dtype=torch.float, device=self.device)\n mask_input_torch = mask_input_torch[None, :, :, :]\n\n masks, iou_predictions, low_res_masks = self.predict_torch(\n coords_torch,\n labels_torch,\n box_torch,\n mask_input_torch,\n return_logits=return_logits,\n )\n\n masks_np = masks[0].detach().cpu().numpy()\n iou_predictions_np = iou_predictions[0].detach().cpu().numpy()\n low_res_masks_np = low_res_masks[0].detach().cpu().numpy()\n return masks_np, iou_predictions_np, low_res_masks_np\n\n @torch.no_grad()\n def predict_torch(\n self,\n point_coords: Optional[torch.Tensor],\n point_labels: Optional[torch.Tensor],\n boxes: Optional[torch.Tensor] = None,\n mask_input: Optional[torch.Tensor] = None,\n return_logits: bool = False,\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n \"\"\"\n Predict masks for the given input prompts, using the currently set image.\n Input prompts are batched torch tensors and are expected to already be\n transformed to the input frame using ResizeLongestSide.\n\n Arguments:\n point_coords (torch.Tensor or None): A BxNx2 array of point prompts to the\n model. Each point is in (X,Y) in pixels.\n point_labels (torch.Tensor or None): A BxN array of labels for the\n point prompts. 1 indicates a foreground point and 0 indicates a\n background point.\n boxes (np.ndarray or None): A Bx4 array given a box prompt to the\n model, in XYXY format.\n mask_input (np.ndarray): A low resolution mask input to the model, typically\n coming from a previous prediction iteration. Has form Bx1xHxW, where\n for SAM, H=W=256. Masks returned by a previous iteration of the\n predict method do not need further transformation.\n return_logits (bool): If true, returns un-thresholded masks logits\n instead of a binary mask.\n\n Returns:\n (torch.Tensor): The output masks in BxCxHxW format, where C is the\n number of masks, and (H, W) is the original image size.\n (torch.Tensor): An array of shape BxC containing the model's\n predictions for the quality of each mask.\n (torch.Tensor): An array of shape BxCxHxW, where C is the number\n of masks and H=W=256. These low res logits can be passed to\n a subsequent iteration as mask input.\n \"\"\"\n if not self.is_image_set:\n raise RuntimeError(\"An image must be set with .set_image(...) 
before mask prediction.\")\n\n if point_coords is not None:\n points = (point_coords, point_labels)\n else:\n points = None\n\n # Embed prompts\n sparse_embeddings, dense_embeddings = self.model.prompt_encoder(\n points=points,\n boxes=boxes,\n masks=mask_input,\n )\n\n # Predict masks\n low_res_masks, iou_predictions = self.model.mask_decoder(\n image_embeddings=self.features,\n image_pe=self.model.prompt_encoder.get_dense_pe(),\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n )\n\n # Upscale the masks to the original image resolution\n masks = self.model.postprocess_masks(low_res_masks, self.input_size, self.original_size)\n\n if not return_logits:\n masks = masks > self.model.mask_threshold\n\n return masks, iou_predictions, low_res_masks\n\n def get_image_embedding(self) -> torch.Tensor:\n \"\"\"\n Returns the image embeddings for the currently set image, with\n shape 1xCxHxW, where C is the embedding dimension and (H,W) are\n the embedding spatial dimension of SAM (typically C=256, H=W=64).\n \"\"\"\n if not self.is_image_set:\n raise RuntimeError(\n \"An image must be set with .set_image(...) to generate an embedding.\"\n )\n assert self.features is not None, \"Features must exist if an image has been set.\"\n return self.features\n\n @property\n def device(self) -> torch.device:\n return self.model.device\n\n def reset_image(self) -> None:\n \"\"\"Resets the currently set image.\"\"\"\n self.is_image_set = False\n self.features = None\n self.orig_h = None\n self.orig_w = None\n self.input_h = None\n self.input_w = None" }, { "identifier": "MaskData", "path": "tinysam/utils/amg.py", "snippet": "class MaskData:\n \"\"\"\n A structure for storing masks and their related data in batched format.\n Implements basic filtering and concatenation.\n \"\"\"\n\n def __init__(self, **kwargs) -> None:\n for v in kwargs.values():\n assert isinstance(\n v, (list, np.ndarray, torch.Tensor)\n ), \"MaskData only supports list, numpy arrays, and torch tensors.\"\n self._stats = dict(**kwargs)\n\n def __setitem__(self, key: str, item: Any) -> None:\n assert isinstance(\n item, (list, np.ndarray, torch.Tensor)\n ), \"MaskData only supports list, numpy arrays, and torch tensors.\"\n self._stats[key] = item\n\n def __delitem__(self, key: str) -> None:\n del self._stats[key]\n\n def __getitem__(self, key: str) -> Any:\n return self._stats[key]\n\n def items(self) -> ItemsView[str, Any]:\n return self._stats.items()\n\n def filter(self, keep: torch.Tensor) -> None:\n for k, v in self._stats.items():\n if v is None:\n self._stats[k] = None\n elif isinstance(v, torch.Tensor):\n self._stats[k] = v[torch.as_tensor(keep, device=v.device)]\n elif isinstance(v, np.ndarray):\n self._stats[k] = v[keep.detach().cpu().numpy()]\n elif isinstance(v, list) and keep.dtype == torch.bool:\n self._stats[k] = [a for i, a in enumerate(v) if keep[i]]\n elif isinstance(v, list):\n self._stats[k] = [v[i] for i in keep]\n else:\n raise TypeError(f\"MaskData key {k} has an unsupported type {type(v)}.\")\n\n def cat(self, new_stats: \"MaskData\") -> None:\n for k, v in new_stats.items():\n if k not in self._stats or self._stats[k] is None:\n self._stats[k] = deepcopy(v)\n elif isinstance(v, torch.Tensor):\n self._stats[k] = torch.cat([self._stats[k], v], dim=0)\n elif isinstance(v, np.ndarray):\n self._stats[k] = np.concatenate([self._stats[k], v], axis=0)\n elif isinstance(v, list):\n self._stats[k] = self._stats[k] + deepcopy(v)\n else:\n raise TypeError(f\"MaskData key {k} has an 
unsupported type {type(v)}.\")\n\n def to_numpy(self) -> None:\n for k, v in self._stats.items():\n if isinstance(v, torch.Tensor):\n self._stats[k] = v.detach().cpu().numpy()" }, { "identifier": "area_from_rle", "path": "tinysam/utils/amg.py", "snippet": "def area_from_rle(rle: Dict[str, Any]) -> int:\n return sum(rle[\"counts\"][1::2])" }, { "identifier": "batch_iterator", "path": "tinysam/utils/amg.py", "snippet": "def batch_iterator(batch_size: int, *args) -> Generator[List[Any], None, None]:\n assert len(args) > 0 and all(\n len(a) == len(args[0]) for a in args\n ), \"Batched iteration must have inputs of all the same size.\"\n n_batches = len(args[0]) // batch_size + int(len(args[0]) % batch_size != 0)\n for b in range(n_batches):\n yield [arg[b * batch_size : (b + 1) * batch_size] for arg in args]" }, { "identifier": "batched_mask_to_box", "path": "tinysam/utils/amg.py", "snippet": "def batched_mask_to_box(masks: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Calculates boxes in XYXY format around masks. Return [0,0,0,0] for\n an empty mask. For input shape C1xC2x...xHxW, the output shape is C1xC2x...x4.\n \"\"\"\n # torch.max below raises an error on empty inputs, just skip in this case\n if torch.numel(masks) == 0:\n return torch.zeros(*masks.shape[:-2], 4, device=masks.device)\n\n # Normalize shape to CxHxW\n shape = masks.shape\n h, w = shape[-2:]\n if len(shape) > 2:\n masks = masks.flatten(0, -3)\n else:\n masks = masks.unsqueeze(0)\n\n # Get top and bottom edges\n in_height, _ = torch.max(masks, dim=-1)\n in_height_coords = in_height * torch.arange(h, device=in_height.device)[None, :]\n bottom_edges, _ = torch.max(in_height_coords, dim=-1)\n in_height_coords = in_height_coords + h * (~in_height)\n top_edges, _ = torch.min(in_height_coords, dim=-1)\n\n # Get left and right edges\n in_width, _ = torch.max(masks, dim=-2)\n in_width_coords = in_width * torch.arange(w, device=in_width.device)[None, :]\n right_edges, _ = torch.max(in_width_coords, dim=-1)\n in_width_coords = in_width_coords + w * (~in_width)\n left_edges, _ = torch.min(in_width_coords, dim=-1)\n\n # If the mask is empty the right edge will be to the left of the left edge.\n # Replace these boxes with [0, 0, 0, 0]\n empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges)\n out = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1)\n out = out * (~empty_filter).unsqueeze(-1)\n\n # Return to original shape\n if len(shape) > 2:\n out = out.reshape(*shape[:-2], 4)\n else:\n out = out[0]\n\n return out" }, { "identifier": "box_xyxy_to_xywh", "path": "tinysam/utils/amg.py", "snippet": "def box_xyxy_to_xywh(box_xyxy: torch.Tensor) -> torch.Tensor:\n box_xywh = deepcopy(box_xyxy)\n box_xywh[2] = box_xywh[2] - box_xywh[0]\n box_xywh[3] = box_xywh[3] - box_xywh[1]\n return box_xywh" }, { "identifier": "build_all_layer_point_grids", "path": "tinysam/utils/amg.py", "snippet": "def build_all_layer_point_grids(\n n_per_side: int, n_layers: int, scale_per_layer: int\n) -> List[np.ndarray]:\n \"\"\"Generates point grids for all crop layers.\"\"\"\n points_by_layer = []\n for i in range(n_layers + 1):\n n_points = int(n_per_side / (scale_per_layer**i))\n points_by_layer.append(build_point_grid(n_points))\n return points_by_layer" }, { "identifier": "calculate_stability_score", "path": "tinysam/utils/amg.py", "snippet": "def calculate_stability_score(\n masks: torch.Tensor, mask_threshold: float, threshold_offset: float\n) -> torch.Tensor:\n \"\"\"\n Computes the stability score for a batch of masks. 
The stability\n score is the IoU between the binary masks obtained by thresholding\n the predicted mask logits at high and low values.\n \"\"\"\n # One mask is always contained inside the other.\n # Save memory by preventing unnecessary cast to torch.int64\n intersections = (\n (masks > (mask_threshold + threshold_offset))\n .sum(-1, dtype=torch.int16)\n .sum(-1, dtype=torch.int32)\n )\n unions = (\n (masks > (mask_threshold - threshold_offset))\n .sum(-1, dtype=torch.int16)\n .sum(-1, dtype=torch.int32)\n )\n return intersections / unions" }, { "identifier": "coco_encode_rle", "path": "tinysam/utils/amg.py", "snippet": "def coco_encode_rle(uncompressed_rle: Dict[str, Any]) -> Dict[str, Any]:\n from pycocotools import mask as mask_utils # type: ignore\n\n h, w = uncompressed_rle[\"size\"]\n rle = mask_utils.frPyObjects(uncompressed_rle, h, w)\n rle[\"counts\"] = rle[\"counts\"].decode(\"utf-8\") # Necessary to serialize with json\n return rle" }, { "identifier": "generate_crop_boxes", "path": "tinysam/utils/amg.py", "snippet": "def generate_crop_boxes(\n im_size: Tuple[int, ...], n_layers: int, overlap_ratio: float\n) -> Tuple[List[List[int]], List[int]]:\n \"\"\"\n Generates a list of crop boxes of different sizes. Each layer\n has (2**i)**2 boxes for the ith layer.\n \"\"\"\n crop_boxes, layer_idxs = [], []\n im_h, im_w = im_size\n short_side = min(im_h, im_w)\n\n # Original image\n crop_boxes.append([0, 0, im_w, im_h])\n layer_idxs.append(0)\n\n def crop_len(orig_len, n_crops, overlap):\n return int(math.ceil((overlap * (n_crops - 1) + orig_len) / n_crops))\n\n for i_layer in range(n_layers):\n n_crops_per_side = 2 ** (i_layer + 1)\n overlap = int(overlap_ratio * short_side * (2 / n_crops_per_side))\n\n crop_w = crop_len(im_w, n_crops_per_side, overlap)\n crop_h = crop_len(im_h, n_crops_per_side, overlap)\n\n crop_box_x0 = [int((crop_w - overlap) * i) for i in range(n_crops_per_side)]\n crop_box_y0 = [int((crop_h - overlap) * i) for i in range(n_crops_per_side)]\n\n # Crops in XYWH format\n for x0, y0 in product(crop_box_x0, crop_box_y0):\n box = [x0, y0, min(x0 + crop_w, im_w), min(y0 + crop_h, im_h)]\n crop_boxes.append(box)\n layer_idxs.append(i_layer + 1)\n\n return crop_boxes, layer_idxs" }, { "identifier": "is_box_near_crop_edge", "path": "tinysam/utils/amg.py", "snippet": "def is_box_near_crop_edge(\n boxes: torch.Tensor, crop_box: List[int], orig_box: List[int], atol: float = 20.0\n) -> torch.Tensor:\n \"\"\"Filter masks at the edge of a crop, but not at the edge of the original image.\"\"\"\n crop_box_torch = torch.as_tensor(crop_box, dtype=torch.float, device=boxes.device)\n orig_box_torch = torch.as_tensor(orig_box, dtype=torch.float, device=boxes.device)\n boxes = uncrop_boxes_xyxy(boxes, crop_box).float()\n near_crop_edge = torch.isclose(boxes, crop_box_torch[None, :], atol=atol, rtol=0)\n near_image_edge = torch.isclose(boxes, orig_box_torch[None, :], atol=atol, rtol=0)\n near_crop_edge = torch.logical_and(near_crop_edge, ~near_image_edge)\n return torch.any(near_crop_edge, dim=1)" }, { "identifier": "mask_to_rle_pytorch", "path": "tinysam/utils/amg.py", "snippet": "def mask_to_rle_pytorch(tensor: torch.Tensor) -> List[Dict[str, Any]]:\n \"\"\"\n Encodes masks to an uncompressed RLE, in the format expected by\n pycoco tools.\n \"\"\"\n # Put in fortran order and flatten h,w\n b, h, w = tensor.shape\n tensor = tensor.permute(0, 2, 1).flatten(1)\n\n # Compute change indices\n diff = tensor[:, 1:] ^ tensor[:, :-1]\n change_indices = diff.nonzero()\n\n # Encode run length\n 
out = []\n for i in range(b):\n cur_idxs = change_indices[change_indices[:, 0] == i, 1]\n cur_idxs = torch.cat(\n [\n torch.tensor([0], dtype=cur_idxs.dtype, device=cur_idxs.device),\n cur_idxs + 1,\n torch.tensor([h * w], dtype=cur_idxs.dtype, device=cur_idxs.device),\n ]\n )\n btw_idxs = cur_idxs[1:] - cur_idxs[:-1]\n counts = [] if tensor[i, 0] == 0 else [0]\n counts.extend(btw_idxs.detach().cpu().tolist())\n out.append({\"size\": [h, w], \"counts\": counts})\n return out" }, { "identifier": "remove_small_regions", "path": "tinysam/utils/amg.py", "snippet": "def remove_small_regions(\n mask: np.ndarray, area_thresh: float, mode: str\n) -> Tuple[np.ndarray, bool]:\n \"\"\"\n Removes small disconnected regions and holes in a mask. Returns the\n mask and an indicator of if the mask has been modified.\n \"\"\"\n import cv2 # type: ignore\n\n assert mode in [\"holes\", \"islands\"]\n correct_holes = mode == \"holes\"\n working_mask = (correct_holes ^ mask).astype(np.uint8)\n n_labels, regions, stats, _ = cv2.connectedComponentsWithStats(working_mask, 8)\n sizes = stats[:, -1][1:] # Row 0 is background label\n small_regions = [i + 1 for i, s in enumerate(sizes) if s < area_thresh]\n if len(small_regions) == 0:\n return mask, False\n fill_labels = [0] + small_regions\n if not correct_holes:\n fill_labels = [i for i in range(n_labels) if i not in fill_labels]\n # If every region is below threshold, keep largest\n if len(fill_labels) == 0:\n fill_labels = [int(np.argmax(sizes)) + 1]\n mask = np.isin(regions, fill_labels)\n return mask, True" }, { "identifier": "rle_to_mask", "path": "tinysam/utils/amg.py", "snippet": "def rle_to_mask(rle: Dict[str, Any]) -> np.ndarray:\n \"\"\"Compute a binary mask from an uncompressed RLE.\"\"\"\n h, w = rle[\"size\"]\n mask = np.empty(h * w, dtype=bool)\n idx = 0\n parity = False\n for count in rle[\"counts\"]:\n mask[idx : idx + count] = parity\n idx += count\n parity ^= True\n mask = mask.reshape(w, h)\n return mask.transpose() # Put in C order" }, { "identifier": "uncrop_boxes_xyxy", "path": "tinysam/utils/amg.py", "snippet": "def uncrop_boxes_xyxy(boxes: torch.Tensor, crop_box: List[int]) -> torch.Tensor:\n x0, y0, _, _ = crop_box\n offset = torch.tensor([[x0, y0, x0, y0]], device=boxes.device)\n # Check if boxes has a channel dimension\n if len(boxes.shape) == 3:\n offset = offset.unsqueeze(1)\n return boxes + offset" }, { "identifier": "uncrop_masks", "path": "tinysam/utils/amg.py", "snippet": "def uncrop_masks(\n masks: torch.Tensor, crop_box: List[int], orig_h: int, orig_w: int\n) -> torch.Tensor:\n x0, y0, x1, y1 = crop_box\n if x0 == 0 and y0 == 0 and x1 == orig_w and y1 == orig_h:\n return masks\n # Coordinate transform masks\n pad_x, pad_y = orig_w - (x1 - x0), orig_h - (y1 - y0)\n pad = (x0, pad_x - x0, y0, pad_y - y0)\n return torch.nn.functional.pad(masks, pad, value=0)" }, { "identifier": "uncrop_points", "path": "tinysam/utils/amg.py", "snippet": "def uncrop_points(points: torch.Tensor, crop_box: List[int]) -> torch.Tensor:\n x0, y0, _, _ = crop_box\n offset = torch.tensor([[x0, y0]], device=points.device)\n # Check if points has a channel dimension\n if len(points.shape) == 3:\n offset = offset.unsqueeze(1)\n return points + offset" } ]
import numpy as np import torch import cv2 # type: ignore # noqa: F401 from torchvision.ops.boxes import batched_nms, box_area # type: ignore from typing import Any, Dict, List, Optional, Tuple from .modeling import Sam from .predictor import SamPredictor from .utils.amg import ( MaskData, area_from_rle, batch_iterator, batched_mask_to_box, box_xyxy_to_xywh, build_all_layer_point_grids, calculate_stability_score, coco_encode_rle, generate_crop_boxes, is_box_near_crop_edge, mask_to_rle_pytorch, remove_small_regions, rle_to_mask, uncrop_boxes_xyxy, uncrop_masks, uncrop_points, ) from pycocotools import mask as mask_utils # type: ignore # noqa: F401
11079
if or_results[int(point_coords[0] + wstride * 2), int(point_coords[1] + hstride * 2)]: continue new_points.append([(point_coords[0] + wstride * 2) / iw, (point_coords[1] + hstride * 2) / ih]) self.set_point_grids([np.array(new_points)]) new_masks = self.generate(image, False) new_masks.cat(ori_masks) new_masks = self.post_process(image, new_masks) return new_masks @torch.no_grad() def generate(self, image: np.ndarray, need_high: bool) -> MaskData: orig_size = image.shape[:2] # Get points for this crop points_scale = np.array(orig_size)[None, ::-1] points_for_image = self.point_grids[0] * points_scale # Generate masks for this crop in batches data = MaskData() for (points,) in batch_iterator(self.points_per_batch, points_for_image): orig_h, orig_w = orig_size # Run model on this batch transformed_points = self.predictor.transform.apply_coords(points, orig_size) in_points = torch.as_tensor(transformed_points, device=self.predictor.device) in_labels = torch.ones(in_points.shape[0], dtype=torch.int, device=in_points.device) masks, iou_preds, _ = self.predictor.predict_torch( in_points[:, None, :], in_labels[:, None], return_logits=True, ) # Serialize predictions and store in MaskData batch_data = MaskData( masks=masks.flatten(0, 1), iou_preds=iou_preds.flatten(0, 1), points=torch.as_tensor(points.repeat(masks.shape[1], axis=0)), ) del masks if self.pred_iou_thresh > 0.0: keep_mask = batch_data["iou_preds"] > self.pred_iou_thresh batch_data.filter(keep_mask) # Calculate stability score batch_data["stability_score"] = calculate_stability_score( batch_data["masks"], self.predictor.model.mask_threshold, self.stability_score_offset ) if self.stability_score_thresh > 0.0: keep_mask = batch_data["stability_score"] >= self.stability_score_thresh batch_data.filter(keep_mask) if need_high: batch_data["high_masks"] = batch_data["masks"] > self.high_score_thresh batch_data["masks"] = batch_data["masks"] > self.predictor.model.mask_threshold batch_data["boxes"] = batched_mask_to_box(batch_data["masks"]) keep_mask = ~is_box_near_crop_edge(batch_data["boxes"], [0, 0, orig_w, orig_h], [0, 0, orig_w, orig_h]) if not torch.all(keep_mask): batch_data.filter(keep_mask) # Compress to RLE batch_data["rles"] = mask_to_rle_pytorch(batch_data["masks"]) data.cat(batch_data) del batch_data if need_high: high_masks = data["high_masks"] or_results = torch.zeros([high_masks.shape[1], high_masks.shape[2]]).to(high_masks.device) for mask in high_masks: or_results = torch.logical_or(or_results, mask) del data["high_masks"] or_results = or_results.permute(1, 0) del data['masks'] return data, or_results else: del data['masks'] return data @torch.no_grad() def reset_image(self): self.predictor.reset_image() @torch.no_grad() def post_process(self, image: np.ndarray, data: MaskData) -> List[Dict[str, Any]]: orig_size = image.shape[:2] orig_h, orig_w = orig_size keep_by_nms = batched_nms( data["boxes"].float(), data["iou_preds"], torch.zeros_like(data["boxes"][:, 0]), # categories iou_threshold=self.box_nms_thresh, ) data.filter(keep_by_nms) # Filter small disconnected regions and holes in masks if self.min_mask_region_area > 0: data = self.postprocess_small_regions( data, self.min_mask_region_area, max(self.box_nms_thresh, self.crop_nms_thresh), ) # Encode masks if self.output_mode == "coco_rle": data["segmentations"] = [coco_encode_rle(rle) for rle in data["rles"]] elif self.output_mode == "binary_mask": data["segmentations"] = [rle_to_mask(rle) for rle in data["rles"]] else: data["segmentations"] = data["rles"] # Write mask 
records curr_anns = [] for idx in range(len(data["segmentations"])): ann = { "segmentation": data["segmentations"][idx], "area": area_from_rle(data["rles"][idx]),
# Copyright 2023 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ class SamHierarchicalMaskGenerator: def __init__( self, model: Sam, points_per_side: Optional[int] = 32, points_per_batch: int = 64, pred_iou_thresh: float = 0.88, high_score_thresh: float = 8.5, stability_score_thresh: float = 0.95, stability_score_offset: float = 1.0, box_nms_thresh: float = 0.7, crop_n_layers: int = 0, crop_nms_thresh: float = 0.7, crop_overlap_ratio: float = 512 / 1500, crop_n_points_downscale_factor: int = 1, point_grids: Optional[List[np.ndarray]] = None, min_mask_region_area: int = 0, output_mode: str = "binary_mask", ) -> None: """ Using a SAM model, generates masks for the entire image. Generates a grid of point prompts over the image, then filters low quality and duplicate masks. The default settings are chosen for SAM with a ViT-H backbone. Arguments: model (Sam): The SAM model to use for mask prediction. points_per_side (int or None): The number of points to be sampled along one side of the image. The total number of points is points_per_side**2. If None, 'point_grids' must provide explicit point sampling. points_per_batch (int): Sets the number of points run simultaneously by the model. Higher numbers may be faster but use more GPU memory. pred_iou_thresh (float): A filtering threshold in [0,1], using the model's predicted mask quality. high_score_thresh (float): A filtering threshold in [-inf,inf], to find out the unmasked area for the next generation. stability_score_thresh (float): A filtering threshold in [0,1], using the stability of the mask under changes to the cutoff used to binarize the model's mask predictions. stability_score_offset (float): The amount to shift the cutoff when calculated the stability score. box_nms_thresh (float): The box IoU cutoff used by non-maximal suppression to filter duplicate masks. crop_n_layers (int): If >0, mask prediction will be run again on crops of the image. Sets the number of layers to run, where each layer has 2**i_layer number of image crops. crop_nms_thresh (float): The box IoU cutoff used by non-maximal suppression to filter duplicate masks between different crops. crop_overlap_ratio (float): Sets the degree to which crops overlap. In the first crop layer, crops will overlap by this fraction of the image length. Later layers with more crops scale down this overlap. crop_n_points_downscale_factor (int): The number of points-per-side sampled in layer n is scaled down by crop_n_points_downscale_factor**n. point_grids (list(np.ndarray) or None): A list over explicit grids of points used for sampling, normalized to [0,1]. The nth grid in the list is used in the nth crop layer. Exclusive with points_per_side. min_mask_region_area (int): If >0, postprocessing will be applied to remove disconnected regions and holes in masks with area smaller than min_mask_region_area. Requires opencv. output_mode (str): The form masks are returned in. 
Can be 'binary_mask', 'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools. For large resolutions, 'binary_mask' may consume large amounts of memory. """ assert (points_per_side is None) != ( point_grids is None ), "Exactly one of points_per_side or point_grid must be provided." if points_per_side is not None: self.point_grids = build_all_layer_point_grids( points_per_side, crop_n_layers, crop_n_points_downscale_factor, ) elif point_grids is not None: self.point_grids = point_grids else: raise ValueError("Can't have both points_per_side and point_grid be None.") assert output_mode in [ "binary_mask", "uncompressed_rle", "coco_rle", ], f"Unknown output_mode {output_mode}." if output_mode == "coco_rle": if min_mask_region_area > 0: self.predictor = SamPredictor(model) self.points_per_side = points_per_side self.points_per_batch = points_per_batch self.pred_iou_thresh = pred_iou_thresh self.high_score_thresh = high_score_thresh self.stability_score_thresh = stability_score_thresh self.stability_score_offset = stability_score_offset self.box_nms_thresh = box_nms_thresh self.crop_n_layers = crop_n_layers self.crop_nms_thresh = crop_nms_thresh self.crop_overlap_ratio = crop_overlap_ratio self.crop_n_points_downscale_factor = crop_n_points_downscale_factor self.min_mask_region_area = min_mask_region_area self.output_mode = output_mode def set_point_grids(self, point_grids): self.point_grids = point_grids def set_points_per_side(self, points_per_side): self.point_grids = build_all_layer_point_grids( points_per_side, 0, 1, ) @torch.no_grad() def set_image(self, image: np.ndarray) -> MaskData: # Crop the image and calculate embeddings self.predictor.set_image(image) @torch.no_grad() def hierarchical_generate(self, image: np.ndarray) -> List[Dict[str, Any]]: self.set_image(image) self.set_points_per_side(self.points_per_side // 4) ori_masks, or_results = self.generate(image, True) ih, iw, _ = image.shape hstride = ih // self.points_per_side wstride = iw // self.points_per_side new_points = [] pass_counter = 0 full_point_grids = np.array(self.point_grids) for mask in range(full_point_grids.shape[1]): point_coords = [full_point_grids[0, mask, 0] * iw, full_point_grids[0, mask, 1] * ih] for sy in [-1, 0, 1]: for sx in [-1, 0, 1]: if (sy == 0 and sx == 0) or or_results[int(point_coords[0] + wstride * sy), int(point_coords[1] + hstride * sx)]: continue new_points.append([(point_coords[0] + wstride * sy) / iw, (point_coords[1] + hstride * sx) / ih]) if point_coords[0] + wstride * 2 < iw: for sx in [-1, 0, 1]: if or_results[int(point_coords[0] + wstride * 2), int(point_coords[1] + hstride * sx)]: continue new_points.append([(point_coords[0] + wstride * 2) / iw, (point_coords[1] + hstride * sx) / ih]) if point_coords[1] + hstride * 2 < ih: for sy in [-1, 0, 1]: if or_results[int(point_coords[0] + wstride * sy), int(point_coords[1] + hstride * 2)]: continue new_points.append([(point_coords[0] + wstride * sy) / iw, (point_coords[1] + hstride * 2) / ih]) if point_coords[0] + wstride * 2 < iw and point_coords[1] + hstride * 2 < ih: if or_results[int(point_coords[0] + wstride * 2), int(point_coords[1] + hstride * 2)]: continue new_points.append([(point_coords[0] + wstride * 2) / iw, (point_coords[1] + hstride * 2) / ih]) self.set_point_grids([np.array(new_points)]) new_masks = self.generate(image, False) new_masks.cat(ori_masks) new_masks = self.post_process(image, new_masks) return new_masks @torch.no_grad() def generate(self, image: np.ndarray, need_high: bool) -> MaskData: orig_size = 
image.shape[:2] # Get points for this crop points_scale = np.array(orig_size)[None, ::-1] points_for_image = self.point_grids[0] * points_scale # Generate masks for this crop in batches data = MaskData() for (points,) in batch_iterator(self.points_per_batch, points_for_image): orig_h, orig_w = orig_size # Run model on this batch transformed_points = self.predictor.transform.apply_coords(points, orig_size) in_points = torch.as_tensor(transformed_points, device=self.predictor.device) in_labels = torch.ones(in_points.shape[0], dtype=torch.int, device=in_points.device) masks, iou_preds, _ = self.predictor.predict_torch( in_points[:, None, :], in_labels[:, None], return_logits=True, ) # Serialize predictions and store in MaskData batch_data = MaskData( masks=masks.flatten(0, 1), iou_preds=iou_preds.flatten(0, 1), points=torch.as_tensor(points.repeat(masks.shape[1], axis=0)), ) del masks if self.pred_iou_thresh > 0.0: keep_mask = batch_data["iou_preds"] > self.pred_iou_thresh batch_data.filter(keep_mask) # Calculate stability score batch_data["stability_score"] = calculate_stability_score( batch_data["masks"], self.predictor.model.mask_threshold, self.stability_score_offset ) if self.stability_score_thresh > 0.0: keep_mask = batch_data["stability_score"] >= self.stability_score_thresh batch_data.filter(keep_mask) if need_high: batch_data["high_masks"] = batch_data["masks"] > self.high_score_thresh batch_data["masks"] = batch_data["masks"] > self.predictor.model.mask_threshold batch_data["boxes"] = batched_mask_to_box(batch_data["masks"]) keep_mask = ~is_box_near_crop_edge(batch_data["boxes"], [0, 0, orig_w, orig_h], [0, 0, orig_w, orig_h]) if not torch.all(keep_mask): batch_data.filter(keep_mask) # Compress to RLE batch_data["rles"] = mask_to_rle_pytorch(batch_data["masks"]) data.cat(batch_data) del batch_data if need_high: high_masks = data["high_masks"] or_results = torch.zeros([high_masks.shape[1], high_masks.shape[2]]).to(high_masks.device) for mask in high_masks: or_results = torch.logical_or(or_results, mask) del data["high_masks"] or_results = or_results.permute(1, 0) del data['masks'] return data, or_results else: del data['masks'] return data @torch.no_grad() def reset_image(self): self.predictor.reset_image() @torch.no_grad() def post_process(self, image: np.ndarray, data: MaskData) -> List[Dict[str, Any]]: orig_size = image.shape[:2] orig_h, orig_w = orig_size keep_by_nms = batched_nms( data["boxes"].float(), data["iou_preds"], torch.zeros_like(data["boxes"][:, 0]), # categories iou_threshold=self.box_nms_thresh, ) data.filter(keep_by_nms) # Filter small disconnected regions and holes in masks if self.min_mask_region_area > 0: data = self.postprocess_small_regions( data, self.min_mask_region_area, max(self.box_nms_thresh, self.crop_nms_thresh), ) # Encode masks if self.output_mode == "coco_rle": data["segmentations"] = [coco_encode_rle(rle) for rle in data["rles"]] elif self.output_mode == "binary_mask": data["segmentations"] = [rle_to_mask(rle) for rle in data["rles"]] else: data["segmentations"] = data["rles"] # Write mask records curr_anns = [] for idx in range(len(data["segmentations"])): ann = { "segmentation": data["segmentations"][idx], "area": area_from_rle(data["rles"][idx]),
"bbox": box_xyxy_to_xywh(data["boxes"][idx]).tolist(),
6
2023-12-19 11:25:54+00:00
16k
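The hierarchical_generate routine in the record above is a coarse-to-fine sampler: a sparse point grid is run first, the high-confidence masks are OR-ed into a coverage map, and a denser grid is then prompted only where that map is still empty. A simplified sketch of the coverage-driven point filtering follows (plain NumPy, with a uniform grid standing in for TinySAM's neighbor-offset scheme):

import numpy as np

def dense_points_outside_coverage(coverage: np.ndarray, stride: int) -> np.ndarray:
    """Return normalized (x, y) prompts on a dense grid, skipping covered pixels.

    coverage: HxW boolean map, True where a confident mask already exists.
    """
    h, w = coverage.shape
    xs = np.arange(stride // 2, w, stride)
    ys = np.arange(stride // 2, h, stride)
    points = []
    for y in ys:
        for x in xs:
            if not coverage[y, x]:  # only prompt regions the first pass missed
                points.append([x / w, y / h])
    return np.asarray(points)

# Example: a 64x64 image whose left half is already covered by the coarse pass.
coverage = np.zeros((64, 64), dtype=bool)
coverage[:, :32] = True
pts = dense_points_outside_coverage(coverage, stride=8)
print(pts.shape)  # (32, 2) -- new prompts land only in the uncovered right half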
OPPOMKLab/u-LLaVA
models/segment_anything/automatic_mask_generator.py
[ { "identifier": "Sam", "path": "models/segment_anything/modeling/sam.py", "snippet": "class Sam(nn.Module):\n mask_threshold: float = 0.0\n image_format: str = \"RGB\"\n\n def __init__(\n self,\n image_encoder: ImageEncoderViT,\n prompt_encoder: PromptEncoder,\n mask_decoder: MaskDecoder,\n pixel_mean: List[float] = [123.675, 116.28, 103.53],\n pixel_std: List[float] = [58.395, 57.12, 57.375],\n ) -> None:\n \"\"\"\n SAM predicts object masks from an image and input prompts.\n\n Arguments:\n image_encoder (ImageEncoderViT): The backbone used to encode the\n image into image embeddings that allow for efficient mask prediction.\n prompt_encoder (PromptEncoder): Encodes various types of input prompts.\n mask_decoder (MaskDecoder): Predicts masks from the image embeddings\n and encoded prompts.\n pixel_mean (list(float)): Mean values for normalizing pixels in the input image.\n pixel_std (list(float)): Std values for normalizing pixels in the input image.\n \"\"\"\n super().__init__()\n self.image_encoder = image_encoder\n self.prompt_encoder = prompt_encoder\n self.mask_decoder = mask_decoder\n self.register_buffer(\n \"pixel_mean\", torch.Tensor(pixel_mean).view(-1, 1, 1), False\n )\n self.register_buffer(\"pixel_std\", torch.Tensor(pixel_std).view(-1, 1, 1), False)\n\n @property\n def device(self) -> Any:\n return self.pixel_mean.device\n\n @torch.no_grad()\n def forward(\n self,\n batched_input: List[Dict[str, Any]],\n multimask_output: bool,\n ) -> List[Dict[str, torch.Tensor]]:\n \"\"\"\n Predicts masks end-to-end from provided images and prompts.\n If prompts are not known in advance, using SamPredictor is\n recommended over calling the model directly.\n\n Arguments:\n batched_input (list(dict)): A list over input images, each a\n dictionary with the following keys. A prompt key can be\n excluded if it is not present.\n 'image': The image as a torch tensor in 3xHxW format,\n already transformed for input to the model.\n 'original_size': (tuple(int, int)) The original size of\n the image before transformation, as (H, W).\n 'point_coords': (torch.Tensor) Batched point prompts for\n this image, with shape BxNx2. Already transformed to the\n input frame of the model.\n 'point_labels': (torch.Tensor) Batched labels for point prompts,\n with shape BxN.\n 'boxes': (torch.Tensor) Batched box inputs, with shape Bx4.\n Already transformed to the input frame of the model.\n 'mask_inputs': (torch.Tensor) Batched mask inputs to the model,\n in the form Bx1xHxW.\n multimask_output (bool): Whether the model should predict multiple\n disambiguating masks, or return a single mask.\n\n Returns:\n (list(dict)): A list over input images, where each element is\n as dictionary with the following keys.\n 'masks': (torch.Tensor) Batched binary mask predictions,\n with shape BxCxHxW, where B is the number of input prompts,\n C is determined by multimask_output, and (H, W) is the\n original size of the image.\n 'iou_predictions': (torch.Tensor) The model's predictions\n of mask quality, in shape BxC.\n 'low_res_logits': (torch.Tensor) Low resolution logits with\n shape BxCxHxW, where H=W=256. 
Can be passed as mask input\n to subsequent iterations of prediction.\n \"\"\"\n input_images = torch.stack(\n [self.preprocess(x[\"image\"]) for x in batched_input], dim=0\n )\n image_embeddings = self.image_encoder(input_images)\n\n outputs = []\n for image_record, curr_embedding in zip(batched_input, image_embeddings):\n if \"point_coords\" in image_record:\n points = (image_record[\"point_coords\"], image_record[\"point_labels\"])\n else:\n points = None\n sparse_embeddings, dense_embeddings = self.prompt_encoder(\n points=points,\n boxes=image_record.get(\"boxes\", None),\n masks=image_record.get(\"mask_inputs\", None),\n )\n low_res_masks, iou_predictions = self.mask_decoder(\n image_embeddings=curr_embedding.unsqueeze(0),\n image_pe=self.prompt_encoder.get_dense_pe(),\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n multimask_output=multimask_output,\n )\n masks = self.postprocess_masks(\n low_res_masks,\n input_size=image_record[\"image\"].shape[-2:],\n original_size=image_record[\"original_size\"],\n )\n masks = masks > self.mask_threshold\n outputs.append(\n {\n \"masks\": masks,\n \"iou_predictions\": iou_predictions,\n \"low_res_logits\": low_res_masks,\n }\n )\n return outputs\n\n def postprocess_masks(\n self,\n masks: torch.Tensor,\n input_size: Tuple[int, ...],\n original_size: Tuple[int, ...],\n ) -> torch.Tensor:\n \"\"\"\n Remove padding and upscale masks to the original image size.\n\n Arguments:\n masks (torch.Tensor): Batched masks from the mask_decoder,\n in BxCxHxW format.\n input_size (tuple(int, int)): The size of the image input to the\n model, in (H, W) format. Used to remove padding.\n original_size (tuple(int, int)): The original size of the image\n before resizing for input to the model, in (H, W) format.\n\n Returns:\n (torch.Tensor): Batched masks in BxCxHxW format, where (H, W)\n is given by original_size.\n \"\"\"\n\n dtype = masks.dtype\n\n masks = F.interpolate(\n masks.float(),\n (self.image_encoder.img_size, self.image_encoder.img_size),\n mode=\"bilinear\",\n align_corners=False,\n )\n # masks = masks.to(dtype)\n masks = masks[..., : input_size[0], : input_size[1]]\n masks = F.interpolate(\n masks, original_size, mode=\"bilinear\", align_corners=False\n )\n return masks\n\n def preprocess(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Normalize pixel values and pad to a square input.\"\"\"\n # Normalize colors\n x = (x - self.pixel_mean) / self.pixel_std\n\n # Pad\n h, w = x.shape[-2:]\n padh = self.image_encoder.img_size - h\n padw = self.image_encoder.img_size - w\n x = F.pad(x, (0, padw, 0, padh))\n return x" }, { "identifier": "SamPredictor", "path": "models/segment_anything/predictor.py", "snippet": "class SamPredictor:\n def __init__(\n self,\n sam_model: Sam,\n ) -> None:\n \"\"\"\n Uses SAM to calculate the image embedding for an image, and then\n allow repeated, efficient mask prediction given prompts.\n\n Arguments:\n sam_model (Sam): The model to use for mask prediction.\n \"\"\"\n super().__init__()\n self.model = sam_model\n self.transform = ResizeLongestSide(sam_model.image_encoder.img_size)\n self.reset_image()\n\n def set_image(\n self,\n image: np.ndarray,\n image_format: str = \"RGB\",\n ) -> None:\n \"\"\"\n Calculates the image embeddings for the provided image, allowing\n masks to be predicted with the 'predict' method.\n\n Arguments:\n image (np.ndarray): The image for calculating masks. 
Expects an\n image in HWC uint8 format, with pixel values in [0, 255].\n image_format (str): The color format of the image, in ['RGB', 'BGR'].\n \"\"\"\n assert image_format in [\n \"RGB\",\n \"BGR\",\n ], f\"image_format must be in ['RGB', 'BGR'], is {image_format}.\"\n if image_format != self.model.image_format:\n image = image[..., ::-1]\n\n # Transform the image to the form expected by the model\n input_image = self.transform.apply_image(image)\n input_image_torch = torch.as_tensor(input_image, device=self.device)\n input_image_torch = input_image_torch.permute(2, 0, 1).contiguous()[\n None, :, :, :\n ]\n\n self.set_torch_image(input_image_torch, image.shape[:2])\n\n @torch.no_grad()\n def set_torch_image(\n self,\n transformed_image: torch.Tensor,\n original_image_size: Tuple[int, ...],\n ) -> None:\n \"\"\"\n Calculates the image embeddings for the provided image, allowing\n masks to be predicted with the 'predict' method. Expects the input\n image to be already transformed to the format expected by the model.\n\n Arguments:\n transformed_image (torch.Tensor): The input image, with shape\n 1x3xHxW, which has been transformed with ResizeLongestSide.\n original_image_size (tuple(int, int)): The size of the image\n before transformation, in (H, W) format.\n \"\"\"\n assert (\n len(transformed_image.shape) == 4\n and transformed_image.shape[1] == 3\n and max(*transformed_image.shape[2:]) == self.model.image_encoder.img_size\n ), f\"set_torch_image input must be BCHW with long side {self.model.image_encoder.img_size}.\"\n self.reset_image()\n\n self.original_size = original_image_size\n self.input_size = tuple(transformed_image.shape[-2:])\n input_image = self.model.preprocess(transformed_image)\n self.features = self.model.image_encoder(input_image)\n self.is_image_set = True\n\n def predict(\n self,\n point_coords: Optional[np.ndarray] = None,\n point_labels: Optional[np.ndarray] = None,\n box: Optional[np.ndarray] = None,\n mask_input: Optional[np.ndarray] = None,\n multimask_output: bool = True,\n return_logits: bool = False,\n ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n \"\"\"\n Predict masks for the given input prompts, using the currently set image.\n\n Arguments:\n point_coords (np.ndarray or None): A Nx2 array of point prompts to the\n model. Each point is in (X,Y) in pixels.\n point_labels (np.ndarray or None): A length N array of labels for the\n point prompts. 1 indicates a foreground point and 0 indicates a\n background point.\n box (np.ndarray or None): A length 4 array given a box prompt to the\n model, in XYXY format.\n mask_input (np.ndarray): A low resolution mask input to the model, typically\n coming from a previous prediction iteration. Has form 1xHxW, where\n for SAM, H=W=256.\n multimask_output (bool): If true, the model will return three masks.\n For ambiguous input prompts (such as a single click), this will often\n produce better masks than a single prediction. If only a single\n mask is needed, the model's predicted quality score can be used\n to select the best mask. 
For non-ambiguous prompts, such as multiple\n input prompts, multimask_output=False can give better results.\n return_logits (bool): If true, returns un-thresholded masks logits\n instead of a binary mask.\n\n Returns:\n (np.ndarray): The output masks in CxHxW format, where C is the\n number of masks, and (H, W) is the original image size.\n (np.ndarray): An array of length C containing the model's\n predictions for the quality of each mask.\n (np.ndarray): An array of shape CxHxW, where C is the number\n of masks and H=W=256. These low resolution logits can be passed to\n a subsequent iteration as mask input.\n \"\"\"\n if not self.is_image_set:\n raise RuntimeError(\n \"An image must be set with .set_image(...) before mask prediction.\"\n )\n\n # Transform input prompts\n coords_torch, labels_torch, box_torch, mask_input_torch = None, None, None, None\n if point_coords is not None:\n assert (\n point_labels is not None\n ), \"point_labels must be supplied if point_coords is supplied.\"\n point_coords = self.transform.apply_coords(point_coords, self.original_size)\n coords_torch = torch.as_tensor(\n point_coords, dtype=torch.float, device=self.device\n )\n labels_torch = torch.as_tensor(\n point_labels, dtype=torch.int, device=self.device\n )\n coords_torch, labels_torch = coords_torch[None, :, :], labels_torch[None, :]\n if box is not None:\n box = self.transform.apply_boxes(box, self.original_size)\n box_torch = torch.as_tensor(box, dtype=torch.float, device=self.device)\n box_torch = box_torch[None, :]\n if mask_input is not None:\n mask_input_torch = torch.as_tensor(\n mask_input, dtype=torch.float, device=self.device\n )\n mask_input_torch = mask_input_torch[None, :, :, :]\n\n masks, iou_predictions, low_res_masks = self.predict_torch(\n coords_torch,\n labels_torch,\n box_torch,\n mask_input_torch,\n multimask_output,\n return_logits=return_logits,\n )\n\n masks_np = masks[0].detach().cpu().numpy()\n iou_predictions_np = iou_predictions[0].detach().cpu().numpy()\n low_res_masks_np = low_res_masks[0].detach().cpu().numpy()\n return masks_np, iou_predictions_np, low_res_masks_np\n\n @torch.no_grad()\n def predict_torch(\n self,\n point_coords: Optional[torch.Tensor],\n point_labels: Optional[torch.Tensor],\n boxes: Optional[torch.Tensor] = None,\n mask_input: Optional[torch.Tensor] = None,\n multimask_output: bool = True,\n return_logits: bool = False,\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n \"\"\"\n Predict masks for the given input prompts, using the currently set image.\n Input prompts are batched torch tensors and are expected to already be\n transformed to the input frame using ResizeLongestSide.\n\n Arguments:\n point_coords (torch.Tensor or None): A BxNx2 array of point prompts to the\n model. Each point is in (X,Y) in pixels.\n point_labels (torch.Tensor or None): A BxN array of labels for the\n point prompts. 1 indicates a foreground point and 0 indicates a\n background point.\n boxes (np.ndarray or None): A Bx4 array given a box prompt to the\n model, in XYXY format.\n mask_input (np.ndarray): A low resolution mask input to the model, typically\n coming from a previous prediction iteration. Has form Bx1xHxW, where\n for SAM, H=W=256. Masks returned by a previous iteration of the\n predict method do not need further transformation.\n multimask_output (bool): If true, the model will return three masks.\n For ambiguous input prompts (such as a single click), this will often\n produce better masks than a single prediction. 
If only a single\n mask is needed, the model's predicted quality score can be used\n to select the best mask. For non-ambiguous prompts, such as multiple\n input prompts, multimask_output=False can give better results.\n return_logits (bool): If true, returns un-thresholded masks logits\n instead of a binary mask.\n\n Returns:\n (torch.Tensor): The output masks in BxCxHxW format, where C is the\n number of masks, and (H, W) is the original image size.\n (torch.Tensor): An array of shape BxC containing the model's\n predictions for the quality of each mask.\n (torch.Tensor): An array of shape BxCxHxW, where C is the number\n of masks and H=W=256. These low res logits can be passed to\n a subsequent iteration as mask input.\n \"\"\"\n if not self.is_image_set:\n raise RuntimeError(\n \"An image must be set with .set_image(...) before mask prediction.\"\n )\n\n if point_coords is not None:\n points = (point_coords, point_labels)\n else:\n points = None\n\n # Embed prompts\n sparse_embeddings, dense_embeddings = self.model.prompt_encoder(\n points=points,\n boxes=boxes,\n masks=mask_input,\n )\n\n # Predict masks\n low_res_masks, iou_predictions = self.model.mask_decoder(\n image_embeddings=self.features,\n image_pe=self.model.prompt_encoder.get_dense_pe(),\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n multimask_output=multimask_output,\n )\n\n # Upscale the masks to the original image resolution\n masks = self.model.postprocess_masks(\n low_res_masks, self.input_size, self.original_size\n )\n\n if not return_logits:\n masks = masks > self.model.mask_threshold\n\n return masks, iou_predictions, low_res_masks\n\n def get_image_embedding(self) -> torch.Tensor:\n \"\"\"\n Returns the image embeddings for the currently set image, with\n shape 1xCxHxW, where C is the embedding dimension and (H,W) are\n the embedding spatial dimension of SAM (typically C=256, H=W=64).\n \"\"\"\n if not self.is_image_set:\n raise RuntimeError(\n \"An image must be set with .set_image(...) 
to generate an embedding.\"\n )\n assert (\n self.features is not None\n ), \"Features must exist if an image has been set.\"\n return self.features\n\n @property\n def device(self) -> torch.device:\n return self.model.device\n\n def reset_image(self) -> None:\n \"\"\"Resets the currently set image.\"\"\"\n self.is_image_set = False\n self.features = None\n self.orig_h = None\n self.orig_w = None\n self.input_h = None\n self.input_w = None" }, { "identifier": "MaskData", "path": "models/segment_anything/utils/amg.py", "snippet": "class MaskData:\n \"\"\"\n A structure for storing masks and their related data in batched format.\n Implements basic filtering and concatenation.\n \"\"\"\n\n def __init__(self, **kwargs) -> None:\n for v in kwargs.values():\n assert isinstance(\n v, (list, np.ndarray, torch.Tensor)\n ), \"MaskData only supports list, numpy arrays, and torch tensors.\"\n self._stats = dict(**kwargs)\n\n def __setitem__(self, key: str, item: Any) -> None:\n assert isinstance(\n item, (list, np.ndarray, torch.Tensor)\n ), \"MaskData only supports list, numpy arrays, and torch tensors.\"\n self._stats[key] = item\n\n def __delitem__(self, key: str) -> None:\n del self._stats[key]\n\n def __getitem__(self, key: str) -> Any:\n return self._stats[key]\n\n def items(self) -> ItemsView[str, Any]:\n return self._stats.items()\n\n def filter(self, keep: torch.Tensor) -> None:\n for k, v in self._stats.items():\n if v is None:\n self._stats[k] = None\n elif isinstance(v, torch.Tensor):\n self._stats[k] = v[torch.as_tensor(keep, device=v.device)]\n elif isinstance(v, np.ndarray):\n self._stats[k] = v[keep.detach().cpu().numpy()]\n elif isinstance(v, list) and keep.dtype == torch.bool:\n self._stats[k] = [a for i, a in enumerate(v) if keep[i]]\n elif isinstance(v, list):\n self._stats[k] = [v[i] for i in keep]\n else:\n raise TypeError(f\"MaskData key {k} has an unsupported type {type(v)}.\")\n\n def cat(self, new_stats: \"MaskData\") -> None:\n for k, v in new_stats.items():\n if k not in self._stats or self._stats[k] is None:\n self._stats[k] = deepcopy(v)\n elif isinstance(v, torch.Tensor):\n self._stats[k] = torch.cat([self._stats[k], v], dim=0)\n elif isinstance(v, np.ndarray):\n self._stats[k] = np.concatenate([self._stats[k], v], axis=0)\n elif isinstance(v, list):\n self._stats[k] = self._stats[k] + deepcopy(v)\n else:\n raise TypeError(f\"MaskData key {k} has an unsupported type {type(v)}.\")\n\n def to_numpy(self) -> None:\n for k, v in self._stats.items():\n if isinstance(v, torch.Tensor):\n self._stats[k] = v.detach().cpu().numpy()" }, { "identifier": "area_from_rle", "path": "models/segment_anything/utils/amg.py", "snippet": "def area_from_rle(rle: Dict[str, Any]) -> int:\n return sum(rle[\"counts\"][1::2])" }, { "identifier": "batch_iterator", "path": "models/segment_anything/utils/amg.py", "snippet": "def batch_iterator(batch_size: int, *args) -> Generator[List[Any], None, None]:\n assert len(args) > 0 and all(\n len(a) == len(args[0]) for a in args\n ), \"Batched iteration must have inputs of all the same size.\"\n n_batches = len(args[0]) // batch_size + int(len(args[0]) % batch_size != 0)\n for b in range(n_batches):\n yield [arg[b * batch_size : (b + 1) * batch_size] for arg in args]" }, { "identifier": "batched_mask_to_box", "path": "models/segment_anything/utils/amg.py", "snippet": "def batched_mask_to_box(masks: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Calculates boxes in XYXY format around masks. Return [0,0,0,0] for\n an empty mask. 
For input shape C1xC2x...xHxW, the output shape is C1xC2x...x4.\n \"\"\"\n # torch.max below raises an error on empty inputs, just skip in this case\n if torch.numel(masks) == 0:\n return torch.zeros(*masks.shape[:-2], 4, device=masks.device)\n\n # Normalize shape to CxHxW\n shape = masks.shape\n h, w = shape[-2:]\n if len(shape) > 2:\n masks = masks.flatten(0, -3)\n else:\n masks = masks.unsqueeze(0)\n\n # Get top and bottom edges\n in_height, _ = torch.max(masks, dim=-1)\n in_height_coords = in_height * torch.arange(h, device=in_height.device)[None, :]\n bottom_edges, _ = torch.max(in_height_coords, dim=-1)\n in_height_coords = in_height_coords + h * (~in_height)\n top_edges, _ = torch.min(in_height_coords, dim=-1)\n\n # Get left and right edges\n in_width, _ = torch.max(masks, dim=-2)\n in_width_coords = in_width * torch.arange(w, device=in_width.device)[None, :]\n right_edges, _ = torch.max(in_width_coords, dim=-1)\n in_width_coords = in_width_coords + w * (~in_width)\n left_edges, _ = torch.min(in_width_coords, dim=-1)\n\n # If the mask is empty the right edge will be to the left of the left edge.\n # Replace these boxes with [0, 0, 0, 0]\n empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges)\n out = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1)\n out = out * (~empty_filter).unsqueeze(-1)\n\n # Return to original shape\n if len(shape) > 2:\n out = out.reshape(*shape[:-2], 4)\n else:\n out = out[0]\n\n return out" }, { "identifier": "box_xyxy_to_xywh", "path": "models/segment_anything/utils/amg.py", "snippet": "def box_xyxy_to_xywh(box_xyxy: torch.Tensor) -> torch.Tensor:\n box_xywh = deepcopy(box_xyxy)\n box_xywh[2] = box_xywh[2] - box_xywh[0]\n box_xywh[3] = box_xywh[3] - box_xywh[1]\n return box_xywh" }, { "identifier": "build_all_layer_point_grids", "path": "models/segment_anything/utils/amg.py", "snippet": "def build_all_layer_point_grids(\n n_per_side: int, n_layers: int, scale_per_layer: int\n) -> List[np.ndarray]:\n \"\"\"Generates point grids for all crop layers.\"\"\"\n points_by_layer = []\n for i in range(n_layers + 1):\n n_points = int(n_per_side / (scale_per_layer**i))\n points_by_layer.append(build_point_grid(n_points))\n return points_by_layer" }, { "identifier": "calculate_stability_score", "path": "models/segment_anything/utils/amg.py", "snippet": "def calculate_stability_score(\n masks: torch.Tensor, mask_threshold: float, threshold_offset: float\n) -> torch.Tensor:\n \"\"\"\n Computes the stability score for a batch of masks. 
The stability\n score is the IoU between the binary masks obtained by thresholding\n the predicted mask logits at high and low values.\n \"\"\"\n # One mask is always contained inside the other.\n # Save memory by preventing unnecessary cast to torch.int64\n intersections = (\n (masks > (mask_threshold + threshold_offset))\n .sum(-1, dtype=torch.int16)\n .sum(-1, dtype=torch.int32)\n )\n unions = (\n (masks > (mask_threshold - threshold_offset))\n .sum(-1, dtype=torch.int16)\n .sum(-1, dtype=torch.int32)\n )\n return intersections / unions" }, { "identifier": "coco_encode_rle", "path": "models/segment_anything/utils/amg.py", "snippet": "def coco_encode_rle(uncompressed_rle: Dict[str, Any]) -> Dict[str, Any]:\n from pycocotools import mask as mask_utils # type: ignore\n\n h, w = uncompressed_rle[\"size\"]\n rle = mask_utils.frPyObjects(uncompressed_rle, h, w)\n rle[\"counts\"] = rle[\"counts\"].decode(\"utf-8\") # Necessary to serialize with json\n return rle" }, { "identifier": "generate_crop_boxes", "path": "models/segment_anything/utils/amg.py", "snippet": "def generate_crop_boxes(\n im_size: Tuple[int, ...], n_layers: int, overlap_ratio: float\n) -> Tuple[List[List[int]], List[int]]:\n \"\"\"\n Generates a list of crop boxes of different sizes. Each layer\n has (2**i)**2 boxes for the ith layer.\n \"\"\"\n crop_boxes, layer_idxs = [], []\n im_h, im_w = im_size\n short_side = min(im_h, im_w)\n\n # Original image\n crop_boxes.append([0, 0, im_w, im_h])\n layer_idxs.append(0)\n\n def crop_len(orig_len, n_crops, overlap):\n return int(math.ceil((overlap * (n_crops - 1) + orig_len) / n_crops))\n\n for i_layer in range(n_layers):\n n_crops_per_side = 2 ** (i_layer + 1)\n overlap = int(overlap_ratio * short_side * (2 / n_crops_per_side))\n\n crop_w = crop_len(im_w, n_crops_per_side, overlap)\n crop_h = crop_len(im_h, n_crops_per_side, overlap)\n\n crop_box_x0 = [int((crop_w - overlap) * i) for i in range(n_crops_per_side)]\n crop_box_y0 = [int((crop_h - overlap) * i) for i in range(n_crops_per_side)]\n\n # Crops in XYWH format\n for x0, y0 in product(crop_box_x0, crop_box_y0):\n box = [x0, y0, min(x0 + crop_w, im_w), min(y0 + crop_h, im_h)]\n crop_boxes.append(box)\n layer_idxs.append(i_layer + 1)\n\n return crop_boxes, layer_idxs" }, { "identifier": "is_box_near_crop_edge", "path": "models/segment_anything/utils/amg.py", "snippet": "def is_box_near_crop_edge(\n boxes: torch.Tensor, crop_box: List[int], orig_box: List[int], atol: float = 20.0\n) -> torch.Tensor:\n \"\"\"Filter masks at the edge of a crop, but not at the edge of the original image.\"\"\"\n crop_box_torch = torch.as_tensor(crop_box, dtype=torch.float, device=boxes.device)\n orig_box_torch = torch.as_tensor(orig_box, dtype=torch.float, device=boxes.device)\n boxes = uncrop_boxes_xyxy(boxes, crop_box).float()\n near_crop_edge = torch.isclose(boxes, crop_box_torch[None, :], atol=atol, rtol=0)\n near_image_edge = torch.isclose(boxes, orig_box_torch[None, :], atol=atol, rtol=0)\n near_crop_edge = torch.logical_and(near_crop_edge, ~near_image_edge)\n return torch.any(near_crop_edge, dim=1)" }, { "identifier": "mask_to_rle_pytorch", "path": "models/segment_anything/utils/amg.py", "snippet": "def mask_to_rle_pytorch(tensor: torch.Tensor) -> List[Dict[str, Any]]:\n \"\"\"\n Encodes masks to an uncompressed RLE, in the format expected by\n pycoco tools.\n \"\"\"\n # Put in fortran order and flatten h,w\n b, h, w = tensor.shape\n tensor = tensor.permute(0, 2, 1).flatten(1)\n\n # Compute change indices\n diff = tensor[:, 1:] ^ tensor[:, 
:-1]\n change_indices = diff.nonzero()\n\n # Encode run length\n out = []\n for i in range(b):\n cur_idxs = change_indices[change_indices[:, 0] == i, 1]\n cur_idxs = torch.cat(\n [\n torch.tensor([0], dtype=cur_idxs.dtype, device=cur_idxs.device),\n cur_idxs + 1,\n torch.tensor([h * w], dtype=cur_idxs.dtype, device=cur_idxs.device),\n ]\n )\n btw_idxs = cur_idxs[1:] - cur_idxs[:-1]\n counts = [] if tensor[i, 0] == 0 else [0]\n counts.extend(btw_idxs.detach().cpu().tolist())\n out.append({\"size\": [h, w], \"counts\": counts})\n return out" }, { "identifier": "remove_small_regions", "path": "models/segment_anything/utils/amg.py", "snippet": "def remove_small_regions(\n mask: np.ndarray, area_thresh: float, mode: str\n) -> Tuple[np.ndarray, bool]:\n \"\"\"\n Removes small disconnected regions and holes in a mask. Returns the\n mask and an indicator of if the mask has been modified.\n \"\"\"\n import cv2 # type: ignore\n\n assert mode in [\"holes\", \"islands\"]\n correct_holes = mode == \"holes\"\n working_mask = (correct_holes ^ mask).astype(np.uint8)\n n_labels, regions, stats, _ = cv2.connectedComponentsWithStats(working_mask, 8)\n sizes = stats[:, -1][1:] # Row 0 is background label\n small_regions = [i + 1 for i, s in enumerate(sizes) if s < area_thresh]\n if len(small_regions) == 0:\n return mask, False\n fill_labels = [0] + small_regions\n if not correct_holes:\n fill_labels = [i for i in range(n_labels) if i not in fill_labels]\n # If every region is below threshold, keep largest\n if len(fill_labels) == 0:\n fill_labels = [int(np.argmax(sizes)) + 1]\n mask = np.isin(regions, fill_labels)\n return mask, True" }, { "identifier": "rle_to_mask", "path": "models/segment_anything/utils/amg.py", "snippet": "def rle_to_mask(rle: Dict[str, Any]) -> np.ndarray:\n \"\"\"Compute a binary mask from an uncompressed RLE.\"\"\"\n h, w = rle[\"size\"]\n mask = np.empty(h * w, dtype=bool)\n idx = 0\n parity = False\n for count in rle[\"counts\"]:\n mask[idx : idx + count] = parity\n idx += count\n parity ^= True\n mask = mask.reshape(w, h)\n return mask.transpose() # Put in C order" }, { "identifier": "uncrop_boxes_xyxy", "path": "models/segment_anything/utils/amg.py", "snippet": "def uncrop_boxes_xyxy(boxes: torch.Tensor, crop_box: List[int]) -> torch.Tensor:\n x0, y0, _, _ = crop_box\n offset = torch.tensor([[x0, y0, x0, y0]], device=boxes.device)\n # Check if boxes has a channel dimension\n if len(boxes.shape) == 3:\n offset = offset.unsqueeze(1)\n return boxes + offset" }, { "identifier": "uncrop_masks", "path": "models/segment_anything/utils/amg.py", "snippet": "def uncrop_masks(\n masks: torch.Tensor, crop_box: List[int], orig_h: int, orig_w: int\n) -> torch.Tensor:\n x0, y0, x1, y1 = crop_box\n if x0 == 0 and y0 == 0 and x1 == orig_w and y1 == orig_h:\n return masks\n # Coordinate transform masks\n pad_x, pad_y = orig_w - (x1 - x0), orig_h - (y1 - y0)\n pad = (x0, pad_x - x0, y0, pad_y - y0)\n return torch.nn.functional.pad(masks, pad, value=0)" }, { "identifier": "uncrop_points", "path": "models/segment_anything/utils/amg.py", "snippet": "def uncrop_points(points: torch.Tensor, crop_box: List[int]) -> torch.Tensor:\n x0, y0, _, _ = crop_box\n offset = torch.tensor([[x0, y0]], device=points.device)\n # Check if points has a channel dimension\n if len(points.shape) == 3:\n offset = offset.unsqueeze(1)\n return points + offset" } ]
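The context snippets around mask_to_rle_pytorch and rle_to_mask above all revolve around the same uncompressed RLE layout: the mask is flattened in column-major (Fortran) order, and "counts" alternates background/foreground run lengths starting with background. As a quick illustration of that format, here is a small standalone round-trip sketch in NumPy; encode_uncompressed_rle and decode_uncompressed_rle are names made up for this note, not functions from the repository.

import numpy as np

def encode_uncompressed_rle(mask: np.ndarray) -> dict:
    # Column-major (Fortran) flatten, matching the convention used by mask_to_rle_pytorch.
    h, w = mask.shape
    flat = mask.flatten(order="F").astype(bool)
    # Run boundaries: positions where the value changes, plus the two ends.
    change = np.flatnonzero(flat[1:] != flat[:-1]) + 1
    boundaries = np.concatenate([[0], change, [flat.size]])
    counts = np.diff(boundaries).tolist()
    if flat[0]:
        # First pixel is foreground -> prepend an explicit zero-length background run.
        counts = [0] + counts
    return {"size": [h, w], "counts": counts}

def decode_uncompressed_rle(rle: dict) -> np.ndarray:
    h, w = rle["size"]
    flat = np.empty(h * w, dtype=bool)
    idx, value = 0, False
    for count in rle["counts"]:
        flat[idx: idx + count] = value
        idx += count
        value = not value
    return flat.reshape(w, h).T  # back to (H, W) in C order

mask = np.zeros((4, 5), dtype=bool)
mask[1:3, 2:4] = True
rle = encode_uncompressed_rle(mask)
assert (decode_uncompressed_rle(rle) == mask).all()
print(rle["counts"])  # alternating background/foreground run lengths

Since the counts always sum to H*W and foreground runs sit at the odd indices, this is also why area_from_rle can report the mask area as sum(counts[1::2]).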
from typing import Any, Dict, List, Optional, Tuple from torchvision.ops.boxes import batched_nms, box_area # type: ignore from .modeling import Sam from .predictor import SamPredictor from .utils.amg import (MaskData, area_from_rle, batch_iterator, batched_mask_to_box, box_xyxy_to_xywh, build_all_layer_point_grids, calculate_stability_score, coco_encode_rle, generate_crop_boxes, is_box_near_crop_edge, mask_to_rle_pytorch, remove_small_regions, rle_to_mask, uncrop_boxes_xyxy, uncrop_masks, uncrop_points) from pycocotools import \ mask as mask_utils # type: ignore # noqa: F401 import numpy as np import torch import cv2 # type: ignore # noqa: F401
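The import list above pulls in calculate_stability_score, which rates a mask by how little it changes when the logit cutoff is shifted up or down; because the high-threshold mask is always contained in the low-threshold one, the IoU collapses to a ratio of pixel counts. A minimal standalone sketch of that idea with made-up logits (stability_score below is my own function, not the imported helper):

import torch

def stability_score(mask_logits: torch.Tensor, threshold: float, offset: float) -> torch.Tensor:
    # IoU between the mask binarized at (threshold + offset) and at (threshold - offset).
    hi = (mask_logits > threshold + offset).sum(dim=(-2, -1)).float()
    lo = (mask_logits > threshold - offset).sum(dim=(-2, -1)).float()
    return hi / lo  # high-threshold mask is contained in the low-threshold one, so IoU = |hi| / |lo|

logits = torch.randn(3, 64, 64) * 4.0              # fake logits for three candidate masks
scores = stability_score(logits, threshold=0.0, offset=1.0)
print(scores, scores >= 0.95)                      # mirrors the stability_score_thresh filter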
11,169
# Crop the image and calculate embeddings x0, y0, x1, y1 = crop_box cropped_im = image[y0:y1, x0:x1, :] cropped_im_size = cropped_im.shape[:2] self.predictor.set_image(cropped_im) # Get points for this crop points_scale = np.array(cropped_im_size)[None, ::-1] points_for_image = self.point_grids[crop_layer_idx] * points_scale # Generate masks for this crop in batches data = MaskData() for (points,) in batch_iterator(self.points_per_batch, points_for_image): batch_data = self._process_batch( points, cropped_im_size, crop_box, orig_size ) data.cat(batch_data) del batch_data self.predictor.reset_image() # Remove duplicates within this crop. keep_by_nms = batched_nms( data["boxes"].float(), data["iou_preds"], torch.zeros_like(data["boxes"][:, 0]), # categories iou_threshold=self.box_nms_thresh, ) data.filter(keep_by_nms) # Return to the original image frame data["boxes"] = uncrop_boxes_xyxy(data["boxes"], crop_box) data["points"] = uncrop_points(data["points"], crop_box) data["crop_boxes"] = torch.tensor([crop_box for _ in range(len(data["rles"]))]) return data def _process_batch( self, points: np.ndarray, im_size: Tuple[int, ...], crop_box: List[int], orig_size: Tuple[int, ...], ) -> MaskData: orig_h, orig_w = orig_size # Run model on this batch transformed_points = self.predictor.transform.apply_coords(points, im_size) in_points = torch.as_tensor(transformed_points, device=self.predictor.device) in_labels = torch.ones( in_points.shape[0], dtype=torch.int, device=in_points.device ) masks, iou_preds, _ = self.predictor.predict_torch( in_points[:, None, :], in_labels[:, None], multimask_output=True, return_logits=True, ) # Serialize predictions and store in MaskData data = MaskData( masks=masks.flatten(0, 1), iou_preds=iou_preds.flatten(0, 1), points=torch.as_tensor(points.repeat(masks.shape[1], axis=0)), ) del masks # Filter by predicted IoU if self.pred_iou_thresh > 0.0: keep_mask = data["iou_preds"] > self.pred_iou_thresh data.filter(keep_mask) # Calculate stability score data["stability_score"] = calculate_stability_score( data["masks"], self.predictor.model.mask_threshold, self.stability_score_offset, ) if self.stability_score_thresh > 0.0: keep_mask = data["stability_score"] >= self.stability_score_thresh data.filter(keep_mask) # Threshold masks and calculate boxes data["masks"] = data["masks"] > self.predictor.model.mask_threshold data["boxes"] = batched_mask_to_box(data["masks"]) # Filter boxes that touch crop boundaries keep_mask = ~is_box_near_crop_edge( data["boxes"], crop_box, [0, 0, orig_w, orig_h] ) if not torch.all(keep_mask): data.filter(keep_mask) # Compress to RLE data["masks"] = uncrop_masks(data["masks"], crop_box, orig_h, orig_w) data["rles"] = mask_to_rle_pytorch(data["masks"]) del data["masks"] return data @staticmethod def postprocess_small_regions( mask_data: MaskData, min_area: int, nms_thresh: float ) -> MaskData: """ Removes small disconnected regions and holes in masks, then reruns box NMS to remove any new duplicates. Edits mask_data in place. Requires open-cv as a dependency. """ if len(mask_data["rles"]) == 0: return mask_data # Filter small disconnected regions and holes new_masks = [] scores = [] for rle in mask_data["rles"]: mask = rle_to_mask(rle)
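One easy-to-miss line in the cropped code above is points_scale = np.array(cropped_im_size)[None, ::-1]: the crop size arrives as (H, W), but prompt points are (X, Y) pixels, so the axes are reversed before the normalized [0, 1] grid is scaled into crop coordinates. A standalone sketch of that scaling; the grid construction here is an illustrative stand-in and not necessarily identical to the repository's build_point_grid.

import numpy as np

def make_normalized_grid(n_per_side: int) -> np.ndarray:
    # Evenly spaced (x, y) points in [0, 1]^2 -- one plausible layout for a point grid.
    offset = 1.0 / (2 * n_per_side)
    ticks = np.linspace(offset, 1.0 - offset, n_per_side)
    xs, ys = np.meshgrid(ticks, ticks)
    return np.stack([xs.ravel(), ys.ravel()], axis=-1)    # shape (n*n, 2), columns are (x, y)

crop_h, crop_w = 300, 500                                 # cropped_im_size comes back as (H, W)
grid = make_normalized_grid(4)
points_scale = np.array([crop_h, crop_w])[None, ::-1]     # [::-1] turns (H, W) into (W, H)
points_for_crop = grid * points_scale                     # pixel (X, Y) coordinates inside the crop
print(points_for_crop.min(axis=0), points_for_crop.max(axis=0))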
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. class SamAutomaticMaskGenerator: def __init__( self, model: Sam, points_per_side: Optional[int] = 32, points_per_batch: int = 64, pred_iou_thresh: float = 0.88, stability_score_thresh: float = 0.95, stability_score_offset: float = 1.0, box_nms_thresh: float = 0.7, crop_n_layers: int = 0, crop_nms_thresh: float = 0.7, crop_overlap_ratio: float = 512 / 1500, crop_n_points_downscale_factor: int = 1, point_grids: Optional[List[np.ndarray]] = None, min_mask_region_area: int = 0, output_mode: str = "binary_mask", ) -> None: """ Using a SAM model, generates masks for the entire image. Generates a grid of point prompts over the image, then filters low quality and duplicate masks. The default settings are chosen for SAM with a ViT-H backbone. Arguments: model (Sam): The SAM model to use for mask prediction. points_per_side (int or None): The number of points to be sampled along one side of the image. The total number of points is points_per_side**2. If None, 'point_grids' must provide explicit point sampling. points_per_batch (int): Sets the number of points run simultaneously by the model. Higher numbers may be faster but use more GPU memory. pred_iou_thresh (float): A filtering threshold in [0,1], using the model's predicted mask quality. stability_score_thresh (float): A filtering threshold in [0,1], using the stability of the mask under changes to the cutoff used to binarize the model's mask predictions. stability_score_offset (float): The amount to shift the cutoff when calculated the stability score. box_nms_thresh (float): The box IoU cutoff used by non-maximal suppression to filter duplicate masks. crop_n_layers (int): If >0, mask prediction will be run again on crops of the image. Sets the number of layers to run, where each layer has 2**i_layer number of image crops. crop_nms_thresh (float): The box IoU cutoff used by non-maximal suppression to filter duplicate masks between different crops. crop_overlap_ratio (float): Sets the degree to which crops overlap. In the first crop layer, crops will overlap by this fraction of the image length. Later layers with more crops scale down this overlap. crop_n_points_downscale_factor (int): The number of points-per-side sampled in layer n is scaled down by crop_n_points_downscale_factor**n. point_grids (list(np.ndarray) or None): A list over explicit grids of points used for sampling, normalized to [0,1]. The nth grid in the list is used in the nth crop layer. Exclusive with points_per_side. min_mask_region_area (int): If >0, postprocessing will be applied to remove disconnected regions and holes in masks with area smaller than min_mask_region_area. Requires opencv. output_mode (str): The form masks are returned in. Can be 'binary_mask', 'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools. For large resolutions, 'binary_mask' may consume large amounts of memory. """ assert (points_per_side is None) != ( point_grids is None ), "Exactly one of points_per_side or point_grid must be provided." 
if points_per_side is not None: self.point_grids = build_all_layer_point_grids( points_per_side, crop_n_layers, crop_n_points_downscale_factor, ) elif point_grids is not None: self.point_grids = point_grids else: raise ValueError("Can't have both points_per_side and point_grid be None.") assert output_mode in [ "binary_mask", "uncompressed_rle", "coco_rle", ], f"Unknown output_mode {output_mode}." if output_mode == "coco_rle": if min_mask_region_area > 0: self.predictor = SamPredictor(model) self.points_per_batch = points_per_batch self.pred_iou_thresh = pred_iou_thresh self.stability_score_thresh = stability_score_thresh self.stability_score_offset = stability_score_offset self.box_nms_thresh = box_nms_thresh self.crop_n_layers = crop_n_layers self.crop_nms_thresh = crop_nms_thresh self.crop_overlap_ratio = crop_overlap_ratio self.crop_n_points_downscale_factor = crop_n_points_downscale_factor self.min_mask_region_area = min_mask_region_area self.output_mode = output_mode @torch.no_grad() def generate(self, image: np.ndarray) -> List[Dict[str, Any]]: """ Generates masks for the given image. Arguments: image (np.ndarray): The image to generate masks for, in HWC uint8 format. Returns: list(dict(str, any)): A list over records for masks. Each record is a dict containing the following keys: segmentation (dict(str, any) or np.ndarray): The mask. If output_mode='binary_mask', is an array of shape HW. Otherwise, is a dictionary containing the RLE. bbox (list(float)): The box around the mask, in XYWH format. area (int): The area in pixels of the mask. predicted_iou (float): The model's own prediction of the mask's quality. This is filtered by the pred_iou_thresh parameter. point_coords (list(list(float))): The point coordinates input to the model to generate this mask. stability_score (float): A measure of the mask's quality. This is filtered on using the stability_score_thresh parameter. crop_box (list(float)): The crop of the image used to generate the mask, given in XYWH format. 
""" # Generate masks mask_data = self._generate_masks(image) # Filter small disconnected regions and holes in masks if self.min_mask_region_area > 0: mask_data = self.postprocess_small_regions( mask_data, self.min_mask_region_area, max(self.box_nms_thresh, self.crop_nms_thresh), ) # Encode masks if self.output_mode == "coco_rle": mask_data["segmentations"] = [ coco_encode_rle(rle) for rle in mask_data["rles"] ] elif self.output_mode == "binary_mask": mask_data["segmentations"] = [rle_to_mask(rle) for rle in mask_data["rles"]] else: mask_data["segmentations"] = mask_data["rles"] # Write mask records curr_anns = [] for idx in range(len(mask_data["segmentations"])): ann = { "segmentation": mask_data["segmentations"][idx], "area": area_from_rle(mask_data["rles"][idx]), "bbox": box_xyxy_to_xywh(mask_data["boxes"][idx]).tolist(), "predicted_iou": mask_data["iou_preds"][idx].item(), "point_coords": [mask_data["points"][idx].tolist()], "stability_score": mask_data["stability_score"][idx].item(), "crop_box": box_xyxy_to_xywh(mask_data["crop_boxes"][idx]).tolist(), } curr_anns.append(ann) return curr_anns def _generate_masks(self, image: np.ndarray) -> MaskData: orig_size = image.shape[:2] crop_boxes, layer_idxs = generate_crop_boxes( orig_size, self.crop_n_layers, self.crop_overlap_ratio ) # Iterate over image crops data = MaskData() for crop_box, layer_idx in zip(crop_boxes, layer_idxs): crop_data = self._process_crop(image, crop_box, layer_idx, orig_size) data.cat(crop_data) # Remove duplicate masks between crops if len(crop_boxes) > 1: # Prefer masks from smaller crops scores = 1 / box_area(data["crop_boxes"]) scores = scores.to(data["boxes"].device) keep_by_nms = batched_nms( data["boxes"].float(), scores, torch.zeros_like(data["boxes"][:, 0]), # categories iou_threshold=self.crop_nms_thresh, ) data.filter(keep_by_nms) data.to_numpy() return data def _process_crop( self, image: np.ndarray, crop_box: List[int], crop_layer_idx: int, orig_size: Tuple[int, ...], ) -> MaskData: # Crop the image and calculate embeddings x0, y0, x1, y1 = crop_box cropped_im = image[y0:y1, x0:x1, :] cropped_im_size = cropped_im.shape[:2] self.predictor.set_image(cropped_im) # Get points for this crop points_scale = np.array(cropped_im_size)[None, ::-1] points_for_image = self.point_grids[crop_layer_idx] * points_scale # Generate masks for this crop in batches data = MaskData() for (points,) in batch_iterator(self.points_per_batch, points_for_image): batch_data = self._process_batch( points, cropped_im_size, crop_box, orig_size ) data.cat(batch_data) del batch_data self.predictor.reset_image() # Remove duplicates within this crop. 
keep_by_nms = batched_nms( data["boxes"].float(), data["iou_preds"], torch.zeros_like(data["boxes"][:, 0]), # categories iou_threshold=self.box_nms_thresh, ) data.filter(keep_by_nms) # Return to the original image frame data["boxes"] = uncrop_boxes_xyxy(data["boxes"], crop_box) data["points"] = uncrop_points(data["points"], crop_box) data["crop_boxes"] = torch.tensor([crop_box for _ in range(len(data["rles"]))]) return data def _process_batch( self, points: np.ndarray, im_size: Tuple[int, ...], crop_box: List[int], orig_size: Tuple[int, ...], ) -> MaskData: orig_h, orig_w = orig_size # Run model on this batch transformed_points = self.predictor.transform.apply_coords(points, im_size) in_points = torch.as_tensor(transformed_points, device=self.predictor.device) in_labels = torch.ones( in_points.shape[0], dtype=torch.int, device=in_points.device ) masks, iou_preds, _ = self.predictor.predict_torch( in_points[:, None, :], in_labels[:, None], multimask_output=True, return_logits=True, ) # Serialize predictions and store in MaskData data = MaskData( masks=masks.flatten(0, 1), iou_preds=iou_preds.flatten(0, 1), points=torch.as_tensor(points.repeat(masks.shape[1], axis=0)), ) del masks # Filter by predicted IoU if self.pred_iou_thresh > 0.0: keep_mask = data["iou_preds"] > self.pred_iou_thresh data.filter(keep_mask) # Calculate stability score data["stability_score"] = calculate_stability_score( data["masks"], self.predictor.model.mask_threshold, self.stability_score_offset, ) if self.stability_score_thresh > 0.0: keep_mask = data["stability_score"] >= self.stability_score_thresh data.filter(keep_mask) # Threshold masks and calculate boxes data["masks"] = data["masks"] > self.predictor.model.mask_threshold data["boxes"] = batched_mask_to_box(data["masks"]) # Filter boxes that touch crop boundaries keep_mask = ~is_box_near_crop_edge( data["boxes"], crop_box, [0, 0, orig_w, orig_h] ) if not torch.all(keep_mask): data.filter(keep_mask) # Compress to RLE data["masks"] = uncrop_masks(data["masks"], crop_box, orig_h, orig_w) data["rles"] = mask_to_rle_pytorch(data["masks"]) del data["masks"] return data @staticmethod def postprocess_small_regions( mask_data: MaskData, min_area: int, nms_thresh: float ) -> MaskData: """ Removes small disconnected regions and holes in masks, then reruns box NMS to remove any new duplicates. Edits mask_data in place. Requires open-cv as a dependency. """ if len(mask_data["rles"]) == 0: return mask_data # Filter small disconnected regions and holes new_masks = [] scores = [] for rle in mask_data["rles"]: mask = rle_to_mask(rle)
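Both the per-crop deduplication above and the cross-crop pass in _generate_masks call torchvision's batched_nms with a single dummy category, so every box competes with every other and near-duplicate masks collapse to the highest-scoring one. A toy run of that pattern (boxes and scores invented for the example):

import torch
from torchvision.ops.boxes import batched_nms

boxes = torch.tensor([
    [10.0, 10.0, 50.0, 50.0],      # box 0
    [12.0, 11.0, 52.0, 49.0],      # box 1, near-duplicate of box 0 (IoU ~ 0.86)
    [100.0, 100.0, 140.0, 160.0],  # box 2, far away from the others
])
scores = torch.tensor([0.90, 0.80, 0.95])      # e.g. predicted IoU per mask
categories = torch.zeros_like(boxes[:, 0])     # one dummy class -> NMS runs globally

keep = batched_nms(boxes, scores, categories, iou_threshold=0.7)
print(keep)   # the near-duplicate box 1 is suppressed; boxes 2 and 0 remain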
mask, changed = remove_small_regions(mask, min_area, mode="holes")
13
2023-12-21 08:10:23+00:00
16k
chinhsuanwu/ifusion
ldm/models/diffusion/ddpm.py
[ { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('data/DejaVuSans.ttf', size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "exists", "path": "ldm/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "default", "path": "ldm/util.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" }, { "identifier": "ismap", "path": "ldm/util.py", "snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)" }, { "identifier": "isimage", "path": "ldm/util.py", "snippet": "def isimage(x):\n if not isinstance(x,torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)" }, { "identifier": "mean_flat", "path": "ldm/util.py", "snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))" }, { "identifier": "count_params", "path": "ldm/util.py", "snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.\")\n return total_params" }, { "identifier": "instantiate_from_config", "path": "ldm/util.py", "snippet": "def instantiate_from_config(config, **kwargs):\n if not \"target\" in config:\n if config == \"__is_first_stage__\":\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**kwargs, **config.get(\"params\", dict()))" }, { "identifier": "LitEma", "path": "ldm/modules/ema.py", "snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n\n self.m_name2s_name = {}\n self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))\n self.register_buffer('num_updates', torch.tensor(0,dtype=torch.int) if use_num_upates\n else torch.tensor(-1,dtype=torch.int))\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n #remove as '.'-character is not allowed in buffers\n s_name = name.replace('.','')\n self.m_name2s_name.update({name:s_name})\n self.register_buffer(s_name,p.clone().detach().data)\n\n self.collected_params = []\n\n def forward(self,model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay,(1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = 
dict(self.named_buffers())\n\n for key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)" }, { "identifier": "normal_kl", "path": "ldm/modules/distributions/distributions.py", "snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. 
Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )" }, { "identifier": "DiagonalGaussianDistribution", "path": "ldm/modules/distributions/distributions.py", "snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.])\n else:\n if other is None:\n return 0.5 * torch.sum(torch.pow(self.mean, 2)\n + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3])\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var - 1.0 - self.logvar + other.logvar,\n dim=[1, 2, 3])\n\n def nll(self, sample, dims=[1,2,3]):\n if self.deterministic:\n return torch.Tensor([0.])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims)\n\n def mode(self):\n return self.mean" }, { "identifier": "VQModelInterface", "path": "ldm/models/autoencoder.py", "snippet": "class VQModelInterface(VQModel):\n def __init__(self, embed_dim, *args, **kwargs):\n super().__init__(embed_dim=embed_dim, *args, **kwargs)\n self.embed_dim = embed_dim\n\n def encode(self, x):\n h = self.encoder(x)\n h = self.quant_conv(h)\n return h\n\n def decode(self, h, force_not_quantize=False):\n # also go through quantization layer\n if not force_not_quantize:\n quant, emb_loss, info = self.quantize(h)\n else:\n quant = h\n quant = self.post_quant_conv(quant)\n dec = self.decoder(quant)\n return dec" }, { "identifier": "IdentityFirstStage", "path": "ldm/models/autoencoder.py", "snippet": "class IdentityFirstStage(torch.nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface # TODO: Should be true by default but check to not break older stuff\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x" }, { "identifier": "AutoencoderKL", "path": "ldm/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(\n self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ):\n super().__init__()\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2 * ddconfig[\"z_channels\"], 2 * embed_dim, 1)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, 
ddconfig[\"z_channels\"], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels) == int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n\n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(\n inputs,\n reconstructions,\n posterior,\n optimizer_idx,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"train\",\n )\n self.log(\n \"aeloss\",\n aeloss,\n prog_bar=True,\n logger=True,\n on_step=True,\n on_epoch=True,\n )\n self.log_dict(\n log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False\n )\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(\n inputs,\n reconstructions,\n posterior,\n optimizer_idx,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"train\",\n )\n\n self.log(\n \"discloss\",\n discloss,\n prog_bar=True,\n logger=True,\n on_step=True,\n on_epoch=True,\n )\n self.log_dict(\n log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False\n )\n return discloss\n\n def validation_step(self, batch, batch_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(\n inputs,\n reconstructions,\n posterior,\n 0,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"val\",\n )\n\n discloss, log_dict_disc = self.loss(\n inputs,\n reconstructions,\n posterior,\n 1,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"val\",\n )\n\n self.log(\"val/rec_loss\", log_dict_ae[\"val/rec_loss\"])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n def configure_optimizers(self):\n lr = self.learning_rate\n opt_ae = torch.optim.Adam(\n list(self.encoder.parameters())\n + list(self.decoder.parameters())\n + list(self.quant_conv.parameters())\n + list(self.post_quant_conv.parameters()),\n lr=lr,\n betas=(0.5, 0.9),\n )\n opt_disc = torch.optim.Adam(\n self.loss.discriminator.parameters(), lr=lr, betas=(0.5, 0.9)\n )\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, 
**kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.0 * (x - x.min()) / (x.max() - x.min()) - 1.0\n return x" }, { "identifier": "make_beta_schedule", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def make_beta_schedule(\n schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3\n):\n if schedule == \"linear\":\n betas = (\n torch.linspace(\n linear_start**0.5, linear_end**0.5, n_timestep, dtype=torch.float64\n )\n ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(\n linear_start, linear_end, n_timestep, dtype=torch.float64\n )\n elif schedule == \"sqrt\":\n betas = (\n torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n ** 0.5\n )\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()" }, { "identifier": "extract_into_tensor", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" }, { "identifier": "noise_like", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(\n shape[0], *((1,) * (len(shape) - 1))\n )\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "DDIMSampler", "path": "ldm/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def to(self, device):\n \"\"\"Same as to in torch module\n Don't really underestand why this isn't a module in the first place\"\"\"\n for k, v in self.__dict__.items():\n if isinstance(v, torch.Tensor):\n new_v = getattr(self, k).to(device)\n setattr(self, k, new_v)\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(\n self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0.0, verbose=True\n ):\n self.ddim_timesteps = make_ddim_timesteps(\n ddim_discr_method=ddim_discretize,\n num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,\n verbose=verbose,\n )\n alphas_cumprod = self.model.alphas_cumprod\n assert (\n alphas_cumprod.shape[0] == self.ddpm_num_timesteps\n ), \"alphas have to be defined for each timestep\"\n to_torch = 
lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer(\"betas\", to_torch(self.model.betas))\n self.register_buffer(\"alphas_cumprod\", to_torch(alphas_cumprod))\n self.register_buffer(\n \"alphas_cumprod_prev\", to_torch(self.model.alphas_cumprod_prev)\n )\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer(\n \"sqrt_alphas_cumprod\", to_torch(np.sqrt(alphas_cumprod.cpu()))\n )\n self.register_buffer(\n \"sqrt_one_minus_alphas_cumprod\",\n to_torch(np.sqrt(1.0 - alphas_cumprod.cpu())),\n )\n self.register_buffer(\n \"log_one_minus_alphas_cumprod\", to_torch(np.log(1.0 - alphas_cumprod.cpu()))\n )\n self.register_buffer(\n \"sqrt_recip_alphas_cumprod\", to_torch(np.sqrt(1.0 / alphas_cumprod.cpu()))\n )\n self.register_buffer(\n \"sqrt_recipm1_alphas_cumprod\",\n to_torch(np.sqrt(1.0 / alphas_cumprod.cpu() - 1)),\n )\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(\n alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,\n verbose=verbose,\n )\n self.register_buffer(\"ddim_sigmas\", ddim_sigmas)\n self.register_buffer(\"ddim_alphas\", ddim_alphas)\n self.register_buffer(\"ddim_alphas_prev\", ddim_alphas_prev)\n self.register_buffer(\"ddim_sqrt_one_minus_alphas\", np.sqrt(1.0 - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev)\n / (1 - self.alphas_cumprod)\n * (1 - self.alphas_cumprod / self.alphas_cumprod_prev)\n )\n self.register_buffer(\n \"ddim_sigmas_for_original_num_steps\", sigmas_for_original_sampling_steps\n )\n\n @torch.no_grad()\n def sample(\n self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.0,\n mask=None,\n x0=None,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n dynamic_threshold=None,\n **kwargs,\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n ctmp = conditioning[list(conditioning.keys())[0]]\n while isinstance(ctmp, list):\n ctmp = ctmp[0]\n cbs = ctmp.shape[0]\n if cbs != batch_size:\n print(\n f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\"\n )\n\n else:\n if conditioning.shape[0] != batch_size:\n print(\n f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\"\n )\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n # print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(\n conditioning,\n size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask,\n x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(\n self,\n cond,\n shape,\n x_T=None,\n ddim_use_original_steps=False,\n callback=None,\n timesteps=None,\n quantize_denoised=False,\n mask=None,\n x0=None,\n img_callback=None,\n log_every_t=100,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n dynamic_threshold=None,\n t_start=-1,\n ):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = (\n self.ddpm_num_timesteps\n if ddim_use_original_steps\n else self.ddim_timesteps\n )\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = (\n int(\n min(timesteps / self.ddim_timesteps.shape[0], 1)\n * self.ddim_timesteps.shape[0]\n )\n - 1\n )\n timesteps = self.ddim_timesteps[:subset_end]\n\n timesteps = timesteps[:t_start]\n\n intermediates = {\"x_inter\": [img], \"pred_x0\": [img]}\n time_range = (\n reversed(range(0, timesteps))\n if ddim_use_original_steps\n else np.flip(timesteps)\n )\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n # print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n # iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n # for i, step in enumerate(iterator):\n for i, step in enumerate(time_range):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(\n x0, ts\n ) # TODO: deterministic forward pass?\n img = img_orig * mask + (1.0 - mask) * img\n\n outs = self.p_sample_ddim(\n img,\n cond,\n ts,\n index=index,\n use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised,\n temperature=temperature,\n noise_dropout=noise_dropout,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n )\n img, pred_x0 = outs\n if callback:\n img = callback(i, img, pred_x0)\n if img_callback:\n img_callback(pred_x0, 
i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates[\"x_inter\"].append(img)\n intermediates[\"pred_x0\"].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(\n self,\n x,\n c,\n t,\n index,\n repeat_noise=False,\n use_original_steps=False,\n quantize_denoised=False,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n dynamic_threshold=None,\n ):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.0:\n e_t = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n if isinstance(c, dict):\n assert isinstance(unconditional_conditioning, dict)\n c_in = dict()\n for k in c:\n if isinstance(c[k], list):\n c_in[k] = [\n torch.cat([unconditional_conditioning[k][i], c[k][i]])\n for i in range(len(c[k]))\n ]\n else:\n c_in[k] = torch.cat([unconditional_conditioning[k], c[k]])\n else:\n c_in = torch.cat([unconditional_conditioning, c])\n e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(\n self.model, e_t, x, t, c, **corrector_kwargs\n )\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = (\n self.model.alphas_cumprod_prev\n if use_original_steps\n else self.ddim_alphas_prev\n )\n sqrt_one_minus_alphas = (\n self.model.sqrt_one_minus_alphas_cumprod\n if use_original_steps\n else self.ddim_sqrt_one_minus_alphas\n )\n sigmas = (\n self.model.ddim_sigmas_for_original_num_steps\n if use_original_steps\n else self.ddim_sigmas\n )\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full(\n (b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device\n )\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n pred_x0 = norm_thresholding(pred_x0, dynamic_threshold)\n\n # direction pointing to x_t\n dir_xt = (1.0 - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.0:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def encode(\n self,\n x0,\n c,\n t_enc,\n use_original_steps=False,\n return_intermediates=None,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n ):\n num_reference_steps = (\n self.ddpm_num_timesteps\n if use_original_steps\n else self.ddim_timesteps.shape[0]\n )\n\n assert t_enc <= num_reference_steps\n num_steps = t_enc\n\n if use_original_steps:\n alphas_next = self.alphas_cumprod[:num_steps]\n alphas = self.alphas_cumprod_prev[:num_steps]\n else:\n alphas_next = self.ddim_alphas[:num_steps]\n alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])\n\n x_next = x0\n intermediates = []\n inter_steps = []\n for i in tqdm(range(num_steps), 
desc=\"Encoding Image\"):\n t = torch.full(\n (x0.shape[0],), i, device=self.model.device, dtype=torch.long\n )\n if unconditional_guidance_scale == 1.0:\n noise_pred = self.model.apply_model(x_next, t, c)\n else:\n assert unconditional_conditioning is not None\n e_t_uncond, noise_pred = torch.chunk(\n self.model.apply_model(\n torch.cat((x_next, x_next)),\n torch.cat((t, t)),\n torch.cat((unconditional_conditioning, c)),\n ),\n 2,\n )\n noise_pred = e_t_uncond + unconditional_guidance_scale * (\n noise_pred - e_t_uncond\n )\n\n xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next\n weighted_noise_pred = (\n alphas_next[i].sqrt()\n * ((1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt())\n * noise_pred\n )\n x_next = xt_weighted + weighted_noise_pred\n if (\n return_intermediates\n and i % (num_steps // return_intermediates) == 0\n and i < num_steps - 1\n ):\n intermediates.append(x_next)\n inter_steps.append(i)\n elif return_intermediates and i >= num_steps - 2:\n intermediates.append(x_next)\n inter_steps.append(i)\n\n out = {\"x_encoded\": x_next, \"intermediate_steps\": inter_steps}\n if return_intermediates:\n out.update({\"intermediates\": intermediates})\n return x_next, out\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (\n extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0\n + extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise\n )\n\n @torch.no_grad()\n def decode(\n self,\n x_latent,\n cond,\n t_start,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n use_original_steps=False,\n ):\n timesteps = (\n np.arange(self.ddpm_num_timesteps)\n if use_original_steps\n else self.ddim_timesteps\n )\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc=\"Decoding image\", total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full(\n (x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long\n )\n x_dec, _ = self.p_sample_ddim(\n x_dec,\n cond,\n ts,\n index=index,\n use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n )\n return x_dec" }, { "identifier": "CrossAttention", "path": "ldm/modules/attention.py", "snippet": "class CrossAttention(nn.Module):\n def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.0):\n super().__init__()\n inner_dim = dim_head * heads\n context_dim = default(context_dim, query_dim)\n\n self.scale = dim_head**-0.5\n self.heads = heads\n\n self.to_q = nn.Linear(query_dim, inner_dim, bias=False)\n self.to_k = nn.Linear(context_dim, inner_dim, bias=False)\n self.to_v = nn.Linear(context_dim, inner_dim, bias=False)\n\n self.to_out = nn.Sequential(\n nn.Linear(inner_dim, query_dim), nn.Dropout(dropout)\n )\n\n def forward(self, x, context=None, mask=None):\n h = 
self.heads\n\n q = self.to_q(x)\n context = default(context, x)\n k = self.to_k(context)\n v = self.to_v(context)\n\n q, k, v = map(lambda t: rearrange(t, \"b n (h d) -> (b h) n d\", h=h), (q, k, v))\n\n sim = einsum(\"b i d, b j d -> b i j\", q, k) * self.scale\n\n if exists(mask):\n mask = rearrange(mask, \"b ... -> b (...)\")\n max_neg_value = -torch.finfo(sim.dtype).max\n mask = repeat(mask, \"b j -> (b h) () j\", h=h)\n sim.masked_fill_(~mask, max_neg_value)\n\n # attention, what we cannot get enough of\n attn = sim.softmax(dim=-1)\n\n out = einsum(\"b i j, b j d -> b i d\", attn, v)\n out = rearrange(out, \"(b h) n d -> b n (h d)\", h=h)\n return self.to_out(out)" } ]
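The p_sample_ddim snippet above combines an unconditional and a conditional noise prediction via classifier-free guidance and then takes a single DDIM update. The following is a minimal, self-contained sketch of just that step in eps-parameterization; the function name ddim_step, the guidance_scale argument, and the toy schedule values are illustrative assumptions, not part of the snippet itself.

import torch

def ddim_step(x, e_t_cond, e_t_uncond, alpha_t, alpha_prev, sigma_t, guidance_scale=3.0):
    """One DDIM update in eps-parameterization (sketch only).

    alpha_t, alpha_prev and sigma_t are assumed to be scalar tensors taken from a
    precomputed schedule; this is not the repository's exact API.
    """
    # classifier-free guidance: push the conditional prediction away from the unconditional one
    e_t = e_t_uncond + guidance_scale * (e_t_cond - e_t_uncond)
    # current prediction for x_0
    pred_x0 = (x - (1.0 - alpha_t).sqrt() * e_t) / alpha_t.sqrt()
    # direction pointing to x_t
    dir_xt = (1.0 - alpha_prev - sigma_t ** 2).sqrt() * e_t
    noise = sigma_t * torch.randn_like(x)
    x_prev = alpha_prev.sqrt() * pred_x0 + dir_xt + noise
    return x_prev, pred_x0

# toy usage with made-up schedule values
x = torch.randn(2, 4, 8, 8)
e_c, e_u = torch.randn_like(x), torch.randn_like(x)
x_prev, pred_x0 = ddim_step(x, e_c, e_u, torch.tensor(0.5), torch.tensor(0.6), torch.tensor(0.0))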
import torch import torch.nn as nn import numpy as np import pytorch_lightning as pl import itertools from torch.optim.lr_scheduler import LambdaLR from einops import rearrange, repeat from contextlib import contextmanager, nullcontext from functools import partial from tqdm import tqdm from torchvision.utils import make_grid from pytorch_lightning.utilities import rank_zero_only from omegaconf import ListConfig from ldm.util import ( log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config, ) from ldm.modules.ema import LitEma from ldm.modules.distributions.distributions import ( normal_kl, DiagonalGaussianDistribution, ) from ldm.models.autoencoder import ( VQModelInterface, IdentityFirstStage, AutoencoderKL, ) from ldm.modules.diffusionmodules.util import ( make_beta_schedule, extract_into_tensor, noise_like, ) from ldm.models.diffusion.ddim import DDIMSampler from ldm.modules.attention import CrossAttention
11,978
if null_label is not None: xc = null_label if isinstance(xc, ListConfig): xc = list(xc) if isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: if hasattr(xc, "to"): xc = xc.to(self.device) c = self.get_learned_conditioning(xc) else: # todo: get null label from cond_stage_model raise NotImplementedError() c = repeat(c, "1 ... -> b ...", b=batch_size).to(self.device) cond = {} cond["c_crossattn"] = [c] cond["c_concat"] = [ torch.zeros([batch_size, 4, image_size // 8, image_size // 8]).to( self.device ) ] return cond @torch.no_grad() def log_images( self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1.0, return_keys=None, quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, unconditional_guidance_scale=1.0, unconditional_guidance_label=None, use_ema_scope=True, **kwargs, ): ema_scope = self.ema_scope if use_ema_scope else nullcontext use_ddim = ddim_steps is not None log = dict() z, c, x, xrec, xc = self.get_input( batch, self.first_stage_key, return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=N, ) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) log["inputs"] = x log["reconstruction"] = xrec if self.model.conditioning_key is not None: if hasattr(self.cond_stage_model, "decode"): xc = self.cond_stage_model.decode(c) log["conditioning"] = xc elif self.cond_stage_key in ["caption", "txt"]: xc = log_txt_as_img( (x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25, ) log["conditioning"] = xc elif self.cond_stage_key == "class_label": xc = log_txt_as_img( (x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25, ) log["conditioning"] = xc elif isimage(xc): log["conditioning"] = xc if ismap(xc): log["original_conditioning"] = self.to_rgb(xc) if plot_diffusion_rows: # get diffusion row diffusion_row = list() z_start = z[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), "1 -> b", b=n_row) t = t.to(self.device).long() noise = torch.randn_like(z_start) z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise) diffusion_row.append(self.decode_first_stage(z_noisy)) diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W diffusion_grid = rearrange(diffusion_row, "n b c h w -> b n c h w") diffusion_grid = rearrange(diffusion_grid, "b n c h w -> (b n) c h w") diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0]) log["diffusion_row"] = diffusion_grid if sample: # get denoise row with ema_scope("Sampling"): samples, z_denoise_row = self.sample_log( cond=c, batch_size=N, ddim=use_ddim, ddim_steps=ddim_steps, eta=ddim_eta, ) # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True) x_samples = self.decode_first_stage(samples) log["samples"] = x_samples if plot_denoise_rows: denoise_grid = self._get_denoise_row_from_list(z_denoise_row) log["denoise_row"] = denoise_grid if ( quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL)
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {"concat": "c_concat", "crossattn": "c_crossattn", "adm": "y"} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__( self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image_target", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0.0, v_posterior=0.0, # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1.0, conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0.0, make_it_fit=False, ucg_training=None, ): super().__init__() assert parameterization in [ "eps", "x0", ], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print( f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode" ) self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? 
self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if ckpt_path is not None: self.init_from_ckpt( ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet ) self.register_schedule( given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s, ) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule( self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, ): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule( beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s, ) alphas = 1.0 - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1.0, alphas_cumprod[:-1]) (timesteps,) = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert ( alphas_cumprod.shape[0] == self.num_timesteps ), "alphas have to be defined for each timestep" to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer("betas", to_torch(betas)) self.register_buffer("alphas_cumprod", to_torch(alphas_cumprod)) self.register_buffer("alphas_cumprod_prev", to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer("sqrt_alphas_cumprod", to_torch(np.sqrt(alphas_cumprod))) self.register_buffer( "sqrt_one_minus_alphas_cumprod", to_torch(np.sqrt(1.0 - alphas_cumprod)) ) self.register_buffer( "log_one_minus_alphas_cumprod", to_torch(np.log(1.0 - alphas_cumprod)) ) self.register_buffer( "sqrt_recip_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod)) ) self.register_buffer( "sqrt_recipm1_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod - 1)) ) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * ( 1.0 - alphas_cumprod_prev ) / (1.0 - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. 
- alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer("posterior_variance", to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer( "posterior_log_variance_clipped", to_torch(np.log(np.maximum(posterior_variance, 1e-20))), ) self.register_buffer( "posterior_mean_coef1", to_torch(betas * np.sqrt(alphas_cumprod_prev) / (1.0 - alphas_cumprod)), ) self.register_buffer( "posterior_mean_coef2", to_torch( (1.0 - alphas_cumprod_prev) * np.sqrt(alphas) / (1.0 - alphas_cumprod) ), ) if self.parameterization == "eps": lvlb_weights = self.betas**2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod) ) elif self.parameterization == "x0": lvlb_weights = ( 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2.0 * 1 - torch.Tensor(alphas_cumprod)) ) else: raise NotImplementedError("mu not supported") # TODO how to choose this term lvlb_weights[0] = lvlb_weights[1] self.register_buffer("lvlb_weights", lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) if self.make_it_fit: n_params = len( [ name for name, _ in itertools.chain( self.named_parameters(), self.named_buffers() ) ] ) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params, ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[ i % old_shape[0], j % old_shape[1] ] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = ( self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(sd, strict=False) ) print( f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys" ) if len(missing) > 0: print(f"Missing Keys: {missing}") if len(unexpected) > 0: print(f"Unexpected Keys: {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. 
:return: A tuple (mean, variance, log_variance), all of x_start's shape. """ mean = extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor( self.log_one_minus_alphas_cumprod, t, x_start.shape ) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor( self.posterior_log_variance_clipped, t, x_t.shape ) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1.0, 1.0) model_mean, posterior_variance, posterior_log_variance = self.q_posterior( x_start=x_recon, x_t=x, t=t ) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance( x=x, t=t, clip_denoised=clip_denoised ) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm( reversed(range(0, self.num_timesteps)), desc="Sampling t", total=self.num_timesteps, ): img = self.p_sample( img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised, ) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop( (batch_size, channels, image_size, image_size), return_intermediates=return_intermediates, ) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise ) def get_loss(self, pred, target, mean=True): if self.loss_type == "l1": loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == "l2": if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction="none") elif self.loss_type == "smooth_l1": if mean: loss = torch.nn.functional.smooth_l1_loss(target, pred) else: loss = torch.nn.functional.smooth_l1_loss( target, pred, reduction="none" ) else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def 
p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = self.model(x_noisy, t) loss_dict = {} if self.parameterization == "eps": target = noise elif self.parameterization == "x0": target = x_start else: raise NotImplementedError( f"Paramterization {self.parameterization} not yet supported" ) loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = "train" if self.training else "val" loss_dict.update({f"{log_prefix}/loss_simple": loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f"{log_prefix}/loss_vlb": loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f"{log_prefix}/loss": loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint( 0, self.num_timesteps, (x.shape[0],), device=self.device ).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: x = x[..., None] x = rearrange(x, "b h w c -> b c h w") x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x) return loss, loss_dict def training_step(self, batch, batch_idx): for k in self.ucg_training: p = self.ucg_training[k]["p"] val = self.ucg_training[k]["val"] if val is None: val = "" for i in range(len(batch[k])): if self.ucg_prng.choice(2, p=[1 - p, p]): batch[k][i] = val loss, loss_dict = self.shared_step(batch) self.log_dict( loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True ) self.log( "global_step", self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False, ) if self.use_scheduler: lr = self.optimizers().param_groups[0]["lr"] self.log( "lr_abs", lr, prog_bar=True, logger=True, on_step=True, on_epoch=False ) return loss @torch.no_grad() def validation_step(self, batch, batch_idx): _, loss_dict_no_ema = self.shared_step(batch) with self.ema_scope(): _, loss_dict_ema = self.shared_step(batch) loss_dict_ema = {key + "_ema": loss_dict_ema[key] for key in loss_dict_ema} self.log_dict( loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True ) self.log_dict( loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True ) def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, "n b c h w -> b n c h w") denoise_grid = rearrange(denoise_grid, "b n c h w -> (b n) c h w") denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log["inputs"] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), "1 -> b", b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) 
diffusion_row.append(x_noisy) log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope("Plotting"): samples, denoise_row = self.sample( batch_size=N, return_intermediates=True ) log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__( self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image_cond", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, unet_trainable=True, *args, **kwargs, ): self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs["timesteps"] # for backwards compatibility after implementation of DiffusionWrapper if conditioning_key is None: conditioning_key = "concat" if concat_mode else "crossattn" if cond_stage_config == "__is_unconditional__": conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.unet_trainable = unet_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer("scale_factor", torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward # construct linear projection layer for concatenating image CLIP embedding and RT self.cc_projection = nn.Linear(772, 768) nn.init.eye_(list(self.cc_projection.parameters())[0][:768, :768]) nn.init.zeros_(list(self.cc_projection.parameters())[1]) self.cc_projection.requires_grad_(True) self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True def make_cond_schedule( self, ): self.cond_ids = torch.full( size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long, ) ids = torch.round( torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond) ).long() self.cond_ids[: self.num_timesteps_cond] = ids @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx, dataloader_idx): # only for very first batch if ( self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt ): assert ( self.scale_factor == 1.0 ), "rather not use custom rescaling and std-rescaling simultaneously" # set rescale weight to 1./std of encodings print("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() del self.scale_factor self.register_buffer("scale_factor", 1.0 / z.flatten().std()) print(f"setting 
self.scale_factor to {self.scale_factor}") print("### USING STD-RESCALING ###") def register_schedule( self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, ): super().register_schedule( given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s ) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config): model = instantiate_from_config(config) self.first_stage_model = model.eval() self.first_stage_model.train = disabled_train for param in self.first_stage_model.parameters(): param.requires_grad = False def instantiate_cond_stage(self, config): if not self.cond_stage_trainable: if config == "__is_first_stage__": print("Using first stage also as cond stage.") self.cond_stage_model = self.first_stage_model elif config == "__is_unconditional__": print(f"Training {self.__class__.__name__} as an unconditional model.") self.cond_stage_model = None # self.be_unconditional = True else: model = instantiate_from_config(config) self.cond_stage_model = model.eval() self.cond_stage_model.train = disabled_train for param in self.cond_stage_model.parameters(): param.requires_grad = False else: assert config != "__is_first_stage__" assert config != "__is_unconditional__" model = instantiate_from_config(config) self.cond_stage_model = model def _get_denoise_row_from_list( self, samples, desc="", force_no_decoder_quantization=False ): denoise_row = [] for zd in tqdm(samples, desc=desc): denoise_row.append( self.decode_first_stage( zd.to(self.device), force_not_quantize=force_no_decoder_quantization ) ) n_imgs_per_row = len(denoise_row) denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W denoise_grid = rearrange(denoise_row, "n b c h w -> b n c h w") denoise_grid = rearrange(denoise_grid, "b n c h w -> (b n) c h w") denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid def get_first_stage_encoding(self, encoder_posterior): if isinstance(encoder_posterior, DiagonalGaussianDistribution): z = encoder_posterior.sample() elif isinstance(encoder_posterior, torch.Tensor): z = encoder_posterior else: raise NotImplementedError( f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented" ) return self.scale_factor * z def get_learned_conditioning(self, c): if self.cond_stage_forward is None: if hasattr(self.cond_stage_model, "encode") and callable( self.cond_stage_model.encode ): c = self.cond_stage_model.encode(c) if isinstance(c, DiagonalGaussianDistribution): c = c.mode() else: c = self.cond_stage_model(c) else: assert hasattr(self.cond_stage_model, self.cond_stage_forward) c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) return c def meshgrid(self, h, w): y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) arr = torch.cat([y, x], dim=-1) return arr def delta_border(self, h, w): """ :param h: height :param w: width :return: normalized distance to image border, wtith min distance = 0 at border and max dist = 0.5 at image center """ lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) arr = self.meshgrid(h, w) / lower_right_corner dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0] edge_dist = torch.min( torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1 )[0] return edge_dist def get_weighting(self, h, w, Ly, Lx, device): weighting = 
self.delta_border(h, w) weighting = torch.clip( weighting, self.split_input_params["clip_min_weight"], self.split_input_params["clip_max_weight"], ) weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device) if self.split_input_params["tie_braker"]: L_weighting = self.delta_border(Ly, Lx) L_weighting = torch.clip( L_weighting, self.split_input_params["clip_min_tie_weight"], self.split_input_params["clip_max_tie_weight"], ) L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) weighting = weighting * L_weighting return weighting def get_fold_unfold( self, x, kernel_size, stride, uf=1, df=1 ): # todo load once not every time, shorten code """ :param x: img of size (bs, c, h, w) :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) """ bs, nc, h, w = x.shape # number of crops in image Ly = (h - kernel_size[0]) // stride[0] + 1 Lx = (w - kernel_size[1]) // stride[1] + 1 if uf == 1 and df == 1: fold_params = dict( kernel_size=kernel_size, dilation=1, padding=0, stride=stride ) unfold = torch.nn.Unfold(**fold_params) fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) weighting = self.get_weighting( kernel_size[0], kernel_size[1], Ly, Lx, x.device ).to(x.dtype) normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) elif uf > 1 and df == 1: fold_params = dict( kernel_size=kernel_size, dilation=1, padding=0, stride=stride ) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict( kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), dilation=1, padding=0, stride=(stride[0] * uf, stride[1] * uf), ) fold = torch.nn.Fold( output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2 ) weighting = self.get_weighting( kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device ).to(x.dtype) normalization = fold(weighting).view( 1, 1, h * uf, w * uf ) # normalizes the overlap weighting = weighting.view( (1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx) ) elif df > 1 and uf == 1: fold_params = dict( kernel_size=kernel_size, dilation=1, padding=0, stride=stride ) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict( kernel_size=(kernel_size[0] // df, kernel_size[0] // df), dilation=1, padding=0, stride=(stride[0] // df, stride[1] // df), ) fold = torch.nn.Fold( output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2 ) weighting = self.get_weighting( kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device ).to(x.dtype) normalization = fold(weighting).view( 1, 1, h // df, w // df ) # normalizes the overlap weighting = weighting.view( (1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx) ) else: raise NotImplementedError return fold, unfold, normalization, weighting @torch.no_grad() def get_input( self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None, uncond=0.05, ): x = super().get_input(batch, k) T = batch["T"].to(memory_format=torch.contiguous_format).float() if bs is not None: x = x[:bs] T = T[:bs].to(self.device) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() cond_key = cond_key or self.cond_stage_key xc = super().get_input(batch, cond_key).to(self.device) if bs is not None: xc = xc[:bs] cond = {} # To support classifier-free guidance, randomly drop out only text conditioning 5%, only image conditioning 5%, and both 5%. 
random = torch.rand(x.size(0), device=x.device) prompt_mask = rearrange(random < 2 * uncond, "n -> n 1 1") input_mask = 1 - rearrange( (random >= uncond).float() * (random < 3 * uncond).float(), "n -> n 1 1 1" ) null_prompt = self.get_learned_conditioning([""]) # z.shape: [8, 4, 64, 64]; c.shape: [8, 1, 768] # print('=========== xc shape ===========', xc.shape) with torch.enable_grad(): clip_emb = self.get_learned_conditioning(xc).detach() null_prompt = self.get_learned_conditioning([""]).detach() cond["c_crossattn"] = [ self.cc_projection( torch.cat( [ torch.where(prompt_mask, null_prompt, clip_emb), T[:, None, :], ], dim=-1, ) ) ] cond["c_concat"] = [ input_mask * self.encode_first_stage((xc.to(self.device))).mode().detach() ] out = [z, cond] if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_original_cond: out.append(xc) return out # @torch.no_grad() def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, "b h w c -> b c h w").contiguous() z = 1.0 / self.scale_factor * z if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) uf = self.split_input_params["vqf"] bs, nc, h, w = z.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold( z, ks, stride, uf=uf ) z = unfold(z) # (bn, nc * prod(**ks), L) # 1. Reshape to img shape z = z.view( (z.shape[0], -1, ks[0], ks[1], z.shape[-1]) ) # (bn, nc, ks[0], ks[1], L ) # 2. apply model loop over last dim if isinstance(self.first_stage_model, VQModelInterface): output_list = [ self.first_stage_model.decode( z[:, :, :, :, i], force_not_quantize=predict_cids or force_not_quantize, ) for i in range(z.shape[-1]) ] else: output_list = [ self.first_stage_model.decode(z[:, :, :, :, i]) for i in range(z.shape[-1]) ] o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) o = o * weighting # Reverse 1. reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization # norm is shape (1, 1, h, w) return decoded else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode( z, force_not_quantize=predict_cids or force_not_quantize ) else: return self.first_stage_model.decode(z) else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode( z, force_not_quantize=predict_cids or force_not_quantize ) else: return self.first_stage_model.decode(z) @torch.no_grad() def encode_first_stage(self, x): if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. 
(64, 64) df = self.split_input_params["vqf"] self.split_input_params["original_image_size"] = x.shape[-2:] bs, nc, h, w = x.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold( x, ks, stride, df=df ) z = unfold(x) # (bn, nc * prod(**ks), L) # Reshape to img shape z = z.view( (z.shape[0], -1, ks[0], ks[1], z.shape[-1]) ) # (bn, nc, ks[0], ks[1], L ) output_list = [ self.first_stage_model.encode(z[:, :, :, :, i]) for i in range(z.shape[-1]) ] o = torch.stack(output_list, axis=-1) o = o * weighting # Reverse reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization return decoded else: return self.first_stage_model.encode(x) else: return self.first_stage_model.encode(x) def shared_step(self, batch, step_ratio=None, **kwargs): x, c = self.get_input(batch, self.first_stage_key) loss = self(x, c, step_ratio=step_ratio) return loss def forward(self, x, c, step_ratio=None, *args, **kwargs): if step_ratio is not None: t = np.round((1 - step_ratio) * self.num_timesteps).clip(0, self.num_timesteps - 1) t = torch.full((x.shape[0],), t, dtype=torch.long, device=self.device) else: t = torch.randint( 0, self.num_timesteps, (x.shape[0],), device=self.device ).long() if self.model.conditioning_key is not None: assert c is not None # if self.cond_stage_trainable: # c = self.get_learned_conditioning(c) if self.shorten_cond_schedule: # TODO: drop this option tc = self.cond_ids[t].to(self.device) c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) return self.p_losses(x, c, t, *args, **kwargs) def _rescale_annotations(self, bboxes, crop_coordinates): # TODO: move to dataset def rescale_bbox(bbox): x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2]) y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3]) w = min(bbox[2] / crop_coordinates[2], 1 - x0) h = min(bbox[3] / crop_coordinates[3], 1 - y0) return x0, y0, w, h return [rescale_bbox(b) for b in bboxes] def apply_model(self, x_noisy, t, cond, return_ids=False): if isinstance(cond, dict): # hybrid case, cond is exptected to be a dict pass else: if not isinstance(cond, list): cond = [cond] key = ( "c_concat" if self.model.conditioning_key == "concat" else "c_crossattn" ) cond = {key: cond} if hasattr(self, "split_input_params"): assert len(cond) == 1 # todo can only deal with one conditioning atm assert not return_ids ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. 
(64, 64) h, w = x_noisy.shape[-2:] fold, unfold, normalization, weighting = self.get_fold_unfold( x_noisy, ks, stride ) z = unfold(x_noisy) # (bn, nc * prod(**ks), L) # Reshape to img shape z = z.view( (z.shape[0], -1, ks[0], ks[1], z.shape[-1]) ) # (bn, nc, ks[0], ks[1], L ) z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])] if ( self.cond_stage_key in ["image", "LR_image", "segmentation", "bbox_img"] and self.model.conditioning_key ): # todo check for completeness c_key = next(iter(cond.keys())) # get key c = next(iter(cond.values())) # get value assert len(c) == 1 # todo extend to list with more than one elem c = c[0] # get element c = unfold(c) c = c.view( (c.shape[0], -1, ks[0], ks[1], c.shape[-1]) ) # (bn, nc, ks[0], ks[1], L ) cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])] elif self.cond_stage_key == "coordinates_bbox": assert ( "original_image_size" in self.split_input_params ), "BoudingBoxRescaling is missing original_image_size" # assuming padding of unfold is always 0 and its dilation is always 1 n_patches_per_row = int((w - ks[0]) / stride[0] + 1) full_img_h, full_img_w = self.split_input_params["original_image_size"] # as we are operating on latents, we need the factor from the original image size to the # spatial latent size to properly rescale the crops for regenerating the bbox annotations num_downs = self.first_stage_model.encoder.num_resolutions - 1 rescale_latent = 2 ** (num_downs) # get top left postions of patches as conforming for the bbbox tokenizer, therefore we # need to rescale the tl patch coordinates to be in between (0,1) tl_patch_coordinates = [ ( rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w, rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h, ) for patch_nr in range(z.shape[-1]) ] # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w) patch_limits = [ ( x_tl, y_tl, rescale_latent * ks[0] / full_img_w, rescale_latent * ks[1] / full_img_h, ) for x_tl, y_tl in tl_patch_coordinates ] # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates] # tokenize crop coordinates for the bounding boxes of the respective patches patch_limits_tknzd = [ torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to( self.device ) for bbox in patch_limits ] # list of length l with tensors of shape (1, 2) # cut tknzd crop position from conditioning assert isinstance(cond, dict), "cond must be dict to be fed into model" cut_cond = cond["c_crossattn"][0][..., :-2].to(self.device) adapted_cond = torch.stack( [torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd] ) adapted_cond = rearrange(adapted_cond, "l b n -> (l b) n") adapted_cond = self.get_learned_conditioning(adapted_cond) adapted_cond = rearrange( adapted_cond, "(l b) n d -> l b n d", l=z.shape[-1] ) cond_list = [{"c_crossattn": [e]} for e in adapted_cond] else: cond_list = [ cond for i in range(z.shape[-1]) ] # Todo make this more efficient # apply model by loop over crops output_list = [ self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1]) ] assert not isinstance( output_list[0], tuple ) # todo cant deal with multiple model outputs check this never happens o = torch.stack(output_list, axis=-1) o = o * weighting # Reverse reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together x_recon = fold(o) / normalization else: x_recon = self.model(x_noisy, t, **cond) if 
isinstance(x_recon, tuple) and not return_ids: return x_recon[0] else: return x_recon def _predict_eps_from_xstart(self, x_t, t, pred_xstart): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart ) / extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) def _prior_bpd(self, x_start): """ Get the prior KL term for the variational lower-bound, measured in bits-per-dim. This term can't be optimized, as it only depends on the encoder. :param x_start: the [N x C x ...] tensor of inputs. :return: a batch of [N] KL values (in bits), one per batch element. """ batch_size = x_start.shape[0] t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) kl_prior = normal_kl( mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0 ) return mean_flat(kl_prior) / np.log(2.0) def p_losses(self, x_start, cond, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_output = self.apply_model(x_noisy, t, cond) loss_dict = {} prefix = "train" if self.training else "val" if self.parameterization == "x0": target = x_start elif self.parameterization == "eps": target = noise else: raise NotImplementedError() loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3]) loss_dict.update({f"{prefix}/loss_simple": loss_simple.mean()}) if self.logvar.device != self.device: self.logvar = self.logvar.to(self.device) logvar_t = self.logvar[t].to(self.device) loss = loss_simple / torch.exp(logvar_t) + logvar_t # loss = loss_simple / torch.exp(self.logvar) + self.logvar if self.learn_logvar: loss_dict.update({f"{prefix}/loss_gamma": loss.mean()}) loss_dict.update({"logvar": self.logvar.data.mean()}) loss = self.l_simple_weight * loss.mean() loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3)) loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean() loss_dict.update({f"{prefix}/loss_vlb": loss_vlb}) loss += self.original_elbo_weight * loss_vlb loss_dict.update({f"{prefix}/loss": loss}) return loss, loss_dict def p_mean_variance( self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False, return_x0=False, score_corrector=None, corrector_kwargs=None, ): t_in = t model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids) if score_corrector is not None: assert self.parameterization == "eps" model_out = score_corrector.modify_score( self, model_out, x, t, c, **corrector_kwargs ) if return_codebook_ids: model_out, logits = model_out if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out else: raise NotImplementedError() if clip_denoised: x_recon.clamp_(-1.0, 1.0) if quantize_denoised: x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon) model_mean, posterior_variance, posterior_log_variance = self.q_posterior( x_start=x_recon, x_t=x, t=t ) if return_codebook_ids: return model_mean, posterior_variance, posterior_log_variance, logits elif return_x0: return model_mean, posterior_variance, posterior_log_variance, x_recon else: return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample( self, x, c, t, clip_denoised=False, repeat_noise=False, return_codebook_ids=False, quantize_denoised=False, return_x0=False, temperature=1.0, noise_dropout=0.0, score_corrector=None, 
corrector_kwargs=None, ): b, *_, device = *x.shape, x.device outputs = self.p_mean_variance( x=x, c=c, t=t, clip_denoised=clip_denoised, return_codebook_ids=return_codebook_ids, quantize_denoised=quantize_denoised, return_x0=return_x0, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs, ) if return_codebook_ids: raise DeprecationWarning("Support dropped.") model_mean, _, model_log_variance, logits = outputs elif return_x0: model_mean, _, model_log_variance, x0 = outputs else: model_mean, _, model_log_variance = outputs noise = noise_like(x.shape, device, repeat_noise) * temperature if noise_dropout > 0.0: noise = torch.nn.functional.dropout(noise, p=noise_dropout) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) if return_codebook_ids: return model_mean + nonzero_mask * ( 0.5 * model_log_variance ).exp() * noise, logits.argmax(dim=1) if return_x0: return ( model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0, ) else: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def progressive_denoising( self, cond, shape, verbose=True, callback=None, quantize_denoised=False, img_callback=None, mask=None, x0=None, temperature=1.0, noise_dropout=0.0, score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None, log_every_t=None, ): if not log_every_t: log_every_t = self.log_every_t timesteps = self.num_timesteps if batch_size is not None: b = batch_size if batch_size is not None else shape[0] shape = [batch_size] + list(shape) else: b = batch_size = shape[0] if x_T is None: img = torch.randn(shape, device=self.device) else: img = x_T intermediates = [] if cond is not None: if isinstance(cond, dict): cond = { key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond } else: cond = ( [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] ) if start_T is not None: timesteps = min(timesteps, start_T) iterator = ( tqdm( reversed(range(0, timesteps)), desc="Progressive Generation", total=timesteps, ) if verbose else reversed(range(0, timesteps)) ) if type(temperature) == float: temperature = [temperature] * timesteps for i in iterator: ts = torch.full((b,), i, device=self.device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != "hybrid" tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img, x0_partial = self.p_sample( img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised, return_x0=True, temperature=temperature[i], noise_dropout=noise_dropout, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs, ) if mask is not None: assert x0 is not None img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1.0 - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(x0_partial) if callback: callback(i) if img_callback: img_callback(img, i) return img, intermediates @torch.no_grad() def p_sample_loop( self, cond, shape, return_intermediates=False, x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None, start_T=None, log_every_t=None, ): if not log_every_t: log_every_t = self.log_every_t device = self.betas.device b = shape[0] if x_T is None: img = torch.randn(shape, device=device) else: img = x_T intermediates = [img] if timesteps is None: 
timesteps = self.num_timesteps if start_T is not None: timesteps = min(timesteps, start_T) iterator = ( tqdm(reversed(range(0, timesteps)), desc="Sampling t", total=timesteps) if verbose else reversed(range(0, timesteps)) ) if mask is not None: assert x0 is not None assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match for i in iterator: ts = torch.full((b,), i, device=device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != "hybrid" tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img = self.p_sample( img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised, ) if mask is not None: img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1.0 - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) if callback: callback(i) if img_callback: img_callback(img, i) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample( self, cond, batch_size=16, return_intermediates=False, x_T=None, verbose=True, timesteps=None, quantize_denoised=False, mask=None, x0=None, shape=None, **kwargs, ): if shape is None: shape = (batch_size, self.channels, self.image_size, self.image_size) if cond is not None: if isinstance(cond, dict): cond = { key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond } else: cond = ( [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] ) return self.p_sample_loop( cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0, ) @torch.no_grad() def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): if ddim: ddim_sampler = DDIMSampler(self) shape = (self.channels, self.image_size, self.image_size) samples, intermediates = ddim_sampler.sample( ddim_steps, batch_size, shape, cond, verbose=False, **kwargs ) else: samples, intermediates = self.sample( cond=cond, batch_size=batch_size, return_intermediates=True, **kwargs ) return samples, intermediates @torch.no_grad() def get_unconditional_conditioning( self, batch_size, null_label=None, image_size=512 ): if null_label is not None: xc = null_label if isinstance(xc, ListConfig): xc = list(xc) if isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: if hasattr(xc, "to"): xc = xc.to(self.device) c = self.get_learned_conditioning(xc) else: # todo: get null label from cond_stage_model raise NotImplementedError() c = repeat(c, "1 ... 
-> b ...", b=batch_size).to(self.device) cond = {} cond["c_crossattn"] = [c] cond["c_concat"] = [ torch.zeros([batch_size, 4, image_size // 8, image_size // 8]).to( self.device ) ] return cond @torch.no_grad() def log_images( self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1.0, return_keys=None, quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, unconditional_guidance_scale=1.0, unconditional_guidance_label=None, use_ema_scope=True, **kwargs, ): ema_scope = self.ema_scope if use_ema_scope else nullcontext use_ddim = ddim_steps is not None log = dict() z, c, x, xrec, xc = self.get_input( batch, self.first_stage_key, return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=N, ) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) log["inputs"] = x log["reconstruction"] = xrec if self.model.conditioning_key is not None: if hasattr(self.cond_stage_model, "decode"): xc = self.cond_stage_model.decode(c) log["conditioning"] = xc elif self.cond_stage_key in ["caption", "txt"]: xc = log_txt_as_img( (x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25, ) log["conditioning"] = xc elif self.cond_stage_key == "class_label": xc = log_txt_as_img( (x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25, ) log["conditioning"] = xc elif isimage(xc): log["conditioning"] = xc if ismap(xc): log["original_conditioning"] = self.to_rgb(xc) if plot_diffusion_rows: # get diffusion row diffusion_row = list() z_start = z[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), "1 -> b", b=n_row) t = t.to(self.device).long() noise = torch.randn_like(z_start) z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise) diffusion_row.append(self.decode_first_stage(z_noisy)) diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W diffusion_grid = rearrange(diffusion_row, "n b c h w -> b n c h w") diffusion_grid = rearrange(diffusion_grid, "b n c h w -> (b n) c h w") diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0]) log["diffusion_row"] = diffusion_grid if sample: # get denoise row with ema_scope("Sampling"): samples, z_denoise_row = self.sample_log( cond=c, batch_size=N, ddim=use_ddim, ddim_steps=ddim_steps, eta=ddim_eta, ) # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True) x_samples = self.decode_first_stage(samples) log["samples"] = x_samples if plot_denoise_rows: denoise_grid = self._get_denoise_row_from_list(z_denoise_row) log["denoise_row"] = denoise_grid if ( quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL)
and not isinstance(self.first_stage_model, IdentityFirstStage)
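The get_input method in the code above implements the three-way conditioning dropout described in its comment (drop only the text conditioning 5% of the time, only the image conditioning 5%, and both 5%) by comparing one uniform draw per sample against multiples of uncond = 0.05. Below is a stripped-down sketch of only that masking logic; the function name cfg_dropout_masks and the simplified shapes are assumptions for illustration.

import torch

def cfg_dropout_masks(batch_size: int, uncond: float = 0.05, device="cpu"):
    """Sketch of the conditioning-dropout masks built in get_input above.

    One uniform draw r per sample decides which conditioning is nulled:
      r in [0, uncond)           -> drop text only   (5%)
      r in [uncond, 2*uncond)    -> drop text + image (5%)
      r in [2*uncond, 3*uncond)  -> drop image only  (5%)
    """
    r = torch.rand(batch_size, device=device)
    # True where the text embedding should be replaced by the null prompt
    drop_text = (r < 2 * uncond).view(-1, 1, 1)
    # 0 where the concatenated image latent should be zeroed, 1 elsewhere
    keep_image = 1.0 - ((r >= uncond) & (r < 3 * uncond)).float().view(-1, 1, 1, 1)
    return drop_text, keep_image

drop_text, keep_image = cfg_dropout_masks(batch_size=8)
print(drop_text.shape, keep_image.shape)  # torch.Size([8, 1, 1]) torch.Size([8, 1, 1, 1])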
12
2023-12-17 12:45:38+00:00
16k
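The DDPM class in the ddpm.py code above evaluates under an ema_scope context manager that temporarily swaps the online weights for their exponential moving average and restores them afterwards. The sketch below is a minimal standalone version of that store/copy/restore pattern; SimpleEma is a hypothetical stand-in for LitEma, written only to show the mechanism, not the library's actual class.

import copy
from contextlib import contextmanager

import torch
import torch.nn as nn

class SimpleEma:
    """Tiny illustrative stand-in for LitEma: keeps an EMA copy of a module's parameters."""

    def __init__(self, model: nn.Module, decay: float = 0.999):
        self.decay = decay
        self.shadow = {k: p.detach().clone() for k, p in model.named_parameters()}
        self.backup = {}

    @torch.no_grad()
    def update(self, model: nn.Module):
        # shadow <- decay * shadow + (1 - decay) * current weights
        for k, p in model.named_parameters():
            self.shadow[k].mul_(self.decay).add_(p.detach(), alpha=1.0 - self.decay)

    def store(self, model: nn.Module):
        self.backup = {k: p.detach().clone() for k, p in model.named_parameters()}

    @torch.no_grad()
    def copy_to(self, model: nn.Module):
        for k, p in model.named_parameters():
            p.copy_(self.shadow[k])

    @torch.no_grad()
    def restore(self, model: nn.Module):
        for k, p in model.named_parameters():
            p.copy_(self.backup[k])

@contextmanager
def ema_scope(model: nn.Module, ema: SimpleEma):
    """Temporarily run the model with EMA weights, mirroring DDPM.ema_scope above."""
    ema.store(model)
    ema.copy_to(model)
    try:
        yield
    finally:
        ema.restore(model)

model = nn.Linear(4, 4)
ema = SimpleEma(model)
ema.update(model)
with ema_scope(model, ema):
    _ = model(torch.randn(1, 4))  # forward pass uses the EMA weights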
wangzhecheng/SkyScript
customized_train_and_test.py
[ { "identifier": "create_model_and_transforms", "path": "src/open_clip/factory.py", "snippet": "def create_model_and_transforms(\n model_name: str,\n pretrained: Optional[str] = None,\n precision: str = 'fp32',\n device: Union[str, torch.device] = 'cpu',\n jit: bool = False,\n force_quick_gelu: bool = False,\n force_custom_text: bool = False,\n force_patch_dropout: Optional[float] = None,\n force_image_size: Optional[Union[int, Tuple[int, int]]] = None,\n pretrained_image: bool = False,\n pretrained_hf: bool = True,\n image_mean: Optional[Tuple[float, ...]] = None,\n image_std: Optional[Tuple[float, ...]] = None,\n aug_cfg: Optional[Union[Dict[str, Any], AugmentationCfg]] = None,\n cache_dir: Optional[str] = None,\n output_dict: Optional[bool] = None,\n):\n model = create_model(\n model_name,\n pretrained,\n precision=precision,\n device=device,\n jit=jit,\n force_quick_gelu=force_quick_gelu,\n force_custom_text=force_custom_text,\n force_patch_dropout=force_patch_dropout,\n force_image_size=force_image_size,\n pretrained_image=pretrained_image,\n pretrained_hf=pretrained_hf,\n cache_dir=cache_dir,\n output_dict=output_dict,\n )\n\n image_mean = image_mean or getattr(model.visual, 'image_mean', None)\n image_std = image_std or getattr(model.visual, 'image_std', None)\n preprocess_train = image_transform(\n model.visual.image_size,\n is_train=True,\n mean=image_mean,\n std=image_std,\n aug_cfg=aug_cfg,\n )\n preprocess_val = image_transform(\n model.visual.image_size,\n is_train=False,\n mean=image_mean,\n std=image_std,\n )\n\n return model, preprocess_train, preprocess_val" }, { "identifier": "get_tokenizer", "path": "src/open_clip/factory.py", "snippet": "def get_tokenizer(model_name):\n if model_name.startswith(HF_HUB_PREFIX):\n tokenizer = HFTokenizer(model_name[len(HF_HUB_PREFIX):])\n else:\n config = get_model_config(model_name)\n tokenizer = HFTokenizer(\n config['text_cfg']['hf_tokenizer_name']) if 'hf_tokenizer_name' in config['text_cfg'] else tokenize\n return tokenizer" }, { "identifier": "create_loss", "path": "src/open_clip/factory.py", "snippet": "def create_loss(args):\n if args.distill:\n return DistillClipLoss(\n local_loss=args.local_loss,\n gather_with_grad=args.gather_with_grad,\n cache_labels=True,\n rank=args.rank,\n world_size=args.world_size,\n use_horovod=args.horovod,\n )\n elif \"coca\" in args.model.lower():\n return CoCaLoss(\n caption_loss_weight=args.coca_caption_loss_weight,\n clip_loss_weight=args.coca_contrastive_loss_weight,\n local_loss=args.local_loss,\n gather_with_grad=args.gather_with_grad,\n cache_labels=True,\n rank=args.rank,\n world_size=args.world_size,\n use_horovod=args.horovod,\n )\n return ClipLoss(\n local_loss=args.local_loss,\n gather_with_grad=args.gather_with_grad,\n cache_labels=True,\n rank=args.rank,\n world_size=args.world_size,\n use_horovod=args.horovod,\n )" }, { "identifier": "trace_model", "path": "src/open_clip/model.py", "snippet": "def trace_model(model, batch_size=256, device=torch.device('cpu')):\n model.eval()\n image_size = model.visual.image_size\n example_images = torch.ones((batch_size, 3, image_size, image_size), device=device)\n example_text = torch.zeros((batch_size, model.context_length), dtype=torch.int, device=device)\n model = torch.jit.trace_module(\n model,\n inputs=dict(\n forward=(example_images, example_text),\n encode_text=(example_text,),\n encode_image=(example_images,)\n ))\n model.visual.image_size = image_size\n return model" }, { "identifier": "get_data", "path": "src/training/data.py", "snippet": 
"def get_data(args, preprocess_fns, epoch=0, tokenizer=None):\n preprocess_train, preprocess_val = preprocess_fns\n data = {}\n\n if args.train_data or args.dataset_type == \"synthetic\":\n data[\"train\"] = get_dataset_fn(args.train_data, args.dataset_type)(\n args, preprocess_train, is_train=True, epoch=epoch, tokenizer=tokenizer)\n\n if args.val_data:\n data[\"val\"] = get_dataset_fn(args.val_data, args.dataset_type)(\n args, preprocess_val, is_train=False, tokenizer=tokenizer)\n\n if args.imagenet_val is not None:\n data[\"imagenet-val\"] = get_imagenet(args, preprocess_fns, \"val\")\n\n if args.imagenet_v2 is not None:\n data[\"imagenet-v2\"] = get_imagenet(args, preprocess_fns, \"v2\")\n\n return data" }, { "identifier": "is_master", "path": "src/training/distributed.py", "snippet": "def is_master(args, local=False):\n return is_local_master(args) if local else is_global_master(args)" }, { "identifier": "init_distributed_device", "path": "src/training/distributed.py", "snippet": "def init_distributed_device(args):\n # Distributed training = training on more than one GPU.\n # Works in both single and multi-node scenarios.\n args.distributed = False\n args.world_size = 1\n args.rank = 0 # global rank\n args.local_rank = 0\n if args.horovod:\n assert hvd is not None, \"Horovod is not installed\"\n hvd.init()\n args.local_rank = int(hvd.local_rank())\n args.rank = hvd.rank()\n args.world_size = hvd.size()\n args.distributed = True\n os.environ['LOCAL_RANK'] = str(args.local_rank)\n os.environ['RANK'] = str(args.rank)\n os.environ['WORLD_SIZE'] = str(args.world_size)\n elif is_using_distributed():\n if 'SLURM_PROCID' in os.environ:\n # DDP via SLURM\n args.local_rank, args.rank, args.world_size = world_info_from_env()\n # SLURM var -> torch.distributed vars in case needed\n os.environ['LOCAL_RANK'] = str(args.local_rank)\n os.environ['RANK'] = str(args.rank)\n os.environ['WORLD_SIZE'] = str(args.world_size)\n torch.distributed.init_process_group(\n backend=args.dist_backend,\n init_method=args.dist_url,\n world_size=args.world_size,\n rank=args.rank,\n )\n else:\n # DDP via torchrun, torch.distributed.launch\n args.local_rank, _, _ = world_info_from_env()\n torch.distributed.init_process_group(\n backend=args.dist_backend,\n init_method=args.dist_url)\n args.world_size = torch.distributed.get_world_size()\n args.rank = torch.distributed.get_rank()\n args.distributed = True\n\n if torch.cuda.is_available():\n if args.distributed and not args.no_set_device_rank:\n device = 'cuda:%d' % args.local_rank\n else:\n device = 'cuda:0'\n torch.cuda.set_device(device)\n else:\n device = 'cpu'\n args.device = device\n device = torch.device(device)\n return device" }, { "identifier": "broadcast_object", "path": "src/training/distributed.py", "snippet": "def broadcast_object(args, obj, src=0):\n # broadcast a pickle-able python object from rank-0 to all ranks\n if args.horovod:\n return hvd.broadcast_object(obj, root_rank=src)\n else:\n if args.rank == src:\n objects = [obj]\n else:\n objects = [None]\n dist.broadcast_object_list(objects, src=src)\n return objects[0]" }, { "identifier": "setup_logging", "path": "src/training/logger.py", "snippet": "def setup_logging(log_file, level, include_host=False):\n if include_host:\n import socket\n hostname = socket.gethostname()\n formatter = logging.Formatter(\n f'%(asctime)s | {hostname} | %(levelname)s | %(message)s', datefmt='%Y-%m-%d,%H:%M:%S')\n else:\n formatter = logging.Formatter('%(asctime)s | %(levelname)s | %(message)s', 
datefmt='%Y-%m-%d,%H:%M:%S')\n\n logging.root.setLevel(level)\n loggers = [logging.getLogger(name) for name in logging.root.manager.loggerDict]\n for logger in loggers:\n logger.setLevel(level)\n\n stream_handler = logging.StreamHandler()\n stream_handler.setFormatter(formatter)\n logging.root.addHandler(stream_handler)\n\n if log_file:\n file_handler = logging.FileHandler(filename=log_file)\n file_handler.setFormatter(formatter)\n logging.root.addHandler(file_handler)" }, { "identifier": "cosine_lr", "path": "src/training/scheduler.py", "snippet": "def cosine_lr(optimizer, base_lr, warmup_length, steps):\n def _lr_adjuster(step):\n if step < warmup_length:\n lr = _warmup_lr(base_lr, warmup_length, step)\n else:\n e = step - warmup_length\n es = steps - warmup_length\n lr = 0.5 * (1 + np.cos(np.pi * e / es)) * base_lr\n assign_learning_rate(optimizer, lr)\n return lr\n return _lr_adjuster" }, { "identifier": "const_lr", "path": "src/training/scheduler.py", "snippet": "def const_lr(optimizer, base_lr, warmup_length, steps):\n def _lr_adjuster(step):\n if step < warmup_length:\n lr = _warmup_lr(base_lr, warmup_length, step)\n else:\n lr = base_lr\n assign_learning_rate(optimizer, lr)\n return lr\n return _lr_adjuster" }, { "identifier": "const_lr_cooldown", "path": "src/training/scheduler.py", "snippet": "def const_lr_cooldown(optimizer, base_lr, warmup_length, steps, cooldown_steps, cooldown_power=1.0, cooldown_end_lr=0.):\n def _lr_adjuster(step):\n start_cooldown_step = steps - cooldown_steps\n if step < warmup_length:\n lr = _warmup_lr(base_lr, warmup_length, step)\n else:\n if step < start_cooldown_step:\n lr = base_lr\n else:\n e = step - start_cooldown_step\n es = steps - start_cooldown_step\n # linear decay if power == 1; polynomial decay otherwise;\n decay = (1 - (e/es)) ** cooldown_power\n lr = decay * (base_lr - cooldown_end_lr) + cooldown_end_lr\n assign_learning_rate(optimizer, lr)\n return lr\n return _lr_adjuster" }, { "identifier": "train_one_epoch", "path": "src/training/train.py", "snippet": "def train_one_epoch(model, data, loss, epoch, optimizer, scaler, scheduler, dist_model, args, tb_writer=None):\n device = torch.device(args.device)\n autocast = get_autocast(args.precision)\n input_dtype = get_input_dtype(args.precision)\n\n\n model.train()\n if args.distill:\n dist_model.eval()\n\n data['train'].set_epoch(epoch) # set epoch in process safe manner via sampler or shared_epoch\n dataloader = data['train'].dataloader\n num_batches_per_epoch = dataloader.num_batches // args.accum_freq\n sample_digits = math.ceil(math.log(dataloader.num_samples + 1, 10))\n\n if args.accum_freq > 1:\n accum_images, accum_texts, accum_features = [], [], {}\n\n losses_m = {}\n batch_time_m = AverageMeter()\n data_time_m = AverageMeter()\n end = time.time()\n for i, batch in enumerate(dataloader):\n i_accum = i // args.accum_freq\n step = num_batches_per_epoch * epoch + i_accum\n\n if not args.skip_scheduler:\n scheduler(step)\n\n images, texts = batch\n images = images.to(device=device, dtype=input_dtype, non_blocking=True)\n texts = texts.to(device=device, non_blocking=True)\n\n data_time_m.update(time.time() - end)\n optimizer.zero_grad()\n\n if args.accum_freq == 1:\n with autocast():\n model_out = model(images, texts)\n logit_scale = model_out[\"logit_scale\"]\n if args.distill:\n with torch.no_grad():\n dist_model_out = dist_model(images, texts)\n model_out.update({f'dist_{k}' : v for k, v in dist_model_out.items()})\n losses = loss(**model_out, output_dict=True)\n\n total_loss = 
sum(losses.values())\n losses[\"loss\"] = total_loss\n\n backward(total_loss, scaler)\n else:\n # First, cache the features without any gradient tracking.\n with torch.no_grad():\n with autocast():\n model_out = model(images, texts)\n model_out.pop(\"logit_scale\")\n for key, val in model_out.items():\n if key in accum_features:\n accum_features[key].append(val)\n else:\n accum_features[key] = [val]\n\n accum_images.append(images)\n accum_texts.append(texts)\n\n # If (i + 1) % accum_freq is not zero, move on to the next batch.\n if ((i + 1) % args.accum_freq) > 0:\n # FIXME this makes data time logging unreliable when accumulating\n continue\n\n # Now, ready to take gradients for the last accum_freq batches.\n # Re-do the forward pass for those batches, and use the cached features from the other batches as negatives.\n # Call backwards each time, but only step optimizer at the end.\n optimizer.zero_grad()\n for j in range(args.accum_freq):\n images = accum_images[j]\n texts = accum_texts[j]\n with autocast():\n model_out = model(images, texts)\n logit_scale = model_out.pop(\"logit_scale\")\n inputs = {}\n for key, val in accum_features.items():\n accumulated = accum_features[key]\n inputs[key] = torch.cat(accumulated[:j] + [model_out[key]] + accumulated[j + 1:])\n losses = loss(**inputs, logit_scale=logit_scale, output_dict=True)\n del inputs\n total_loss = sum(losses.values())\n losses[\"loss\"] = total_loss\n backward(total_loss, scaler)\n\n if scaler is not None:\n if args.horovod:\n optimizer.synchronize()\n scaler.unscale_(optimizer)\n if args.grad_clip_norm is not None:\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip_norm, norm_type=2.0)\n with optimizer.skip_synchronize():\n scaler.step(optimizer)\n else:\n if args.grad_clip_norm is not None:\n scaler.unscale_(optimizer)\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip_norm, norm_type=2.0)\n scaler.step(optimizer)\n scaler.update()\n else:\n if args.grad_clip_norm is not None:\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip_norm, norm_type=2.0)\n optimizer.step()\n\n # reset gradient accum, if enabled\n if args.accum_freq > 1:\n accum_images, accum_texts, accum_features = [], [], {}\n\n # Note: we clamp to 4.6052 = ln(100), as in the original paper.\n with torch.no_grad():\n unwrap_model(model).logit_scale.clamp_(0, math.log(100))\n\n batch_time_m.update(time.time() - end)\n end = time.time()\n batch_count = i_accum + 1\n if is_master(args) and (i_accum % args.log_every_n_steps == 0 or batch_count == num_batches_per_epoch):\n batch_size = len(images)\n num_samples = batch_count * batch_size * args.accum_freq * args.world_size\n samples_per_epoch = dataloader.num_samples\n percent_complete = 100.0 * batch_count / num_batches_per_epoch\n\n # NOTE loss is coarsely sampled, just master node and per log update\n for key, val in losses.items():\n if key not in losses_m:\n losses_m[key] = AverageMeter()\n losses_m[key].update(val.item(), batch_size)\n\n logit_scale_scalar = logit_scale.item()\n loss_log = \" \".join(\n [\n f\"{loss_name.capitalize()}: {loss_m.val:#.5g} ({loss_m.avg:#.5g})\" \n for loss_name, loss_m in losses_m.items()\n ]\n )\n samples_per_second = args.accum_freq * args.batch_size * args.world_size / batch_time_m.val\n samples_per_second_per_gpu = args.accum_freq * args.batch_size / batch_time_m.val\n logging.info(\n f\"Train Epoch: {epoch} [{num_samples:>{sample_digits}}/{samples_per_epoch} ({percent_complete:.0f}%)] \"\n f\"Data (t): {data_time_m.avg:.3f} \"\n 
f\"Batch (t): {batch_time_m.avg:.3f}, {samples_per_second:#g}/s, {samples_per_second_per_gpu:#g}/s/gpu \"\n f\"LR: {optimizer.param_groups[0]['lr']:5f} \"\n f\"Logit Scale: {logit_scale_scalar:.3f} \" + loss_log\n )\n\n # Save train loss / etc. Using non avg meter values as loggers have their own smoothing\n log_data = {\n \"data_time\": data_time_m.val,\n \"batch_time\": batch_time_m.val,\n \"samples_per_second\": samples_per_second,\n \"samples_per_second_per_gpu\": samples_per_second_per_gpu,\n \"scale\": logit_scale_scalar,\n \"lr\": optimizer.param_groups[0][\"lr\"]\n } \n log_data.update({name:val.val for name,val in losses_m.items()})\n\n for name, val in log_data.items():\n name = \"train/\" + name\n if tb_writer is not None:\n tb_writer.add_scalar(name, val, step)\n if args.wandb:\n assert wandb is not None, 'Please install wandb.'\n wandb.log({name: val, 'step': step})\n\n # resetting batch / data time meters per log window\n batch_time_m.reset()\n data_time_m.reset()\n # end for" }, { "identifier": "evaluate", "path": "src/training/train.py", "snippet": "def evaluate(model, data, epoch, args, tb_writer=None):\n metrics = {}\n if not is_master(args):\n return metrics\n device = torch.device(args.device)\n model.eval()\n\n zero_shot_metrics = zero_shot_eval(model, data, epoch, args)\n metrics.update(zero_shot_metrics)\n\n autocast = get_autocast(args.precision)\n input_dtype = get_input_dtype(args.precision)\n\n if 'val' in data and (args.val_frequency and ((epoch % args.val_frequency) == 0 or epoch == args.epochs)):\n dataloader = data['val'].dataloader\n num_samples = 0\n samples_per_val = dataloader.num_samples\n\n # FIXME this does not scale past small eval datasets\n # all_image_features @ all_text_features will blow up memory and compute very quickly\n cumulative_loss = 0.0\n cumulative_gen_loss = 0.0\n all_image_features, all_text_features = [], []\n with torch.no_grad():\n for i, batch in enumerate(dataloader):\n images, texts = batch\n images = images.to(device=device, dtype=input_dtype, non_blocking=True)\n texts = texts.to(device=device, non_blocking=True)\n\n with autocast():\n model_out = model(images, texts)\n image_features = model_out[\"image_features\"]\n text_features = model_out[\"text_features\"]\n logit_scale = model_out[\"logit_scale\"]\n # features are accumulated in CPU tensors, otherwise GPU memory exhausted quickly\n # however, system RAM is easily exceeded and compute time becomes problematic\n all_image_features.append(image_features.cpu())\n all_text_features.append(text_features.cpu())\n logit_scale = logit_scale.mean()\n logits_per_image = logit_scale * image_features @ text_features.t()\n logits_per_text = logits_per_image.t()\n\n batch_size = images.shape[0]\n labels = torch.arange(batch_size, device=device).long()\n total_loss = (\n F.cross_entropy(logits_per_image, labels) +\n F.cross_entropy(logits_per_text, labels)\n ) / 2\n\n gen_loss = maybe_compute_generative_loss(model_out)\n\n cumulative_loss += total_loss * batch_size\n num_samples += batch_size\n if is_master(args) and (i % 100) == 0:\n logging.info(\n f\"Eval Epoch: {epoch} [{num_samples} / {samples_per_val}]\\t\"\n f\"Clip Loss: {cumulative_loss / num_samples:.6f}\\t\")\n\n if gen_loss is not None:\n cumulative_gen_loss += gen_loss * batch_size\n logging.info(\n f\"Generative Loss: {cumulative_gen_loss / num_samples:.6f}\\t\")\n\n val_metrics = get_clip_metrics(\n image_features=torch.cat(all_image_features),\n text_features=torch.cat(all_text_features),\n 
logit_scale=logit_scale.cpu(),\n )\n loss = cumulative_loss / num_samples\n metrics.update(\n {**val_metrics, \"clip_val_loss\": loss.item(), \"epoch\": epoch, \"num_samples\": num_samples}\n )\n if gen_loss is not None:\n gen_loss = cumulative_gen_loss / num_samples\n metrics.update({\"val_generative_loss\": gen_loss.item()})\n\n if not metrics:\n return metrics\n\n logging.info(\n f\"Eval Epoch: {epoch} \"\n + \"\\t\".join([f\"{k}: {round(v, 4):.4f}\" for k, v in metrics.items()])\n )\n\n if args.save_logs:\n for name, val in metrics.items():\n if tb_writer is not None:\n tb_writer.add_scalar(f\"val/{name}\", val, epoch)\n\n with open(os.path.join(args.checkpoint_path, \"results.jsonl\"), \"a+\") as f:\n f.write(json.dumps(metrics))\n f.write(\"\\n\")\n\n if args.wandb:\n assert wandb is not None, 'Please install wandb.'\n for name, val in metrics.items():\n wandb.log({f\"val/{name}\": val, 'epoch': epoch})\n\n return metrics" }, { "identifier": "pt_load", "path": "src/training/file_utils.py", "snippet": "def pt_load(file_path, map_location=None):\n if file_path.startswith('s3'):\n logging.info('Loading remote checkpoint, which may take a bit.')\n of = fsspec.open(file_path, \"rb\")\n with of as f:\n out = torch.load(f, map_location=map_location)\n return out" }, { "identifier": "check_exists", "path": "src/training/file_utils.py", "snippet": "def check_exists(file_path):\n try:\n with fsspec.open(file_path):\n pass\n except FileNotFoundError:\n return False\n return True" }, { "identifier": "start_sync_process", "path": "src/training/file_utils.py", "snippet": "def start_sync_process(sync_every, local_dir, remote_dir, protocol):\n p = multiprocessing.Process(target=keep_running_remote_sync, args=(sync_every, local_dir, remote_dir, protocol))\n return p" }, { "identifier": "remote_sync", "path": "src/training/file_utils.py", "snippet": "def remote_sync(local_dir, remote_dir, protocol):\n logging.info('Starting remote sync.')\n if protocol == 's3':\n return remote_sync_s3(local_dir, remote_dir)\n elif protocol == 'fsspec':\n return remote_sync_fsspec(local_dir, remote_dir)\n else:\n logging.error('Remote protocol not known')\n return False" }, { "identifier": "natural_key", "path": "src/training/main.py", "snippet": "def natural_key(string_):\n \"\"\"See http://www.codinghorror.com/blog/archives/001018.html\"\"\"\n return [int(s) if s.isdigit() else s for s in re.split(r'(\\d+)', string_.lower())]" }, { "identifier": "get_latest_checkpoint", "path": "src/training/main.py", "snippet": "def get_latest_checkpoint(path: str, remote : bool):\n # as writen, this glob recurses, so can pick up checkpoints across multiple sub-folders\n if remote:\n result = subprocess.run([\"aws\", \"s3\", \"ls\", path + \"/\"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n print(result)\n if result.returncode == 1:\n return None\n checkpoints = [os.path.join(path, x.split(' ')[-1]) for x in result.stdout.decode().split('\\n')[:-1]]\n else:\n checkpoints = glob.glob(path + '**/*.pt', recursive=True)\n if checkpoints:\n checkpoints = sorted(checkpoints, key=natural_key)\n return checkpoints[-1]\n return None" }, { "identifier": "copy_codebase", "path": "src/training/main.py", "snippet": "def copy_codebase(args):\n from shutil import copytree, ignore_patterns\n new_code_path = os.path.join(args.logs, args.name, \"code\")\n if os.path.exists(new_code_path):\n print(\n f\"Error. Experiment already exists at {new_code_path}. 
Use --name to specify a new experiment.\"\n )\n return -1\n print(f\"Copying codebase to {new_code_path}\")\n current_code_path = os.path.realpath(__file__)\n for _ in range(3):\n current_code_path = os.path.dirname(current_code_path)\n copytree(current_code_path, new_code_path, ignore=ignore_patterns('log', 'logs', 'wandb'))\n print(\"Done copying code.\")\n return 1" }, { "identifier": "parse_args", "path": "params.py", "snippet": "def parse_args(args):\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--root-data-dir\",\n type=str,\n default=None,\n help=\"Root directory to datasets\",\n )\n parser.add_argument(\n \"--train-data\",\n type=str,\n default=None,\n help=\"Path to file(s) with training data. When using webdataset, multiple datasources can be combined using the `::` separator.\",\n )\n parser.add_argument(\n \"--train-data-upsampling-factors\",\n type=str,\n default=None,\n help=(\n \"When using multiple data sources with webdataset and sampling with replacement, this can be used to upsample specific data sources. \"\n \"Similar to --train-data, this should be a string with as many numbers as there are data sources, separated by `::` (e.g. 1::2::0.5) \"\n \"By default, datapoints are sampled uniformly regardless of the dataset sizes.\"\n )\n )\n parser.add_argument(\n \"--val-data\",\n type=str,\n default=None,\n help=\"Path to file(s) with validation data\",\n )\n parser.add_argument(\n \"--train-num-samples\",\n type=int,\n default=None,\n help=\"Number of samples in dataset. Required for webdataset if not available in info file.\",\n )\n parser.add_argument(\n \"--val-num-samples\",\n type=int,\n default=None,\n help=\"Number of samples in dataset. Useful for webdataset if not available in info file.\",\n )\n parser.add_argument(\n \"--dataset-type\",\n choices=[\"webdataset\", \"csv\", \"synthetic\", \"auto\"],\n default=\"auto\",\n help=\"Which type of dataset to process.\"\n )\n parser.add_argument(\n \"--dataset-resampled\",\n default=False,\n action=\"store_true\",\n help=\"Whether to use sampling with replacement for webdataset shard selection.\"\n )\n parser.add_argument(\n \"--csv-separator\",\n type=str,\n default=\"\\t\",\n help=\"For csv-like datasets, which separator to use.\"\n )\n parser.add_argument(\n \"--csv-img-key\",\n type=str,\n default=\"filepath\",\n help=\"For csv-like datasets, the name of the key for the image paths.\"\n )\n parser.add_argument(\n \"--csv-caption-key\",\n type=str,\n default=\"title\",\n help=\"For csv-like datasets, the name of the key for the captions.\"\n )\n parser.add_argument(\n \"--imagenet-val\",\n type=str,\n default=None,\n help=\"Path to imagenet val set for conducting zero shot evaluation.\",\n )\n parser.add_argument(\n \"--imagenet-v2\",\n type=str,\n default=None,\n help=\"Path to imagenet v2 for conducting zero shot evaluation.\",\n )\n parser.add_argument(\n \"--logs\",\n type=str,\n default=\"./logs/\",\n help=\"Where to store tensorboard logs. Use None to avoid storing logs.\",\n )\n parser.add_argument(\n \"--log-local\",\n action=\"store_true\",\n default=False,\n help=\"log files on local master, otherwise global master only.\",\n )\n parser.add_argument(\n \"--name\",\n type=str,\n default=None,\n help=\"Optional identifier for the experiment when storing logs. 
Otherwise use current time.\",\n )\n parser.add_argument(\n \"--workers\", type=int, default=1, help=\"Number of dataloader workers per GPU.\"\n )\n parser.add_argument(\n \"--batch-size\", type=int, default=64, help=\"Batch size per GPU.\"\n )\n parser.add_argument(\n \"--epochs\", type=int, default=32, help=\"Number of epochs to train for.\"\n )\n parser.add_argument(\n \"--epochs-cooldown\", type=int, default=None,\n help=\"When scheduler w/ cooldown used, perform cooldown from total_epochs - cooldown_epochs onwards.\"\n )\n parser.add_argument(\"--lr\", type=float, default=None, help=\"Learning rate.\")\n parser.add_argument(\"--beta1\", type=float, default=None, help=\"Adam beta 1.\")\n parser.add_argument(\"--beta2\", type=float, default=None, help=\"Adam beta 2.\")\n parser.add_argument(\"--eps\", type=float, default=None, help=\"Adam epsilon.\")\n parser.add_argument(\"--wd\", type=float, default=0.2, help=\"Weight decay.\")\n parser.add_argument(\n \"--warmup\", type=int, default=10000, help=\"Number of steps to warmup for.\"\n )\n parser.add_argument(\n \"--use-bn-sync\",\n default=False,\n action=\"store_true\",\n help=\"Whether to use batch norm sync.\")\n parser.add_argument(\n \"--skip-scheduler\",\n action=\"store_true\",\n default=False,\n help=\"Use this flag to skip the learning rate decay.\",\n )\n parser.add_argument(\n \"--lr-scheduler\",\n type=str,\n default='cosine',\n help=\"LR scheduler. One of: 'cosine', 'const' (constant), 'const-cooldown' (constant w/ cooldown). Default: cosine\",\n )\n parser.add_argument(\n \"--lr-cooldown-end\", type=float, default=0.0,\n help=\"End learning rate for cooldown schedule. Default: 0\"\n )\n parser.add_argument(\n \"--lr-cooldown-power\", type=float, default=1.0,\n help=\"Power for polynomial cooldown schedule. 
Default: 1.0 (linear decay)\"\n )\n parser.add_argument(\n \"--save-frequency\", type=int, default=1, help=\"How often to save checkpoints.\"\n )\n parser.add_argument(\n \"--save-most-recent\",\n action=\"store_true\",\n default=False,\n help=\"Always save the most recent model trained to epoch_latest.pt.\",\n )\n parser.add_argument(\n \"--zeroshot-frequency\", type=int, default=2, help=\"How often to run zero shot.\"\n )\n parser.add_argument(\n \"--val-frequency\", type=int, default=1, help=\"How often to run evaluation with val data.\"\n )\n parser.add_argument(\n \"--resume\",\n default=None,\n type=str,\n help=\"path to latest checkpoint (default: none)\",\n )\n parser.add_argument(\n \"--precision\",\n choices=[\"amp\", \"amp_bf16\", \"amp_bfloat16\", \"bf16\", \"fp16\", \"fp32\"],\n default=\"amp\",\n help=\"Floating point precision.\"\n )\n parser.add_argument(\n \"--model\",\n type=str,\n default=\"RN50\",\n help=\"Name of the vision backbone to use.\",\n )\n parser.add_argument(\n \"--pretrained\",\n default='',\n type=str,\n help=\"Use a pretrained CLIP model weights with the specified tag or file path.\",\n )\n parser.add_argument(\n \"--pretrained-image\",\n default=False,\n action='store_true',\n help=\"Load imagenet pretrained weights for image tower backbone if available.\",\n )\n parser.add_argument(\n \"--lock-image\",\n default=False,\n action='store_true',\n help=\"Lock full image tower by disabling gradients.\",\n )\n parser.add_argument(\n \"--lock-image-unlocked-groups\",\n type=int,\n default=0,\n help=\"Leave last n image tower layer groups unlocked.\",\n )\n parser.add_argument(\n \"--lock-image-freeze-bn-stats\",\n default=False,\n action='store_true',\n help=\"Freeze BatchNorm running stats in image tower for any locked layers.\",\n )\n parser.add_argument(\n '--image-mean', type=float, nargs='+', default=None, metavar='MEAN',\n help='Override default image mean value of dataset')\n parser.add_argument(\n '--image-std', type=float, nargs='+', default=None, metavar='STD',\n help='Override default image std deviation of of dataset')\n parser.add_argument('--aug-cfg', nargs='*', default={}, action=ParseKwargs)\n parser.add_argument(\n \"--grad-checkpointing\",\n default=False,\n action='store_true',\n help=\"Enable gradient checkpointing.\",\n )\n parser.add_argument(\n \"--local-loss\",\n default=False,\n action=\"store_true\",\n help=\"calculate loss w/ local features @ global (instead of realizing full global @ global matrix)\"\n )\n parser.add_argument(\n \"--gather-with-grad\",\n default=False,\n action=\"store_true\",\n help=\"enable full distributed gradient for feature gather\"\n )\n parser.add_argument(\n '--force-image-size', type=int, nargs='+', default=None,\n help='Override default image size'\n )\n parser.add_argument(\n \"--force-quick-gelu\",\n default=False,\n action='store_true',\n help=\"Force use of QuickGELU activation for non-OpenAI transformer models.\",\n )\n parser.add_argument(\n \"--force-patch-dropout\",\n default=None,\n type=float,\n help=\"Override the patch dropout during training, for fine tuning with no dropout near the end as in the paper\",\n )\n parser.add_argument(\n \"--force-custom-text\",\n default=False,\n action='store_true',\n help=\"Force use of CustomTextCLIP model (separate text-tower).\",\n )\n parser.add_argument(\n \"--torchscript\",\n default=False,\n action='store_true',\n help=\"torch.jit.script the model, also uses jit version of OpenAI models if pretrained=='openai'\",\n )\n parser.add_argument(\n 
\"--trace\",\n default=False,\n action='store_true',\n help=\"torch.jit.trace the model for inference / eval only\",\n )\n parser.add_argument(\n \"--accum-freq\", type=int, default=1, help=\"Update the model every --acum-freq steps.\"\n )\n # arguments for distributed training\n parser.add_argument(\n \"--dist-url\",\n default=\"env://\",\n type=str,\n help=\"url used to set up distributed training\",\n )\n parser.add_argument(\n \"--dist-backend\", default=\"nccl\", type=str, help=\"distributed backend\"\n )\n parser.add_argument(\n \"--report-to\",\n default='',\n type=str,\n help=\"Options are ['wandb', 'tensorboard', 'wandb,tensorboard']\"\n )\n parser.add_argument(\n \"--wandb-notes\",\n default='',\n type=str,\n help=\"Notes if logging with wandb\"\n )\n parser.add_argument(\n \"--wandb-project-name\",\n type=str,\n default='open-clip',\n help=\"Name of the project if logging with wandb.\",\n )\n parser.add_argument(\n \"--debug\",\n default=False,\n action=\"store_true\",\n help=\"If true, more information is logged.\"\n )\n parser.add_argument(\n \"--copy-codebase\",\n default=False,\n action=\"store_true\",\n help=\"If true, we copy the entire base on the log directory, and execute from there.\"\n )\n parser.add_argument(\n \"--horovod\",\n default=False,\n action=\"store_true\",\n help=\"Use horovod for distributed training.\"\n )\n parser.add_argument(\n \"--ddp-static-graph\",\n default=False,\n action='store_true',\n help=\"Enable static graph optimization for DDP in PyTorch >= 1.11.\",\n )\n parser.add_argument(\n \"--no-set-device-rank\",\n default=False,\n action=\"store_true\",\n help=\"Don't set device index from local rank (when CUDA_VISIBLE_DEVICES restricted to one per proc).\"\n )\n parser.add_argument(\n \"--seed\", type=int, default=0, help=\"Default random seed.\"\n )\n parser.add_argument(\n \"--grad-clip-norm\", type=float, default=None, help=\"Gradient clip.\"\n )\n parser.add_argument(\n \"--lock-text\",\n default=False,\n action='store_true',\n help=\"Lock full text tower by disabling gradients.\",\n )\n parser.add_argument(\n \"--lock-text-unlocked-layers\",\n type=int,\n default=0,\n help=\"Leave last n image tower layer groups unlocked.\",\n )\n parser.add_argument(\n \"--lock-text-freeze-layer-norm\",\n default=False,\n action='store_true',\n help=\"Freeze BatchNorm running stats in image tower for any locked layers.\",\n )\n parser.add_argument(\n \"--log-every-n-steps\",\n type=int,\n default=100,\n help=\"Log every n steps to tensorboard/console/wandb.\",\n )\n parser.add_argument(\n \"--coca-caption-loss-weight\",\n type=float,\n default=2.0,\n help=\"Weight assigned to caption loss in CoCa.\"\n )\n parser.add_argument(\n \"--coca-contrastive-loss-weight\",\n type=float,\n default=1.0,\n help=\"Weight assigned to contrastive loss when training CoCa.\"\n )\n parser.add_argument(\n \"--remote-sync\",\n type=str,\n default=None,\n help=\"Optinoally sync with a remote path specified by this arg\",\n )\n parser.add_argument(\n \"--remote-sync-frequency\",\n type=int,\n default=300,\n help=\"How frequently to sync to a remote directly if --remote-sync is not None.\",\n )\n parser.add_argument(\n \"--remote-sync-protocol\",\n choices=[\"s3\", \"fsspec\"],\n default=\"s3\",\n help=\"How to do the remote sync backup if --remote-sync is not None.\",\n )\n parser.add_argument(\n \"--delete-previous-checkpoint\",\n default=False,\n action=\"store_true\",\n help=\"If true, delete previous checkpoint after storing a new one.\"\n )\n parser.add_argument(\n 
\"--distill-model\",\n default=None,\n help='Which model arch to distill from, if any.'\n )\n parser.add_argument(\n \"--distill-pretrained\",\n default=None,\n help='Which pre-trained weights to distill from, if any.'\n )\n # newly added flag for adding random rotation into data augmentation\n parser.add_argument(\n \"--random-rotation\",\n action=\"store_true\",\n default=False,\n help=\"If True, add random rotation into image transform for data augmentation (only for training).\"\n )\n # newly added for testing zero-shot and linear probe classification (custom dataset)\n parser.add_argument(\n \"--datasets-for-testing\",\n nargs='*',\n type=str,\n default=None,\n help=\"A list of names of datasets for testing zero-shot classification testing\",\n )\n parser.add_argument(\n \"--classification-mode\",\n type=str,\n default=\"multiclass\",\n help=\"Choose either binary or multiclass\",\n )\n parser.add_argument(\n \"--test-data\",\n type=str,\n default=None,\n help=\"Path to file(s) with test data (e.g., for testing zero-shot classification)\",\n )\n parser.add_argument(\n \"--classnames\",\n type=str,\n default=None,\n help=\"Path to txt file containing class names\",\n )\n parser.add_argument(\n \"--test-data-name\",\n type=str,\n default=None,\n help=\"The name of the test data (e.g., RSICD, EuroSat)\",\n )\n parser.add_argument(\n \"--csv-class-key\",\n type=str,\n default=\"label\",\n help=\"For csv-like datasets, the name of the key for image labels (for classification).\"\n )\n parser.add_argument(\n \"--csv-actual-label-key\",\n type=str,\n default=\"binary\",\n help=\"If classification_model=binary, then specify the name of the key for actual binary labels (i.e., 0/1).\"\n )\n parser.add_argument(\n \"--alpha\",\n type=float,\n default=None,\n help=\"The regularization multiplier of logistic regression to try for linear probing. If None, do a search.\"\n )\n parser.add_argument(\n \"--samples-per-class\",\n type=str,\n default=None,\n help=\"Numbers of samples per class to train logistic regression for linear probing. If None, use full dataset.\"\n )\n parser.add_argument(\n \"--test-result-save-path\",\n type=str,\n default=None,\n help=\"The path to save test results as a pickle file.\"\n )\n parser.add_argument(\n \"--debugging\",\n action=\"store_true\",\n default=False,\n help=\"Whether to use debugging mode, which will return more information.\"\n )\n \n args = parser.parse_args(args)\n\n # If some params are not passed, we use the default values based on model name.\n default_params = get_default_params(args.model)\n for name, val in default_params.items():\n if getattr(args, name) is None:\n setattr(args, name, val)\n\n return args" } ]
import glob
import json
import logging
import os
import re
import subprocess
import sys
import random
import numpy as np
import torch
import wandb
import torch.utils.tensorboard as tensorboard
import horovod.torch as hvd
from datetime import datetime
from torch import optim
from torch.cuda.amp import GradScaler
from torchvision import transforms
from src.open_clip.factory import create_model_and_transforms, get_tokenizer, create_loss
from src.open_clip.model import trace_model
from src.training.data import get_data
from src.training.distributed import is_master, init_distributed_device, broadcast_object
from src.training.logger import setup_logging
from src.training.scheduler import cosine_lr, const_lr, const_lr_cooldown
from src.training.train import train_one_epoch, evaluate
from src.training.file_utils import pt_load, check_exists, start_sync_process, remote_sync
from src.training.main import natural_key, get_latest_checkpoint, copy_codebase
from test_zero_shot_classification import *
from params import parse_args
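As a rough sketch of how the factory imports above are exercised outside the training loop (this is not part of train.py): the "RN50"/"openai" model and pretrained tag, the dummy PIL image, and the caption are illustrative assumptions, while encode_image/encode_text match the entry points used in the trace_model snippet; running it requires the repository on the path and a checkpoint download.

import torch
from PIL import Image

from src.open_clip.factory import create_model_and_transforms, get_tokenizer

# "RN50" / "openai" are illustrative; any (model, pretrained) pair the factory
# resolves would behave the same way.
model, preprocess_train, preprocess_val = create_model_and_transforms(
    "RN50", pretrained="openai", precision="fp32", device="cpu")
tokenizer = get_tokenizer("RN50")

image = preprocess_val(Image.new("RGB", (224, 224))).unsqueeze(0)  # dummy image
text = tokenizer(["a photo of a dog"])

with torch.no_grad():
    image_features = model.encode_image(image)  # same methods traced in trace_model
    text_features = model.encode_text(text)
print(image_features.shape, text_features.shape)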
13,164
scaler = None if args.train_data or args.dataset_type == "synthetic": assert not args.trace, 'Cannot train with traced model' exclude = lambda n, p: p.ndim < 2 or "bn" in n or "ln" in n or "bias" in n or 'logit_scale' in n include = lambda n, p: not exclude(n, p) named_parameters = list(model.named_parameters()) gain_or_bias_params = [p for n, p in named_parameters if exclude(n, p) and p.requires_grad] rest_params = [p for n, p in named_parameters if include(n, p) and p.requires_grad] optimizer = optim.AdamW( [ {"params": gain_or_bias_params, "weight_decay": 0.}, {"params": rest_params, "weight_decay": args.wd}, ], lr=args.lr, betas=(args.beta1, args.beta2), eps=args.eps, ) if args.horovod: optimizer = hvd.DistributedOptimizer(optimizer, named_parameters=model.named_parameters()) hvd.broadcast_parameters(model.state_dict(), root_rank=0) hvd.broadcast_optimizer_state(optimizer, root_rank=0) scaler = GradScaler() if args.precision == "amp" else None # optionally resume from a checkpoint start_epoch = 0 if args.resume is not None: checkpoint = pt_load(args.resume, map_location='cpu') if 'epoch' in checkpoint: # resuming a train checkpoint w/ epoch and optimizer state start_epoch = checkpoint["epoch"] sd = checkpoint["state_dict"] if not args.distributed and next(iter(sd.items()))[0].startswith('module'): sd = {k[len('module.'):]: v for k, v in sd.items()} model.load_state_dict(sd) if optimizer is not None: optimizer.load_state_dict(checkpoint["optimizer"]) if scaler is not None and 'scaler' in checkpoint: scaler.load_state_dict(checkpoint['scaler']) logging.info(f"=> resuming checkpoint '{args.resume}' (epoch {start_epoch})") else: # loading a bare (model only) checkpoint for fine-tune or evaluation model.load_state_dict(checkpoint) logging.info(f"=> loaded checkpoint '{args.resume}' (epoch {start_epoch})") # initialize datasets data = get_data(args, (preprocess_train, preprocess_val), epoch=start_epoch, tokenizer=get_tokenizer(args.model)) assert len(data), 'At least one train or eval dataset must be specified.' # initialize benchmark dataloaders for testing zero-shot classification if args.datasets_for_testing is not None or args.test_data_name is not None: test_dataloaders = get_test_dataloaders(args, preprocess_val) else: test_dataloaders = None # create scheduler if train scheduler = None if 'train' in data and optimizer is not None: total_steps = (data["train"].dataloader.num_batches // args.accum_freq) * args.epochs if args.lr_scheduler == "cosine": scheduler = cosine_lr(optimizer, args.lr, args.warmup, total_steps) elif args.lr_scheduler == "const": scheduler = const_lr(optimizer, args.lr, args.warmup, total_steps) elif args.lr_scheduler == "const-cooldown": assert args.epochs_cooldown is not None,\ "Please specify the number of cooldown epochs for this lr schedule." cooldown_steps = (data["train"].dataloader.num_batches // args.accum_freq) * args.epochs_cooldown scheduler = const_lr_cooldown( optimizer, args.lr, args.warmup, total_steps, cooldown_steps, args.lr_cooldown_power, args.lr_cooldown_end) else: logging.error( f'Unknown scheduler, {args.lr_scheduler}. Available options are: cosine, const, const-cooldown.') exit(1) # determine if this worker should save logs and checkpoints. only do so if it is rank == 0 args.save_logs = args.logs and args.logs.lower() != 'none' and is_master(args) writer = None if args.save_logs and args.tensorboard: assert tensorboard is not None, "Please install tensorboard." 
writer = tensorboard.SummaryWriter(args.tensorboard_path) if args.wandb and is_master(args): assert wandb is not None, 'Please install wandb.' logging.debug('Starting wandb.') args.train_sz = data["train"].dataloader.num_samples if args.val_data is not None: args.val_sz = data["val"].dataloader.num_samples # you will have to configure this for your project! wandb.init( project=args.wandb_project_name, name=args.name, id=args.name, notes=args.wandb_notes, tags=[], resume='auto' if args.resume == "latest" else None, config=vars(args), ) if args.debug: wandb.watch(model, log='all') wandb.save(params_file) logging.debug('Finished loading wandb.') if 'train' not in data: evaluate(model, data, start_epoch, args, writer) if test_dataloaders is not None: eval_metrics = zero_shot_eval_during_training(model, test_dataloaders, start_epoch, args, tb_writer=writer) print(eval_metrics) return loss = create_loss(args) for epoch in range(start_epoch, args.epochs): if is_master(args): logging.info(f'Start epoch {epoch}')
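One detail of the cropped code worth isolating is the optimizer construction: parameters that are 1-D, or whose names contain bn/ln/bias/logit_scale, go into a zero-weight-decay group, while everything else gets args.wd (default 0.2 per the parse_args snippet). Below is a standalone sketch of that split on a toy module; the module and the lr/betas/eps values are illustrative assumptions.

import torch
from torch import nn, optim

# toy stand-in whose parameter names/shapes exercise the same split
toy = nn.Sequential(nn.Linear(8, 8), nn.LayerNorm(8))
toy.logit_scale = nn.Parameter(torch.zeros(()))

exclude = lambda n, p: p.ndim < 2 or "bn" in n or "ln" in n or "bias" in n or "logit_scale" in n
include = lambda n, p: not exclude(n, p)

named_parameters = list(toy.named_parameters())
print("no weight decay:", [n for n, p in named_parameters if exclude(n, p)])
print("weight decay:   ", [n for n, p in named_parameters if include(n, p)])

optimizer = optim.AdamW(
    [
        {"params": [p for n, p in named_parameters if exclude(n, p)], "weight_decay": 0.0},
        {"params": [p for n, p in named_parameters if include(n, p)], "weight_decay": 0.2},
    ],
    lr=5e-4, betas=(0.9, 0.98), eps=1e-6,  # illustrative values
)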
""" Adapted from https://github.com/mlfoundations/open_clip. Copyright (c) 2012-2021 Gabriel Ilharco, Mitchell Wortsman, Nicholas Carlini, Rohan Taori, Achal Dave, Vaishaal Shankar, John Miller, Hongseok Namkoong, Hannaneh Hajishirzi, Ali Farhadi, Ludwig Schmidt """ try: except ImportError: wandb = None try: except ImportError: tensorboard = None try: except ImportError: hvd = None # from src.open_clip import create_model_and_transforms, trace_model, get_tokenizer, create_loss LATEST_CHECKPOINT_NAME = "epoch_latest.pt" def RandomRotationNew(image): angle = random.choice([0, 90, 180, 270]) image = transforms.functional.rotate(image, angle) return image def zero_shot_eval_during_training(model, test_dataloaders, epoch, args, tb_writer=None): logging.info('Starting zero-shot evaluation.') zero_shot_metrics = {} for dataset_name in test_dataloaders: logging.info(f'Evaluating zero-shot classification for dataset {dataset_name}') results = test_zero_shot_classification(model, test_dataloaders[dataset_name]['dataloader'], test_dataloaders[dataset_name]['labels'], test_dataloaders[dataset_name]['is_binary'], args, dataset_name=dataset_name, debugging=args.debugging) for k, v in results.items(): if type(v) in [float, int, np.float16, np.float32, np.float64, np.int8, np.int16, np.int32, np.int64]: zero_shot_metrics[k] = v logging.info( f"Zero-Shot Eval Epoch: {epoch} " + "\t".join([f"{k}: {round(v, 4):.4f}" for k, v in zero_shot_metrics.items()]) ) if args.save_logs: for name, val in zero_shot_metrics.items(): if tb_writer is not None: tb_writer.add_scalar(f"val/{name}", val, epoch) with open(os.path.join(args.checkpoint_path, "results.jsonl"), "a+") as f: f.write(json.dumps(zero_shot_metrics)) f.write("\n") # if args.wandb: # assert wandb is not None, 'Please install wandb.' # for name, val in zero_shot_metrics.items(): # wandb.log({f"val/{name}": val, 'epoch': epoch}) logging.info('Finished zero-shot evaluation.') return zero_shot_metrics def train_and_test(args): args = parse_args(args) if torch.cuda.is_available(): # This enables tf32 on Ampere GPUs which is only 8% slower than # float16 and almost as accurate as float32 # This was a default in pytorch until 1.12 torch.backends.cuda.matmul.allow_tf32 = True torch.backends.cudnn.benchmark = True torch.backends.cudnn.deterministic = False # fully initialize distributed device environment device = init_distributed_device(args) # get the name of the experiments if args.name is None: # sanitize model name for filesystem / uri use, easier if we don't use / in name as a rule? model_name_safe = args.model.replace('/', '-') date_str = datetime.now().strftime("%Y_%m_%d-%H_%M_%S") if args.distributed: # sync date_str from master to all ranks date_str = broadcast_object(args, date_str) args.name = '-'.join([ date_str, f"model_{model_name_safe}", f"lr_{args.lr}", f"b_{args.batch_size}", f"j_{args.workers}", f"p_{args.precision}", ]) resume_latest = args.resume == 'latest' log_base_path = os.path.join(args.logs, args.name) args.log_path = None if is_master(args, local=args.log_local): os.makedirs(log_base_path, exist_ok=True) log_filename = f'out-{args.rank}' if args.log_local else 'out.log' args.log_path = os.path.join(log_base_path, log_filename) if os.path.exists(args.log_path) and not resume_latest: print( "Error. Experiment already exists. Use --name {} to specify a new experiment." 
) return -1 # Setup text logger args.log_level = logging.DEBUG if args.debug else logging.INFO setup_logging(args.log_path, args.log_level) # Setup wandb, tensorboard, checkpoint logging args.wandb = 'wandb' in args.report_to or 'all' in args.report_to args.tensorboard = 'tensorboard' in args.report_to or 'all' in args.report_to args.checkpoint_path = os.path.join(log_base_path, "checkpoints") if is_master(args): args.tensorboard_path = os.path.join(log_base_path, "tensorboard") if args.tensorboard else '' for dirname in [args.tensorboard_path, args.checkpoint_path]: if dirname: os.makedirs(dirname, exist_ok=True) else: args.tensorboard_path = '' if resume_latest: resume_from = None checkpoint_path = args.checkpoint_path # If using remote_sync, need to check the remote instead of the local checkpoints folder. if args.remote_sync is not None: checkpoint_path = os.path.join(args.remote_sync, args.name, "checkpoints") if args.save_most_recent: print('Error. Cannot use save-most-recent with remote_sync and resume latest.') return -1 if args.remote_sync_protocol != 's3': print('Error. Sync protocol not supported when using resume latest.') return -1 if is_master(args): # Checking for existing checkpoint via master rank only. It is possible for # different rank processes to see different files if a shared file-system is under # stress, however it's very difficult to fully work around such situations. if args.save_most_recent: # if --save-most-recent flag is set, look for latest at a fixed filename resume_from = os.path.join(checkpoint_path, LATEST_CHECKPOINT_NAME) if not os.path.exists(resume_from): # If no latest checkpoint has been saved yet, don't try to resume resume_from = None else: # otherwise, list checkpoint dir contents and pick the newest checkpoint resume_from = get_latest_checkpoint(checkpoint_path, remote=args.remote_sync is not None) if resume_from: logging.info(f'Found latest resume checkpoint at {resume_from}.') else: logging.info(f'No latest resume checkpoint found in {checkpoint_path}.') if args.distributed: # sync found checkpoint path to all ranks resume_from = broadcast_object(args, resume_from) args.resume = resume_from if args.copy_codebase: copy_codebase(args) # start the sync proces if remote-sync is not None remote_sync_process = None if is_master(args) and args.remote_sync is not None: # first make sure it works result = remote_sync( os.path.join(args.logs, args.name), os.path.join(args.remote_sync, args.name), args.remote_sync_protocol ) if result: logging.info('remote sync successful.') else: logging.info('Error: remote sync failed. Exiting.') return -1 # if all looks good, start a process to do this every args.remote_sync_frequency seconds remote_sync_process = start_sync_process( args.remote_sync_frequency, os.path.join(args.logs, args.name), os.path.join(args.remote_sync, args.name), args.remote_sync_protocol ) remote_sync_process.start() if args.precision == 'fp16': logging.warning( 'It is recommended to use AMP mixed-precision instead of FP16. ' 'FP16 support needs further verification and tuning, especially for train.') if args.horovod: logging.info( f'Running in horovod mode with multiple processes / nodes. Device: {args.device}.' f'Process (global: {args.rank}, local {args.local_rank}), total {args.world_size}.') elif args.distributed: logging.info( f'Running in distributed mode with multiple processes. Device: {args.device}.' 
f'Process (global: {args.rank}, local {args.local_rank}), total {args.world_size}.') else: logging.info(f'Running with a single process. Device {args.device}.') dist_model = None args.distill = args.distill_model is not None and args.distill_pretrained is not None if args.distill: #FIXME: support distillation with grad accum. assert args.accum_freq == 1 #FIXME: support distillation with coca. assert 'coca' not in args.model.lower() if isinstance(args.force_image_size, (tuple, list)) and len(args.force_image_size) == 1: # arg is nargs, single (square) image size list -> int args.force_image_size = args.force_image_size[0] random_seed(args.seed, 0) model, preprocess_train, preprocess_val = create_model_and_transforms( args.model, args.pretrained, precision=args.precision, device=device, jit=args.torchscript, force_quick_gelu=args.force_quick_gelu, force_custom_text=args.force_custom_text, force_patch_dropout=args.force_patch_dropout, force_image_size=args.force_image_size, pretrained_image=args.pretrained_image, image_mean=args.image_mean, image_std=args.image_std, aug_cfg=args.aug_cfg, output_dict=True, ) if args.random_rotation: # add random rotation step into preprocess_train for i, trans in enumerate(preprocess_train.transforms): if type(trans) == transforms.transforms.ToTensor: # insert random rotation right before ToTensor preprocess_train.transforms.insert(i, transforms.Lambda(RandomRotationNew)) break if args.distill: # FIXME: currenlty assumes the model your distilling from has the same tokenizer & transforms. dist_model, _, _ = create_model_and_transforms( args.distill_model, args.distill_pretrained, device=device, precision=args.precision, output_dict=True, ) random_seed(args.seed, args.rank) if args.trace: model = trace_model(model, batch_size=args.batch_size, device=device) if args.lock_image: # lock image tower as per LiT - https://arxiv.org/abs/2111.07991 model.lock_image_tower( unlocked_groups=args.lock_image_unlocked_groups, freeze_bn_stats=args.lock_image_freeze_bn_stats) if args.lock_text: model.lock_text_tower( unlocked_layers=args.lock_text_unlocked_layers, freeze_layer_norm=args.lock_text_freeze_layer_norm) if args.grad_checkpointing: model.set_grad_checkpointing() if is_master(args): logging.info("Model:") logging.info(f"{str(model)}") logging.info("Params:") params_file = os.path.join(args.logs, args.name, "params.txt") with open(params_file, "w") as f: for name in sorted(vars(args)): val = getattr(args, name) logging.info(f" {name}: {val}") f.write(f"{name}: {val}\n") if args.distributed and not args.horovod: if args.use_bn_sync: model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model) ddp_args = {} if args.ddp_static_graph: # this doesn't exist in older PyTorch, arg only added if enabled ddp_args['static_graph'] = True model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[device], **ddp_args) if args.distill: dist_model = torch.nn.parallel.DistributedDataParallel(dist_model, device_ids=[device], **ddp_args) # create optimizer and scaler optimizer = None scaler = None if args.train_data or args.dataset_type == "synthetic": assert not args.trace, 'Cannot train with traced model' exclude = lambda n, p: p.ndim < 2 or "bn" in n or "ln" in n or "bias" in n or 'logit_scale' in n include = lambda n, p: not exclude(n, p) named_parameters = list(model.named_parameters()) gain_or_bias_params = [p for n, p in named_parameters if exclude(n, p) and p.requires_grad] rest_params = [p for n, p in named_parameters if include(n, p) and p.requires_grad] optimizer = 
optim.AdamW( [ {"params": gain_or_bias_params, "weight_decay": 0.}, {"params": rest_params, "weight_decay": args.wd}, ], lr=args.lr, betas=(args.beta1, args.beta2), eps=args.eps, ) if args.horovod: optimizer = hvd.DistributedOptimizer(optimizer, named_parameters=model.named_parameters()) hvd.broadcast_parameters(model.state_dict(), root_rank=0) hvd.broadcast_optimizer_state(optimizer, root_rank=0) scaler = GradScaler() if args.precision == "amp" else None # optionally resume from a checkpoint start_epoch = 0 if args.resume is not None: checkpoint = pt_load(args.resume, map_location='cpu') if 'epoch' in checkpoint: # resuming a train checkpoint w/ epoch and optimizer state start_epoch = checkpoint["epoch"] sd = checkpoint["state_dict"] if not args.distributed and next(iter(sd.items()))[0].startswith('module'): sd = {k[len('module.'):]: v for k, v in sd.items()} model.load_state_dict(sd) if optimizer is not None: optimizer.load_state_dict(checkpoint["optimizer"]) if scaler is not None and 'scaler' in checkpoint: scaler.load_state_dict(checkpoint['scaler']) logging.info(f"=> resuming checkpoint '{args.resume}' (epoch {start_epoch})") else: # loading a bare (model only) checkpoint for fine-tune or evaluation model.load_state_dict(checkpoint) logging.info(f"=> loaded checkpoint '{args.resume}' (epoch {start_epoch})") # initialize datasets data = get_data(args, (preprocess_train, preprocess_val), epoch=start_epoch, tokenizer=get_tokenizer(args.model)) assert len(data), 'At least one train or eval dataset must be specified.' # initialize benchmark dataloaders for testing zero-shot classification if args.datasets_for_testing is not None or args.test_data_name is not None: test_dataloaders = get_test_dataloaders(args, preprocess_val) else: test_dataloaders = None # create scheduler if train scheduler = None if 'train' in data and optimizer is not None: total_steps = (data["train"].dataloader.num_batches // args.accum_freq) * args.epochs if args.lr_scheduler == "cosine": scheduler = cosine_lr(optimizer, args.lr, args.warmup, total_steps) elif args.lr_scheduler == "const": scheduler = const_lr(optimizer, args.lr, args.warmup, total_steps) elif args.lr_scheduler == "const-cooldown": assert args.epochs_cooldown is not None,\ "Please specify the number of cooldown epochs for this lr schedule." cooldown_steps = (data["train"].dataloader.num_batches // args.accum_freq) * args.epochs_cooldown scheduler = const_lr_cooldown( optimizer, args.lr, args.warmup, total_steps, cooldown_steps, args.lr_cooldown_power, args.lr_cooldown_end) else: logging.error( f'Unknown scheduler, {args.lr_scheduler}. Available options are: cosine, const, const-cooldown.') exit(1) # determine if this worker should save logs and checkpoints. only do so if it is rank == 0 args.save_logs = args.logs and args.logs.lower() != 'none' and is_master(args) writer = None if args.save_logs and args.tensorboard: assert tensorboard is not None, "Please install tensorboard." writer = tensorboard.SummaryWriter(args.tensorboard_path) if args.wandb and is_master(args): assert wandb is not None, 'Please install wandb.' logging.debug('Starting wandb.') args.train_sz = data["train"].dataloader.num_samples if args.val_data is not None: args.val_sz = data["val"].dataloader.num_samples # you will have to configure this for your project! 
wandb.init( project=args.wandb_project_name, name=args.name, id=args.name, notes=args.wandb_notes, tags=[], resume='auto' if args.resume == "latest" else None, config=vars(args), ) if args.debug: wandb.watch(model, log='all') wandb.save(params_file) logging.debug('Finished loading wandb.') if 'train' not in data: evaluate(model, data, start_epoch, args, writer) if test_dataloaders is not None: eval_metrics = zero_shot_eval_during_training(model, test_dataloaders, start_epoch, args, tb_writer=writer) print(eval_metrics) return loss = create_loss(args) for epoch in range(start_epoch, args.epochs): if is_master(args): logging.info(f'Start epoch {epoch}')
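A second detail from the resume branch above (it appears in both the cropped code and the full file): checkpoints saved from a DistributedDataParallel-wrapped model prefix every state-dict key with "module.", which has to be stripped before loading into an unwrapped model. A minimal sketch with a toy nn.Linear; the toy model is an assumption, while the stripping line follows the resume branch of train_and_test.

import torch
from torch import nn

model = nn.Linear(4, 2)

# simulate a checkpoint written from a DDP-wrapped copy of the same model
sd = {"module." + k: v.clone() for k, v in model.state_dict().items()}

if next(iter(sd.items()))[0].startswith('module'):
    # same prefix stripping as in the resume branch
    sd = {k[len('module.'):]: v for k, v in sd.items()}

model.load_state_dict(sd)
print(sorted(sd))  # ['bias', 'weight']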
train_one_epoch(model, data, loss, epoch, optimizer, scaler, scheduler, dist_model, args, tb_writer=writer)
12
2023-12-19 11:50:56+00:00
16k
penghao-wu/vstar
VisualSearch/train.py
[ { "identifier": "VSMForCausalLM", "path": "VisualSearch/model/VSM.py", "snippet": "class VSMForCausalLM(LlavaLlamaForCausalLM):\n\tdef __init__(\n\t\tself,\n\t\tconfig,\n\t\t**kwargs,\n\t):\n\t\tif not hasattr(config, \"train_mask_decoder\"):\n\t\t\tconfig.mm_use_im_start_end = kwargs.pop(\"use_mm_start_end\", True)\n\t\t\tconfig.mm_vision_tower = kwargs.get(\n\t\t\t\t\"vision_tower\", \"openai/clip-vit-large-patch14\"\n\t\t\t)\n\t\t\tself.ce_loss_weight = kwargs.pop(\"ce_loss_weight\", None)\n\t\t\tself.dice_loss_weight = kwargs.pop(\"dice_loss_weight\", None)\n\t\t\tself.bce_loss_weight = kwargs.pop(\"bce_loss_weight\", None)\n\t\t\tself.det_loss_weight = kwargs.pop(\"det_loss_weight\", None)\n\t\telse:\n\t\t\tconfig.mm_vision_tower = config.vision_tower\n\n\t\tself.loc_token_idx = kwargs.pop(\"loc_token_idx\")\n\n\t\tsuper().__init__(config)\n\n\t\tself.model = VSMModel(config, **kwargs)\n\n\t\tself.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n\t\t# Initialize weights and apply final processing\n\t\tself.post_init()\n\n\tdef get_visual_embs(self, pixel_values: torch.FloatTensor):\n\t\twith torch.no_grad():\n\t\t\timage_embeddings = self.model.owlvit.get_visual_embs(pixel_values)\n\t\treturn image_embeddings\n\n\tdef forward(self, **kwargs):\n\t\tif \"past_key_values\" in kwargs:\n\t\t\treturn super().forward(**kwargs)\n\t\treturn self.model_forward(**kwargs)\n\n\tdef model_forward(\n\t\tself,\n\t\timages: torch.FloatTensor,\n\t\timages_clip: torch.FloatTensor,\n\t\tinput_ids: torch.LongTensor,\n\t\tlabels: torch.LongTensor,\n\t\tattention_masks: torch.LongTensor,\n\t\toffset: torch.LongTensor,\n\t\tmasks_list: List[torch.FloatTensor],\n\t\tlabel_list: List[torch.Tensor],\n\t\tbboxes_labels_list: List[torch.FloatTensor],\n\t\tbboxes_valid_list: torch.Tensor,\n\t\tmasks_valid_list: List[torch.Tensor],\n\t\tresize_list: List[tuple],\n\t\tinference: bool = False,\n\t\t**kwargs,\n\t):\n\t\timage_embeddings = self.get_visual_embs(images)\n\t\tbatch_size = image_embeddings.shape[0]\n\t\tassert batch_size == len(offset) - 1\n\n\t\tloc_token_mask = input_ids[:, 1:] == self.loc_token_idx\n\t\tloc_token_mask = torch.cat(\n\t\t\t[\n\t\t\t\tloc_token_mask,\n\t\t\t\ttorch.zeros((loc_token_mask.shape[0], 1)).bool().cuda(),\n\t\t\t],\n\t\t\tdim=1,\n\t\t)\n\t\t# hack for IMAGE_TOKEN_INDEX (we suppose that there is only one image, and it is in the front)\n\t\tloc_token_mask = torch.cat(\n\t\t\t[torch.zeros((loc_token_mask.shape[0], 255)).bool().cuda(), loc_token_mask],\n\t\t\tdim=1,\n\t\t)\n\n\t\tif inference:\n\t\t\tn_batch = 1\n\t\t\tlength = input_ids.shape[0]\n\t\t\tassert images_clip.shape[0] == 1\n\t\t\timages_clip_extend = images_clip.expand(length, -1, -1, -1).contiguous()\n\n\t\t\toutput_hidden_states = []\n\t\t\tfor i in range(n_batch):\n\t\t\t\tstart_i, end_i = i * length, min((i + 1) * length, input_ids.shape[0])\n\t\t\t\toutput_i = super().forward(\n\t\t\t\t\timages=images_clip_extend[: end_i - start_i],\n\t\t\t\t\tattention_mask=attention_masks[start_i:end_i],\n\t\t\t\t\tinput_ids=input_ids[start_i:end_i],\n\t\t\t\t\toutput_hidden_states=True,\n\t\t\t\t)\n\t\t\t\toutput_hidden_states.append(output_i.hidden_states)\n\t\t\t\ttorch.cuda.empty_cache()\n\n\t\t\toutput_hidden_states_list = []\n\t\t\toutput_hidden_states_level = torch.cat(output_hidden_states, dim=0)\n\t\t\toutput_hidden_states_list.append(output_hidden_states_level)\n\t\t\toutput_hidden_states = output_hidden_states_list\n\t\t\toutput = None\n\n\t\telse:\n\t\t\timages_clip_list = []\n\t\t\tfor i in 
range(len(offset) - 1):\n\t\t\t\tstart_i, end_i = offset[i], offset[i + 1]\n\t\t\t\timages_clip_i = (\n\t\t\t\t\timages_clip[i]\n\t\t\t\t\t.unsqueeze(0)\n\t\t\t\t\t.expand(end_i - start_i, -1, -1, -1)\n\t\t\t\t\t.contiguous()\n\t\t\t\t)\n\t\t\t\timages_clip_list.append(images_clip_i)\n\t\t\timages_clip = torch.cat(images_clip_list, dim=0)\n\n\t\t\toutput = super().forward(\n\t\t\t\timages=images_clip,\n\t\t\t\tattention_mask=attention_masks,\n\t\t\t\tinput_ids=input_ids,\n\t\t\t\tlabels=labels,\n\t\t\t\toutput_hidden_states=True,\n\t\t\t)\n\t\t\toutput_hidden_states = output.hidden_states\n\n\t\t# seg\n\t\thidden_states_seg = []\n\t\tassert len(self.model.text_hidden_fcs_seg) == 1\n\t\thidden_states_seg.append(self.model.text_hidden_fcs_seg[0](output_hidden_states[-1]))\n\n\t\tlast_hidden_state_seg = torch.stack(hidden_states_seg, dim=-1).sum(dim=-1)\n\n\t\t# det\n\t\thidden_states_det = []\n\n\t\tassert len(self.model.text_hidden_fcs_det) == 1\n\t\thidden_states_det.append(self.model.text_hidden_fcs_det[0](output_hidden_states[-1]))\n\t\tlast_hidden_state_det = torch.stack(hidden_states_det, dim=-1).sum(dim=-1)\n\n\t\tpred_embeddings_seg = last_hidden_state_seg[loc_token_mask]\n\t\tpred_embeddings_det = last_hidden_state_det[loc_token_mask]\n\t\tloc_token_counts = loc_token_mask.int().sum(-1) # [bs, ]\n\n\t\tloc_token_offset = loc_token_counts.cumsum(-1)\n\t\tloc_token_offset = torch.cat(\n\t\t\t[torch.zeros(1).long().cuda(), loc_token_offset], dim=0\n\t\t)\n\n\t\tloc_token_offset = loc_token_offset[offset]\n\n\t\tpred_embeddings_seg_ = []\n\t\tfor i in range(len(loc_token_offset) - 1):\n\t\t\tstart_i, end_i = loc_token_offset[i], loc_token_offset[i + 1]\n\t\t\tpred_embeddings_seg_.append(pred_embeddings_seg[start_i:end_i])\n\t\tpred_embeddings_seg = pred_embeddings_seg_\n\n\t\tpred_embeddings_det_ = []\n\t\tfor i in range(len(loc_token_offset) - 1):\n\t\t\tstart_i, end_i = loc_token_offset[i], loc_token_offset[i + 1]\n\t\t\tpred_embeddings_det_.append(pred_embeddings_det[start_i:end_i])\n\t\tpred_embeddings_det = pred_embeddings_det_\n\n\t\t# seg branch \n\t\tmultimask_output = False\n\t\tpred_masks = []\n\t\tfor i in range(len(pred_embeddings_seg)):\n\t\t\t(\n\t\t\t\tsparse_embeddings,\n\t\t\t\tdense_embeddings,\n\t\t\t) = self.model.prompt_encoder(\n\t\t\t\tpoints=None,\n\t\t\t\tboxes=None,\n\t\t\t\tmasks=None,\n\t\t\t\ttext_embeds=pred_embeddings_seg[i].unsqueeze(1),\n\t\t\t)\n\t\t\tsparse_embeddings = sparse_embeddings.to(pred_embeddings_seg[i].dtype)\n\t\t\tlow_res_masks, iou_predictions = self.model.mask_decoder(\n\t\t\t\timage_embeddings=self.model.visual_projection(image_embeddings[i].unsqueeze(0)).permute(0, 3, 1, 2),\n\t\t\t\timage_pe=self.model.prompt_encoder.get_dense_pe(),\n\t\t\t\tsparse_prompt_embeddings=sparse_embeddings,\n\t\t\t\tdense_prompt_embeddings=dense_embeddings,\n\t\t\t\tmultimask_output=multimask_output,\n\t\t\t)\n\t\t\tpred_mask = F.interpolate(\n\t\t\tlow_res_masks, label_list[i].shape, mode=\"bilinear\", align_corners=False\n\t\t)\n\t\t\tpred_masks.append(pred_mask[:, 0])\n\n\t\tgt_masks = masks_list\n\n\t\t# det branch\n\t\tdetection_result_batch = []\n\t\tfor i in range(len(pred_embeddings_det)):\n\t\t\tbs = pred_embeddings_det[i].shape[0]\n\t\t\tdetection_result = self.model.owlvit(image_embeddings[i].unsqueeze(0).repeat(bs, 1, 1, 1), pred_embeddings_det[i].unsqueeze(1))\n\t\t\tdetection_result_batch.append(detection_result)\n\n\n\t\tpred_logits = torch.cat([detection_result['pred_logits'] for detection_result in detection_result_batch], 
0)\n\t\tpred_boxes = torch.cat([detection_result['pred_boxes'] for detection_result in detection_result_batch], 0)\n\t\tif inference:\n\t\t\treturn {\n\t\t\t\t\"pred_masks\": pred_masks,\n\t\t\t\t\"gt_masks\": gt_masks,\n\t\t\t\t\"pred_logits\": pred_logits,\n\t\t\t\t\"pred_boxes\": pred_boxes,\n\t\t\t\t\"gt_bboxes\": bboxes_labels_list\n\t\t\t}\n\t\t\n\t\tnum_boxes = 0\n\t\tfor bboxes_labels, bboxes_valid in zip(bboxes_labels_list, bboxes_valid_list):\n\t\t\tif bboxes_valid:\n\t\t\t\tnum_boxes += len(bboxes_labels)\n\t\tnum_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=image_embeddings.device)\n\t\tnum_boxes = torch.clamp(num_boxes, min=1).item()\n\t\t\n\t\tdetection_result_batch = {'pred_logits':pred_logits, 'pred_boxes':pred_boxes}\n\n\t\ttarget_det = []\n\t\tall_bboxes_valid = []\n\t\tfor bboxes_label, bboxes_valid in zip(bboxes_labels_list, bboxes_valid_list):\n\t\t\ttarget_det.append({\"labels\":torch.zeros(len(bboxes_label)).to(bboxes_label.device, torch.long), \"boxes\":bboxes_label})\n\t\t\tif bboxes_valid:\n\t\t\t\tall_bboxes_valid.append(torch.ones((min(24*24, len(bboxes_label)), 1)).to(bboxes_label.device, torch.long))\n\t\t\telse:\n\t\t\t\tall_bboxes_valid.append(torch.zeros((min(24*24, len(bboxes_label)), 1)).to(bboxes_label.device, torch.long))\n\t\tall_bboxes_valid = torch.cat(all_bboxes_valid, 0)\n\t\t\n\t\tloss_dict = self.model.owlvit.criterion(detection_result_batch, target_det, num_boxes)\n\n\t\tfor loss_k, loss_v in loss_dict.items():\n\t\t\tif \"loss_ce\" in loss_k:\n\t\t\t\tloss_dict[loss_k] = (loss_v*bboxes_valid_list.unsqueeze(-1)).mean()\n\t\t\telse:\n\t\t\t\tloss_dict[loss_k] = (loss_v*all_bboxes_valid).sum()\n\n\t\tweight_dict = self.model.owlvit.criterion.weight_dict\n\t\tdetection_loss = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict)\n\t\tdetection_loss = detection_loss*self.det_loss_weight\n\n\t\tmodel_output = output\n\t\toutput = model_output.logits\n\n\t\tce_loss = model_output.loss\n\t\tce_loss = ce_loss * self.ce_loss_weight\n\t\tmask_bce_loss = 0\n\t\tmask_dice_loss = 0\n\t\tnum_masks = 0\n\t\tfor batch_idx in range(len(pred_masks)):\n\t\t\tgt_mask = gt_masks[batch_idx]\n\t\t\tpred_mask = pred_masks[batch_idx]\n\t\t\tmasks_valid = masks_valid_list[batch_idx]\n\n\t\t\tmask_bce_loss += (\n\t\t\t\tsigmoid_ce_loss(pred_mask, gt_mask, num_masks=gt_mask.shape[0])\n\t\t\t\t* gt_mask.shape[0] * masks_valid\n\t\t\t).sum()\n\t\t\tmask_dice_loss += (\n\t\t\t\tdice_loss(pred_mask, gt_mask, num_masks=gt_mask.shape[0])\n\t\t\t\t* gt_mask.shape[0] * masks_valid\n\t\t\t).sum()\n\t\t\tnum_masks += masks_valid.sum()\n\n\t\tmask_bce_loss = self.bce_loss_weight * mask_bce_loss / (num_masks + 1e-8)\n\t\tmask_dice_loss = self.dice_loss_weight * mask_dice_loss / (num_masks + 1e-8)\n\t\tmask_loss = mask_bce_loss + mask_dice_loss\n\n\t\tloss = ce_loss + mask_loss + detection_loss\n\n\t\treturn {\n\t\t\t\"loss\": loss,\n\t\t\t\"ce_loss\": ce_loss,\n\t\t\t\"mask_bce_loss\": mask_bce_loss,\n\t\t\t\"mask_dice_loss\": mask_dice_loss,\n\t\t\t\"mask_loss\": mask_loss,\n\t\t\t\"detection_loss\": detection_loss,\n\t\t\t\"detection_loss_ce\": loss_dict['loss_ce'],\n\t\t\t\"detection_loss_bbox\": loss_dict['loss_bbox'],\n\t\t\t\"detection_loss_giou\": loss_dict['loss_giou'],\n\t\t}\n\n\tdef inference(\n\t\tself,\n\t\timages_clip,\n\t\timages,\n\t\tinput_ids,\n\t\tresize_list,\n\t\toriginal_size_list,\n\t\tmax_new_tokens=32,\n\t\ttokenizer=None,\n\t\tmode = 'vqa'\n\t):\n\t\tassert mode in ['vqa', 'segmentation', 'detection']\n\t\twith 
torch.no_grad():\n\t\t\toutputs = self.generate(\n\t\t\t\timages=images_clip,\n\t\t\t\tinput_ids=input_ids,\n\t\t\t\tmax_new_tokens=max_new_tokens,\n\t\t\t\tnum_beams=1,\n\t\t\t\toutput_hidden_states=True,\n\t\t\t\treturn_dict_in_generate=True,\n\t\t\t)\n\t\t\toutput_hidden_states = outputs.hidden_states[-1]\n\t\t\toutput_ids = outputs.sequences\n\n\t\t\tif mode == 'vqa':\n\t\t\t\treturn output_ids, None, None\n\n\t\t\tloc_token_mask = output_ids[:, 1:] == self.loc_token_idx\n\t\t\t# hack for IMAGE_TOKEN_INDEX (we suppose that there is only one image, and it is in the front)\n\t\t\tloc_token_mask = torch.cat(\n\t\t\t\t[\n\t\t\t\t\ttorch.zeros((loc_token_mask.shape[0], 255)).bool().cuda(),\n\t\t\t\t\tloc_token_mask,\n\t\t\t\t],\n\t\t\t\tdim=1,\n\t\t\t)\n\n\t\t\t# seg\n\t\t\thidden_states_seg = []\n\t\t\tassert len(self.model.text_hidden_fcs_seg) == 1\n\t\t\thidden_states_seg.append(self.model.text_hidden_fcs_seg[0](output_hidden_states))\n\n\t\t\tlast_hidden_state_seg = torch.stack(hidden_states_seg, dim=-1).sum(dim=-1)\n\n\t\t\t# det\n\t\t\thidden_states_det = []\n\n\t\t\tassert len(self.model.text_hidden_fcs_det) == 1\n\t\t\thidden_states_det.append(self.model.text_hidden_fcs_det[0](output_hidden_states))\n\t\t\tlast_hidden_state_det = torch.stack(hidden_states_det, dim=-1).sum(dim=-1)\n\n\t\t\tpred_embeddings_seg = last_hidden_state_seg[loc_token_mask]\n\t\t\tpred_embeddings_det = last_hidden_state_det[loc_token_mask]\n\t\t\tloc_token_counts = loc_token_mask.int().sum(-1) # [bs, ]\n\n\t\t\tloc_token_offset = loc_token_counts.cumsum(-1)\n\t\t\tloc_token_offset = torch.cat(\n\t\t\t\t[torch.zeros(1).long().cuda(), loc_token_offset], dim=0\n\t\t\t)\n\n\n\t\t\tpred_embeddings_seg_ = []\n\t\t\tfor i in range(len(loc_token_offset) - 1):\n\t\t\t\tstart_i, end_i = loc_token_offset[i], loc_token_offset[i + 1]\n\t\t\t\tpred_embeddings_seg_.append(pred_embeddings_seg[start_i:end_i])\n\t\t\tpred_embeddings_seg = pred_embeddings_seg_\n\n\t\t\tpred_embeddings_det_ = []\n\t\t\tfor i in range(len(loc_token_offset) - 1):\n\t\t\t\tstart_i, end_i = loc_token_offset[i], loc_token_offset[i + 1]\n\t\t\t\tpred_embeddings_det_.append(pred_embeddings_det[start_i:end_i])\n\t\t\tpred_embeddings_det = pred_embeddings_det_\n\n\t\t\timage_embeddings = self.get_visual_embs(images)\n\n\t\t\tmultimask_output = False\n\t\t\tpred_masks = []\n\t\t\tfor i in range(len(pred_embeddings_seg)):\n\t\t\t\t(\n\t\t\t\t\tsparse_embeddings,\n\t\t\t\t\tdense_embeddings,\n\t\t\t\t) = self.model.prompt_encoder(\n\t\t\t\t\tpoints=None,\n\t\t\t\t\tboxes=None,\n\t\t\t\t\tmasks=None,\n\t\t\t\t\ttext_embeds=pred_embeddings_seg[i].unsqueeze(1),\n\t\t\t\t)\n\n\t\t\t\tsparse_embeddings = sparse_embeddings.to(pred_embeddings_seg[i].dtype)\n\t\t\t\tlow_res_masks, iou_predictions = self.model.mask_decoder(\n\t\t\t\t\timage_embeddings=self.model.visual_projection(image_embeddings[i].unsqueeze(0)).permute(0, 3, 1, 2),\n\t\t\t\t\timage_pe=self.model.prompt_encoder.get_dense_pe(),\n\t\t\t\t\tsparse_prompt_embeddings=sparse_embeddings,\n\t\t\t\t\tdense_prompt_embeddings=dense_embeddings,\n\t\t\t\t\tmultimask_output=multimask_output,\n\t\t\t\t)\n\t\t\t\tpred_mask = F.interpolate(\n\t\t\t\tlow_res_masks.float(), original_size_list[i], mode=\"bilinear\", align_corners=False\n\t\t\t)\n\t\t\t\tpred_masks.append(pred_mask[:, 0])\n\n\t\t\tif mode == 'segmentation':\n\t\t\t\treturn None, pred_masks, None\n\n\t\t\t# detection model\n\t\t\tdetection_result_batch = []\n\t\t\tfor i in range(len(pred_embeddings_det)):\n\t\t\t\tbs = 
pred_embeddings_det[i].shape[0]\n\t\t\t\tdetection_result = self.model.owlvit(image_embeddings[i].unsqueeze(0).repeat(bs, 1, 1, 1), pred_embeddings_det[i].unsqueeze(1))\n\t\t\t\tdetection_result_batch.append(detection_result)\n\n\n\t\t\tpred_logits = torch.cat([detection_result['pred_logits'] for detection_result in detection_result_batch], 0)\n\t\t\tpred_boxes = torch.cat([detection_result['pred_boxes'] for detection_result in detection_result_batch], 0)\n\t\t\tdetection_result_batch = {'pred_logits':pred_logits, 'pred_boxes':pred_boxes}\n\n\t\treturn None, pred_masks, detection_result_batch" }, { "identifier": "conversation", "path": "VisualSearch/model/llava/conversation.py", "snippet": "class SeparatorStyle(Enum):\nclass Conversation:\n SINGLE = auto()\n TWO = auto()\n MPT = auto()\n PLAIN = auto()\n LLAMA_2 = auto()\n W, H = image.size\n H, W = longest_edge, shortest_edge\n H, W = shortest_edge, longest_edge\n W, H = image.size\n H, W = longest_edge, shortest_edge\n H, W = shortest_edge, longest_edge\n def get_prompt(self):\n def append_message(self, role, message):\n def get_images(self, return_pil=False):\n def expand2square(pil_img, background_color=(122, 116, 104)):\n def to_gradio_chatbot(self):\n def copy(self):\n def dict(self):" }, { "identifier": "HybridDataset", "path": "VisualSearch/utils/dataset.py", "snippet": "class HybridDataset(torch.utils.data.Dataset):\n\tpixel_mean = torch.Tensor([123.675, 116.28, 103.53]).view(-1, 1, 1)\n\tpixel_std = torch.Tensor([58.395, 57.12, 57.375]).view(-1, 1, 1)\n\timg_size = 1024\n\tignore_label = 255\n\n\tdef __init__(\n\t\tself,\n\t\tbase_dir,\n\t\ttokenizer,\n\t\tvision_tower,\n\t\tsamples_per_epoch=500 * 8 * 2 * 10,\n\t\tprecision: str = \"fp32\",\n\t\tnum_classes_per_sample: int = 3,\n\t\texclude_val=False,\n\t\tdataset=\"general_segdet||refer_seg||vqa||reason_seg\",\n\t\tsample_rate=[9, 3, 3, 1],\n\t\tgeneral_segdet_data=\"objects365||cocostuff||paco_lvis\",\n\t\tgeneral_segdet_sample_rate=[2,1,1],\n\t\trefer_seg_data=\"refclef||refcoco||refcoco+||refcocog\",\n\t\tvqa_data=\"possible_locations_conv_86k||llava_instruct_80k\",\n\t\tvqa_sample_rate=[2,1],\n\t):\n\t\tself.exclude_val = exclude_val\n\t\tself.dataset = dataset\n\t\tself.samples_per_epoch = samples_per_epoch\n\t\tself.num_classes_per_sample = num_classes_per_sample\n\t\tsample_rate = np.array(sample_rate)\n\t\tself.sample_rate = sample_rate / sample_rate.sum()\n\n\t\tself.base_dir = base_dir\n\t\tself.tokenizer = tokenizer\n\t\tself.precision = precision\n\n\t\tself.datasets = dataset.split(\"||\")\n\n\t\tself.all_datasets = []\n\t\tfor dataset in self.datasets:\n\t\t\tif dataset == \"general_segdet\":\n\t\t\t\tself.all_datasets.append(\n\t\t\t\t\tSegDetDataset(\n\t\t\t\t\t\tbase_dir,\n\t\t\t\t\t\ttokenizer,\n\t\t\t\t\t\tvision_tower,\n\t\t\t\t\t\tsamples_per_epoch,\n\t\t\t\t\t\tprecision,\n\t\t\t\t\t\tnum_classes_per_sample,\n\t\t\t\t\t\texclude_val,\n\t\t\t\t\t\tgeneral_segdet_data,\n\t\t\t\t\t\tgeneral_segdet_sample_rate,\n\t\t\t\t\t)\n\t\t\t\t)\n\t\t\telif dataset == \"refer_seg\":\n\t\t\t\tself.all_datasets.append(\n\t\t\t\t\tReferSegDataset(\n\t\t\t\t\t\tbase_dir,\n\t\t\t\t\t\ttokenizer,\n\t\t\t\t\t\tvision_tower,\n\t\t\t\t\t\tsamples_per_epoch,\n\t\t\t\t\t\tprecision,\n\t\t\t\t\t\tnum_classes_per_sample,\n\t\t\t\t\t\texclude_val,\n\t\t\t\t\t\trefer_seg_data,\n\t\t\t\t\t)\n\t\t\t\t)\n\t\t\telif dataset == 
\"vqa\":\n\t\t\t\tself.all_datasets.append(\n\t\t\t\t\tVQADataset(\n\t\t\t\t\t\tbase_dir,\n\t\t\t\t\t\ttokenizer,\n\t\t\t\t\t\tvision_tower,\n\t\t\t\t\t\tsamples_per_epoch,\n\t\t\t\t\t\tprecision,\n\t\t\t\t\t\tnum_classes_per_sample,\n\t\t\t\t\t\texclude_val,\n\t\t\t\t\t\tvqa_data,\n\t\t\t\t\t\tvqa_sample_rate,\n\t\t\t\t\t)\n\t\t\t\t)\n\t\t\telif dataset == \"mixed_grounding\":\n\t\t\t\tself.all_datasets.append(\n\t\t\t\t\tMixedGroundingDataset(\n\t\t\t\t\t\tbase_dir,\n\t\t\t\t\t\ttokenizer,\n\t\t\t\t\t\tvision_tower,\n\t\t\t\t\t\tsamples_per_epoch,\n\t\t\t\t\t\tprecision,\n\t\t\t\t\t\tnum_classes_per_sample,\n\t\t\t\t\t\texclude_val,\n\t\t\t\t\t)\n\t\t\t\t)\n\n\tdef __len__(self):\n\t\treturn self.samples_per_epoch\n\n\tdef __getitem__(self, idx):\n\t\tind = np.random.choice(list(range(len(self.datasets))), p=self.sample_rate)\n\t\tdata = self.all_datasets[ind]\n\t\tinference = False\n\t\treturn *data[0], inference" }, { "identifier": "ValDataset", "path": "VisualSearch/utils/dataset.py", "snippet": "class ValDataset(torch.utils.data.Dataset):\n\tpixel_mean = torch.Tensor([123.675, 116.28, 103.53]).view(-1, 1, 1)\n\tpixel_std = torch.Tensor([58.395, 57.12, 57.375]).view(-1, 1, 1)\n\timg_size = 1024\n\tignore_label = 255\n\n\tdef __init__(\n\t\tself,\n\t\tbase_dir,\n\t\ttokenizer,\n\t\tvision_tower,\n\t\tval_dataset,\n\t):\n\t\tself.base_dir = base_dir\n\t\tsplits = val_dataset.split(\"|\")\n\t\tif len(splits) == 2:\n\t\t\tds, split = splits\n\t\t\timages = glob.glob(\n\t\t\t\tos.path.join(self.base_dir, \"reason_seg\", ds, split, \"*.jpg\")\n\t\t\t)\n\t\t\tself.images = images\n\t\t\tself.data_type = \"reason_seg\"\n\t\telif len(splits) == 3:\n\t\t\tself.base_dir = os.path.join(self.base_dir, 'refer_seg')\n\t\t\tds, splitBy, split = splits\n\t\t\trefer_api = REFER(self.base_dir, ds, splitBy)\n\t\t\tref_ids_val = refer_api.getRefIds(split=split)\n\t\t\timages_ids_val = refer_api.getImgIds(ref_ids=ref_ids_val)\n\t\t\trefs_val = refer_api.loadRefs(ref_ids=ref_ids_val)\n\t\t\trefer_seg_ds = {}\n\t\t\trefer_seg_ds[\"images\"] = []\n\t\t\tloaded_images = refer_api.loadImgs(image_ids=images_ids_val)\n\t\t\tfor item in loaded_images:\n\t\t\t\titem = item.copy()\n\t\t\t\tif ds == \"refclef\":\n\t\t\t\t\titem[\"file_name\"] = os.path.join(\n\t\t\t\t\t\tself.base_dir, \"images/saiapr_tc-12\", item[\"file_name\"]\n\t\t\t\t\t)\n\t\t\t\telif ds in [\"refcoco\", \"refcoco+\", \"refcocog\", \"grefcoco\"]:\n\t\t\t\t\titem[\"file_name\"] = os.path.join(\n\t\t\t\t\t\tself.base_dir,\n\t\t\t\t\t\t\"images/mscoco/images/train2014\",\n\t\t\t\t\t\titem[\"file_name\"],\n\t\t\t\t\t)\n\t\t\t\trefer_seg_ds[\"images\"].append(item)\n\t\t\trefer_seg_ds[\"annotations\"] = refer_api.Anns # anns_val\n\n\t\t\timg2refs = {}\n\t\t\tfor ref in refs_val:\n\t\t\t\timage_id = ref[\"image_id\"]\n\t\t\t\timg2refs[image_id] = img2refs.get(image_id, []) + [\n\t\t\t\t\tref,\n\t\t\t\t]\n\t\t\trefer_seg_ds[\"img2refs\"] = img2refs\n\t\t\tself.refer_seg_ds = refer_seg_ds\n\t\t\tself.data_type = \"refer_seg\"\n\n\t\tself.ds = ds\n\t\tself.tokenizer = tokenizer\n\t\tself.transform = OwlViTProcessor.from_pretrained(\"google/owlvit-base-patch16\")\n\t\tself.clip_image_processor = CLIPImageProcessor.from_pretrained(vision_tower)\n\n\tdef __len__(self):\n\t\tif self.data_type == \"refer_seg\":\n\t\t\treturn len(self.refer_seg_ds[\"images\"])\n\t\telse:\n\t\t\treturn len(self.images)\n\n\tdef preprocess(self, x: torch.Tensor) -> torch.Tensor:\n\t\t\"\"\"Normalize pixel values and pad to a square input.\"\"\"\n\t\t# Normalize colors\n\t\tx = 
(x - self.pixel_mean) / self.pixel_std\n\n\t\t# Pad\n\t\th, w = x.shape[-2:]\n\t\tpadh = self.img_size - h\n\t\tpadw = self.img_size - w\n\t\tx = F.pad(x, (0, padw, 0, padh))\n\t\treturn x\n\n\tdef __getitem__(self, idx):\n\t\tif self.data_type == \"refer_seg\":\n\t\t\trefer_seg_ds = self.refer_seg_ds\n\t\t\timages = refer_seg_ds[\"images\"]\n\t\t\tannotations = refer_seg_ds[\"annotations\"]\n\t\t\timg2refs = refer_seg_ds[\"img2refs\"]\n\n\t\t\timage_info = images[idx]\n\t\t\timage_path = image_info[\"file_name\"]\n\t\t\timage_id = image_info[\"id\"]\n\n\t\t\trefs = img2refs[image_id]\n\t\t\tif len(refs) == 0:\n\t\t\t\traise ValueError(\"image {} has no refs\".format(image_id))\n\n\t\t\tsents = []\n\t\t\tann_ids = []\n\t\t\tfor ref in refs:\n\t\t\t\tfor sent in ref[\"sentences\"]:\n\t\t\t\t\tsents.append(sent[\"sent\"].strip().lower())\n\t\t\t\t\tann_ids.append(ref[\"ann_id\"])\n\n\t\t\tsampled_sents = sents\n\t\t\tsampled_ann_ids = ann_ids\n\t\t\timage = cv2.imread(image_path)\n\t\t\timage = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\t\t\tis_sentence = False\n\t\telse:\n\t\t\timage_path = self.images[idx]\n\t\t\timage = cv2.imread(image_path)\n\t\t\timage = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\t\t\tjson_path = image_path.replace(\".jpg\", \".json\")\n\t\t\tmask_json, sampled_sents, is_sentence = get_mask_from_json(json_path, image)\n\t\t\tsampled_sents = [sampled_sents[0]]\n\n\t\tconversations = []\n\t\tconv = conversation_lib.default_conversation.copy()\n\t\ti = 0\n\t\twhile i < len(sampled_sents):\n\t\t\tconv.messages = []\n\t\t\ttext = sampled_sents[i].strip()\n\t\t\tif is_sentence:\n\t\t\t\tconv.append_message(\n\t\t\t\t\tconv.roles[0],\n\t\t\t\t\tDEFAULT_IMAGE_TOKEN\n\t\t\t\t\t+ \"\\n {} Please output segmentation mask.\".format(text),\n\t\t\t\t)\n\t\t\t\tconv.append_message(conv.roles[1], \"[LOC].\")\n\t\t\telse:\n\t\t\t\tconv.append_message(\n\t\t\t\t\tconv.roles[0],\n\t\t\t\t\tDEFAULT_IMAGE_TOKEN\n\t\t\t\t\t+ \"\\n Please locate the {} in this image.\".format(\n\t\t\t\t\t\ttext\n\t\t\t\t\t),\n\t\t\t\t)\n\t\t\t\tconv.append_message(conv.roles[1], \"Sure, [LOC].\")\n\t\t\tconversations.append(conv.get_prompt())\n\t\t\ti += 1\n\n\t\t# preprocess image for clip\n\t\timage_clip = self.clip_image_processor.preprocess(\n\t\t\t\texpand2square(Image.open(image_path).convert('RGB'), tuple(int(x*255) for x in self.clip_image_processor.image_mean)), return_tensors=\"pt\")[\"pixel_values\"][0]\n\t\toriginal_size = image.shape[:2]\n\n\t\timage = self.transform(images=image, return_tensors=\"pt\")['pixel_values'][0]\n\t\tresize = image.shape[:2]\n\n\t\tif self.data_type == \"refer_seg\":\n\t\t\tmasks = []\n\t\t\tbboxes_labels = []\n\t\t\tfor i, ann_id in enumerate(sampled_ann_ids):\n\t\t\t\tann = annotations[ann_id]\n\t\t\t\tcur_bboxes = [ann['bbox']]\n\t\t\t\tcur_bboxes = torch.tensor(cur_bboxes).view(-1, 4)\n\t\t\t\t# xywh to x1y1x2y2\n\t\t\t\tcur_bboxes[:, 2:] += cur_bboxes[:, :2]\n\t\t\t\tcur_bboxes[:, 0::2].clamp_(min=0, max=original_size[1])\n\t\t\t\tcur_bboxes[:, 1::2].clamp_(min=0, max=original_size[0])\n\t\t\t\tkeep = (cur_bboxes[:, 3] > cur_bboxes[:, 1]) & (cur_bboxes[:, 2] > cur_bboxes[:, 0])\n\t\t\t\tcur_bboxes = cur_bboxes[keep]\n\t\t\t\tcur_bboxes = box_xyxy_to_cxcywh(cur_bboxes)\n\t\t\t\tcur_bboxes = cur_bboxes / torch.tensor([original_size[1], original_size[0], original_size[1], original_size[0]], dtype=torch.float32)\n\t\t\t\tif len(cur_bboxes) == 0:\n\t\t\t\t\treturn self.__getitem__(0)\n\t\t\t\tbboxes_labels.append(cur_bboxes)\n\t\t\t\tif len(ann[\"segmentation\"]) == 0 and 
sampled_sents[i] != \"\":\n\t\t\t\t\tm = np.zeros((image_info[\"height\"], image_info[\"width\"], 1))\n\t\t\t\telse:\n\t\t\t\t\tif type(ann[\"segmentation\"][0]) == list: # polygon\n\t\t\t\t\t\trle = mask.frPyObjects(\n\t\t\t\t\t\t\tann[\"segmentation\"],\n\t\t\t\t\t\t\timage_info[\"height\"],\n\t\t\t\t\t\t\timage_info[\"width\"],\n\t\t\t\t\t\t)\n\t\t\t\t\telse:\n\t\t\t\t\t\trle = ann[\"segmentation\"]\n\t\t\t\t\t\tfor i in range(len(rle)):\n\t\t\t\t\t\t\tif not isinstance(rle[i][\"counts\"], bytes):\n\t\t\t\t\t\t\t\trle[i][\"counts\"] = rle[i][\"counts\"].encode()\n\t\t\t\t\tm = mask.decode(rle)\n\t\t\t\tm = np.sum(\n\t\t\t\t\tm, axis=2\n\t\t\t\t) # sometimes there are multiple binary map (corresponding to multiple segs)\n\t\t\t\tm = m.astype(np.uint8) # convert to np.uint8\n\t\t\t\tmasks.append(m)\n\t\telse:\n\t\t\tmasks = [mask_json]\n\t\tbboxes_valid = [1]*len(bboxes_labels)\n\t\tmasks_valid = [1]*len(bboxes_labels)\n\t\tmasks = np.stack(masks, axis=0)\n\t\tmasks = torch.from_numpy(masks)\n\t\tlabels = torch.ones(masks.shape[1], masks.shape[2]) * self.ignore_label\n\t\tinference = True\n\n\t\treturn (\n\t\t\timage_path,\n\t\t\timage,\n\t\t\timage_clip,\n\t\t\tconversations,\n\t\t\tmasks,\n\t\t\tlabels,\n\t\t\tbboxes_labels,\n\t\t\tbboxes_valid,\n\t\t\tmasks_valid,\n\t\t\tresize,\n\t\t\tNone,\n\t\t\tNone,\n\t\t\tinference,\n\t\t)" }, { "identifier": "collate_fn", "path": "VisualSearch/utils/dataset.py", "snippet": "def collate_fn(\n\tbatch, tokenizer=None, conv_type=\"llava_v1\", use_mm_start_end=True, local_rank=-1\n):\n\timage_path_list = []\n\timages_list = []\n\timages_clip_list = []\n\tconversation_list = []\n\tmasks_list = []\n\tlabel_list = []\n\tbboxes_labels_list = []\n\tbboxes_valid_list = []\n\tmasks_valid_list = []\n\tresize_list = []\n\tquestions_list = []\n\tsampled_classes_list = []\n\toffset_list = [0]\n\tcnt = 0\n\tinferences = []\n\tfor (\n\t\timage_path,\n\t\timages,\n\t\timages_clip,\n\t\tconversations,\n\t\tmasks,\n\t\tlabel,\n\t\tbboxes_labels,\n\t\tbboxes_valid,\n\t\tmasks_valid,\n\t\tresize,\n\t\tquestions,\n\t\tsampled_classes,\n\t\tinference,\n\t) in batch:\n\t\timage_path_list.append(image_path)\n\t\timages_list.append(images)\n\t\timages_clip_list.append(images_clip)\n\t\tconversation_list.extend(conversations)\n\t\tlabel_list.append(label)\n\t\tmasks_list.append(masks.float())\n\t\tbboxes_labels_list.extend(bboxes_labels)\n\t\tbboxes_valid_list.extend(bboxes_valid)\n\t\tmasks_valid_list.append(torch.tensor(masks_valid))\n\t\tresize_list.append(resize)\n\t\tquestions_list.append(questions)\n\t\tsampled_classes_list.append(sampled_classes)\n\t\tcnt += len(conversations)\n\t\toffset_list.append(cnt)\n\t\tinferences.append(inference)\n\n\tif use_mm_start_end:\n\t\t# replace <image> token\n\t\tfor i in range(len(conversation_list)):\n\t\t\treplace_token = DEFAULT_IMAGE_TOKEN\n\t\t\treplace_token = (\n\t\t\t\tDEFAULT_IM_START_TOKEN + replace_token + DEFAULT_IM_END_TOKEN\n\t\t\t)\n\t\t\tconversation_list[i] = conversation_list[i].replace(\n\t\t\t\tDEFAULT_IMAGE_TOKEN, replace_token\n\t\t\t)\n\tinput_ids = [\n\t\ttokenizer_image_token(prompt, tokenizer, return_tensors=\"pt\")\n\t\tfor prompt in conversation_list\n\t]\n\tinput_ids = torch.nn.utils.rnn.pad_sequence(\n\t\tinput_ids, batch_first=True, padding_value=tokenizer.pad_token_id\n\t)\n\tattention_masks = input_ids.ne(tokenizer.pad_token_id)\n\n\tfor i in range(len(bboxes_valid_list)):\n\t\tbboxes_valid = bboxes_valid_list[i]\n\t\tattention_mask = attention_masks[i]\n\t\tif not 
bboxes_valid:\n\t\t\tattention_mask = attention_mask & input_ids[i].ne(tokenizer(\"[LOC]\", add_special_tokens=False).input_ids[0])\n\t\t\tattention_masks[i] = attention_mask\n\n\tconv = conversation_lib.default_conversation.copy()\n\ttargets = input_ids.clone()\n\n\tif conv_type == \"llava_v1\":\n\t\tsep = conv.sep + conv.roles[1] + \": \"\n\telse:\n\t\tsep = \"[/INST] \"\n\tfor conversation, target in zip(conversation_list, targets):\n\t\ttotal_len = int(target.ne(tokenizer.pad_token_id).sum())\n\n\t\trounds = conversation.split(conv.sep2)\n\t\tcur_len = 1\n\t\ttarget[:cur_len] = IGNORE_INDEX\n\t\tfor i, rou in enumerate(rounds):\n\t\t\tif rou == \"\":\n\t\t\t\tbreak\n\n\t\t\tparts = rou.split(sep)\n\t\t\t# if len(parts) != 2:\n\t\t\t# break\n\t\t\tassert len(parts) == 2, (len(parts), rou)\n\t\t\tparts[0] += sep\n\n\t\t\tif DEFAULT_IMAGE_TOKEN in conversation:\n\t\t\t\tround_len = len(tokenizer_image_token(rou, tokenizer))\n\t\t\t\tinstruction_len = len(tokenizer_image_token(parts[0], tokenizer)) - 2\n\t\t\telse:\n\t\t\t\tround_len = len(tokenizer(rou).input_ids)\n\t\t\t\tinstruction_len = len(tokenizer(parts[0]).input_ids) - 2\n\n\t\t\ttarget[cur_len : cur_len + instruction_len] = IGNORE_INDEX\n\n\t\t\tcur_len += round_len\n\t\ttarget[cur_len:] = IGNORE_INDEX\n\n\t\tif False:\n\t\t\tz = target.clone()\n\t\t\tz = torch.where(z == IGNORE_INDEX, tokenizer.unk_token_id, z)\n\t\t\tif local_rank == 0:\n\t\t\t\tprint(\n\t\t\t\t\t\"conversation: \",\n\t\t\t\t\tconversation,\n\t\t\t\t\t\"tokenizer.decode(z): \",\n\t\t\t\t\ttokenizer.decode(z),\n\t\t\t\t)\n\n\t\tif cur_len < tokenizer.model_max_length:\n\t\t\tassert cur_len == total_len\n\n\tif inferences[0] == False:\n\t\ttruncate_len = tokenizer.model_max_length - 255\n\n\t\tif input_ids.shape[1] > truncate_len:\n\t\t\tinput_ids = input_ids[:, :truncate_len]\n\t\t\ttargets = targets[:, :truncate_len]\n\t\t\tattention_masks = attention_masks[:, :truncate_len]\n\n\treturn {\n\t\t\"image_paths\": image_path_list,\n\t\t\"images\": torch.stack(images_list, dim=0),\n\t\t\"images_clip\": torch.stack(images_clip_list, dim=0),\n\t\t\"input_ids\": input_ids,\n\t\t\"labels\": targets,\n\t\t\"bboxes_labels_list\": bboxes_labels_list,\n\t\t\"bboxes_valid_list\": torch.tensor(bboxes_valid_list),\n\t\t\"masks_valid_list\": masks_valid_list,\n\t\t\"attention_masks\": attention_masks,\n\t\t\"masks_list\": masks_list,\n\t\t\"label_list\": label_list,\n\t\t\"resize_list\": resize_list,\n\t\t\"offset\": torch.LongTensor(offset_list),\n\t\t\"questions_list\": questions_list,\n\t\t\"sampled_classes_list\": sampled_classes_list,\n\t\t\"inference\": inferences[0],\n\t\t\"conversation_list\": conversation_list,\n\t}" }, { "identifier": "DEFAULT_IM_END_TOKEN", "path": "VisualSearch/utils/utils.py", "snippet": "DEFAULT_IM_END_TOKEN = \"<im_end>\"" }, { "identifier": "DEFAULT_IM_START_TOKEN", "path": "VisualSearch/utils/utils.py", "snippet": "DEFAULT_IM_START_TOKEN = \"<im_start>\"" }, { "identifier": "AverageMeter", "path": "VisualSearch/utils/utils.py", "snippet": "class AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n\n def __init__(self, name, fmt=\":f\", summary_type=Summary.AVERAGE):\n self.name = name\n self.fmt = fmt\n self.summary_type = summary_type\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n def all_reduce(self):\n device = \"cuda\" if 
torch.cuda.is_available() else \"cpu\"\n if isinstance(self.sum, np.ndarray):\n total = torch.tensor(\n self.sum.tolist()\n + [\n self.count,\n ],\n dtype=torch.float32,\n device=device,\n )\n else:\n total = torch.tensor(\n [self.sum, self.count], dtype=torch.float32, device=device\n )\n\n dist.all_reduce(total, dist.ReduceOp.SUM, async_op=False)\n if total.shape[0] > 2:\n self.sum, self.count = total[:-1].cpu().numpy(), total[-1].cpu().item()\n else:\n self.sum, self.count = total.tolist()\n self.avg = self.sum / (self.count + 1e-5)\n\n def __str__(self):\n fmtstr = \"{name} {val\" + self.fmt + \"} ({avg\" + self.fmt + \"})\"\n return fmtstr.format(**self.__dict__)\n\n def summary(self):\n fmtstr = \"\"\n if self.summary_type is Summary.NONE:\n fmtstr = \"\"\n elif self.summary_type is Summary.AVERAGE:\n fmtstr = \"{name} {avg:.3f}\"\n elif self.summary_type is Summary.SUM:\n fmtstr = \"{name} {sum:.3f}\"\n elif self.summary_type is Summary.COUNT:\n fmtstr = \"{name} {count:.3f}\"\n else:\n raise ValueError(\"invalid summary type %r\" % self.summary_type)\n\n return fmtstr.format(**self.__dict__)" }, { "identifier": "ProgressMeter", "path": "VisualSearch/utils/utils.py", "snippet": "class ProgressMeter(object):\n def __init__(self, num_batches, meters, prefix=\"\"):\n self.batch_fmtstr = self._get_batch_fmtstr(num_batches)\n self.meters = meters\n self.prefix = prefix\n\n def display(self, batch):\n entries = [self.prefix + self.batch_fmtstr.format(batch)]\n entries += [str(meter) for meter in self.meters]\n print(\"\\t\".join(entries))\n\n def display_summary(self):\n entries = [\" *\"]\n entries += [meter.summary() for meter in self.meters]\n print(\" \".join(entries))\n\n def _get_batch_fmtstr(self, num_batches):\n num_digits = len(str(num_batches // 1))\n fmt = \"{:\" + str(num_digits) + \"d}\"\n return \"[\" + fmt + \"/\" + fmt.format(num_batches) + \"]\"" }, { "identifier": "Summary", "path": "VisualSearch/utils/utils.py", "snippet": "class Summary(Enum):\n NONE = 0\n AVERAGE = 1\n SUM = 2\n COUNT = 3" }, { "identifier": "dict_to_cuda", "path": "VisualSearch/utils/utils.py", "snippet": "def dict_to_cuda(input_dict):\n for k, v in input_dict.items():\n if isinstance(input_dict[k], torch.Tensor):\n input_dict[k] = v.cuda(non_blocking=True)\n elif (\n isinstance(input_dict[k], list)\n and len(input_dict[k]) > 0\n and isinstance(input_dict[k][0], torch.Tensor)\n ):\n input_dict[k] = [ele.cuda(non_blocking=True) for ele in v]\n return input_dict" }, { "identifier": "intersectionAndUnionGPU", "path": "VisualSearch/utils/utils.py", "snippet": "def intersectionAndUnionGPU(output, target, K, ignore_index=255):\n # 'K' classes, output and target sizes are N or N * L or N * H * W, each value in range 0 to K - 1.\n assert output.dim() in [1, 2, 3]\n assert output.shape == target.shape\n output = output.view(-1)\n target = target.view(-1)\n output[target == ignore_index] = ignore_index\n intersection = output[output == target]\n area_intersection = torch.histc(intersection, bins=K, min=0, max=K - 1)\n area_output = torch.histc(output, bins=K, min=0, max=K - 1)\n area_target = torch.histc(target, bins=K, min=0, max=K - 1)\n area_union = area_output + area_target - area_intersection\n return area_intersection, area_union, area_target" } ]
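The intersectionAndUnionGPU snippet that closes this context list returns per-class areas rather than a final metric. As a hedged usage sketch only (not part of the dataset record; the helper name miou_from_areas and the toy tensors are assumptions), the per-class areas are commonly reduced to a mean IoU like this:

# Hedged usage sketch, not taken from the record: reducing the per-class areas
# produced by an intersectionAndUnionGPU-style function to a mean IoU.
import torch

def miou_from_areas(area_intersection, area_union, eps=1e-10):
    # Per-class IoU; eps guards classes that never appear in prediction or target.
    iou_per_class = area_intersection / (area_union + eps)
    return iou_per_class.mean().item()

# Toy example with K = 3 classes (torch.histc expects float tensors on CPU).
pred = torch.tensor([0, 1, 1, 2, 2, 2]).float()
target = torch.tensor([0, 1, 2, 2, 2, 0]).float()
K = 3
intersection = pred[pred == target]
area_intersection = torch.histc(intersection, bins=K, min=0, max=K - 1)
area_union = (
    torch.histc(pred, bins=K, min=0, max=K - 1)
    + torch.histc(target, bins=K, min=0, max=K - 1)
    - area_intersection
)
print(miou_from_areas(area_intersection, area_union))  # 0.5 for this toy case
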
import argparse import os import shutil import sys import time import deepspeed import torch import tqdm import transformers from functools import partial from peft import LoraConfig, get_peft_model from torch.utils.tensorboard import SummaryWriter from VisualSearch.model.VSM import VSMForCausalLM from VisualSearch.model.llava import conversation as conversation_lib from VisualSearch.utils.dataset import HybridDataset, ValDataset, collate_fn from VisualSearch.utils.utils import (DEFAULT_IM_END_TOKEN, DEFAULT_IM_START_TOKEN, AverageMeter, ProgressMeter, Summary, dict_to_cuda, intersectionAndUnionGPU)
12,750
"visual_projection", "prompt_encoder", "mask_decoder", "vision_tower", "mm_projector", "text_hidden_fcs_seg", "text_hidden_fcs_det", ] ] ) and any([x in name for x in lora_target_modules]) ): lora_module_names.add(name) return sorted(list(lora_module_names)) lora_alpha = args.lora_alpha lora_dropout = args.lora_dropout lora_target_modules = find_linear_layers( model, args.lora_target_modules.split(",") ) lora_config = LoraConfig( r=lora_r, lora_alpha=lora_alpha, target_modules=lora_target_modules, lora_dropout=lora_dropout, bias="none", task_type="CAUSAL_LM", ) model = get_peft_model(model, lora_config) model.print_trainable_parameters() model.resize_token_embeddings(len(tokenizer)) # make text_hidden_fcs, mask_decoder, lm_head, embed_tokens trainable for n, p in model.named_parameters(): if any( [ x in n for x in ["lm_head", "embed_tokens", "visual_projection", "prompt_encoder", "mask_decoder", "text_hidden_fcs_seg", "text_hidden_fcs_det", "owlvit.class_head", "owlvit.layer_norm"] ] ): # print("n: ", n, "p.shape: ", p.shape) p.requires_grad = True world_size = torch.cuda.device_count() print('world_size', world_size) args.distributed = world_size > 1 train_dataset = HybridDataset( args.dataset_dir, tokenizer, args.vision_tower, samples_per_epoch=args.batch_size * args.grad_accumulation_steps * args.steps_per_epoch * world_size, precision=args.precision, num_classes_per_sample=args.num_classes_per_sample, exclude_val=args.exclude_val, dataset=args.dataset, sample_rate=[float(x) for x in args.sample_rates.split(",")], general_segdet_data=args.general_segdet_data, general_segdet_sample_rate=[float(x) for x in args.general_segdet_sample_rates.split(",")], refer_seg_data=args.refer_seg_data, vqa_data=args.vqa_data, vqa_sample_rate=[float(x) for x in args.vqa_sample_rates.split(",")], ) if args.no_eval == False: val_dataset = ValDataset( args.dataset_dir, tokenizer, args.vision_tower, args.val_dataset, ) print( f"Training with {len(train_dataset)} examples and validating with {len(val_dataset)} examples." ) ds_config = { "train_micro_batch_size_per_gpu": args.batch_size, "gradient_accumulation_steps": args.grad_accumulation_steps, "optimizer": { "type": "AdamW", "params": { "lr": args.lr, "weight_decay": 0.0, "betas": (args.beta1, args.beta2), }, }, "scheduler": { "type": "WarmupDecayLR", "params": { "total_num_steps": args.epochs * args.steps_per_epoch, "warmup_min_lr": 0, "warmup_max_lr": args.lr, "warmup_num_steps": 100, "warmup_type": "linear", }, }, "fp16": { "enabled": args.precision == "fp16", }, "bf16": { "enabled": args.precision == "bf16", }, "gradient_clipping": 1.0, "zero_optimization": { "stage": 2, "contiguous_gradients": True, "overlap_comm": True, "reduce_scatter": True, "reduce_bucket_size": 5e8, "allgather_bucket_size": 5e8, }, } model_engine, optimizer, train_loader, scheduler = deepspeed.initialize( model=model, model_parameters=model.parameters(), training_data=train_dataset,
def parse_args(args): parser = argparse.ArgumentParser(description="VisualSearch Model Training") parser.add_argument("--local_rank", default=0, type=int, help="node rank") parser.add_argument( "--version", default="LLaVA-7B-v1.1" ) parser.add_argument( "--precision", default="bf16", type=str, choices=["fp32", "bf16", "fp16"], help="precision for training", ) parser.add_argument("--model_max_length", default=512, type=int) parser.add_argument("--lora_r", default=8, type=int) parser.add_argument( "--vision-tower", default="openai/clip-vit-large-patch14", type=str ) parser.add_argument("--load_in_8bit", action="store_true", default=False) parser.add_argument("--load_in_4bit", action="store_true", default=False) parser.add_argument( "--dataset", default="general_segdet||refer_seg||mixed_grounding||vqa", type=str ) parser.add_argument("--sample_rates", default="15,4,4,15", type=str) parser.add_argument( "--general_segdet_data", default="objects365||cocostuff||paco_lvis", type=str, ) parser.add_argument("--general_segdet_sample_rates", default="2,1,1", type=str) parser.add_argument( "--refer_seg_data", default="refclef||refcoco||refcoco+||refcocog", type=str ) parser.add_argument("--vqa_data", default="possible_locations_conv_86k||llava_instruct_80k", type=str) parser.add_argument("--vqa_sample_rates", default="2,1", type=str) parser.add_argument("--val_dataset", default="refcoco|unc|val", type=str) parser.add_argument("--dataset_dir", default="data", type=str) parser.add_argument("--log_base_dir", default="./runs", type=str) parser.add_argument("--exp_name", default="vsm", type=str) parser.add_argument("--epochs", default=40, type=int) parser.add_argument("--steps_per_epoch", default=2500, type=int) parser.add_argument( "--batch_size", default=4, type=int, help="batch size per device per step" ) parser.add_argument( "--grad_accumulation_steps", default=2, type=int, ) parser.add_argument("--val_batch_size", default=1, type=int) parser.add_argument("--workers", default=2, type=int) parser.add_argument("--lr", default=0.0001, type=float) parser.add_argument("--ce_loss_weight", default=1.0, type=float) parser.add_argument("--dice_loss_weight", default=0.5, type=float) parser.add_argument("--bce_loss_weight", default=2.0, type=float) parser.add_argument("--det_loss_weight", default=0.1, type=float) parser.add_argument("--lora_alpha", default=16, type=int) parser.add_argument("--lora_dropout", default=0.05, type=float) parser.add_argument("--lora_target_modules", default="q_proj,v_proj", type=str) parser.add_argument("--explanatory", default=0.1, type=float) parser.add_argument("--beta1", default=0.9, type=float) parser.add_argument("--beta2", default=0.95, type=float) parser.add_argument("--num_classes_per_sample", default=3, type=int) parser.add_argument("--exclude_val", action="store_true", default=False) parser.add_argument("--no_eval", action="store_true", default=False) parser.add_argument("--out_dim", default=512, type=int) parser.add_argument("--weight", type=str) parser.add_argument("--resume", default="", type=str) parser.add_argument("--print_freq", default=1, type=int) parser.add_argument("--start_epoch", default=0, type=int) parser.add_argument("--gradient_checkpointing", action="store_true", default=True) parser.add_argument("--train_mask_decoder", action="store_true", default=True) parser.add_argument("--use_mm_start_end", action="store_true", default=True) parser.add_argument("--auto_resume", action="store_true", default=False) parser.add_argument( "--conv_type", default="llava_v1", 
type=str, choices=["llava_v1", "llava_llama_2"], ) return parser.parse_args(args) def box_cxcywh_to_xyxy(x): x_c, y_c, w, h = x.unbind(1) b = [(x_c - 0.5 * w), (y_c - 0.5 * h), (x_c + 0.5 * w), (y_c + 0.5 * h)] return torch.stack(b, dim=1) def iou(bbox1, bbox2): x1 = max(bbox1[0], bbox2[0]) y1 = max(bbox1[1], bbox2[1]) x2 = min(bbox1[2], bbox2[2]) y2 = min(bbox1[3], bbox2[3]) w1 = bbox1[2] - bbox1[0] h1 = bbox1[3] - bbox1[1] w2 = bbox2[2] - bbox2[0] h2 = bbox2[3] - bbox2[1] inter_area = max(0, x2 - x1) * max(0, y2 - y1) return inter_area/(w1*h1+w2*h2-inter_area) def main(args): args = parse_args(args) args.log_dir = os.path.join(args.log_base_dir, args.exp_name) if args.local_rank == 0: os.makedirs(args.log_dir, exist_ok=True) writer = SummaryWriter(args.log_dir) else: writer = None # Create model tokenizer = transformers.AutoTokenizer.from_pretrained( args.version, cache_dir=None, model_max_length=args.model_max_length, padding_side="right", use_fast=False, ) tokenizer.pad_token = tokenizer.unk_token num_added_tokens = tokenizer.add_tokens("[LOC]") args.loc_token_idx = tokenizer("[LOC]", add_special_tokens=False).input_ids[0] if args.use_mm_start_end: tokenizer.add_tokens( [DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True ) model_args = { "train_mask_decoder": args.train_mask_decoder, "out_dim": args.out_dim, "ce_loss_weight": args.ce_loss_weight, "dice_loss_weight": args.dice_loss_weight, "bce_loss_weight": args.bce_loss_weight, "det_loss_weight" : args.det_loss_weight, "loc_token_idx": args.loc_token_idx, "vision_tower": args.vision_tower, "use_mm_start_end": args.use_mm_start_end, } torch_dtype = torch.float32 if args.precision == "bf16": torch_dtype = torch.bfloat16 elif args.precision == "fp16": torch_dtype = torch.half model = VSMForCausalLM.from_pretrained( args.version, torch_dtype=torch_dtype, low_cpu_mem_usage=True, **model_args ) model.config.eos_token_id = tokenizer.eos_token_id model.config.bos_token_id = tokenizer.bos_token_id model.config.pad_token_id = tokenizer.pad_token_id model.enable_input_require_grads() model.gradient_checkpointing_enable() model.get_model().initialize_vision_modules(model.get_model().config) vision_tower = model.get_model().get_vision_tower() vision_tower.to(dtype=torch_dtype, device=args.local_rank) model.get_model().initialize_lisa_modules(model.get_model().config) for p in vision_tower.parameters(): p.requires_grad = False for p in model.get_model().mm_projector.parameters(): p.requires_grad = True conversation_lib.default_conversation = conversation_lib.conv_templates[ args.conv_type ] lora_r = args.lora_r if lora_r > 0: def find_linear_layers(model, lora_target_modules): cls = torch.nn.Linear lora_module_names = set() for name, module in model.named_modules(): if ( isinstance(module, cls) and all( [ x not in name for x in [ "owlvit", "visual_projection", "prompt_encoder", "mask_decoder", "vision_tower", "mm_projector", "text_hidden_fcs_seg", "text_hidden_fcs_det", ] ] ) and any([x in name for x in lora_target_modules]) ): lora_module_names.add(name) return sorted(list(lora_module_names)) lora_alpha = args.lora_alpha lora_dropout = args.lora_dropout lora_target_modules = find_linear_layers( model, args.lora_target_modules.split(",") ) lora_config = LoraConfig( r=lora_r, lora_alpha=lora_alpha, target_modules=lora_target_modules, lora_dropout=lora_dropout, bias="none", task_type="CAUSAL_LM", ) model = get_peft_model(model, lora_config) model.print_trainable_parameters() model.resize_token_embeddings(len(tokenizer)) # make 
text_hidden_fcs, mask_decoder, lm_head, embed_tokens trainable for n, p in model.named_parameters(): if any( [ x in n for x in ["lm_head", "embed_tokens", "visual_projection", "prompt_encoder", "mask_decoder", "text_hidden_fcs_seg", "text_hidden_fcs_det", "owlvit.class_head", "owlvit.layer_norm"] ] ): # print("n: ", n, "p.shape: ", p.shape) p.requires_grad = True world_size = torch.cuda.device_count() print('world_size', world_size) args.distributed = world_size > 1 train_dataset = HybridDataset( args.dataset_dir, tokenizer, args.vision_tower, samples_per_epoch=args.batch_size * args.grad_accumulation_steps * args.steps_per_epoch * world_size, precision=args.precision, num_classes_per_sample=args.num_classes_per_sample, exclude_val=args.exclude_val, dataset=args.dataset, sample_rate=[float(x) for x in args.sample_rates.split(",")], general_segdet_data=args.general_segdet_data, general_segdet_sample_rate=[float(x) for x in args.general_segdet_sample_rates.split(",")], refer_seg_data=args.refer_seg_data, vqa_data=args.vqa_data, vqa_sample_rate=[float(x) for x in args.vqa_sample_rates.split(",")], ) if args.no_eval == False: val_dataset = ValDataset( args.dataset_dir, tokenizer, args.vision_tower, args.val_dataset, ) print( f"Training with {len(train_dataset)} examples and validating with {len(val_dataset)} examples." ) ds_config = { "train_micro_batch_size_per_gpu": args.batch_size, "gradient_accumulation_steps": args.grad_accumulation_steps, "optimizer": { "type": "AdamW", "params": { "lr": args.lr, "weight_decay": 0.0, "betas": (args.beta1, args.beta2), }, }, "scheduler": { "type": "WarmupDecayLR", "params": { "total_num_steps": args.epochs * args.steps_per_epoch, "warmup_min_lr": 0, "warmup_max_lr": args.lr, "warmup_num_steps": 100, "warmup_type": "linear", }, }, "fp16": { "enabled": args.precision == "fp16", }, "bf16": { "enabled": args.precision == "bf16", }, "gradient_clipping": 1.0, "zero_optimization": { "stage": 2, "contiguous_gradients": True, "overlap_comm": True, "reduce_scatter": True, "reduce_bucket_size": 5e8, "allgather_bucket_size": 5e8, }, } model_engine, optimizer, train_loader, scheduler = deepspeed.initialize( model=model, model_parameters=model.parameters(), training_data=train_dataset,
collate_fn=partial(
4
2023-12-15 14:58:24+00:00
16k
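This record's next_line field ("collate_fn=partial(") continues the truncated deepspeed.initialize call shown in all_code by pre-binding keyword arguments to the collate_fn from the context. The sketch below illustrates only that functools.partial pattern in a self-contained way; ToySeqDataset, toy_collate_fn, and pad_token_id are hypothetical stand-ins, not the record's actual completion.

# Standalone sketch of the partial(collate_fn, ...) pattern; toy data and hypothetical names.
from functools import partial

import torch
from torch.utils.data import DataLoader, Dataset

class ToySeqDataset(Dataset):
    def __init__(self):
        self.samples = [[5, 6, 7], [8, 9], [10]]
    def __len__(self):
        return len(self.samples)
    def __getitem__(self, idx):
        return self.samples[idx]

def toy_collate_fn(batch, pad_token_id=0):
    # Right-pad variable-length sequences, mirroring the padding done in the record's collate_fn.
    ids = [torch.tensor(x) for x in batch]
    input_ids = torch.nn.utils.rnn.pad_sequence(ids, batch_first=True, padding_value=pad_token_id)
    attention_masks = input_ids.ne(pad_token_id)
    return {"input_ids": input_ids, "attention_masks": attention_masks}

# partial() fixes pad_token_id up front, the same way the training script fixes
# tokenizer, conv_type, use_mm_start_end, and local_rank before handing the
# collate function to its data-loading machinery.
loader = DataLoader(ToySeqDataset(), batch_size=3, collate_fn=partial(toy_collate_fn, pad_token_id=0))
batch = next(iter(loader))
print(batch["input_ids"].shape, batch["attention_masks"].shape)  # torch.Size([3, 3]) twice
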
foocker/Bert-VITS2-Faster
train_ms.py
[ { "identifier": "config", "path": "config.py", "snippet": "class Resample_config:\nclass Preprocess_text_config:\nclass Bert_gen_config:\nclass Emo_gen_config:\nclass Train_ms_config:\nclass Webui_config:\nclass Server_config:\nclass Translate_config:\nclass Config:\n def __init__(self, in_dir: str, out_dir: str, sampling_rate: int = 44100):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n transcription_path: str,\n cleaned_path: str,\n train_path: str,\n val_path: str,\n config_path: str,\n val_per_spk: int = 5,\n max_val_total: int = 10000,\n clean: bool = True,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n config_path: str,\n num_processes: int = 2,\n device: str = \"cuda\",\n use_multi_device: bool = False,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n config_path: str,\n num_processes: int = 2,\n device: str = \"cuda\",\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n config_path: str,\n env: Dict[str, any],\n base: Dict[str, any],\n model: str,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n device: str,\n model: str,\n config_path: str,\n language_identification_library: str,\n port: int = 7860,\n share: bool = False,\n debug: bool = False,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self, models: List[Dict[str, any]], port: int = 5000, device: str = \"cuda\"\n ):\n def from_dict(cls, data: Dict[str, any]):\n def __init__(self, app_key: str, secret_key: str):\n def from_dict(cls, data: Dict[str, any]):\n def __init__(self, config_path: str):" }, { "identifier": "TextAudioSpeakerLoader", "path": "data_utils.py", "snippet": "class TextAudioSpeakerLoader(torch.utils.data.Dataset):\n \"\"\"\n 1) loads audio, speaker_id, text pairs\n 2) normalizes text and converts them to sequences of integers\n 3) computes spectrograms from audio files.\n \"\"\"\n\n def __init__(self, audiopaths_sid_text, hparams):\n self.audiopaths_sid_text = load_filepaths_and_text(audiopaths_sid_text)\n self.max_wav_value = hparams.max_wav_value\n self.sampling_rate = hparams.sampling_rate\n self.filter_length = hparams.filter_length\n self.hop_length = hparams.hop_length\n self.win_length = hparams.win_length\n self.sampling_rate = hparams.sampling_rate\n self.spk_map = hparams.spk2id\n self.hparams = hparams\n\n self.use_mel_spec_posterior = getattr(\n hparams, \"use_mel_posterior_encoder\", False\n )\n if self.use_mel_spec_posterior:\n self.n_mel_channels = getattr(hparams, \"n_mel_channels\", 80)\n\n self.cleaned_text = getattr(hparams, \"cleaned_text\", False)\n\n self.add_blank = hparams.add_blank\n self.min_text_len = getattr(hparams, \"min_text_len\", 1)\n self.max_text_len = getattr(hparams, \"max_text_len\", 300)\n\n random.seed(1234)\n random.shuffle(self.audiopaths_sid_text)\n self._filter()\n\n def _filter(self):\n \"\"\"\n Filter text & store spec lengths\n \"\"\"\n # Store spectrogram lengths for Bucketing\n # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2)\n # spec_length = wav_length // hop_length\n\n audiopaths_sid_text_new = []\n lengths = []\n skipped = 0\n logger.info(\"Init dataset...\")\n for _id, spk, language, text, phones, tone, word2ph in tqdm(\n self.audiopaths_sid_text\n ):\n audiopath = f\"{_id}\"\n if self.min_text_len <= len(phones) and len(phones) <= self.max_text_len:\n phones = 
phones.split(\" \")\n tone = [int(i) for i in tone.split(\" \")]\n word2ph = [int(i) for i in word2ph.split(\" \")]\n audiopaths_sid_text_new.append(\n [audiopath, spk, language, text, phones, tone, word2ph]\n )\n lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length))\n else:\n skipped += 1\n logger.info(\n \"skipped: \"\n + str(skipped)\n + \", total: \"\n + str(len(self.audiopaths_sid_text))\n )\n self.audiopaths_sid_text = audiopaths_sid_text_new\n self.lengths = lengths\n\n def get_audio_text_speaker_pair(self, audiopath_sid_text):\n # separate filename, speaker_id and text\n audiopath, sid, language, text, phones, tone, word2ph = audiopath_sid_text\n\n bert, ja_bert, en_bert, phones, tone, language = self.get_text(\n text, word2ph, phones, tone, language, audiopath\n )\n\n spec, wav = self.get_audio(audiopath)\n sid = torch.LongTensor([int(self.spk_map[sid])])\n return (phones, spec, wav, sid, tone, language, bert, ja_bert, en_bert)\n\n def get_audio(self, filename):\n audio, sampling_rate = load_wav_to_torch(filename)\n if sampling_rate != self.sampling_rate:\n raise ValueError(\n \"{} {} SR doesn't match target {} SR\".format(\n filename, sampling_rate, self.sampling_rate\n )\n )\n audio_norm = audio / self.max_wav_value\n audio_norm = audio_norm.unsqueeze(0)\n spec_filename = filename.replace(\".wav\", \".spec.pt\")\n if self.use_mel_spec_posterior:\n spec_filename = spec_filename.replace(\".spec.pt\", \".mel.pt\")\n try:\n spec = torch.load(spec_filename)\n except:\n if self.use_mel_spec_posterior:\n spec = mel_spectrogram_torch(\n audio_norm,\n self.filter_length,\n self.n_mel_channels,\n self.sampling_rate,\n self.hop_length,\n self.win_length,\n self.hparams.mel_fmin,\n self.hparams.mel_fmax,\n center=False,\n )\n else:\n spec = spectrogram_torch(\n audio_norm,\n self.filter_length,\n self.sampling_rate,\n self.hop_length,\n self.win_length,\n center=False,\n )\n spec = torch.squeeze(spec, 0)\n torch.save(spec, spec_filename)\n return spec, audio_norm\n\n def get_text(self, text, word2ph, phone, tone, language_str, wav_path):\n phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str)\n if self.add_blank:\n phone = commons.intersperse(phone, 0)\n tone = commons.intersperse(tone, 0)\n language = commons.intersperse(language, 0)\n for i in range(len(word2ph)):\n word2ph[i] = word2ph[i] * 2\n word2ph[0] += 1\n bert_path = wav_path.replace(\".wav\", \".bert.pt\")\n try:\n bert_ori = torch.load(bert_path)\n assert bert_ori.shape[-1] == len(phone)\n except Exception as e:\n logger.warning(\"Bert load Failed\")\n logger.warning(e)\n\n if language_str == \"ZH\":\n bert = bert_ori\n ja_bert = torch.zeros(1024, len(phone))\n en_bert = torch.zeros(1024, len(phone))\n elif language_str == \"JP\":\n bert = torch.zeros(1024, len(phone))\n ja_bert = bert_ori\n en_bert = torch.zeros(1024, len(phone))\n elif language_str == \"EN\":\n bert = torch.zeros(1024, len(phone))\n ja_bert = torch.zeros(1024, len(phone))\n en_bert = bert_ori\n phone = torch.LongTensor(phone)\n tone = torch.LongTensor(tone)\n language = torch.LongTensor(language)\n return bert, ja_bert, en_bert, phone, tone, language\n\n def get_sid(self, sid):\n sid = torch.LongTensor([int(sid)])\n return sid\n\n def __getitem__(self, index):\n return self.get_audio_text_speaker_pair(self.audiopaths_sid_text[index])\n\n def __len__(self):\n return len(self.audiopaths_sid_text)" }, { "identifier": "TextAudioSpeakerCollate", "path": "data_utils.py", "snippet": "class TextAudioSpeakerCollate:\n \"\"\"Zero-pads 
model inputs and targets\"\"\"\n\n def __init__(self, return_ids=False):\n self.return_ids = return_ids\n\n def __call__(self, batch):\n \"\"\"Collate's training batch from normalized text, audio and speaker identities\n PARAMS\n ------\n batch: [text_normalized, spec_normalized, wav_normalized, sid]\n \"\"\"\n # Right zero-pad all one-hot text sequences to max input length\n _, ids_sorted_decreasing = torch.sort(\n torch.LongTensor([x[1].size(1) for x in batch]), dim=0, descending=True\n )\n\n max_text_len = max([len(x[0]) for x in batch])\n max_spec_len = max([x[1].size(1) for x in batch])\n max_wav_len = max([x[2].size(1) for x in batch])\n\n text_lengths = torch.LongTensor(len(batch))\n spec_lengths = torch.LongTensor(len(batch))\n wav_lengths = torch.LongTensor(len(batch))\n sid = torch.LongTensor(len(batch))\n\n text_padded = torch.LongTensor(len(batch), max_text_len)\n tone_padded = torch.LongTensor(len(batch), max_text_len)\n language_padded = torch.LongTensor(len(batch), max_text_len)\n bert_padded = torch.FloatTensor(len(batch), 1024, max_text_len)\n ja_bert_padded = torch.FloatTensor(len(batch), 1024, max_text_len)\n en_bert_padded = torch.FloatTensor(len(batch), 1024, max_text_len)\n\n spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len)\n wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)\n text_padded.zero_()\n tone_padded.zero_()\n language_padded.zero_()\n spec_padded.zero_()\n wav_padded.zero_()\n bert_padded.zero_()\n ja_bert_padded.zero_()\n en_bert_padded.zero_()\n\n for i in range(len(ids_sorted_decreasing)):\n row = batch[ids_sorted_decreasing[i]]\n\n text = row[0]\n text_padded[i, : text.size(0)] = text\n text_lengths[i] = text.size(0)\n\n spec = row[1]\n spec_padded[i, :, : spec.size(1)] = spec\n spec_lengths[i] = spec.size(1)\n\n wav = row[2]\n wav_padded[i, :, : wav.size(1)] = wav\n wav_lengths[i] = wav.size(1)\n\n sid[i] = row[3]\n\n tone = row[4]\n tone_padded[i, : tone.size(0)] = tone\n\n language = row[5]\n language_padded[i, : language.size(0)] = language\n\n bert = row[6]\n bert_padded[i, :, : bert.size(1)] = bert\n\n ja_bert = row[7]\n ja_bert_padded[i, :, : ja_bert.size(1)] = ja_bert\n\n en_bert = row[8]\n en_bert_padded[i, :, : en_bert.size(1)] = en_bert\n\n return (\n text_padded,\n text_lengths,\n spec_padded,\n spec_lengths,\n wav_padded,\n wav_lengths,\n sid,\n tone_padded,\n language_padded,\n bert_padded,\n ja_bert_padded,\n en_bert_padded,\n )" }, { "identifier": "DistributedBucketSampler", "path": "data_utils.py", "snippet": "class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler):\n \"\"\"\n Maintain similar input lengths in a batch.\n Length groups are specified by boundaries.\n Ex) boundaries = [b1, b2, b3] -> any batch is included either {x | b1 < length(x) <=b2} or {x | b2 < length(x) <= b3}.\n\n It removes samples which are not included in the boundaries.\n Ex) boundaries = [b1, b2, b3] -> any x s.t. 
length(x) <= b1 or length(x) > b3 are discarded.\n \"\"\"\n\n def __init__(\n self,\n dataset,\n batch_size,\n boundaries,\n num_replicas=None,\n rank=None,\n shuffle=True,\n ):\n super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)\n self.lengths = dataset.lengths\n self.batch_size = batch_size\n self.boundaries = boundaries\n\n self.buckets, self.num_samples_per_bucket = self._create_buckets()\n self.total_size = sum(self.num_samples_per_bucket)\n self.num_samples = self.total_size // self.num_replicas\n\n def _create_buckets(self):\n buckets = [[] for _ in range(len(self.boundaries) - 1)]\n for i in range(len(self.lengths)):\n length = self.lengths[i]\n idx_bucket = self._bisect(length)\n if idx_bucket != -1:\n buckets[idx_bucket].append(i)\n\n try:\n for i in range(len(buckets) - 1, 0, -1):\n if len(buckets[i]) == 0:\n buckets.pop(i)\n self.boundaries.pop(i + 1)\n assert all(len(bucket) > 0 for bucket in buckets)\n # When one bucket is not traversed\n except Exception as e:\n print(\"Bucket warning \", e)\n for i in range(len(buckets) - 1, -1, -1):\n if len(buckets[i]) == 0:\n buckets.pop(i)\n self.boundaries.pop(i + 1)\n\n num_samples_per_bucket = []\n for i in range(len(buckets)):\n len_bucket = len(buckets[i])\n total_batch_size = self.num_replicas * self.batch_size\n rem = (\n total_batch_size - (len_bucket % total_batch_size)\n ) % total_batch_size\n num_samples_per_bucket.append(len_bucket + rem)\n return buckets, num_samples_per_bucket\n\n def __iter__(self):\n # deterministically shuffle based on epoch\n g = torch.Generator()\n g.manual_seed(self.epoch)\n\n indices = []\n if self.shuffle:\n for bucket in self.buckets:\n indices.append(torch.randperm(len(bucket), generator=g).tolist())\n else:\n for bucket in self.buckets:\n indices.append(list(range(len(bucket))))\n\n batches = []\n for i in range(len(self.buckets)):\n bucket = self.buckets[i]\n len_bucket = len(bucket)\n if len_bucket == 0:\n continue\n ids_bucket = indices[i]\n num_samples_bucket = self.num_samples_per_bucket[i]\n\n # add extra samples to make it evenly divisible\n rem = num_samples_bucket - len_bucket\n ids_bucket = (\n ids_bucket\n + ids_bucket * (rem // len_bucket)\n + ids_bucket[: (rem % len_bucket)]\n )\n\n # subsample\n ids_bucket = ids_bucket[self.rank :: self.num_replicas]\n\n # batching\n for j in range(len(ids_bucket) // self.batch_size):\n batch = [\n bucket[idx]\n for idx in ids_bucket[\n j * self.batch_size : (j + 1) * self.batch_size\n ]\n ]\n batches.append(batch)\n\n if self.shuffle:\n batch_ids = torch.randperm(len(batches), generator=g).tolist()\n batches = [batches[i] for i in batch_ids]\n self.batches = batches\n\n assert len(self.batches) * self.batch_size == self.num_samples\n return iter(self.batches)\n\n def _bisect(self, x, lo=0, hi=None):\n if hi is None:\n hi = len(self.boundaries) - 1\n\n if hi > lo:\n mid = (hi + lo) // 2\n if self.boundaries[mid] < x and x <= self.boundaries[mid + 1]:\n return mid\n elif x <= self.boundaries[mid]:\n return self._bisect(x, lo, mid)\n else:\n return self._bisect(x, mid + 1, hi)\n else:\n return -1\n\n def __len__(self):\n return self.num_samples // self.batch_size" }, { "identifier": "SynthesizerTrn", "path": "models.py", "snippet": "class SynthesizerTrn(nn.Module):\n \"\"\"\n Synthesizer for Training\n \"\"\"\n\n def __init__(\n self,\n n_vocab,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n 
resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n n_speakers=256,\n gin_channels=256,\n use_sdp=True,\n n_flow_layer=4,\n n_layers_trans_flow=4,\n flow_share_parameter=False,\n use_transformer_flow=True,\n **kwargs\n ):\n super().__init__()\n self.n_vocab = n_vocab\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.n_speakers = n_speakers\n self.gin_channels = gin_channels\n self.n_layers_trans_flow = n_layers_trans_flow\n self.use_spk_conditioned_encoder = kwargs.get(\n \"use_spk_conditioned_encoder\", True\n )\n self.use_sdp = use_sdp\n self.use_noise_scaled_mas = kwargs.get(\"use_noise_scaled_mas\", False)\n self.mas_noise_scale_initial = kwargs.get(\"mas_noise_scale_initial\", 0.01)\n self.noise_scale_delta = kwargs.get(\"noise_scale_delta\", 2e-6)\n self.current_mas_noise_scale = self.mas_noise_scale_initial\n if self.use_spk_conditioned_encoder and gin_channels > 0:\n self.enc_gin_channels = gin_channels\n self.enc_p = TextEncoder(\n n_vocab,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n gin_channels=self.enc_gin_channels,\n )\n self.dec = Generator(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gin_channels=gin_channels,\n )\n self.enc_q = PosteriorEncoder(\n spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n if use_transformer_flow:\n self.flow = TransformerCouplingBlock(\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers_trans_flow,\n 5,\n p_dropout,\n n_flow_layer,\n gin_channels=gin_channels,\n share_parameter=flow_share_parameter,\n )\n else:\n self.flow = ResidualCouplingBlock(\n inter_channels,\n hidden_channels,\n 5,\n 1,\n n_flow_layer,\n gin_channels=gin_channels,\n )\n self.sdp = StochasticDurationPredictor(\n hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels\n )\n self.dp = DurationPredictor(\n hidden_channels, 256, 3, 0.5, gin_channels=gin_channels\n )\n\n if n_speakers >= 1:\n self.emb_g = nn.Embedding(n_speakers, gin_channels)\n else:\n self.ref_enc = ReferenceEncoder(spec_channels, gin_channels)\n\n def forward(\n self,\n x,\n x_lengths,\n y,\n y_lengths,\n sid,\n tone,\n language,\n bert,\n ja_bert,\n en_bert,\n emo=None,\n ):\n if self.n_speakers > 0:\n g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]\n else:\n g = self.ref_enc(y.transpose(1, 2)).unsqueeze(-1)\n x, m_p, logs_p, x_mask = self.enc_p(\n x, x_lengths, tone, language, bert, ja_bert, en_bert, sid, g=g\n )\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n\n with torch.no_grad():\n # negative cross-entropy\n s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t]\n neg_cent1 = torch.sum(\n -0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True\n ) # [b, 1, t_s]\n neg_cent2 = torch.matmul(\n -0.5 * 
(z_p**2).transpose(1, 2), s_p_sq_r\n ) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]\n neg_cent3 = torch.matmul(\n z_p.transpose(1, 2), (m_p * s_p_sq_r)\n ) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]\n neg_cent4 = torch.sum(\n -0.5 * (m_p**2) * s_p_sq_r, [1], keepdim=True\n ) # [b, 1, t_s]\n neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4\n if self.use_noise_scaled_mas:\n epsilon = (\n torch.std(neg_cent)\n * torch.randn_like(neg_cent)\n * self.current_mas_noise_scale\n )\n neg_cent = neg_cent + epsilon\n\n attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)\n attn = (\n monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1))\n .unsqueeze(1)\n .detach()\n )\n\n w = attn.sum(2)\n\n l_length_sdp = self.sdp(x, x_mask, w, g=g)\n l_length_sdp = l_length_sdp / torch.sum(x_mask)\n\n logw_ = torch.log(w + 1e-6) * x_mask\n logw = self.dp(x, x_mask, g=g)\n l_length_dp = torch.sum((logw - logw_) ** 2, [1, 2]) / torch.sum(\n x_mask\n ) # for averaging\n\n l_length = l_length_dp + l_length_sdp\n\n # expand prior\n m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2)\n logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2)\n\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n o = self.dec(z_slice, g=g)\n return (\n o,\n l_length,\n attn,\n ids_slice,\n x_mask,\n y_mask,\n (z, z_p, m_p, logs_p, m_q, logs_q),\n (x, logw, logw_),\n )\n\n def infer(\n self,\n x,\n x_lengths,\n sid,\n tone,\n language,\n bert,\n ja_bert,\n en_bert,\n noise_scale=0.667,\n length_scale=1,\n noise_scale_w=0.8,\n max_len=None,\n sdp_ratio=0,\n y=None,\n ):\n if self.n_speakers > 0:\n g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]\n else:\n g = self.ref_enc(y.transpose(1, 2)).unsqueeze(-1)\n x, m_p, logs_p, x_mask = self.enc_p(\n x, x_lengths, tone, language, bert, ja_bert, en_bert, g=g\n )\n # torch.save(self.enc_p.state_dict(), 'enc_p.pth')\n logw = self.sdp(x, x_mask, g=g, noise_scale=noise_scale_w) * (\n sdp_ratio\n ) + self.dp(x, x_mask, g=g) * (1 - sdp_ratio)\n # torch.save(self.sdp.state_dict(), 'sdp.pth')\n w = torch.exp(logw) * x_mask * length_scale\n w_ceil = torch.ceil(w)\n y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()\n # y_lenghts 变更了\n y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(\n x_mask.dtype\n )\n attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)\n attn = commons.generate_path(w_ceil, attn_mask)\n\n m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(\n 1, 2\n ) # [b, t', t], [b, t, d] -> [b, d, t']\n logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(\n 1, 2\n ) # [b, t', t], [b, t, d] -> [b, d, t']\n\n z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale\n z = self.flow(z_p, y_mask, g=g, reverse=True)\n o = self.dec((z * y_mask)[:, :, :max_len], g=g)\n return o, attn, y_mask, (z, z_p, m_p, logs_p)\n \n def infer_export(\n self,\n path,\n x,\n x_lengths,\n sid,\n tone,\n language,\n bert,\n ja_bert,\n en_bert,\n noise_scale=0.667,\n length_scale=1,\n noise_scale_w=0.8,\n max_len=None,\n sdp_ratio=0,\n y=None):\n \n x_cp = torch.LongTensor(x.clone().cpu())\n x_lengths_cp = torch.LongTensor(x_lengths.clone().cpu())\n sid_cp = torch.LongTensor(sid.clone().cpu())\n tone_cp = torch.LongTensor(tone.clone().cpu())\n language_cp = torch.LongTensor(language.clone().cpu())\n bert_cp = bert.clone().cpu()\n ja_bert_cp = ja_bert.clone().cpu()\n en_bert_cp = en_bert.clone().cpu()\n \n 
exported_onnx_dir = \"onnx_exports\"\n if not os.path.exists(f'{exported_onnx_dir}/{path}'):\n os.makedirs(f'{exported_onnx_dir}/{path}', exist_ok=True)\n print(f'{exported_onnx_dir}/{path}')\n \n if self.n_speakers > 0:\n g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]\n self.emb_g.cpu()\n torch.onnx.export(\n self.emb_g,\n (sid_cp),\n f\"{exported_onnx_dir}/{path}/emb.onnx\",\n input_names=[\"sid\"],\n output_names=[\"g\"],\n verbose=False,\n opset_version=17,\n \n )\n else:\n g = self.ref_enc(y.transpose(1, 2)).unsqueeze(-1)\n self.emb_g.to('cuda')\n x, m_p, logs_p, x_mask = self.enc_p(\n x, x_lengths, tone, language, bert, ja_bert, en_bert, g=g\n )\n self.enc_p.eval()\n self.enc_p.to('cpu')\n\n torch.onnx.export(\n self.enc_p,\n (x_cp, x_lengths_cp, tone_cp, language_cp, bert_cp, ja_bert_cp, en_bert_cp, g.cpu()),\n f\"{exported_onnx_dir}/{path}/enc.onnx\",\n input_names=[\n \"x\",\n \"x_lengths\",\n \"tone\",\n \"language\",\n \"bert\",\n \"ja_bert\",\n \"en_bert\",\n \"g\",\n ],\n output_names=[\"xout\", \"m_p\", \"logs_p\", \"x_mask\"],\n dynamic_axes={\n \"x\": [1],\n \"x_lengths\": [0],\n \"tone\": [1],\n \"language\": [1],\n \"bert\": [2],\n \"ja_bert\": [2],\n \"en_bert\": [2],\n \"xout\": [2],\n \"m_p\": [2],\n \"logs_p\": [2],\n \"x_mask\": [2],\n },\n verbose=False,\n opset_version=17,\n )\n\n self.enc_p.to('cuda')\n print('start sdp!')\n \n logw = self.sdp(x, x_mask, g=g, noise_scale=noise_scale_w) * (\n sdp_ratio\n ) + self.dp(x, x_mask, g=g) * (1 - sdp_ratio)\n\n self.sdp.eval()\n self.sdp.to('cpu')\n self.dp.to('cpu')\n\n noise_scale_w = 0.8*torch.ones((1,), dtype=torch.float32)\n \n # \n # sdp_state_dict = self.sdp.state_dict()\n # torch.save(sdp_state_dict, 'sdp_weights.pth')\n \n torch.onnx.export(\n self.sdp,\n (x.cpu(), x_mask.cpu(), g.cpu(), noise_scale_w.cpu()),\n f\"{exported_onnx_dir}/{path}/sdp.onnx\",\n input_names=[\"x\", \"x_mask\", \"g\", \"noise_scale_w\"],\n output_names=[\"logw\"],\n # dynamic_axes={\"x\": [0, 2], \"x_mask\": [0, 2], \"logw\": [0, 2]},\n dynamic_axes={\"x\": [2], \"x_mask\": [2], \"logw\": [2]},\n verbose=False,\n opset_version=17\n )\n torch.onnx.export(\n self.dp,\n (x.cpu(), x_mask.cpu(), g.cpu()),\n f\"{exported_onnx_dir}/{path}/dp.onnx\",\n input_names=[\"x\", \"x_mask\", \"g\"],\n output_names=[\"logw\"],\n # dynamic_axes={\"x\": [0, 2], \"x_mask\": [0, 2], \"logw\": [0, 2]},\n dynamic_axes={\"x\": [2], \"x_mask\": [2], \"logw\": [2]},\n verbose=False,\n opset_version=17,\n )\n \n self.sdp.to('cuda')\n self.dp.to('cuda')\n \n w = torch.exp(logw) * x_mask * length_scale\n w_ceil = torch.ceil(w)\n y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()\n y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(\n x_mask.dtype\n )\n attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)\n attn = commons.generate_path(w_ceil, attn_mask)\n\n m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(\n 1, 2\n ) # [b, t', t], [b, t, d] -> [b, d, t']\n logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(\n 1, 2\n ) # [b, t', t], [b, t, d] -> [b, d, t']\n\n z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale\n \n z = self.flow(z_p, y_mask, g=g, reverse=True)\n self.flow.to(\"cpu\")\n torch.onnx.export(\n self.flow,\n (z_p.cpu(), y_mask.cpu(), g.cpu()),\n f\"{exported_onnx_dir}/{path}/flow.onnx\",\n input_names=[\"z_p\", \"y_mask\", \"g\"],\n output_names=[\"z\"],\n # dynamic_axes={\"z_p\": [0, 2], \"y_mask\": [0, 2], \"z\": [0, 2]},\n dynamic_axes={\"z_p\": [2], 
\"y_mask\": [2], \"z\": [2]},\n verbose=False,\n opset_version=17,\n )\n self.flow.to(\"cuda\")\n \n o = self.dec((z * y_mask)[:, :, :max_len], g=g)\n self.dec.to('cpu')\n z_in = (z * y_mask)[:, :, :max_len]\n torch.onnx.export(\n self.dec,\n (z_in.cpu(), g.cpu()),\n f\"{exported_onnx_dir}/{path}/dec.onnx\",\n input_names=[\"z_in\", \"g\"],\n output_names=[\"o\"],\n # dynamic_axes={\"z_in\": [0, 2], \"o\": [0, 2]},\n dynamic_axes={\"z_in\": [2], \"o\": [2]},\n verbose=False,\n opset_version=17,\n )\n self.dec.to('cuda')\n return o, attn, y_mask, (z, z_p, m_p, logs_p)" }, { "identifier": "MultiPeriodDiscriminator", "path": "models.py", "snippet": "class MultiPeriodDiscriminator(torch.nn.Module):\n def __init__(self, use_spectral_norm=False):\n super(MultiPeriodDiscriminator, self).__init__()\n periods = [2, 3, 5, 7, 11]\n\n discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]\n discs = discs + [\n DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods\n ]\n self.discriminators = nn.ModuleList(discs)\n\n def forward(self, y, y_hat):\n y_d_rs = []\n y_d_gs = []\n fmap_rs = []\n fmap_gs = []\n for i, d in enumerate(self.discriminators):\n y_d_r, fmap_r = d(y)\n y_d_g, fmap_g = d(y_hat)\n y_d_rs.append(y_d_r)\n y_d_gs.append(y_d_g)\n fmap_rs.append(fmap_r)\n fmap_gs.append(fmap_g)\n\n return y_d_rs, y_d_gs, fmap_rs, fmap_gs" }, { "identifier": "DurationDiscriminator", "path": "models.py", "snippet": "class DurationDiscriminator(nn.Module): # vits2\n def __init__(\n self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0\n ):\n super().__init__()\n\n self.in_channels = in_channels\n self.filter_channels = filter_channels\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.gin_channels = gin_channels\n\n self.drop = nn.Dropout(p_dropout)\n self.conv_1 = nn.Conv1d(\n in_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.norm_1 = modules.LayerNorm(filter_channels)\n self.conv_2 = nn.Conv1d(\n filter_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.norm_2 = modules.LayerNorm(filter_channels)\n self.dur_proj = nn.Conv1d(1, filter_channels, 1)\n\n self.pre_out_conv_1 = nn.Conv1d(\n 2 * filter_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.pre_out_norm_1 = modules.LayerNorm(filter_channels)\n self.pre_out_conv_2 = nn.Conv1d(\n filter_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.pre_out_norm_2 = modules.LayerNorm(filter_channels)\n\n if gin_channels != 0:\n self.cond = nn.Conv1d(gin_channels, in_channels, 1)\n\n self.output_layer = nn.Sequential(nn.Linear(filter_channels, 1), nn.Sigmoid())\n\n def forward_probability(self, x, x_mask, dur, g=None):\n dur = self.dur_proj(dur)\n x = torch.cat([x, dur], dim=1)\n x = self.pre_out_conv_1(x * x_mask)\n x = torch.relu(x)\n x = self.pre_out_norm_1(x)\n x = self.drop(x)\n x = self.pre_out_conv_2(x * x_mask)\n x = torch.relu(x)\n x = self.pre_out_norm_2(x)\n x = self.drop(x)\n x = x * x_mask\n x = x.transpose(1, 2)\n output_prob = self.output_layer(x)\n return output_prob\n\n def forward(self, x, x_mask, dur_r, dur_hat, g=None):\n x = torch.detach(x)\n if g is not None:\n g = torch.detach(g)\n x = x + self.cond(g)\n x = self.conv_1(x * x_mask)\n x = torch.relu(x)\n x = self.norm_1(x)\n x = self.drop(x)\n x = self.conv_2(x * x_mask)\n x = torch.relu(x)\n x = self.norm_2(x)\n x = self.drop(x)\n\n output_probs = []\n for dur in [dur_r, dur_hat]:\n output_prob = self.forward_probability(x, x_mask, 
dur, g)\n output_probs.append(output_prob)\n\n return output_probs" }, { "identifier": "generator_loss", "path": "losses.py", "snippet": "def generator_loss(disc_outputs):\n loss = 0\n gen_losses = []\n for dg in disc_outputs:\n dg = dg.float()\n l = torch.mean((1 - dg) ** 2)\n gen_losses.append(l)\n loss += l\n\n return loss, gen_losses" }, { "identifier": "discriminator_loss", "path": "losses.py", "snippet": "def discriminator_loss(disc_real_outputs, disc_generated_outputs):\n loss = 0\n r_losses = []\n g_losses = []\n for dr, dg in zip(disc_real_outputs, disc_generated_outputs):\n dr = dr.float()\n dg = dg.float()\n r_loss = torch.mean((1 - dr) ** 2)\n g_loss = torch.mean(dg**2)\n loss += r_loss + g_loss\n r_losses.append(r_loss.item())\n g_losses.append(g_loss.item())\n\n return loss, r_losses, g_losses" }, { "identifier": "feature_loss", "path": "losses.py", "snippet": "def feature_loss(fmap_r, fmap_g):\n loss = 0\n for dr, dg in zip(fmap_r, fmap_g):\n for rl, gl in zip(dr, dg):\n rl = rl.float().detach()\n gl = gl.float()\n loss += torch.mean(torch.abs(rl - gl))\n\n return loss * 2" }, { "identifier": "kl_loss", "path": "losses.py", "snippet": "def kl_loss(z_p, logs_q, m_p, logs_p, z_mask):\n \"\"\"\n z_p, logs_q: [b, h, t_t]\n m_p, logs_p: [b, h, t_t]\n \"\"\"\n z_p = z_p.float()\n logs_q = logs_q.float()\n m_p = m_p.float()\n logs_p = logs_p.float()\n z_mask = z_mask.float()\n\n kl = logs_p - logs_q - 0.5\n kl += 0.5 * ((z_p - m_p) ** 2) * torch.exp(-2.0 * logs_p)\n kl = torch.sum(kl * z_mask)\n l = kl / torch.sum(z_mask)\n return l" }, { "identifier": "mel_spectrogram_torch", "path": "mel_processing.py", "snippet": "def mel_spectrogram_torch(\n y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False\n):\n if torch.min(y) < -1.0:\n print(\"min value is \", torch.min(y))\n if torch.max(y) > 1.0:\n print(\"max value is \", torch.max(y))\n\n global mel_basis, hann_window\n dtype_device = str(y.dtype) + \"_\" + str(y.device)\n fmax_dtype_device = str(fmax) + \"_\" + dtype_device\n wnsize_dtype_device = str(win_size) + \"_\" + dtype_device\n if fmax_dtype_device not in mel_basis:\n mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)\n mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(\n dtype=y.dtype, device=y.device\n )\n if wnsize_dtype_device not in hann_window:\n hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(\n dtype=y.dtype, device=y.device\n )\n\n y = torch.nn.functional.pad(\n y.unsqueeze(1),\n (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)),\n mode=\"reflect\",\n )\n y = y.squeeze(1)\n\n spec = torch.stft(\n y,\n n_fft,\n hop_length=hop_size,\n win_length=win_size,\n window=hann_window[wnsize_dtype_device],\n center=center,\n pad_mode=\"reflect\",\n normalized=False,\n onesided=True,\n return_complex=False,\n )\n\n spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)\n\n spec = torch.matmul(mel_basis[fmax_dtype_device], spec)\n spec = spectral_normalize_torch(spec)\n\n return spec" }, { "identifier": "spec_to_mel_torch", "path": "mel_processing.py", "snippet": "def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):\n global mel_basis\n dtype_device = str(spec.dtype) + \"_\" + str(spec.device)\n fmax_dtype_device = str(fmax) + \"_\" + dtype_device\n if fmax_dtype_device not in mel_basis:\n mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)\n mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(\n dtype=spec.dtype, device=spec.device\n )\n spec = 
torch.matmul(mel_basis[fmax_dtype_device], spec)\n spec = spectral_normalize_torch(spec)\n return spec" }, { "identifier": "symbols", "path": "text/symbols.py", "snippet": "" } ]
import platform
import os
import torch
import torch.distributed as dist
import logging
import argparse
import commons
import utils
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.cuda.amp import autocast, GradScaler
from tqdm import tqdm
from config import config
from data_utils import (
    TextAudioSpeakerLoader,
    TextAudioSpeakerCollate,
    DistributedBucketSampler,
)
from models import (
    SynthesizerTrn,
    MultiPeriodDiscriminator,
    DurationDiscriminator,
)
from losses import generator_loss, discriminator_loss, feature_loss, kl_loss
from mel_processing import mel_spectrogram_torch, spec_to_mel_torch
from text.symbols import symbols
12,381
# 命令行/config.yml配置解析 # hps = utils.get_hparams() parser = argparse.ArgumentParser() # 非必要不建议使用命令行配置,请使用config.yml文件 parser.add_argument( "-c", "--config", type=str, default=config.train_ms_config.config_path, help="JSON file for configuration", ) parser.add_argument( "-m", "--model", type=str, help="数据集文件夹路径,请注意,数据不再默认放在/logs文件夹下。如果需要用命令行配置,请声明相对于根目录的路径", default=config.dataset_path, ) args = parser.parse_args() model_dir = os.path.join(args.model, config.train_ms_config.model) if not os.path.exists(model_dir): os.makedirs(model_dir) hps = utils.get_hparams_from_file(args.config) hps.model_dir = model_dir # 比较路径是否相同 if os.path.realpath(args.config) != os.path.realpath( config.train_ms_config.config_path ): with open(args.config, "r", encoding="utf-8") as f: data = f.read() with open(config.train_ms_config.config_path, "w", encoding="utf-8") as f: f.write(data) torch.manual_seed(hps.train.seed) torch.cuda.set_device(local_rank) global global_step if rank == 0: logger = utils.get_logger(hps.model_dir) logger.info(hps) utils.check_git_hash(hps.model_dir) writer = SummaryWriter(log_dir=hps.model_dir) writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval")) train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps.data) train_sampler = DistributedBucketSampler( train_dataset, hps.train.batch_size, [32, 300, 400, 500, 600, 700, 800, 900, 1000], num_replicas=n_gpus, rank=rank, shuffle=True, ) collate_fn = TextAudioSpeakerCollate() train_loader = DataLoader( train_dataset, num_workers=16, shuffle=False, pin_memory=True, collate_fn=collate_fn, batch_sampler=train_sampler, persistent_workers=True, prefetch_factor=4, ) # DataLoader config could be adjusted. if rank == 0: eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data) eval_loader = DataLoader( eval_dataset, num_workers=0, shuffle=False, batch_size=1, pin_memory=True, drop_last=False, collate_fn=collate_fn, ) if ( "use_noise_scaled_mas" in hps.model.keys() and hps.model.use_noise_scaled_mas is True ): print("Using noise scaled MAS for VITS2") mas_noise_scale_initial = 0.01 noise_scale_delta = 2e-6 else: print("Using normal MAS for VITS1") mas_noise_scale_initial = 0.0 noise_scale_delta = 0.0 if ( "use_duration_discriminator" in hps.model.keys() and hps.model.use_duration_discriminator is True ): print("Using duration discriminator for VITS2") net_dur_disc = DurationDiscriminator( hps.model.hidden_channels, hps.model.hidden_channels, 3, 0.1, gin_channels=hps.model.gin_channels if hps.data.n_speakers != 0 else 0, ).cuda(local_rank) if ( "use_spk_conditioned_encoder" in hps.model.keys() and hps.model.use_spk_conditioned_encoder is True ): if hps.data.n_speakers == 0: raise ValueError( "n_speakers must be > 0 when using spk conditioned encoder to train multi-speaker model" ) else: print("Using normal encoder for VITS1") net_g = SynthesizerTrn( len(symbols), hps.data.filter_length // 2 + 1, hps.train.segment_size // hps.data.hop_length, n_speakers=hps.data.n_speakers, mas_noise_scale_initial=mas_noise_scale_initial, noise_scale_delta=noise_scale_delta, **hps.model, ).cuda(local_rank)
# flake8: noqa: E402 logging.getLogger("numba").setLevel(logging.WARNING) torch.backends.cuda.matmul.allow_tf32 = True torch.backends.cudnn.allow_tf32 = ( True # If encontered training problem,please try to disable TF32. ) torch.set_float32_matmul_precision("medium") torch.backends.cudnn.benchmark = True torch.backends.cuda.sdp_kernel("flash") torch.backends.cuda.enable_flash_sdp(True) torch.backends.cuda.enable_mem_efficient_sdp( True ) # Not available if torch version is lower than 2.0 torch.backends.cuda.enable_math_sdp(True) global_step = 0 def run(): # 环境变量解析 envs = config.train_ms_config.env for env_name, env_value in envs.items(): if env_name not in os.environ.keys(): print("加载config中的配置{}".format(str(env_value))) os.environ[env_name] = str(env_value) print( "加载环境变量 \nMASTER_ADDR: {},\nMASTER_PORT: {},\nWORLD_SIZE: {},\nRANK: {},\nLOCAL_RANK: {}".format( os.environ["MASTER_ADDR"], os.environ["MASTER_PORT"], os.environ["WORLD_SIZE"], os.environ["RANK"], os.environ["LOCAL_RANK"], ) ) # 多卡训练设置 backend = "nccl" if platform.system() == "Windows": backend = "gloo" dist.init_process_group( backend=backend, init_method="env://", # If Windows,switch to gloo backend. ) # Use torchrun instead of mp.spawn rank = dist.get_rank() local_rank = int(os.environ["LOCAL_RANK"]) n_gpus = dist.get_world_size() # 命令行/config.yml配置解析 # hps = utils.get_hparams() parser = argparse.ArgumentParser() # 非必要不建议使用命令行配置,请使用config.yml文件 parser.add_argument( "-c", "--config", type=str, default=config.train_ms_config.config_path, help="JSON file for configuration", ) parser.add_argument( "-m", "--model", type=str, help="数据集文件夹路径,请注意,数据不再默认放在/logs文件夹下。如果需要用命令行配置,请声明相对于根目录的路径", default=config.dataset_path, ) args = parser.parse_args() model_dir = os.path.join(args.model, config.train_ms_config.model) if not os.path.exists(model_dir): os.makedirs(model_dir) hps = utils.get_hparams_from_file(args.config) hps.model_dir = model_dir # 比较路径是否相同 if os.path.realpath(args.config) != os.path.realpath( config.train_ms_config.config_path ): with open(args.config, "r", encoding="utf-8") as f: data = f.read() with open(config.train_ms_config.config_path, "w", encoding="utf-8") as f: f.write(data) torch.manual_seed(hps.train.seed) torch.cuda.set_device(local_rank) global global_step if rank == 0: logger = utils.get_logger(hps.model_dir) logger.info(hps) utils.check_git_hash(hps.model_dir) writer = SummaryWriter(log_dir=hps.model_dir) writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval")) train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps.data) train_sampler = DistributedBucketSampler( train_dataset, hps.train.batch_size, [32, 300, 400, 500, 600, 700, 800, 900, 1000], num_replicas=n_gpus, rank=rank, shuffle=True, ) collate_fn = TextAudioSpeakerCollate() train_loader = DataLoader( train_dataset, num_workers=16, shuffle=False, pin_memory=True, collate_fn=collate_fn, batch_sampler=train_sampler, persistent_workers=True, prefetch_factor=4, ) # DataLoader config could be adjusted. 
if rank == 0: eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data) eval_loader = DataLoader( eval_dataset, num_workers=0, shuffle=False, batch_size=1, pin_memory=True, drop_last=False, collate_fn=collate_fn, ) if ( "use_noise_scaled_mas" in hps.model.keys() and hps.model.use_noise_scaled_mas is True ): print("Using noise scaled MAS for VITS2") mas_noise_scale_initial = 0.01 noise_scale_delta = 2e-6 else: print("Using normal MAS for VITS1") mas_noise_scale_initial = 0.0 noise_scale_delta = 0.0 if ( "use_duration_discriminator" in hps.model.keys() and hps.model.use_duration_discriminator is True ): print("Using duration discriminator for VITS2") net_dur_disc = DurationDiscriminator( hps.model.hidden_channels, hps.model.hidden_channels, 3, 0.1, gin_channels=hps.model.gin_channels if hps.data.n_speakers != 0 else 0, ).cuda(local_rank) if ( "use_spk_conditioned_encoder" in hps.model.keys() and hps.model.use_spk_conditioned_encoder is True ): if hps.data.n_speakers == 0: raise ValueError( "n_speakers must be > 0 when using spk conditioned encoder to train multi-speaker model" ) else: print("Using normal encoder for VITS1") net_g = SynthesizerTrn( len(symbols), hps.data.filter_length // 2 + 1, hps.train.segment_size // hps.data.hop_length, n_speakers=hps.data.n_speakers, mas_noise_scale_initial=mas_noise_scale_initial, noise_scale_delta=noise_scale_delta, **hps.model, ).cuda(local_rank)
net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(local_rank)
5
2023-12-18 09:53:41+00:00
16k
sinoyou/nelf-pro
nerfstudio/data/datamanagers/base_datamanager.py
[ { "identifier": "CameraOptimizerConfig", "path": "nerfstudio/cameras/camera_optimizers.py", "snippet": "class CameraOptimizerConfig(cfg.InstantiateConfig):\n \"\"\"Configuration of optimization for camera poses.\"\"\"\n\n _target: Type = field(default_factory=lambda: CameraOptimizer)\n\n mode: Literal[\"off\", \"SO3xR3\", \"SE3\"] = \"off\"\n \"\"\"Pose optimization strategy to use. If enabled, we recommend SO3xR3.\"\"\"\n\n position_noise_std: float = 0.0\n \"\"\"Noise to add to initial positions. Useful for debugging.\"\"\"\n\n orientation_noise_std: float = 0.0\n \"\"\"Noise to add to initial orientations. Useful for debugging.\"\"\"\n\n optimizer: AdamOptimizerConfig = AdamOptimizerConfig(lr=6e-4, eps=1e-15)\n \"\"\"ADAM parameters for camera optimization.\"\"\"\n\n scheduler: SchedulerConfig = SchedulerConfig(max_steps=10000)\n \"\"\"Learning rate scheduler for camera optimizer..\"\"\"\n\n param_group: tyro.conf.Suppress[str] = \"camera_opt\"\n \"\"\"Name of the parameter group used for pose optimization. Can be any string that doesn't conflict with other\n groups.\"\"\"" }, { "identifier": "CameraType", "path": "nerfstudio/cameras/cameras.py", "snippet": "class CameraType(Enum):\n \"\"\"Supported camera types.\"\"\"\n\n PERSPECTIVE = auto()\n FISHEYE = auto()\n EQUIRECTANGULAR = auto()" }, { "identifier": "RayBundle", "path": "nerfstudio/cameras/rays.py", "snippet": "class RayBundle(TensorDataclass):\n \"\"\"A bundle of ray parameters.\"\"\"\n\n # TODO(ethan): make sure the sizes with ... are correct\n origins: TensorType[..., 3]\n \"\"\"Ray origins (XYZ)\"\"\"\n directions: TensorType[..., 3]\n \"\"\"Unit ray direction vector\"\"\"\n pixel_area: TensorType[..., 1]\n \"\"\"Projected area of pixel a distance 1 away from origin\"\"\"\n directions_norm: Optional[TensorType[..., 1]] = None\n \"\"\"Norm of ray direction vector before normalization\"\"\"\n camera_indices: Optional[TensorType[..., 1]] = None\n \"\"\"Camera indices\"\"\"\n nears: Optional[TensorType[..., 1]] = None\n \"\"\"Distance along ray to start sampling\"\"\"\n fars: Optional[TensorType[..., 1]] = None\n \"\"\"Rays Distance along ray to stop sampling\"\"\"\n metadata: Optional[Dict[str, TensorType[\"num_rays\", \"latent_dims\"]]] = None\n \"\"\"Additional metadata or data needed for interpolation, will mimic shape of rays\"\"\"\n times: Optional[TensorType[..., 1]] = None\n \"\"\"Times at which rays are sampled\"\"\"\n probes: Optional[Probes] = None\n \"\"\"Probe Cameras Object. This object doesn't follow the same shape pattern as the other fields. \n Lazy broadcasting is used for preventing CUDA memory overflow. 
\"\"\"\n\n def set_camera_indices(self, camera_index: int) -> None:\n \"\"\"Sets all of the the camera indices to a specific camera index.\n\n Args:\n camera_index: Camera index.\n \"\"\"\n self.camera_indices = torch.ones_like(self.origins[..., 0:1]).long() * camera_index\n\n def __len__(self):\n num_rays = torch.numel(self.origins) // self.origins.shape[-1]\n return num_rays\n\n def sample(self, num_rays: int) -> \"RayBundle\":\n \"\"\"Returns a RayBundle as a subset of rays.\n\n Args:\n num_rays: Number of rays in output RayBundle\n\n Returns:\n RayBundle with subset of rays.\n \"\"\"\n assert num_rays <= len(self)\n indices = random.sample(range(len(self)), k=num_rays)\n return self[indices]\n\n def get_row_major_sliced_ray_bundle(self, start_idx: int, end_idx: int) -> \"RayBundle\":\n \"\"\"Flattens RayBundle and extracts chunk given start and end indicies.\n\n Args:\n start_idx: Start index of RayBundle chunk.\n end_idx: End index of RayBundle chunk.\n\n Returns:\n Flattened RayBundle with end_idx-start_idx rays.\n\n \"\"\"\n return self.flatten()[start_idx:end_idx]\n\n def get_ray_samples(\n self,\n bin_starts: TensorType[\"bs\":..., \"num_samples\", 1],\n bin_ends: TensorType[\"bs\":..., \"num_samples\", 1],\n spacing_starts: Optional[TensorType[\"bs\":..., \"num_samples\", 1]] = None,\n spacing_ends: Optional[TensorType[\"bs\":..., \"num_samples\", 1]] = None,\n spacing_to_euclidean_fn: Optional[Callable] = None,\n ) -> RaySamples:\n \"\"\"Produces samples for each ray by projection points along the ray direction. Currently samples uniformly.\n\n Args:\n bin_starts: Distance from origin to start of bin. (in Euclidean space)\n bin_ends: Distance from origin to end of bin. (in Euclidean space)\n spacing_starts: start point in normalized space. [0, 1]\n spacing_ends: end point in normalized space. 
[0, 1]\n\n Returns:\n Samples projected along ray.\n \"\"\"\n deltas = bin_ends - bin_starts\n if self.camera_indices is not None:\n camera_indices = self.camera_indices[..., None]\n else:\n camera_indices = None\n\n shaped_raybundle_fields = self[..., None]\n\n frustums = Frustums(\n origins=shaped_raybundle_fields.origins, # [..., 1, 3]\n directions=shaped_raybundle_fields.directions, # [..., 1, 3]\n starts=bin_starts, # [..., num_samples, 1]\n ends=bin_ends, # [..., num_samples, 1]\n pixel_area=shaped_raybundle_fields.pixel_area, # [..., 1, 1]\n )\n\n ray_samples = RaySamples(\n frustums=frustums,\n camera_indices=camera_indices, # [..., 1, 1]\n deltas=deltas, # [..., num_samples, 1]\n spacing_starts=spacing_starts, # [..., num_samples, 1]\n spacing_ends=spacing_ends, # [..., num_samples, 1]\n spacing_to_euclidean_fn=spacing_to_euclidean_fn,\n metadata=shaped_raybundle_fields.metadata,\n times=None if self.times is None else self.times[..., None], # [..., 1, 1]\n probes=self.probes, # special class, not following the same shape pattern\n )\n\n return ray_samples" }, { "identifier": "InstantiateConfig", "path": "nerfstudio/configs/base_config.py", "snippet": "class InstantiateConfig(PrintableConfig): # pylint: disable=too-few-public-methods\n \"\"\"Config class for instantiating an the class specified in the _target attribute.\"\"\"\n\n _target: Type\n\n def setup(self, **kwargs) -> Any:\n \"\"\"Returns the instantiated object using the config.\"\"\"\n return self._target(self, **kwargs)" }, { "identifier": "NeLFProDataParserConfig", "path": "nerfstudio/data/dataparsers/nelfpro_dataparser.py", "snippet": "class NeLFProDataParserConfig(DataParserConfig):\n \"\"\"Configuration for the SpherRiFDataParser.\"\"\"\n\n _target: Type = field(default_factory=lambda: NeLFProDataParser)\n \n # raw dataset loader config\n raw_loader: Literal[\"llff\", \"kitti360\", \"bungee\", \"nerfstudio\"] = \"llff\"\n data: Path = Path(\"./data/please_fill_in_the_path_to_your_raw_dataset\")\n eval_interval: int = 8\n eval_type: Literal[\"dev\"] = \"dev\"\n\n # camera pose config\n scale_factor: float = 1.0\n downscale_factor: Optional[int] = None\n scenebox_scale: int = 1.0\n orientation_method: Literal[\"none\", \"up\", \"pca\"] = \"up\"\n center_poses: bool = True\n auto_scale_poses: bool = True\n\n # probe generation config\n data_num_core: int = 3\n data_num_basis: int = 64\n use_kmeans_core: bool = True\n use_fps_basis: bool = True\n factor_pos_noise_scale: float = 0.02\n\n # point cloud config\n point_cloud_sample_num: int = -1" }, { "identifier": "GeneralizedDataset", "path": "nerfstudio/data/datasets/base_dataset.py", "snippet": "class GeneralizedDataset(InputDataset):\n \"\"\"Dataset that returns images, possibly of different sizes.\n\n The only thing that separates this from the inputdataset is that this will return\n image / mask tensors inside a list, meaning when collate receives the images, it will\n simply concatenate the lists together. 
The concatenation of images of different sizes would\n fail otherwise.\n\n Args:\n dataparser_outputs: description of where and how to read input images.\n \"\"\"\n\n def __init__(self, dataparser_outputs: DataparserOutputs, scale_factor: float = 1.0):\n super().__init__(dataparser_outputs, scale_factor)\n\n h = None\n w = None\n all_hw_same = True\n for filename in track(\n self._dataparser_outputs.image_filenames, transient=True, description=\"Checking image sizes\"\n ):\n image = Image.open(filename)\n if h is None:\n h = image.height\n w = image.width\n\n if image.height != h or image.width != w:\n all_hw_same = False\n break\n\n self.all_hw_same = all_hw_same\n\n def get_data(self, image_idx: int) -> Dict:\n \"\"\"Returns the ImageDataset data as a dictionary.\n\n Args:\n image_idx: The image index in the dataset.\n \"\"\"\n # If all images are the same size, we can just return the image and mask tensors in a regular way\n if self.all_hw_same:\n return super().get_data(image_idx)\n\n # Otherwise return them in a custom struct\n image = self.get_image(image_idx)\n data = {\"image_idx\": image_idx, 'image_filename': self._dataparser_outputs.image_filenames[image_idx].name}\n data[\"image\"] = BasicImages([image])\n for _, data_func_dict in self._dataparser_outputs.additional_inputs.items():\n assert \"func\" in data_func_dict, \"Missing function to process data: specify `func` in `additional_inputs`\"\n func = data_func_dict[\"func\"]\n assert \"kwargs\" in data_func_dict, \"No data to process: specify `kwargs` in `additional_inputs`\"\n data.update(func(image_idx, **data_func_dict[\"kwargs\"]))\n if self.has_masks:\n mask_filepath = self._dataparser_outputs.mask_filenames[image_idx]\n data[\"mask\"] = BasicImages([get_image_mask_tensor_from_path(filepath=mask_filepath)])\n metadata = self.get_metadata(data)\n data.update(metadata)\n return data" }, { "identifier": "InputDataset", "path": "nerfstudio/data/datasets/base_dataset.py", "snippet": "class InputDataset(Dataset):\n \"\"\"Dataset that returns images.\n\n Args:\n dataparser_outputs: description of where and how to read input images.\n scale_factor: The scaling factor for the dataparser outputs\n \"\"\"\n\n def __init__(self, dataparser_outputs: DataparserOutputs, scale_factor: float = 1.0):\n super().__init__()\n self._dataparser_outputs = dataparser_outputs\n self.has_masks = dataparser_outputs.mask_filenames is not None\n self.scale_factor = scale_factor\n self.scene_box = deepcopy(dataparser_outputs.scene_box)\n self.metadata = deepcopy(dataparser_outputs.metadata)\n self.cameras = deepcopy(dataparser_outputs.cameras)\n self.cameras.rescale_output_resolution(scaling_factor=scale_factor)\n self.image_cache = {}\n\n def __len__(self):\n return len(self._dataparser_outputs.image_filenames)\n\n def get_numpy_image(self, image_idx: int) -> npt.NDArray[np.uint8]:\n \"\"\"Returns the image of shape (H, W, 3 or 4).\n\n Args:\n image_idx: The image index in the dataset.\n \"\"\"\n image_filename = self._dataparser_outputs.image_filenames[image_idx]\n pil_image = Image.open(image_filename)\n if self.scale_factor != 1.0:\n width, height = pil_image.size\n newsize = (int(width * self.scale_factor), int(height * self.scale_factor))\n pil_image = pil_image.resize(newsize, resample=Image.BILINEAR)\n image = np.array(pil_image, dtype=\"uint8\") # shape is (h, w, 3 or 4)\n # mask_filename = str(image_filename).replace(\"dense/images\", \"masks\").replace(\".jpg\", \".npy\")\n # mask = np.load(mask_filename)\n # image = image * mask[..., 
None]\n\n assert len(image.shape) == 3\n assert image.dtype == np.uint8\n assert image.shape[2] in [3, 4], f\"Image shape of {image.shape} is in correct.\"\n return image\n\n def get_image(self, image_idx: int) -> TensorType[\"image_height\", \"image_width\", \"num_channels\"]:\n \"\"\"Returns a 3 channel image.\n\n Args:\n image_idx: The image index in the dataset.\n \"\"\"\n image = torch.from_numpy(self.get_numpy_image(image_idx).astype(\"float32\") / 255.0)\n if self._dataparser_outputs.alpha_color is not None and image.shape[-1] == 4:\n assert image.shape[-1] == 4\n image = image[:, :, :3] * image[:, :, -1:] + self._dataparser_outputs.alpha_color * (1.0 - image[:, :, -1:])\n else:\n image = image[:, :, :3]\n return image\n\n def get_data(self, image_idx: int) -> Dict:\n \"\"\"Returns the ImageDataset data as a dictionary.\n\n Args:\n image_idx: The image index in the dataset.\n \"\"\"\n if image_idx in self.image_cache:\n image = self.image_cache[image_idx]\n else:\n image = self.get_image(image_idx)\n self.image_cache[image_idx] = image\n\n data = {\"image_idx\": image_idx, 'image_filename': self._dataparser_outputs.image_filenames[image_idx].name}\n data[\"image\"] = image\n for _, data_func_dict in self._dataparser_outputs.additional_inputs.items():\n assert \"func\" in data_func_dict, \"Missing function to process data: specify `func` in `additional_inputs`\"\n func = data_func_dict[\"func\"]\n assert \"kwargs\" in data_func_dict, \"No data to process: specify `kwargs` in `additional_inputs`\"\n data.update(func(image_idx, **data_func_dict[\"kwargs\"]))\n if self.has_masks:\n mask_filepath = self._dataparser_outputs.mask_filenames[image_idx]\n data[\"mask\"] = get_image_mask_tensor_from_path(filepath=mask_filepath, scale_factor=self.scale_factor)\n metadata = self.get_metadata(data)\n data.update(metadata)\n return data\n\n # pylint: disable=no-self-use\n def get_metadata(self, data: Dict) -> Dict:\n \"\"\"Method that can be used to process any additional metadata that may be part of the model inputs.\n\n Args:\n image_idx: The image index in the dataset.\n \"\"\"\n del data\n return {}\n\n def __getitem__(self, image_idx: int) -> Dict:\n data = self.get_data(image_idx)\n return data" }, { "identifier": "EquirectangularPixelSampler", "path": "nerfstudio/data/pixel_samplers.py", "snippet": "class EquirectangularPixelSampler(PixelSampler): # pylint: disable=too-few-public-methods\n \"\"\"Samples 'pixel_batch's from 'image_batch's. 
Assumes images are\n equirectangular and the sampling is done uniformly on the sphere.\n\n Args:\n num_rays_per_batch: number of rays to sample per batch\n keep_full_image: whether or not to include a reference to the full image in returned batch\n \"\"\"\n\n # overrides base method\n def sample(self, image_batch: Dict):\n\n pixel_batch = collate_image_dataset_batch_equirectangular(\n image_batch, self.num_rays_per_batch, keep_full_image=self.keep_full_image\n )\n return pixel_batch" }, { "identifier": "PixelSampler", "path": "nerfstudio/data/pixel_samplers.py", "snippet": "class PixelSampler: # pylint: disable=too-few-public-methods\n \"\"\"Samples 'pixel_batch's from 'image_batch's.\n\n Args:\n num_rays_per_batch: number of rays to sample per batch\n keep_full_image: whether or not to include a reference to the full image in returned batch\n \"\"\"\n\n def __init__(self, num_rays_per_batch: int, keep_full_image: bool = False) -> None:\n self.num_rays_per_batch = num_rays_per_batch\n self.keep_full_image = keep_full_image\n\n def set_num_rays_per_batch(self, num_rays_per_batch: int):\n \"\"\"Set the number of rays to sample per batch.\n\n Args:\n num_rays_per_batch: number of rays to sample per batch\n \"\"\"\n self.num_rays_per_batch = num_rays_per_batch\n\n def sample(self, image_batch: Dict):\n \"\"\"Sample an image batch and return a pixel batch.\n\n Args:\n image_batch: batch of images to sample from\n \"\"\"\n if isinstance(image_batch[\"image\"], list):\n image_batch = dict(image_batch.items()) # copy the dictioary so we don't modify the original\n pixel_batch = collate_image_dataset_batch_list(\n image_batch, self.num_rays_per_batch, keep_full_image=self.keep_full_image\n )\n elif isinstance(image_batch[\"image\"], BasicImages):\n image_batch = dict(image_batch.items()) # copy the dictioary so we don't modify the original\n image_batch[\"image\"] = image_batch[\"image\"].images\n if \"mask\" in image_batch:\n image_batch[\"mask\"] = image_batch[\"mask\"].images\n # TODO clean up\n if \"fg_mask\" in image_batch:\n image_batch[\"fg_mask\"] = image_batch[\"fg_mask\"].images\n if \"sparse_pts\" in image_batch:\n image_batch[\"sparse_pts\"] = image_batch[\"sparse_pts\"].images\n pixel_batch = collate_image_dataset_batch_list(\n image_batch, self.num_rays_per_batch, keep_full_image=self.keep_full_image\n )\n elif isinstance(image_batch[\"image\"], torch.Tensor):\n pixel_batch = collate_image_dataset_batch(\n image_batch, self.num_rays_per_batch, keep_full_image=self.keep_full_image\n )\n else:\n raise ValueError(\"image_batch['image'] must be a list or torch.Tensor\")\n return pixel_batch" }, { "identifier": "CacheDataloader", "path": "nerfstudio/data/utils/dataloaders.py", "snippet": "class CacheDataloader(DataLoader):\n \"\"\"Collated image dataset that implements caching of default-pytorch-collatable data.\n Creates batches of the InputDataset return type.\n\n Args:\n dataset: Dataset to sample from.\n num_samples_to_collate: How many images to sample rays for each batch. -1 for all images.\n num_times_to_repeat_images: How often to collate new images. 
-1 to never pick new images.\n device: Device to perform computation.\n collate_fn: The function we will use to collate our training data\n \"\"\"\n\n def __init__(\n self,\n dataset: Dataset,\n num_images_to_sample_from: int = -1,\n num_times_to_repeat_images: int = -1,\n device: Union[torch.device, str] = \"cpu\",\n collate_fn=nerfstudio_collate,\n **kwargs,\n ):\n self.dataset = dataset\n super().__init__(dataset=dataset, **kwargs) # This will set self.dataset\n self.num_times_to_repeat_images = num_times_to_repeat_images\n self.cache_all_images = (num_images_to_sample_from == -1) or (num_images_to_sample_from >= len(self.dataset))\n self.num_images_to_sample_from = len(self.dataset) if self.cache_all_images else num_images_to_sample_from\n self.device = device\n self.collate_fn = collate_fn\n self.num_workers = kwargs.get(\"num_workers\", 0)\n\n self.num_repeated = self.num_times_to_repeat_images # starting value\n self.first_time = True\n\n self.cached_collated_batch = None\n if self.cache_all_images:\n CONSOLE.print(f\"Caching all {len(self.dataset)} images.\")\n if len(self.dataset) > 500:\n CONSOLE.print(\n \"[bold yellow]Warning: If you run out of memory, try reducing the number of images to sample from.\"\n )\n self.cached_collated_batch = self._get_collated_batch()\n elif self.num_times_to_repeat_images == -1:\n CONSOLE.print(\n f\"Caching {self.num_images_to_sample_from} out of {len(self.dataset)} images, without resampling.\"\n )\n else:\n CONSOLE.print(\n f\"Caching {self.num_images_to_sample_from} out of {len(self.dataset)} images, \"\n f\"resampling every {self.num_times_to_repeat_images} iters.\"\n )\n\n def __getitem__(self, idx):\n return self.dataset.__getitem__(idx)\n\n def _get_batch_list(self):\n \"\"\"Returns a list of batches from the dataset attribute.\"\"\"\n\n # todo: to enable local image caching, the samples indices should be consecutive rather than random. \n # each time when we switches the batch size, we should print out the image name lists. 
(in a sorting manner)\n # indices = random.sample(range(len(self.dataset)), k=self.num_images_to_sample_from)\n \n # consecutive sampling\n start_indices = random.sample(range(len(self.dataset)), 1)\n indices_circle_list = list(range(len(self.dataset))) + list(range(len(self.dataset)))\n indices = indices_circle_list[start_indices[0]:start_indices[0]+self.num_images_to_sample_from]\n random.shuffle(indices)\n\n # start_or_end_indices = random.sample(range(len(self.dataset) - self.num_images_to_sample_from + 1), 1)[0]\n # indices_list = list(range(len(self.dataset)))\n # indices = indices_list[start_or_end_indices:start_or_end_indices+self.num_images_to_sample_from]\n # # random.shuffle(indices)\n\n batch_list = []\n results = []\n\n num_threads = int(self.num_workers) * 4\n num_threads = min(num_threads, multiprocessing.cpu_count() - 1)\n num_threads = max(num_threads, 1)\n\n with concurrent.futures.ThreadPoolExecutor(max_workers=num_threads) as executor:\n for idx in indices:\n res = executor.submit(self.dataset.__getitem__, idx)\n results.append(res)\n\n for res in track(\n results, description=\"Loading data batch\", transient=True, disable=(self.num_images_to_sample_from == 1)\n ):\n batch_list.append(res.result())\n\n # print out filenames\n cached_image_filenames = [batch[\"image_filename\"] for batch in batch_list]\n sorted_cached_image_filenames = sorted(cached_image_filenames)\n CONSOLE.print(f\"New Loaded Image filenames: {sorted_cached_image_filenames}\")\n\n return batch_list\n\n def _get_collated_batch(self):\n \"\"\"Returns a collated batch.\"\"\"\n batch_list = self._get_batch_list()\n collated_batch = self.collate_fn(batch_list)\n collated_batch = get_dict_to_torch(collated_batch, device=self.device, exclude=[\"image\"])\n return collated_batch\n\n def __iter__(self):\n while True:\n if self.cache_all_images:\n collated_batch = self.cached_collated_batch\n elif self.first_time or (\n self.num_times_to_repeat_images != -1 and self.num_repeated >= self.num_times_to_repeat_images\n ):\n # trigger a reset\n self.num_repeated = 0\n collated_batch = self._get_collated_batch()\n # possibly save a cached item\n self.cached_collated_batch = collated_batch if self.num_times_to_repeat_images != 0 else None\n self.first_time = False\n else:\n collated_batch = self.cached_collated_batch\n self.num_repeated += 1\n yield collated_batch" }, { "identifier": "FixedIndicesEvalDataloader", "path": "nerfstudio/data/utils/dataloaders.py", "snippet": "class FixedIndicesEvalDataloader(EvalDataloader):\n \"\"\"Dataloader that returns a fixed set of indices.\n\n Args:\n input_dataset: InputDataset to load data from\n image_indices: List of image indices to load data from. 
If None, then use all images.\n device: Device to load data to\n \"\"\"\n\n def __init__(\n self,\n input_dataset: InputDataset,\n image_indices: Optional[Tuple[int]] = None,\n device: Union[torch.device, str] = \"cpu\",\n **kwargs,\n ):\n super().__init__(input_dataset, device, **kwargs)\n if image_indices is None:\n self.image_indices = list(range(len(input_dataset)))\n else:\n self.image_indices = image_indices\n self.count = 0\n\n def __iter__(self):\n self.count = 0\n return self\n\n def __next__(self):\n if self.count < len(self.image_indices):\n image_idx = self.image_indices[self.count]\n ray_bundle, batch = self.get_data_from_image_idx(image_idx)\n self.count += 1\n return ray_bundle, batch\n raise StopIteration" }, { "identifier": "RandIndicesEvalDataloader", "path": "nerfstudio/data/utils/dataloaders.py", "snippet": "class RandIndicesEvalDataloader(EvalDataloader):\n \"\"\"Dataloader that returns random images.\n\n Args:\n input_dataset: InputDataset to load data from\n device: Device to load data to\n \"\"\"\n\n def __init__(\n self,\n input_dataset: InputDataset,\n device: Union[torch.device, str] = \"cpu\",\n **kwargs,\n ):\n super().__init__(input_dataset, device, **kwargs)\n self.count = 0\n\n def __iter__(self):\n self.count = 0\n return self\n\n def __next__(self):\n if self.count < 1:\n image_indices = range(self.cameras.size)\n image_idx = random.choice(image_indices)\n ray_bundle, batch = self.get_data_from_image_idx(image_idx)\n self.count += 1\n return ray_bundle, batch\n raise StopIteration" }, { "identifier": "nerfstudio_collate", "path": "nerfstudio/data/utils/nerfstudio_collate.py", "snippet": "def nerfstudio_collate(\n batch, extra_mappings: Union[Dict[type, Callable], None] = None\n): # pylint: disable=too-many-return-statements\n r\"\"\"\n This is the default pytorch collate function, but with support for nerfstudio types. All documentation\n below is copied straight over from pytorch's default_collate function, python version 3.8.13,\n pytorch version '1.12.1+cu113'. Custom nerfstudio types are accounted for at the end, and extra\n mappings can be passed in to handle custom types. These mappings are from types: callable (types\n being like int or float or the return value of type(3.), etc). The only code before we parse for custom types that\n was changed from default pytorch was the addition of the extra_mappings argument, a find and replace operation\n from default_collate to nerfstudio_collate, and the addition of the nerfstudio_collate_err_msg_format variable.\n\n\n Function that takes in a batch of data and puts the elements within the batch\n into a tensor with an additional outer dimension - batch size. 
The exact output type can be\n a :class:`torch.Tensor`, a `Sequence` of :class:`torch.Tensor`, a\n Collection of :class:`torch.Tensor`, or left unchanged, depending on the input type.\n This is used as the default function for collation when\n `batch_size` or `batch_sampler` is defined in :class:`~torch.utils.data.DataLoader`.\n\n Here is the general input type (based on the type of the element within the batch) to output type mapping:\n\n * :class:`torch.Tensor` -> :class:`torch.Tensor` (with an added outer dimension batch size)\n * NumPy Arrays -> :class:`torch.Tensor`\n * `float` -> :class:`torch.Tensor`\n * `int` -> :class:`torch.Tensor`\n * `str` -> `str` (unchanged)\n * `bytes` -> `bytes` (unchanged)\n * `Mapping[K, V_i]` -> `Mapping[K, nerfstudio_collate([V_1, V_2, ...])]`\n * `NamedTuple[V1_i, V2_i, ...]` -> `NamedTuple[nerfstudio_collate([V1_1, V1_2, ...]),\n nerfstudio_collate([V2_1, V2_2, ...]), ...]`\n * `Sequence[V1_i, V2_i, ...]` -> `Sequence[nerfstudio_collate([V1_1, V1_2, ...]),\n nerfstudio_collate([V2_1, V2_2, ...]), ...]`\n\n Args:\n batch: a single batch to be collated\n\n Examples:\n >>> # Example with a batch of `int`s:\n >>> nerfstudio_collate([0, 1, 2, 3])\n tensor([0, 1, 2, 3])\n >>> # Example with a batch of `str`s:\n >>> nerfstudio_collate(['a', 'b', 'c'])\n ['a', 'b', 'c']\n >>> # Example with `Map` inside the batch:\n >>> nerfstudio_collate([{'A': 0, 'B': 1}, {'A': 100, 'B': 100}])\n {'A': tensor([ 0, 100]), 'B': tensor([ 1, 100])}\n >>> # Example with `NamedTuple` inside the batch:\n >>> Point = namedtuple('Point', ['x', 'y'])\n >>> nerfstudio_collate([Point(0, 0), Point(1, 1)])\n Point(x=tensor([0, 1]), y=tensor([0, 1]))\n >>> # Example with `Tuple` inside the batch:\n >>> nerfstudio_collate([(0, 1), (2, 3)])\n [tensor([0, 2]), tensor([1, 3])]\n >>> # Example with `List` inside the batch:\n >>> nerfstudio_collate([[0, 1], [2, 3]])\n [tensor([0, 2]), tensor([1, 3])]\n \"\"\"\n if extra_mappings is None:\n extra_mappings = {}\n elem = batch[0]\n elem_type = type(elem)\n if isinstance(elem, torch.Tensor): # pylint: disable=no-else-return\n out = None\n if torch.utils.data.get_worker_info() is not None:\n # If we're in a background process, concatenate directly into a\n # shared memory tensor to avoid an extra copy\n numel = sum(x.numel() for x in batch)\n storage = elem.storage()._new_shared(numel, device=elem.device) # pylint: disable=protected-access\n out = elem.new(storage).resize_(len(batch), *list(elem.size()))\n return torch.stack(batch, 0, out=out)\n elif elem_type.__module__ == \"numpy\" and elem_type.__name__ != \"str_\" and elem_type.__name__ != \"string_\":\n # pylint: disable=no-else-return, consider-using-in\n if elem_type.__name__ == \"ndarray\" or elem_type.__name__ == \"memmap\":\n # array of string classes and object\n if np_str_obj_array_pattern.search(elem.dtype.str) is not None:\n raise TypeError(NERFSTUDIO_COLLATE_ERR_MSG_FORMAT.format(elem.dtype))\n\n return nerfstudio_collate([torch.as_tensor(b) for b in batch], extra_mappings=extra_mappings)\n elif elem.shape == (): # scalars\n return torch.as_tensor(batch)\n elif isinstance(elem, float):\n return torch.tensor(batch, dtype=torch.float64)\n elif isinstance(elem, int):\n return torch.tensor(batch)\n elif isinstance(elem, string_classes):\n return batch\n elif isinstance(elem, collections.abc.Mapping):\n try:\n return elem_type(\n {key: nerfstudio_collate([d[key] for d in batch], extra_mappings=extra_mappings) for key in elem}\n )\n except TypeError:\n # The mapping type may not support 
`__init__(iterable)`.\n return {key: nerfstudio_collate([d[key] for d in batch], extra_mappings=extra_mappings) for key in elem}\n elif isinstance(elem, tuple) and hasattr(elem, \"_fields\"): # namedtuple\n return elem_type(*(nerfstudio_collate(samples, extra_mappings=extra_mappings) for samples in zip(*batch)))\n elif isinstance(elem, collections.abc.Sequence):\n # check to make sure that the elements in batch have consistent size\n it = iter(batch)\n elem_size = len(next(it))\n if not all(len(elem) == elem_size for elem in it):\n raise RuntimeError(\"each element in list of batch should be of equal size\")\n transposed = list(zip(*batch)) # It may be accessed twice, so we use a list.\n\n if isinstance(elem, tuple):\n return [\n nerfstudio_collate(samples, extra_mappings=extra_mappings) for samples in transposed\n ] # Backwards compatibility.\n else:\n try:\n return elem_type([nerfstudio_collate(samples, extra_mappings=extra_mappings) for samples in transposed])\n except TypeError:\n # The sequence type may not support `__init__(iterable)` (e.g., `range`).\n return [nerfstudio_collate(samples, extra_mappings=extra_mappings) for samples in transposed]\n\n # NerfStudio types supported below\n\n elif isinstance(elem, Cameras):\n # If a camera, just concatenate along the batch dimension. In the future, this may change to stacking\n assert all((isinstance(cam, Cameras) for cam in batch))\n assert all((cam.distortion_params is None for cam in batch)) or all(\n (cam.distortion_params is not None for cam in batch)\n ), \"All cameras must have distortion parameters or none of them should have distortion parameters.\\\n Generalized batching will be supported in the future.\"\n\n # If no batch dimension exists, then we need to stack everything and create a batch dimension on 0th dim\n if elem.shape == ():\n op = torch.stack\n # If batch dimension exists, then we need to concatenate along the 0th dimension\n else:\n op = torch.cat\n\n return Cameras(\n op([cameras.camera_to_worlds for cameras in batch], dim=0),\n op([cameras.fx for cameras in batch], dim=0),\n op([cameras.fy for cameras in batch], dim=0),\n op([cameras.cx for cameras in batch], dim=0),\n op([cameras.cy for cameras in batch], dim=0),\n height=op([cameras.height for cameras in batch], dim=0),\n width=op([cameras.width for cameras in batch], dim=0),\n distortion_params=op(\n [\n cameras.distortion_params\n if cameras.distortion_params is not None\n else torch.zeros_like(cameras.distortion_params)\n for cameras in batch\n ],\n dim=0,\n ),\n camera_type=op([cameras.camera_type for cameras in batch], dim=0),\n times=torch.stack(\n [cameras.times if cameras.times is not None else -torch.ones_like(cameras.times) for cameras in batch],\n dim=0,\n ),\n )\n\n elif isinstance(elem, BasicImages):\n assert all((isinstance(elem, BasicImages) for elem in batch))\n all_images = []\n for images in batch:\n all_images.extend(images.images)\n return BasicImages(all_images)\n\n for type_key in extra_mappings:\n if isinstance(elem, type_key):\n return extra_mappings[type_key](batch)\n\n raise TypeError(NERFSTUDIO_COLLATE_ERR_MSG_FORMAT.format(elem_type))" }, { "identifier": "TrainingCallback", "path": "nerfstudio/engine/callbacks.py", "snippet": "class TrainingCallback:\n \"\"\"Callback class used during training.\n The function 'func' with 'args' and 'kwargs' will be called every 'update_every_num_iters' training iterations,\n including at iteration 0. 
The function is called after the training iteration.\n\n Args:\n where_to_run: List of locations for when to run callbak (before/after iteration)\n func: The function that will be called.\n update_every_num_iters: How often to call the function `func`.\n iters: Tuple of iteration steps to perform callback\n args: args for the function 'func'.\n kwargs: kwargs for the function 'func'.\n \"\"\"\n\n def __init__(\n self,\n where_to_run: List[TrainingCallbackLocation],\n func: Callable,\n update_every_num_iters: Optional[int] = None,\n iters: Optional[Tuple[int, ...]] = None,\n args: Optional[List] = None,\n kwargs: Optional[Dict] = None,\n ):\n assert (\n \"step\" in signature(func).parameters.keys()\n ), f\"'step: int' must be an argument in the callback function 'func': {func.__name__}\"\n self.where_to_run = where_to_run\n self.update_every_num_iters = update_every_num_iters\n self.iters = iters\n self.func = func\n self.args = args if args is not None else []\n self.kwargs = kwargs if kwargs is not None else {}\n\n def run_callback(self, step: int):\n \"\"\"Callback to run after training step\n\n Args:\n step: current iteration step\n \"\"\"\n if self.update_every_num_iters is not None:\n if step % self.update_every_num_iters == 0:\n self.func(*self.args, **self.kwargs, step=step)\n elif self.iters is not None:\n if step in self.iters:\n self.func(*self.args, **self.kwargs, step=step)\n\n def run_callback_at_location(self, step: int, location: TrainingCallbackLocation):\n \"\"\"Runs the callback if it's supposed to be run at the given location.\n\n Args:\n step: current iteration step\n location: when to run callback (before/after iteration)\n \"\"\"\n if location in self.where_to_run:\n self.run_callback(step=step)" }, { "identifier": "TrainingCallbackAttributes", "path": "nerfstudio/engine/callbacks.py", "snippet": "class TrainingCallbackAttributes:\n \"\"\"Attributes that can be used to configure training callbacks.\n The callbacks can be specified in the Dataloader or Model implementations.\n Instead of providing access to the entire Trainer object, we only provide these attributes.\n This should be least prone to errors and fairly clean from a user perspective.\"\"\"\n\n # TODO(ethan): type this without circular imports\n optimizers: Optional[InitVar]\n \"\"\"optimizers for training\"\"\"\n grad_scaler: Optional[InitVar]\n \"\"\"gradient scalers\"\"\"\n pipeline: Optional[InitVar]\n \"\"\"reference to training pipeline\"\"\"\n config: TrainerConfig\n \"\"\"the trainer config\"\"\"" }, { "identifier": "RayGenerator", "path": "nerfstudio/model_components/ray_generators.py", "snippet": "class RayGenerator(nn.Module):\n \"\"\"torch.nn Module for generating rays.\n This class is the interface between the scene's cameras/camera optimizer and the ray sampler.\n\n Args:\n cameras: Camera objects containing camera info.\n pose_optimizer: pose optimization module, for optimizing noisy camera intrisics/extrinsics.\n \"\"\"\n\n def __init__(self, cameras: Cameras, pose_optimizer: CameraOptimizer) -> None:\n super().__init__()\n self.cameras = cameras\n self.pose_optimizer = pose_optimizer\n self.image_coords = nn.Parameter(cameras.get_image_coords(), requires_grad=False)\n\n def forward(self, ray_indices: TensorType[\"num_rays\", 3]) -> RayBundle:\n \"\"\"Index into the cameras to generate the rays.\n\n Args:\n ray_indices: Contains camera, row, and col indicies for target rays.\n \"\"\"\n c = ray_indices[:, 0] # camera indices\n y = ray_indices[:, 1] # row indices\n x = ray_indices[:, 2] # col 
indices\n coords = self.image_coords[y, x]\n\n camera_opt_to_camera = self.pose_optimizer(c)\n\n ray_bundle = self.cameras.generate_rays(\n camera_indices=c.unsqueeze(-1),\n coords=coords,\n camera_opt_to_camera=camera_opt_to_camera,\n )\n return ray_bundle" }, { "identifier": "BasicImages", "path": "nerfstudio/utils/images.py", "snippet": "class BasicImages:\n \"\"\"This is a very primitive struct for holding images, especially for when these images\n are of different heights / widths.\n\n The purpose of this is to have a special struct wrapping around a list so that the\n nerfstudio_collate fn and other parts of the code recognise this as a struct to leave alone\n instead of reshaping or concatenating into a single tensor (since this will likely be used\n for cases where we have images of different sizes and shapes).\n\n This only has one batch dimension and will likely be replaced down the line with some\n TensorDataclass alternative that supports arbitrary batches.\n \"\"\"\n\n def __init__(self, images: List):\n assert isinstance(images, List)\n assert not images or isinstance(\n images[0], torch.Tensor\n ), f\"Input should be a list of tensors, not {type(images[0]) if isinstance(images, List) else type(images)}\"\n self.images = images\n\n def to(self, device):\n \"\"\"Move the images to the given device.\"\"\"\n assert isinstance(device, torch.device)\n return BasicImages([image.to(device) for image in self.images])" }, { "identifier": "IterableWrapper", "path": "nerfstudio/utils/misc.py", "snippet": "class IterableWrapper: # pylint: disable=too-few-public-methods\n \"\"\"A helper that will allow an instance of a class to return multiple kinds of iterables bound\n to different functions of that class.\n\n To use this, take an instance of a class. From that class, pass in the <instance>.<new_iter_function>\n and <instance>.<new_next_function> to the IterableWrapper constructor. By passing in the instance's\n functions instead of just the class's functions, the self argument should automatically be accounted\n for.\n\n Args:\n new_iter: function that will be called instead as the __iter__() function\n new_next: function that will be called instead as the __next__() function\n length: length of the iterable. If -1, the iterable will be infinite.\n\n\n Attributes:\n new_iter: object's pointer to the function we are calling for __iter__()\n new_next: object's pointer to the function we are calling for __next__()\n length: length of the iterable. If -1, the iterable will be infinite.\n i: current index of the iterable.\n\n \"\"\"\n\n i: int\n\n def __init__(self, new_iter: Callable, new_next: Callable, length: int = -1):\n self.new_iter = new_iter\n self.new_next = new_next\n self.length = length\n\n def __next__(self):\n if self.length != -1 and self.i >= self.length:\n raise StopIteration\n self.i += 1\n return self.new_next()\n\n def __iter__(self):\n self.new_iter()\n self.i = 0\n return self" } ]
from abc import abstractmethod
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Tuple, Type, Union
from rich.progress import Console
from torch import nn
from torch.nn import Parameter
from torch.utils.data import Dataset
from torch.utils.data.distributed import DistributedSampler
from typing_extensions import Literal
from nerfstudio.cameras.camera_optimizers import CameraOptimizerConfig
from nerfstudio.cameras.cameras import CameraType
from nerfstudio.cameras.rays import RayBundle
from nerfstudio.configs.base_config import InstantiateConfig
from nerfstudio.data.dataparsers.nelfpro_dataparser import NeLFProDataParserConfig
from nerfstudio.data.datasets.base_dataset import GeneralizedDataset, InputDataset
from nerfstudio.data.pixel_samplers import EquirectangularPixelSampler, PixelSampler
from nerfstudio.data.utils.dataloaders import (
    CacheDataloader,
    FixedIndicesEvalDataloader,
    RandIndicesEvalDataloader,
)
from nerfstudio.data.utils.nerfstudio_collate import nerfstudio_collate
from nerfstudio.engine.callbacks import TrainingCallback, TrainingCallbackAttributes
from nerfstudio.model_components.ray_generators import RayGenerator
from nerfstudio.utils.images import BasicImages
from nerfstudio.utils.misc import IterableWrapper
import torch
import tyro
10,883
# Copyright 2022 The Nerfstudio Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Datamanager. """ from __future__ import annotations CONSOLE = Console(width=120) AnnotatedDataParserUnion = tyro.conf.OmitSubcommandPrefixes[ # Omit prefixes of flags in subcommands. tyro.extras.subcommand_type_from_defaults( { # "placeholder": NeLFProDataParserConfig(), # placeholder for default (not used), make sure ns-train cmd works.
# Copyright 2022 The Nerfstudio Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Datamanager. """ from __future__ import annotations CONSOLE = Console(width=120) AnnotatedDataParserUnion = tyro.conf.OmitSubcommandPrefixes[ # Omit prefixes of flags in subcommands. tyro.extras.subcommand_type_from_defaults( { # "placeholder": NeLFProDataParserConfig(), # placeholder for default (not used), make sure ns-train cmd works.
"nelfpro-data": NeLFProDataParserConfig(),
4
2023-12-15 20:07:22+00:00
16k
MingtaoGuo/AnimateAnyone_unofficial
ldm/models/diffusion/ddpm.py
[ { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('font/DejaVuSans.ttf', size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "exists", "path": "ldm/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "default", "path": "ldm/util.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" }, { "identifier": "ismap", "path": "ldm/util.py", "snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)" }, { "identifier": "isimage", "path": "ldm/util.py", "snippet": "def isimage(x):\n if not isinstance(x,torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)" }, { "identifier": "mean_flat", "path": "ldm/util.py", "snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))" }, { "identifier": "count_params", "path": "ldm/util.py", "snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.\")\n return total_params" }, { "identifier": "instantiate_from_config", "path": "ldm/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "LitEma", "path": "ldm/modules/ema.py", "snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n\n self.m_name2s_name = {}\n self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))\n self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int) if use_num_upates\n else torch.tensor(-1, dtype=torch.int))\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n # remove as '.'-character is not allowed in buffers\n s_name = name.replace('.', '')\n self.m_name2s_name.update({name: s_name})\n self.register_buffer(s_name, p.clone().detach().data)\n\n self.collected_params = []\n\n def reset_num_updates(self):\n del self.num_updates\n self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int))\n\n def forward(self, model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 
1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)" }, { "identifier": "normal_kl", "path": "ldm/modules/distributions/distributions.py", "snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. 
Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )" }, { "identifier": "DiagonalGaussianDistribution", "path": "ldm/modules/distributions/distributions.py", "snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.])\n else:\n if other is None:\n return 0.5 * torch.sum(torch.pow(self.mean, 2)\n + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3])\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var - 1.0 - self.logvar + other.logvar,\n dim=[1, 2, 3])\n\n def nll(self, sample, dims=[1,2,3]):\n if self.deterministic:\n return torch.Tensor([0.])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims)\n\n def mode(self):\n return self.mean" }, { "identifier": "IdentityFirstStage", "path": "ldm/models/autoencoder.py", "snippet": "class IdentityFirstStage(torch.nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x" }, { "identifier": "AutoencoderKL", "path": "ldm/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ema_decay=None,\n learn_logvar=False\n ):\n super().__init__()\n self.learn_logvar = learn_logvar\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2*ddconfig[\"z_channels\"], 2*embed_dim, 1)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig[\"z_channels\"], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels)==int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n\n self.use_ema = ema_decay is not None\n if self.use_ema:\n self.ema_decay = ema_decay\n assert 0. 
< ema_decay < 1.\n self.model_ema = LitEma(self, decay=ema_decay)\n print(f\"Keeping EMAs of {len(list(self.model_ema.buffers()))}.\")\n\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n\n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n @contextmanager\n def ema_scope(self, context=None):\n if self.use_ema:\n self.model_ema.store(self.parameters())\n self.model_ema.copy_to(self)\n if context is not None:\n print(f\"{context}: Switched to EMA weights\")\n try:\n yield None\n finally:\n if self.use_ema:\n self.model_ema.restore(self.parameters())\n if context is not None:\n print(f\"{context}: Restored training weights\")\n\n def on_train_batch_end(self, *args, **kwargs):\n if self.use_ema:\n self.model_ema(self)\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n self.log(\"aeloss\", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n\n self.log(\"discloss\", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return discloss\n\n def validation_step(self, batch, batch_idx):\n log_dict = self._validation_step(batch, batch_idx)\n with self.ema_scope():\n log_dict_ema = self._validation_step(batch, batch_idx, postfix=\"_ema\")\n return log_dict\n\n def _validation_step(self, batch, batch_idx, postfix=\"\"):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\"+postfix)\n\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\"+postfix)\n\n self.log(f\"val{postfix}/rec_loss\", log_dict_ae[f\"val{postfix}/rec_loss\"])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n def 
configure_optimizers(self):\n lr = self.learning_rate\n ae_params_list = list(self.encoder.parameters()) + list(self.decoder.parameters()) + list(\n self.quant_conv.parameters()) + list(self.post_quant_conv.parameters())\n if self.learn_logvar:\n print(f\"{self.__class__.__name__}: Learning logvar\")\n ae_params_list.append(self.loss.logvar)\n opt_ae = torch.optim.Adam(ae_params_list,\n lr=lr, betas=(0.5, 0.9))\n opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),\n lr=lr, betas=(0.5, 0.9))\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, log_ema=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n if log_ema or self.use_ema:\n with self.ema_scope():\n xrec_ema, posterior_ema = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec_ema.shape[1] > 3\n xrec_ema = self.to_rgb(xrec_ema)\n log[\"samples_ema\"] = self.decode(torch.randn_like(posterior_ema.sample()))\n log[\"reconstructions_ema\"] = xrec_ema\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.*(x-x.min())/(x.max()-x.min()) - 1.\n return x" }, { "identifier": "make_beta_schedule", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):\n if schedule == \"linear\":\n betas = (\n torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n elif schedule == \"sqrt\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()" }, { "identifier": "extract_into_tensor", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" }, { "identifier": "noise_like", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "DDIMSampler", "path": "ldm/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n 
self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n dynamic_threshold=None,\n ucg_schedule=None,\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n ctmp = conditioning[list(conditioning.keys())[0]]\n while isinstance(ctmp, list): ctmp = ctmp[0]\n cbs = ctmp.shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n elif isinstance(conditioning, list):\n for ctmp in conditioning:\n if ctmp.shape[0] != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n ucg_schedule=ucg_schedule\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None,\n ucg_schedule=None):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. 
- mask) * img\n\n if ucg_schedule is not None:\n assert len(ucg_schedule) == len(time_range)\n unconditional_guidance_scale = ucg_schedule[i]\n\n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,\n dynamic_threshold=None):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n model_output = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n if isinstance(c, dict):\n assert isinstance(unconditional_conditioning, dict)\n c_in = dict()\n for k in c:\n if isinstance(c[k], list):\n c_in[k] = [torch.cat([\n unconditional_conditioning[k][i],\n c[k][i]]) for i in range(len(c[k]))]\n else:\n c_in[k] = torch.cat([\n unconditional_conditioning[k],\n c[k]])\n elif isinstance(c, list):\n c_in = list()\n assert isinstance(unconditional_conditioning, list)\n for i in range(len(c)):\n c_in.append(torch.cat([unconditional_conditioning[i], c[i]]))\n else:\n c_in = torch.cat([unconditional_conditioning, c])\n model_uncond, model_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond)\n\n if self.model.parameterization == \"v\":\n e_t = self.model.predict_eps_from_z_and_v(x, t, model_output)\n else:\n e_t = model_output\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\", 'not implemented'\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n if self.model.parameterization != \"v\":\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n else:\n pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n raise 
NotImplementedError()\n\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None,\n unconditional_guidance_scale=1.0, unconditional_conditioning=None, callback=None):\n num_reference_steps = self.ddpm_num_timesteps if use_original_steps else self.ddim_timesteps.shape[0]\n\n assert t_enc <= num_reference_steps\n num_steps = t_enc\n\n if use_original_steps:\n alphas_next = self.alphas_cumprod[:num_steps]\n alphas = self.alphas_cumprod_prev[:num_steps]\n else:\n alphas_next = self.ddim_alphas[:num_steps]\n alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])\n\n x_next = x0\n intermediates = []\n inter_steps = []\n for i in tqdm(range(num_steps), desc='Encoding Image'):\n t = torch.full((x0.shape[0],), i, device=self.model.device, dtype=torch.long)\n if unconditional_guidance_scale == 1.:\n noise_pred = self.model.apply_model(x_next, t, c)\n else:\n assert unconditional_conditioning is not None\n e_t_uncond, noise_pred = torch.chunk(\n self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)),\n torch.cat((unconditional_conditioning, c))), 2)\n noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond)\n\n xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next\n weighted_noise_pred = alphas_next[i].sqrt() * (\n (1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred\n x_next = xt_weighted + weighted_noise_pred\n if return_intermediates and i % (\n num_steps // return_intermediates) == 0 and i < num_steps - 1:\n intermediates.append(x_next)\n inter_steps.append(i)\n elif return_intermediates and i >= num_steps - 2:\n intermediates.append(x_next)\n inter_steps.append(i)\n if callback: callback(i)\n\n out = {'x_encoded': x_next, 'intermediate_steps': inter_steps}\n if return_intermediates:\n out.update({'intermediates': intermediates})\n return x_next, out\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +\n extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)\n\n @torch.no_grad()\n def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,\n use_original_steps=False, callback=None):\n\n timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='Decoding image', total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((x_latent.shape[0],), step, 
device=x_latent.device, dtype=torch.long)\n x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n if callback: callback(i)\n return x_dec" } ]
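Several of the helpers listed in this context, in particular make_beta_schedule and extract_into_tensor, implement the gather-and-broadcast pattern that the DDPM code below relies on for every q_sample and posterior computation. A minimal sketch of that pattern follows, assuming the repo's ldm package is importable; the shapes in the comments follow directly from the definitions shown above.

# Sketch: gather per-sample schedule coefficients by timestep and broadcast
# them over an image batch, using the helpers defined in the context above.
import torch
from ldm.modules.diffusionmodules.util import extract_into_tensor, make_beta_schedule

betas = torch.tensor(make_beta_schedule("linear", n_timestep=1000))  # shape (1000,), float64
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)

x_start = torch.randn(4, 3, 64, 64)      # toy image batch
t = torch.randint(0, 1000, (4,))         # one diffusion step per sample
coef = extract_into_tensor(alphas_cumprod.sqrt(), t, x_start.shape)
print(coef.shape)                        # torch.Size([4, 1, 1, 1]); broadcasts over C, H, W
x_t_mean = coef * x_start                # mean of q(x_t | x_0), up to dtype promotion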
import torch import torch.nn as nn import numpy as np import pytorch_lightning as pl import itertools from torch.optim.lr_scheduler import LambdaLR from einops import rearrange, repeat from contextlib import contextmanager, nullcontext from functools import partial from tqdm import tqdm from torchvision.utils import make_grid from pytorch_lightning.utilities.distributed import rank_zero_only from omegaconf import ListConfig from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config from ldm.modules.ema import LitEma from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution from ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like from ldm.models.diffusion.ddim import DDIMSampler
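Two of the smaller ldm.util helpers imported here, exists and default, drive the `noise = default(noise, lambda: torch.randn_like(x_start))` idiom used in q_sample and p_losses further down. A tiny sketch of their behaviour, taken directly from the definitions in the context list above.

# default(val, d) returns val when it is not None; otherwise it returns d,
# calling d first if d is a function (see the ldm.util snippets in the context).
import torch
from ldm.util import default, exists

x_start = torch.zeros(2, 3, 8, 8)
noise = None
noise = default(noise, lambda: torch.randn_like(x_start))  # fallback lambda is evaluated
assert exists(noise) and noise.shape == x_start.shape
print(default(3.14, lambda: 0.0))  # 3.14: existing values pass through unchanged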
12,462
return fold, unfold, normalization, weighting @torch.no_grad() def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None, return_x=False): x = super().get_input(batch, k) if bs is not None: x = x[:bs] x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() if self.model.conditioning_key is not None and not self.force_null_conditioning: if cond_key is None: cond_key = self.cond_stage_key if cond_key != self.first_stage_key: if cond_key in ['caption', 'coordinates_bbox', 'txt', 'vision']: xc = batch[cond_key] xc = rearrange(xc, 'b h w c -> b c h w') elif cond_key in ['class_label', 'cls']: xc = batch else: xc = super().get_input(batch, cond_key).to(self.device) else: xc = x if not self.cond_stage_trainable or force_c_encode: if isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: c = self.get_learned_conditioning(xc.to(self.device)) else: c = xc if bs is not None: c = c[:bs] if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) ckey = __conditioning_keys__[self.model.conditioning_key] c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y} else: c = None xc = None if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) c = {'pos_x': pos_x, 'pos_y': pos_y} out = [z, c] if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_x: out.extend([x]) if return_original_cond: out.append(xc) return out @torch.no_grad() def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z return self.first_stage_model.decode(z) @torch.no_grad() def encode_first_stage(self, x): return self.first_stage_model.encode(x) def shared_step(self, batch, **kwargs): x, c = self.get_input(batch, self.first_stage_key) loss = self(x, c) return loss def forward(self, x, c, *args, **kwargs): t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() if self.model.conditioning_key is not None: assert c is not None if self.cond_stage_trainable: c = self.get_learned_conditioning(c) if self.shorten_cond_schedule: # TODO: drop this option tc = self.cond_ids[t].to(self.device) c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) return self.p_losses(x, c, t, *args, **kwargs) def apply_model(self, x_noisy, t, cond, return_ids=False): if isinstance(cond, dict): # hybrid case, cond is expected to be a dict pass else: if not isinstance(cond, list): cond = [cond] key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn' cond = {key: cond} x_recon = self.model(x_noisy, t, **cond) if isinstance(x_recon, tuple) and not return_ids: return x_recon[0] else: return x_recon def _predict_eps_from_xstart(self, x_t, t, pred_xstart): return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \ extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) def _prior_bpd(self, x_start): """ Get the prior KL term for the variational lower-bound, measured in bits-per-dim. This term can't be optimized, as it only depends on the encoder. :param x_start: the [N x C x ...] tensor of inputs. 
:return: a batch of [N] KL values (in bits), one per batch element. """ batch_size = x_start.shape[0] t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)
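The cropped_code cuts _prior_bpd off right after the q(x_T | x_0) moments are computed. In the guided-diffusion implementation that the docstring links to, such a method conventionally finishes by taking the KL against a standard normal and converting it to bits per dimension. The standalone sketch below shows that conventional continuation; it is a hedged reconstruction, not necessarily this repository's exact next lines, and it reuses normal_kl and mean_flat from the context list.

# Hedged sketch (not verbatim repo code): the conventional prior-bpd ending
# from the guided-diffusion lineage, KL(q(x_T | x_0) || N(0, I)) in bits/dim.
import numpy as np
import torch
from ldm.modules.distributions.distributions import normal_kl
from ldm.util import mean_flat


def prior_bpd_from_moments(qt_mean: torch.Tensor, qt_log_variance: torch.Tensor) -> torch.Tensor:
    """Bits-per-dim of the prior term, given the moments of q(x_T | x_0)."""
    kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)
    return mean_flat(kl_prior) / np.log(2.0)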
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., make_it_fit=False, ucg_training=None, reset_ema=False, reset_num_ema_updates=False, ): super().__init__() assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if reset_ema: assert exists(ckpt_path) if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) if reset_ema: assert self.use_ema print(f"Resetting ema to pure model weights. 
This is useful when restoring from an ema-only checkpoint.") self.model_ema = LitEma(self.model) if reset_num_ema_updates: print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ") assert self.use_ema self.model_ema.reset_num_updates() self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) else: self.register_buffer('logvar', logvar) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer('betas', to_torch(betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer('posterior_mean_coef1', to_torch( betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) self.register_buffer('posterior_mean_coef2', to_torch( (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) if self.parameterization == "eps": lvlb_weights = self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == "x0": lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. 
* 1 - torch.Tensor(alphas_cumprod)) elif self.parameterization == "v": lvlb_weights = torch.ones_like(self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))) else: raise NotImplementedError("mu not supported") lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] if self.make_it_fit: n_params = len([name for name, _ in itertools.chain(self.named_parameters(), self.named_buffers())]) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys:\n {missing}") if len(unexpected) > 0: print(f"\nUnexpected Keys:\n {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. 
""" mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def predict_start_from_z_and_v(self, x_t, t, v): # self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) # self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * v ) def predict_eps_from_z_and_v(self, x_t, t, v): return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * v + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * x_t ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1., 1.) 
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop((batch_size, channels, image_size, image_size), return_intermediates=return_intermediates) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) def get_v(self, x, noise, t): return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x.shape) * noise - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x.shape) * x ) def get_loss(self, pred, target, mean=True): if self.loss_type == 'l1': loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == 'l2': if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction='none') else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = self.model(x_noisy, t) loss_dict = {} if self.parameterization == "eps": target = noise elif self.parameterization == "x0": target = x_start elif self.parameterization == "v": target = self.get_v(x_start, noise, t) else: raise NotImplementedError(f"Parameterization {self.parameterization} not yet supported") loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = 'train' if self.training else 'val' loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f'{log_prefix}/loss': loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: 
x = x[..., None] x = rearrange(x, 'b h w c -> b c h w') x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x) return loss, loss_dict def training_step(self, batch, batch_idx): for k in self.ucg_training: p = self.ucg_training[k]["p"] val = self.ucg_training[k]["val"] if val is None: val = "" for i in range(len(batch[k])): if self.ucg_prng.choice(2, p=[1 - p, p]): batch[k][i] = val loss, loss_dict = self.shared_step(batch) self.log_dict(loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True) self.log("global_step", self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False) if self.use_scheduler: lr = self.optimizers().param_groups[0]['lr'] self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False) return loss @torch.no_grad() def validation_step(self, batch, batch_idx): _, loss_dict_no_ema = self.shared_step(batch) with self.ema_scope(): _, loss_dict_ema = self.shared_step(batch) loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema} self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log["inputs"] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) diffusion_row.append(x_noisy) log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope("Plotting"): samples, denoise_row = self.sample(batch_size=N, return_intermediates=True) log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__(self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, force_null_conditioning=False, *args, **kwargs): self.force_null_conditioning = force_null_conditioning self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs['timesteps'] # for backwards compatibility after implementation of DiffusionWrapper if 
conditioning_key is None: conditioning_key = 'concat' if concat_mode else 'crossattn' if cond_stage_config == '__is_unconditional__' and not self.force_null_conditioning: conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) reset_ema = kwargs.pop("reset_ema", False) reset_num_ema_updates = kwargs.pop("reset_num_ema_updates", False) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer('scale_factor', torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True if reset_ema: assert self.use_ema print( f"Resetting ema to pure model weights. This is useful when restoring from an ema-only checkpoint.") self.model_ema = LitEma(self.model) if reset_num_ema_updates: print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ") assert self.use_ema self.model_ema.reset_num_updates() def make_cond_schedule(self, ): self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long) ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long() self.cond_ids[:self.num_timesteps_cond] = ids @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx, dataloader_idx): # only for very first batch if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt: assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously' # set rescale weight to 1./std of encodings print("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() del self.scale_factor self.register_buffer('scale_factor', 1. 
/ z.flatten().std()) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING STD-RESCALING ###") def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config): model = instantiate_from_config(config) self.first_stage_model = model.eval() self.first_stage_model.train = disabled_train for param in self.first_stage_model.parameters(): param.requires_grad = False def instantiate_cond_stage(self, config): if not self.cond_stage_trainable: if config == "__is_first_stage__": print("Using first stage also as cond stage.") self.cond_stage_model = self.first_stage_model elif config == "__is_unconditional__": print(f"Training {self.__class__.__name__} as an unconditional model.") self.cond_stage_model = None # self.be_unconditional = True else: model = instantiate_from_config(config) self.cond_stage_model = model.eval() self.cond_stage_model.train = disabled_train for param in self.cond_stage_model.parameters(): param.requires_grad = False else: assert config != '__is_first_stage__' assert config != '__is_unconditional__' model = instantiate_from_config(config) self.cond_stage_model = model def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False): denoise_row = [] for zd in tqdm(samples, desc=desc): denoise_row.append(self.decode_first_stage(zd.to(self.device), force_not_quantize=force_no_decoder_quantization)) n_imgs_per_row = len(denoise_row) denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid def get_first_stage_encoding(self, encoder_posterior): if isinstance(encoder_posterior, DiagonalGaussianDistribution): z = encoder_posterior.sample() elif isinstance(encoder_posterior, torch.Tensor): z = encoder_posterior else: raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented") return self.scale_factor * z def get_learned_conditioning(self, c): if self.cond_stage_forward is None: if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode): c = self.cond_stage_model.encode(c) if isinstance(c, DiagonalGaussianDistribution): c = c.mode() else: c = self.cond_stage_model(c) else: assert hasattr(self.cond_stage_model, self.cond_stage_forward) c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) return c def meshgrid(self, h, w): y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) arr = torch.cat([y, x], dim=-1) return arr def delta_border(self, h, w): """ :param h: height :param w: width :return: normalized distance to image border, wtith min distance = 0 at border and max dist = 0.5 at image center """ lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) arr = self.meshgrid(h, w) / lower_right_corner dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0] edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0] return edge_dist def get_weighting(self, h, w, Ly, Lx, 
device): weighting = self.delta_border(h, w) weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"], self.split_input_params["clip_max_weight"], ) weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device) if self.split_input_params["tie_braker"]: L_weighting = self.delta_border(Ly, Lx) L_weighting = torch.clip(L_weighting, self.split_input_params["clip_min_tie_weight"], self.split_input_params["clip_max_tie_weight"]) L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) weighting = weighting * L_weighting return weighting def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code """ :param x: img of size (bs, c, h, w) :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) """ bs, nc, h, w = x.shape # number of crops in image Ly = (h - kernel_size[0]) // stride[0] + 1 Lx = (w - kernel_size[1]) // stride[1] + 1 if uf == 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) elif uf > 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), dilation=1, padding=0, stride=(stride[0] * uf, stride[1] * uf)) fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2) weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx)) elif df > 1 and uf == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df), dilation=1, padding=0, stride=(stride[0] // df, stride[1] // df)) fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2) weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx)) else: raise NotImplementedError return fold, unfold, normalization, weighting @torch.no_grad() def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None, return_x=False): x = super().get_input(batch, k) if bs is not None: x = x[:bs] x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() if self.model.conditioning_key is not None and not self.force_null_conditioning: if cond_key is None: cond_key = self.cond_stage_key if cond_key != self.first_stage_key: if cond_key in ['caption', 'coordinates_bbox', 'txt', 'vision']: xc = batch[cond_key] xc = rearrange(xc, 'b h w c -> b c h w') elif cond_key in ['class_label', 'cls']: xc = batch else: xc = super().get_input(batch, cond_key).to(self.device) else: xc = x if not 
self.cond_stage_trainable or force_c_encode: if isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: c = self.get_learned_conditioning(xc.to(self.device)) else: c = xc if bs is not None: c = c[:bs] if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) ckey = __conditioning_keys__[self.model.conditioning_key] c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y} else: c = None xc = None if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) c = {'pos_x': pos_x, 'pos_y': pos_y} out = [z, c] if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_x: out.extend([x]) if return_original_cond: out.append(xc) return out @torch.no_grad() def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z return self.first_stage_model.decode(z) @torch.no_grad() def encode_first_stage(self, x): return self.first_stage_model.encode(x) def shared_step(self, batch, **kwargs): x, c = self.get_input(batch, self.first_stage_key) loss = self(x, c) return loss def forward(self, x, c, *args, **kwargs): t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() if self.model.conditioning_key is not None: assert c is not None if self.cond_stage_trainable: c = self.get_learned_conditioning(c) if self.shorten_cond_schedule: # TODO: drop this option tc = self.cond_ids[t].to(self.device) c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) return self.p_losses(x, c, t, *args, **kwargs) def apply_model(self, x_noisy, t, cond, return_ids=False): if isinstance(cond, dict): # hybrid case, cond is expected to be a dict pass else: if not isinstance(cond, list): cond = [cond] key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn' cond = {key: cond} x_recon = self.model(x_noisy, t, **cond) if isinstance(x_recon, tuple) and not return_ids: return x_recon[0] else: return x_recon def _predict_eps_from_xstart(self, x_t, t, pred_xstart): return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \ extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) def _prior_bpd(self, x_start): """ Get the prior KL term for the variational lower-bound, measured in bits-per-dim. This term can't be optimized, as it only depends on the encoder. :param x_start: the [N x C x ...] tensor of inputs. :return: a batch of [N] KL values (in bits), one per batch element. """ batch_size = x_start.shape[0] t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)
kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)
9
2023-12-16 03:31:33+00:00
16k
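The record above ends inside `_prior_bpd`, and its `next_line` field supplies the `normal_kl` call that finishes the prior KL term. As a rough, self-contained sketch of that quantity (an illustration using the closed-form Gaussian KL, not the repository's `normal_kl`/`mean_flat` helpers), the KL of the diagonal Gaussian q(x_T | x_0) against a standard normal prior can be converted to bits per dimension like this:

import math
import torch

def prior_kl_bits_per_dim(qt_mean: torch.Tensor, qt_log_variance: torch.Tensor) -> torch.Tensor:
    # Illustrative only: KL(N(mu, s^2) || N(0, 1)) = 0.5 * (mu^2 + s^2 - 1 - log s^2),
    # averaged over all non-batch dimensions and converted from nats to bits.
    kl_nats = 0.5 * (qt_mean.pow(2) + qt_log_variance.exp() - 1.0 - qt_log_variance)
    kl_nats = kl_nats.flatten(start_dim=1).mean(dim=1)  # one value per batch element
    return kl_nats / math.log(2.0)                      # nats -> bits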
yasserben/CLOUDS
train_net.py
[ { "identifier": "add_maskformer2_config", "path": "clouds/config.py", "snippet": "def add_maskformer2_config(cfg):\n \"\"\"\n Add config for MASK_FORMER.\n \"\"\"\n # NOTE: configs from original maskformer\n # data config\n # select the dataset mapper\n cfg.INPUT.DATASET_MAPPER_NAME = \"mask_former_semantic\"\n # Color augmentation\n cfg.INPUT.COLOR_AUG_SSD = False\n # We retry random cropping until no single category in semantic segmentation GT occupies more\n # than `SINGLE_CATEGORY_MAX_AREA` part of the crop.\n cfg.INPUT.CROP.SINGLE_CATEGORY_MAX_AREA = 1.0\n # Pad image and segmentation GT in dataset mapper.\n cfg.INPUT.SIZE_DIVISIBILITY = -1\n\n # solver config\n # weight decay on embedding\n cfg.SOLVER.WEIGHT_DECAY_EMBED = 0.0\n # optimizer\n cfg.SOLVER.OPTIMIZER = \"ADAMW\"\n cfg.SOLVER.BACKBONE_MULTIPLIER = 0.1\n\n # mask_former model config\n cfg.MODEL.MASK_FORMER = CN()\n\n # loss\n cfg.MODEL.MASK_FORMER.DEEP_SUPERVISION = True\n cfg.MODEL.MASK_FORMER.NO_OBJECT_WEIGHT = 0.1\n cfg.MODEL.MASK_FORMER.CLASS_WEIGHT = 1.0\n cfg.MODEL.MASK_FORMER.DICE_WEIGHT = 1.0\n cfg.MODEL.MASK_FORMER.MASK_WEIGHT = 20.0\n\n # transformer config\n cfg.MODEL.MASK_FORMER.NHEADS = 8\n cfg.MODEL.MASK_FORMER.DROPOUT = 0.1\n cfg.MODEL.MASK_FORMER.DIM_FEEDFORWARD = 2048\n cfg.MODEL.MASK_FORMER.ENC_LAYERS = 0\n cfg.MODEL.MASK_FORMER.DEC_LAYERS = 6\n cfg.MODEL.MASK_FORMER.PRE_NORM = False\n\n cfg.MODEL.MASK_FORMER.HIDDEN_DIM = 256\n cfg.MODEL.MASK_FORMER.NUM_OBJECT_QUERIES = 100\n\n cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE = \"res5\"\n cfg.MODEL.MASK_FORMER.ENFORCE_INPUT_PROJ = False\n\n # mask_former inference config\n cfg.MODEL.MASK_FORMER.TEST = CN()\n cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON = True\n cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON = False\n cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON = False\n cfg.MODEL.MASK_FORMER.TEST.OBJECT_MASK_THRESHOLD = 0.0\n cfg.MODEL.MASK_FORMER.TEST.OVERLAP_THRESHOLD = 0.0\n cfg.MODEL.MASK_FORMER.TEST.SEM_SEG_POSTPROCESSING_BEFORE_INFERENCE = False\n\n # Sometimes `backbone.size_divisibility` is set to 0 for some backbone (e.g. 
ResNet)\n # you can use this config to override\n cfg.MODEL.MASK_FORMER.SIZE_DIVISIBILITY = 32\n\n # pixel decoder config\n cfg.MODEL.SEM_SEG_HEAD.MASK_DIM = 256\n # adding transformer in pixel decoder\n cfg.MODEL.SEM_SEG_HEAD.TRANSFORMER_ENC_LAYERS = 0\n # pixel decoder\n cfg.MODEL.SEM_SEG_HEAD.PIXEL_DECODER_NAME = \"BasePixelDecoder\"\n\n # swin transformer backbone\n cfg.MODEL.SWIN = CN()\n cfg.MODEL.SWIN.PRETRAIN_IMG_SIZE = 224\n cfg.MODEL.SWIN.PATCH_SIZE = 4\n cfg.MODEL.SWIN.EMBED_DIM = 96\n cfg.MODEL.SWIN.DEPTHS = [2, 2, 6, 2]\n cfg.MODEL.SWIN.NUM_HEADS = [3, 6, 12, 24]\n cfg.MODEL.SWIN.WINDOW_SIZE = 7\n cfg.MODEL.SWIN.MLP_RATIO = 4.0\n cfg.MODEL.SWIN.QKV_BIAS = True\n cfg.MODEL.SWIN.QK_SCALE = None\n cfg.MODEL.SWIN.DROP_RATE = 0.0\n cfg.MODEL.SWIN.ATTN_DROP_RATE = 0.0\n cfg.MODEL.SWIN.DROP_PATH_RATE = 0.3\n cfg.MODEL.SWIN.APE = False\n cfg.MODEL.SWIN.PATCH_NORM = True\n cfg.MODEL.SWIN.OUT_FEATURES = [\"res2\", \"res3\", \"res4\", \"res5\"]\n cfg.MODEL.SWIN.USE_CHECKPOINT = False\n\n # NOTE: maskformer2 extra configs\n # transformer module\n cfg.MODEL.MASK_FORMER.TRANSFORMER_DECODER_NAME = (\n \"MultiScaleMaskedTransformerDecoder\"\n )\n\n # LSJ aug\n cfg.INPUT.IMAGE_SIZE = 1024\n cfg.INPUT.MIN_SCALE = 0.1\n cfg.INPUT.MAX_SCALE = 2.0\n\n # MSDeformAttn encoder configs\n cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_IN_FEATURES = [\n \"res3\",\n \"res4\",\n \"res5\",\n ]\n cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_N_POINTS = 4\n cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_N_HEADS = 8\n\n # point loss configs\n # Number of points sampled during training for a mask point head.\n cfg.MODEL.MASK_FORMER.TRAIN_NUM_POINTS = 112 * 112\n # Oversampling parameter for PointRend point sampling during training. Parameter `k` in the\n # original paper.\n cfg.MODEL.MASK_FORMER.OVERSAMPLE_RATIO = 3.0\n # Importance sampling parameter for PointRend point sampling during training. 
Parametr `beta` in\n # the original paper.\n cfg.MODEL.MASK_FORMER.IMPORTANCE_SAMPLE_RATIO = 0.75\n\n # Resizing disabled for Synthia\n cfg.INPUT.RESIZE = CN()\n cfg.INPUT.RESIZE.ENABLED = True\n cfg.INPUT.RESIZE.SIZE_TRAIN = (1280, 720)\n\n # Saving Pseudo Labels during test time\n cfg.MODEL.SAVE_PSEUDO_LABELS = False\n\n # for the Dataset repeat factor\n # cfg.DATASETS.TRAIN_REPEAT_FACTOR = [(\"sd_v99\",5.0), (\"cityscapes_train\",1.0)]" }, { "identifier": "add_clouds_config", "path": "clouds/config.py", "snippet": "def add_clouds_config(cfg):\n # CLOUDS model config\n cfg.MODEL.CLOUDS = CN()\n cfg.MODEL.CLOUDS.CLIP_MODEL_NAME = \"convnext_large_d_320\"\n cfg.MODEL.CLOUDS.CLIP_PRETRAINED_WEIGHTS = \"laion2b_s29b_b131k_ft_soup\"\n cfg.MODEL.CLOUDS.EMBED_DIM = 768\n cfg.MODEL.CLOUDS.GEOMETRIC_ENSEMBLE_ALPHA = 0.4\n cfg.MODEL.CLOUDS.GEOMETRIC_ENSEMBLE_BETA = 0.8\n cfg.MODEL.CLOUDS.ENSEMBLE_ON_VALID_MASK = False\n cfg.MODEL.CLOUDS.GEOMETRIC_ENSEMBLE = False\n cfg.MODEL.CLOUDS.GEOMETRIC_ENSEMBLE_EMA = False\n cfg.MODEL.CLOUDS.SAM = CN()\n cfg.MODEL.CLOUDS.SAM.ENABLED = False\n cfg.MODEL.CLOUDS.SAM.MOBILE = True\n cfg.MODEL.CLOUDS.SAM.MINIBATCH = False\n cfg.MODEL.CLOUDS.SAM.SIZE_THRESHOLD = 5000\n cfg.MODEL.CLOUDS.SAM.EROSION = False\n cfg.MODEL.CLOUDS.SAM.EROSION_SIZE = 3\n cfg.MODEL.CLOUDS.SAM.NUM_POINTS = 5\n cfg.MODEL.CLOUDS.SAM.SELECTION_MODE = \"random\"\n cfg.MODEL.CLOUDS.SAM.RM_INTERSECTION = True\n cfg.MODEL.CLOUDS.SAM.REFINEMENT = False\n cfg.MODEL.CLOUDS.SAM.ALPHA_EMA = 0.999\n cfg.MODEL.CLOUDS.OVERWRITING = True\n cfg.MODEL.CLOUDS.ITERATION_UPDATE = 100" }, { "identifier": "add_wandb_config", "path": "clouds/config.py", "snippet": "def add_wandb_config(cfg):\n # Wandb\n cfg.WANDB = CN()\n cfg.WANDB.PROJECT = \"clouds\"\n cfg.WANDB.NAME = None\n # use flash attention\n cfg.MODEL.FLASH = False" }, { "identifier": "add_prerocessing_training_set_config", "path": "clouds/config.py", "snippet": "def add_prerocessing_training_set_config(cfg):\n cfg.INPUT.FLIP = True\n cfg.INPUT.INITIAL_HEIGHT = 1052\n cfg.INPUT.INITIAL_WIDTH = 1914\n cfg.INPUT.RESIZE_HEIGHT = 720\n cfg.INPUT.RESIZE_WIDTH = 1280\n cfg.INPUT.PL_THRESHOLD = 0.0\n\n cfg.DATASETS.SOURCE_FACTOR = 1.0\n cfg.DATASETS.TARGET_FACTOR = 1.0" }, { "identifier": "add_repeat_factors", "path": "clouds/config.py", "snippet": "def add_repeat_factors(cfg):\n # for the Dataset repeat factor\n if (\n len(cfg.DATASETS.TRAIN) == 2\n and cfg.DATALOADER.SAMPLER_TRAIN == \"WeightedTrainingSampler\"\n ):\n if \"sd\" in cfg.DATASETS.TRAIN[0]:\n target_dataset = cfg.DATASETS.TRAIN[0]\n source_dataset = cfg.DATASETS.TRAIN[1]\n else:\n target_dataset = cfg.DATASETS.TRAIN[1]\n source_dataset = cfg.DATASETS.TRAIN[0]\n\n TRAIN_REPEAT_FACTOR = [\n (target_dataset, cfg.DATASETS.TARGET_FACTOR),\n (source_dataset, cfg.DATASETS.SOURCE_FACTOR),\n ]\n cfg.DATASETS.TRAIN_REPEAT_FACTOR = TRAIN_REPEAT_FACTOR\n return cfg\n else:\n return cfg" }, { "identifier": "MapperTrain", "path": "clouds/data/dataset_mappers/mapper_train.py", "snippet": "class MapperTrain:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer for semantic segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. 
Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n augmentations_src,\n augmentations_sd,\n augmentations_photo,\n image_format,\n ignore_label,\n size_divisibility,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n image_format: an image format supported by :func:`detection_utils.read_image`.\n ignore_label: the label that is ignored to evaluation\n size_divisibility: pad image size to be divisible by this value\n \"\"\"\n self.is_train = is_train\n self.tfm_gens_src = augmentations_src\n self.tfm_gens_sd = augmentations_sd\n self.tfm_gens_photometric = augmentations_photo\n self.img_format = image_format\n self.ignore_label = ignore_label\n self.size_divisibility = size_divisibility\n\n logger = logging.getLogger(__name__)\n mode = \"training\" if is_train else \"inference\"\n logger.info(\n f\"[{self.__class__.__name__}] Augmentations used in {mode}: {augmentations_src}\"\n )\n\n @classmethod\n def from_config(cls, cfg, is_train=True):\n augs_src = []\n augs_sd = []\n augs_photometric = []\n # Build augmentation\n if cfg.INPUT.RESIZE.ENABLED:\n augs_src.append(\n T.ResizeScale(\n min_scale=0.5,\n max_scale=2.0,\n target_height=cfg.INPUT.INITIAL_HEIGHT,\n target_width=cfg.INPUT.INITIAL_WIDTH,\n interp=Image.BILINEAR,\n )\n )\n if cfg.INPUT.CROP.ENABLED:\n augs_src.append(\n T.FixedSizeCrop(\n (768, 768),\n pad=True,\n seg_pad_value=255,\n pad_value=0,\n )\n )\n if cfg.INPUT.COLOR_AUG_SSD:\n augs_src.append(ColorAugSSDTransform(img_format=cfg.INPUT.FORMAT))\n augs_photometric.append(ColorAugSSDTransform(img_format=cfg.INPUT.FORMAT))\n if cfg.INPUT.FLIP:\n augs_src.append(T.RandomFlip())\n augs_sd.append(T.RandomFlip())\n\n # Assume always applies to the training set.\n dataset_names = cfg.DATASETS.TRAIN\n meta = MetadataCatalog.get(dataset_names[0])\n ignore_label = meta.ignore_label\n\n ret = {\n \"is_train\": is_train,\n \"augmentations_src\": augs_src,\n \"augmentations_sd\": augs_sd,\n \"augmentations_photo\": augs_photometric,\n \"image_format\": cfg.INPUT.FORMAT,\n \"ignore_label\": ignore_label,\n \"size_divisibility\": cfg.INPUT.SIZE_DIVISIBILITY,\n }\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n assert (\n self.is_train\n ), \"MaskFormerSemanticDatasetMapper should only be used for training!\"\n\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n if \"sem_seg_file_name\" in dataset_dict:\n # PyTorch transformation not implemented for uint16, so converting it to double first\n sem_seg_gt = utils.read_image(dataset_dict.pop(\"sem_seg_file_name\")).astype(\n \"double\"\n )\n else:\n sem_seg_gt = np.full(\n (dataset_dict[\"height\"], dataset_dict[\"width\"]), self.ignore_label\n ).astype(\"double\")\n\n if sem_seg_gt is None:\n raise ValueError(\n \"Cannot find 'sem_seg_file_name' for semantic segmentation dataset {}.\".format(\n dataset_dict[\"file_name\"]\n )\n )\n\n aug_input = T.AugInput(image, sem_seg=sem_seg_gt)\n if not (\"generated\" in str(dataset_dict[\"image_id\"])):\n aug_input, transforms = T.apply_transform_gens(self.tfm_gens_src, aug_input)\n 
image = aug_input.image\n sem_seg_gt = aug_input.sem_seg\n else:\n aug_input, transforms = T.apply_transform_gens(self.tfm_gens_sd, aug_input)\n image = aug_input.image\n sem_seg_gt = aug_input.sem_seg\n aug_input_photo, transforms = T.apply_transform_gens(\n self.tfm_gens_photometric, aug_input\n )\n image_aug = aug_input_photo.image\n\n # Pad image and segmentation label here!\n image = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n if \"generated\" in str(dataset_dict[\"image_id\"]):\n image_aug = torch.as_tensor(\n np.ascontiguousarray(image_aug.transpose(2, 0, 1))\n )\n if sem_seg_gt is not None:\n sem_seg_gt = torch.as_tensor(sem_seg_gt.astype(\"long\"))\n\n if self.size_divisibility > 0:\n image_size = (image.shape[-2], image.shape[-1])\n padding_size = [\n 0,\n self.size_divisibility - image_size[1],\n 0,\n self.size_divisibility - image_size[0],\n ]\n image = F.pad(image, padding_size, value=128).contiguous()\n if \"generated\" in str(dataset_dict[\"image_id\"]):\n image_aug = F.pad(image_aug, padding_size, value=128).contiguous()\n if sem_seg_gt is not None:\n sem_seg_gt = F.pad(\n sem_seg_gt, padding_size, value=self.ignore_label\n ).contiguous()\n\n image_shape = (image.shape[-2], image.shape[-1]) # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = image\n if \"generated\" in str(dataset_dict[\"image_id\"]):\n dataset_dict[\"image_aug\"] = image_aug\n\n if sem_seg_gt is not None:\n dataset_dict[\"sem_seg\"] = sem_seg_gt.long()\n\n if \"annotations\" in dataset_dict:\n raise ValueError(\n \"Semantic segmentation dataset should not have 'annotations'.\"\n )\n\n # Prepare per-category binary masks\n if sem_seg_gt is not None:\n sem_seg_gt = sem_seg_gt.numpy()\n instances = Instances(image_shape)\n classes = np.unique(sem_seg_gt)\n # remove ignored region\n classes = classes[classes != self.ignore_label]\n instances.gt_classes = torch.tensor(classes, dtype=torch.int64)\n\n masks = []\n for class_id in classes:\n masks.append(sem_seg_gt == class_id)\n\n if len(masks) == 0:\n # Some image does not have annotation (all ignored)\n instances.gt_masks = torch.zeros(\n (0, sem_seg_gt.shape[-2], sem_seg_gt.shape[-1])\n )\n else:\n masks = BitMasks(\n torch.stack(\n [\n torch.from_numpy(np.ascontiguousarray(x.copy()))\n for x in masks\n ]\n )\n )\n instances.gt_masks = masks.tensor\n\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict" }, { "identifier": "MapperTest", "path": "clouds/data/dataset_mappers/mapper_test.py", "snippet": "class MapperTest:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by the model.\n\n This is the default callable to be used to map your dataset dict into training data.\n You may need to follow it to implement your own one for customized logic,\n such as a different way to read or transform images.\n See :doc:`/tutorials/data_loading` for details.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies cropping/geometric transforms to the image and annotations\n 3. 
Prepare data and annotations to Tensor and :class:`Instances`\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train: bool,\n *,\n augmentations: List[Union[T.Augmentation, T.Transform]],\n image_format: str,\n\n ):\n \"\"\"\n NOTE: this interface is experimental.\n\n Args:\n is_train: whether it's used in training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n image_format: an image format supported by :func:`detection_utils.read_image`.\n \"\"\"\n # if recompute_boxes:\n # assert use_instance_mask, \"recompute_boxes requires instance masks\"\n # fmt: off\n self.is_train = is_train\n self.augmentations = augmentations\n self.image_format = image_format\n logger = logging.getLogger(__name__)\n mode = \"training\" if is_train else \"inference\"\n logger.info(f\"[DatasetMapper] Augmentations used in {mode}: {augmentations}\")\n\n @classmethod\n def from_config(cls, cfg, is_train: bool = True):\n augs = [T.ResizeShortestEdge(short_edge_length=[1024], sample_style=\"choice\")]\n\n ret = {\n \"is_train\": is_train,\n \"augmentations\": augs,\n \"image_format\": cfg.INPUT.FORMAT,\n }\n\n\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n # USER: Write your own image loading if it's not from a file\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.image_format)\n utils.check_image_size(dataset_dict, image)\n\n # USER: Remove if you don't do semantic/panoptic segmentation.\n if \"sem_seg_file_name\" in dataset_dict:\n sem_seg_gt = utils.read_image(dataset_dict.pop(\"sem_seg_file_name\"), \"L\").squeeze(2)\n else:\n sem_seg_gt = None\n\n aug_input = T.AugInput(image, sem_seg=sem_seg_gt)\n aug_input, transformation = T.apply_transform_gens(self.augmentations, aug_input)\n image, sem_seg_gt = aug_input.image, aug_input.sem_seg\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n\n if sem_seg_gt is not None:\n dataset_dict[\"sem_seg\"] = torch.as_tensor(sem_seg_gt.astype(\"long\"))\n\n dataset_dict['height'] = dataset_dict[\"image\"].shape[1]\n dataset_dict['width'] = dataset_dict[\"image\"].shape[2]\n if not self.is_train:\n # USER: Modify this if you want to keep them for some reason.\n dataset_dict.pop(\"sem_seg_file_name\", None)\n return dataset_dict\n\n return dataset_dict" }, { "identifier": "CityscapesSemSegEvaluator", "path": "clouds/evaluation/cityscapes_evaluation.py", "snippet": "class CityscapesSemSegEvaluator(CityscapesEvaluator):\n \"\"\"\n Evaluate semantic segmentation results on cityscapes dataset using cityscapes API.\n\n Note:\n * It does not work in multi-machine distributed training.\n * It contains a synchronization, therefore has to be used on all ranks.\n * Only the main process runs evaluation.\n \"\"\"\n\n def process(self, inputs, outputs):\n from cityscapesscripts.helpers.labels import trainId2label\n for input, output in zip(inputs, outputs):\n file_name = input[\"file_name\"]\n basename = os.path.splitext(os.path.basename(file_name))[0]\n pred_filename = os.path.join(self._temp_dir, basename + 
\"_pred.png\")\n\n output = output[\"sem_seg\"].argmax(dim=0).to(self._cpu_device).numpy()\n pred = 255 * np.ones(output.shape, dtype=np.uint8)\n for train_id, label in trainId2label.items():\n if label.ignoreInEval:\n continue\n pred[output == train_id] = label.id\n Image.fromarray(pred).save(pred_filename)\n\n\n def evaluate(self):\n comm.synchronize()\n if comm.get_rank() > 0:\n return\n # Load the Cityscapes eval script *after* setting the required env var,\n # since the script reads CITYSCAPES_DATASET into global variables at load time.\n import cityscapesscripts.evaluation.evalPixelLevelSemanticLabeling as cityscapes_eval\n\n self._logger.info(\"Evaluating results under {} ...\".format(self._temp_dir))\n\n # set some global states in cityscapes evaluation API, before evaluating\n cityscapes_eval.args.predictionPath = os.path.abspath(self._temp_dir)\n cityscapes_eval.args.predictionWalk = None\n cityscapes_eval.args.JSONOutput = False\n cityscapes_eval.args.colorized = False\n\n # These lines are adopted from\n # https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/evalPixelLevelSemanticLabeling.py # noqa\n gt_dir = PathManager.get_local_path(self._metadata.gt_dir)\n groundTruthImgList = glob.glob(\n os.path.join(gt_dir, \"*\", \"*_gtFine_labelIds.png\")\n )\n assert len(\n groundTruthImgList\n ), \"Cannot find any ground truth images to use for evaluation. Searched for: {}\".format(\n cityscapes_eval.args.groundTruthSearch\n )\n predictionImgList = []\n for gt in groundTruthImgList:\n predictionImgList.append(\n cityscapes_eval.getPrediction(cityscapes_eval.args, gt)\n )\n results = cityscapes_eval.evaluateImgLists(\n predictionImgList, groundTruthImgList, cityscapes_eval.args\n )\n ret = OrderedDict()\n ret[\"sem_seg\"] = {\n \"mIoU\": 100.0 * results[\"averageScoreClasses\"],\n \"IoU.road\": 100.0 * results[\"classScores\"][\"road\"],\n \"IoU.sidewalk\": 100.0 * results[\"classScores\"][\"sidewalk\"],\n \"IoU.building\": 100.0 * results[\"classScores\"][\"building\"],\n \"IoU.wall\": 100.0 * results[\"classScores\"][\"wall\"],\n \"IoU.fence\": 100.0 * results[\"classScores\"][\"fence\"],\n \"IoU.pole\": 100.0 * results[\"classScores\"][\"pole\"],\n \"IoU.traffic light\": 100.0 * results[\"classScores\"][\"traffic light\"],\n \"IoU.traffic sign\": 100.0 * results[\"classScores\"][\"traffic sign\"],\n \"IoU.vegetation\": 100.0 * results[\"classScores\"][\"vegetation\"],\n \"IoU.terrain\": 100.0 * results[\"classScores\"][\"terrain\"],\n \"IoU.sky\": 100.0 * results[\"classScores\"][\"sky\"],\n \"IoU.person\": 100.0 * results[\"classScores\"][\"person\"],\n \"IoU.rider\": 100.0 * results[\"classScores\"][\"rider\"],\n \"IoU.car\": 100.0 * results[\"classScores\"][\"car\"],\n \"IoU.truck\": 100.0 * results[\"classScores\"][\"truck\"],\n \"IoU.bus\": 100.0 * results[\"classScores\"][\"bus\"],\n \"IoU.train\": 100.0 * results[\"classScores\"][\"train\"],\n \"IoU.motorcycle\": 100.0 * results[\"classScores\"][\"motorcycle\"],\n \"IoU.bicycle\": 100.0 * results[\"classScores\"][\"bicycle\"],\n }\n if not self._save_pl:\n self._working_dir.cleanup()\n return ret" }, { "identifier": "ClassicalSemSegEvaluator", "path": "clouds/evaluation/semantic_evaluation.py", "snippet": "class ClassicalSemSegEvaluator(DatasetEvaluator):\n \"\"\"\n Evaluate semantic segmentation metrics.\n \"\"\"\n\n def __init__(\n self,\n dataset_name,\n distributed=True,\n output_dir=None,\n *,\n sem_seg_loading_fn=load_image_into_numpy_array,\n num_classes=None,\n ignore_label=None,\n 
save_pl=False,\n ):\n \"\"\"\n Args:\n dataset_name (str): name of the dataset to be evaluated.\n distributed (bool): if True, will collect results from all ranks for evaluation.\n Otherwise, will evaluate the results in the current process.\n output_dir (str): an output directory to dump results.\n sem_seg_loading_fn: function to read sem seg file and load into numpy array.\n Default provided, but projects can customize.\n num_classes, ignore_label: deprecated argument\n \"\"\"\n self._logger = logging.getLogger(__name__)\n if num_classes is not None:\n self._logger.warn(\n \"SemSegEvaluator(num_classes) is deprecated! It should be obtained from metadata.\"\n )\n if ignore_label is not None:\n self._logger.warn(\n \"SemSegEvaluator(ignore_label) is deprecated! It should be obtained from metadata.\"\n )\n self._dataset_name = dataset_name\n self._distributed = distributed\n self._output_dir = output_dir\n\n self._cpu_device = torch.device(\"cpu\")\n\n self.input_file_to_gt_file = {\n dataset_record[\"file_name\"]: dataset_record[\"sem_seg_file_name\"]\n for dataset_record in DatasetCatalog.get(dataset_name)\n }\n\n meta = MetadataCatalog.get(dataset_name)\n # Dict that maps contiguous training ids to COCO category ids\n try:\n c2d = meta.stuff_dataset_id_to_contiguous_id\n self._contiguous_id_to_dataset_id = {v: k for k, v in c2d.items()}\n except AttributeError:\n self._contiguous_id_to_dataset_id = None\n self._class_names = meta.stuff_classes\n self.sem_seg_loading_fn = sem_seg_loading_fn\n self._num_classes = len(meta.stuff_classes)\n if num_classes is not None:\n assert (\n self._num_classes == num_classes\n ), f\"{self._num_classes} != {num_classes}\"\n self._ignore_label = (\n ignore_label if ignore_label is not None else meta.ignore_label\n )\n\n # This is because cv2.erode did not work for int datatype. Only works for uint8.\n self._compute_boundary_iou = True\n if not _CV2_IMPORTED:\n self._compute_boundary_iou = False\n self._logger.warn(\n \"\"\"Boundary IoU calculation requires OpenCV. B-IoU metrics are\n not going to be computed because OpenCV is not available to import.\"\"\"\n )\n if self._num_classes >= np.iinfo(np.uint8).max:\n self._compute_boundary_iou = False\n self._logger.warn(\n f\"\"\"SemSegEvaluator(num_classes) is more than supported value for Boundary IoU calculation!\n B-IoU metrics are not going to be computed. Max allowed value (exclusive)\n for num_classes for calculating Boundary IoU is {np.iinfo(np.uint8).max}.\n The number of classes of dataset {self._dataset_name} is {self._num_classes}\"\"\"\n )\n self._save_pl = save_pl\n\n def reset(self):\n self._conf_matrix = np.zeros(\n (self._num_classes + 1, self._num_classes + 1), dtype=np.int64\n )\n self._b_conf_matrix = np.zeros(\n (self._num_classes + 1, self._num_classes + 1), dtype=np.int64\n )\n self._predictions = []\n\n def process(self, inputs, outputs):\n \"\"\"\n Args:\n inputs: the inputs to a model.\n It is a list of dicts. Each dict corresponds to an image and\n contains keys like \"height\", \"width\", \"file_name\".\n outputs: the outputs of a model. 
It is either list of semantic segmentation predictions\n (Tensor [H, W]) or list of dicts with key \"sem_seg\" that contains semantic\n segmentation prediction in the same format.\n \"\"\"\n for input, output in zip(inputs, outputs):\n output = output[\"sem_seg\"].argmax(dim=0).to(self._cpu_device)\n pred = np.array(output, dtype=int)\n gt = input[\"sem_seg\"].numpy()\n\n gt[gt == self._ignore_label] = self._num_classes\n\n self._conf_matrix += np.bincount(\n (self._num_classes + 1) * pred.reshape(-1) + gt.reshape(-1),\n minlength=self._conf_matrix.size,\n ).reshape(self._conf_matrix.shape)\n\n if self._compute_boundary_iou:\n b_gt = self._mask_to_boundary(gt.astype(np.uint8))\n b_pred = self._mask_to_boundary(pred.astype(np.uint8))\n\n self._b_conf_matrix += np.bincount(\n (self._num_classes + 1) * b_pred.reshape(-1) + b_gt.reshape(-1),\n minlength=self._conf_matrix.size,\n ).reshape(self._conf_matrix.shape)\n\n if self._save_pl:\n self._predictions.extend(\n [dict(file_name=input[\"file_name\"], pred=pred)]\n )\n else:\n self._predictions.extend(\n self.encode_json_sem_seg(pred, input[\"file_name\"])\n )\n\n def evaluate(self):\n \"\"\"\n Evaluates standard semantic segmentation metrics (http://cocodataset.org/#stuff-eval):\n\n * Mean intersection-over-union averaged across classes (mIoU)\n * Frequency Weighted IoU (fwIoU)\n * Mean pixel accuracy averaged across classes (mACC)\n * Pixel Accuracy (pACC)\n \"\"\"\n if self._distributed:\n synchronize()\n conf_matrix_list = all_gather(self._conf_matrix)\n b_conf_matrix_list = all_gather(self._b_conf_matrix)\n self._predictions = all_gather(self._predictions)\n self._predictions = list(itertools.chain(*self._predictions))\n if not is_main_process():\n return\n\n self._conf_matrix = np.zeros_like(self._conf_matrix)\n for conf_matrix in conf_matrix_list:\n self._conf_matrix += conf_matrix\n\n self._b_conf_matrix = np.zeros_like(self._b_conf_matrix)\n for b_conf_matrix in b_conf_matrix_list:\n self._b_conf_matrix += b_conf_matrix\n\n if self._output_dir:\n first_elem = self._predictions[0]\n if \"bdd\" in first_elem[\"file_name\"]:\n self._output_dir = os.path.join(self._output_dir, \"bdd_eval_pl\")\n elif \"mapillary\" in first_elem[\"file_name\"]:\n self._output_dir = os.path.join(self._output_dir, \"mapillary_eval_pl\")\n PathManager.mkdirs(self._output_dir)\n if self._save_pl:\n # A function that will iterate over the list of dictionnaries and write the corresponding image\n # in the output directory\n def write_image_from_dict(dict):\n filename = os.path.join(\n self._output_dir,\n dict[\"file_name\"].split(\"/\")[-1].split(\".\")[0] + \"_pred.png\",\n )\n pred = dict[\"pred\"]\n pred = get_rgb_from_semantic_map_maxed(pred)\n # pred = Image.fromarray(pred)\n pred.save(filename)\n\n # We apply the function to the list of dictionnaries\n list(map(write_image_from_dict, self._predictions))\n\n else:\n file_path = os.path.join(self._output_dir, \"sem_seg_predictions.json\")\n with PathManager.open(file_path, \"w\") as f:\n f.write(json.dumps(self._predictions))\n\n acc = np.full(self._num_classes, np.nan, dtype=float)\n iou = np.full(self._num_classes, np.nan, dtype=float)\n tp = self._conf_matrix.diagonal()[:-1].astype(float)\n pos_gt = np.sum(self._conf_matrix[:-1, :-1], axis=0).astype(float)\n class_weights = pos_gt / np.sum(pos_gt)\n pos_pred = np.sum(self._conf_matrix[:-1, :-1], axis=1).astype(float)\n acc_valid = pos_gt > 0\n acc[acc_valid] = tp[acc_valid] / pos_gt[acc_valid]\n union = pos_gt + pos_pred - tp\n iou_valid = 
np.logical_and(acc_valid, union > 0)\n iou[iou_valid] = tp[iou_valid] / union[iou_valid]\n macc = np.sum(acc[acc_valid]) / np.sum(acc_valid)\n miou = np.sum(iou[iou_valid]) / np.sum(iou_valid)\n fiou = np.sum(iou[iou_valid] * class_weights[iou_valid])\n pacc = np.sum(tp) / np.sum(pos_gt)\n\n if self._compute_boundary_iou:\n b_iou = np.full(self._num_classes, np.nan, dtype=float)\n b_tp = self._b_conf_matrix.diagonal()[:-1].astype(float)\n b_pos_gt = np.sum(self._b_conf_matrix[:-1, :-1], axis=0).astype(float)\n b_pos_pred = np.sum(self._b_conf_matrix[:-1, :-1], axis=1).astype(float)\n b_union = b_pos_gt + b_pos_pred - b_tp\n b_iou_valid = b_union > 0\n b_iou[b_iou_valid] = b_tp[b_iou_valid] / b_union[b_iou_valid]\n\n res = {}\n res[\"mIoU\"] = 100 * miou\n res[\"fwIoU\"] = 100 * fiou\n for i, name in enumerate(self._class_names):\n res[f\"IoU-{name}\"] = 100 * iou[i]\n if self._compute_boundary_iou:\n res[f\"BoundaryIoU-{name}\"] = 100 * b_iou[i]\n res[f\"min(IoU, B-Iou)-{name}\"] = 100 * min(iou[i], b_iou[i])\n res[\"mACC\"] = 100 * macc\n res[\"pACC\"] = 100 * pacc\n for i, name in enumerate(self._class_names):\n res[f\"ACC-{name}\"] = 100 * acc[i]\n\n if self._output_dir:\n file_path = os.path.join(self._output_dir, \"sem_seg_evaluation.pth\")\n with PathManager.open(file_path, \"wb\") as f:\n torch.save(res, f)\n results = OrderedDict({\"sem_seg\": res})\n self._logger.info(results)\n\n def get_miou_value_from_dict(dict, subkey):\n for key, value in dict.items():\n if subkey in key and \"IoU\" in key:\n if np.isnan(value):\n return 0\n else:\n return value\n\n ret = OrderedDict()\n ret[\"sem_seg\"] = {\n \"mIoU\": results[\"sem_seg\"][\"mIoU\"],\n \"IoU.road\": get_miou_value_from_dict(results[\"sem_seg\"], \"road\"),\n \"IoU.sidewalk\": get_miou_value_from_dict(results[\"sem_seg\"], \"sidewalk\"),\n \"IoU.building\": get_miou_value_from_dict(results[\"sem_seg\"], \"building\"),\n \"IoU.wall\": get_miou_value_from_dict(results[\"sem_seg\"], \"wall\"),\n \"IoU.fence\": get_miou_value_from_dict(results[\"sem_seg\"], \"fence\"),\n \"IoU.pole\": get_miou_value_from_dict(results[\"sem_seg\"], \"pole\"),\n \"IoU.traffic light\": get_miou_value_from_dict(\n results[\"sem_seg\"], \"traffic light\"\n ),\n \"IoU.traffic sign\": get_miou_value_from_dict(\n results[\"sem_seg\"], \"traffic sign\"\n ),\n \"IoU.vegetation\": get_miou_value_from_dict(\n results[\"sem_seg\"], \"vegetation\"\n ),\n \"IoU.terrain\": get_miou_value_from_dict(results[\"sem_seg\"], \"terrain\"),\n \"IoU.sky\": get_miou_value_from_dict(results[\"sem_seg\"], \"sky\"),\n \"IoU.person\": get_miou_value_from_dict(results[\"sem_seg\"], \"person\"),\n \"IoU.rider\": get_miou_value_from_dict(results[\"sem_seg\"], \"rider\"),\n \"IoU.car\": get_miou_value_from_dict(results[\"sem_seg\"], \"car\"),\n \"IoU.truck\": get_miou_value_from_dict(results[\"sem_seg\"], \"truck\"),\n \"IoU.bus\": get_miou_value_from_dict(results[\"sem_seg\"], \"bus\"),\n \"IoU.train\": get_miou_value_from_dict(results[\"sem_seg\"], \"train\"),\n \"IoU.motorcycle\": get_miou_value_from_dict(\n results[\"sem_seg\"], \"motorcycle\"\n ),\n \"IoU.bicycle\": get_miou_value_from_dict(results[\"sem_seg\"], \"bicycle\"),\n }\n return ret\n\n def encode_json_sem_seg(self, sem_seg, input_file_name):\n \"\"\"\n Convert semantic segmentation to COCO stuff format with segments encoded as RLEs.\n See http://cocodataset.org/#format-results\n \"\"\"\n json_list = []\n for label in np.unique(sem_seg):\n if self._contiguous_id_to_dataset_id is not None:\n assert (\n label in 
self._contiguous_id_to_dataset_id\n ), \"Label {} is not in the metadata info for {}\".format(\n label, self._dataset_name\n )\n dataset_id = self._contiguous_id_to_dataset_id[label]\n else:\n dataset_id = int(label)\n mask = (sem_seg == label).astype(np.uint8)\n mask_rle = mask_util.encode(np.array(mask[:, :, None], order=\"F\"))[0]\n mask_rle[\"counts\"] = mask_rle[\"counts\"].decode(\"utf-8\")\n json_list.append(\n {\n \"file_name\": input_file_name,\n \"category_id\": dataset_id,\n \"segmentation\": mask_rle,\n }\n )\n return json_list\n\n def _mask_to_boundary(self, mask: np.ndarray, dilation_ratio=0.02):\n assert mask.ndim == 2, \"mask_to_boundary expects a 2-dimensional image\"\n h, w = mask.shape\n diag_len = np.sqrt(h ** 2 + w ** 2)\n dilation = max(1, int(round(dilation_ratio * diag_len)))\n kernel = np.ones((3, 3), dtype=np.uint8)\n\n padded_mask = cv2.copyMakeBorder(mask, 1, 1, 1, 1, cv2.BORDER_CONSTANT, value=0)\n eroded_mask_with_padding = cv2.erode(padded_mask, kernel, iterations=dilation)\n eroded_mask = eroded_mask_with_padding[1:-1, 1:-1]\n boundary = mask - eroded_mask\n return boundary" }, { "identifier": "PersoEvalHook", "path": "clouds/engine/hooks.py", "snippet": "class PersoEvalHook(HookBase):\n \"\"\"\n Run an evaluation function periodically, and at the end of training.\n\n It is executed every ``eval_period`` iterations and after the last iteration.\n \"\"\"\n\n def __init__(self, eval_period, eval_function, eval_after_train=True):\n \"\"\"\n Args:\n eval_period (int): the period to run `eval_function`. Set to 0 to\n not evaluate periodically (but still evaluate after the last iteration\n if `eval_after_train` is True).\n eval_function (callable): a function which takes no arguments, and\n returns a nested dict of evaluation metrics.\n eval_after_train (bool): whether to evaluate after the last iteration\n\n Note:\n This hook must be enabled in all or none workers.\n If you would like only certain workers to perform evaluation,\n give other workers a no-op function (`eval_function=lambda: None`).\n \"\"\"\n self._period = eval_period\n self._func = eval_function\n self._eval_after_train = eval_after_train\n\n def _do_eval(self):\n results = self._func()\n\n if results:\n assert isinstance(\n results, dict\n ), \"Eval function must return a dict. Got {} instead.\".format(results)\n\n flattened_results = flatten_results_dict(results)\n for k, v in flattened_results.items():\n try:\n v = float(v)\n except Exception as e:\n raise ValueError(\n \"[EvalHook] eval_function should return a nested dict of float. \"\n \"Got '{}: {}' instead.\".format(k, v)\n ) from e\n self.trainer.storage.put_scalars(**flattened_results, smoothing_hint=False)\n\n # Evaluation may take different time among workers.\n # A barrier make them start the next iteration together.\n comm.synchronize()\n\n def before_train(self):\n \"\"\"\n Called before the first iteration.\n \"\"\"\n if \"debug\" in self.trainer.cfg.OUTPUT_DIR:\n pass\n else:\n results = self._func()\n\n if results:\n assert isinstance(\n results, dict\n ), \"Eval function must return a dict. Got {} instead.\".format(results)\n\n flattened_results = flatten_results_dict(results)\n for k, v in flattened_results.items():\n try:\n v = float(v)\n except Exception as e:\n raise ValueError(\n \"[EvalHook] eval_function should return a nested dict of float. 
\"\n \"Got '{}: {}' instead.\".format(k, v)\n ) from e\n self.trainer.storage.put_scalars(\n **flattened_results, smoothing_hint=False\n )\n\n def after_step(self):\n next_iter = self.trainer.iter + 1\n if self._period > 0 and next_iter % self._period == 0:\n # do the last eval in after_train\n if next_iter != self.trainer.max_iter:\n self._do_eval()\n\n def after_train(self):\n # This condition is to prevent the eval from running after a failed training\n if self._eval_after_train and self.trainer.iter + 1 >= self.trainer.max_iter:\n self._do_eval()\n # func is likely a closure that holds reference to the trainer\n # therefore we clean it to avoid circular reference in the end\n del self._func" }, { "identifier": "WandbWriter", "path": "clouds/utils/events.py", "snippet": "class WandbWriter(EventWriter):\n \"\"\"\n Write all scalars to a tensorboard file.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Args:\n log_dir (str): the directory to save the output events\n kwargs: other arguments passed to `torch.utils.tensorboard.SummaryWriter(...)`\n \"\"\"\n self._last_write = -1\n self._group_rules = [\n (IsIn(\"/\"), BaseRule()),\n (IsIn(\"loss\"), Prefix(\"train\")),\n # (IsIn(\"sem_seg\"), Prefix(\"val\")),\n (\n IsInList([\"lr\", \"time\", \"eta_seconds\", \"rank_data_time\", \"data_time\"]),\n Prefix(\"stats\"),\n ),\n ]\n\n def write(self):\n storage = get_event_storage()\n\n def _group_name(scalar_name):\n for rule, op in self._group_rules:\n if rule(scalar_name):\n return op(scalar_name)\n return scalar_name\n\n stats = {\n _group_name(name): scalars[0]\n for name, scalars in storage.latest().items()\n if scalars[1] > self._last_write\n }\n if len(stats) > 0:\n self._last_write = max([v[1] for k, v in storage.latest().items()])\n\n # storage.put_{image,histogram} is only meant to be used by\n # tensorboard writer. So we access its internal fields directly from here.\n if len(storage._vis_data) >= 1:\n stats[\"image\"] = [\n wandb.Image(img, caption=img_name)\n for img_name, img, step_num in storage._vis_data\n ]\n # Storage stores all image data and rely on this writer to clear them.\n # As a result it assumes only one writer will use its image data.\n # An alternative design is to let storage store limited recent\n # data (e.g. 
only the most recent image) that all writers can access.\n # In that case a writer may not see all image data if its period is long.\n storage.clear_images()\n\n if len(storage._histograms) >= 1:\n\n def create_bar(tag, bucket_limits, bucket_counts, **kwargs):\n data = [\n [label, val] for (label, val) in zip(bucket_limits, bucket_counts)\n ]\n table = wandb.Table(data=data, columns=[\"label\", \"value\"])\n return wandb.plot.bar(table, \"label\", \"value\", title=tag)\n\n stats[\"hist\"] = [create_bar(**params) for params in storage._histograms]\n\n storage.clear_histograms()\n\n if len(stats) == 0:\n return\n wandb.log(stats, step=storage.iter)\n\n def close(self):\n wandb.finish()" }, { "identifier": "setup_wandb", "path": "clouds/utils/events.py", "snippet": "def setup_wandb(cfg, args):\n if comm.is_main_process():\n init_args = {\n k.lower(): v\n for k, v in cfg.WANDB.items()\n if isinstance(k, str) and k not in [\"config\", \"name\"]\n }\n if \"config_exclude_keys\" in init_args:\n init_args[\"config\"] = cfg\n init_args[\"config\"][\"cfg_file\"] = args.config_file\n else:\n init_args[\"config\"] = {\n \"output_dir\": cfg.OUTPUT_DIR,\n \"train\": extract_dataset_from_string(cfg.DATASETS.TRAIN),\n \"test\": extract_dataset_from_string(cfg.DATASETS.TEST),\n \"iter\": cfg.SOLVER.MAX_ITER,\n \"lr\": cfg.SOLVER.BASE_LR,\n \"batch_size\": cfg.SOLVER.IMS_PER_BATCH,\n \"cfg_file\": args.config_file,\n }\n\n init_args[\"group\"] = get_base_name(cfg)\n if cfg.WANDB.NAME is not None:\n init_args[\"name\"] = cfg.WANDB.NAME\n else:\n init_args[\"name\"] = get_full_name_xp(init_args[\"group\"], cfg)\n if \"debug\" in cfg.OUTPUT_DIR:\n init_args[\"project\"] = \"debug\"\n wandb.init(**init_args)" } ]
from shapely.errors import ShapelyDeprecationWarning from collections import OrderedDict from typing import Any, Dict, List, Set from detectron2.checkpoint import DetectionCheckpointer from detectron2.config import get_cfg from detectron2.data import ( MetadataCatalog, build_detection_train_loader, build_detection_test_loader, ) from detectron2.engine import ( DefaultTrainer, default_argument_parser, default_setup, launch, ) from detectron2.modeling import build_model from detectron2.evaluation import ( CityscapesInstanceEvaluator, CityscapesSemSegEvaluator, COCOEvaluator, COCOPanopticEvaluator, DatasetEvaluators, LVISEvaluator, SemSegEvaluator, verify_results, inference_on_dataset, print_csv_format, DatasetEvaluator, ) from detectron2.projects.deeplab import add_deeplab_config, build_lr_scheduler from detectron2.solver.build import maybe_add_gradient_clipping from detectron2.utils.logger import setup_logger from detectron2.engine import hooks from fvcore.nn.precise_bn import get_bn_modules from clouds import ( CityscapesSemSegEvaluator, ClassicalSemSegEvaluator, MapperTrain, MapperTest, add_maskformer2_config, add_clouds_config, add_wandb_config, add_prerocessing_training_set_config, PersoEvalHook, add_repeat_factors, ) from clouds.utils import setup_wandb, WandbWriter import warnings import copy import itertools import logging import os import ast import torch import detectron2.utils.comm as comm
12,975
""" Copyright 2023 Telecom Paris, Yasser BENIGMIM. All rights reserved. Licensed under the Apache License, Version 2.0 Reference: https://github.com/facebookresearch/Mask2Former/blob/main/train_net.py CLOUDS Training Script. This script is a simplified version of the training script in detectron2/tools. """ try: # ignore ShapelyDeprecationWarning from fvcore warnings.filterwarnings("ignore", category=ShapelyDeprecationWarning) except: pass class Trainer(DefaultTrainer): """ Extension of the Trainer class adapted to CLOUDS. """ def build_writers(self): writers = super().build_writers() # use wandb writer instead. writers[-1] = WandbWriter() return writers @classmethod def build_model(cls, cfg): """ Returns: torch.nn.Module: It now calls :func:`detectron2.modeling.build_model`. Overwrite it if you'd like a different model. """ model = build_model(cfg) # logger = logging.getLogger(__name__) # logger.info("Model:\n{}".format(model)) return model # @classmethod # def build_model(cls, cfg): # """ # Returns: # torch.nn.Module: # # It now calls :func:`detectron2.modeling.build_model`. # Overwrite it if you'd like a different model. # """ # model = build_model(cfg) # # logger = logging.getLogger(__name__) # # logger.info("Model:\n{}".format(model)) # return model @classmethod def build_evaluator(cls, cfg, dataset_name, output_folder=None): """ Create evaluator(s) for a given dataset. This uses the special metadata "evaluator_type" associated with each builtin dataset. For your own dataset, you can simply create an evaluator manually in your script and do not have to worry about the hacky if-else logic here. """ if output_folder is None: output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") else: output_folder = os.path.join(cfg.OUTPUT_DIR, output_folder, "inference") evaluator_list = [] evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type # semantic segmentation if ( evaluator_type == "bdd_sem_seg" or evaluator_type == "mapillary_sem_seg" or evaluator_type == "acdc_sem_seg" ): evaluator_list.append( ClassicalSemSegEvaluator( dataset_name, distributed=True, output_dir=output_folder, save_pl=cfg.MODEL.SAVE_PSEUDO_LABELS, ) ) # Cityscapes if evaluator_type == "cityscapes_sem_seg": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." # return CityscapesSemSegEvaluator(dataset_name) if cfg.MODEL.SAVE_PSEUDO_LABELS: return CityscapesSemSegEvaluator( dataset_name, save_pl=True, output_dir=output_folder ) else: return CityscapesSemSegEvaluator(dataset_name) if len(evaluator_list) == 0: raise NotImplementedError( "no Evaluator for the dataset {} with the type {}".format( dataset_name, evaluator_type ) ) elif len(evaluator_list) == 1: return evaluator_list[0] return DatasetEvaluators(evaluator_list) @classmethod def build_train_loader(cls, cfg): # Semantic segmentation dataset mapper mapper = MapperTrain(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) @classmethod def build_test_loader(cls, cfg, dataset_name):
""" Copyright 2023 Telecom Paris, Yasser BENIGMIM. All rights reserved. Licensed under the Apache License, Version 2.0 Reference: https://github.com/facebookresearch/Mask2Former/blob/main/train_net.py CLOUDS Training Script. This script is a simplified version of the training script in detectron2/tools. """ try: # ignore ShapelyDeprecationWarning from fvcore warnings.filterwarnings("ignore", category=ShapelyDeprecationWarning) except: pass class Trainer(DefaultTrainer): """ Extension of the Trainer class adapted to CLOUDS. """ def build_writers(self): writers = super().build_writers() # use wandb writer instead. writers[-1] = WandbWriter() return writers @classmethod def build_model(cls, cfg): """ Returns: torch.nn.Module: It now calls :func:`detectron2.modeling.build_model`. Overwrite it if you'd like a different model. """ model = build_model(cfg) # logger = logging.getLogger(__name__) # logger.info("Model:\n{}".format(model)) return model # @classmethod # def build_model(cls, cfg): # """ # Returns: # torch.nn.Module: # # It now calls :func:`detectron2.modeling.build_model`. # Overwrite it if you'd like a different model. # """ # model = build_model(cfg) # # logger = logging.getLogger(__name__) # # logger.info("Model:\n{}".format(model)) # return model @classmethod def build_evaluator(cls, cfg, dataset_name, output_folder=None): """ Create evaluator(s) for a given dataset. This uses the special metadata "evaluator_type" associated with each builtin dataset. For your own dataset, you can simply create an evaluator manually in your script and do not have to worry about the hacky if-else logic here. """ if output_folder is None: output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") else: output_folder = os.path.join(cfg.OUTPUT_DIR, output_folder, "inference") evaluator_list = [] evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type # semantic segmentation if ( evaluator_type == "bdd_sem_seg" or evaluator_type == "mapillary_sem_seg" or evaluator_type == "acdc_sem_seg" ): evaluator_list.append( ClassicalSemSegEvaluator( dataset_name, distributed=True, output_dir=output_folder, save_pl=cfg.MODEL.SAVE_PSEUDO_LABELS, ) ) # Cityscapes if evaluator_type == "cityscapes_sem_seg": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." # return CityscapesSemSegEvaluator(dataset_name) if cfg.MODEL.SAVE_PSEUDO_LABELS: return CityscapesSemSegEvaluator( dataset_name, save_pl=True, output_dir=output_folder ) else: return CityscapesSemSegEvaluator(dataset_name) if len(evaluator_list) == 0: raise NotImplementedError( "no Evaluator for the dataset {} with the type {}".format( dataset_name, evaluator_type ) ) elif len(evaluator_list) == 1: return evaluator_list[0] return DatasetEvaluators(evaluator_list) @classmethod def build_train_loader(cls, cfg): # Semantic segmentation dataset mapper mapper = MapperTrain(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) @classmethod def build_test_loader(cls, cfg, dataset_name):
mapper = MapperTest(cfg, False)
6
2023-12-15 15:40:58+00:00
16k
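The `ClassicalSemSegEvaluator` snippet in the context above accumulates an (N+1) x (N+1) confusion matrix via `np.bincount` and then reduces it to mIoU. A minimal sketch of that reduction, assuming the ignore-label row and column have already been dropped (an illustration, not the evaluator's exact code):

import numpy as np

def miou_from_confusion(conf: np.ndarray) -> float:
    # conf[i, j] counts pixels predicted as class i whose ground truth is class j.
    tp = np.diag(conf).astype(float)
    pos_gt = conf.sum(axis=0).astype(float)    # ground-truth pixels per class (column sums)
    pos_pred = conf.sum(axis=1).astype(float)  # predicted pixels per class (row sums)
    union = pos_gt + pos_pred - tp
    iou = np.where(union > 0, tp / np.maximum(union, 1), np.nan)
    return float(np.nanmean(iou))

The evaluator's pACC and fwIoU come from the same matrix: the summed diagonal over the summed ground-truth counts, and an IoU average weighted by per-class ground-truth frequency, respectively.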
modelscope/scepter
scepter/modules/model/network/ldm/ldm.py
[ { "identifier": "GaussianDiffusion", "path": "scepter/modules/model/network/diffusion/diffusion.py", "snippet": "class GaussianDiffusion(object):\n def __init__(self, sigmas, prediction_type='eps'):\n assert prediction_type in {'x0', 'eps', 'v'}\n self.sigmas = sigmas # noise coefficients\n self.alphas = torch.sqrt(1 - sigmas**2) # signal coefficients\n self.num_timesteps = len(sigmas)\n self.prediction_type = prediction_type\n\n def diffuse(self, x0, t, noise=None):\n \"\"\"\n Add Gaussian noise to signal x0 according to:\n q(x_t | x_0) = N(x_t | alpha_t x_0, sigma_t^2 I).\n \"\"\"\n noise = torch.randn_like(x0) if noise is None else noise\n xt = _i(self.alphas, t, x0) * x0 + _i(self.sigmas, t, x0) * noise\n return xt\n\n def denoise(self,\n xt,\n t,\n s,\n model,\n model_kwargs={},\n guide_scale=None,\n guide_rescale=None,\n clamp=None,\n percentile=None,\n cat_uc=False):\n \"\"\"\n Apply one step of denoising from the posterior distribution q(x_s | x_t, x0).\n Since x0 is not available, estimate the denoising results using the learned\n distribution p(x_s | x_t, \\hat{x}_0 == f(x_t)). # noqa\n \"\"\"\n s = t - 1 if s is None else s\n\n # hyperparams\n sigmas = _i(self.sigmas, t, xt)\n alphas = _i(self.alphas, t, xt)\n alphas_s = _i(self.alphas, s.clamp(0), xt)\n alphas_s[s < 0] = 1.\n sigmas_s = torch.sqrt(1 - alphas_s**2)\n\n # precompute variables\n betas = 1 - (alphas / alphas_s)**2\n coef1 = betas * alphas_s / sigmas**2\n coef2 = (alphas * sigmas_s**2) / (alphas_s * sigmas**2)\n var = betas * (sigmas_s / sigmas)**2\n log_var = torch.log(var).clamp_(-20, 20)\n\n # prediction\n if guide_scale is None:\n assert isinstance(model_kwargs, dict)\n out = model(xt, t=t, **model_kwargs)\n else:\n # classifier-free guidance (arXiv:2207.12598)\n # model_kwargs[0]: conditional kwargs\n # model_kwargs[1]: non-conditional kwargs\n assert isinstance(model_kwargs, list) and len(model_kwargs) == 2\n\n if guide_scale == 1.:\n out = model(xt, t=t, **model_kwargs[0])\n else:\n if cat_uc:\n\n def parse_model_kwargs(prev_value, value):\n if isinstance(value, torch.Tensor):\n prev_value = torch.cat([prev_value, value], dim=0)\n elif isinstance(value, dict):\n for k, v in value.items():\n prev_value[k] = parse_model_kwargs(\n prev_value[k], v)\n elif isinstance(value, list):\n for idx, v in enumerate(value):\n prev_value[idx] = parse_model_kwargs(\n prev_value[idx], v)\n return prev_value\n\n all_model_kwargs = copy.deepcopy(model_kwargs[0])\n for model_kwarg in model_kwargs[1:]:\n for key, value in model_kwarg.items():\n all_model_kwargs[key] = parse_model_kwargs(\n all_model_kwargs[key], value)\n all_out = model(xt.repeat(2, 1, 1, 1),\n t=t.repeat(2),\n **all_model_kwargs)\n y_out, u_out = all_out.chunk(2)\n else:\n y_out = model(xt, t=t, **model_kwargs[0])\n u_out = model(xt, t=t, **model_kwargs[1])\n out = u_out + guide_scale * (y_out - u_out)\n\n # rescale the output according to arXiv:2305.08891\n if guide_rescale is not None:\n assert guide_rescale >= 0 and guide_rescale <= 1\n ratio = (y_out.flatten(1).std(dim=1) /\n (out.flatten(1).std(dim=1) +\n 1e-12)).view((-1, ) + (1, ) * (y_out.ndim - 1))\n out *= guide_rescale * ratio + (1 - guide_rescale) * 1.0\n # compute x0\n if self.prediction_type == 'x0':\n x0 = out\n elif self.prediction_type == 'eps':\n x0 = (xt - sigmas * out) / alphas\n elif self.prediction_type == 'v':\n x0 = alphas * xt - sigmas * out\n else:\n raise NotImplementedError(\n f'prediction_type {self.prediction_type} not implemented')\n\n # restrict the range of x0\n if percentile 
is not None:\n # NOTE: percentile should only be used when data is within range [-1, 1]\n assert percentile > 0 and percentile <= 1\n s = torch.quantile(x0.flatten(1).abs(), percentile, dim=1)\n s = s.clamp_(1.0).view((-1, ) + (1, ) * (xt.ndim - 1))\n x0 = torch.min(s, torch.max(-s, x0)) / s\n elif clamp is not None:\n x0 = x0.clamp(-clamp, clamp)\n\n # recompute eps using the restricted x0\n eps = (xt - alphas * x0) / sigmas\n\n # compute mu (mean of posterior distribution) using the restricted x0\n mu = coef1 * x0 + coef2 * xt\n return mu, var, log_var, x0, eps\n\n def loss(self,\n x0,\n t,\n model,\n model_kwargs={},\n reduction='mean',\n noise=None,\n **kwargs):\n # hyperparams\n sigmas = _i(self.sigmas, t, x0)\n alphas = _i(self.alphas, t, x0)\n\n # diffuse and denoise\n if noise is None:\n noise = torch.randn_like(x0)\n xt = self.diffuse(x0, t, noise)\n out = model(xt, t=t, **model_kwargs, **kwargs)\n\n # mse loss\n target = {\n 'eps': noise,\n 'x0': x0,\n 'v': alphas * noise - sigmas * x0\n }[self.prediction_type]\n loss = (out - target).pow(2)\n if reduction == 'mean':\n loss = loss.flatten(1).mean(dim=1)\n return loss\n\n @torch.no_grad()\n def sample(self,\n noise,\n model,\n x=None,\n denoising_strength=1.0,\n refine_stage=False,\n refine_strength=0.0,\n model_kwargs={},\n condition_fn=None,\n guide_scale=None,\n guide_rescale=None,\n clamp=None,\n percentile=None,\n solver='euler_a',\n steps=20,\n t_max=None,\n t_min=None,\n discretization=None,\n discard_penultimate_step=None,\n return_intermediate=None,\n show_progress=False,\n seed=-1,\n intermediate_callback=None,\n cat_uc=False,\n **kwargs):\n # sanity check\n assert isinstance(steps, (int, torch.LongTensor))\n assert t_max is None or (t_max > 0 and t_max <= self.num_timesteps - 1)\n assert t_min is None or (t_min >= 0 and t_min < self.num_timesteps - 1)\n assert discretization in (None, 'leading', 'linspace', 'trailing')\n assert discard_penultimate_step in (None, True, False)\n assert return_intermediate in (None, 'x0', 'xt')\n\n # function of diffusion solver\n solver_fn = {\n 'ddim': sample_ddim,\n 'euler_ancestral': sample_euler_ancestral,\n 'euler': sample_euler,\n 'heun': sample_heun,\n 'dpm2': sample_dpm_2,\n 'dpm2_ancestral': sample_dpm_2_ancestral,\n 'dpmpp_2s_ancestral': sample_dpmpp_2s_ancestral,\n 'dpmpp_2m': sample_dpmpp_2m,\n 'dpmpp_sde': sample_dpmpp_sde,\n 'dpmpp_2m_sde': sample_dpmpp_2m_sde,\n 'dpm2_karras': sample_dpm_2,\n 'dpm2_ancestral_karras': sample_dpm_2_ancestral,\n 'dpmpp_2s_ancestral_karras': sample_dpmpp_2s_ancestral,\n 'dpmpp_2m_karras': sample_dpmpp_2m,\n 'dpmpp_sde_karras': sample_dpmpp_sde,\n 'dpmpp_2m_sde_karras': sample_dpmpp_2m_sde\n }[solver]\n\n # options\n schedule = 'karras' if 'karras' in solver else None\n discretization = discretization or 'linspace'\n seed = seed if seed >= 0 else random.randint(0, 2**31)\n if isinstance(steps, torch.LongTensor):\n discard_penultimate_step = False\n if discard_penultimate_step is None:\n discard_penultimate_step = True if solver in (\n 'dpm2', 'dpm2_ancestral', 'dpmpp_2m_sde', 'dpm2_karras',\n 'dpm2_ancestral_karras', 'dpmpp_2m_sde_karras') else False\n\n # function for denoising xt to get x0\n intermediates = []\n\n def model_fn(xt, sigma):\n # denoising\n t = self._sigma_to_t(sigma).repeat(len(xt)).round().long()\n x0 = self.denoise(xt,\n t,\n None,\n model,\n model_kwargs,\n guide_scale,\n guide_rescale,\n clamp,\n percentile,\n cat_uc=cat_uc)[-2]\n\n # collect intermediate outputs\n if return_intermediate == 'xt':\n intermediates.append(xt)\n 
elif return_intermediate == 'x0':\n intermediates.append(x0)\n if intermediate_callback is not None:\n intermediate_callback(intermediates[-1])\n return x0\n\n # get timesteps\n if isinstance(steps, int):\n steps += 1 if discard_penultimate_step else 0\n t_max = self.num_timesteps - 1 if t_max is None else t_max\n t_min = 0 if t_min is None else t_min\n\n # discretize timesteps\n if discretization == 'leading':\n steps = torch.arange(t_min, t_max + 1,\n (t_max - t_min + 1) / steps).flip(0)\n elif discretization == 'linspace':\n steps = torch.linspace(t_max, t_min, steps)\n elif discretization == 'trailing':\n steps = torch.arange(t_max, t_min - 1,\n -((t_max - t_min + 1) / steps))\n else:\n raise NotImplementedError(\n f'{discretization} discretization not implemented')\n steps = steps.clamp_(t_min, t_max)\n steps = torch.as_tensor(steps,\n dtype=torch.float32,\n device=noise.device)\n\n # get sigmas\n sigmas = self._t_to_sigma(steps)\n sigmas = torch.cat([sigmas, sigmas.new_zeros([1])])\n t_enc = int(min(denoising_strength, 0.999) * len(steps))\n sigmas = sigmas[len(steps) - t_enc - 1:]\n if refine_strength > 0:\n t_refine = int(min(refine_strength, 0.999) * len(steps))\n if refine_stage:\n sigmas = sigmas[-t_refine:]\n else:\n sigmas = sigmas[:-t_refine + 1]\n # print(sigmas)\n if x is not None:\n noise = (x + noise * sigmas[0]) / torch.sqrt(1.0 + sigmas[0]**2.0)\n\n if schedule == 'karras':\n if sigmas[0] == float('inf'):\n sigmas = karras_schedule(\n n=len(steps) - 1,\n sigma_min=sigmas[sigmas > 0].min().item(),\n sigma_max=sigmas[sigmas < float('inf')].max().item(),\n rho=7.).to(sigmas)\n sigmas = torch.cat([\n sigmas.new_tensor([float('inf')]), sigmas,\n sigmas.new_zeros([1])\n ])\n else:\n sigmas = karras_schedule(\n n=len(steps),\n sigma_min=sigmas[sigmas > 0].min().item(),\n sigma_max=sigmas.max().item(),\n rho=7.).to(sigmas)\n sigmas = torch.cat([sigmas, sigmas.new_zeros([1])])\n if discard_penultimate_step:\n sigmas = torch.cat([sigmas[:-2], sigmas[-1:]])\n kwargs['seed'] = seed\n # sampling\n x0 = solver_fn(noise,\n model_fn,\n sigmas,\n show_progress=show_progress,\n **kwargs)\n return (x0, intermediates) if return_intermediate is not None else x0\n\n def _sigma_to_t(self, sigma):\n if sigma == float('inf'):\n t = torch.full_like(sigma, len(self.sigmas) - 1)\n else:\n log_sigmas = torch.sqrt(self.sigmas**2 /\n (1 - self.sigmas**2)).log().to(sigma)\n log_sigma = sigma.log()\n dists = log_sigma - log_sigmas[:, None]\n low_idx = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(\n max=log_sigmas.shape[0] - 2)\n high_idx = low_idx + 1\n low, high = log_sigmas[low_idx], log_sigmas[high_idx]\n w = (low - log_sigma) / (low - high)\n w = w.clamp(0, 1)\n t = (1 - w) * low_idx + w * high_idx\n t = t.view(sigma.shape)\n if t.ndim == 0:\n t = t.unsqueeze(0)\n return t\n\n def _t_to_sigma(self, t):\n t = t.float()\n low_idx, high_idx, w = t.floor().long(), t.ceil().long(), t.frac()\n log_sigmas = torch.sqrt(self.sigmas**2 /\n (1 - self.sigmas**2)).log().to(t)\n log_sigma = (1 - w) * log_sigmas[low_idx] + w * log_sigmas[high_idx]\n log_sigma[torch.isnan(log_sigma)\n | torch.isinf(log_sigma)] = float('inf')\n return log_sigma.exp()\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, steps):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n\n t_max = None\n t_min = None\n\n # discretization method\n discretization = 'trailing' if self.prediction_type == 'v' else 'leading'\n\n # timesteps\n if isinstance(steps, int):\n t_max = 
self.num_timesteps - 1 if t_max is None else t_max\n t_min = 0 if t_min is None else t_min\n steps = discretize_timesteps(t_max, t_min, steps, discretization)\n steps = torch.as_tensor(steps).round().long().flip(0).to(x0.device)\n # steps = torch.as_tensor(steps).round().long().to(x0.device)\n\n # self.alphas_bar = torch.cumprod(1 - self.sigmas ** 2, dim=0)\n # print('sigma: ', self.sigmas, len(self.sigmas))\n # print('alpha_bar: ', self.alphas_bar, len(self.alphas_bar))\n # print('steps: ', steps, len(steps))\n # sqrt_alphas_cumprod = torch.sqrt(self.alphas_bar).to(x0.device)[steps]\n # sqrt_one_minus_alphas_cumprod = torch.sqrt(1 - self.alphas_bar).to(x0.device)[steps]\n\n sqrt_alphas_cumprod = self.alphas.to(x0.device)[steps]\n sqrt_one_minus_alphas_cumprod = self.sigmas.to(x0.device)[steps]\n # print('sigma: ', self.sigmas, len(self.sigmas))\n # print('alpha: ', self.alphas, len(self.alphas))\n # print('steps: ', steps, len(steps))\n\n noise = torch.randn_like(x0)\n return (\n extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +\n extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) *\n noise)\n\n @torch.no_grad()\n def sample_img2img(self,\n x,\n noise,\n model,\n denoising_strength=1,\n model_kwargs={},\n condition_fn=None,\n guide_scale=None,\n guide_rescale=None,\n clamp=None,\n percentile=None,\n solver='euler_a',\n steps=20,\n t_max=None,\n t_min=None,\n discretization=None,\n discard_penultimate_step=None,\n return_intermediate=None,\n show_progress=False,\n seed=-1,\n **kwargs):\n # sanity check\n assert isinstance(steps, (int, torch.LongTensor))\n assert t_max is None or (t_max > 0 and t_max <= self.num_timesteps - 1)\n assert t_min is None or (t_min >= 0 and t_min < self.num_timesteps - 1)\n assert discretization in (None, 'leading', 'linspace', 'trailing')\n assert discard_penultimate_step in (None, True, False)\n assert return_intermediate in (None, 'x0', 'xt')\n # function of diffusion solver\n solver_fn = {\n 'euler_ancestral': sample_img2img_euler_ancestral,\n 'euler': sample_img2img_euler,\n }[solver]\n # options\n schedule = 'karras' if 'karras' in solver else None\n discretization = discretization or 'linspace'\n seed = seed if seed >= 0 else random.randint(0, 2**31)\n if isinstance(steps, torch.LongTensor):\n discard_penultimate_step = False\n if discard_penultimate_step is None:\n discard_penultimate_step = True if solver in (\n 'dpm2', 'dpm2_ancestral', 'dpmpp_2m_sde', 'dpm2_karras',\n 'dpm2_ancestral_karras', 'dpmpp_2m_sde_karras') else False\n\n # function for denoising xt to get x0\n intermediates = []\n\n def get_scalings(sigma):\n c_out = -sigma\n c_in = 1 / (sigma**2 + 1.**2)**0.5\n return c_out, c_in\n\n def model_fn(xt, sigma):\n # denoising\n c_out, c_in = get_scalings(sigma)\n t = self._sigma_to_t(sigma).repeat(len(xt)).round().long()\n\n x0 = self.denoise(xt * c_in, t, None, model, model_kwargs,\n guide_scale, guide_rescale, clamp,\n percentile)[-2]\n # collect intermediate outputs\n if return_intermediate == 'xt':\n intermediates.append(xt)\n elif return_intermediate == 'x0':\n intermediates.append(x0)\n return xt + x0 * c_out\n\n # get timesteps\n if isinstance(steps, int):\n steps += 1 if discard_penultimate_step else 0\n t_max = self.num_timesteps - 1 if t_max is None else t_max\n t_min = 0 if t_min is None else t_min\n # discretize timesteps\n if discretization == 'leading':\n steps = torch.arange(t_min, t_max + 1,\n (t_max - t_min + 1) / steps).flip(0)\n elif discretization == 'linspace':\n steps = torch.linspace(t_max, t_min, 
steps)\n elif discretization == 'trailing':\n steps = torch.arange(t_max, t_min - 1,\n -((t_max - t_min + 1) / steps))\n else:\n raise NotImplementedError(\n f'{discretization} discretization not implemented')\n steps = steps.clamp_(t_min, t_max)\n steps = torch.as_tensor(steps, dtype=torch.float32, device=x.device)\n # get sigmas\n sigmas = self._t_to_sigma(steps)\n sigmas = torch.cat([sigmas, sigmas.new_zeros([1])])\n t_enc = int(min(denoising_strength, 0.999) * len(steps))\n sigmas = sigmas[len(steps) - t_enc - 1:]\n noise = x + noise * sigmas[0]\n\n if schedule == 'karras':\n if sigmas[0] == float('inf'):\n sigmas = karras_schedule(\n n=len(steps) - 1,\n sigma_min=sigmas[sigmas > 0].min().item(),\n sigma_max=sigmas[sigmas < float('inf')].max().item(),\n rho=7.).to(sigmas)\n sigmas = torch.cat([\n sigmas.new_tensor([float('inf')]), sigmas,\n sigmas.new_zeros([1])\n ])\n else:\n sigmas = karras_schedule(\n n=len(steps),\n sigma_min=sigmas[sigmas > 0].min().item(),\n sigma_max=sigmas.max().item(),\n rho=7.).to(sigmas)\n sigmas = torch.cat([sigmas, sigmas.new_zeros([1])])\n if discard_penultimate_step:\n sigmas = torch.cat([sigmas[:-2], sigmas[-1:]])\n\n # sampling\n x0 = solver_fn(noise,\n model_fn,\n sigmas,\n seed=seed,\n show_progress=show_progress,\n **kwargs)\n return (x0, intermediates) if return_intermediate is not None else x0" }, { "identifier": "noise_schedule", "path": "scepter/modules/model/network/diffusion/schedules.py", "snippet": "def noise_schedule(schedule='logsnr_cosine_interp',\n n=1000,\n zero_terminal_snr=False,\n **kwargs):\n # compute sigmas\n sigmas = {\n 'linear': linear_schedule,\n 'scaled_linear': scaled_linear_schedule,\n 'quadratic': quadratic_schedule,\n 'cosine': cosine_schedule,\n 'sigmoid': sigmoid_schedule,\n 'karras': karras_schedule,\n 'exponential': exponential_schedule,\n 'polyexponential': polyexponential_schedule,\n 'vp': vp_schedule,\n 'logsnr_cosine': logsnr_cosine_schedule,\n 'logsnr_cosine_shifted': logsnr_cosine_shifted_schedule,\n 'logsnr_cosine_interp': logsnr_cosine_interp_schedule\n }[schedule](n, **kwargs)\n\n # post-processing\n if zero_terminal_snr and sigmas.max() != 1.0:\n scale = (1.0 - sigmas.min()) / (sigmas.max() - sigmas.min())\n sigmas = sigmas.min() + scale * (sigmas - sigmas.min())\n return sigmas" }, { "identifier": "TrainModule", "path": "scepter/modules/model/network/train_module.py", "snippet": "class TrainModule(BaseModel, metaclass=ABCMeta):\n para_dict = {}\n\n def __init__(self, cfg, logger=None):\n super(TrainModule, self).__init__(cfg, logger=logger)\n self.logger = logger\n self.cfg = cfg\n\n @abstractmethod\n def forward(self, *inputs, **kwargs):\n pass\n\n @abstractmethod\n def forward_train(self, *inputs, **kwargs):\n pass\n\n @abstractmethod\n @torch.no_grad()\n def forward_test(self, *inputs, **kwargs):\n pass\n\n @staticmethod\n def get_config_template():\n '''\n { \"ENV\" :\n { \"description\" : \"\",\n \"A\" : {\n \"value\": 1.0,\n \"description\": \"\"\n }\n }\n }\n :return:\n '''\n return dict_to_yaml('networkname',\n __class__.__name__,\n TrainModule.para_dict,\n set_name=True)" }, { "identifier": "BACKBONES", "path": "scepter/modules/model/registry.py", "snippet": "BACKBONES = Registry('BACKBONES', build_func=build_model)" }, { "identifier": "EMBEDDERS", "path": "scepter/modules/model/registry.py", "snippet": "EMBEDDERS = Registry('EMBEDDERS', build_func=build_model)" }, { "identifier": "LOSSES", "path": "scepter/modules/model/registry.py", "snippet": "LOSSES = Registry('LOSSES', build_func=build_model)" 
}, { "identifier": "MODELS", "path": "scepter/modules/model/registry.py", "snippet": "MODELS = Registry('MODELS', build_func=build_model)" }, { "identifier": "TOKENIZERS", "path": "scepter/modules/model/registry.py", "snippet": "TOKENIZERS = Registry('TOKENIZER', build_func=build_model)" }, { "identifier": "count_params", "path": "scepter/modules/model/utils/basic_utils.py", "snippet": "def count_params(model):\n total_params = sum(p.numel() for p in model.parameters())\n return transfer_size(total_params)" }, { "identifier": "default", "path": "scepter/modules/model/utils/basic_utils.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" }, { "identifier": "dict_to_yaml", "path": "scepter/modules/utils/config.py", "snippet": "def dict_to_yaml(module_name, name, json_config, set_name=False):\n '''\n { \"ENV\" :\n { \"description\" : \"\",\n \"A\" : {\n \"value\": 1.0,\n \"description\": \"\"\n }\n }\n }\n convert std dict to yaml\n :param module_name:\n :param json_config:\n :return:\n '''\n def convert_yaml_style(level=1,\n name='ENV',\n description='ENV PARA',\n default='',\n type_name='',\n is_sys=False):\n new_line = ''\n new_line += '{}# {} DESCRIPTION: {} TYPE: {} default: {}\\n'.format(\n '\\t' * (level - 1), name.upper(), description, type_name,\n f'\\'{default}\\'' if isinstance(default, str) else default)\n if is_sys:\n if name == '-':\n new_line += '{}{}\\n'.format('\\t' * (level - 1), name.upper())\n else:\n new_line += '{}{}:\\n'.format('\\t' * (level - 1), name.upper())\n else:\n # if isinstance(default, str):\n # default = f'\\'{default}\\''\n if default is None:\n new_line += '{}# {}: {}\\n'.format('\\t' * (level - 1),\n name.upper(), default)\n else:\n new_line += '{}{}: {}\\n'.format('\\t' * (level - 1),\n name.upper(), default)\n return new_line\n\n def parse_dict(json_config,\n level_num,\n parent_key,\n set_name=False,\n name='',\n parent_type='dict'):\n yaml_str = ''\n # print(level_num, json_config)\n if isinstance(json_config, dict):\n if 'value' in json_config:\n value = json_config['value']\n if isinstance(value, dict):\n assert len(value) < 1\n value = None\n description = json_config.get('description', '')\n yaml_str += convert_yaml_style(level=level_num - 1,\n name=parent_key,\n description=description,\n default=value,\n type_name=type(value).__name__)\n return True, yaml_str\n else:\n if len(json_config) < 1:\n yaml_str += convert_yaml_style(level=level_num,\n name='NAME',\n description='',\n default='',\n type_name='')\n level_num += 1\n for k, v in json_config.items():\n if k == 'description':\n continue\n if isinstance(v, dict):\n is_final, new_yaml_str = parse_dict(v,\n level_num,\n k,\n parent_type='dict')\n if not is_final and parent_type == 'dict':\n description = v.get('description', '')\n yaml_str += convert_yaml_style(\n level=level_num - 1,\n name=k,\n description=description,\n default='',\n type_name='',\n is_sys=True)\n if not is_final and parent_type == 'list':\n yaml_str += convert_yaml_style(level=level_num,\n name='NAME',\n description='',\n default=k,\n type_name='')\n yaml_str += new_yaml_str\n elif isinstance(v, list):\n base_yaml_str = convert_yaml_style(level=level_num - 1,\n name=k,\n description='',\n default='',\n type_name='',\n is_sys=True)\n yaml_str += base_yaml_str\n for tup in v:\n is_final, new_yaml_str = parse_dict(\n tup, level_num, '-', parent_type='list')\n if not is_final:\n yaml_str += convert_yaml_style(level=level_num,\n name='-',\n description='',\n default='',\n 
type_name='',\n is_sys=True)\n yaml_str += new_yaml_str\n else:\n raise KeyError(\n f'json config {json_config} must be a dict of list'\n )\n\n elif isinstance(json_config, list):\n level_num += 1\n for tup in json_config:\n is_final, new_yaml_str = parse_dict(tup, level_num, '-')\n if not is_final:\n\n yaml_str += convert_yaml_style(level=level_num - 1,\n name='-',\n description='',\n default='',\n type_name='',\n is_sys=True)\n if set_name:\n yaml_str += convert_yaml_style(level=level_num,\n name='NAME',\n description='',\n default=name,\n type_name='')\n yaml_str += new_yaml_str\n else:\n raise KeyError(f'json config {json_config} must be a dict')\n return False, yaml_str\n\n if isinstance(json_config, dict):\n first_dict, sec_dict, third_dict = {}, {}, {}\n for key, value in json_config.items():\n if isinstance(value, dict) and len(value) > 0:\n first_dict[key] = value\n elif isinstance(value, dict) and len(value) == 0:\n sec_dict[key] = value\n elif isinstance(value, list):\n third_dict[key] = value\n else:\n raise f'Config {json_config} is illegal'\n json_config = {}\n json_config.update(first_dict)\n json_config.update(sec_dict)\n json_config.update(third_dict)\n\n yaml_str = f'[{module_name}] module yaml examples:\\n'\n level_num = 1\n base_yaml_str = convert_yaml_style(level=level_num,\n name=module_name,\n description='',\n default='',\n type_name='',\n is_sys=True)\n level_num += 1\n\n is_final, new_yaml_str = parse_dict(json_config,\n level_num,\n module_name,\n set_name=isinstance(json_config, list)\n and set_name,\n name=name)\n if not is_final:\n yaml_str += base_yaml_str\n if set_name and not isinstance(json_config, list):\n yaml_str += convert_yaml_style(level=level_num,\n name='NAME',\n description='',\n default=name,\n type_name='')\n yaml_str += new_yaml_str\n else:\n yaml_str += new_yaml_str[1:]\n\n return yaml_str" }, { "identifier": "we", "path": "scepter/modules/utils/distribute.py", "snippet": " def set_random_seed(seed):\ndef get_dist_info():\ndef gather_data(data):\ndef gather_list(data):\ndef gather_picklable(data):\ndef _gather_picklable_custom(data):\ndef gather_gpu_tensors(tensor, all_recv=False, is_cat=True):\ndef broadcast(tensor, src, group=None, **kwargs):\ndef barrier():\ndef get_global_gloo_group():\ndef reduce_scatter(output,\n input_list,\n op=dist.ReduceOp.SUM,\n group=None,\n **kwargs):\ndef all_reduce(tensor, op=dist.ReduceOp.SUM, group=None, **kwargs):\ndef reduce(tensor, dst, op=dist.ReduceOp.SUM, group=None, **kwargs):\ndef _serialize_to_tensor(data):\ndef _unserialize_from_tensor(recv_data):\ndef send(tensor, dst, group=None, **kwargs):\ndef recv(tensor, src=None, group=None, **kwargs):\ndef isend(tensor, dst, group=None, **kwargs):\ndef irecv(tensor, src=None, group=None, **kwargs):\ndef scatter(data, scatter_list=None, src=0, group=None, **kwargs):\ndef shared_random_seed():\ndef mp_worker(gpu, ngpus_per_node, cfg, fn, pmi_rank, world_size, work_env):\n def __init__(self):\n def init_env(self, config, fn, logger=None):\n def get_env(self):\n def set_env(self, we_env):\n def __str__(self):\nclass Workenv(object):" }, { "identifier": "FS", "path": "scepter/modules/utils/file_system.py", "snippet": "FS = FileSystem()" } ]
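The context list above ends with sampler code that builds a Karras-style sigma schedule (karras_schedule with rho=7) and maps between continuous timesteps and sigmas by interpolating a log-sigma table (_sigma_to_t / _t_to_sigma). Below is a minimal sketch of both ideas, assuming the standard Karras et al. (2022) rho-schedule and a generic ascending log-sigma lookup table; the function names and the example table are illustrative, not the project's exact implementation.

import torch

def karras_sigmas(n, sigma_min=0.002, sigma_max=80.0, rho=7.0):
    # evenly spaced steps in sigma**(1/rho) space, raised back to the rho power;
    # returns a descending schedule from sigma_max down to sigma_min
    ramp = torch.linspace(0, 1, n)
    min_inv_rho = sigma_min ** (1 / rho)
    max_inv_rho = sigma_max ** (1 / rho)
    return (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho

def sigma_to_t(sigma, log_sigmas):
    # fractional timestep of `sigma` in an ascending log-sigma table,
    # via linear interpolation between the two neighbouring entries
    log_sigma = sigma.log()
    idx = torch.searchsorted(log_sigmas, log_sigma).clamp(1, len(log_sigmas) - 1)
    lo, hi = log_sigmas[idx - 1], log_sigmas[idx]
    w = ((log_sigma - lo) / (hi - lo)).clamp(0, 1)
    return (idx - 1) + w

def t_to_sigma(t, log_sigmas):
    # inverse mapping: interpolate the table at a fractional timestep
    lo, hi, w = t.floor().long(), t.ceil().long(), t.frac()
    return ((1 - w) * log_sigmas[lo] + w * log_sigmas[hi]).exp()

# example (illustrative table only):
# table = torch.linspace(-6.0, 5.0, 1000)   # ascending log-sigma lookup
# sigmas = karras_sigmas(20)                # 20-step sampling schedule
# ts = sigma_to_t(sigmas, table)            # fractional timesteps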
import numbers
import random
import torch
from collections import OrderedDict
from scepter.modules.model.network.diffusion.diffusion import GaussianDiffusion
from scepter.modules.model.network.diffusion.schedules import noise_schedule
from scepter.modules.model.network.train_module import TrainModule
from scepter.modules.model.registry import (BACKBONES, EMBEDDERS, LOSSES, MODELS, TOKENIZERS)
from scepter.modules.model.utils.basic_utils import count_params, default
from scepter.modules.utils.config import dict_to_yaml
from scepter.modules.utils.distribute import we
from scepter.modules.utils.file_system import FS
from safetensors.torch import load_file as load_safetensors
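The import list pulls in safetensors' load_file next to torch. A minimal sketch of the usual branch between the two checkpoint formats, mirroring the split that init_from_ckpt performs later in this record; the function name and the ignore-key filter here are illustrative.

import torch
from safetensors.torch import load_file as load_safetensors

def load_checkpoint(path, ignore_keys=()):
    # .safetensors archives go through safetensors; anything else is treated
    # as a regular torch checkpoint loaded onto the CPU
    if path.endswith('safetensors'):
        sd = load_safetensors(path)
    else:
        sd = torch.load(path, map_location='cpu')
    # drop any key that matches an ignore pattern before load_state_dict
    return {k: v for k, v in sd.items() if not any(ik in k for ik in ignore_keys)}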
10,882
h = int(meta['image_size'][0][0]) w = int(meta['image_size'][1][0]) image_size = [h, w] if 'image_size' in kwargs: image_size = kwargs.pop('image_size') if isinstance(image_size, numbers.Number): image_size = [image_size, image_size] if image_size is None: image_size = [1024, 1024] height, width = image_size noise = self.noise_sample(num_samples, height // self.size_factor, width // self.size_factor, g) # UNet use input n_prompt samples = self.diffusion.sample(solver=sampler, noise=noise, model=self.model, model_kwargs=[{ 'cond': context }, { 'cond': null_context }], steps=sample_steps, guide_scale=guide_scale, guide_rescale=guide_rescale, discretization=discretization, show_progress=True, seed=seed, condition_fn=None, clamp=None, percentile=None, t_max=None, t_min=None, discard_penultimate_step=None, return_intermediate=None, **kwargs) x_samples = self.decode_first_stage(samples).float() x_samples = torch.clamp((x_samples + 1.0) / 2.0, min=0.0, max=1.0) # UNet use train n_prompt if not self.default_n_prompt == self.train_n_prompt and run_train_n: train_n_prompt = [self.train_n_prompt] * len(prompt) null_train_context = self.encode_condition( self.tokenizer(train_n_prompt).to(we.device_id), method=method) tn_samples = self.diffusion.sample(solver=sampler, noise=noise, model=self.model, model_kwargs=[{ 'cond': context }, { 'cond': null_train_context }], steps=sample_steps, guide_scale=guide_scale, guide_rescale=guide_rescale, discretization=discretization, show_progress=we.rank == 0, seed=seed, condition_fn=None, clamp=None, percentile=None, t_max=None, t_min=None, discard_penultimate_step=None, return_intermediate=None, **kwargs) t_x_samples = self.decode_first_stage(tn_samples).float() t_x_samples = torch.clamp((t_x_samples + 1.0) / 2.0, min=0.0, max=1.0) else: train_n_prompt = ['' for _ in prompt] t_x_samples = [None for _ in prompt] outputs = list() for i, (p, np, tnp, img, t_img) in enumerate( zip(prompt, n_prompt, train_n_prompt, x_samples, t_x_samples)): one_tup = {'prompt': p, 'n_prompt': np, 'image': img} if hint is not None: one_tup.update({'hint': hint[i]}) if t_img is not None: one_tup['train_n_prompt'] = tnp one_tup['train_n_image'] = t_img outputs.append(one_tup) return outputs @torch.no_grad() def log_images(self, image=None, prompt=None, n_prompt=None, **kwargs): results = self.forward_test(prompt=prompt, n_prompt=n_prompt, **kwargs) outputs = list() for img, res in zip(image, results): one_tup = { 'orig': torch.clamp((img + 1.0) / 2.0, min=0.0, max=1.0), 'recon': res['image'], 'prompt': res['prompt'], 'n_prompt': res['n_prompt'] } if 'hint' in res: one_tup.update({'hint': res['hint']}) if 'train_n_prompt' in res: one_tup['train_n_prompt'] = res['train_n_prompt'] one_tup['train_n_image'] = res['train_n_image'] outputs.append(one_tup) return outputs @torch.no_grad() def encode_first_stage(self, x, **kwargs): z = self.first_stage_model.encode(x) return self.scale_factor * z @torch.no_grad() def decode_first_stage(self, z): z = 1. / self.scale_factor * z return self.first_stage_model.decode(z) @staticmethod def get_config_template():
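The cropped_code above samples with two model_kwargs entries (the conditional and the null context) plus guide_scale and guide_rescale. Below is a minimal sketch of one common way those two knobs are combined: classifier-free guidance followed by the std-matching rescale from Lin et al., "Common Diffusion Noise Schedules and Sample Steps Are Flawed". This is a standard formulation offered for orientation, not necessarily the exact code path inside GaussianDiffusion.denoise.

import torch

def apply_cfg(out_cond, out_uncond, guide_scale=7.5, guide_rescale=0.5):
    # classifier-free guidance: push the conditional prediction away from
    # the unconditional one
    out = out_uncond + guide_scale * (out_cond - out_uncond)
    if guide_rescale and guide_rescale > 0:
        # rescale the guided output to the per-sample std of the conditional
        # prediction, then blend with the unrescaled result
        dims = (-1,) + (1,) * (out.dim() - 1)
        std_cond = out_cond.flatten(1).std(dim=1).view(dims)
        std_out = out.flatten(1).std(dim=1).view(dims)
        rescaled = out * (std_cond / (std_out + 1e-8))
        out = guide_rescale * rescaled + (1 - guide_rescale) * out
    return out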
# -*- coding: utf-8 -*- # Copyright (c) Alibaba, Inc. and its affiliates. def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self @MODELS.register_class() class LatentDiffusion(TrainModule): para_dict = { 'PARAMETERIZATION': { 'value': 'v', 'description': "The prediction type, you can choose from 'eps' and 'x0' and 'v'", }, 'TIMESTEPS': { 'value': 1000, 'description': 'The schedule steps for diffusion.', }, 'SCHEDULE_ARGS': {}, 'MIN_SNR_GAMMA': { 'value': None, 'description': 'The minimum snr gamma, default is None.', }, 'ZERO_TERMINAL_SNR': { 'value': False, 'description': 'Whether zero terminal snr, default is False.', }, 'PRETRAINED_MODEL': { 'value': None, 'description': "Whole model's pretrained model path.", }, 'IGNORE_KEYS': { 'value': [], 'description': 'The ignore keys for pretrain model loaded.', }, 'SCALE_FACTOR': { 'value': 0.18215, 'description': 'The vae embeding scale.', }, 'SIZE_FACTOR': { 'value': 8, 'description': 'The vae size factor.', }, 'DEFAULT_N_PROMPT': { 'value': '', 'description': 'The default negtive prompt.', }, 'TRAIN_N_PROMPT': { 'value': '', 'description': 'The negtive prompt used in train phase.', }, 'P_ZERO': { 'value': 0.0, 'description': 'The prob for zero or negtive prompt.', }, 'USE_EMA': { 'value': True, 'description': 'Use Ema or not. Default True', }, 'DIFFUSION_MODEL': {}, 'DIFFUSION_MODEL_EMA': {}, 'FIRST_STAGE_MODEL': {}, 'COND_STAGE_MODEL': {}, 'TOKENIZER': {} } def __init__(self, cfg, logger=None): super().__init__(cfg, logger=logger) self.init_params() self.construct_network() def init_params(self): self.parameterization = self.cfg.get('PARAMETERIZATION', 'eps') assert self.parameterization in [ 'eps', 'x0', 'v' ], 'currently only supporting "eps" and "x0" and "v"' self.num_timesteps = self.cfg.get('TIMESTEPS', 1000) self.schedule_args = { k.lower(): v for k, v in self.cfg.get('SCHEDULE_ARGS', { 'NAME': 'logsnr_cosine_interp', 'SCALE_MIN': 2.0, 'SCALE_MAX': 4.0 }).items() } self.min_snr_gamma = self.cfg.get('MIN_SNR_GAMMA', None) self.zero_terminal_snr = self.cfg.get('ZERO_TERMINAL_SNR', False) if self.zero_terminal_snr: assert self.parameterization == 'v', 'Now zero_terminal_snr only support v-prediction mode.' 
self.sigmas = noise_schedule(schedule=self.schedule_args.pop('name'), n=self.num_timesteps, zero_terminal_snr=self.zero_terminal_snr, **self.schedule_args) self.diffusion = GaussianDiffusion( sigmas=self.sigmas, prediction_type=self.parameterization) self.pretrained_model = self.cfg.get('PRETRAINED_MODEL', None) self.ignore_keys = self.cfg.get('IGNORE_KEYS', []) self.model_config = self.cfg.DIFFUSION_MODEL self.first_stage_config = self.cfg.FIRST_STAGE_MODEL self.cond_stage_config = self.cfg.COND_STAGE_MODEL self.tokenizer_config = self.cfg.get('TOKENIZER', None) self.loss_config = self.cfg.get('LOSS', None) self.scale_factor = self.cfg.get('SCALE_FACTOR', 0.18215) self.size_factor = self.cfg.get('SIZE_FACTOR', 8) self.default_n_prompt = self.cfg.get('DEFAULT_N_PROMPT', '') self.default_n_prompt = '' if self.default_n_prompt is None else self.default_n_prompt self.p_zero = self.cfg.get('P_ZERO', 0.0) self.train_n_prompt = self.cfg.get('TRAIN_N_PROMPT', '') if self.default_n_prompt is None: self.default_n_prompt = '' if self.train_n_prompt is None: self.train_n_prompt = '' self.use_ema = self.cfg.get('USE_EMA', True) self.model_ema_config = self.cfg.get('DIFFUSION_MODEL_EMA', None) def construct_network(self): self.model = BACKBONES.build(self.model_config, logger=self.logger) self.logger.info('all parameters:{}'.format(count_params(self.model))) if self.use_ema and self.model_ema_config: self.model_ema = BACKBONES.build(self.model_ema_config, logger=self.logger) self.model_ema = self.model_ema.eval() for param in self.model_ema.parameters(): param.requires_grad = False if self.loss_config: self.loss = LOSSES.build(self.loss_config, logger=self.logger) if self.tokenizer_config is not None: self.tokenizer = TOKENIZERS.build(self.tokenizer_config, logger=self.logger) self.first_stage_model = MODELS.build(self.first_stage_config, logger=self.logger) self.first_stage_model = self.first_stage_model.eval() self.first_stage_model.train = disabled_train for param in self.first_stage_model.parameters(): param.requires_grad = False if self.tokenizer_config is not None: self.cond_stage_config.KWARGS = { 'vocab_size': self.tokenizer.vocab_size } if self.cond_stage_config == '__is_unconditional__': print( f'Training {self.__class__.__name__} as an unconditional model.' 
) self.cond_stage_model = None else: model = EMBEDDERS.build(self.cond_stage_config, logger=self.logger) self.cond_stage_model = model.eval().requires_grad_(False) self.cond_stage_model.train = disabled_train def load_pretrained_model(self, pretrained_model): if pretrained_model is not None: with FS.get_from(pretrained_model, wait_finish=True) as local_model: self.init_from_ckpt(local_model, ignore_keys=self.ignore_keys) def init_from_ckpt(self, path, ignore_keys=list()): if path.endswith('safetensors'): sd = load_safetensors(path) else: sd = torch.load(path, map_location='cpu') new_sd = OrderedDict() for k, v in sd.items(): ignored = False for ik in ignore_keys: if ik in k: if we.rank == 0: self.logger.info( 'Ignore key {} from state_dict.'.format(k)) ignored = True break if not ignored: if k.startswith('model.diffusion_model.'): k = k.replace('model.diffusion_model.', 'model.') k = k.replace('post_quant_conv', 'conv2') if 'post_quant_conv' in k else k k = k.replace('quant_conv', 'conv1') if 'quant_conv' in k else k new_sd[k] = v missing, unexpected = self.load_state_dict(new_sd, strict=False) if we.rank == 0: self.logger.info( f'Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys' ) if len(missing) > 0: self.logger.info(f'Missing Keys:\n {missing}') if len(unexpected) > 0: self.logger.info(f'\nUnexpected Keys:\n {unexpected}') def encode_condition(self, input, method='encode_text'): if hasattr(self.cond_stage_model, method): return getattr(self.cond_stage_model, method)(input, tokenizer=self.tokenizer) else: return self.cond_stage_model(input) def forward_train(self, image=None, noise=None, prompt=None, **kwargs): x_start = self.encode_first_stage(image, **kwargs) t = torch.randint(0, self.num_timesteps, (x_start.shape[0], ), device=x_start.device).long() context = {} if prompt and self.cond_stage_model: zeros = (torch.rand(len(prompt)) < self.p_zero).numpy().tolist() prompt = [ self.train_n_prompt if zeros[idx] else p for idx, p in enumerate(prompt) ] self.register_probe({'after_prompt': prompt}) with torch.autocast(device_type='cuda', enabled=False): context = self.encode_condition( self.tokenizer(prompt).to(we.device_id)) if 'hint' in kwargs and kwargs['hint'] is not None: hint = kwargs.pop('hint') if isinstance(context, dict): context['hint'] = hint else: context = {'crossattn': context, 'hint': hint} else: hint = None if self.min_snr_gamma is not None: alphas = self.diffusion.alphas.to(we.device_id)[t] sigmas = self.diffusion.sigmas.pow(2).to(we.device_id)[t] snrs = (alphas / sigmas).clamp(min=1e-20) min_snrs = snrs.clamp(max=self.min_snr_gamma) weights = min_snrs / snrs else: weights = 1 self.register_probe({'snrs_weights': weights}) loss = self.diffusion.loss(x0=x_start, t=t, model=self.model, model_kwargs={'cond': context}, noise=noise, **kwargs) loss = loss * weights loss = loss.mean() ret = {'loss': loss, 'probe_data': {'prompt': prompt}} return ret def noise_sample(self, batch_size, h, w, g): noise = torch.empty(batch_size, 4, h, w, device=we.device_id).normal_(generator=g) return noise def forward(self, **kwargs): if self.training: return self.forward_train(**kwargs) else: return self.forward_test(**kwargs) @torch.no_grad() @torch.autocast('cuda', dtype=torch.float16) def forward_test(self, prompt=None, n_prompt=None, sampler='ddim', sample_steps=50, seed=2023, guide_scale=7.5, guide_rescale=0.5, discretization='trailing', run_train_n=True, **kwargs): g = torch.Generator(device=we.device_id) seed = seed if seed >= 0 else random.randint(0, 
2**32 - 1) g.manual_seed(seed) num_samples = len(prompt) if 'dynamic_encode_text' in kwargs and kwargs.pop( 'dynamic_encode_text'): method = 'dynamic_encode_text' else: method = 'encode_text' n_prompt = default(n_prompt, [self.default_n_prompt] * len(prompt)) assert isinstance(prompt, list) and \ isinstance(n_prompt, list) and \ len(prompt) == len(n_prompt) # with torch.autocast(device_type="cuda", enabled=False): context = self.encode_condition(self.tokenizer(prompt).to( we.device_id), method=method) null_context = self.encode_condition(self.tokenizer(n_prompt).to( we.device_id), method=method) if 'hint' in kwargs and kwargs['hint'] is not None: hint = kwargs.pop('hint') if isinstance(context, dict): context['hint'] = hint else: context = {'crossattn': context, 'hint': hint} if isinstance(null_context, dict): null_context['hint'] = hint else: null_context = {'crossattn': null_context, 'hint': hint} else: hint = None if 'index' in kwargs: kwargs.pop('index') image_size = None if 'meta' in kwargs: meta = kwargs.pop('meta') if 'image_size' in meta: h = int(meta['image_size'][0][0]) w = int(meta['image_size'][1][0]) image_size = [h, w] if 'image_size' in kwargs: image_size = kwargs.pop('image_size') if isinstance(image_size, numbers.Number): image_size = [image_size, image_size] if image_size is None: image_size = [1024, 1024] height, width = image_size noise = self.noise_sample(num_samples, height // self.size_factor, width // self.size_factor, g) # UNet use input n_prompt samples = self.diffusion.sample(solver=sampler, noise=noise, model=self.model, model_kwargs=[{ 'cond': context }, { 'cond': null_context }], steps=sample_steps, guide_scale=guide_scale, guide_rescale=guide_rescale, discretization=discretization, show_progress=True, seed=seed, condition_fn=None, clamp=None, percentile=None, t_max=None, t_min=None, discard_penultimate_step=None, return_intermediate=None, **kwargs) x_samples = self.decode_first_stage(samples).float() x_samples = torch.clamp((x_samples + 1.0) / 2.0, min=0.0, max=1.0) # UNet use train n_prompt if not self.default_n_prompt == self.train_n_prompt and run_train_n: train_n_prompt = [self.train_n_prompt] * len(prompt) null_train_context = self.encode_condition( self.tokenizer(train_n_prompt).to(we.device_id), method=method) tn_samples = self.diffusion.sample(solver=sampler, noise=noise, model=self.model, model_kwargs=[{ 'cond': context }, { 'cond': null_train_context }], steps=sample_steps, guide_scale=guide_scale, guide_rescale=guide_rescale, discretization=discretization, show_progress=we.rank == 0, seed=seed, condition_fn=None, clamp=None, percentile=None, t_max=None, t_min=None, discard_penultimate_step=None, return_intermediate=None, **kwargs) t_x_samples = self.decode_first_stage(tn_samples).float() t_x_samples = torch.clamp((t_x_samples + 1.0) / 2.0, min=0.0, max=1.0) else: train_n_prompt = ['' for _ in prompt] t_x_samples = [None for _ in prompt] outputs = list() for i, (p, np, tnp, img, t_img) in enumerate( zip(prompt, n_prompt, train_n_prompt, x_samples, t_x_samples)): one_tup = {'prompt': p, 'n_prompt': np, 'image': img} if hint is not None: one_tup.update({'hint': hint[i]}) if t_img is not None: one_tup['train_n_prompt'] = tnp one_tup['train_n_image'] = t_img outputs.append(one_tup) return outputs @torch.no_grad() def log_images(self, image=None, prompt=None, n_prompt=None, **kwargs): results = self.forward_test(prompt=prompt, n_prompt=n_prompt, **kwargs) outputs = list() for img, res in zip(image, results): one_tup = { 'orig': torch.clamp((img + 1.0) / 
2.0, min=0.0, max=1.0), 'recon': res['image'], 'prompt': res['prompt'], 'n_prompt': res['n_prompt'] } if 'hint' in res: one_tup.update({'hint': res['hint']}) if 'train_n_prompt' in res: one_tup['train_n_prompt'] = res['train_n_prompt'] one_tup['train_n_image'] = res['train_n_image'] outputs.append(one_tup) return outputs @torch.no_grad() def encode_first_stage(self, x, **kwargs): z = self.first_stage_model.encode(x) return self.scale_factor * z @torch.no_grad() def decode_first_stage(self, z): z = 1. / self.scale_factor * z return self.first_stage_model.decode(z) @staticmethod def get_config_template():
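forward_train in the code above weights the per-sample loss with a min-SNR-gamma scheme whenever MIN_SNR_GAMMA is set. A minimal sketch of that weighting follows, assuming the noising rule xt = alpha_t * x0 + sigma_t * noise and the textbook definition SNR = alpha_t^2 / sigma_t^2 from the Min-SNR paper; note the snippet itself computes snrs as alphas / sigmas.pow(2), so this is an interpretation of the intent rather than a line-for-line copy.

import torch

def min_snr_weights(alphas_t, sigmas_t, gamma=5.0):
    # SNR of the noised sample at the drawn timesteps
    snr = (alphas_t ** 2 / sigmas_t ** 2).clamp(min=1e-20)
    # cap the SNR at gamma so nearly-clean (high-SNR) timesteps
    # do not dominate the training signal
    return snr.clamp(max=gamma) / snr

# usage sketch:
# weights = min_snr_weights(alphas[t], sigmas[t], gamma)
# loss = (per_sample_loss * weights).mean()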
return dict_to_yaml('MODEL',
10
2023-12-21 02:01:48+00:00
16k
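Closing out this record: its LatentDiffusion code restricts PARAMETERIZATION to 'eps', 'x0' or 'v' (with zero_terminal_snr only supported for 'v'), and the GaussianDiffusion.loss snippet in its context builds the regression target from the same three choices. A minimal sketch of those targets for the noising rule xt = alpha_t * x0 + sigma_t * noise, which is what the eps recomputation in that snippet implies; the helper name is illustrative.

import torch

def diffusion_targets(x0, noise, alphas_t, sigmas_t):
    # forward noising used throughout the snippets above
    xt = alphas_t * x0 + sigmas_t * noise
    targets = {
        'eps': noise,                           # predict the injected noise
        'x0': x0,                               # predict the clean latent
        'v': alphas_t * noise - sigmas_t * x0,  # velocity target, as in the loss dict
    }
    return xt, targets

# recovering the clean sample from an eps prediction:
# x0_hat = (xt - sigmas_t * eps_hat) / alphas_t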
RomGai/BrainVis
dc_ldm/models/diffusion/ddpm.py
[ { "identifier": "log_txt_as_img", "path": "dc_ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('data/DejaVuSans.ttf', size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "exists", "path": "dc_ldm/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "default", "path": "dc_ldm/util.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" }, { "identifier": "ismap", "path": "dc_ldm/util.py", "snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)" }, { "identifier": "isimage", "path": "dc_ldm/util.py", "snippet": "def isimage(x):\n if not isinstance(x,torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)" }, { "identifier": "mean_flat", "path": "dc_ldm/util.py", "snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))" }, { "identifier": "count_params", "path": "dc_ldm/util.py", "snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.\")\n return total_params" }, { "identifier": "instantiate_from_config", "path": "dc_ldm/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "LitEma", "path": "dc_ldm/modules/ema.py", "snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n\n self.m_name2s_name = {}\n self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))\n self.register_buffer('num_updates', torch.tensor(0,dtype=torch.int) if use_num_upates\n else torch.tensor(-1,dtype=torch.int))\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n #remove as '.'-character is not allowed in buffers\n s_name = name.replace('.','')\n self.m_name2s_name.update({name:s_name})\n self.register_buffer(s_name,p.clone().detach().data)\n\n self.collected_params = []\n\n def forward(self,model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay,(1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = 
dict(self.named_buffers())\n\n for key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)" }, { "identifier": "normal_kl", "path": "dc_ldm/modules/distributions/distributions.py", "snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. 
Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )" }, { "identifier": "DiagonalGaussianDistribution", "path": "dc_ldm/modules/distributions/distributions.py", "snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.])\n else:\n if other is None:\n return 0.5 * torch.sum(torch.pow(self.mean, 2)\n + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3])\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var - 1.0 - self.logvar + other.logvar,\n dim=[1, 2, 3])\n\n def nll(self, sample, dims=[1,2,3]):\n if self.deterministic:\n return torch.Tensor([0.])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims)\n\n def mode(self):\n return self.mean" }, { "identifier": "VQModelInterface", "path": "dc_ldm/models/autoencoder.py", "snippet": "class VQModelInterface(VQModel):\n def __init__(self, embed_dim, *args, **kwargs):\n super().__init__(embed_dim=embed_dim, *args, **kwargs)\n self.embed_dim = embed_dim\n\n def encode(self, x):\n h = self.encoder(x)\n h = self.quant_conv(h)\n return h\n\n def decode(self, h, force_not_quantize=False):\n # also go through quantization layer\n if not force_not_quantize:\n quant, emb_loss, info = self.quantize(h)\n else:\n quant = h\n quant = self.post_quant_conv(quant)\n dec = self.decoder(quant)\n return dec" }, { "identifier": "IdentityFirstStage", "path": "dc_ldm/models/autoencoder.py", "snippet": "class IdentityFirstStage(torch.nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface # TODO: Should be true by default but check to not break older stuff\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x" }, { "identifier": "AutoencoderKL", "path": "dc_ldm/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ):\n super().__init__()\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2*ddconfig[\"z_channels\"], 2*embed_dim, 1)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, 
ddconfig[\"z_channels\"], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels)==int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n self.trainable = False\n \n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n self.log(\"aeloss\", aeloss, prog_bar=True, logger=True, on_step=False, on_epoch=True)\n self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=False, on_epoch=False)\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n\n self.log(\"discloss\", discloss, prog_bar=True, logger=True, on_step=False, on_epoch=True)\n self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=False, on_epoch=False)\n return discloss\n\n def validation_step(self, batch, batch_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\")\n\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\")\n\n self.log(\"val/rec_loss\", log_dict_ae[\"val/rec_loss\"])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n def configure_optimizers(self):\n lr = self.learning_rate\n opt_ae = torch.optim.Adam(list(self.encoder.parameters())+\n list(self.decoder.parameters())+\n list(self.quant_conv.parameters())+\n list(self.post_quant_conv.parameters()),\n lr=lr, betas=(0.5, 0.9))\n opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),\n lr=lr, betas=(0.5, 0.9))\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n if not 
only_inputs:\n xrec, posterior = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.*(x-x.min())/(x.max()-x.min()) - 1.\n return x" }, { "identifier": "make_beta_schedule", "path": "dc_ldm/modules/diffusionmodules/util.py", "snippet": "def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):\n if schedule == \"linear\":\n betas = (\n torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n elif schedule == \"sqrt\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()" }, { "identifier": "extract_into_tensor", "path": "dc_ldm/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" }, { "identifier": "noise_like", "path": "dc_ldm/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "DDIMSampler", "path": "dc_ldm/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n 
self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n cbs = conditioning[list(conditioning.keys())[0]].shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n **kwargs\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,generator=None):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device, generator=generator)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * 
self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. - mask) * img\n\n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n e_t = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n c_in = torch.cat([unconditional_conditioning, c])\n e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n # direction pointing to x_t\n dir_xt = (1. 
- a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0" }, { "identifier": "PLMSSampler", "path": "dc_ldm/models/diffusion/plms.py", "snippet": "class PLMSSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n if ddim_eta != 0:\n raise ValueError('ddim_eta must be 0 for PLMS')\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n cbs = conditioning[list(conditioning.keys())[0]].shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for PLMS sampling is {size}')\n\n samples, intermediates = self.plms_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n **kwargs\n )\n return samples, intermediates\n\n @torch.no_grad()\n def plms_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, generator=None):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device, generator=generator)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = list(reversed(range(0,timesteps))) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running PLMS Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='PLMS Sampler', total=total_steps)\n old_eps = []\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n ts_next = torch.full((b,), time_range[min(i + 1, len(time_range) - 1)], device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. 
- mask) * img\n\n outs = self.p_sample_plms(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n old_eps=old_eps, t_next=ts_next)\n img, pred_x0, e_t = outs\n old_eps.append(e_t)\n if len(old_eps) >= 4:\n old_eps.pop(0)\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, old_eps=None, t_next=None):\n b, *_, device = *x.shape, x.device\n\n def get_model_output(x, t):\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n e_t = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n c_in = torch.cat([unconditional_conditioning, c])\n e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n return e_t\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n\n def get_x_prev_and_pred_x0(e_t, index):\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n # direction pointing to x_t\n dir_xt = (1. 
- a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n e_t = get_model_output(x, t)\n if len(old_eps) == 0:\n # Pseudo Improved Euler (2nd order)\n x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t, index)\n e_t_next = get_model_output(x_prev, t_next)\n e_t_prime = (e_t + e_t_next) / 2\n elif len(old_eps) == 1:\n # 2nd order Pseudo Linear Multistep (Adams-Bashforth)\n e_t_prime = (3 * e_t - old_eps[-1]) / 2\n elif len(old_eps) == 2:\n # 3nd order Pseudo Linear Multistep (Adams-Bashforth)\n e_t_prime = (23 * e_t - 16 * old_eps[-1] + 5 * old_eps[-2]) / 12\n elif len(old_eps) >= 3:\n # 4nd order Pseudo Linear Multistep (Adams-Bashforth)\n e_t_prime = (55 * e_t - 59 * old_eps[-1] + 37 * old_eps[-2] - 9 * old_eps[-3]) / 24\n\n x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t_prime, index)\n\n return x_prev, pred_x0, e_t" }, { "identifier": "get_similarity_metric", "path": "eval_metrics.py", "snippet": "def get_similarity_metric(img1, img2, method='pair-wise', metric_name='mse', **kwargs):\n # img1: n, w, h, 3\n # img2: n, w, h, 3\n # all in pixel values: 0 ~ 255\n # return: list of scores 0 ~ 1.\n if img1.shape[-1] != 3:\n img1 = rearrange(img1, 'n c w h -> n w h c')\n if img2.shape[-1] != 3:\n img2 = rearrange(img2, 'n c w h -> n w h c')\n\n if method == 'pair-wise':\n eval_procedure_func = pair_wise_score \n elif method == 'n-way':\n eval_procedure_func = n_way_scores\n elif method == 'metrics-only':\n eval_procedure_func = metrics_only\n elif method == 'class':\n return get_n_way_top_k_acc(img1, img2, **kwargs)\n else:\n raise NotImplementedError\n\n if metric_name == 'mse':\n metric_func = mse_metric\n decision_func = smaller_the_better\n elif metric_name == 'pcc':\n metric_func = pcc_metric\n decision_func = larger_the_better\n elif metric_name == 'ssim':\n metric_func = ssim_metric\n decision_func = larger_the_better\n elif metric_name == 'psm':\n metric_func = psm_wrapper()\n decision_func = smaller_the_better\n elif metric_name == 'fid':\n metric_func = fid_wrapper()\n decision_func = smaller_the_better\n else:\n raise NotImplementedError\n \n return eval_procedure_func(img1, img2, metric_func, decision_func, **kwargs)" }, { "identifier": "FrozenImageEmbedder", "path": "dc_ldm/modules/encoders/modules.py", "snippet": "class FrozenImageEmbedder(AbstractEncoder):\n \"\"\"Uses the CLIP transformer encoder for text (from Hugging Face)\"\"\"\n def __init__(self, version=\"openai/clip-vit-large-patch14\", device=\"cuda\", max_length=77):\n super().__init__()\n # self.processor = AutoProcessor.from_pretrained(version)\n self.transformer = CLIPVisionModelWithProjection.from_pretrained(version)\n self.device = device\n self.max_length = max_length\n self.freeze()\n\n\n\n def freeze(self):\n self.transformer = self.transformer.eval()\n for param in self.parameters():\n param.requires_grad = False\n\n def forward(self, inputs):\n # image = Image.open(requests.get(url, stream=True).raw)\n # inputs = self.processor(images=image, return_tensors=\"pt\")\n outputs = self.transformer(**inputs)\n image_embeds = outputs.image_embeds\n return image_embeds\n # z = outputs.last_hidden_state\n\n # return z\n\n def encode(self, inputs):\n return self(inputs)" } ]
import os
import torch
import torch.nn as nn
import numpy as np
import pytorch_lightning as pl
import torch.nn.functional as F
from torch.optim.lr_scheduler import LambdaLR
from einops import rearrange, repeat
from contextlib import contextmanager
from functools import partial
from tqdm import tqdm
from torchvision.utils import make_grid
from pytorch_lightning.utilities.rank_zero import rank_zero_only
from dc_ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
from dc_ldm.modules.ema import LitEma
from dc_ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
from dc_ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL
from dc_ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
from dc_ldm.models.diffusion.ddim import DDIMSampler
from dc_ldm.models.diffusion.plms import PLMSSampler
from PIL import Image
from eval_metrics import get_similarity_metric
from dc_ldm.modules.encoders.modules import FrozenImageEmbedder
14,361
@torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop((batch_size, channels, image_size, image_size), return_intermediates=return_intermediates) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) def get_loss(self, pred, target, mean=True): if self.loss_type == 'l1': loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == 'l2': if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction='none') else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = self.model(x_noisy, t) loss_dict = {} if self.parameterization == "eps": target = noise elif self.parameterization == "x0": target = x_start else: raise NotImplementedError(f"Paramterization {self.parameterization} not yet supported") loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = 'train' if self.training else 'val' loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f'{log_prefix}/loss': loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: x = x[..., None] x = rearrange(x, 'b h w c -> b c h w') x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x) return loss, loss_dict def training_step(self, batch, batch_idx): self.train() self.cond_stage_model.train() ###到底是在哪里训练的 loss, loss_dict = self.shared_step(batch) self.log_dict(loss_dict, prog_bar=True, logger=True, on_step=False, on_epoch=True) if self.use_scheduler: lr = self.optimizers().param_groups[0]['lr'] self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=False, on_epoch=True) return loss @torch.no_grad() def generate(self, data, num_samples, ddim_steps=300, HW=None, limit=None, state=None): # fmri_embedding: n, seq_len, embed_dim all_samples = [] if HW is None: shape = (self.p_channels, self.p_image_size, 
self.p_image_size) else: num_resolutions = len(self.ch_mult) shape = (self.p_channels, HW[0] // 2**(num_resolutions-1), HW[1] // 2**(num_resolutions-1)) model = self
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., ddim_steps=300 ): super().__init__() assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? 
self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) self.validation_count = 0 self.ddim_steps = ddim_steps self.return_cond = False self.output_path = None self.main_config = None self.best_val = 0.0 self.run_full_validation_threshold = 0.0 self.eval_avg = True def re_init_ema(self): if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer('betas', to_torch(betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer('posterior_mean_coef1', to_torch( betas * np.sqrt(alphas_cumprod_prev) / (1. 
- alphas_cumprod))) self.register_buffer('posterior_mean_coef2', to_torch( (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) if self.parameterization == "eps": lvlb_weights = self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == "x0": lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. * 1 - torch.Tensor(alphas_cumprod)) else: raise NotImplementedError("mu not supported") # TODO how to choose this term lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys: {missing}") if len(unexpected) > 0: print(f"Unexpected Keys: {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. """ mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1., 1.) 
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop((batch_size, channels, image_size, image_size), return_intermediates=return_intermediates) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) def get_loss(self, pred, target, mean=True): if self.loss_type == 'l1': loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == 'l2': if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction='none') else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = self.model(x_noisy, t) loss_dict = {} if self.parameterization == "eps": target = noise elif self.parameterization == "x0": target = x_start else: raise NotImplementedError(f"Paramterization {self.parameterization} not yet supported") loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = 'train' if self.training else 'val' loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f'{log_prefix}/loss': loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: x = x[..., None] x = rearrange(x, 'b h w c -> b c h w') x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x) return loss, loss_dict def 
training_step(self, batch, batch_idx): self.train() self.cond_stage_model.train() ###到底是在哪里训练的 loss, loss_dict = self.shared_step(batch) self.log_dict(loss_dict, prog_bar=True, logger=True, on_step=False, on_epoch=True) if self.use_scheduler: lr = self.optimizers().param_groups[0]['lr'] self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=False, on_epoch=True) return loss @torch.no_grad() def generate(self, data, num_samples, ddim_steps=300, HW=None, limit=None, state=None): # fmri_embedding: n, seq_len, embed_dim all_samples = [] if HW is None: shape = (self.p_channels, self.p_image_size, self.p_image_size) else: num_resolutions = len(self.ch_mult) shape = (self.p_channels, HW[0] // 2**(num_resolutions-1), HW[1] // 2**(num_resolutions-1)) model = self
sampler = PLMSSampler(model)
18
2023-12-16 12:52:14+00:00
16k
tonnetonne814/PL-Bert-VITS2
train_ms.py
[ { "identifier": "DistributedBucketSampler", "path": "data_utils.py", "snippet": "class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler):\n \"\"\"\n Maintain similar input lengths in a batch.\n Length groups are specified by boundaries.\n Ex) boundaries = [b1, b2, b3] -> any batch is included either {x | b1 < length(x) <=b2} or {x | b2 < length(x) <= b3}.\n\n It removes samples which are not included in the boundaries.\n Ex) boundaries = [b1, b2, b3] -> any x s.t. length(x) <= b1 or length(x) > b3 are discarded.\n \"\"\"\n\n def __init__(\n self,\n dataset,\n batch_size,\n boundaries,\n num_replicas=None,\n rank=None,\n shuffle=True,\n ):\n super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)\n self.lengths = dataset.lengths\n self.batch_size = batch_size\n self.boundaries = boundaries\n\n self.buckets, self.num_samples_per_bucket = self._create_buckets()\n self.total_size = sum(self.num_samples_per_bucket)\n self.num_samples = self.total_size // self.num_replicas\n\n def _create_buckets(self):\n buckets = [[] for _ in range(len(self.boundaries) - 1)]\n for i in range(len(self.lengths)):\n length = self.lengths[i]\n idx_bucket = self._bisect(length)\n if idx_bucket != -1:\n buckets[idx_bucket].append(i)\n\n for i in range(len(buckets) - 1, 0, -1):\n if len(buckets[i]) == 0:\n buckets.pop(i)\n self.boundaries.pop(i + 1)\n i=0\n if len(buckets[i]) == 0:\n buckets.pop(i)\n self.boundaries.pop(i + 1)\n\n num_samples_per_bucket = []\n for i in range(len(buckets)):\n len_bucket = len(buckets[i])\n total_batch_size = self.num_replicas * self.batch_size\n rem = (\n total_batch_size - (len_bucket % total_batch_size)\n ) % total_batch_size\n num_samples_per_bucket.append(len_bucket + rem)\n return buckets, num_samples_per_bucket\n\n def __iter__(self):\n # deterministically shuffle based on epoch\n g = torch.Generator()\n g.manual_seed(self.epoch)\n\n indices = []\n if self.shuffle:\n for bucket in self.buckets:\n indices.append(torch.randperm(len(bucket), generator=g).tolist())\n else:\n for bucket in self.buckets:\n indices.append(list(range(len(bucket))))\n\n batches = []\n for i in range(len(self.buckets)):\n bucket = self.buckets[i]\n len_bucket = len(bucket)\n ids_bucket = indices[i]\n num_samples_bucket = self.num_samples_per_bucket[i]\n\n # add extra samples to make it evenly divisible\n rem = num_samples_bucket - len_bucket\n ids_bucket = (\n ids_bucket\n + ids_bucket * (rem // len_bucket)\n + ids_bucket[: (rem % len_bucket)]\n )\n\n # subsample\n ids_bucket = ids_bucket[self.rank :: self.num_replicas]\n\n # batching\n for j in range(len(ids_bucket) // self.batch_size):\n batch = [\n bucket[idx]\n for idx in ids_bucket[\n j * self.batch_size : (j + 1) * self.batch_size\n ]\n ]\n batches.append(batch)\n\n if self.shuffle:\n batch_ids = torch.randperm(len(batches), generator=g).tolist()\n batches = [batches[i] for i in batch_ids]\n self.batches = batches\n\n assert len(self.batches) * self.batch_size == self.num_samples\n return iter(self.batches)\n\n def _bisect(self, x, lo=0, hi=None):\n if hi is None:\n hi = len(self.boundaries) - 1\n\n if hi > lo:\n mid = (hi + lo) // 2\n if self.boundaries[mid] < x and x <= self.boundaries[mid + 1]:\n return mid\n elif x <= self.boundaries[mid]:\n return self._bisect(x, lo, mid)\n else:\n return self._bisect(x, mid + 1, hi)\n else:\n return -1\n\n def __len__(self):\n return self.num_samples // self.batch_size" }, { "identifier": "TextAudioSpeakerCollate", "path": "data_utils.py", "snippet": "class 
TextAudioSpeakerCollate:\n \"\"\"Zero-pads model inputs and targets\"\"\"\n\n def __init__(self, return_ids=False):\n self.return_ids = return_ids\n\n def __call__(self, batch):\n \"\"\"Collate's training batch from normalized text, audio and speaker identities\n PARAMS\n ------\n batch: [text_normalized, spec_normalized, wav_normalized, sid]\n \"\"\"\n # Right zero-pad all one-hot text sequences to max input length\n _, ids_sorted_decreasing = torch.sort(\n torch.LongTensor([x[1].size(1) for x in batch]), dim=0, descending=True\n )\n\n max_text_len = max([len(x[0]) for x in batch])\n max_spec_len = max([x[1].size(1) for x in batch])\n max_wav_len = max([x[2].size(1) for x in batch])\n # sid = 1\n max_bert_len = max([x[4].size(1) for x in batch])\n\n text_lengths = torch.LongTensor(len(batch))\n spec_lengths = torch.LongTensor(len(batch))\n wav_lengths = torch.LongTensor(len(batch))\n sid = torch.LongTensor(len(batch))\n bert_lengths = torch.LongTensor(len(batch))\n\n text_padded = torch.LongTensor(len(batch), max_text_len)\n spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len)\n wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)\n bert_padded = torch.FloatTensor(len(batch), 13, max_bert_len, 768)\n\n text_padded.zero_()\n spec_padded.zero_()\n wav_padded.zero_()\n bert_padded.zero_()\n for i in range(len(ids_sorted_decreasing)):\n row = batch[ids_sorted_decreasing[i]]\n\n text = row[0]\n text_padded[i, : text.size(0)] = text\n text_lengths[i] = text.size(0)\n\n spec = row[1]\n spec_padded[i, :, : spec.size(1)] = spec\n spec_lengths[i] = spec.size(1)\n\n wav = row[2]\n wav_padded[i, :, : wav.size(1)] = wav\n wav_lengths[i] = wav.size(1)\n\n sid[i] = row[3]\n\n bert = row[4]\n bert_padded[i, :, :bert.size(1),:] = bert\n bert_lengths[i] = bert.size(1)\n\n\n if self.return_ids:\n return (\n text_padded,\n text_lengths,\n spec_padded,\n spec_lengths,\n wav_padded,\n wav_lengths,\n bert_padded,\n bert_lengths,\n sid,\n ids_sorted_decreasing,\n )\n return (\n text_padded,\n text_lengths,\n spec_padded,\n spec_lengths,\n wav_padded,\n wav_lengths,\n bert_padded,\n bert_lengths,\n sid,\n )" }, { "identifier": "TextAudioSpeakerLoader", "path": "data_utils.py", "snippet": "class TextAudioSpeakerLoader(torch.utils.data.Dataset):\n \"\"\"\n 1) loads audio, speaker_id, text pairs\n 2) normalizes text and converts them to sequences of integers\n 3) computes spectrograms from audio files.\n \"\"\"\n\n def __init__(self, audiopaths_sid_text, hparams):\n self.hparams = hparams\n self.audiopaths_sid_text = load_filepaths_and_text(audiopaths_sid_text)\n self.text_cleaners = hparams.text_cleaners\n self.max_wav_value = hparams.max_wav_value\n self.sampling_rate = hparams.sampling_rate\n self.filter_length = hparams.filter_length\n self.hop_length = hparams.hop_length\n self.win_length = hparams.win_length\n self.sampling_rate = hparams.sampling_rate\n\n self.use_mel_spec_posterior = getattr(\n hparams, \"use_mel_posterior_encoder\", False\n )\n if self.use_mel_spec_posterior:\n self.n_mel_channels = getattr(hparams, \"n_mel_channels\", 80)\n self.cleaned_text = getattr(hparams, \"cleaned_text\", False)\n\n self.add_blank = hparams.add_blank\n self.min_text_len = getattr(hparams, \"min_text_len\", 1)\n self.max_text_len = getattr(hparams, \"max_text_len\", 999)\n self.min_audio_len = getattr(hparams, \"min_audio_len\", 8192)\n\n random.seed(1234)\n random.shuffle(self.audiopaths_sid_text)\n self._filter()\n\n self.count = 0\n\n def _filter(self):\n \"\"\"\n Filter text & store 
spec lengths\n \"\"\"\n # Store spectrogram lengths for Bucketing\n # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2)\n # spec_length = wav_length // hop_length\n\n audiopaths_sid_text_new = []\n lengths = []\n for data in self.audiopaths_sid_text:\n audiopath, sid, ph, text, bert, emo, style = data\n if not os.path.isfile(audiopath):\n continue\n if self.min_text_len <= len(text) and len(text) <= self.max_text_len:\n audiopaths_sid_text_new.append([audiopath, sid, ph, text, bert, emo, style])\n length = os.path.getsize(audiopath) // (2 * self.hop_length)\n if length < self.min_audio_len // self.hop_length:\n print(\"DATA PASS\")\n continue\n lengths.append(length)\n self.audiopaths_sid_text = audiopaths_sid_text_new\n self.lengths = lengths\n print(f\"INFO:{len(self.audiopaths_sid_text)} is used as Training Dataset.\")\n\n def get_audio_text_speaker_pair(self, audiopath_sid_text):\n # separate filename, speaker_id and text\n audiopath, sid, ph, text, pl_bert, emo, style = (\n audiopath_sid_text[0],\n audiopath_sid_text[1],\n audiopath_sid_text[2],\n audiopath_sid_text[3],\n audiopath_sid_text[4],\n audiopath_sid_text[5],\n audiopath_sid_text[6],\n )\n ph = self.get_text(ph)\n spec, wav = self.get_audio(audiopath)\n bert = self.get_pl_bert(pl_bert)\n sid = self.get_sid(sid)\n\n # parameter checker \n assert len(ph) == bert.size(1)\n\n return (ph, spec, wav, sid, bert)\n \n def get_pl_bert(self, filename):\n path = os.path.join(\"pl_bert_embeddings\", f\"{filename}.PlBertJa\")\n data = torch.load(path)\n if self.add_blank:\n L, T, H = data.shape\n new_data = torch.zeros(size=(L,2*T+1,H), dtype=data.dtype)\n for idx in range(T):\n target_idx = idx*2+1\n new_data[:, target_idx, :] = data[:, idx, :]\n data = new_data\n return data\n\n def get_audio(self, filename):\n # TODO : if linear spec exists convert to mel from existing linear spec\n audio, sampling_rate = load_wav_to_torch(filename)\n if sampling_rate != self.sampling_rate:\n raise ValueError(\n \"{} {} SR doesn't match target {} SR\".format(\n sampling_rate, self.sampling_rate\n )\n )\n # audio_norm = audio / self.max_wav_value\n audio_norm = audio.unsqueeze(0)\n spec_filename = filename.replace(\".wav\", \".spec.pt\")\n if self.use_mel_spec_posterior:\n spec_filename = spec_filename.replace(\".spec.pt\", \".mel.pt\")\n if os.path.exists(spec_filename):\n spec = torch.load(spec_filename)\n else:\n if self.use_mel_spec_posterior:\n \"\"\"TODO : (need verification)\n if linear spec exists convert to\n mel from existing linear spec (uncomment below lines)\"\"\"\n # if os.path.exists(filename.replace(\".wav\", \".spec.pt\")):\n # # spec, n_fft, num_mels, sampling_rate, fmin, fmax\n # spec = spec_to_mel_torch(\n # torch.load(filename.replace(\".wav\", \".spec.pt\")),\n # self.filter_length, self.n_mel_channels, self.sampling_rate,\n # self.hparams.mel_fmin, self.hparams.mel_fmax)\n spec = mel_spectrogram_torch(\n audio_norm,\n self.filter_length,\n self.n_mel_channels,\n self.sampling_rate,\n self.hop_length,\n self.win_length,\n self.hparams.mel_fmin,\n self.hparams.mel_fmax,\n center=False,\n )\n else:\n spec = spectrogram_torch(\n audio_norm,\n self.filter_length,\n self.sampling_rate,\n self.hop_length,\n self.win_length,\n center=False,\n )\n spec = torch.squeeze(spec, 0)\n torch.save(spec, spec_filename)\n return spec, audio_norm\n\n def get_text(self, text):\n if self.cleaned_text:\n text_norm = cleaned_text_to_sequence(text)\n else:\n text_norm = text_to_sequence(text, self.text_cleaners)\n if 
self.add_blank:\n text_norm = commons.intersperse(text_norm, 0)\n text_norm = torch.LongTensor(text_norm)\n return text_norm\n\n def get_sid(self, sid):\n sid = torch.LongTensor([int(sid)])\n return sid\n\n def __getitem__(self, index):\n return self.get_audio_text_speaker_pair(self.audiopaths_sid_text[index])\n\n def __len__(self):\n return len(self.audiopaths_sid_text)" }, { "identifier": "discriminator_loss", "path": "losses.py", "snippet": "def discriminator_loss(disc_real_outputs, disc_generated_outputs):\n loss = 0\n r_losses = []\n g_losses = []\n for dr, dg in zip(disc_real_outputs, disc_generated_outputs):\n dr = dr.float()\n dg = dg.float()\n r_loss = torch.mean((1 - dr) ** 2)\n g_loss = torch.mean(dg**2)\n loss += r_loss + g_loss\n r_losses.append(r_loss.item())\n g_losses.append(g_loss.item())\n\n return loss, r_losses, g_losses" }, { "identifier": "feature_loss", "path": "losses.py", "snippet": "def feature_loss(fmap_r, fmap_g):\n loss = 0\n for dr, dg in zip(fmap_r, fmap_g):\n for rl, gl in zip(dr, dg):\n rl = rl.float().detach()\n gl = gl.float()\n loss += torch.mean(torch.abs(rl - gl))\n\n return loss * 2" }, { "identifier": "generator_loss", "path": "losses.py", "snippet": "def generator_loss(disc_outputs):\n loss = 0\n gen_losses = []\n for dg in disc_outputs:\n dg = dg.float()\n l = torch.mean((1 - dg) ** 2)\n gen_losses.append(l)\n loss += l\n\n return loss, gen_losses" }, { "identifier": "kl_loss", "path": "losses.py", "snippet": "def kl_loss(z_p, logs_q, m_p, logs_p, z_mask):\n \"\"\"\n z_p, logs_q: [b, h, t_t]\n m_p, logs_p: [b, h, t_t]\n \"\"\"\n z_p = z_p.float()\n logs_q = logs_q.float()\n m_p = m_p.float()\n logs_p = logs_p.float()\n z_mask = z_mask.float()\n\n kl = logs_p - logs_q - 0.5\n kl += 0.5 * ((z_p - m_p) ** 2) * torch.exp(-2.0 * logs_p)\n kl = torch.sum(kl * z_mask)\n l = kl / torch.sum(z_mask)\n return l" }, { "identifier": "mel_spectrogram_torch", "path": "mel_processing.py", "snippet": "def mel_spectrogram_torch(\n y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False\n):\n if torch.min(y) < -1.0:\n print(\"min value is \", torch.min(y))\n if torch.max(y) > 1.0:\n print(\"max value is \", torch.max(y))\n\n global mel_basis, hann_window\n dtype_device = str(y.dtype) + \"_\" + str(y.device)\n fmax_dtype_device = str(fmax) + \"_\" + dtype_device\n wnsize_dtype_device = str(win_size) + \"_\" + dtype_device\n if fmax_dtype_device not in mel_basis:\n mel = librosa_mel_fn(\n sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax\n )\n mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(\n dtype=y.dtype, device=y.device\n )\n if wnsize_dtype_device not in hann_window:\n hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(\n dtype=y.dtype, device=y.device\n )\n\n y = torch.nn.functional.pad(\n y.unsqueeze(1),\n (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)),\n mode=\"reflect\",\n )\n y = y.squeeze(1)\n\n if version.parse(torch.__version__) >= version.parse(\"2\"):\n spec = torch.stft(\n y,\n n_fft,\n hop_length=hop_size,\n win_length=win_size,\n window=hann_window[wnsize_dtype_device],\n center=center,\n pad_mode=\"reflect\",\n normalized=False,\n onesided=True,\n return_complex=False,\n )\n else:\n spec = torch.stft(\n y,\n n_fft,\n hop_length=hop_size,\n win_length=win_size,\n window=hann_window[wnsize_dtype_device],\n center=center,\n pad_mode=\"reflect\",\n normalized=False,\n onesided=True,\n )\n\n spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)\n\n spec = 
torch.matmul(mel_basis[fmax_dtype_device], spec)\n spec = spectral_normalize_torch(spec)\n\n return spec" }, { "identifier": "spec_to_mel_torch", "path": "mel_processing.py", "snippet": "def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):\n global mel_basis\n dtype_device = str(spec.dtype) + \"_\" + str(spec.device)\n fmax_dtype_device = str(fmax) + \"_\" + dtype_device\n if fmax_dtype_device not in mel_basis:\n mel = librosa_mel_fn(\n sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax\n )\n mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(\n dtype=spec.dtype, device=spec.device\n )\n spec = torch.matmul(mel_basis[fmax_dtype_device], spec)\n spec = spectral_normalize_torch(spec)\n return spec" }, { "identifier": "AVAILABLE_DURATION_DISCRIMINATOR_TYPES", "path": "models.py", "snippet": "AVAILABLE_DURATION_DISCRIMINATOR_TYPES = [\n \"dur_disc_1\",\n \"dur_disc_2\",\n]" }, { "identifier": "AVAILABLE_FLOW_TYPES", "path": "models.py", "snippet": "AVAILABLE_FLOW_TYPES = [\n \"pre_conv\",\n \"pre_conv2\",\n \"fft\",\n \"mono_layer_inter_residual\",\n \"mono_layer_post_residual\",\n]" }, { "identifier": "DurationDiscriminatorV1", "path": "models.py", "snippet": "class DurationDiscriminatorV1(nn.Module): # vits2\n # TODO : not using \"spk conditioning\" for now according to the paper.\n # Can be a better discriminator if we use it.\n def __init__(\n self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0\n ):\n super().__init__()\n\n self.in_channels = in_channels\n self.filter_channels = filter_channels\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.gin_channels = gin_channels\n\n self.drop = nn.Dropout(p_dropout)\n self.conv_1 = nn.Conv1d(\n in_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n # self.norm_1 = modules.LayerNorm(filter_channels)\n self.conv_2 = nn.Conv1d(\n filter_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n # self.norm_2 = modules.LayerNorm(filter_channels)\n self.dur_proj = nn.Conv1d(1, filter_channels, 1)\n\n self.pre_out_conv_1 = nn.Conv1d(\n 2 * filter_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.pre_out_norm_1 = modules.LayerNorm(filter_channels)\n self.pre_out_conv_2 = nn.Conv1d(\n filter_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.pre_out_norm_2 = modules.LayerNorm(filter_channels)\n\n # if gin_channels != 0:\n # self.cond = nn.Conv1d(gin_channels, in_channels, 1)\n\n self.output_layer = nn.Sequential(nn.Linear(filter_channels, 1), nn.Sigmoid())\n\n def forward_probability(self, x, x_mask, dur, g=None):\n dur = self.dur_proj(dur)\n x = torch.cat([x, dur], dim=1)\n x = self.pre_out_conv_1(x * x_mask)\n # x = torch.relu(x)\n # x = self.pre_out_norm_1(x)\n # x = self.drop(x)\n x = self.pre_out_conv_2(x * x_mask)\n # x = torch.relu(x)\n # x = self.pre_out_norm_2(x)\n # x = self.drop(x)\n x = x * x_mask\n x = x.transpose(1, 2)\n output_prob = self.output_layer(x)\n return output_prob\n\n def forward(self, x, x_mask, dur_r, dur_hat, g=None):\n x = torch.detach(x)\n # if g is not None:\n # g = torch.detach(g)\n # x = x + self.cond(g)\n x = self.conv_1(x * x_mask)\n # x = torch.relu(x)\n # x = self.norm_1(x)\n # x = self.drop(x)\n x = self.conv_2(x * x_mask)\n # x = torch.relu(x)\n # x = self.norm_2(x)\n # x = self.drop(x)\n\n output_probs = []\n for dur in [dur_r, dur_hat]:\n output_prob = self.forward_probability(x, x_mask, dur, g)\n output_probs.append(output_prob)\n\n return 
output_probs" }, { "identifier": "DurationDiscriminatorV2", "path": "models.py", "snippet": "class DurationDiscriminatorV2(nn.Module): # vits2\n # TODO : not using \"spk conditioning\" for now according to the paper.\n # Can be a better discriminator if we use it.\n def __init__(\n self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0\n ):\n super().__init__()\n\n self.in_channels = in_channels\n self.filter_channels = filter_channels\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.gin_channels = gin_channels\n\n self.conv_1 = nn.Conv1d(\n in_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.norm_1 = modules.LayerNorm(filter_channels)\n self.conv_2 = nn.Conv1d(\n filter_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.norm_2 = modules.LayerNorm(filter_channels)\n self.dur_proj = nn.Conv1d(1, filter_channels, 1)\n\n self.pre_out_conv_1 = nn.Conv1d(\n 2 * filter_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.pre_out_norm_1 = modules.LayerNorm(filter_channels)\n self.pre_out_conv_2 = nn.Conv1d(\n filter_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.pre_out_norm_2 = modules.LayerNorm(filter_channels)\n\n # if gin_channels != 0:\n # self.cond = nn.Conv1d(gin_channels, in_channels, 1)\n\n self.output_layer = nn.Sequential(nn.Linear(filter_channels, 1), nn.Sigmoid())\n\n def forward_probability(self, x, x_mask, dur, g=None):\n dur = self.dur_proj(dur)\n x = torch.cat([x, dur], dim=1)\n x = self.pre_out_conv_1(x * x_mask)\n x = torch.relu(x)\n x = self.pre_out_norm_1(x)\n x = self.pre_out_conv_2(x * x_mask)\n x = torch.relu(x)\n x = self.pre_out_norm_2(x)\n x = x * x_mask\n x = x.transpose(1, 2)\n output_prob = self.output_layer(x)\n return output_prob\n\n def forward(self, x, x_mask, dur_r, dur_hat, g=None):\n x = torch.detach(x)\n # if g is not None:\n # g = torch.detach(g)\n # x = x + self.cond(g)\n x = self.conv_1(x * x_mask)\n x = torch.relu(x)\n x = self.norm_1(x)\n x = self.conv_2(x * x_mask)\n x = torch.relu(x)\n x = self.norm_2(x)\n\n output_probs = []\n for dur in [dur_r, dur_hat]:\n output_prob = self.forward_probability(x, x_mask, dur, g)\n output_probs.append([output_prob])\n\n return output_probs" }, { "identifier": "MultiPeriodDiscriminator", "path": "models.py", "snippet": "class MultiPeriodDiscriminator(torch.nn.Module):\n def __init__(self, use_spectral_norm=False):\n super(MultiPeriodDiscriminator, self).__init__()\n periods = [2, 3, 5, 7, 11, 17, 23, 37]\n\n discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]\n discs = discs + [\n DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods\n ]\n self.discriminators = nn.ModuleList(discs)\n\n def forward(self, y, y_hat):\n y_d_rs = []\n y_d_gs = []\n fmap_rs = []\n fmap_gs = []\n for i, d in enumerate(self.discriminators):\n y_d_r, fmap_r = d(y)\n y_d_g, fmap_g = d(y_hat)\n y_d_rs.append(y_d_r)\n y_d_gs.append(y_d_g)\n fmap_rs.append(fmap_r)\n fmap_gs.append(fmap_g)\n\n return y_d_rs, y_d_gs, fmap_rs, fmap_gs" }, { "identifier": "SynthesizerTrn", "path": "models.py", "snippet": "class SynthesizerTrn(nn.Module):\n \"\"\"\n Synthesizer for Training\n \"\"\"\n\n def __init__(\n self,\n n_vocab,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n 
upsample_kernel_sizes,\n bert_emb_size,\n n_speakers=0,\n gin_channels=0,\n use_sdp=True,\n **kwargs,\n ):\n super().__init__()\n self.n_vocab = n_vocab\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.n_speakers = n_speakers\n self.gin_channels = gin_channels\n self.use_spk_conditioned_encoder = kwargs.get(\n \"use_spk_conditioned_encoder\", False\n )\n self.use_transformer_flows = kwargs.get(\"use_transformer_flows\", False)\n self.transformer_flow_type = kwargs.get(\n \"transformer_flow_type\", \"mono_layer_post_residual\"\n )\n if self.use_transformer_flows:\n assert (\n self.transformer_flow_type in AVAILABLE_FLOW_TYPES\n ), f\"transformer_flow_type must be one of {AVAILABLE_FLOW_TYPES}\"\n self.use_sdp = use_sdp\n # self.use_duration_discriminator = kwargs.get(\"use_duration_discriminator\", False)\n self.use_noise_scaled_mas = kwargs.get(\"use_noise_scaled_mas\", False)\n self.mas_noise_scale_initial = kwargs.get(\"mas_noise_scale_initial\", 0.01)\n self.noise_scale_delta = kwargs.get(\"noise_scale_delta\", 2e-6)\n\n self.current_mas_noise_scale = self.mas_noise_scale_initial\n if self.use_spk_conditioned_encoder and gin_channels > 0:\n self.enc_gin_channels = gin_channels\n else:\n self.enc_gin_channels = 0\n self.enc_p = TextEncoder(\n n_vocab,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n bert_emb_size=bert_emb_size,\n gin_channels=self.enc_gin_channels,\n )\n\n self.dec = Generator(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gin_channels=gin_channels,\n )\n self.enc_q = PosteriorEncoder(\n spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n # self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels)\n self.flow = ResidualCouplingTransformersBlock(\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 4,\n gin_channels=gin_channels,\n use_transformer_flows=self.use_transformer_flows,\n transformer_flow_type=self.transformer_flow_type,\n )\n\n if use_sdp:\n self.dp = StochasticDurationPredictor(\n hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels\n )\n else:\n self.dp = DurationPredictor(\n hidden_channels, 256, 3, 0.5, gin_channels=gin_channels\n )\n\n if n_speakers > 1:\n self.emb_g = nn.Embedding(n_speakers, gin_channels)\n\n # 重み付け加算式を取る\n self.WSL = WeightSumLayer(n_layers=13)\n\n def forward(self, x, x_lengths, y, y_lengths, bert, bert_lengths, sid=None):\n bert = self.WSL(bert)\n\n if self.n_speakers > 0:\n g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]\n else:\n g = None\n\n x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, bert, bert_lengths, g=g)\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n\n with torch.no_grad():\n # negative cross-entropy\n s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t]\n neg_cent1 = 
torch.sum(\n -0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True\n ) # [b, 1, t_s]\n neg_cent2 = torch.matmul(\n -0.5 * (z_p**2).transpose(1, 2), s_p_sq_r\n ) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]\n neg_cent3 = torch.matmul(\n z_p.transpose(1, 2), (m_p * s_p_sq_r)\n ) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]\n neg_cent4 = torch.sum(\n -0.5 * (m_p**2) * s_p_sq_r, [1], keepdim=True\n ) # [b, 1, t_s]\n neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4\n\n if self.use_noise_scaled_mas:\n epsilon = (\n torch.std(neg_cent)\n * torch.randn_like(neg_cent)\n * self.current_mas_noise_scale\n )\n neg_cent = neg_cent + epsilon\n\n attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)\n attn = (\n monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1))\n .unsqueeze(1)\n .detach()\n )\n\n w = attn.sum(2)\n if self.use_sdp:\n l_length = self.dp(x, x_mask, w, g=g)\n l_length = l_length / torch.sum(x_mask)\n logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=1.0)\n logw_ = torch.log(w + 1e-6) * x_mask\n else:\n logw_ = torch.log(w + 1e-6) * x_mask\n logw = self.dp(x, x_mask, g=g)\n l_length = torch.sum((logw - logw_) ** 2, [1, 2]) / torch.sum(\n x_mask\n ) # for averaging\n\n # expand prior\n m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2)\n logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2)\n\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n o = self.dec(z_slice, g=g)\n return (\n o,\n l_length,\n attn,\n ids_slice,\n x_mask,\n y_mask,\n (z, z_p, m_p, logs_p, m_q, logs_q),\n (x, logw, logw_),\n )\n\n def infer(\n self,\n x,\n x_lengths,\n bert,\n bert_lengths,\n sid=None,\n noise_scale=1,\n length_scale=1,\n noise_scale_w=1.0,\n max_len=None,\n ):\n bert = self.WSL(bert)\n if self.n_speakers > 0:\n g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]\n else:\n g = None\n x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, bert, bert_lengths, g=g)\n if self.use_sdp:\n logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w)\n else:\n logw = self.dp(x, x_mask, g=g)\n w = torch.exp(logw) * x_mask * length_scale\n w_ceil = torch.ceil(w)\n y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()\n y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(\n x_mask.dtype\n )\n attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)\n attn = commons.generate_path(w_ceil, attn_mask)\n\n m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(\n 1, 2\n ) # [b, t', t], [b, t, d] -> [b, d, t']\n logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(\n 1, 2\n ) # [b, t', t], [b, t, d] -> [b, d, t']\n\n z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale\n z = self.flow(z_p, y_mask, g=g, reverse=True)\n o = self.dec((z * y_mask)[:, :, :max_len], g=g)\n return o, attn, y_mask, (z, z_p, m_p, logs_p)\n\n # currently vits-2 is not capable of voice conversion\n ## comment - choihkk\n ## Assuming the use of the ResidualCouplingTransformersLayer2 module, it seems that voice conversion is possible \n def voice_conversion(self, y, y_lengths, sid_src, sid_tgt):\n assert self.n_speakers > 0, \"n_speakers have to be larger than 0.\"\n g_src = self.emb_g(sid_src).unsqueeze(-1)\n g_tgt = self.emb_g(sid_tgt).unsqueeze(-1)\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g_src)\n z_p = self.flow(z, y_mask, g=g_src)\n z_hat = self.flow(z_p, y_mask, g=g_tgt, reverse=True)\n o_hat = self.dec(z_hat * y_mask, 
g=g_tgt)\n return o_hat, y_mask, (z, z_p, z_hat)" }, { "identifier": "symbols", "path": "PL_BERT_ja/text/symbols.py", "snippet": "" } ]
import argparse
import itertools
import json
import math
import os
import logging
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import tqdm
import commons
import models
import utils
from torch import nn, optim
from torch.cuda.amp import GradScaler, autocast
from torch.nn import functional as F
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from data_utils import (DistributedBucketSampler, TextAudioSpeakerCollate, TextAudioSpeakerLoader)
from losses import discriminator_loss, feature_loss, generator_loss, kl_loss
from mel_processing import mel_spectrogram_torch, spec_to_mel_torch
from models import (AVAILABLE_DURATION_DISCRIMINATOR_TYPES, AVAILABLE_FLOW_TYPES, DurationDiscriminatorV1, DurationDiscriminatorV2, MultiPeriodDiscriminator, SynthesizerTrn)
from PL_BERT_ja.text.symbols import symbols
11,022
posterior_channels = 128 # vits2 hps.data.use_mel_posterior_encoder = True else: print("Using lin posterior encoder for VITS1") posterior_channels = hps.data.filter_length // 2 + 1 hps.data.use_mel_posterior_encoder = False train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps.data) train_sampler = DistributedBucketSampler( train_dataset, hps.train.batch_size, [32, 300, 500, 700, 900, 1100, 1300, 1500, 3000], num_replicas=n_gpus, rank=rank, shuffle=True, ) collate_fn = TextAudioSpeakerCollate() train_loader = DataLoader( train_dataset, num_workers=8, shuffle=False, pin_memory=True, collate_fn=collate_fn, batch_sampler=train_sampler, ) if rank == 0: eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data) eval_loader = DataLoader( eval_dataset, num_workers=8, shuffle=False, batch_size=hps.train.batch_size, pin_memory=True, drop_last=False, collate_fn=collate_fn, ) # some of these flags are not being used in the code and directly set in hps json file. # they are kept here for reference and prototyping. if ( "use_transformer_flows" in hps.model.keys() and hps.model.use_transformer_flows == True ): use_transformer_flows = True transformer_flow_type = hps.model.transformer_flow_type print(f"Using transformer flows {transformer_flow_type} for VITS2") assert ( transformer_flow_type in AVAILABLE_FLOW_TYPES ), f"transformer_flow_type must be one of {AVAILABLE_FLOW_TYPES}" else: print("Using normal flows for VITS1") use_transformer_flows = False if ( "use_spk_conditioned_encoder" in hps.model.keys() and hps.model.use_spk_conditioned_encoder == True ): if hps.data.n_speakers == 0: raise ValueError( "n_speakers must be > 0 when using spk conditioned encoder to train multi-speaker model" ) use_spk_conditioned_encoder = True else: print("Using normal encoder for VITS1") use_spk_conditioned_encoder = False if ( "use_noise_scaled_mas" in hps.model.keys() and hps.model.use_noise_scaled_mas == True ): print("Using noise scaled MAS for VITS2") use_noise_scaled_mas = True mas_noise_scale_initial = 0.01 noise_scale_delta = 2e-6 else: print("Using normal MAS for VITS1") use_noise_scaled_mas = False mas_noise_scale_initial = 0.0 noise_scale_delta = 0.0 if ( "use_duration_discriminator" in hps.model.keys() and hps.model.use_duration_discriminator == True ): # print("Using duration discriminator for VITS2") use_duration_discriminator = True # comment - choihkk # add duration discriminator type here # I think it would be a good idea to come up with a method to input this part accurately, like a hydra duration_discriminator_type = getattr( hps.model, "duration_discriminator_type", "dur_disc_1" ) print(f"Using duration_discriminator {duration_discriminator_type} for VITS2") assert ( duration_discriminator_type in AVAILABLE_DURATION_DISCRIMINATOR_TYPES ), f"duration_discriminator_type must be one of {AVAILABLE_DURATION_DISCRIMINATOR_TYPES}" # duration_discriminator_type = AVAILABLE_DURATION_DISCRIMINATOR_TYPES # ここ修正 if duration_discriminator_type == "dur_disc_1": net_dur_disc = DurationDiscriminatorV1( hps.model.hidden_channels, hps.model.hidden_channels, 3, 0.1, gin_channels=hps.model.gin_channels if hps.data.n_speakers != 0 else 0, ).cuda(rank) elif duration_discriminator_type == "dur_disc_2": net_dur_disc = DurationDiscriminatorV2( hps.model.hidden_channels, hps.model.hidden_channels, 3, 0.1, gin_channels=hps.model.gin_channels if hps.data.n_speakers != 0 else 0, ).cuda(rank) else: print("NOT using any duration discriminator like VITS1") net_dur_disc = None 
use_duration_discriminator = False
net_g = SynthesizerTrn(
numba_logger = logging.getLogger('numba') numba_logger.setLevel(logging.WARNING) # from tensorboardX import SummaryWriter torch.backends.cudnn.benchmark = True global_step = 0 def main(): """Assume Single Node Multi GPUs Training Only""" assert torch.cuda.is_available(), "CPU training is not allowed." n_gpus = torch.cuda.device_count() os.environ["MASTER_ADDR"] = "localhost" os.environ["MASTER_PORT"] = "6060" hps = utils.get_hparams() mp.spawn( run, nprocs=n_gpus, args=( n_gpus, hps, ), ) def run(rank, n_gpus, hps): net_dur_disc = None global global_step if rank == 0: logger = utils.get_logger(hps.model_dir) logger.info(hps) utils.check_git_hash(hps.model_dir) writer = SummaryWriter(log_dir=hps.model_dir) writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval")) dist.init_process_group( backend="nccl", init_method="env://", world_size=n_gpus, rank=rank ) torch.manual_seed(hps.train.seed) torch.cuda.set_device(rank) if ( "use_mel_posterior_encoder" in hps.model.keys() and hps.model.use_mel_posterior_encoder == True ): print("Using mel posterior encoder for VITS2") posterior_channels = 128 # vits2 hps.data.use_mel_posterior_encoder = True else: print("Using lin posterior encoder for VITS1") posterior_channels = hps.data.filter_length // 2 + 1 hps.data.use_mel_posterior_encoder = False train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps.data) train_sampler = DistributedBucketSampler( train_dataset, hps.train.batch_size, [32, 300, 500, 700, 900, 1100, 1300, 1500, 3000], num_replicas=n_gpus, rank=rank, shuffle=True, ) collate_fn = TextAudioSpeakerCollate() train_loader = DataLoader( train_dataset, num_workers=8, shuffle=False, pin_memory=True, collate_fn=collate_fn, batch_sampler=train_sampler, ) if rank == 0: eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data) eval_loader = DataLoader( eval_dataset, num_workers=8, shuffle=False, batch_size=hps.train.batch_size, pin_memory=True, drop_last=False, collate_fn=collate_fn, ) # some of these flags are not being used in the code and directly set in hps json file. # they are kept here for reference and prototyping. 
if ( "use_transformer_flows" in hps.model.keys() and hps.model.use_transformer_flows == True ): use_transformer_flows = True transformer_flow_type = hps.model.transformer_flow_type print(f"Using transformer flows {transformer_flow_type} for VITS2") assert ( transformer_flow_type in AVAILABLE_FLOW_TYPES ), f"transformer_flow_type must be one of {AVAILABLE_FLOW_TYPES}" else: print("Using normal flows for VITS1") use_transformer_flows = False if ( "use_spk_conditioned_encoder" in hps.model.keys() and hps.model.use_spk_conditioned_encoder == True ): if hps.data.n_speakers == 0: raise ValueError( "n_speakers must be > 0 when using spk conditioned encoder to train multi-speaker model" ) use_spk_conditioned_encoder = True else: print("Using normal encoder for VITS1") use_spk_conditioned_encoder = False if ( "use_noise_scaled_mas" in hps.model.keys() and hps.model.use_noise_scaled_mas == True ): print("Using noise scaled MAS for VITS2") use_noise_scaled_mas = True mas_noise_scale_initial = 0.01 noise_scale_delta = 2e-6 else: print("Using normal MAS for VITS1") use_noise_scaled_mas = False mas_noise_scale_initial = 0.0 noise_scale_delta = 0.0 if ( "use_duration_discriminator" in hps.model.keys() and hps.model.use_duration_discriminator == True ): # print("Using duration discriminator for VITS2") use_duration_discriminator = True # comment - choihkk # add duration discriminator type here # I think it would be a good idea to come up with a method to input this part accurately, like a hydra duration_discriminator_type = getattr( hps.model, "duration_discriminator_type", "dur_disc_1" ) print(f"Using duration_discriminator {duration_discriminator_type} for VITS2") assert ( duration_discriminator_type in AVAILABLE_DURATION_DISCRIMINATOR_TYPES ), f"duration_discriminator_type must be one of {AVAILABLE_DURATION_DISCRIMINATOR_TYPES}" # duration_discriminator_type = AVAILABLE_DURATION_DISCRIMINATOR_TYPES # ここ修正 if duration_discriminator_type == "dur_disc_1": net_dur_disc = DurationDiscriminatorV1( hps.model.hidden_channels, hps.model.hidden_channels, 3, 0.1, gin_channels=hps.model.gin_channels if hps.data.n_speakers != 0 else 0, ).cuda(rank) elif duration_discriminator_type == "dur_disc_2": net_dur_disc = DurationDiscriminatorV2( hps.model.hidden_channels, hps.model.hidden_channels, 3, 0.1, gin_channels=hps.model.gin_channels if hps.data.n_speakers != 0 else 0, ).cuda(rank) else: print("NOT using any duration discriminator like VITS1") net_dur_disc = None use_duration_discriminator = False net_g = SynthesizerTrn(
len(symbols)+1,
15
2023-12-16 05:34:02+00:00
16k
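Taken together, the fields of this record encode a single next-line completion: the prompt ends mid-call at net_g = SynthesizerTrn( and the gold continuation is len(symbols)+1,. Below is a minimal, self-contained sketch of how such a record could be consumed for evaluation; only the two literal strings are taken from the record, while the helper function and the whitespace-insensitive matching rule are illustrative assumptions, not part of the dataset.

# Hypothetical consumer of the record above (not defined by the dataset itself).
cropped_code_tail = "net_g = SynthesizerTrn("   # last prompt line, from cropped_code
gold_next_line = "len(symbols)+1,"              # target line, from next_line

def exact_match(prediction: str, target: str) -> bool:
    # assumed metric: exact match on the gold line, ignoring whitespace
    return "".join(prediction.split()) == "".join(target.split())

print(exact_match("len(symbols) + 1,", gold_next_line))   # True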
Ruiyuan-Zhang/CCS
multi_part_assembly/utils/wx_transformer_utilities/multihead_attention.py
[ { "identifier": "FairseqDropout", "path": "multi_part_assembly/utils/wx_transformer_utilities/fairseq_dropout.py", "snippet": "class FairseqDropout(nn.Module):\n\n def __init__(self, p, module_name=None):\n super().__init__()\n self.p = p\n self.module_name = module_name\n self.apply_during_inference = False\n\n def forward(self, x, inplace: bool = False):\n if self.training or self.apply_during_inference:\n return F.dropout(x, p=self.p, training=True, inplace=inplace)\n else:\n return x\n\n def make_generation_fast_(\n self,\n name: str,\n retain_dropout: bool = False,\n retain_dropout_modules: Optional[List[str]] = None,\n **kwargs\n ):\n if retain_dropout:\n if retain_dropout_modules is not None and self.module_name is None:\n logger.warning(\n 'Cannot enable dropout during inference for module {} '\n 'because module_name was not set'.format(name)\n )\n elif (\n retain_dropout_modules is None # if None, apply to all modules\n or self.module_name in retain_dropout_modules\n ):\n logger.info(\n 'Enabling dropout during inference for module: {}'.format(name)\n )\n self.apply_during_inference = True\n else:\n logger.info('Disabling dropout for module: {}'.format(name))" }, { "identifier": "MultiHeadAttention", "path": "multi_part_assembly/utils/wx_transformer_utilities/attention_rim.py", "snippet": "class MultiHeadAttention(nn.Module):\n ''' Multi-Head Attention module '''\n\n def __init__(self, n_head, d_model_read, d_model_write, d_model_out, d_k, d_v, grad_sparse, residual=True, dropout=0.1, skip_write=False, flag=False):\n super().__init__()\n\n self.n_head = n_head\n self.d_k = d_k\n self.d_v = d_v\n\n # print(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~Initialize Multi-Head Attention~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\")\n # print('d model read: ', d_model_read)\n # print('d_model_write: ', d_model_write)\n # print('d_model_out: ', d_model_out)\n # print('n_head: ', n_head)\n # print('d_k: ', d_k)\n # print('d_v: ', d_v)\n # print('num_blocks_read: ', num_blocks_read)\n # print('num_blocks_write: ', num_blocks_write)\n # input()\n\n self.GLN_qs = nn.Linear(d_model_read, n_head * d_k)\n self.GLN_ks = nn.Linear(d_model_write, n_head * d_k)\n self.GLN_vs = nn.Linear(d_model_write, n_head * d_v)\n\n self.residual = residual\n\n #self.w_qs = nn.Linear(d_model_read, n_head * d_k)\n #self.w_ks = nn.Linear(d_model_write, n_head * d_k)\n #self.w_vs = nn.Linear(d_model_write, n_head * d_v)\n\n #nn.init.normal_(self.w_qs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k)))\n #nn.init.normal_(self.w_ks.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k)))\n #nn.init.normal_(self.w_vs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_v)))\n\n self.attention = ScaledDotProductAttention(temperature=np.power(d_k, 0.5), flag=flag)\n #self.layer_norm = nn.LayerNorm(d_model)\n\n self.gate_fc = nn.Linear(n_head * d_v, d_model_out)\n\n if not skip_write:\n self.fc = nn.Linear(n_head * d_v, d_model_out)\n else:\n self.fc = lambda a: a\n\n #nn.init.xavier_normal_(self.fc.weight)\n\n self.dropout = nn.Dropout(dropout)\n\n self.ln = nn.LayerNorm(d_model_out)\n\n def forward(self, q, k, v, mask=None):\n\n #print('attn input shape', q.shape)\n\n d_k, d_v, n_head = self.d_k, self.d_v, self.n_head\n\n sz_b, len_q, _ = q.size()\n sz_b, len_k, _ = k.size()\n sz_b, len_v, _ = v.size()\n\n residual = q\n\n #print('q shape', q.shape)\n\n # print(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~Forward of Multi-Head Attention~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\")\n # print(\"q: \", q.size())\n # print(\"k: \", k.size())\n # print(\"v: \", v.size())\n # 
input()\n\n q = self.GLN_qs(q).view(sz_b, len_q, n_head, d_k)\n #q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)\n k = self.GLN_ks(k).view(sz_b, len_k, n_head, d_k)\n v = self.GLN_vs(v).reshape(sz_b, len_v, n_head, d_v)\n #v = v.view(sz_b, len_v, n_head, d_v)\n\n # print(\"GLN q: \", q.size())\n # print(\"GLN k: \", k.size())\n # print(\"GLN v: \", v.size())\n\n q = q.permute(2, 0, 1, 3).contiguous().view(-1, len_q, d_k) # (n*b) x lq x dk\n k = k.permute(2, 0, 1, 3).contiguous().view(-1, len_k, d_k) # (n*b) x lk x dk\n v = v.permute(2, 0, 1, 3).contiguous().view(-1, len_v, d_v) # (n*b) x lv x dv\n\n # print(\"Permute q: \", q.size())\n # print(\"Permute k: \", k.size())\n # print(\"Permute v: \", v.size())\n\n #mask = mask.repeat(n_head, 1, 1) # (n*b) x .. x ..\n output, attn, extra_loss = self.attention(q, k, v, mask=None)\n\n # print(\"Output: \", output.size())\n # print(\"Attention: \", attn.size())\n\n output = output.view(n_head, sz_b, len_q, d_v)\n output = output.permute(1, 2, 0, 3).contiguous().view(sz_b, len_q, -1) # b x lq x (n*dv)\n\n # print(\"Here Output: \", output.size())\n\n #print('output shape before fc', output.shape)\n\n #TODO: probably shouldn't just apply residual layer in the forward pass.\n\n output_init = output*1.0\n\n output = self.dropout(self.fc(output_init))\n\n gate = torch.sigmoid(self.gate_fc(output_init))\n\n #output = self.layer_norm(gate * output + (1 - gate) * residual)\n #output = gate * output + (1 - gate) * residual\n\n if self.residual:\n output = gate * torch.tanh(output)\n else:\n #output = self.ln(output)\n pass\n\n # print(\"Final Output: \", output.size())\n\n #output\n\n #print('attn', attn[0])\n #print('output input diff', output - residual)\n\n return output, attn, extra_loss" }, { "identifier": "quant_noise", "path": "multi_part_assembly/utils/wx_transformer_utilities/quant_noise.py", "snippet": "def quant_noise(module, p, block_size):\n \"\"\"\n Wraps modules and applies quantization noise to the weights for\n subsequent quantization with Iterative Product Quantization as\n described in \"Training with Quantization Noise for Extreme Model Compression\"\n\n Args:\n - module: nn.Module\n - p: amount of Quantization Noise\n - block_size: size of the blocks for subsequent quantization with iPQ\n\n Remarks:\n - Module weights must have the right sizes wrt the block size\n - Only Linear, Embedding and Conv2d modules are supported for the moment\n - For more detail on how to quantize by blocks with convolutional weights,\n see \"And the Bit Goes Down: Revisiting the Quantization of Neural Networks\"\n - We implement the simplest form of noise here as stated in the paper\n which consists in randomly dropping blocks\n \"\"\"\n\n # if no quantization noise, don't register hook\n if p <= 0:\n return module\n\n # supported modules\n assert isinstance(module, (nn.Linear, nn.Embedding, nn.Conv2d))\n\n # test whether module.weight has the right sizes wrt block_size\n is_conv = module.weight.ndim == 4\n\n # 2D matrix\n if not is_conv:\n assert module.weight.size(1) % block_size == 0, \"Input features must be a multiple of block sizes\"\n\n # 4D matrix\n else:\n # 1x1 convolutions\n if module.kernel_size == (1, 1):\n assert module.in_channels % block_size == 0, \"Input channels must be a multiple of block sizes\"\n # regular convolutions\n else:\n k = module.kernel_size[0] * module.kernel_size[1]\n assert k % block_size == 0, \"Kernel size must be a multiple of block size\"\n\n def _forward_pre_hook(mod, input):\n # no noise for evaluation\n if 
mod.training:\n if not is_conv:\n # gather weight and sizes\n weight = mod.weight\n in_features = weight.size(1)\n out_features = weight.size(0)\n\n # split weight matrix into blocks and randomly drop selected blocks\n mask = torch.zeros(in_features // block_size * out_features, device=weight.device)\n mask.bernoulli_(p)\n mask = mask.repeat_interleave(block_size, -1).view(-1, in_features)\n\n else:\n # gather weight and sizes\n weight = mod.weight\n in_channels = mod.in_channels\n out_channels = mod.out_channels\n\n # split weight matrix into blocks and randomly drop selected blocks\n if mod.kernel_size == (1, 1):\n mask = torch.zeros(int(in_channels // block_size * out_channels), device=weight.device)\n mask.bernoulli_(p)\n mask = mask.repeat_interleave(block_size, -1).view(-1, in_channels)\n else:\n mask = torch.zeros(weight.size(0), weight.size(1), device=weight.device)\n mask.bernoulli_(p)\n mask = mask.unsqueeze(2).unsqueeze(3).repeat(1, 1, mod.kernel_size[0], mod.kernel_size[1])\n\n # scale weights and apply mask\n mask = mask.to(torch.bool) # x.bool() is not currently supported in TorchScript\n s = 1 / (1 - p)\n mod.weight.data = s * weight.masked_fill(mask, 0)\n\n module.register_forward_pre_hook(_forward_pre_hook)\n return module" }, { "identifier": "GroupLinearLayer", "path": "multi_part_assembly/utils/wx_transformer_utilities/group_linear_layer.py", "snippet": "class GroupLinearLayer(nn.Module):\n\n def __init__(self, din, dout, num_blocks, bias=True, a = None):\n super(GroupLinearLayer, self).__init__()\n self.nb = num_blocks\n self.dout = dout\n\n if a is None:\n a = 1. / math.sqrt(dout * num_blocks)\n\n #gain = 1.0 / math.sqrt(2)\n #a = gain * math.sqrt(6.0 / (din + dout))\n\n self.weight = nn.Parameter(torch.FloatTensor(num_blocks,din,dout).uniform_(-a,a))\n\n self.bias = bias\n\n if bias is True:\n self.bias = nn.Parameter(torch.FloatTensor(num_blocks,dout).uniform_(-a,a))\n #self.bias = nn.Parameter(torch.zeros(dout*num_blocks))\n else:\n self.bias = None\n\n def forward(self,x):\n\n\t#input: ts x bs x blocks*nhid\n\t#ts*bs , blocks, nhid\n\t#blocks, ts*bs, nhid\n ts,bs,m = x.shape\t\n\n x = x.reshape((ts*bs, self.nb, m//self.nb))\n x = x.permute(1,0,2)\n x = torch.bmm(x,self.weight)\n x = x.permute(1,0,2)\n \n if not self.bias is None:\n x = x + self.bias\n\n x = x.reshape((ts, bs, self.dout*self.nb))\n \n #if not self.bias is None:\n # x += self.bias\n\n return x" }, { "identifier": "RelationalMemory", "path": "multi_part_assembly/utils/wx_transformer_utilities/relational_memory_volatile.py", "snippet": "class RelationalMemory(nn.Module):\n \"\"\"\n Constructs a `RelationalMemory` object.\n This class is same as the RMC from relational_rnn_models.py, but without language modeling-specific variables.\n Args:\n mem_slots: The total number of memory slots to use.\n head_size: The size of an attention head.\n input_size: The size of input per step. i.e. the dimension of each input vector\n num_heads: The number of attention heads to use. Defaults to 1.\n num_blocks: Number of times to compute attention per time step. Defaults\n to 1.\n forget_bias: Bias to use for the forget gate, assuming we are using\n some form of gating. Defaults to 1.\n input_bias: Bias to use for the input gate, assuming we are using\n some form of gating. Defaults to 0.\n gate_style: Whether to use per-element gating ('unit'),\n per-memory slot gating ('memory'), or no gating at all (None).\n Defaults to `unit`.\n attention_mlp_layers: Number of layers to use in the post-attention\n MLP. 
Defaults to 2.\n key_size: Size of vector to use for key & query vectors in the attention\n computation. Defaults to None, in which case we use `head_size`.\n name: Name of the module.\n\n # NEW flag for this class\n return_all_outputs: Whether the model returns outputs for each step (like seq2seq) or only the final output.\n Raises:\n ValueError: gate_style not one of [None, 'memory', 'unit'].\n ValueError: num_blocks is < 1.\n ValueError: attention_mlp_layers is < 1.\n \"\"\"\n\n def __init__(self, mem_slots, head_size, input_size, output_size, num_heads=1, num_blocks=1, forget_bias=1., input_bias=0.,\n gate_style='unit', attention_mlp_layers=2, key_size=None, return_all_outputs=False, use_topk = False, topk = 3, num_steps = 5,\n null_attention = False):\n super(RelationalMemory, self).__init__()\n\n ########## generic parameters for RMC ##########\n self.mem_slots = mem_slots\n self.head_size = head_size\n self.num_heads = num_heads\n self.mem_size = self.head_size * self.num_heads\n self.use_topk = use_topk\n self.topk = topk\n\n # a new fixed params needed for pytorch port of RMC\n # +1 is the concatenated input per time step : we do self-attention with the concatenated memory & input\n # so if the mem_slots = 1, this value is 2\n self.mem_slots_plus_input = self.mem_slots + 1\n\n if num_blocks < 1:\n raise ValueError('num_blocks must be >=1. Got: {}.'.format(num_blocks))\n self.num_blocks = num_blocks\n\n print(\"Using gate style\", gate_style)\n if gate_style not in ['unit', 'memory', None]:\n raise ValueError(\n 'gate_style must be one of [\\'unit\\', \\'memory\\', None]. got: '\n '{}.'.format(gate_style))\n self.gate_style = gate_style\n\n if attention_mlp_layers < 1:\n raise ValueError('attention_mlp_layers must be >= 1. Got: {}.'.format(\n attention_mlp_layers))\n self.attention_mlp_layers = attention_mlp_layers\n\n self.key_size = key_size if key_size else self.head_size\n self.attn_log = None\n\n ########## parameters for multihead attention ##########\n # value_size is same as head_size\n self.value_size = self.head_size\n # total size for query-key-value\n self.qkv_size = 2 * self.key_size + self.value_size\n self.total_qkv_size = self.qkv_size * self.num_heads # denoted as F\n\n self.query_proj = nn.Linear(self.mem_size, self.key_size * self.num_heads)\n count_parameters(\"query\", self.query_proj)\n self.key_proj = nn.Linear(self.mem_size, self.key_size * self.num_heads)\n count_parameters(\"key\", self.key_proj)\n self.value_proj = nn.Linear(self.mem_size, self.value_size * self.num_heads)\n count_parameters(\"value\", self.value_proj)\n\n # each head has qkv_sized linear projector\n # just using one big param is more efficient, rather than this line\n # self.qkv_projector = [nn.Parameter(torch.randn((self.qkv_size, self.qkv_size))) for _ in range(self.num_heads)]\n #self.qkv_projector = nn.Linear(self.mem_size, self.total_qkv_size)\n #self.qkv_layernorm = nn.LayerNorm(self.total_qkv_size)\n\n # used for attend_over_memory function\n self.attention_mlp = nn.ModuleList([nn.Linear(self.mem_size, self.mem_size)] * self.attention_mlp_layers)\n count_parameters(\"attention_mlp\", self.attention_mlp[0])\n self.attended_memory_layernorm = nn.LayerNorm( self.mem_size)\n count_parameters(\"layernorm1\", self.attended_memory_layernorm)\n self.attended_memory_layernorm2 = nn.LayerNorm(self.mem_size)\n count_parameters(\"layernorm2\", self.attended_memory_layernorm2)\n\n ########## parameters for initial embedded input projection ##########\n self.input_size = input_size\n 
self.input_projector = nn.Linear(self.input_size, self.mem_size)\n count_parameters(\"input_projector\", self.input_projector)\n\n #self.output_projector = nn.Linear(self.output_size, self.input_size)\n\n ########## parameters for gating ##########\n self.num_gates = 2 * self.calculate_gate_size()\n print('input projector:'+str(self.mem_size))\n \n if gate_style in ['unit', 'memory']:\n self.input_gate_projector = RepeatLinear(self.mem_size, self.num_gates, num_steps)\n count_parameters(\"input_gate_projector\", self.input_gate_projector)\n self.memory_gate_projector = GroupLinearLayer(self.mem_size, self.num_gates, self.mem_slots)\n #self.memory_gate_projector = nn.Linear(self.mem_size, self.num_gates)\n\n #(self.mem_size, self.num_gates, self.mem_slots)\n count_parameters(\"memory_gate_projector\", self.memory_gate_projector)\n \n # trainable scalar gate bias tensors\n self.forget_bias = nn.Parameter(torch.tensor(forget_bias, dtype=torch.float32))\n self.input_bias = nn.Parameter(torch.tensor(input_bias, dtype=torch.float32))\n\n ########## number of outputs returned #####\n self.return_all_outputs = return_all_outputs\n\n self.null_attention = null_attention\n\n print(\"relational volatie!!!\") \n #self.competition_mlp = nn.Sequential(nn.Linear(self.mem_slots * self.mem_size + self.mem_size, 256),\n # nn.ReLU(),\n # nn.Linear(256, 256),\n # nn.ReLU(),\n # nn.Linear(256, 256),\n # nn.ReLU(),\n # nn.Linear(256, 2))\n\n def repackage_hidden(self, h):\n \"\"\"Wraps hidden states in new Tensors, to detach them from their history.\"\"\"\n # needed for truncated BPTT, called at every batch forward pass\n if isinstance(h, torch.Tensor):\n return h.detach()\n else:\n return tuple(self.repackage_hidden(v) for v in h)\n\n def initial_state(self, batch_size, trainable=False):\n \"\"\"\n Creates the initial memory.\n We should ensure each row of the memory is initialized to be unique,\n so initialize the matrix to be the identity. We then pad or truncate\n as necessary so that init_state is of size\n (batch_size, self.mem_slots, self.mem_size).\n Args:\n batch_size: The size of the batch.\n trainable: Whether the initial state is trainable. This is always True.\n Returns:\n init_state: A truncated or padded matrix of size\n (batch_size, self.mem_slots, self.mem_size).\n \"\"\"\n if True:\n init_state = torch.stack([torch.eye(self.mem_slots) for _ in range(batch_size)])\n\n # pad the matrix with zeros\n if self.mem_size > self.mem_slots:\n difference = self.mem_size - self.mem_slots\n pad = torch.zeros((batch_size, self.mem_slots, difference))\n init_state = torch.cat([init_state, pad], -1)\n\n # truncation. 
take the first 'self.mem_size' components\n elif self.mem_size < self.mem_slots:\n init_state = init_state[:, :, :self.mem_size]\n\n return init_state\n else:\n init_state = torch.randn(batch_size, self.mem_slots, self.mem_size)\n return init_state\n def multihead_attention(self, input, memory, use_topk_ = True, store_log = True):\n \"\"\"\n Perform multi-head attention from 'Attention is All You Need'.\n Implementation of the attention mechanism from\n https://arxiv.org/abs/1706.03762.\n Args:\n memory: Memory tensor to perform attention on.\n Returns:\n new_memory: New memory tensor.\n \"\"\"\n\n q = self.query_proj(memory)\n k = self.key_proj(input)\n v = self.value_proj(input)\n\n q = q.reshape(q.size(0), q.size(1), self.num_heads, -1).permute(0, 2, 1, 3)\n k = k.reshape(k.size(0), k.size(1), self.num_heads, -1).permute(0, 2, 1, 3)\n v = v.reshape(v.size(0), v.size(1), self.num_heads, -1).permute(0, 2, 1, 3)\n scores = torch.matmul(q, k.transpose(2, 3))\n\n scores = torch.softmax(scores, dim = -1)\n #if store_log:\n # self.attn_log = scores[0]\n if not self.null_attention:\n if self.use_topk and use_topk_:\n topk = torch.topk(scores, dim = -1, k = self.topk)\n mask = torch.zeros(scores.size()).to(scores.device)\n mask.scatter_(3, topk.indices, 1)\n scores = scores * mask\n else:\n memory_flat = memory.reshape(memory.size(0), -1).unsqueeze(1)\n memory_flat = memory_flat.repeat(1, input.shape[1], 1)\n\n N = torch.cat((input, memory_flat), dim = 2)\n N = self.competition_mlp(N)\n\n N = torch.nn.functional.gumbel_softmax(N, dim = 2, hard = True, tau = 0.5)\n\n N = N[:, :, 0]\n\n scores = scores * N.unsqueeze(1).unsqueeze(1)\n\n\n output = torch.matmul(scores, v)\n\n \"\"\"#print(memory.size())\n # First, a simple linear projection is used to construct queries\n qkv = self.qkv_projector(memory)\n # apply layernorm for every dim except the batch dim\n qkv = self.qkv_layernorm(qkv)\n\n # mem_slots needs to be dynamically computed since mem_slots got concatenated with inputs\n # example: self.mem_slots=10 and seq_length is 3, and then mem_slots is 10 + 1 = 11 for each 3 step forward pass\n # this is the same as self.mem_slots_plus_input, but defined to keep the sonnet implementation code style\n mem_slots = memory.shape[1] # denoted as N\n\n # split the qkv to multiple heads H\n # [B, N, F] => [B, N, H, F/H]\n qkv_reshape = qkv.view(qkv.shape[0], mem_slots, self.num_heads, self.qkv_size)\n\n # [B, N, H, F/H] => [B, H, N, F/H]\n qkv_transpose = qkv_reshape.permute(0, 2, 1, 3)\n\n # [B, H, N, key_size], [B, H, N, key_size], [B, H, N, value_size]\n q, k, v = torch.split(qkv_transpose, [self.key_size, self.key_size, self.value_size], -1)\n\n # scale q with d_k, the dimensionality of the key vectors\n q *= (self.key_size ** -0.5)\n\n # make it [B, H, N, N]\n dot_product = torch.matmul(q, k.permute(0, 1, 3, 2))\n weights = F.softmax(dot_product, dim=-1)\n\n if self.use_topk:\n topk = torch.topk(weights, dim = -1, k = self.topk)\n mask = torch.zeros(weights.size()).to(weights.device)\n mask.scatter_(3, topk.indices, 1)\n weights = weights * mask\n\n # output is [B, H, N, V]\n output = torch.matmul(weights, v)\"\"\"\n\n # [B, H, N, V] => [B, N, H, V] => [B, N, H*V]\n output_transpose = output.permute(0, 2, 1, 3).contiguous()\n new_memory = output_transpose.view((output_transpose.shape[0], output_transpose.shape[1], -1))\n\n return new_memory\n\n\n @property\n def state_size(self):\n return [self.mem_slots, self.mem_size]\n\n @property\n def output_size(self):\n return self.mem_slots * 
self.mem_size\n\n def print_log(self):\n print(self.attn_log)\n\n def calculate_gate_size(self):\n \"\"\"\n Calculate the gate size from the gate_style.\n Returns:\n The per sample, per head parameter size of each gate.\n \"\"\"\n if self.gate_style == 'unit':\n return self.mem_size\n elif self.gate_style == 'memory':\n return 1\n else: # self.gate_style == None\n return 0\n\n def create_gates(self, inputs, memory):\n \"\"\"\n Create input and forget gates for this step using `inputs` and `memory`.\n Args:\n inputs: Tensor input.\n memory: The current state of memory.\n Returns:\n input_gate: A LSTM-like insert gate.\n forget_gate: A LSTM-like forget gate.\n \"\"\"\n # We'll create the input and forget gates at once. Hence, calculate double\n # the gate size.\n\n # equation 8: since there is no output gate, h is just a tanh'ed m\n memory = torch.tanh(memory)\n\n # TODO: check this input flattening is correct\n # sonnet uses this, but i think it assumes time step of 1 for all cases\n # if inputs is (B, T, features) where T > 1, this gets incorrect\n # inputs = inputs.view(inputs.shape[0], -1)\n\n # fixed implementation\n if len(inputs.shape) == 3:\n #if inputs.shape[1] > 1:\n # raise ValueError(\n # \"input seq length is larger than 1. create_gate function is meant to be called for each step, with input seq length of 1\")\n \n # matmul for equation 4 and 5\n # there is no output gate, so equation 6 is not implemented\n #print('jello')\n gate_inputs = self.input_gate_projector(inputs)\n gate_inputs = gate_inputs.unsqueeze(dim=1)\n gate_memory = self.memory_gate_projector(memory)\n else:\n raise ValueError(\"input shape of create_gate function is 2, expects 3\")\n\n # this completes the equation 4 and 5\n #print(gate_inputs.size())\n #print(gate_memory.size())\n gates = gate_memory + gate_inputs\n #self.attn_log = gates[0]\n gates = torch.split(gates, split_size_or_sections=int(gates.shape[2] / 2), dim=2)\n input_gate, forget_gate = gates\n assert input_gate.shape[2] == forget_gate.shape[2]\n\n # to be used for equation 7\n self.attn_log = torch.zeros(input_gate.shape[1], input_gate.shape[2], 2)\n self.attn_log[:, :, 0] = input_gate[0].cpu()\n\n input_gate = torch.sigmoid(input_gate+self.input_bias)\n forget_gate = torch.sigmoid(forget_gate + self.forget_bias)\n\n return input_gate, forget_gate\n\n def attend_over_memory(self, inputs, memory):\n \"\"\"\n Perform multiheaded attention over `memory`.\n Args:\n memory: Current relational memory.\n Returns:\n The attended-over memory.\n \"\"\"\n for _ in range(self.num_blocks):\n attended_memory = self.multihead_attention(inputs, memory)\n\n # Add a skip connection to the multiheaded attention's input.\n memory = self.attended_memory_layernorm(memory + attended_memory)\n\n # add a skip connection to the attention_mlp's input.\n attention_mlp = memory\n for i, l in enumerate(self.attention_mlp):\n attention_mlp = self.attention_mlp[i](attention_mlp)\n attention_mlp = F.relu(attention_mlp)\n memory = self.attended_memory_layernorm2(memory + attention_mlp)\n #memory = self.multihead_attention(memory, memory, use_topk_ = False, store_log = False)\n\n return memory\n\n def forward_step(self, inputs, memory, treat_input_as_matrix=False):\n \"\"\"\n Forward step of the relational memory core.\n Args:\n inputs: Tensor input.\n memory: Memory output from the previous time step.\n treat_input_as_matrix: Optional, whether to treat `input` as a sequence\n of matrices. 
Default to False, in which case the input is flattened\n into a vector.\n Returns:\n output: This time step's output.\n next_memory: The next version of memory to use.\n \"\"\"\n\n if treat_input_as_matrix:\n # keep (Batch, Seq, ...) dim (0, 1), flatten starting from dim 2\n inputs = inputs.view(inputs.shape[0], inputs.shape[1], -1)\n # apply linear layer for dim 2\n inputs_reshape = self.input_projector(inputs)\n else:\n # keep (Batch, ...) dim (0), flatten starting from dim 1\n inputs = inputs.view(inputs.shape[0], -1)\n # apply linear layer for dim 1\n inputs = self.input_projector(inputs)\n # unsqueeze the time step to dim 1\n inputs_reshape = inputs.unsqueeze(dim=1)\n\n #memory_plus_input = torch.cat([memory, inputs_reshape], dim=1)\n #print(memory_plus_input.size())\n next_memory = self.attend_over_memory(inputs_reshape, memory)\n\n # cut out the concatenated input vectors from the original memory slots\n #n = inputs_reshape.shape[1]\n #next_memory = next_memory[:, :-n, :]\n\n if self.gate_style == 'unit' or self.gate_style == 'memory':\n # these gates are sigmoid-applied ones for equation 7\n input_gate, forget_gate = self.create_gates(inputs_reshape, memory)\n # equation 7 calculation\n next_memory = input_gate * torch.tanh(next_memory)\n next_memory += forget_gate * memory\n self.attn_log[:, :, 1] = input_gate[0].cpu()\n\n\n output = next_memory.reshape(next_memory.shape[0], -1)\n hx = self.multihead_attention(next_memory, inputs_reshape, use_topk_ = False, store_log = False)\n return output, next_memory, hx\n\n def forward(self, inputs, memory, parallel = True):\n # Starting each batch, we detach the hidden state from how it was previously produced.\n # If we didn't, the model would try backpropagating all the way to start of the dataset.\n # memory = self.repackage_hidden(memory)\n\n # for loop implementation of (entire) recurrent forward pass of the model\n # inputs is batch first [batch, seq], and output logit per step is [batch, vocab]\n # so the concatenated logits are [seq * batch, vocab]\n\n # targets are flattened [seq, batch] => [seq * batch], so the dimension is correct\n\n logits = []\n #print(inputs.size())\n #print(memory.size())\n #memory = self.repackage_hidden(memory)\n # shape[1] is seq_lenth T\n if not parallel:\n for idx_step in range(inputs.shape[1]):\n logit, memory = self.forward_step(inputs[:, idx_step], memory)\n logits.append(logit)\n logits = torch.cat(logits)\n else:\n logits, memory, hx = self.forward_step(inputs, memory, treat_input_as_matrix = True)\n \n memory_out = None #self.output_projector(memory.view(memory.shape[0], -1))\n\n #print(inputs.size())\n #print(memory_out.size())\n #print('------')\n if self.return_all_outputs:\n return logits, memory_out , memory, hx\n else:\n return logits, memory_out, memory, hx" }, { "identifier": "RelationalMemory", "path": "multi_part_assembly/utils/wx_transformer_utilities/relational_memory_regressive.py", "snippet": "class RelationalMemory(nn.Module):\n \"\"\"\n Constructs a `RelationalMemory` object.\n This class is same as the RMC from relational_rnn_models.py, but without language modeling-specific variables.\n Args:\n mem_slots: The total number of memory slots to use.\n head_size: The size of an attention head.\n input_size: The size of input per step. i.e. the dimension of each input vector\n num_heads: The number of attention heads to use. Defaults to 1.\n num_blocks: Number of times to compute attention per time step. 
Defaults\n to 1.\n forget_bias: Bias to use for the forget gate, assuming we are using\n some form of gating. Defaults to 1.\n input_bias: Bias to use for the input gate, assuming we are using\n some form of gating. Defaults to 0.\n gate_style: Whether to use per-element gating ('unit'),\n per-memory slot gating ('memory'), or no gating at all (None).\n Defaults to `unit`.\n attention_mlp_layers: Number of layers to use in the post-attention\n MLP. Defaults to 2.\n key_size: Size of vector to use for key & query vectors in the attention\n computation. Defaults to None, in which case we use `head_size`.\n name: Name of the module.\n\n # NEW flag for this class\n return_all_outputs: Whether the model returns outputs for each step (like seq2seq) or only the final output.\n Raises:\n ValueError: gate_style not one of [None, 'memory', 'unit'].\n ValueError: num_blocks is < 1.\n ValueError: attention_mlp_layers is < 1.\n \"\"\"\n\n def __init__(self, mem_slots, head_size, input_size, output_size, num_heads=1, num_blocks=1, forget_bias=1., input_bias=0.,\n gate_style='unit', attention_mlp_layers=2, key_size=None, return_all_outputs=False, use_topk = False, topk = 3, num_steps = 5,\n null_attention = False):\n super(RelationalMemory, self).__init__()\n\n ########## generic parameters for RMC ##########\n self.mem_slots = mem_slots\n self.head_size = head_size\n self.num_heads = num_heads\n self.mem_size = self.head_size * self.num_heads\n self.use_topk = use_topk\n self.topk = topk\n\n # a new fixed params needed for pytorch port of RMC\n # +1 is the concatenated input per time step : we do self-attention with the concatenated memory & input\n # so if the mem_slots = 1, this value is 2\n self.mem_slots_plus_input = self.mem_slots + 1\n\n if num_blocks < 1:\n raise ValueError('num_blocks must be >=1. Got: {}.'.format(num_blocks))\n self.num_blocks = num_blocks\n\n if gate_style not in ['unit', 'memory', None]:\n raise ValueError(\n 'gate_style must be one of [\\'unit\\', \\'memory\\', None]. got: '\n '{}.'.format(gate_style))\n self.gate_style = gate_style\n\n if attention_mlp_layers < 1:\n raise ValueError('attention_mlp_layers must be >= 1. 
Got: {}.'.format(\n attention_mlp_layers))\n self.attention_mlp_layers = attention_mlp_layers\n\n self.key_size = key_size if key_size else self.head_size\n\n ########## parameters for multihead attention ##########\n # value_size is same as head_size\n self.value_size = self.head_size\n # total size for query-key-value\n self.qkv_size = 2 * self.key_size + self.value_size\n self.total_qkv_size = self.qkv_size * self.num_heads # denoted as F\n\n self.query_proj = nn.Linear(self.mem_size, self.key_size * self.num_heads)\n self.key_proj = nn.Linear(self.mem_size, self.key_size * self.num_heads)\n self.value_proj = nn.Linear(self.mem_size, self.value_size * self.num_heads)\n\n\n # each head has qkv_sized linear projector\n # just using one big param is more efficient, rather than this line\n # self.qkv_projector = [nn.Parameter(torch.randn((self.qkv_size, self.qkv_size))) for _ in range(self.num_heads)]\n self.qkv_projector = nn.Linear(self.mem_size, self.total_qkv_size)\n self.qkv_layernorm = nn.LayerNorm(self.total_qkv_size)\n\n # used for attend_over_memory function\n self.attention_mlp = nn.ModuleList([nn.Linear(self.mem_size, self.mem_size)] * self.attention_mlp_layers)\n self.attended_memory_layernorm = nn.LayerNorm( self.mem_size)\n self.attended_memory_layernorm2 = nn.LayerNorm(self.mem_size)\n\n ########## parameters for initial embedded input projection ##########\n self.input_size = input_size\n self.input_projector = nn.Linear(self.input_size, self.mem_size)\n\n self.output_projector = nn.Linear(self.output_size, self.input_size)\n\n ########## parameters for gating ##########\n self.num_gates = 2 * self.calculate_gate_size()\n print('input projector:'+str(self.mem_size))\n self.input_gate_projector = nn.Linear(self.mem_size, self.num_gates)\n self.memory_gate_projector = nn.Linear(self.mem_size, self.num_gates)\n # trainable scalar gate bias tensors\n self.forget_bias = nn.Parameter(torch.tensor(forget_bias, dtype=torch.float32))\n self.input_bias = nn.Parameter(torch.tensor(input_bias, dtype=torch.float32))\n\n ########## number of outputs returned #####\n self.return_all_outputs = return_all_outputs\n\n self.null_attention = null_attention\n\n self.competition_mlp = nn.Sequential(nn.Linear(self.mem_slots * self.mem_size + self.mem_size, 256),\n nn.ReLU(),\n nn.Linear(256, 256),\n nn.ReLU(),\n nn.Linear(256, 256),\n nn.ReLU(),\n nn.Linear(256, 2))\n self.score_log = None\n\n def repackage_hidden(self, h):\n \"\"\"Wraps hidden states in new Tensors, to detach them from their history.\"\"\"\n # needed for truncated BPTT, called at every batch forward pass\n if isinstance(h, torch.Tensor):\n return h.detach()\n else:\n return tuple(self.repackage_hidden(v) for v in h)\n\n def initial_state(self, batch_size, ts, trainable=False):\n \"\"\"\n Creates the initial memory.\n We should ensure each row of the memory is initialized to be unique,\n so initialize the matrix to be the identity. We then pad or truncate\n as necessary so that init_state is of size\n (batch_size, self.mem_slots, self.mem_size).\n Args:\n batch_size: The size of the batch.\n trainable: Whether the initial state is trainable. 
This is always True.\n Returns:\n init_state: A truncated or padded matrix of size\n (batch_size, self.mem_slots, self.mem_size).\n \"\"\"\n init_state = torch.stack([torch.eye(self.mem_slots) for _ in range(batch_size)])\n\n # pad the matrix with zeros\n if self.mem_size > self.mem_slots:\n difference = self.mem_size - self.mem_slots\n pad = torch.zeros((batch_size, self.mem_slots, difference))\n init_state = torch.cat([init_state, pad], -1)\n\n # truncation. take the first 'self.mem_size' components\n elif self.mem_size < self.mem_slots:\n init_state = init_state[:, :, :self.mem_size]\n\n init_state = init_state.unsqueeze(1)\n init_state = init_state.repeat(1, ts, 1, 1)\n init_state = init_state.reshape(batch_size * ts, self.mem_slots, -1)\n\n return init_state\n\n def multihead_attention(self, input, memory, mask = None):\n \"\"\"\n Perform multi-head attention from 'Attention is All You Need'.\n Implementation of the attention mechanism from\n https://arxiv.org/abs/1706.03762.\n Args:\n memory: Memory tensor to perform attention on.\n Returns:\n new_memory: New memory tensor.\n \"\"\"\n\n q = self.query_proj(memory)\n k = self.key_proj(input)\n v = self.value_proj(input)\n\n q = q.reshape(q.size(0), q.size(1), self.num_heads, -1).permute(0, 2, 1, 3)\n k = k.reshape(k.size(0), k.size(1), self.num_heads, -1).permute(0, 2, 1, 3)\n v = v.reshape(v.size(0), v.size(1), self.num_heads, -1).permute(0, 2, 1, 3)\n scores = torch.matmul(q, k.transpose(2, 3))\n\n mask = mask.unsqueeze(1).unsqueeze(1)\n #print(mask.size())\n #print(scores.size())\n #scores = scores.masked_fill(mask.bool(), float('-inf'))\n scores = Identity().apply(scores)\n\n scores = torch.softmax(scores, dim = -1)\n scores = scores * mask # mask for attending to prev positions only\n self.score_log = scores\n if True:\n if self.use_topk:\n topk = torch.topk(scores, dim = -1, k = self.topk)\n topk_mask = torch.zeros(scores.size()).to(scores.device)\n topk_mask.scatter_(3, topk.indices, 1)\n scores = scores * topk_mask\n else:\n memory_flat = memory.reshape(memory.size(0), -1).unsqueeze(1)\n memory_flat = memory_flat.repeat(1, input.shape[1], 1)\n\n N = torch.cat((input, memory_flat), dim = 2)\n N = self.competition_mlp(N)\n\n N = torch.nn.functional.gumbel_softmax(N, dim = 2, hard = True, tau = 0.5)\n\n N = N[:, :, 0]\n\n scores = scores * N.unsqueeze(1).unsqueeze(1)\n\n\n output = torch.matmul(scores, v)\n\n \"\"\"#print(memory.size())\n # First, a simple linear projection is used to construct queries\n qkv = self.qkv_projector(memory)\n # apply layernorm for every dim except the batch dim\n qkv = self.qkv_layernorm(qkv)\n\n # mem_slots needs to be dynamically computed since mem_slots got concatenated with inputs\n # example: self.mem_slots=10 and seq_length is 3, and then mem_slots is 10 + 1 = 11 for each 3 step forward pass\n # this is the same as self.mem_slots_plus_input, but defined to keep the sonnet implementation code style\n mem_slots = memory.shape[1] # denoted as N\n\n # split the qkv to multiple heads H\n # [B, N, F] => [B, N, H, F/H]\n qkv_reshape = qkv.view(qkv.shape[0], mem_slots, self.num_heads, self.qkv_size)\n\n # [B, N, H, F/H] => [B, H, N, F/H]\n qkv_transpose = qkv_reshape.permute(0, 2, 1, 3)\n\n # [B, H, N, key_size], [B, H, N, key_size], [B, H, N, value_size]\n q, k, v = torch.split(qkv_transpose, [self.key_size, self.key_size, self.value_size], -1)\n\n # scale q with d_k, the dimensionality of the key vectors\n q *= (self.key_size ** -0.5)\n\n # make it [B, H, N, N]\n dot_product = torch.matmul(q, 
k.permute(0, 1, 3, 2))\n weights = F.softmax(dot_product, dim=-1)\n\n if self.use_topk:\n topk = torch.topk(weights, dim = -1, k = self.topk)\n mask = torch.zeros(weights.size()).to(weights.device)\n mask.scatter_(3, topk.indices, 1)\n weights = weights * mask\n\n # output is [B, H, N, V]\n output = torch.matmul(weights, v)\"\"\"\n\n # [B, H, N, V] => [B, N, H, V] => [B, N, H*V]\n output_transpose = output.permute(0, 2, 1, 3).contiguous()\n new_memory = output_transpose.view((output_transpose.shape[0], output_transpose.shape[1], -1))\n\n return new_memory\n\n\n @property\n def state_size(self):\n return [self.mem_slots, self.mem_size]\n\n @property\n def output_size(self):\n return self.mem_slots * self.mem_size\n\n def calculate_gate_size(self):\n \"\"\"\n Calculate the gate size from the gate_style.\n Returns:\n The per sample, per head parameter size of each gate.\n \"\"\"\n if self.gate_style == 'unit':\n return self.mem_size\n elif self.gate_style == 'memory':\n return 1\n else: # self.gate_style == None\n return 0\n\n def create_gates(self, inputs, memory):\n \"\"\"\n Create input and forget gates for this step using `inputs` and `memory`.\n Args:\n inputs: Tensor input.\n memory: The current state of memory.\n Returns:\n input_gate: A LSTM-like insert gate.\n forget_gate: A LSTM-like forget gate.\n \"\"\"\n # We'll create the input and forget gates at once. Hence, calculate double\n # the gate size.\n\n # equation 8: since there is no output gate, h is just a tanh'ed m\n memory = torch.tanh(memory)\n\n # TODO: check this input flattening is correct\n # sonnet uses this, but i think it assumes time step of 1 for all cases\n # if inputs is (B, T, features) where T > 1, this gets incorrect\n # inputs = inputs.view(inputs.shape[0], -1)\n\n # fixed implementation\n if len(inputs.shape) == 3:\n #if inputs.shape[1] > 1:\n # raise ValueError(\n # \"input seq length is larger than 1. 
create_gate function is meant to be called for each step, with input seq length of 1\")\n inputs = inputs.view(inputs.shape[0], -1)\n # matmul for equation 4 and 5\n # there is no output gate, so equation 6 is not implemented\n #print(inputs.size())\n gate_inputs = self.input_gate_projector(inputs)\n gate_inputs = gate_inputs.unsqueeze(dim=1)\n gate_memory = self.memory_gate_projector(memory)\n else:\n raise ValueError(\"input shape of create_gate function is 2, expects 3\")\n\n # this completes the equation 4 and 5\n #print(gate_inputs.size())\n #print(gate_memory.size())\n gates = gate_memory + gate_inputs\n gates = torch.split(gates, split_size_or_sections=int(gates.shape[2] / 2), dim=2)\n input_gate, forget_gate = gates\n assert input_gate.shape[2] == forget_gate.shape[2]\n\n # to be used for equation 7\n input_gate = torch.sigmoid(input_gate + self.input_bias)\n forget_gate = torch.sigmoid(forget_gate + self.forget_bias)\n\n return input_gate, forget_gate\n\n def attend_over_memory(self, inputs, memory, mask = None):\n \"\"\"\n Perform multiheaded attention over `memory`.\n Args:\n memory: Current relational memory.\n Returns:\n The attended-over memory.\n \"\"\"\n for _ in range(self.num_blocks):\n attended_memory = self.multihead_attention(inputs, memory, mask = mask)\n\n # Add a skip connection to the multiheaded attention's input.\n memory = self.attended_memory_layernorm(memory + attended_memory)\n\n # add a skip connection to the attention_mlp's input.\n attention_mlp = memory\n for i, l in enumerate(self.attention_mlp):\n attention_mlp = self.attention_mlp[i](attention_mlp)\n attention_mlp = F.relu(attention_mlp)\n memory = self.attended_memory_layernorm2(memory + attention_mlp)\n\n return memory\n\n def forward_step(self, inputs, memory, treat_input_as_matrix=False, mask = None, other_inp = None):\n \"\"\"\n Forward step of the relational memory core.\n Args:\n inputs: Tensor input.\n memory: Memory output from the previous time step.\n treat_input_as_matrix: Optional, whether to treat `input` as a sequence\n of matrices. Default to False, in which case the input is flattened\n into a vector.\n Returns:\n output: This time step's output.\n next_memory: The next version of memory to use.\n \"\"\"\n\n if treat_input_as_matrix:\n # keep (Batch, Seq, ...) dim (0, 1), flatten starting from dim 2\n inputs = inputs.view(inputs.shape[0], inputs.shape[1], -1)\n #print(inputs.size())\n # apply linear layer for dim 2\n inputs_reshape = self.input_projector(inputs)\n #print(inputs_reshape.size())\n else:\n # keep (Batch, ...) 
dim (0), flatten starting from dim 1\n inputs = inputs.view(inputs.shape[0], -1)\n # apply linear layer for dim 1\n inputs = self.input_projector(inputs)\n # unsqueeze the time step to dim 1\n inputs_reshape = inputs.unsqueeze(dim=1)\n\n #memory_plus_input = torch.cat([memory, inputs_reshape], dim=1)\n #print(memory_plus_input.size())\n next_memory = self.attend_over_memory(inputs_reshape, memory, mask = mask)\n\n #print(next_memory.size())\n #print(inputs_reshape.size())\n\n # cut out the concatenated input vectors from the original memory slots\n #n = inputs_reshape.shape[1]\n #next_memory = next_memory[:, :-n, :]\n\n if self.gate_style == 'unit' or self.gate_style == 'memory':\n # these gates are sigmoid-applied ones for equation 7\n input_gate, forget_gate = self.create_gates(other_inp.unsqueeze(1), memory)\n # equation 7 calculation\n next_memory = input_gate * torch.tanh(next_memory)\n next_memory += forget_gate * memory\n\n\n output = next_memory.view(next_memory.shape[0], -1)\n return output, next_memory\n\n # relational memory这里是不是\n def forward(self, inputs, memory):\n # Starting each batch, we detach the hidden state from how it was previously produced.\n # If we didn't, the model would try backpropagating all the way to start of the dataset.\n # memory = self.repackage_hidden(memory)\n\n # for loop implementation of (entire) recurrent forward pass of the model\n # inputs is batch first [batch, seq], and output logit per step is [batch, vocab]\n # so the concatenated logits are [seq * batch, vocab]\n\n # targets are flattened [seq, batch] => [seq * batch], so the dimension is correct\n\n B, T, D = inputs.size()\n mask = torch.ones(inputs.size(1), inputs.size(1)).to(inputs.device)\n mask = torch.tril(mask)\n mask = mask.unsqueeze(0)\n mask = mask.repeat(inputs.size(0), 1, 1)\n\n mask = mask.reshape(mask.size(0) * mask.size(1), -1)\n\n inputs_ = inputs.unsqueeze(2)\n inputs_ = inputs_.repeat(1, 1, inputs.size(1), 1)\n inputs_ = inputs_.reshape(B * T, T, -1)\n\n\n logits = []\n #print(inputs.size())\n #print(memory.size())\n #memory = self.repackage_hidden(memory)\n # shape[1] is seq_lenth T\n #if not parallel:\n # for idx_step in range(inputs.shape[1]):\n # logit, memory = self.forward_step(inputs[:, idx_step], memory)\n # logits.append(logit)\n # logits = torch.cat(logits)\n #else:\n logits, memory = self.forward_step(inputs_, memory, treat_input_as_matrix = True, mask = mask, other_inp = inputs.reshape(B * T, -1))\n \n memory_out = self.output_projector(memory.view(memory.shape[0], -1))\n\n #print(inputs.size())\n #print(memory_out.size())\n #print('------')\n if self.return_all_outputs:\n return logits, memory_out , memory\n else:\n return logits, memory_out, memory\n\n def print_log(self):\n print(self.score_log[25])" } ]
import math
import time
import numpy as np
import torch
import torch.nn.functional as F
import multi_part_assembly.utils.wx_transformer_utilities.fairseq_utils as utils
from typing import Dict, Optional, Tuple
from torch import Tensor, nn
from torch.nn import Parameter
from .fairseq_dropout import FairseqDropout
from .attention_rim import MultiHeadAttention as MHAMemory
from .quant_noise import quant_noise
from .group_linear_layer import GroupLinearLayer
from .relational_memory_volatile import RelationalMemory
from .relational_memory_regressive import RelationalMemory as RelationalMemoryRegressive
14,080
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. #import models.fairseq_util #from fairseq.incremental_decoding_utils import with_incremental_state #from .relational_memory_lstm import RelationalMemory # 为什么作者没有从这两个类别中引入relmem? #from fairseq.modules.shared_group_linear_layer import SharedGroupLinearLayer as GroupLinearLayer class MultiheadAttention(nn.Module): """Multi-headed attention. See "Attention Is All You Need" for more details. """ def __init__( self, embed_dim, num_heads, kdim=None, vdim=None, dropout=0.0, bias=True, add_bias_kv=False, add_zero_attn=False, self_attention=False, encoder_decoder_attention=False, q_noise=0.0, qn_block_size=8, nblocks=1, top_k_ratio=None, use_value_competition=True, shared_memory_attention = False, use_topk = False, topk = 3, num_steps = 5, mem_slots = 4, null_attention = False, regressive = False ): super().__init__() self.embed_dim = embed_dim self.kdim = kdim if kdim is not None else embed_dim self.vdim = vdim if vdim is not None else embed_dim self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim self.num_heads = num_heads self.dropout_module = FairseqDropout( dropout, module_name=self.__class__.__name__ ) self.head_dim = embed_dim // num_heads self.shared_memory_attention = shared_memory_attention print('total heads', self.num_heads) print('head dim', self.head_dim) self.use_topk = use_topk self.topk = topk print('use topk?' + str(self.use_topk)) print('topk:'+str(self.topk)) assert ( self.head_dim * num_heads == self.embed_dim ), "embed_dim must be divisible by num_heads" self.scaling = self.head_dim ** -0.5 self.self_attention = self_attention self.encoder_decoder_attention = encoder_decoder_attention assert not self.self_attention or self.qkv_same_dim, ( "Self-attention requires query, key and " "value to be of the same size" ) if not self.shared_memory_attention: # 这里的共享memory_attention是什么内容呢?表示的是不在不同的layer之间共享memory吗?
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. #import models.fairseq_util #from fairseq.incremental_decoding_utils import with_incremental_state #from .relational_memory_lstm import RelationalMemory # 为什么作者没有从这两个类别中引入relmem? #from fairseq.modules.shared_group_linear_layer import SharedGroupLinearLayer as GroupLinearLayer class MultiheadAttention(nn.Module): """Multi-headed attention. See "Attention Is All You Need" for more details. """ def __init__( self, embed_dim, num_heads, kdim=None, vdim=None, dropout=0.0, bias=True, add_bias_kv=False, add_zero_attn=False, self_attention=False, encoder_decoder_attention=False, q_noise=0.0, qn_block_size=8, nblocks=1, top_k_ratio=None, use_value_competition=True, shared_memory_attention = False, use_topk = False, topk = 3, num_steps = 5, mem_slots = 4, null_attention = False, regressive = False ): super().__init__() self.embed_dim = embed_dim self.kdim = kdim if kdim is not None else embed_dim self.vdim = vdim if vdim is not None else embed_dim self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim self.num_heads = num_heads self.dropout_module = FairseqDropout( dropout, module_name=self.__class__.__name__ ) self.head_dim = embed_dim // num_heads self.shared_memory_attention = shared_memory_attention print('total heads', self.num_heads) print('head dim', self.head_dim) self.use_topk = use_topk self.topk = topk print('use topk?' + str(self.use_topk)) print('topk:'+str(self.topk)) assert ( self.head_dim * num_heads == self.embed_dim ), "embed_dim must be divisible by num_heads" self.scaling = self.head_dim ** -0.5 self.self_attention = self_attention self.encoder_decoder_attention = encoder_decoder_attention assert not self.self_attention or self.qkv_same_dim, ( "Self-attention requires query, key and " "value to be of the same size" ) if not self.shared_memory_attention: # 这里的共享memory_attention是什么内容呢?表示的是不在不同的layer之间共享memory吗?
self.k_proj = quant_noise(GroupLinearLayer(self.kdim//nblocks, embed_dim//nblocks, nblocks, bias=bias), q_noise, qn_block_size)
2
2023-12-15 13:13:01+00:00
16k
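For this record the gold snippet (index 2, counting the context entries from zero) is the quant_noise helper, and the gold next line wraps a GroupLinearLayer key projection in it. The following is a minimal sketch of that pattern with assumed example sizes; per the quant_noise snippet, the block-dropping hook is only registered for p > 0 and its assert restricts that path to nn.Linear, nn.Embedding and nn.Conv2d, so with the default q_noise = 0.0 the wrapper simply returns the module unchanged.

# Sketch only: the module paths follow the repo layout named in this record,
# and the dimensions are assumed example values.
import torch
from multi_part_assembly.utils.wx_transformer_utilities.quant_noise import quant_noise
from multi_part_assembly.utils.wx_transformer_utilities.group_linear_layer import GroupLinearLayer

embed_dim, nblocks, q_noise, qn_block_size = 512, 1, 0.0, 8   # assumed; q_noise=0.0 keeps quant_noise a no-op
k_proj = quant_noise(
    GroupLinearLayer(embed_dim // nblocks, embed_dim // nblocks, nblocks, bias=True),
    q_noise,          # block-drop probability when > 0
    qn_block_size,    # block size used when noise is applied
)
x = torch.randn(7, 3, embed_dim)   # (time, batch, features), the layout GroupLinearLayer expects
y = k_proj(x)                      # -> torch.Size([7, 3, 512])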
camenduru/FreeInit-hf
app.py
[ { "identifier": "UNet3DConditionModel", "path": "animatediff/models/unet.py", "snippet": "class UNet3DConditionModel(ModelMixin, ConfigMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optional[int] = None,\n in_channels: int = 4,\n out_channels: int = 4,\n center_input_sample: bool = False,\n flip_sin_to_cos: bool = True,\n freq_shift: int = 0, \n down_block_types: Tuple[str] = (\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"DownBlock3D\",\n ),\n mid_block_type: str = \"UNetMidBlock3DCrossAttn\",\n up_block_types: Tuple[str] = (\n \"UpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\"\n ),\n only_cross_attention: Union[bool, Tuple[bool]] = False,\n block_out_channels: Tuple[int] = (320, 640, 1280, 1280),\n layers_per_block: int = 2,\n downsample_padding: int = 1,\n mid_block_scale_factor: float = 1,\n act_fn: str = \"silu\",\n norm_num_groups: int = 32,\n norm_eps: float = 1e-5,\n cross_attention_dim: int = 1280,\n attention_head_dim: Union[int, Tuple[int]] = 8,\n dual_cross_attention: bool = False,\n use_linear_projection: bool = False,\n class_embed_type: Optional[str] = None,\n num_class_embeds: Optional[int] = None,\n upcast_attention: bool = False,\n resnet_time_scale_shift: str = \"default\",\n \n use_inflated_groupnorm=False,\n \n # Additional\n use_motion_module = False,\n motion_module_resolutions = ( 1,2,4,8 ),\n motion_module_mid_block = False,\n motion_module_decoder_only = False,\n motion_module_type = None,\n motion_module_kwargs = {},\n unet_use_cross_frame_attention = None,\n unet_use_temporal_attention = None,\n ):\n super().__init__()\n \n self.sample_size = sample_size\n time_embed_dim = block_out_channels[0] * 4\n\n # input\n self.conv_in = InflatedConv3d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1))\n\n # time\n self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)\n timestep_input_dim = block_out_channels[0]\n\n self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n\n # class embedding\n if class_embed_type is None and num_class_embeds is not None:\n self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)\n elif class_embed_type == \"timestep\":\n self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n elif class_embed_type == \"identity\":\n self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)\n else:\n self.class_embedding = None\n\n self.down_blocks = nn.ModuleList([])\n self.mid_block = None\n self.up_blocks = nn.ModuleList([])\n\n if isinstance(only_cross_attention, bool):\n only_cross_attention = [only_cross_attention] * len(down_block_types)\n\n if isinstance(attention_head_dim, int):\n attention_head_dim = (attention_head_dim,) * len(down_block_types)\n\n # down\n output_channel = block_out_channels[0]\n for i, down_block_type in enumerate(down_block_types):\n res = 2 ** i\n input_channel = output_channel\n output_channel = block_out_channels[i]\n is_final_block = i == len(block_out_channels) - 1\n\n down_block = get_down_block(\n down_block_type,\n num_layers=layers_per_block,\n in_channels=input_channel,\n out_channels=output_channel,\n temb_channels=time_embed_dim,\n add_downsample=not is_final_block,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[i],\n 
downsample_padding=downsample_padding,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n use_inflated_groupnorm=use_inflated_groupnorm,\n \n use_motion_module=use_motion_module and (res in motion_module_resolutions) and (not motion_module_decoder_only),\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n self.down_blocks.append(down_block)\n\n # mid\n if mid_block_type == \"UNetMidBlock3DCrossAttn\":\n self.mid_block = UNetMidBlock3DCrossAttn(\n in_channels=block_out_channels[-1],\n temb_channels=time_embed_dim,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n output_scale_factor=mid_block_scale_factor,\n resnet_time_scale_shift=resnet_time_scale_shift,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[-1],\n resnet_groups=norm_num_groups,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n use_inflated_groupnorm=use_inflated_groupnorm,\n \n use_motion_module=use_motion_module and motion_module_mid_block,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n else:\n raise ValueError(f\"unknown mid_block_type : {mid_block_type}\")\n \n # count how many layers upsample the videos\n self.num_upsamplers = 0\n\n # up\n reversed_block_out_channels = list(reversed(block_out_channels))\n reversed_attention_head_dim = list(reversed(attention_head_dim))\n only_cross_attention = list(reversed(only_cross_attention))\n output_channel = reversed_block_out_channels[0]\n for i, up_block_type in enumerate(up_block_types):\n res = 2 ** (3 - i)\n is_final_block = i == len(block_out_channels) - 1\n\n prev_output_channel = output_channel\n output_channel = reversed_block_out_channels[i]\n input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]\n\n # add upsample block for all BUT final layer\n if not is_final_block:\n add_upsample = True\n self.num_upsamplers += 1\n else:\n add_upsample = False\n\n up_block = get_up_block(\n up_block_type,\n num_layers=layers_per_block + 1,\n in_channels=input_channel,\n out_channels=output_channel,\n prev_output_channel=prev_output_channel,\n temb_channels=time_embed_dim,\n add_upsample=add_upsample,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=reversed_attention_head_dim[i],\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n use_inflated_groupnorm=use_inflated_groupnorm,\n\n use_motion_module=use_motion_module and (res in motion_module_resolutions),\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n self.up_blocks.append(up_block)\n prev_output_channel = output_channel\n\n # out\n if 
use_inflated_groupnorm:\n self.conv_norm_out = InflatedGroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps)\n else:\n self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps)\n self.conv_act = nn.SiLU()\n self.conv_out = InflatedConv3d(block_out_channels[0], out_channels, kernel_size=3, padding=1)\n\n def set_attention_slice(self, slice_size):\n r\"\"\"\n Enable sliced attention computation.\n\n When this option is enabled, the attention module will split the input tensor in slices, to compute attention\n in several steps. This is useful to save some memory in exchange for a small speed decrease.\n\n Args:\n slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `\"auto\"`):\n When `\"auto\"`, halves the input to the attention heads, so attention will be computed in two steps. If\n `\"max\"`, maxium amount of memory will be saved by running only one slice at a time. If a number is\n provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`\n must be a multiple of `slice_size`.\n \"\"\"\n sliceable_head_dims = []\n\n def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):\n if hasattr(module, \"set_attention_slice\"):\n sliceable_head_dims.append(module.sliceable_head_dim)\n\n for child in module.children():\n fn_recursive_retrieve_slicable_dims(child)\n\n # retrieve number of attention layers\n for module in self.children():\n fn_recursive_retrieve_slicable_dims(module)\n\n num_slicable_layers = len(sliceable_head_dims)\n\n if slice_size == \"auto\":\n # half the attention head size is usually a good trade-off between\n # speed and memory\n slice_size = [dim // 2 for dim in sliceable_head_dims]\n elif slice_size == \"max\":\n # make smallest slice possible\n slice_size = num_slicable_layers * [1]\n\n slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size\n\n if len(slice_size) != len(sliceable_head_dims):\n raise ValueError(\n f\"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different\"\n f\" attention layers. 
Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.\"\n )\n\n for i in range(len(slice_size)):\n size = slice_size[i]\n dim = sliceable_head_dims[i]\n if size is not None and size > dim:\n raise ValueError(f\"size {size} has to be smaller or equal to {dim}.\")\n\n # Recursively walk through all the children.\n # Any children which exposes the set_attention_slice method\n # gets the message\n def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):\n if hasattr(module, \"set_attention_slice\"):\n module.set_attention_slice(slice_size.pop())\n\n for child in module.children():\n fn_recursive_set_attention_slice(child, slice_size)\n\n reversed_slice_size = list(reversed(slice_size))\n for module in self.children():\n fn_recursive_set_attention_slice(module, reversed_slice_size)\n\n def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D, CrossAttnUpBlock3D, UpBlock3D)):\n module.gradient_checkpointing = value\n\n def forward(\n self,\n sample: torch.FloatTensor,\n timestep: Union[torch.Tensor, float, int],\n encoder_hidden_states: torch.Tensor,\n class_labels: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n return_dict: bool = True,\n ) -> Union[UNet3DConditionOutput, Tuple]:\n r\"\"\"\n Args:\n sample (`torch.FloatTensor`): (batch, channel, height, width) noisy inputs tensor\n timestep (`torch.FloatTensor` or `float` or `int`): (batch) timesteps\n encoder_hidden_states (`torch.FloatTensor`): (batch, sequence_length, feature_dim) encoder hidden states\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple.\n\n Returns:\n [`~models.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:\n [`~models.unet_2d_condition.UNet2DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. 
When\n returning a tuple, the first element is the sample tensor.\n \"\"\"\n # By default samples have to be AT least a multiple of the overall upsampling factor.\n # The overall upsampling factor is equal to 2 ** (# num of upsampling layears).\n # However, the upsampling interpolation output size can be forced to fit any upsampling size\n # on the fly if necessary.\n default_overall_up_factor = 2**self.num_upsamplers\n\n # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`\n forward_upsample_size = False\n upsample_size = None\n\n if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):\n logger.info(\"Forward upsample size to force interpolation output size.\")\n forward_upsample_size = True\n\n # prepare attention_mask\n if attention_mask is not None:\n attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0\n attention_mask = attention_mask.unsqueeze(1)\n\n # center input if necessary\n if self.config.center_input_sample:\n sample = 2 * sample - 1.0\n\n # time\n timesteps = timestep\n if not torch.is_tensor(timesteps):\n # This would be a good case for the `match` statement (Python 3.10+)\n is_mps = sample.device.type == \"mps\"\n if isinstance(timestep, float):\n dtype = torch.float32 if is_mps else torch.float64\n else:\n dtype = torch.int32 if is_mps else torch.int64\n timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)\n elif len(timesteps.shape) == 0:\n timesteps = timesteps[None].to(sample.device)\n\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\n timesteps = timesteps.expand(sample.shape[0])\n\n t_emb = self.time_proj(timesteps)\n\n # timesteps does not contain any weights and will always return f32 tensors\n # but time_embedding might actually be running in fp16. 
so we need to cast here.\n # there might be better ways to encapsulate this.\n t_emb = t_emb.to(dtype=self.dtype)\n emb = self.time_embedding(t_emb)\n\n if self.class_embedding is not None:\n if class_labels is None:\n raise ValueError(\"class_labels should be provided when num_class_embeds > 0\")\n\n if self.config.class_embed_type == \"timestep\":\n class_labels = self.time_proj(class_labels)\n\n class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)\n emb = emb + class_emb\n\n # pre-process\n sample = self.conv_in(sample)\n\n # down\n down_block_res_samples = (sample,)\n for downsample_block in self.down_blocks:\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\n sample, res_samples = downsample_block(\n hidden_states=sample,\n temb=emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n )\n else:\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb, encoder_hidden_states=encoder_hidden_states)\n\n down_block_res_samples += res_samples\n\n # mid\n sample = self.mid_block(\n sample, emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask\n )\n\n # up\n for i, upsample_block in enumerate(self.up_blocks):\n is_final_block = i == len(self.up_blocks) - 1\n\n res_samples = down_block_res_samples[-len(upsample_block.resnets) :]\n down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]\n\n # if we have not reached the final block and need to forward the\n # upsample size, we do it here\n if not is_final_block and forward_upsample_size:\n upsample_size = down_block_res_samples[-1].shape[2:]\n\n if hasattr(upsample_block, \"has_cross_attention\") and upsample_block.has_cross_attention:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n encoder_hidden_states=encoder_hidden_states,\n upsample_size=upsample_size,\n attention_mask=attention_mask,\n )\n else:\n sample = upsample_block(\n hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size, encoder_hidden_states=encoder_hidden_states,\n )\n\n # post-process\n sample = self.conv_norm_out(sample)\n sample = self.conv_act(sample)\n sample = self.conv_out(sample)\n\n if not return_dict:\n return (sample,)\n\n return UNet3DConditionOutput(sample=sample)\n\n @classmethod\n def from_pretrained_2d(cls, pretrained_model_path, subfolder=None, unet_additional_kwargs=None):\n if subfolder is not None:\n pretrained_model_path = os.path.join(pretrained_model_path, subfolder)\n print(f\"loaded temporal unet's pretrained weights from {pretrained_model_path} ...\")\n\n config_file = os.path.join(pretrained_model_path, 'config.json')\n if not os.path.isfile(config_file):\n raise RuntimeError(f\"{config_file} does not exist\")\n with open(config_file, \"r\") as f:\n config = json.load(f)\n config[\"_class_name\"] = cls.__name__\n config[\"down_block_types\"] = [\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"DownBlock3D\"\n ]\n config[\"up_block_types\"] = [\n \"UpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\"\n ]\n\n from diffusers.utils import WEIGHTS_NAME\n model = cls.from_config(config, **unet_additional_kwargs)\n model_file = os.path.join(pretrained_model_path, WEIGHTS_NAME)\n if not os.path.isfile(model_file):\n raise RuntimeError(f\"{model_file} does not exist\")\n state_dict = torch.load(model_file, map_location=\"cpu\")\n\n m, u 
= model.load_state_dict(state_dict, strict=False)\n print(f\"### missing keys: {len(m)}; \\n### unexpected keys: {len(u)};\")\n # print(f\"### missing keys:\\n{m}\\n### unexpected keys:\\n{u}\\n\")\n \n params = [p.numel() if \"temporal\" in n else 0 for n, p in model.named_parameters()]\n print(f\"### Temporal Module Parameters: {sum(params) / 1e6} M\")\n \n return model" }, { "identifier": "AnimationFreeInitPipeline", "path": "animatediff/pipelines/pipeline_animation.py", "snippet": "class AnimationFreeInitPipeline(AnimationPipeline):\n _optional_components = []\n\n def __init__(\n self,\n vae: AutoencoderKL,\n text_encoder: CLIPTextModel,\n tokenizer: CLIPTokenizer,\n unet: UNet3DConditionModel,\n scheduler: Union[\n DDIMScheduler,\n PNDMScheduler,\n LMSDiscreteScheduler,\n EulerDiscreteScheduler,\n EulerAncestralDiscreteScheduler,\n DPMSolverMultistepScheduler,\n ],\n ):\n super().__init__(vae, text_encoder, tokenizer, unet, scheduler)\n self.freq_filter = None\n\n \n @torch.no_grad()\n def init_filter(self, video_length, height, width, filter_params):\n # initialize frequency filter for noise reinitialization\n batch_size = 1\n num_channels_latents = self.unet.in_channels\n filter_shape = [\n batch_size, \n num_channels_latents, \n video_length, \n height // self.vae_scale_factor, \n width // self.vae_scale_factor\n ]\n # self.freq_filter = get_freq_filter(filter_shape, device=self._execution_device, params=filter_params)\n self.freq_filter = get_freq_filter(\n filter_shape, \n device=self._execution_device, \n filter_type=filter_params.method,\n n=filter_params.n,\n d_s=filter_params.d_s,\n d_t=filter_params.d_t\n )\n\n @torch.no_grad()\n def __call__(\n self,\n prompt: Union[str, List[str]],\n video_length: Optional[int],\n height: Optional[int] = None,\n width: Optional[int] = None,\n num_inference_steps: int = 50,\n guidance_scale: float = 7.5,\n negative_prompt: Optional[Union[str, List[str]]] = None,\n num_videos_per_prompt: Optional[int] = 1,\n eta: float = 0.0,\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n latents: Optional[torch.FloatTensor] = None,\n output_type: Optional[str] = \"tensor\",\n return_dict: bool = True,\n callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,\n callback_steps: Optional[int] = 1,\n # freeinit args\n num_iters: int = 5,\n use_fast_sampling: bool = False,\n save_intermediate: bool = False,\n return_orig: bool = False,\n save_dir: str = None,\n save_name: str = None,\n use_fp16: bool = False,\n **kwargs\n ):\n if use_fp16:\n print('Warning: using half percision for inferencing!')\n self.vae.to(dtype=torch.float16)\n self.unet.to(dtype=torch.float16)\n self.text_encoder.to(dtype=torch.float16)\n # Default height and width to unet\n height = height or self.unet.config.sample_size * self.vae_scale_factor\n width = width or self.unet.config.sample_size * self.vae_scale_factor\n\n # Check inputs. Raise error if not correct\n # import pdb\n # pdb.set_trace()\n self.check_inputs(prompt, height, width, callback_steps)\n\n # Define call parameters\n # batch_size = 1 if isinstance(prompt, str) else len(prompt)\n batch_size = 1\n if latents is not None:\n batch_size = latents.shape[0]\n if isinstance(prompt, list):\n batch_size = len(prompt)\n\n device = self._execution_device\n # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)\n # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . 
`guidance_scale = 1`\n # corresponds to doing no classifier free guidance.\n do_classifier_free_guidance = guidance_scale > 1.0\n\n # Encode input prompt\n prompt = prompt if isinstance(prompt, list) else [prompt] * batch_size\n if negative_prompt is not None:\n negative_prompt = negative_prompt if isinstance(negative_prompt, list) else [negative_prompt] * batch_size \n text_embeddings = self._encode_prompt(\n prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt\n )\n\n # Prepare timesteps\n self.scheduler.set_timesteps(num_inference_steps, device=device)\n timesteps = self.scheduler.timesteps\n\n # Prepare latent variables\n num_channels_latents = self.unet.in_channels\n latents = self.prepare_latents(\n batch_size * num_videos_per_prompt,\n num_channels_latents,\n video_length,\n height,\n width,\n text_embeddings.dtype,\n device,\n generator,\n latents,\n )\n latents_dtype = latents.dtype\n\n # Prepare extra step kwargs.\n extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)\n\n # Sampling with FreeInit.\n for iter in range(num_iters):\n # FreeInit ------------------------------------------------------------------\n if iter == 0:\n initial_noise = latents.detach().clone()\n else:\n # 1. DDPM Forward with initial noise, get noisy latents z_T\n # if use_fast_sampling:\n # current_diffuse_timestep = self.scheduler.config.num_train_timesteps / num_iters * (iter + 1) - 1\n # else:\n # current_diffuse_timestep = self.scheduler.config.num_train_timesteps - 1\n current_diffuse_timestep = self.scheduler.config.num_train_timesteps - 1 # diffuse to t=999 noise level\n diffuse_timesteps = torch.full((batch_size,),int(current_diffuse_timestep))\n diffuse_timesteps = diffuse_timesteps.long()\n z_T = self.scheduler.add_noise(\n original_samples=latents.to(device), \n noise=initial_noise.to(device), \n timesteps=diffuse_timesteps.to(device)\n )\n # 2. create random noise z_rand for high-frequency\n z_rand = torch.randn((batch_size * num_videos_per_prompt, num_channels_latents, video_length, height // self.vae_scale_factor, width // self.vae_scale_factor), device=device)\n # 3. 
Roise Reinitialization\n latents = freq_mix_3d(z_T.to(dtype=torch.float32), z_rand, LPF=self.freq_filter)\n latents = latents.to(latents_dtype)\n \n # Coarse-to-Fine Sampling for Fast Inference (can lead to sub-optimal results)\n if use_fast_sampling:\n current_num_inference_steps= int(num_inference_steps / num_iters * (iter + 1))\n self.scheduler.set_timesteps(current_num_inference_steps, device=device)\n timesteps = self.scheduler.timesteps\n # --------------------------------------------------------------------------\n\n # Denoising loop\n num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order\n with self.progress_bar(total=num_inference_steps) as progress_bar:\n # if use_fast_sampling:\n # # Coarse-to-Fine Sampling for Fast Inference\n # current_num_inference_steps= int(num_inference_steps / num_iters * (iter + 1))\n # current_timesteps = timesteps[:current_num_inference_steps]\n # else:\n current_timesteps = timesteps\n for i, t in enumerate(current_timesteps):\n # expand the latents if we are doing classifier free guidance\n latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents\n latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)\n\n # predict the noise residual\n noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample.to(dtype=latents_dtype)\n\n # perform guidance\n if do_classifier_free_guidance:\n noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)\n noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)\n\n # compute the previous noisy sample x_t -> x_t-1\n latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample\n\n # call the callback, if provided\n if i == len(current_timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):\n progress_bar.update()\n if callback is not None and i % callback_steps == 0:\n callback(i, t, latents)\n \n # save intermediate results\n if save_intermediate:\n # Post-processing\n video = self.decode_latents(latents)\n video = torch.from_numpy(video)\n os.makedirs(save_dir, exist_ok=True)\n save_videos_grid(video, f\"{save_dir}/{save_name}_iter{iter}.gif\")\n \n if return_orig and iter==0:\n orig_video = self.decode_latents(latents)\n orig_video = torch.from_numpy(orig_video)\n\n # Post-processing\n video = self.decode_latents(latents)\n\n # Convert to tensor\n if output_type == \"tensor\":\n video = torch.from_numpy(video)\n\n if not return_dict:\n return video\n\n if return_orig:\n return AnimationFreeInitPipelineOutput(videos=video, orig_videos=orig_video)\n\n return AnimationFreeInitPipelineOutput(videos=video)" }, { "identifier": "save_videos_grid", "path": "animatediff/utils/util.py", "snippet": "def save_videos_grid(videos: torch.Tensor, path: str, rescale=False, n_rows=6, fps=8):\n videos = rearrange(videos, \"b c t h w -> t b c h w\")\n outputs = []\n for x in videos:\n x = torchvision.utils.make_grid(x, nrow=n_rows)\n x = x.transpose(0, 1).transpose(1, 2).squeeze(-1)\n if rescale:\n x = (x + 1.0) / 2.0 # -1,1 -> 0,1\n x = (x * 255).numpy().astype(np.uint8)\n outputs.append(x)\n\n os.makedirs(os.path.dirname(path), exist_ok=True)\n imageio.mimsave(path, outputs, fps=fps)" }, { "identifier": "convert_ldm_unet_checkpoint", "path": "animatediff/utils/convert_from_ckpt.py", "snippet": "def convert_ldm_unet_checkpoint(checkpoint, config, path=None, extract_ema=False, controlnet=False):\n \"\"\"\n Takes a state dict and a config, and 
returns a converted checkpoint.\n \"\"\"\n\n # extract state_dict for UNet\n unet_state_dict = {}\n keys = list(checkpoint.keys())\n\n if controlnet:\n unet_key = \"control_model.\"\n else:\n unet_key = \"model.diffusion_model.\"\n\n # at least a 100 parameters have to start with `model_ema` in order for the checkpoint to be EMA\n if sum(k.startswith(\"model_ema\") for k in keys) > 100 and extract_ema:\n print(f\"Checkpoint {path} has both EMA and non-EMA weights.\")\n print(\n \"In this conversion only the EMA weights are extracted. If you want to instead extract the non-EMA\"\n \" weights (useful to continue fine-tuning), please make sure to remove the `--extract_ema` flag.\"\n )\n for key in keys:\n if key.startswith(\"model.diffusion_model\"):\n flat_ema_key = \"model_ema.\" + \"\".join(key.split(\".\")[1:])\n unet_state_dict[key.replace(unet_key, \"\")] = checkpoint.pop(flat_ema_key)\n else:\n if sum(k.startswith(\"model_ema\") for k in keys) > 100:\n print(\n \"In this conversion only the non-EMA weights are extracted. If you want to instead extract the EMA\"\n \" weights (usually better for inference), please make sure to add the `--extract_ema` flag.\"\n )\n\n for key in keys:\n if key.startswith(unet_key):\n unet_state_dict[key.replace(unet_key, \"\")] = checkpoint.pop(key)\n\n new_checkpoint = {}\n\n new_checkpoint[\"time_embedding.linear_1.weight\"] = unet_state_dict[\"time_embed.0.weight\"]\n new_checkpoint[\"time_embedding.linear_1.bias\"] = unet_state_dict[\"time_embed.0.bias\"]\n new_checkpoint[\"time_embedding.linear_2.weight\"] = unet_state_dict[\"time_embed.2.weight\"]\n new_checkpoint[\"time_embedding.linear_2.bias\"] = unet_state_dict[\"time_embed.2.bias\"]\n\n if config[\"class_embed_type\"] is None:\n # No parameters to port\n ...\n elif config[\"class_embed_type\"] == \"timestep\" or config[\"class_embed_type\"] == \"projection\":\n new_checkpoint[\"class_embedding.linear_1.weight\"] = unet_state_dict[\"label_emb.0.0.weight\"]\n new_checkpoint[\"class_embedding.linear_1.bias\"] = unet_state_dict[\"label_emb.0.0.bias\"]\n new_checkpoint[\"class_embedding.linear_2.weight\"] = unet_state_dict[\"label_emb.0.2.weight\"]\n new_checkpoint[\"class_embedding.linear_2.bias\"] = unet_state_dict[\"label_emb.0.2.bias\"]\n else:\n raise NotImplementedError(f\"Not implemented `class_embed_type`: {config['class_embed_type']}\")\n\n new_checkpoint[\"conv_in.weight\"] = unet_state_dict[\"input_blocks.0.0.weight\"]\n new_checkpoint[\"conv_in.bias\"] = unet_state_dict[\"input_blocks.0.0.bias\"]\n\n if not controlnet:\n new_checkpoint[\"conv_norm_out.weight\"] = unet_state_dict[\"out.0.weight\"]\n new_checkpoint[\"conv_norm_out.bias\"] = unet_state_dict[\"out.0.bias\"]\n new_checkpoint[\"conv_out.weight\"] = unet_state_dict[\"out.2.weight\"]\n new_checkpoint[\"conv_out.bias\"] = unet_state_dict[\"out.2.bias\"]\n\n # Retrieves the keys for the input blocks only\n num_input_blocks = len({\".\".join(layer.split(\".\")[:2]) for layer in unet_state_dict if \"input_blocks\" in layer})\n input_blocks = {\n layer_id: [key for key in unet_state_dict if f\"input_blocks.{layer_id}\" in key]\n for layer_id in range(num_input_blocks)\n }\n\n # Retrieves the keys for the middle blocks only\n num_middle_blocks = len({\".\".join(layer.split(\".\")[:2]) for layer in unet_state_dict if \"middle_block\" in layer})\n middle_blocks = {\n layer_id: [key for key in unet_state_dict if f\"middle_block.{layer_id}\" in key]\n for layer_id in range(num_middle_blocks)\n }\n\n # Retrieves the keys for the output 
blocks only\n num_output_blocks = len({\".\".join(layer.split(\".\")[:2]) for layer in unet_state_dict if \"output_blocks\" in layer})\n output_blocks = {\n layer_id: [key for key in unet_state_dict if f\"output_blocks.{layer_id}\" in key]\n for layer_id in range(num_output_blocks)\n }\n\n for i in range(1, num_input_blocks):\n block_id = (i - 1) // (config[\"layers_per_block\"] + 1)\n layer_in_block_id = (i - 1) % (config[\"layers_per_block\"] + 1)\n\n resnets = [\n key for key in input_blocks[i] if f\"input_blocks.{i}.0\" in key and f\"input_blocks.{i}.0.op\" not in key\n ]\n attentions = [key for key in input_blocks[i] if f\"input_blocks.{i}.1\" in key]\n\n if f\"input_blocks.{i}.0.op.weight\" in unet_state_dict:\n new_checkpoint[f\"down_blocks.{block_id}.downsamplers.0.conv.weight\"] = unet_state_dict.pop(\n f\"input_blocks.{i}.0.op.weight\"\n )\n new_checkpoint[f\"down_blocks.{block_id}.downsamplers.0.conv.bias\"] = unet_state_dict.pop(\n f\"input_blocks.{i}.0.op.bias\"\n )\n\n paths = renew_resnet_paths(resnets)\n meta_path = {\"old\": f\"input_blocks.{i}.0\", \"new\": f\"down_blocks.{block_id}.resnets.{layer_in_block_id}\"}\n assign_to_checkpoint(\n paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config\n )\n\n if len(attentions):\n paths = renew_attention_paths(attentions)\n meta_path = {\"old\": f\"input_blocks.{i}.1\", \"new\": f\"down_blocks.{block_id}.attentions.{layer_in_block_id}\"}\n assign_to_checkpoint(\n paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config\n )\n\n resnet_0 = middle_blocks[0]\n attentions = middle_blocks[1]\n resnet_1 = middle_blocks[2]\n\n resnet_0_paths = renew_resnet_paths(resnet_0)\n assign_to_checkpoint(resnet_0_paths, new_checkpoint, unet_state_dict, config=config)\n\n resnet_1_paths = renew_resnet_paths(resnet_1)\n assign_to_checkpoint(resnet_1_paths, new_checkpoint, unet_state_dict, config=config)\n\n attentions_paths = renew_attention_paths(attentions)\n meta_path = {\"old\": \"middle_block.1\", \"new\": \"mid_block.attentions.0\"}\n assign_to_checkpoint(\n attentions_paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config\n )\n\n for i in range(num_output_blocks):\n block_id = i // (config[\"layers_per_block\"] + 1)\n layer_in_block_id = i % (config[\"layers_per_block\"] + 1)\n output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]\n output_block_list = {}\n\n for layer in output_block_layers:\n layer_id, layer_name = layer.split(\".\")[0], shave_segments(layer, 1)\n if layer_id in output_block_list:\n output_block_list[layer_id].append(layer_name)\n else:\n output_block_list[layer_id] = [layer_name]\n\n if len(output_block_list) > 1:\n resnets = [key for key in output_blocks[i] if f\"output_blocks.{i}.0\" in key]\n attentions = [key for key in output_blocks[i] if f\"output_blocks.{i}.1\" in key]\n\n resnet_0_paths = renew_resnet_paths(resnets)\n paths = renew_resnet_paths(resnets)\n\n meta_path = {\"old\": f\"output_blocks.{i}.0\", \"new\": f\"up_blocks.{block_id}.resnets.{layer_in_block_id}\"}\n assign_to_checkpoint(\n paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config\n )\n\n output_block_list = {k: sorted(v) for k, v in output_block_list.items()}\n if [\"conv.bias\", \"conv.weight\"] in output_block_list.values():\n index = list(output_block_list.values()).index([\"conv.bias\", \"conv.weight\"])\n new_checkpoint[f\"up_blocks.{block_id}.upsamplers.0.conv.weight\"] = 
unet_state_dict[\n f\"output_blocks.{i}.{index}.conv.weight\"\n ]\n new_checkpoint[f\"up_blocks.{block_id}.upsamplers.0.conv.bias\"] = unet_state_dict[\n f\"output_blocks.{i}.{index}.conv.bias\"\n ]\n\n # Clear attentions as they have been attributed above.\n if len(attentions) == 2:\n attentions = []\n\n if len(attentions):\n paths = renew_attention_paths(attentions)\n meta_path = {\n \"old\": f\"output_blocks.{i}.1\",\n \"new\": f\"up_blocks.{block_id}.attentions.{layer_in_block_id}\",\n }\n assign_to_checkpoint(\n paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config\n )\n else:\n resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1)\n for path in resnet_0_paths:\n old_path = \".\".join([\"output_blocks\", str(i), path[\"old\"]])\n new_path = \".\".join([\"up_blocks\", str(block_id), \"resnets\", str(layer_in_block_id), path[\"new\"]])\n\n new_checkpoint[new_path] = unet_state_dict[old_path]\n\n if controlnet:\n # conditioning embedding\n\n orig_index = 0\n\n new_checkpoint[\"controlnet_cond_embedding.conv_in.weight\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.weight\"\n )\n new_checkpoint[\"controlnet_cond_embedding.conv_in.bias\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.bias\"\n )\n\n orig_index += 2\n\n diffusers_index = 0\n\n while diffusers_index < 6:\n new_checkpoint[f\"controlnet_cond_embedding.blocks.{diffusers_index}.weight\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.weight\"\n )\n new_checkpoint[f\"controlnet_cond_embedding.blocks.{diffusers_index}.bias\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.bias\"\n )\n diffusers_index += 1\n orig_index += 2\n\n new_checkpoint[\"controlnet_cond_embedding.conv_out.weight\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.weight\"\n )\n new_checkpoint[\"controlnet_cond_embedding.conv_out.bias\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.bias\"\n )\n\n # down blocks\n for i in range(num_input_blocks):\n new_checkpoint[f\"controlnet_down_blocks.{i}.weight\"] = unet_state_dict.pop(f\"zero_convs.{i}.0.weight\")\n new_checkpoint[f\"controlnet_down_blocks.{i}.bias\"] = unet_state_dict.pop(f\"zero_convs.{i}.0.bias\")\n\n # mid block\n new_checkpoint[\"controlnet_mid_block.weight\"] = unet_state_dict.pop(\"middle_block_out.0.weight\")\n new_checkpoint[\"controlnet_mid_block.bias\"] = unet_state_dict.pop(\"middle_block_out.0.bias\")\n\n return new_checkpoint" }, { "identifier": "convert_ldm_clip_checkpoint", "path": "animatediff/utils/convert_from_ckpt.py", "snippet": "def convert_ldm_clip_checkpoint(checkpoint):\n text_model = CLIPTextModel.from_pretrained(\"openai/clip-vit-large-patch14\")\n keys = list(checkpoint.keys())\n\n text_model_dict = {}\n\n for key in keys:\n if key.startswith(\"cond_stage_model.transformer\"):\n text_model_dict[key[len(\"cond_stage_model.transformer.\") :]] = checkpoint[key]\n\n text_model.load_state_dict(text_model_dict)\n\n return text_model" }, { "identifier": "convert_ldm_vae_checkpoint", "path": "animatediff/utils/convert_from_ckpt.py", "snippet": "def convert_ldm_vae_checkpoint(checkpoint, config):\n # extract state dict for VAE\n vae_state_dict = {}\n vae_key = \"first_stage_model.\"\n keys = list(checkpoint.keys())\n for key in keys:\n if key.startswith(vae_key):\n vae_state_dict[key.replace(vae_key, \"\")] = checkpoint.get(key)\n\n new_checkpoint = {}\n\n new_checkpoint[\"encoder.conv_in.weight\"] = vae_state_dict[\"encoder.conv_in.weight\"]\n 
new_checkpoint[\"encoder.conv_in.bias\"] = vae_state_dict[\"encoder.conv_in.bias\"]\n new_checkpoint[\"encoder.conv_out.weight\"] = vae_state_dict[\"encoder.conv_out.weight\"]\n new_checkpoint[\"encoder.conv_out.bias\"] = vae_state_dict[\"encoder.conv_out.bias\"]\n new_checkpoint[\"encoder.conv_norm_out.weight\"] = vae_state_dict[\"encoder.norm_out.weight\"]\n new_checkpoint[\"encoder.conv_norm_out.bias\"] = vae_state_dict[\"encoder.norm_out.bias\"]\n\n new_checkpoint[\"decoder.conv_in.weight\"] = vae_state_dict[\"decoder.conv_in.weight\"]\n new_checkpoint[\"decoder.conv_in.bias\"] = vae_state_dict[\"decoder.conv_in.bias\"]\n new_checkpoint[\"decoder.conv_out.weight\"] = vae_state_dict[\"decoder.conv_out.weight\"]\n new_checkpoint[\"decoder.conv_out.bias\"] = vae_state_dict[\"decoder.conv_out.bias\"]\n new_checkpoint[\"decoder.conv_norm_out.weight\"] = vae_state_dict[\"decoder.norm_out.weight\"]\n new_checkpoint[\"decoder.conv_norm_out.bias\"] = vae_state_dict[\"decoder.norm_out.bias\"]\n\n new_checkpoint[\"quant_conv.weight\"] = vae_state_dict[\"quant_conv.weight\"]\n new_checkpoint[\"quant_conv.bias\"] = vae_state_dict[\"quant_conv.bias\"]\n new_checkpoint[\"post_quant_conv.weight\"] = vae_state_dict[\"post_quant_conv.weight\"]\n new_checkpoint[\"post_quant_conv.bias\"] = vae_state_dict[\"post_quant_conv.bias\"]\n\n # Retrieves the keys for the encoder down blocks only\n num_down_blocks = len({\".\".join(layer.split(\".\")[:3]) for layer in vae_state_dict if \"encoder.down\" in layer})\n down_blocks = {\n layer_id: [key for key in vae_state_dict if f\"down.{layer_id}\" in key] for layer_id in range(num_down_blocks)\n }\n\n # Retrieves the keys for the decoder up blocks only\n num_up_blocks = len({\".\".join(layer.split(\".\")[:3]) for layer in vae_state_dict if \"decoder.up\" in layer})\n up_blocks = {\n layer_id: [key for key in vae_state_dict if f\"up.{layer_id}\" in key] for layer_id in range(num_up_blocks)\n }\n\n for i in range(num_down_blocks):\n resnets = [key for key in down_blocks[i] if f\"down.{i}\" in key and f\"down.{i}.downsample\" not in key]\n\n if f\"encoder.down.{i}.downsample.conv.weight\" in vae_state_dict:\n new_checkpoint[f\"encoder.down_blocks.{i}.downsamplers.0.conv.weight\"] = vae_state_dict.pop(\n f\"encoder.down.{i}.downsample.conv.weight\"\n )\n new_checkpoint[f\"encoder.down_blocks.{i}.downsamplers.0.conv.bias\"] = vae_state_dict.pop(\n f\"encoder.down.{i}.downsample.conv.bias\"\n )\n\n paths = renew_vae_resnet_paths(resnets)\n meta_path = {\"old\": f\"down.{i}.block\", \"new\": f\"down_blocks.{i}.resnets\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)\n\n mid_resnets = [key for key in vae_state_dict if \"encoder.mid.block\" in key]\n num_mid_res_blocks = 2\n for i in range(1, num_mid_res_blocks + 1):\n resnets = [key for key in mid_resnets if f\"encoder.mid.block_{i}\" in key]\n\n paths = renew_vae_resnet_paths(resnets)\n meta_path = {\"old\": f\"mid.block_{i}\", \"new\": f\"mid_block.resnets.{i - 1}\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)\n\n mid_attentions = [key for key in vae_state_dict if \"encoder.mid.attn\" in key]\n paths = renew_vae_attention_paths(mid_attentions)\n meta_path = {\"old\": \"mid.attn_1\", \"new\": \"mid_block.attentions.0\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)\n conv_attn_to_linear(new_checkpoint)\n\n for i in 
range(num_up_blocks):\n block_id = num_up_blocks - 1 - i\n resnets = [\n key for key in up_blocks[block_id] if f\"up.{block_id}\" in key and f\"up.{block_id}.upsample\" not in key\n ]\n\n if f\"decoder.up.{block_id}.upsample.conv.weight\" in vae_state_dict:\n new_checkpoint[f\"decoder.up_blocks.{i}.upsamplers.0.conv.weight\"] = vae_state_dict[\n f\"decoder.up.{block_id}.upsample.conv.weight\"\n ]\n new_checkpoint[f\"decoder.up_blocks.{i}.upsamplers.0.conv.bias\"] = vae_state_dict[\n f\"decoder.up.{block_id}.upsample.conv.bias\"\n ]\n\n paths = renew_vae_resnet_paths(resnets)\n meta_path = {\"old\": f\"up.{block_id}.block\", \"new\": f\"up_blocks.{i}.resnets\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)\n\n mid_resnets = [key for key in vae_state_dict if \"decoder.mid.block\" in key]\n num_mid_res_blocks = 2\n for i in range(1, num_mid_res_blocks + 1):\n resnets = [key for key in mid_resnets if f\"decoder.mid.block_{i}\" in key]\n\n paths = renew_vae_resnet_paths(resnets)\n meta_path = {\"old\": f\"mid.block_{i}\", \"new\": f\"mid_block.resnets.{i - 1}\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)\n\n mid_attentions = [key for key in vae_state_dict if \"decoder.mid.attn\" in key]\n paths = renew_vae_attention_paths(mid_attentions)\n meta_path = {\"old\": \"mid.attn_1\", \"new\": \"mid_block.attentions.0\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)\n conv_attn_to_linear(new_checkpoint)\n return new_checkpoint" }, { "identifier": "get_freq_filter", "path": "animatediff/utils/freeinit_utils.py", "snippet": "def get_freq_filter(shape, device, filter_type, n, d_s, d_t):\n \"\"\"\n Form the frequency filter for noise reinitialization.\n\n Args:\n shape: shape of latent (B, C, T, H, W)\n filter_type: type of the freq filter\n n: (only for butterworth) order of the filter, larger n ~ ideal, smaller n ~ gaussian\n d_s: normalized stop frequency for spatial dimensions (0.0-1.0)\n d_t: normalized stop frequency for temporal dimension (0.0-1.0)\n \"\"\"\n if filter_type == \"gaussian\":\n return gaussian_low_pass_filter(shape=shape, d_s=d_s, d_t=d_t).to(device)\n elif filter_type == \"ideal\":\n return ideal_low_pass_filter(shape=shape, d_s=d_s, d_t=d_t).to(device)\n elif filter_type == \"box\":\n return box_low_pass_filter(shape=shape, d_s=d_s, d_t=d_t).to(device)\n elif filter_type == \"butterworth\":\n return butterworth_low_pass_filter(shape=shape, n=n, d_s=d_s, d_t=d_t).to(device)\n else:\n raise NotImplementedError" } ]
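The context list above references `get_freq_filter` together with a `freq_mix_3d` call inside the FreeInit sampling loop, but the bodies of the low-pass filters and of the mixing function are not part of this record. The sketch below illustrates the idea under that caveat: a Gaussian low-pass mask built for a `(B, C, T, H, W)` latent, and a frequency-domain blend that keeps the low frequencies of the diffused latent `z_T` while drawing high frequencies from fresh noise `z_rand`. The function names mirror the ones referenced in the snippets; the implementations are illustrative assumptions, not the repository's code.

import torch


def gaussian_low_pass_filter(shape, d_s=0.25, d_t=0.25):
    # Gaussian mask over the centered 3D spectrum of a (B, C, T, H, W) latent;
    # d_s / d_t are the normalized spatial / temporal stop frequencies (assumed semantics,
    # matching the get_freq_filter docstring above).
    T, H, W = shape[-3], shape[-2], shape[-1]
    ts = torch.linspace(-1, 1, T).view(T, 1, 1)
    hs = torch.linspace(-1, 1, H).view(1, H, 1)
    ws = torch.linspace(-1, 1, W).view(1, 1, W)
    d = (ts / d_t) ** 2 + (hs / d_s) ** 2 + (ws / d_s) ** 2
    return torch.exp(-0.5 * d).expand(*shape)


def freq_mix_3d(x, noise, LPF):
    # Keep the low-frequency band of x and take the high-frequency band from noise.
    dims = (-3, -2, -1)
    x_freq = torch.fft.fftshift(torch.fft.fftn(x, dim=dims), dim=dims)
    n_freq = torch.fft.fftshift(torch.fft.fftn(noise, dim=dims), dim=dims)
    mixed = x_freq * LPF + n_freq * (1 - LPF)
    return torch.fft.ifftn(torch.fft.ifftshift(mixed, dim=dims), dim=dims).real


if __name__ == "__main__":
    shape = (1, 4, 16, 64, 64)   # (batch, channels, frames, H // 8, W // 8) latent
    z_T = torch.randn(shape)     # diffused latent carrying the low-frequency layout
    z_rand = torch.randn(shape)  # fresh noise supplying high-frequency detail
    latents = freq_mix_3d(z_T, z_rand, gaussian_low_pass_filter(shape))
    print(latents.shape)         # torch.Size([1, 4, 16, 64, 64])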
import os
import torch
import random
import gradio as gr
from glob import glob
from omegaconf import OmegaConf
from safetensors import safe_open
from diffusers import AutoencoderKL
from diffusers import EulerDiscreteScheduler, DDIMScheduler
from diffusers.utils.import_utils import is_xformers_available
from transformers import CLIPTextModel, CLIPTokenizer
from animatediff.models.unet import UNet3DConditionModel
from animatediff.pipelines.pipeline_animation import AnimationFreeInitPipeline
from animatediff.utils.util import save_videos_grid
from animatediff.utils.convert_from_ckpt import convert_ldm_unet_checkpoint, convert_ldm_clip_checkpoint, convert_ldm_vae_checkpoint
from diffusers.training_utils import set_seed
from animatediff.utils.freeinit_utils import get_freq_filter
from collections import namedtuple
14217
"butterworth", 0.25, 0.25, 3, ["use_fp16"] ], # 4-MajicMix [ "majicmixRealistic_v5Preview.safetensors", "mm_sd_v14.ckpt", "1girl, reading book", "(ng_deepnegative_v1_75t:1.2), (badhandv4:1), (worst quality:2), (low quality:2), (normal quality:2), lowres, bad anatomy, bad hands, watermark, moles", 512, 512, "2005563494988190", "butterworth", 0.25, 0.25, 3, ["use_fp16"] ], # # 5-RealisticVision # [ # "realisticVisionV51_v20Novae.safetensors", # "mm_sd_v14.ckpt", # "A panda standing on a surfboard in the ocean in sunset.", # "worst quality, low quality, nsfw, logo", # 512, 512, "2005563494988190", # "butterworth", 0.25, 0.25, 3, # ["use_fp16"] # ] ] # clean unrelated ckpts # ckpts = [ # "realisticVisionV40_v20Novae.safetensors", # "majicmixRealistic_v5Preview.safetensors", # "rcnzCartoon3d_v10.safetensors", # "lyriel_v16.safetensors", # "toonyou_beta3.safetensors" # ] # for path in glob(os.path.join("models", "DreamBooth_LoRA", "*.safetensors")): # for ckpt in ckpts: # if path.endswith(ckpt): break # else: # print(f"### Cleaning {path} ...") # os.system(f"rm -rf {path}") # os.system(f"rm -rf {os.path.join('models', 'DreamBooth_LoRA', '*.safetensors')}") # os.system(f"bash download_bashscripts/1-ToonYou.sh") # os.system(f"bash download_bashscripts/2-Lyriel.sh") # os.system(f"bash download_bashscripts/3-RcnzCartoon.sh") # os.system(f"bash download_bashscripts/4-MajicMix.sh") # os.system(f"bash download_bashscripts/5-RealisticVision.sh") # # clean Gradio cache # print(f"### Cleaning cached examples ...") # os.system(f"rm -rf gradio_cached_examples/") class AnimateController: def __init__(self): # config dirs self.basedir = os.getcwd() self.stable_diffusion_dir = os.path.join(self.basedir, "models", "StableDiffusion") self.motion_module_dir = os.path.join(self.basedir, "models", "Motion_Module") self.personalized_model_dir = os.path.join(self.basedir, "models", "DreamBooth_LoRA") self.savedir = os.path.join(self.basedir, "samples") os.makedirs(self.savedir, exist_ok=True) self.base_model_list = [] self.motion_module_list = [] self.filter_type_list = [ "butterworth", "gaussian", "box", "ideal" ] self.selected_base_model = None self.selected_motion_module = None self.selected_filter_type = None self.set_width = None self.set_height = None self.set_d_s = None self.set_d_t = None self.refresh_motion_module() self.refresh_personalized_model() # config models self.inference_config = OmegaConf.load(inference_config_path) self.tokenizer = CLIPTokenizer.from_pretrained(pretrained_model_path, subfolder="tokenizer") self.text_encoder = CLIPTextModel.from_pretrained(pretrained_model_path, subfolder="text_encoder").cuda() self.vae = AutoencoderKL.from_pretrained(pretrained_model_path, subfolder="vae").cuda() self.unet = UNet3DConditionModel.from_pretrained_2d(pretrained_model_path, subfolder="unet", unet_additional_kwargs=OmegaConf.to_container(self.inference_config.unet_additional_kwargs)).cuda() self.freq_filter = None self.update_base_model(self.base_model_list[-2]) self.update_motion_module(self.motion_module_list[0]) self.update_filter(512, 512, self.filter_type_list[0], 0.25, 0.25) def refresh_motion_module(self): motion_module_list = glob(os.path.join(self.motion_module_dir, "*.ckpt")) self.motion_module_list = sorted([os.path.basename(p) for p in motion_module_list]) def refresh_personalized_model(self): base_model_list = glob(os.path.join(self.personalized_model_dir, "*.safetensors")) self.base_model_list = sorted([os.path.basename(p) for p in base_model_list]) def update_base_model(self, 
base_model_dropdown): self.selected_base_model = base_model_dropdown base_model_dropdown = os.path.join(self.personalized_model_dir, base_model_dropdown) base_model_state_dict = {} with safe_open(base_model_dropdown, framework="pt", device="cpu") as f: for key in f.keys(): base_model_state_dict[key] = f.get_tensor(key) converted_vae_checkpoint = convert_ldm_vae_checkpoint(base_model_state_dict, self.vae.config) self.vae.load_state_dict(converted_vae_checkpoint)
pretrained_model_path = "models/StableDiffusion/stable-diffusion-v1-5" inference_config_path = "configs/inference/inference-v1.yaml" css = """ .toolbutton { margin-buttom: 0em 0em 0em 0em; max-width: 2.5em; min-width: 2.5em !important; height: 2.5em; } """ examples = [ # 0-RealisticVision [ "realisticVisionV51_v20Novae.safetensors", "mm_sd_v14.ckpt", "A panda standing on a surfboard in the ocean under moonlight.", "worst quality, low quality, nsfw, logo", 512, 512, "2005563494988190", "butterworth", 0.25, 0.25, 3, ["use_fp16"] ], # 1-ToonYou [ "toonyou_beta3.safetensors", "mm_sd_v14.ckpt", "(best quality, masterpiece), 1girl, looking at viewer, blurry background, upper body, contemporary, dress", "(worst quality, low quality)", 512, 512, "478028150728261", "butterworth", 0.25, 0.25, 3, ["use_fp16"] ], # 2-Lyriel [ "lyriel_v16.safetensors", "mm_sd_v14.ckpt", "hypercars cyberpunk moving, muted colors, swirling color smokes, legend, cityscape, space", "3d, cartoon, anime, sketches, worst quality, low quality, nsfw, logo", 512, 512, "1566149281915957", "butterworth", 0.25, 0.25, 3, ["use_fp16"] ], # 3-RCNZ [ "rcnzCartoon3d_v10.safetensors", "mm_sd_v14.ckpt", "A cute raccoon playing guitar in a boat on the ocean", "worst quality, low quality, nsfw, logo", 512, 512, "1566149281915957", "butterworth", 0.25, 0.25, 3, ["use_fp16"] ], # 4-MajicMix [ "majicmixRealistic_v5Preview.safetensors", "mm_sd_v14.ckpt", "1girl, reading book", "(ng_deepnegative_v1_75t:1.2), (badhandv4:1), (worst quality:2), (low quality:2), (normal quality:2), lowres, bad anatomy, bad hands, watermark, moles", 512, 512, "2005563494988190", "butterworth", 0.25, 0.25, 3, ["use_fp16"] ], # # 5-RealisticVision # [ # "realisticVisionV51_v20Novae.safetensors", # "mm_sd_v14.ckpt", # "A panda standing on a surfboard in the ocean in sunset.", # "worst quality, low quality, nsfw, logo", # 512, 512, "2005563494988190", # "butterworth", 0.25, 0.25, 3, # ["use_fp16"] # ] ] # clean unrelated ckpts # ckpts = [ # "realisticVisionV40_v20Novae.safetensors", # "majicmixRealistic_v5Preview.safetensors", # "rcnzCartoon3d_v10.safetensors", # "lyriel_v16.safetensors", # "toonyou_beta3.safetensors" # ] # for path in glob(os.path.join("models", "DreamBooth_LoRA", "*.safetensors")): # for ckpt in ckpts: # if path.endswith(ckpt): break # else: # print(f"### Cleaning {path} ...") # os.system(f"rm -rf {path}") # os.system(f"rm -rf {os.path.join('models', 'DreamBooth_LoRA', '*.safetensors')}") # os.system(f"bash download_bashscripts/1-ToonYou.sh") # os.system(f"bash download_bashscripts/2-Lyriel.sh") # os.system(f"bash download_bashscripts/3-RcnzCartoon.sh") # os.system(f"bash download_bashscripts/4-MajicMix.sh") # os.system(f"bash download_bashscripts/5-RealisticVision.sh") # # clean Gradio cache # print(f"### Cleaning cached examples ...") # os.system(f"rm -rf gradio_cached_examples/") class AnimateController: def __init__(self): # config dirs self.basedir = os.getcwd() self.stable_diffusion_dir = os.path.join(self.basedir, "models", "StableDiffusion") self.motion_module_dir = os.path.join(self.basedir, "models", "Motion_Module") self.personalized_model_dir = os.path.join(self.basedir, "models", "DreamBooth_LoRA") self.savedir = os.path.join(self.basedir, "samples") os.makedirs(self.savedir, exist_ok=True) self.base_model_list = [] self.motion_module_list = [] self.filter_type_list = [ "butterworth", "gaussian", "box", "ideal" ] self.selected_base_model = None self.selected_motion_module = None self.selected_filter_type = None self.set_width = None 
self.set_height = None self.set_d_s = None self.set_d_t = None self.refresh_motion_module() self.refresh_personalized_model() # config models self.inference_config = OmegaConf.load(inference_config_path) self.tokenizer = CLIPTokenizer.from_pretrained(pretrained_model_path, subfolder="tokenizer") self.text_encoder = CLIPTextModel.from_pretrained(pretrained_model_path, subfolder="text_encoder").cuda() self.vae = AutoencoderKL.from_pretrained(pretrained_model_path, subfolder="vae").cuda() self.unet = UNet3DConditionModel.from_pretrained_2d(pretrained_model_path, subfolder="unet", unet_additional_kwargs=OmegaConf.to_container(self.inference_config.unet_additional_kwargs)).cuda() self.freq_filter = None self.update_base_model(self.base_model_list[-2]) self.update_motion_module(self.motion_module_list[0]) self.update_filter(512, 512, self.filter_type_list[0], 0.25, 0.25) def refresh_motion_module(self): motion_module_list = glob(os.path.join(self.motion_module_dir, "*.ckpt")) self.motion_module_list = sorted([os.path.basename(p) for p in motion_module_list]) def refresh_personalized_model(self): base_model_list = glob(os.path.join(self.personalized_model_dir, "*.safetensors")) self.base_model_list = sorted([os.path.basename(p) for p in base_model_list]) def update_base_model(self, base_model_dropdown): self.selected_base_model = base_model_dropdown base_model_dropdown = os.path.join(self.personalized_model_dir, base_model_dropdown) base_model_state_dict = {} with safe_open(base_model_dropdown, framework="pt", device="cpu") as f: for key in f.keys(): base_model_state_dict[key] = f.get_tensor(key) converted_vae_checkpoint = convert_ldm_vae_checkpoint(base_model_state_dict, self.vae.config) self.vae.load_state_dict(converted_vae_checkpoint)
converted_unet_checkpoint = convert_ldm_unet_checkpoint(base_model_state_dict, self.unet.config)
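The cropped code ends inside `update_base_model` right after the VAE weights are converted, and the stored next line above converts the UNet weights in the same way. The sketch below compresses that load-and-convert flow; it reuses the `safe_open` pattern and the `convert_ldm_*` signatures from the snippets above, while the helper name `load_personalized_base_model` and the `strict=False` UNet load are assumptions made for illustration.

from safetensors import safe_open

from animatediff.utils.convert_from_ckpt import (
    convert_ldm_unet_checkpoint,
    convert_ldm_vae_checkpoint,
)


def load_personalized_base_model(path_safetensors, vae, unet):
    # Read the DreamBooth/LoRA checkpoint tensor by tensor, as update_base_model does.
    state_dict = {}
    with safe_open(path_safetensors, framework="pt", device="cpu") as f:
        for key in f.keys():
            state_dict[key] = f.get_tensor(key)

    # Convert and load the VAE weights (this is where the cropped code stops).
    vae.load_state_dict(convert_ldm_vae_checkpoint(state_dict, vae.config))

    # Convert and load the UNet weights (the stored next line performs this conversion);
    # strict=False is an assumption so that motion-module parameters missing from the
    # 2D checkpoint do not raise.
    unet.load_state_dict(convert_ldm_unet_checkpoint(state_dict, unet.config), strict=False)

    return vae, unet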
3
2023-12-19 21:06:32+00:00
16k
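Taken together, a record like the one above pairs retrieved context snippets and a truncated file prefix with the single ground-truth line that follows it. As a purely hypothetical illustration of how such a record might be consumed, the helper below assembles a completion prompt and keeps the reference line aside; the dictionary keys follow the record layout of this dump, and the prompt format itself is an assumption.

def build_next_line_example(record: dict) -> tuple[str, str]:
    # Prepend each retrieved snippet, labelled with its source path and identifier.
    context_blocks = [
        f"# {snip['path']} :: {snip['identifier']}\n{snip['snippet']}"
        for snip in record["context"]
    ]
    # The model sees the retrieved context followed by the cropped file prefix ...
    prompt = "\n\n".join(context_blocks + [record["cropped_code"]])
    # ... and is evaluated against the stored ground-truth next line.
    reference = record["next_line"]
    return prompt, reference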
exislow/tidal-dl-ng
tidal_dl_ng/gui.py
[ { "identifier": "get_format_template", "path": "tidal_dl_ng/helper/path.py", "snippet": "def get_format_template(\n media: Track | Album | Playlist | UserPlaylist | Video | Mix | MediaType, settings\n) -> str | bool:\n result = False\n\n if isinstance(media, Track) or media == MediaType.TRACK:\n result = settings.data.format_track\n elif isinstance(media, Album) or media == MediaType.ALBUM:\n result = settings.data.format_album\n elif isinstance(media, Playlist | UserPlaylist) or media == MediaType.PLAYLIST:\n result = settings.data.format_playlist\n elif isinstance(media, Mix) or media == MediaType.MIX:\n result = settings.data.format_mix\n elif isinstance(media, Video) or media == MediaType.VIDEO:\n result = settings.data.format_video\n\n return result" }, { "identifier": "Settings", "path": "tidal_dl_ng/config.py", "snippet": "class Settings(BaseConfig, metaclass=SingletonMeta):\n cls_model = ModelSettings\n data = None\n\n def __init__(self):\n self.file_path = path_file_settings()\n self.read(self.file_path)" }, { "identifier": "Tidal", "path": "tidal_dl_ng/config.py", "snippet": "class Tidal(BaseConfig, metaclass=SingletonMeta):\n cls_model = ModelToken\n session: tidalapi.Session = None\n data: ModelToken = None\n token_from_storage: bool = False\n settings: Settings = None\n\n def __init__(self, settings: Settings = None):\n self.session = tidalapi.Session()\n # self.session.config.client_id = \"km8T1xS355y7dd3H\"\n # self.session.config.client_secret = \"vcmeGW1OuZ0fWYMCSZ6vNvSLJlT3XEpW0ambgYt5ZuI=\"\n self.session.video_quality = tidalapi.VideoQuality.high\n self.file_path = path_file_token()\n self.token_from_storage = self.read(self.file_path)\n self.login_token()\n\n if settings:\n self.settings = settings\n self.settings_apply()\n\n def settings_apply(self, settings: Settings = None) -> bool:\n if settings:\n self.settings = settings\n\n self.session.audio_quality = self.settings.data.quality_audio\n\n return True\n\n def login_token(self) -> bool:\n result = False\n\n if self.token_from_storage:\n try:\n result = self.session.load_oauth_session(\n self.data.token_type, self.data.access_token, self.data.refresh_token, self.data.expiry_time\n )\n except HTTPError:\n result = False\n\n return result\n\n def login_oauth_start(self, function=print) -> None:\n self.session.login_oauth_simple(function)\n\n def login_oauth_finish(self) -> bool:\n result = self.session.check_login()\n\n if result:\n self.token_persist()\n\n return result\n\n def token_persist(self) -> None:\n self.set_option(\"token_type\", self.session.token_type)\n self.set_option(\"access_token\", self.session.access_token)\n self.set_option(\"refresh_token\", self.session.refresh_token)\n self.set_option(\"expiry_time\", self.session.expiry_time)\n self.save()\n\n def login(self, fn_print: Callable) -> bool:\n is_token = self.login_token()\n result = False\n\n if is_token:\n fn_print(\"Yep, looks good! You are logged in.\")\n\n result = True\n elif not is_token:\n fn_print(\"You either do not have a token or your token is invalid.\")\n fn_print(\"No worries, we will handle this...\")\n self.login_oauth_start(fn_print)\n\n is_login = self.login_oauth_finish()\n\n if is_login:\n fn_print(\"The login was successful. I have stored your credentials (token).\")\n\n result = True\n else:\n fn_print(\"Something went wrong. Did you login using your browser correctly? 
May try again...\")\n\n return result" }, { "identifier": "QualityVideo", "path": "tidal_dl_ng/constants.py", "snippet": "class QualityVideo(Enum):\n P360: int = 360\n P480: int = 480\n P720: int = 720\n P1080: int = 1080" }, { "identifier": "TidalLists", "path": "tidal_dl_ng/constants.py", "snippet": "class TidalLists(Enum):\n PLAYLISTS = \"Playlists\"\n FAVORITES = \"Favorites\"\n MIXES = \"Mixes\"" }, { "identifier": "Download", "path": "tidal_dl_ng/download.py", "snippet": "class Download:\n settings: Settings = None\n session: Session = None\n skip_existing: SkipExisting = False\n\n def __init__(self, session: Session, skip_existing: SkipExisting = SkipExisting.Disabled):\n self.settings = Settings()\n self.session = session\n self.skip_existing = skip_existing\n\n def _download(\n self,\n fn_logger: Callable,\n media: Track | Video,\n progress: Progress,\n progress_gui: ProgressBars,\n stream_manifest: StreamManifest,\n path_file: str,\n ):\n media_name: str = name_builder_item(media)\n\n # Set the correct progress output channel.\n if progress_gui is None:\n progress_stdout: bool = True\n else:\n progress_stdout: bool = False\n # Send signal to GUI with media name\n progress_gui.item_name.emit(media_name)\n\n try:\n # Compute total iterations for progress\n urls_count: int = len(stream_manifest.urls)\n\n if urls_count > 1:\n progress_total: int = urls_count\n block_size: int | None = None\n else:\n # Compute progress iterations based on the file size.\n r = requests.get(stream_manifest.urls[0], stream=True, timeout=REQUESTS_TIMEOUT_SEC)\n\n r.raise_for_status()\n\n # Get file size and compute progress steps\n total_size_in_bytes: int = int(r.headers.get(\"content-length\", 0))\n block_size: int | None = 4096\n progress_total: float = total_size_in_bytes / block_size\n\n # Create progress Task\n p_task: TaskID = progress.add_task(\n f\"[blue]Item '{media_name[:30]}'\",\n total=progress_total,\n visible=progress_stdout,\n )\n\n # Write content to file until progress is finished.\n while not progress.tasks[p_task].finished:\n with open(path_file, \"wb\") as f:\n for url in stream_manifest.urls:\n # Create the request object with stream=True, so the content won't be loaded into memory at once.\n r = requests.get(url, stream=True, timeout=REQUESTS_TIMEOUT_SEC)\n\n r.raise_for_status()\n\n # Write the content to disk. 
If `chunk_size` is set to `None` the whole file will be written at once.\n for data in r.iter_content(chunk_size=block_size):\n f.write(data)\n # Advance progress bar.\n progress.advance(p_task)\n\n # To send the progress to the GUI, we need to emit the percentage.\n if not progress_stdout:\n progress_gui.item.emit(progress.tasks[p_task].percentage)\n except HTTPError as e:\n # TODO: Handle Exception...\n fn_logger(e)\n\n # Check if file is encrypted.\n needs_decryption = self.is_encrypted(stream_manifest.encryption_type)\n\n if needs_decryption:\n key, nonce = decrypt_security_token(stream_manifest.encryption_key)\n tmp_path_file_decrypted = path_file + \"_decrypted\"\n decrypt_file(path_file, tmp_path_file_decrypted, key, nonce)\n else:\n tmp_path_file_decrypted = path_file\n\n # Write metadata to file.\n if not isinstance(media, Video):\n self.metadata_write(media, tmp_path_file_decrypted)\n\n return tmp_path_file_decrypted\n\n def instantiate_media(\n self,\n session: Session,\n media_type: type[MediaType.TRACK, MediaType.VIDEO, MediaType.ALBUM, MediaType.PLAYLIST, MediaType.MIX],\n id_media: str,\n ) -> Track | Video:\n if media_type == MediaType.TRACK:\n media = Track(session, id_media)\n elif media_type == MediaType.VIDEO:\n media = Video(session, id_media)\n elif media_type == MediaType.ALBUM:\n media = Album(self.session, id_media)\n elif media_type == MediaType.PLAYLIST:\n media = Playlist(self.session, id_media)\n elif media_type == MediaType.MIX:\n media = Mix(self.session, id_media)\n else:\n raise MediaUnknown\n\n return media\n\n def item(\n self,\n path_base: str,\n file_template: str,\n fn_logger: Callable,\n media: Track | Video = None,\n media_id: str = None,\n media_type: MediaType = None,\n video_download: bool = True,\n progress_gui: ProgressBars = None,\n progress: Progress = None,\n ) -> (bool, str):\n # If no media instance is provided, we need to create the media instance.\n if media_id and media_type:\n media = self.instantiate_media(self.session, media_type, media_id)\n elif not media:\n raise MediaMissing\n\n # If video download is not allowed end here\n if not video_download:\n fn_logger.info(\n f\"Video downloads are deactivated (see settings). 
Skipping video: {name_builder_item(media)}\"\n )\n\n return False, \"\"\n\n # Create file name and path\n file_name_relative = format_path_media(file_template, media)\n path_file = os.path.abspath(os.path.normpath(os.path.join(path_base, file_name_relative)))\n\n # Populate StreamManifest for further download.\n if isinstance(media, Track):\n stream = media.stream()\n manifest: str = stream.manifest\n mime_type: str = stream.manifest_mime_type\n else:\n manifest: str = media.get_url()\n mime_type: str = StreamManifestMimeType.VIDEO.value\n\n stream_manifest = self.stream_manifest_parse(manifest, mime_type)\n\n # Sanitize final path_file to fit into OS boundaries.\n path_file = path_file_sanitize(path_file + stream_manifest.file_extension, adapt=True)\n\n # Compute if and how downloads need to be skipped.\n if self.skip_existing.value:\n extension_ignore = self.skip_existing == SkipExisting.ExtensionIgnore\n # TODO: Check if extension is already in `path_file` or not.\n download_skip = check_file_exists(path_file, extension_ignore=extension_ignore)\n else:\n download_skip = False\n\n if not download_skip:\n # Create a temp directory and file.\n with tempfile.TemporaryDirectory(ignore_cleanup_errors=True) as tmp_path_dir:\n tmp_path_file = os.path.join(tmp_path_dir, str(uuid4()) + stream_manifest.file_extension)\n # Download media.\n tmp_path_file = self._download(fn_logger, media, progress, progress_gui, stream_manifest, tmp_path_file)\n\n if isinstance(media, Video) and self.settings.data.video_convert_mp4:\n # TODO: Make optional.\n # Convert `*.ts` file to `*.mp4` using ffmpeg\n tmp_path_file = self._video_convert(tmp_path_file)\n path_file = os.path.splitext(path_file)[0] + \".mp4\"\n\n # Move final file to the configured destination directory.\n os.makedirs(os.path.dirname(path_file), exist_ok=True)\n shutil.move(tmp_path_file, path_file)\n else:\n fn_logger.debug(f\"Download skipped, since file exists: '{path_file}'\")\n\n return not download_skip, path_file\n\n def cover_url(self, sid: str, dimension: CoverDimensions = CoverDimensions.Px320):\n if sid is None:\n return \"\"\n\n return f\"https://resources.tidal.com/images/{sid.replace('-', '/')}/{dimension.value}.jpg\"\n\n def metadata_write(self, track: Track, path_file: str):\n result: bool = False\n release_date: str = (\n track.album.release_date.strftime(\"%Y-%m-%d\") if track.album and track.album.release_date else \"\"\n )\n copy_right: str = track.copyright if hasattr(track, \"copyright\") and track.copyright else \"\"\n isrc: str = track.isrc if hasattr(track, \"isrc\") and track.isrc else \"\"\n lyrics: str = \"\"\n\n if self.settings.data.lyrics_save:\n # Try to retrieve lyrics.\n try:\n lyrics: str = track.lyrics().subtitles if hasattr(track, \"lyrics\") else \"\"\n except HTTPError:\n # TODO: Implement proper logging.\n print(f\"Could not retrieve lyrics for `{name_builder_item(track)}`.\")\n\n # TODO: Check if it is possible to pass \"None\" values.\n m: Metadata = Metadata(\n path_file=path_file,\n lyrics=lyrics,\n copy_right=copy_right,\n title=track.name,\n artists=[artist.name for artist in track.artists],\n album=track.album.name if track.album else \"\",\n tracknumber=track.track_num,\n date=release_date,\n isrc=isrc,\n albumartist=name_builder_item(track),\n totaltrack=track.album.num_tracks if track.album and track.album.num_tracks else 1,\n totaldisc=track.album.num_volumes if track.album and track.album.num_volumes else 1,\n discnumber=track.volume_num if track.volume_num else 1,\n url_cover=(\n 
self.cover_url(track.album.cover, self.settings.data.metadata_cover_dimension) if track.album else \"\"\n ),\n )\n\n m.save()\n\n result = True\n\n return result\n\n def items(\n self,\n path_base: str,\n fn_logger: Logger | WrapperLogger,\n media_id: str = None,\n media_type: MediaType = None,\n file_template: str = None,\n media: Album | Playlist | UserPlaylist | Mix = None,\n video_download: bool = False,\n progress_gui: ProgressBars = None,\n progress: Progress = None,\n download_delay: bool = True,\n ):\n # If no media instance is provided, we need to create the media instance.\n if media_id and media_type:\n media = self.instantiate_media(self.session, media_type, media_id)\n elif not media:\n raise MediaMissing\n\n # Create file name and path\n file_name_relative = format_path_media(file_template, media)\n\n # TODO: Extend with pagination support: Iterate through `items` and `tracks`until len(returned list) == 0\n # Get the items and name of the list.\n if isinstance(media, Mix):\n items = media.items()\n list_media_name = media.title[:30]\n elif video_download:\n items = media.items(limit=100)\n list_media_name = media.name[:30]\n else:\n items = media.tracks(limit=999)\n list_media_name = media.name[:30]\n\n # Determine where to redirect the progress information.\n if progress_gui is None:\n progress_stdout: bool = True\n else:\n progress_stdout: bool = False\n\n # Create the list progress task.\n p_task1: TaskID = progress.add_task(\n f\"[green]List '{list_media_name}'\", total=len(items), visible=progress_stdout\n )\n\n # Iterate through list items\n while not progress.finished:\n for media in items:\n # TODO: Handle return value of `track` method.\n # Download the item.\n status_download, result_path_file = self.item(\n path_base=path_base,\n file_template=file_name_relative,\n media=media,\n progress_gui=progress_gui,\n progress=progress,\n fn_logger=fn_logger,\n )\n\n # Advance progress bar.\n progress.advance(p_task1)\n\n if not progress_stdout:\n progress_gui.list_item.emit(progress.tasks[p_task1].percentage)\n\n # If a file was downloaded and the download delay is enabled, wait until the next download.\n if download_delay and status_download:\n time_sleep: float = round(random.SystemRandom().uniform(2, 5), 1)\n\n # TODO: Fix logging. 
Is not displayed in debug window.\n fn_logger.debug(f\"Next download will start in {time_sleep} seconds.\")\n time.sleep(time_sleep)\n\n def is_encrypted(self, encryption_type: str) -> bool:\n result = encryption_type != \"NONE\"\n\n return result\n\n def get_file_extension(self, stream_url: str, stream_codec: str) -> str:\n if \".flac\" in stream_url:\n result: str = \".flac\"\n elif \".mp4\" in stream_url:\n # TODO: Need to investigate, what the correct extension is.\n # if \"ac4\" in stream_codec or \"mha1\" in stream_codec:\n # result = \".mp4\"\n # elif \"flac\" in stream_codec:\n # result = \".flac\"\n # else:\n # result = \".m4a\"\n result: str = \".mp4\"\n elif \".ts\" in stream_url:\n result: str = \".ts\"\n else:\n result: str = \".m4a\"\n\n return result\n\n def _video_convert(self, path_file: str) -> str:\n path_file_out = os.path.splitext(path_file)[0] + \".mp4\"\n result, _ = ffmpeg.input(path_file).output(path_file_out, map=0, c=\"copy\").run()\n\n return path_file_out\n\n def stream_manifest_parse(self, manifest: str, mime_type: str) -> StreamManifest:\n if mime_type == StreamManifestMimeType.MPD.value:\n # Stream Manifest is base64 encoded.\n manifest_parsed: str = base64.b64decode(manifest).decode(\"utf-8\")\n mpd = MPEGDASHParser.parse(manifest_parsed)\n codecs: str = mpd.periods[0].adaptation_sets[0].representations[0].codecs\n mime_type: str = mpd.periods[0].adaptation_sets[0].mime_type\n # TODO: Handle encryption key. But I have never seen an encrypted file so far.\n encryption_type: str = \"NONE\"\n encryption_key: str | None = None\n # .initialization + the very first of .media; See https://developers.broadpeak.io/docs/foundations-dash\n segments_count = 1 + 1\n\n for s in mpd.periods[0].adaptation_sets[0].representations[0].segment_templates[0].segment_timelines[0].Ss:\n segments_count += s.r if s.r else 1\n\n # Populate segment urls.\n segment_template = mpd.periods[0].adaptation_sets[0].representations[0].segment_templates[0]\n stream_urls: list[str] = []\n\n for index in range(segments_count):\n stream_urls.append(segment_template.media.replace(\"$Number$\", str(index)))\n\n elif mime_type == StreamManifestMimeType.BTS.value:\n # Stream Manifest is base64 encoded.\n manifest_parsed: str = base64.b64decode(manifest).decode(\"utf-8\")\n # JSON string to object.\n stream_manifest = json.loads(manifest_parsed)\n # TODO: Handle more than one download URL\n stream_urls: str = stream_manifest[\"urls\"]\n codecs: str = stream_manifest[\"codecs\"]\n mime_type: str = stream_manifest[\"mimeType\"]\n encryption_type: str = stream_manifest[\"encryptionType\"]\n encryption_key: str | None = (\n stream_manifest[\"encryptionKey\"] if self.is_encrypted(encryption_type) else None\n )\n elif mime_type == StreamManifestMimeType.VIDEO.value:\n # Parse M3U8 video playlist\n m3u8_variant: m3u8.M3U8 = m3u8.load(manifest)\n # Find the desired video resolution or the next best one.\n m3u8_playlist, codecs = self._extract_video_stream(m3u8_variant, self.settings.data.quality_video.value)\n # Populate urls.\n stream_urls: list[str] = m3u8_playlist.files\n\n # TODO: Handle encryption key. 
But I have never seen an encrypted file so far.\n encryption_type: str = \"NONE\"\n encryption_key: str | None = None\n else:\n raise UnknownManifestFormat\n\n file_extension: str = self.get_file_extension(stream_urls[0], codecs)\n\n result: StreamManifest = StreamManifest(\n urls=stream_urls,\n codecs=codecs,\n file_extension=file_extension,\n encryption_type=encryption_type,\n encryption_key=encryption_key,\n mime_type=mime_type,\n )\n\n return result\n\n def _extract_video_stream(self, m3u8_variant: m3u8.M3U8, quality: str) -> (m3u8.M3U8 | bool, str):\n m3u8_playlist: m3u8.M3U8 | bool = False\n resolution_best: int = 0\n mime_type: str = \"\"\n\n if m3u8_variant.is_variant:\n for playlist in m3u8_variant.playlists:\n if resolution_best < playlist.stream_info.resolution[1]:\n resolution_best = playlist.stream_info.resolution[1]\n m3u8_playlist = m3u8.load(playlist.uri)\n mime_type = playlist.stream_info.codecs\n\n if quality == playlist.stream_info.resolution[1]:\n break\n\n return m3u8_playlist, mime_type" }, { "identifier": "XStream", "path": "tidal_dl_ng/logger.py", "snippet": "class XStream(QtCore.QObject):\nclass QtHandler(logging.Handler):\n def flush(self):\n def fileno(self):\n def write(self, msg):\n def stdout():\n def stderr():\n def __init__(self):\n def emit(self, record):" }, { "identifier": "ProgressBars", "path": "tidal_dl_ng/model/gui_data.py", "snippet": "class ProgressBars:\n item: QtCore.Signal\n item_name: QtCore.Signal\n list_item: QtCore.Signal" }, { "identifier": "ResultSearch", "path": "tidal_dl_ng/model/gui_data.py", "snippet": "class ResultSearch:\n position: int\n artist: str\n title: str\n album: str\n duration_sec: int\n obj: object" }, { "identifier": "Ui_MainWindow", "path": "tidal_dl_ng/ui/main.py", "snippet": "class Ui_MainWindow:\n def setupUi(self, MainWindow):\n if not MainWindow.objectName():\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.resize(900, 700)\n self.a_options = QAction(MainWindow)\n self.a_options.setObjectName(\"a_options\")\n self.a_options.setEnabled(False)\n self.a_options.setText(\"Options\")\n self.a_options.setIconText(\"Options\")\n # if QT_CONFIG(tooltip)\n self.a_options.setToolTip(\"Options\")\n # endif // QT_CONFIG(tooltip)\n # if QT_CONFIG(statustip)\n self.a_options.setStatusTip(\"\")\n # endif // QT_CONFIG(statustip)\n # if QT_CONFIG(whatsthis)\n self.a_options.setWhatsThis(\"\")\n # endif // QT_CONFIG(whatsthis)\n self.w_central = QWidget(MainWindow)\n self.w_central.setObjectName(\"w_central\")\n self.w_central.setEnabled(True)\n sizePolicy = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Expanding)\n sizePolicy.setHorizontalStretch(100)\n sizePolicy.setVerticalStretch(100)\n sizePolicy.setHeightForWidth(self.w_central.sizePolicy().hasHeightForWidth())\n self.w_central.setSizePolicy(sizePolicy)\n # if QT_CONFIG(tooltip)\n self.w_central.setToolTip(\"\")\n # endif // QT_CONFIG(tooltip)\n # if QT_CONFIG(statustip)\n self.w_central.setStatusTip(\"\")\n # endif // QT_CONFIG(statustip)\n # if QT_CONFIG(whatsthis)\n self.w_central.setWhatsThis(\"\")\n # endif // QT_CONFIG(whatsthis)\n # if QT_CONFIG(accessibility)\n self.w_central.setAccessibleName(\"\")\n # endif // QT_CONFIG(accessibility)\n # if QT_CONFIG(accessibility)\n self.w_central.setAccessibleDescription(\"\")\n # endif // QT_CONFIG(accessibility)\n self.horizontalLayout = QHBoxLayout(self.w_central)\n self.horizontalLayout.setObjectName(\"horizontalLayout\")\n self.lh_main = QHBoxLayout()\n self.lh_main.setObjectName(\"lh_main\")\n 
self.lh_main.setSizeConstraint(QLayout.SetNoConstraint)\n self.tr_lists_user = QTreeWidget(self.w_central)\n __qtreewidgetitem = QTreeWidgetItem()\n __qtreewidgetitem.setText(1, \"Info\")\n __qtreewidgetitem.setText(0, \"Playlist\")\n self.tr_lists_user.setHeaderItem(__qtreewidgetitem)\n __qtreewidgetitem1 = QTreeWidgetItem(self.tr_lists_user)\n __qtreewidgetitem1.setFlags(Qt.ItemIsEnabled)\n __qtreewidgetitem2 = QTreeWidgetItem(self.tr_lists_user)\n __qtreewidgetitem2.setFlags(Qt.ItemIsEnabled)\n __qtreewidgetitem3 = QTreeWidgetItem(self.tr_lists_user)\n __qtreewidgetitem3.setFlags(Qt.ItemIsEnabled)\n self.tr_lists_user.setObjectName(\"tr_lists_user\")\n # if QT_CONFIG(tooltip)\n self.tr_lists_user.setToolTip(\"\")\n # endif // QT_CONFIG(tooltip)\n # if QT_CONFIG(statustip)\n self.tr_lists_user.setStatusTip(\"\")\n # endif // QT_CONFIG(statustip)\n # if QT_CONFIG(whatsthis)\n self.tr_lists_user.setWhatsThis(\"\")\n # endif // QT_CONFIG(whatsthis)\n # if QT_CONFIG(accessibility)\n self.tr_lists_user.setAccessibleName(\"\")\n # endif // QT_CONFIG(accessibility)\n # if QT_CONFIG(accessibility)\n self.tr_lists_user.setAccessibleDescription(\"\")\n # endif // QT_CONFIG(accessibility)\n self.tr_lists_user.setEditTriggers(QAbstractItemView.NoEditTriggers)\n self.tr_lists_user.setProperty(\"showDropIndicator\", False)\n self.tr_lists_user.setIndentation(10)\n self.tr_lists_user.setUniformRowHeights(True)\n self.tr_lists_user.setSortingEnabled(True)\n self.tr_lists_user.header().setCascadingSectionResizes(True)\n self.tr_lists_user.header().setHighlightSections(True)\n self.tr_lists_user.header().setProperty(\"showSortIndicator\", True)\n\n self.lh_main.addWidget(self.tr_lists_user)\n\n self.lv_search_result = QVBoxLayout()\n # ifndef Q_OS_MAC\n self.lv_search_result.setSpacing(-1)\n # endif\n self.lv_search_result.setObjectName(\"lv_search_result\")\n self.lh_search = QHBoxLayout()\n self.lh_search.setObjectName(\"lh_search\")\n self.l_search = QLineEdit(self.w_central)\n self.l_search.setObjectName(\"l_search\")\n self.l_search.setAcceptDrops(False)\n # if QT_CONFIG(tooltip)\n self.l_search.setToolTip(\"\")\n # endif // QT_CONFIG(tooltip)\n # if QT_CONFIG(statustip)\n self.l_search.setStatusTip(\"\")\n # endif // QT_CONFIG(statustip)\n # if QT_CONFIG(whatsthis)\n self.l_search.setWhatsThis(\"\")\n # endif // QT_CONFIG(whatsthis)\n # if QT_CONFIG(accessibility)\n self.l_search.setAccessibleName(\"\")\n # endif // QT_CONFIG(accessibility)\n # if QT_CONFIG(accessibility)\n self.l_search.setAccessibleDescription(\"\")\n # endif // QT_CONFIG(accessibility)\n self.l_search.setLocale(QLocale(QLocale.English, QLocale.UnitedStates))\n self.l_search.setText(\"\")\n self.l_search.setPlaceholderText(\"Type and press ENTER to search...\")\n self.l_search.setClearButtonEnabled(True)\n\n self.lh_search.addWidget(self.l_search)\n\n self.cb_search_type = QComboBox(self.w_central)\n self.cb_search_type.setObjectName(\"cb_search_type\")\n # if QT_CONFIG(tooltip)\n self.cb_search_type.setToolTip(\"\")\n # endif // QT_CONFIG(tooltip)\n # if QT_CONFIG(statustip)\n self.cb_search_type.setStatusTip(\"\")\n # endif // QT_CONFIG(statustip)\n # if QT_CONFIG(whatsthis)\n self.cb_search_type.setWhatsThis(\"\")\n # endif // QT_CONFIG(whatsthis)\n # if QT_CONFIG(accessibility)\n self.cb_search_type.setAccessibleName(\"\")\n # endif // QT_CONFIG(accessibility)\n # if QT_CONFIG(accessibility)\n self.cb_search_type.setAccessibleDescription(\"\")\n # endif // QT_CONFIG(accessibility)\n self.cb_search_type.setCurrentText(\"\")\n 
self.cb_search_type.setPlaceholderText(\"\")\n\n self.lh_search.addWidget(self.cb_search_type)\n\n self.b_search = QPushButton(self.w_central)\n self.b_search.setObjectName(\"b_search\")\n # if QT_CONFIG(statustip)\n self.b_search.setStatusTip(\"\")\n # endif // QT_CONFIG(statustip)\n # if QT_CONFIG(whatsthis)\n self.b_search.setWhatsThis(\"\")\n # endif // QT_CONFIG(whatsthis)\n # if QT_CONFIG(accessibility)\n self.b_search.setAccessibleName(\"\")\n # endif // QT_CONFIG(accessibility)\n # if QT_CONFIG(accessibility)\n self.b_search.setAccessibleDescription(\"\")\n # endif // QT_CONFIG(accessibility)\n self.b_search.setText(\"Search\")\n # if QT_CONFIG(shortcut)\n self.b_search.setShortcut(\"\")\n # endif // QT_CONFIG(shortcut)\n\n self.lh_search.addWidget(self.b_search)\n\n self.lv_search_result.addLayout(self.lh_search)\n\n self.tr_results = QTreeWidget(self.w_central)\n self.tr_results.setObjectName(\"tr_results\")\n self.tr_results.setEditTriggers(QAbstractItemView.NoEditTriggers)\n self.tr_results.setProperty(\"showDropIndicator\", False)\n self.tr_results.setDragDropOverwriteMode(False)\n self.tr_results.setAlternatingRowColors(False)\n self.tr_results.setSelectionMode(QAbstractItemView.ExtendedSelection)\n self.tr_results.setIndentation(10)\n self.tr_results.setSortingEnabled(True)\n self.tr_results.header().setProperty(\"showSortIndicator\", True)\n self.tr_results.header().setStretchLastSection(False)\n\n self.lv_search_result.addWidget(self.tr_results)\n\n self.lh_download = QHBoxLayout()\n self.lh_download.setObjectName(\"lh_download\")\n self.l_quality_audio = QLabel(self.w_central)\n self.l_quality_audio.setObjectName(\"l_quality_audio\")\n # if QT_CONFIG(tooltip)\n self.l_quality_audio.setToolTip(\"\")\n # endif // QT_CONFIG(tooltip)\n # if QT_CONFIG(statustip)\n self.l_quality_audio.setStatusTip(\"\")\n # endif // QT_CONFIG(statustip)\n # if QT_CONFIG(whatsthis)\n self.l_quality_audio.setWhatsThis(\"\")\n # endif // QT_CONFIG(whatsthis)\n # if QT_CONFIG(accessibility)\n self.l_quality_audio.setAccessibleName(\"\")\n # endif // QT_CONFIG(accessibility)\n # if QT_CONFIG(accessibility)\n self.l_quality_audio.setAccessibleDescription(\"\")\n # endif // QT_CONFIG(accessibility)\n self.l_quality_audio.setText(\"Audio\")\n self.l_quality_audio.setAlignment(Qt.AlignRight | Qt.AlignTrailing | Qt.AlignVCenter)\n\n self.lh_download.addWidget(self.l_quality_audio)\n\n self.cb_quality_audio = QComboBox(self.w_central)\n self.cb_quality_audio.setObjectName(\"cb_quality_audio\")\n # if QT_CONFIG(tooltip)\n self.cb_quality_audio.setToolTip(\"\")\n # endif // QT_CONFIG(tooltip)\n # if QT_CONFIG(statustip)\n self.cb_quality_audio.setStatusTip(\"\")\n # endif // QT_CONFIG(statustip)\n # if QT_CONFIG(whatsthis)\n self.cb_quality_audio.setWhatsThis(\"\")\n # endif // QT_CONFIG(whatsthis)\n # if QT_CONFIG(accessibility)\n self.cb_quality_audio.setAccessibleName(\"\")\n # endif // QT_CONFIG(accessibility)\n # if QT_CONFIG(accessibility)\n self.cb_quality_audio.setAccessibleDescription(\"\")\n # endif // QT_CONFIG(accessibility)\n self.cb_quality_audio.setCurrentText(\"\")\n self.cb_quality_audio.setPlaceholderText(\"\")\n self.cb_quality_audio.setFrame(True)\n\n self.lh_download.addWidget(self.cb_quality_audio)\n\n self.l_quality_video = QLabel(self.w_central)\n self.l_quality_video.setObjectName(\"l_quality_video\")\n # if QT_CONFIG(tooltip)\n self.l_quality_video.setToolTip(\"\")\n # endif // QT_CONFIG(tooltip)\n # if QT_CONFIG(statustip)\n self.l_quality_video.setStatusTip(\"\")\n # endif // 
QT_CONFIG(statustip)\n # if QT_CONFIG(whatsthis)\n self.l_quality_video.setWhatsThis(\"\")\n # endif // QT_CONFIG(whatsthis)\n # if QT_CONFIG(accessibility)\n self.l_quality_video.setAccessibleName(\"\")\n # endif // QT_CONFIG(accessibility)\n # if QT_CONFIG(accessibility)\n self.l_quality_video.setAccessibleDescription(\"\")\n # endif // QT_CONFIG(accessibility)\n self.l_quality_video.setText(\"Video\")\n self.l_quality_video.setAlignment(Qt.AlignRight | Qt.AlignTrailing | Qt.AlignVCenter)\n\n self.lh_download.addWidget(self.l_quality_video)\n\n self.cb_quality_video = QComboBox(self.w_central)\n self.cb_quality_video.setObjectName(\"cb_quality_video\")\n # if QT_CONFIG(tooltip)\n self.cb_quality_video.setToolTip(\"\")\n # endif // QT_CONFIG(tooltip)\n # if QT_CONFIG(statustip)\n self.cb_quality_video.setStatusTip(\"\")\n # endif // QT_CONFIG(statustip)\n # if QT_CONFIG(whatsthis)\n self.cb_quality_video.setWhatsThis(\"\")\n # endif // QT_CONFIG(whatsthis)\n # if QT_CONFIG(accessibility)\n self.cb_quality_video.setAccessibleName(\"\")\n # endif // QT_CONFIG(accessibility)\n # if QT_CONFIG(accessibility)\n self.cb_quality_video.setAccessibleDescription(\"\")\n # endif // QT_CONFIG(accessibility)\n self.cb_quality_video.setCurrentText(\"\")\n self.cb_quality_video.setPlaceholderText(\"\")\n\n self.lh_download.addWidget(self.cb_quality_video)\n\n self.b_download = QPushButton(self.w_central)\n self.b_download.setObjectName(\"b_download\")\n # if QT_CONFIG(tooltip)\n self.b_download.setToolTip(\"\")\n # endif // QT_CONFIG(tooltip)\n # if QT_CONFIG(statustip)\n self.b_download.setStatusTip(\"\")\n # endif // QT_CONFIG(statustip)\n # if QT_CONFIG(whatsthis)\n self.b_download.setWhatsThis(\"\")\n # endif // QT_CONFIG(whatsthis)\n # if QT_CONFIG(accessibility)\n self.b_download.setAccessibleName(\"\")\n # endif // QT_CONFIG(accessibility)\n # if QT_CONFIG(accessibility)\n self.b_download.setAccessibleDescription(\"\")\n # endif // QT_CONFIG(accessibility)\n self.b_download.setText(\"Download\")\n # if QT_CONFIG(shortcut)\n self.b_download.setShortcut(\"\")\n # endif // QT_CONFIG(shortcut)\n\n self.lh_download.addWidget(self.b_download)\n\n self.lh_download.setStretch(0, 5)\n self.lh_download.setStretch(2, 5)\n self.lh_download.setStretch(4, 15)\n\n self.lv_search_result.addLayout(self.lh_download)\n\n self.te_debug = QPlainTextEdit(self.w_central)\n self.te_debug.setObjectName(\"te_debug\")\n self.te_debug.setEnabled(True)\n sizePolicy1 = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Maximum)\n sizePolicy1.setHorizontalStretch(0)\n sizePolicy1.setVerticalStretch(0)\n sizePolicy1.setHeightForWidth(self.te_debug.sizePolicy().hasHeightForWidth())\n self.te_debug.setSizePolicy(sizePolicy1)\n self.te_debug.setMaximumSize(QSize(16777215, 16777215))\n self.te_debug.setAcceptDrops(False)\n # if QT_CONFIG(tooltip)\n self.te_debug.setToolTip(\"\")\n # endif // QT_CONFIG(tooltip)\n # if QT_CONFIG(statustip)\n self.te_debug.setStatusTip(\"\")\n # endif // QT_CONFIG(statustip)\n # if QT_CONFIG(whatsthis)\n self.te_debug.setWhatsThis(\"\")\n # endif // QT_CONFIG(whatsthis)\n # if QT_CONFIG(accessibility)\n self.te_debug.setAccessibleName(\"\")\n # endif // QT_CONFIG(accessibility)\n # if QT_CONFIG(accessibility)\n self.te_debug.setAccessibleDescription(\"\")\n # endif // QT_CONFIG(accessibility)\n self.te_debug.setUndoRedoEnabled(False)\n self.te_debug.setReadOnly(True)\n\n self.lv_search_result.addWidget(self.te_debug)\n\n self.lh_main.addLayout(self.lv_search_result)\n\n self.lh_main.setStretch(0, 40)\n 
self.lh_main.setStretch(1, 60)\n\n self.horizontalLayout.addLayout(self.lh_main)\n\n MainWindow.setCentralWidget(self.w_central)\n self.menubar = QMenuBar(MainWindow)\n self.menubar.setObjectName(\"menubar\")\n self.menubar.setGeometry(QRect(0, 0, 900, 24))\n # if QT_CONFIG(tooltip)\n self.menubar.setToolTip(\"\")\n # endif // QT_CONFIG(tooltip)\n # if QT_CONFIG(statustip)\n self.menubar.setStatusTip(\"\")\n # endif // QT_CONFIG(statustip)\n # if QT_CONFIG(whatsthis)\n self.menubar.setWhatsThis(\"\")\n # endif // QT_CONFIG(whatsthis)\n # if QT_CONFIG(accessibility)\n self.menubar.setAccessibleName(\"\")\n # endif // QT_CONFIG(accessibility)\n # if QT_CONFIG(accessibility)\n self.menubar.setAccessibleDescription(\"\")\n # endif // QT_CONFIG(accessibility)\n self.m_file = QMenu(self.menubar)\n self.m_file.setObjectName(\"m_file\")\n # if QT_CONFIG(tooltip)\n self.m_file.setToolTip(\"\")\n # endif // QT_CONFIG(tooltip)\n # if QT_CONFIG(statustip)\n self.m_file.setStatusTip(\"\")\n # endif // QT_CONFIG(statustip)\n # if QT_CONFIG(whatsthis)\n self.m_file.setWhatsThis(\"\")\n # endif // QT_CONFIG(whatsthis)\n # if QT_CONFIG(accessibility)\n self.m_file.setAccessibleName(\"\")\n # endif // QT_CONFIG(accessibility)\n # if QT_CONFIG(accessibility)\n self.m_file.setAccessibleDescription(\"\")\n # endif // QT_CONFIG(accessibility)\n MainWindow.setMenuBar(self.menubar)\n self.statusbar = QStatusBar(MainWindow)\n self.statusbar.setObjectName(\"statusbar\")\n # if QT_CONFIG(tooltip)\n self.statusbar.setToolTip(\"\")\n # endif // QT_CONFIG(tooltip)\n # if QT_CONFIG(statustip)\n self.statusbar.setStatusTip(\"\")\n # endif // QT_CONFIG(statustip)\n # if QT_CONFIG(whatsthis)\n self.statusbar.setWhatsThis(\"\")\n # endif // QT_CONFIG(whatsthis)\n # if QT_CONFIG(accessibility)\n self.statusbar.setAccessibleName(\"\")\n # endif // QT_CONFIG(accessibility)\n # if QT_CONFIG(accessibility)\n self.statusbar.setAccessibleDescription(\"\")\n # endif // QT_CONFIG(accessibility)\n self.statusbar.setLayoutDirection(Qt.LeftToRight)\n MainWindow.setStatusBar(self.statusbar)\n\n self.menubar.addAction(self.m_file.menuAction())\n self.m_file.addAction(self.a_options)\n\n self.retranslateUi(MainWindow)\n\n QMetaObject.connectSlotsByName(MainWindow)\n\n # setupUi\n\n def retranslateUi(self, MainWindow):\n MainWindow.setWindowTitle(QCoreApplication.translate(\"MainWindow\", \"MainWindow\", None))\n ___qtreewidgetitem = self.tr_lists_user.headerItem()\n ___qtreewidgetitem.setText(2, QCoreApplication.translate(\"MainWindow\", \"obj\", None))\n\n __sortingEnabled = self.tr_lists_user.isSortingEnabled()\n self.tr_lists_user.setSortingEnabled(False)\n ___qtreewidgetitem1 = self.tr_lists_user.topLevelItem(0)\n ___qtreewidgetitem1.setText(0, QCoreApplication.translate(\"MainWindow\", \"Playlists\", None))\n ___qtreewidgetitem2 = self.tr_lists_user.topLevelItem(1)\n ___qtreewidgetitem2.setText(0, QCoreApplication.translate(\"MainWindow\", \"Mixes\", None))\n ___qtreewidgetitem3 = self.tr_lists_user.topLevelItem(2)\n ___qtreewidgetitem3.setText(0, QCoreApplication.translate(\"MainWindow\", \"Favorites\", None))\n self.tr_lists_user.setSortingEnabled(__sortingEnabled)\n\n ___qtreewidgetitem4 = self.tr_results.headerItem()\n ___qtreewidgetitem4.setText(5, QCoreApplication.translate(\"MainWindow\", \"obj\", None))\n ___qtreewidgetitem4.setText(4, QCoreApplication.translate(\"MainWindow\", \"Duration\", None))\n ___qtreewidgetitem4.setText(3, QCoreApplication.translate(\"MainWindow\", \"Album\", None))\n ___qtreewidgetitem4.setText(2, 
QCoreApplication.translate(\"MainWindow\", \"Title\", None))\n ___qtreewidgetitem4.setText(1, QCoreApplication.translate(\"MainWindow\", \"Artist\", None))\n ___qtreewidgetitem4.setText(0, QCoreApplication.translate(\"MainWindow\", \"#\", None))\n self.te_debug.setPlaceholderText(QCoreApplication.translate(\"MainWindow\", \"Logs...\", None))\n self.m_file.setTitle(QCoreApplication.translate(\"MainWindow\", \"File\", None))\n\n # retranslateUi" }, { "identifier": "QtWaitingSpinner", "path": "tidal_dl_ng/ui/spinner.py", "snippet": "class QtWaitingSpinner(QWidget):\n def __init__(\n self, parent, centerOnParent=True, disableParentWhenSpinning=False, modality=Qt.WindowModality.NonModal\n ):\n super().__init__(parent)\n\n self._centerOnParent = centerOnParent\n self._disableParentWhenSpinning = disableParentWhenSpinning\n\n # WAS IN initialize()\n self._color = QColor(Qt.GlobalColor.black)\n self._roundness = 100.0\n self._minimumTrailOpacity = 3.14159265358979323846\n self._trailFadePercentage = 80.0\n self._revolutionsPerSecond = 1.57079632679489661923\n self._numberOfLines = 20\n self._lineLength = 10\n self._lineWidth = 2\n self._innerRadius = 10\n self._currentCounter = 0\n self._isSpinning = False\n\n self._timer = QTimer(self)\n self._timer.timeout.connect(self.rotate)\n self.updateSize()\n self.updateTimer()\n self.hide()\n # END initialize()\n\n self.setWindowModality(modality)\n self.setAttribute(Qt.WidgetAttribute.WA_TranslucentBackground)\n\n def paintEvent(self, QPaintEvent):\n self.updatePosition()\n painter = QPainter(self)\n painter.fillRect(self.rect(), Qt.GlobalColor.transparent)\n # Can't found in Qt6\n # painter.setRenderHint(QPainter.Antialiasing, True)\n\n if self._currentCounter >= self._numberOfLines:\n self._currentCounter = 0\n\n painter.setPen(Qt.PenStyle.NoPen)\n for i in range(0, self._numberOfLines):\n painter.save()\n painter.translate(self._innerRadius + self._lineLength, self._innerRadius + self._lineLength)\n rotateAngle = float(360 * i) / float(self._numberOfLines)\n painter.rotate(rotateAngle)\n painter.translate(self._innerRadius, 0)\n distance = self.lineCountDistanceFromPrimary(i, self._currentCounter, self._numberOfLines)\n color = self.currentLineColor(\n distance, self._numberOfLines, self._trailFadePercentage, self._minimumTrailOpacity, self._color\n )\n painter.setBrush(color)\n rect = QRect(0, int(-self._lineWidth / 2), int(self._lineLength), int(self._lineWidth))\n painter.drawRoundedRect(rect, self._roundness, self._roundness, Qt.SizeMode.RelativeSize)\n painter.restore()\n\n def start(self):\n self.updatePosition()\n self._isSpinning = True\n self.show()\n\n if self.parentWidget and self._disableParentWhenSpinning:\n self.parentWidget().setEnabled(False)\n\n if not self._timer.isActive():\n self._timer.start()\n self._currentCounter = 0\n\n def stop(self):\n self._isSpinning = False\n self.hide()\n\n if self.parentWidget() and self._disableParentWhenSpinning:\n self.parentWidget().setEnabled(True)\n\n if self._timer.isActive():\n self._timer.stop()\n self._currentCounter = 0\n\n def setNumberOfLines(self, lines):\n self._numberOfLines = lines\n self._currentCounter = 0\n self.updateTimer()\n\n def setLineLength(self, length):\n self._lineLength = length\n self.updateSize()\n\n def setLineWidth(self, width):\n self._lineWidth = width\n self.updateSize()\n\n def setInnerRadius(self, radius):\n self._innerRadius = radius\n self.updateSize()\n\n def color(self):\n return self._color\n\n def roundness(self):\n return self._roundness\n\n def 
minimumTrailOpacity(self):\n return self._minimumTrailOpacity\n\n def trailFadePercentage(self):\n return self._trailFadePercentage\n\n def revolutionsPersSecond(self):\n return self._revolutionsPerSecond\n\n def numberOfLines(self):\n return self._numberOfLines\n\n def lineLength(self):\n return self._lineLength\n\n def lineWidth(self):\n return self._lineWidth\n\n def innerRadius(self):\n return self._innerRadius\n\n def isSpinning(self):\n return self._isSpinning\n\n def setRoundness(self, roundness):\n self._roundness = max(0.0, min(100.0, roundness))\n\n def setColor(self, color=Qt.GlobalColor.black):\n self._color = QColor(color)\n\n def setRevolutionsPerSecond(self, revolutionsPerSecond):\n self._revolutionsPerSecond = revolutionsPerSecond\n self.updateTimer()\n\n def setTrailFadePercentage(self, trail):\n self._trailFadePercentage = trail\n\n def setMinimumTrailOpacity(self, minimumTrailOpacity):\n self._minimumTrailOpacity = minimumTrailOpacity\n\n def rotate(self):\n self._currentCounter += 1\n if self._currentCounter >= self._numberOfLines:\n self._currentCounter = 0\n self.update()\n\n def updateSize(self):\n size = int((self._innerRadius + self._lineLength) * 2)\n self.setFixedSize(size, size)\n\n def updateTimer(self):\n self._timer.setInterval(int(1000 / (self._numberOfLines * self._revolutionsPerSecond)))\n\n def updatePosition(self):\n if self.parentWidget() and self._centerOnParent:\n self.move(\n int(self.parentWidget().width() / 2 - self.width() / 2),\n int(self.parentWidget().height() / 2 - self.height() / 2),\n )\n\n def lineCountDistanceFromPrimary(self, current, primary, totalNrOfLines):\n distance = primary - current\n if distance < 0:\n distance += totalNrOfLines\n return distance\n\n def currentLineColor(self, countDistance, totalNrOfLines, trailFadePerc, minOpacity, colorinput):\n color = QColor(colorinput)\n if countDistance == 0:\n return color\n minAlphaF = minOpacity / 100.0\n distanceThreshold = int(math.ceil((totalNrOfLines - 1) * trailFadePerc / 100.0))\n if countDistance > distanceThreshold:\n color.setAlphaF(minAlphaF)\n else:\n alphaDiff = color.alphaF() - minAlphaF\n gradient = alphaDiff / float(distanceThreshold + 1)\n resultAlpha = color.alphaF() - gradient * countDistance\n # If alpha is out of bounds, clip it.\n resultAlpha = min(1.0, max(0.0, resultAlpha))\n color.setAlphaF(resultAlpha)\n return color" }, { "identifier": "Worker", "path": "tidal_dl_ng/worker.py", "snippet": "class Worker(QtCore.QRunnable):\n \"\"\"\n Worker thread\n\n Inherits from QRunnable to handler worker thread setup, signals and wrap-up.\n\n :param callback: The function callback to run on this worker thread. Supplied args and\n kwargs will be passed through to the runner.\n :type callback: function\n :param args: Arguments to pass to the callback function\n :param kwargs: Keywords to pass to the callback function\n\n \"\"\"\n\n def __init__(self, fn, *args, **kwargs):\n super().__init__()\n # Store constructor arguments (re-used for processing)\n self.fn = fn\n self.args = args\n self.kwargs = kwargs\n\n @QtCore.Slot() # QtCore.Slot\n def run(self):\n \"\"\"\n Initialise the runner function with passed args, kwargs.\n \"\"\"\n self.fn(*self.args, **self.kwargs)" } ]
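The Download.stream_manifest_parse snippet above counts DASH segments as one initialization entry plus the very first media entry plus the repeat counts of the SegmentTimeline S elements, then expands the $Number$ placeholder into per-segment URLs. Below is a minimal, self-contained sketch of that arithmetic; the timeline entries and the URL template are made up for illustration and are not taken from a real MPD.

# Hypothetical SegmentTimeline: each dict stands in for an <S> element,
# where "r" is the optional repeat count (None when the attribute is absent).
timeline_s_elements = [{"r": None}, {"r": 3}, {"r": None}]

# .initialization + the very first entry of .media, as in stream_manifest_parse.
segments_count = 1 + 1
for s in timeline_s_elements:
    segments_count += s["r"] if s["r"] else 1

# Expand the $Number$ placeholder into one URL per segment (template is illustrative).
media_template = "https://example.invalid/segment_$Number$.m4s"
stream_urls = [media_template.replace("$Number$", str(i)) for i in range(segments_count)]

print(segments_count)  # 7
print(stream_urls[0])  # https://example.invalid/segment_0.m4s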
import math
import sys
import qdarktheme
import coloredlogs.converter

from collections.abc import Callable

from tidal_dl_ng.helper.path import get_format_template
from PySide6 import QtCore, QtGui, QtWidgets
from rich.progress import Progress
from tidalapi import Album, Mix, Playlist, Quality, Track, UserPlaylist, Video
from tidalapi.session import SearchTypes
from tidal_dl_ng.config import Settings, Tidal
from tidal_dl_ng.constants import QualityVideo, TidalLists
from tidal_dl_ng.download import Download
from tidal_dl_ng.logger import XStream, logger_gui
from tidal_dl_ng.model.gui_data import ProgressBars, ResultSearch
from tidal_dl_ng.ui.main import Ui_MainWindow
from tidal_dl_ng.ui.spinner import QtWaitingSpinner
from tidal_dl_ng.worker import Worker
12,217
try:
    # (the import block that forms this try body is stored in the separate import statement field above)
except ImportError as e:
    print(e)
    print("Qt dependencies missing. Cannot start GUI. Please execute: 'pip install pyside6 pyqtdarktheme'")
    sys.exit(1)


# TODO: Make more use of Exceptions
# TODO: Add File -> Version
class MainWindow(QtWidgets.QMainWindow, Ui_MainWindow):
    settings: Settings = None
    tidal: Tidal = None
    dl: Download = None
    threadpool: QtCore.QThreadPool = None
    spinner: QtWaitingSpinner = None
    spinner_start: QtCore.Signal = QtCore.Signal(QtWidgets.QWidget)
    spinner_stop: QtCore.Signal = QtCore.Signal()
    pb_item: QtWidgets.QProgressBar = None
    s_item_advance: QtCore.Signal = QtCore.Signal(float)
    s_item_name: QtCore.Signal = QtCore.Signal(str)
    pb_list: QtWidgets.QProgressBar = None
    s_list_advance: QtCore.Signal = QtCore.Signal(float)
    s_pb_reset: QtCore.Signal = QtCore.Signal()
    s_populate_tree_lists: QtCore.Signal = QtCore.Signal(list)

    def __init__(self, tidal: Tidal | None = None):
        super().__init__()
        self.setupUi(self)
        # self.setGeometry(50, 50, 500, 300)
        self.setWindowTitle("TIDAL Downloader Next Gen!")
        # TODO: Fix icons (make them visible).
        # my_pixmap = QtGui.QPixmap("tidal_dl_ng/ui/icon.png")
        my_icon = QtGui.QIcon("tidal_dl_ng/ui/icon.png")
        self.setWindowIcon(my_icon)
        tray = QtWidgets.QSystemTrayIcon()
        tray.setIcon(my_icon)
        tray.setVisible(True)

        # Logging redirect.
        XStream.stdout().messageWritten.connect(self._log_output)
        # XStream.stderr().messageWritten.connect(self._log_output)

        self.settings = Settings()
        self.threadpool = QtCore.QThreadPool()

        # TODO: Show GUI, create a progress bar showing the TIDAL querying progress.
        self._init_tree_results(self.tr_results)
        self._init_tree_lists(self.tr_lists_user)
        self._init_progressbar()
        self._populate_quality(self.cb_quality_audio, Quality)
try:
    # (the import block that forms this try body is stored in the separate import statement field above)
except ImportError as e:
    print(e)
    print("Qt dependencies missing. Cannot start GUI. Please execute: 'pip install pyside6 pyqtdarktheme'")
    sys.exit(1)


# TODO: Make more use of Exceptions
# TODO: Add File -> Version
class MainWindow(QtWidgets.QMainWindow, Ui_MainWindow):
    settings: Settings = None
    tidal: Tidal = None
    dl: Download = None
    threadpool: QtCore.QThreadPool = None
    spinner: QtWaitingSpinner = None
    spinner_start: QtCore.Signal = QtCore.Signal(QtWidgets.QWidget)
    spinner_stop: QtCore.Signal = QtCore.Signal()
    pb_item: QtWidgets.QProgressBar = None
    s_item_advance: QtCore.Signal = QtCore.Signal(float)
    s_item_name: QtCore.Signal = QtCore.Signal(str)
    pb_list: QtWidgets.QProgressBar = None
    s_list_advance: QtCore.Signal = QtCore.Signal(float)
    s_pb_reset: QtCore.Signal = QtCore.Signal()
    s_populate_tree_lists: QtCore.Signal = QtCore.Signal(list)

    def __init__(self, tidal: Tidal | None = None):
        super().__init__()
        self.setupUi(self)
        # self.setGeometry(50, 50, 500, 300)
        self.setWindowTitle("TIDAL Downloader Next Gen!")
        # TODO: Fix icons (make them visible).
        # my_pixmap = QtGui.QPixmap("tidal_dl_ng/ui/icon.png")
        my_icon = QtGui.QIcon("tidal_dl_ng/ui/icon.png")
        self.setWindowIcon(my_icon)
        tray = QtWidgets.QSystemTrayIcon()
        tray.setIcon(my_icon)
        tray.setVisible(True)

        # Logging redirect.
        XStream.stdout().messageWritten.connect(self._log_output)
        # XStream.stderr().messageWritten.connect(self._log_output)

        self.settings = Settings()
        self.threadpool = QtCore.QThreadPool()

        # TODO: Show GUI, create a progress bar showing the TIDAL querying progress.
        self._init_tree_results(self.tr_results)
        self._init_tree_lists(self.tr_lists_user)
        self._init_progressbar()
        self._populate_quality(self.cb_quality_audio, Quality)
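The MainWindow class above declares float-carrying signals such as s_item_advance alongside QProgressBar attributes like pb_item, but the record ends before any connect calls are shown. The following is a hypothetical, minimal sketch of how such a signal/progress-bar pair can be wired in PySide6; it is not the repository's actual wiring.

import sys

from PySide6 import QtCore, QtWidgets


class ProgressDemo(QtWidgets.QWidget):
    # A float-carrying signal, analogous to s_item_advance above (the name here is illustrative).
    s_item_advance = QtCore.Signal(float)

    def __init__(self):
        super().__init__()
        self.pb_item = QtWidgets.QProgressBar(self)
        self.pb_item.setRange(0, 100)
        # Update the bar whenever a new percentage is emitted.
        self.s_item_advance.connect(lambda value: self.pb_item.setValue(int(value)))


if __name__ == "__main__":
    app = QtWidgets.QApplication(sys.argv)
    demo = ProgressDemo()
    demo.s_item_advance.emit(42.0)
    demo.show()
    sys.exit(app.exec())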
self._populate_quality(self.cb_quality_video, QualityVideo)
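The cropped code ends while populating the audio quality combo box, and the recorded next line continues with the video variant. The helper's body lies outside this record; the sketch below is a purely hypothetical stand-in that assumes _populate_quality merely fills a QComboBox with the members of an enum such as Quality or QualityVideo.

from enum import Enum

from PySide6 import QtWidgets


def populate_quality(combo_box: QtWidgets.QComboBox, quality_enum: type[Enum]) -> None:
    """Hypothetical stand-in for MainWindow._populate_quality: fill a combo box from an enum."""
    combo_box.clear()
    for member in quality_enum:
        # Display the member name and keep the raw enum value as item data.
        combo_box.addItem(member.name, member.value)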
3
2023-12-19 23:05:47+00:00
16k
zyrant/SPGroup3D
tests/test_data/test_datasets/test_scannet_dataset.py
[ { "identifier": "ScanNetDataset", "path": "mmdet3d/datasets/scannet_dataset.py", "snippet": "class ScanNetDataset(Custom3DDataset):\n r\"\"\"ScanNet Dataset for Detection Task.\n\n This class serves as the API for experiments on the ScanNet Dataset.\n\n Please refer to the `github repo <https://github.com/ScanNet/ScanNet>`_\n for data downloading.\n\n Args:\n data_root (str): Path of dataset root.\n ann_file (str): Path of annotation file.\n pipeline (list[dict], optional): Pipeline used for data processing.\n Defaults to None.\n classes (tuple[str], optional): Classes used in the dataset.\n Defaults to None.\n modality (dict, optional): Modality to specify the sensor data used\n as input. Defaults to None.\n box_type_3d (str, optional): Type of 3D box of this dataset.\n Based on the `box_type_3d`, the dataset will encapsulate the box\n to its original format then converted them to `box_type_3d`.\n Defaults to 'Depth' in this dataset. Available options includes\n\n - 'LiDAR': Box in LiDAR coordinates.\n - 'Depth': Box in depth coordinates, usually for indoor dataset.\n - 'Camera': Box in camera coordinates.\n filter_empty_gt (bool, optional): Whether to filter empty GT.\n Defaults to True.\n test_mode (bool, optional): Whether the dataset is in test mode.\n Defaults to False.\n \"\"\"\n CLASSES = ('cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window',\n 'bookshelf', 'picture', 'counter', 'desk', 'curtain',\n 'refrigerator', 'showercurtrain', 'toilet', 'sink', 'bathtub',\n 'garbagebin')\n\n def __init__(self,\n data_root,\n ann_file,\n pipeline=None,\n classes=None,\n modality=dict(use_camera=False, use_depth=True),\n box_type_3d='Depth',\n filter_empty_gt=True,\n test_mode=False,\n **kwargs):\n super().__init__(\n data_root=data_root,\n ann_file=ann_file,\n pipeline=pipeline,\n classes=classes,\n modality=modality,\n box_type_3d=box_type_3d,\n filter_empty_gt=filter_empty_gt,\n test_mode=test_mode,\n **kwargs)\n assert 'use_camera' in self.modality and \\\n 'use_depth' in self.modality\n assert self.modality['use_camera'] or self.modality['use_depth']\n\n def get_data_info(self, index):\n \"\"\"Get data info according to the given index.\n\n Args:\n index (int): Index of the sample data to get.\n\n Returns:\n dict: Data information that will be passed to the data\n preprocessing pipelines. 
It includes the following keys:\n\n - sample_idx (str): Sample index.\n - pts_filename (str): Filename of point clouds.\n - file_name (str): Filename of point clouds.\n - img_prefix (str, optional): Prefix of image files.\n - img_info (dict, optional): Image info.\n - ann_info (dict): Annotation info.\n \"\"\"\n info = self.data_infos[index]\n sample_idx = info['point_cloud']['lidar_idx']\n pts_filename = osp.join(self.data_root, info['pts_path'])\n input_dict = dict(sample_idx=sample_idx)\n\n if self.modality['use_depth']:\n input_dict['pts_filename'] = pts_filename\n input_dict['file_name'] = pts_filename\n\n if self.modality['use_camera']:\n img_info = []\n for img_path in info['img_paths']:\n img_info.append(\n dict(filename=osp.join(self.data_root, img_path)))\n intrinsic = info['intrinsics']\n axis_align_matrix = self._get_axis_align_matrix(info)\n depth2img = []\n for extrinsic in info['extrinsics']:\n depth2img.append(\n intrinsic @ np.linalg.inv(axis_align_matrix @ extrinsic))\n\n input_dict['img_prefix'] = None\n input_dict['img_info'] = img_info\n input_dict['depth2img'] = depth2img\n\n if not self.test_mode:\n annos = self.get_ann_info(index)\n input_dict['ann_info'] = annos\n if self.filter_empty_gt and ~(annos['gt_labels_3d'] != -1).any():\n return None\n return input_dict\n\n def get_ann_info(self, index):\n \"\"\"Get annotation info according to the given index.\n\n Args:\n index (int): Index of the annotation data to get.\n\n Returns:\n dict: annotation information consists of the following keys:\n\n - gt_bboxes_3d (:obj:`DepthInstance3DBoxes`):\n 3D ground truth bboxes\n - gt_labels_3d (np.ndarray): Labels of ground truths.\n - pts_instance_mask_path (str): Path of instance masks.\n - pts_semantic_mask_path (str): Path of semantic masks.\n - axis_align_matrix (np.ndarray): Transformation matrix for\n global scene alignment.\n \"\"\"\n # Use index to get the annos, thus the evalhook could also use this api\n info = self.data_infos[index]\n if info['annos']['gt_num'] != 0:\n gt_bboxes_3d = info['annos']['gt_boxes_upright_depth'].astype(\n np.float32) # k, 6\n gt_labels_3d = info['annos']['class'].astype(np.int64)\n else:\n gt_bboxes_3d = np.zeros((0, 6), dtype=np.float32)\n gt_labels_3d = np.zeros((0, ), dtype=np.int64)\n\n # to target box structure\n gt_bboxes_3d = DepthInstance3DBoxes(\n gt_bboxes_3d,\n box_dim=gt_bboxes_3d.shape[-1],\n with_yaw=False,\n origin=(0.5, 0.5, 0.5)).convert_to(self.box_mode_3d)\n\n pts_instance_mask_path = osp.join(self.data_root,\n info['pts_instance_mask_path'])\n pts_semantic_mask_path = osp.join(self.data_root,\n info['pts_semantic_mask_path'])\n\n axis_align_matrix = self._get_axis_align_matrix(info)\n\n anns_results = dict(\n gt_bboxes_3d=gt_bboxes_3d,\n gt_labels_3d=gt_labels_3d,\n pts_instance_mask_path=pts_instance_mask_path,\n pts_semantic_mask_path=pts_semantic_mask_path,\n axis_align_matrix=axis_align_matrix)\n return anns_results\n\n def prepare_test_data(self, index):\n \"\"\"Prepare data for testing.\n\n We should take axis_align_matrix from self.data_infos since we need\n to align point clouds.\n\n Args:\n index (int): Index for accessing the target data.\n\n Returns:\n dict: Testing data dict of the corresponding index.\n \"\"\"\n input_dict = self.get_data_info(index)\n # take the axis_align_matrix from data_infos\n input_dict['ann_info'] = dict(\n axis_align_matrix=self._get_axis_align_matrix(\n self.data_infos[index]))\n self.pre_pipeline(input_dict)\n example = self.pipeline(input_dict)\n return example\n\n @staticmethod\n 
def _get_axis_align_matrix(info):\n \"\"\"Get axis_align_matrix from info. If not exist, return identity mat.\n\n Args:\n info (dict): one data info term.\n\n Returns:\n np.ndarray: 4x4 transformation matrix.\n \"\"\"\n if 'axis_align_matrix' in info['annos'].keys():\n return info['annos']['axis_align_matrix'].astype(np.float32)\n else:\n warnings.warn(\n 'axis_align_matrix is not found in ScanNet data info, please '\n 'use new pre-process scripts to re-generate ScanNet data')\n return np.eye(4).astype(np.float32)\n\n def _build_default_pipeline(self):\n \"\"\"Build the default pipeline for this dataset.\"\"\"\n pipeline = [\n dict(\n type='LoadPointsFromFile',\n coord_type='DEPTH',\n shift_height=False,\n load_dim=6,\n use_dim=[0, 1, 2, 3, 4, 5]),\n dict(type='GlobalAlignment', rotation_axis=2),\n dict(\n type='DefaultFormatBundle3D',\n class_names=self.CLASSES,\n with_label=False),\n dict(type='Collect3D', keys=['points'])\n ]\n return Compose(pipeline)\n\n def show(self, results, out_dir, show=True, pipeline=None):\n \"\"\"Results visualization.\n\n Args:\n results (list[dict]): List of bounding boxes results.\n out_dir (str): Output directory of visualization result.\n show (bool): Visualize the results online.\n pipeline (list[dict], optional): raw data loading for showing.\n Default: None.\n \"\"\"\n assert out_dir is not None, 'Expect out_dir, got none.'\n pipeline = self._build_default_pipeline()\n for i, result in enumerate(results):\n data_info = self.data_infos[i]\n pts_path = data_info['pts_path']\n file_name = osp.split(pts_path)[-1].split('.')[0]\n points = self._extract_data(i, pipeline, 'points', load_annos=True).numpy()\n gt_bboxes = self.get_ann_info(i)['gt_bboxes_3d']\n gt_bboxes = gt_bboxes.corners.numpy() if len(gt_bboxes) else None\n gt_labels = self.get_ann_info(i)['gt_labels_3d']\n pred_bboxes = result['boxes_3d']\n pred_bboxes = pred_bboxes.corners.numpy() if len(pred_bboxes) else None\n pred_labels = result['labels_3d']\n show_result_v2(points, gt_bboxes, gt_labels,\n pred_bboxes, pred_labels, out_dir, file_name)" }, { "identifier": "ScanNetInstanceSegDataset", "path": "mmdet3d/datasets/scannet_dataset.py", "snippet": "class ScanNetInstanceSegDataset(Custom3DSegDataset):\n CLASSES = ('cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window',\n 'bookshelf', 'picture', 'counter', 'desk', 'curtain',\n 'refrigerator', 'showercurtrain', 'toilet', 'sink', 'bathtub',\n 'garbagebin')\n\n VALID_CLASS_IDS = (3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34,\n 36, 39)\n\n ALL_CLASS_IDS = tuple(range(41))\n\n def get_ann_info(self, index):\n \"\"\"Get annotation info according to the given index.\n\n Args:\n index (int): Index of the annotation data to get.\n\n Returns:\n dict: annotation information consists of the following keys:\n - pts_semantic_mask_path (str): Path of semantic masks.\n - pts_instance_mask_path (str): Path of instance masks.\n \"\"\"\n # Use index to get the annos, thus the evalhook could also use this api\n info = self.data_infos[index]\n\n pts_instance_mask_path = osp.join(self.data_root,\n info['pts_instance_mask_path'])\n pts_semantic_mask_path = osp.join(self.data_root,\n info['pts_semantic_mask_path'])\n\n anns_results = dict(\n pts_instance_mask_path=pts_instance_mask_path,\n pts_semantic_mask_path=pts_semantic_mask_path)\n return anns_results\n\n def get_classes_and_palette(self, classes=None, palette=None):\n \"\"\"Get class names of current dataset. 
Palette is simply ignored for\n instance segmentation.\n\n Args:\n classes (Sequence[str] | str | None): If classes is None, use\n default CLASSES defined by builtin dataset. If classes is a\n string, take it as a file name. The file contains the name of\n classes where each line contains one class name. If classes is\n a tuple or list, override the CLASSES defined by the dataset.\n Defaults to None.\n palette (Sequence[Sequence[int]]] | np.ndarray | None):\n The palette of segmentation map. If None is given, random\n palette will be generated. Defaults to None.\n \"\"\"\n if classes is not None:\n return classes, None\n return self.CLASSES, None\n\n def _build_default_pipeline(self):\n \"\"\"Build the default pipeline for this dataset.\"\"\"\n pipeline = [\n dict(\n type='LoadPointsFromFile',\n coord_type='DEPTH',\n shift_height=False,\n use_color=True,\n load_dim=6,\n use_dim=[0, 1, 2, 3, 4, 5]),\n dict(\n type='LoadAnnotations3D',\n with_bbox_3d=False,\n with_label_3d=False,\n with_mask_3d=True,\n with_seg_3d=True),\n dict(\n type='PointSegClassMapping',\n valid_cat_ids=self.VALID_CLASS_IDS,\n max_cat_id=40),\n dict(\n type='DefaultFormatBundle3D',\n with_label=False,\n class_names=self.CLASSES),\n dict(\n type='Collect3D',\n keys=['points', 'pts_semantic_mask', 'pts_instance_mask'])\n ]\n return Compose(pipeline)\n\n def evaluate(self,\n results,\n metric=None,\n options=None,\n logger=None,\n show=False,\n out_dir=None,\n pipeline=None):\n \"\"\"Evaluation in instance segmentation protocol.\n\n Args:\n results (list[dict]): List of results.\n metric (str | list[str]): Metrics to be evaluated.\n options (dict, optional): options for instance_seg_eval.\n logger (logging.Logger | None | str): Logger used for printing\n related information during evaluation. 
Defaults to None.\n show (bool, optional): Whether to visualize.\n Defaults to False.\n out_dir (str, optional): Path to save the visualization results.\n Defaults to None.\n pipeline (list[dict], optional): raw data loading for showing.\n Default: None.\n\n Returns:\n dict: Evaluation results.\n \"\"\"\n assert isinstance(\n results, list), f'Expect results to be list, got {type(results)}.'\n assert len(results) > 0, 'Expect length of results > 0.'\n assert len(results) == len(self.data_infos)\n assert isinstance(\n results[0], dict\n ), f'Expect elements in results to be dict, got {type(results[0])}.'\n\n load_pipeline = self._get_pipeline(pipeline)\n pred_instance_masks = [result['instance_mask'] for result in results]\n pred_instance_labels = [result['instance_label'] for result in results]\n pred_instance_scores = [result['instance_score'] for result in results]\n gt_semantic_masks, gt_instance_masks = zip(*[\n self._extract_data(\n index=i,\n pipeline=load_pipeline,\n key=['pts_semantic_mask', 'pts_instance_mask'],\n load_annos=True) for i in range(len(self.data_infos))\n ])\n ret_dict = instance_seg_eval(\n gt_semantic_masks,\n gt_instance_masks,\n pred_instance_masks,\n pred_instance_labels,\n pred_instance_scores,\n valid_class_ids=self.VALID_CLASS_IDS,\n class_labels=self.CLASSES,\n options=options,\n logger=logger)\n\n if show:\n raise NotImplementedError('show is not implemented for now')\n\n return ret_dict" }, { "identifier": "ScanNetSegDataset", "path": "mmdet3d/datasets/scannet_dataset.py", "snippet": "class ScanNetSegDataset(Custom3DSegDataset):\n r\"\"\"ScanNet Dataset for Semantic Segmentation Task.\n\n This class serves as the API for experiments on the ScanNet Dataset.\n\n Please refer to the `github repo <https://github.com/ScanNet/ScanNet>`_\n for data downloading.\n\n Args:\n data_root (str): Path of dataset root.\n ann_file (str): Path of annotation file.\n pipeline (list[dict], optional): Pipeline used for data processing.\n Defaults to None.\n classes (tuple[str], optional): Classes used in the dataset.\n Defaults to None.\n palette (list[list[int]], optional): The palette of segmentation map.\n Defaults to None.\n modality (dict, optional): Modality to specify the sensor data used\n as input. Defaults to None.\n test_mode (bool, optional): Whether the dataset is in test mode.\n Defaults to False.\n ignore_index (int, optional): The label index to be ignored, e.g.\n unannotated points. If None is given, set to len(self.CLASSES).\n Defaults to None.\n scene_idxs (np.ndarray | str, optional): Precomputed index to load\n data. 
For scenes with many points, we may sample it several times.\n Defaults to None.\n \"\"\"\n CLASSES = ('wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table',\n 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk',\n 'curtain', 'refrigerator', 'showercurtrain', 'toilet', 'sink',\n 'bathtub', 'otherfurniture')\n\n VALID_CLASS_IDS = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28,\n 33, 34, 36, 39)\n\n ALL_CLASS_IDS = tuple(range(41))\n\n PALETTE = [\n [174, 199, 232],\n [152, 223, 138],\n [31, 119, 180],\n [255, 187, 120],\n [188, 189, 34],\n [140, 86, 75],\n [255, 152, 150],\n [214, 39, 40],\n [197, 176, 213],\n [148, 103, 189],\n [196, 156, 148],\n [23, 190, 207],\n [247, 182, 210],\n [219, 219, 141],\n [255, 127, 14],\n [158, 218, 229],\n [44, 160, 44],\n [112, 128, 144],\n [227, 119, 194],\n [82, 84, 163],\n ]\n\n def __init__(self,\n data_root,\n ann_file,\n pipeline=None,\n classes=None,\n palette=None,\n modality=None,\n test_mode=False,\n ignore_index=None,\n scene_idxs=None,\n **kwargs):\n\n super().__init__(\n data_root=data_root,\n ann_file=ann_file,\n pipeline=pipeline,\n classes=classes,\n palette=palette,\n modality=modality,\n test_mode=test_mode,\n ignore_index=ignore_index,\n scene_idxs=scene_idxs,\n **kwargs)\n\n def get_ann_info(self, index):\n \"\"\"Get annotation info according to the given index.\n\n Args:\n index (int): Index of the annotation data to get.\n\n Returns:\n dict: annotation information consists of the following keys:\n\n - pts_semantic_mask_path (str): Path of semantic masks.\n \"\"\"\n # Use index to get the annos, thus the evalhook could also use this api\n info = self.data_infos[index]\n\n pts_semantic_mask_path = osp.join(self.data_root,\n info['pts_semantic_mask_path'])\n\n anns_results = dict(pts_semantic_mask_path=pts_semantic_mask_path)\n return anns_results\n\n def _build_default_pipeline(self):\n \"\"\"Build the default pipeline for this dataset.\"\"\"\n pipeline = [\n dict(\n type='LoadPointsFromFile',\n coord_type='DEPTH',\n shift_height=False,\n use_color=True,\n load_dim=6,\n use_dim=[0, 1, 2, 3, 4, 5]),\n dict(\n type='LoadAnnotations3D',\n with_bbox_3d=False,\n with_label_3d=False,\n with_mask_3d=False,\n with_seg_3d=True),\n dict(\n type='PointSegClassMapping',\n valid_cat_ids=self.VALID_CLASS_IDS,\n max_cat_id=np.max(self.ALL_CLASS_IDS)),\n dict(\n type='DefaultFormatBundle3D',\n with_label=False,\n class_names=self.CLASSES),\n dict(type='Collect3D', keys=['points', 'pts_semantic_mask'])\n ]\n return Compose(pipeline)\n\n def show(self, results, out_dir, show=True, pipeline=None):\n \"\"\"Results visualization.\n\n Args:\n results (list[dict]): List of bounding boxes results.\n out_dir (str): Output directory of visualization result.\n show (bool): Visualize the results online.\n pipeline (list[dict], optional): raw data loading for showing.\n Default: None.\n \"\"\"\n assert out_dir is not None, 'Expect out_dir, got none.'\n pipeline = self._get_pipeline(pipeline)\n for i, result in enumerate(results):\n data_info = self.data_infos[i]\n pts_path = data_info['pts_path']\n file_name = osp.split(pts_path)[-1].split('.')[0]\n points, gt_sem_mask = self._extract_data(\n i, pipeline, ['points', 'pts_semantic_mask'], load_annos=True)\n points = points.numpy()\n pred_sem_mask = result['semantic_mask'].numpy()\n show_seg_result(points, gt_sem_mask,\n pred_sem_mask, out_dir, file_name,\n np.array(self.PALETTE), self.ignore_index, show)\n\n def get_scene_idxs(self, scene_idxs):\n \"\"\"Compute scene_idxs for data sampling.\n\n 
We sample more times for scenes with more points.\n \"\"\"\n # when testing, we load one whole scene every time\n if not self.test_mode and scene_idxs is None:\n raise NotImplementedError(\n 'please provide re-sampled scene indexes for training')\n\n return super().get_scene_idxs(scene_idxs)\n\n def format_results(self, results, txtfile_prefix=None):\n r\"\"\"Format the results to txt file. Refer to `ScanNet documentation\n <http://kaldir.vc.in.tum.de/scannet_benchmark/documentation>`_.\n\n Args:\n outputs (list[dict]): Testing results of the dataset.\n txtfile_prefix (str): The prefix of saved files. It includes\n the file path and the prefix of filename, e.g., \"a/b/prefix\".\n If not specified, a temp file will be created. Default: None.\n\n Returns:\n tuple: (outputs, tmp_dir), outputs is the detection results,\n tmp_dir is the temporal directory created for saving submission\n files when ``submission_prefix`` is not specified.\n \"\"\"\n import mmcv\n\n if txtfile_prefix is None:\n tmp_dir = tempfile.TemporaryDirectory()\n txtfile_prefix = osp.join(tmp_dir.name, 'results')\n else:\n tmp_dir = None\n mmcv.mkdir_or_exist(txtfile_prefix)\n\n # need to map network output to original label idx\n pred2label = np.zeros(len(self.VALID_CLASS_IDS)).astype(np.int)\n for original_label, output_idx in self.label_map.items():\n if output_idx != self.ignore_index:\n pred2label[output_idx] = original_label\n\n outputs = []\n for i, result in enumerate(results):\n info = self.data_infos[i]\n sample_idx = info['point_cloud']['lidar_idx']\n pred_sem_mask = result['semantic_mask'].numpy().astype(np.int)\n pred_label = pred2label[pred_sem_mask]\n curr_file = f'{txtfile_prefix}/{sample_idx}.txt'\n np.savetxt(curr_file, pred_label, fmt='%d')\n outputs.append(dict(seg_mask=pred_label))\n\n return outputs, tmp_dir" }, { "identifier": "ScanNetInstanceSegV2Dataset", "path": "mmdet3d/datasets/scannet_dataset.py", "snippet": "class ScanNetInstanceSegV2Dataset(ScanNetDataset):\n VALID_CLASS_IDS = (3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28,\n 33, 34, 36, 39)\n\n def _build_default_pipeline(self):\n \"\"\"Build the default pipeline for this dataset.\"\"\"\n pipeline = [\n dict(\n type='LoadPointsFromFile',\n coord_type='DEPTH',\n shift_height=False,\n use_color=True,\n load_dim=6,\n use_dim=[0, 1, 2, 3, 4, 5]),\n dict(\n type='LoadAnnotations3D',\n with_bbox_3d=False,\n with_label_3d=False,\n with_mask_3d=True,\n with_seg_3d=True),\n dict(\n type='DefaultFormatBundle3D',\n with_label=False,\n class_names=self.CLASSES),\n dict(\n type='Collect3D',\n keys=['points', 'pts_semantic_mask', 'pts_instance_mask'])\n ]\n return Compose(pipeline)\n\n def evaluate(self,\n results,\n metric=None,\n options=None,\n logger=None,\n show=False,\n out_dir=None,\n pipeline=None):\n \"\"\"Evaluation in instance segmentation protocol.\n\n Args:\n results (list[dict]): List of results.\n metric (str | list[str]): Metrics to be evaluated.\n options (dict, optional): options for instance_seg_eval.\n logger (logging.Logger | None | str): Logger used for printing\n related information during evaluation. 
Defaults to None.\n show (bool, optional): Whether to visualize.\n Defaults to False.\n out_dir (str, optional): Path to save the visualization results.\n Defaults to None.\n pipeline (list[dict], optional): raw data loading for showing.\n Default: None.\n\n Returns:\n dict: Evaluation results.\n \"\"\"\n assert isinstance(\n results, list), f'Expect results to be list, got {type(results)}.'\n assert len(results) > 0, 'Expect length of results > 0.'\n assert len(results) == len(self.data_infos)\n assert isinstance(\n results[0], dict\n ), f'Expect elements in results to be dict, got {type(results[0])}.'\n\n load_pipeline = self._build_default_pipeline()\n pred_instance_masks = [result['instance_mask'] for result in results]\n pred_instance_labels = [result['instance_label'] for result in results]\n pred_instance_scores = [result['instance_score'] for result in results]\n gt_semantic_masks, gt_instance_masks = zip(*[\n self._extract_data(\n index=i,\n pipeline=load_pipeline,\n key=['pts_semantic_mask', 'pts_instance_mask'],\n load_annos=True) for i in range(len(self.data_infos))\n ])\n ret_dict = instance_seg_eval_v2(\n gt_semantic_masks,\n gt_instance_masks,\n pred_instance_masks,\n pred_instance_labels,\n pred_instance_scores,\n valid_class_ids=self.VALID_CLASS_IDS,\n class_labels=self.CLASSES,\n options=options,\n logger=logger)\n\n if show:\n self.show(results, out_dir)\n\n return ret_dict\n\n def show(self, results, out_dir, show=True, pipeline=None):\n assert out_dir is not None, 'Expect out_dir, got none.'\n load_pipeline = self._build_default_pipeline()\n for i, result in enumerate(results):\n data_info = self.data_infos[i]\n pts_path = data_info['pts_path']\n file_name = osp.split(pts_path)[-1].split('.')[0]\n points, gt_instance_mask, gt_sem_mask = self._extract_data(\n i, load_pipeline, ['points', 'pts_instance_mask', 'pts_semantic_mask'], load_annos=True)\n points = points.numpy()\n gt_inst_mask_final = np.zeros_like(gt_instance_mask)\n for cls_idx in self.VALID_CLASS_IDS:\n mask = gt_sem_mask == cls_idx\n gt_inst_mask_final += mask.numpy()\n gt_instance_mask[gt_inst_mask_final == 0] = -1\n\n pred_instance_masks = result['instance_mask']\n pred_instance_scores = result['instance_score']\n\n pred_instance_masks_sort = pred_instance_masks[pred_instance_scores.argsort()]\n pred_instance_masks_label = pred_instance_masks_sort[0].long() - 1\n for i in range(1, pred_instance_masks_sort.shape[0]):\n pred_instance_masks_label[pred_instance_masks_sort[i]] = i\n\n palette = np.random.random((max(max(pred_instance_masks_label) + 2, max(gt_instance_mask) + 2), 3)) * 255\n palette[-1] = 255\n\n show_seg_result(points, gt_instance_mask,\n pred_instance_masks_label, out_dir, file_name,\n palette)" } ]
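ScanNetDataset.get_data_info above composes each view's projection as intrinsic @ np.linalg.inv(axis_align_matrix @ extrinsic), and _get_axis_align_matrix falls back to a 4x4 identity when no alignment is stored. The numpy sketch below reproduces that composition with made-up intrinsic and extrinsic matrices to show how an aligned depth-frame point ends up in pixel coordinates.

import numpy as np

# Fallback behaviour of _get_axis_align_matrix: identity when no alignment is stored.
axis_align_matrix = np.eye(4, dtype=np.float32)

# Made-up homogeneous 4x4 camera extrinsic and intrinsic, for illustration only.
extrinsic = np.eye(4, dtype=np.float32)
extrinsic[:3, 3] = [0.1, -0.2, 1.5]  # camera translation
intrinsic = np.array([[577.0, 0.0, 320.0, 0.0],
                      [0.0, 577.0, 240.0, 0.0],
                      [0.0, 0.0, 1.0, 0.0],
                      [0.0, 0.0, 0.0, 1.0]], dtype=np.float32)

# Same composition as in ScanNetDataset.get_data_info.
depth2img = intrinsic @ np.linalg.inv(axis_align_matrix @ extrinsic)

# Project one homogeneous depth-frame point into the image plane.
point = np.array([1.0, 2.0, 3.0, 1.0], dtype=np.float32)
x_img, y_img, depth = (depth2img @ point)[:3]
print(x_img / depth, y_img / depth)  # pixel coordinates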
import copy
import numpy as np
import pytest
import torch
import tempfile
import mmcv
from mmdet3d.datasets import (ScanNetDataset, ScanNetInstanceSegDataset,
                              ScanNetSegDataset, ScanNetInstanceSegV2Dataset)
from mmdet3d.core.bbox.structures import DepthInstance3DBoxes
from os import path as osp
11,431
], [ -4.3207e-01, 1.8154e+00, 1.7455e-01, 4.0392e-01, 3.8039e-01, 4.1961e-01 ]]) data = scannet_dataset[0] points = data['points']._data[:5] pts_semantic_mask = data['pts_semantic_mask']._data[:5] pts_instance_mask = data['pts_instance_mask']._data[:5] expected_semantic_mask = np.array([11, 18, 18, 0, 4]) expected_instance_mask = np.array([6, 56, 10, 9, 35]) assert torch.allclose(points, expected_points, 1e-2) assert np.all(pts_semantic_mask.numpy() == expected_semantic_mask) assert np.all(pts_instance_mask.numpy() == expected_instance_mask) def test_instance_seg_evaluate(): root_path = './tests/data/scannet' ann_file = './tests/data/scannet/scannet_infos.pkl' class_names = ('cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator', 'showercurtrain', 'toilet', 'sink', 'bathtub', 'garbagebin') test_pipeline = [ dict( type='LoadPointsFromFile', coord_type='DEPTH', shift_height=False, use_color=True, load_dim=6, use_dim=[0, 1, 2, 3, 4, 5]), dict(type='NormalizePointsColor', color_mean=None), dict(type='DefaultFormatBundle3D', class_names=class_names), dict(type='Collect3D', keys=['points']) ] scannet_dataset = ScanNetInstanceSegDataset( data_root=root_path, ann_file=ann_file, pipeline=test_pipeline, test_mode=True) pred_mask = torch.tensor([ 1, -1, -1, -1, 7, 11, 2, -1, 1, 10, -1, -1, 5, -1, -1, -1, -1, 1, -1, -1, -1, -1, 0, -1, 1, -1, 12, -1, -1, -1, 8, 5, 1, 5, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, 8, -1, -1, -1, 0, 4, 3, -1, 9, -1, -1, 6, -1, -1, -1, -1, 13, -1, -1, 5, -1, 5, -1, -1, 9, 0, 5, -1, -1, 2, 3, 4, -1, -1, -1, 2, -1, -1, -1, 5, 9, -1, 1, -1, 4, 10, 4, -1 ]).long() pred_labels = torch.tensor( [4, 11, 11, 10, 0, 3, 12, 4, 14, 1, 0, 0, 0, 5, 5]).long() pred_scores = torch.tensor([.99 for _ in range(len(pred_labels))]) results = [ dict( instance_mask=pred_mask, instance_label=pred_labels, instance_score=torch.tensor(pred_scores)) ] eval_pipeline = [ dict( type='LoadPointsFromFile', coord_type='DEPTH', shift_height=False, use_color=True, load_dim=6, use_dim=[0, 1, 2, 3, 4, 5]), dict( type='LoadAnnotations3D', with_bbox_3d=False, with_label_3d=False, with_mask_3d=True, with_seg_3d=True), dict( type='PointSegClassMapping', valid_cat_ids=(3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39), max_cat_id=40), dict(type='NormalizePointsColor', color_mean=None), dict(type='DefaultFormatBundle3D', class_names=class_names), dict( type='Collect3D', keys=['points', 'pts_semantic_mask', 'pts_instance_mask']) ] # We add options here as default min_region_size # is much bigger than test instances. 
ret_dict = scannet_dataset.evaluate( results, pipeline=eval_pipeline, options=dict(min_region_sizes=np.array([1]))) assert abs(ret_dict['all_ap'] - 0.90625) < 0.001 assert abs(ret_dict['all_ap_50%'] - 0.90625) < 0.001 assert abs(ret_dict['all_ap_25%'] - 0.94444) < 0.001 assert abs(ret_dict['classes']['cabinet']['ap25%'] - 1.0) < 0.001 assert abs(ret_dict['classes']['cabinet']['ap50%'] - 0.65625) < 0.001 assert abs(ret_dict['classes']['door']['ap25%'] - 0.5) < 0.001 assert abs(ret_dict['classes']['door']['ap50%'] - 0.5) < 0.001 def test_instance_seg_evaluate_v2(): root_path = './tests/data/scannet' ann_file = './tests/data/scannet/scannet_infos.pkl' class_names = ('cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator', 'showercurtrain', 'toilet', 'sink', 'bathtub', 'garbagebin') test_pipeline = [ dict( type='LoadPointsFromFile', coord_type='DEPTH', shift_height=False, use_color=True, load_dim=6, use_dim=[0, 1, 2, 3, 4, 5]), dict(type='NormalizePointsColor', color_mean=None), dict(type='DefaultFormatBundle3D', class_names=class_names), dict(type='Collect3D', keys=['points']) ]
# Copyright (c) OpenMMLab. All rights reserved. def test_getitem(): np.random.seed(0) root_path = './tests/data/scannet/' ann_file = './tests/data/scannet/scannet_infos.pkl' class_names = ('cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator', 'showercurtrain', 'toilet', 'sink', 'bathtub', 'garbagebin') pipelines = [ dict( type='LoadPointsFromFile', coord_type='DEPTH', shift_height=True, load_dim=6, use_dim=[0, 1, 2]), dict( type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True, with_mask_3d=True, with_seg_3d=True), dict(type='GlobalAlignment', rotation_axis=2), dict( type='PointSegClassMapping', valid_cat_ids=(3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39)), dict(type='PointSample', num_points=5), dict( type='RandomFlip3D', sync_2d=False, flip_ratio_bev_horizontal=1.0, flip_ratio_bev_vertical=1.0), dict( type='GlobalRotScaleTrans', rot_range=[-0.087266, 0.087266], scale_ratio_range=[1.0, 1.0], shift_height=True), dict(type='DefaultFormatBundle3D', class_names=class_names), dict( type='Collect3D', keys=[ 'points', 'gt_bboxes_3d', 'gt_labels_3d', 'pts_semantic_mask', 'pts_instance_mask' ], meta_keys=['file_name', 'sample_idx', 'pcd_rotation']), ] scannet_dataset = ScanNetDataset(root_path, ann_file, pipelines) data = scannet_dataset[0] points = data['points']._data gt_bboxes_3d = data['gt_bboxes_3d']._data gt_labels = data['gt_labels_3d']._data pts_semantic_mask = data['pts_semantic_mask']._data pts_instance_mask = data['pts_instance_mask']._data file_name = data['img_metas']._data['file_name'] pcd_rotation = data['img_metas']._data['pcd_rotation'] sample_idx = data['img_metas']._data['sample_idx'] expected_rotation = np.array([[0.99654, 0.08311407, 0.], [-0.08311407, 0.99654, 0.], [0., 0., 1.]]) assert file_name == './tests/data/scannet/points/scene0000_00.bin' assert np.allclose(pcd_rotation, expected_rotation, 1e-3) assert sample_idx == 'scene0000_00' expected_points = torch.tensor( [[1.8339e+00, 2.1093e+00, 2.2900e+00, 2.3895e+00], [3.6079e+00, 1.4592e-01, 2.0687e+00, 2.1682e+00], [4.1886e+00, 5.0614e+00, -1.0841e-01, -8.8736e-03], [6.8790e+00, 1.5086e+00, -9.3154e-02, 6.3816e-03], [4.8253e+00, 2.6668e-01, 1.4917e+00, 1.5912e+00]]) expected_gt_bboxes_3d = torch.tensor( [[-1.1835, -3.6317, 1.5704, 1.7577, 0.3761, 0.5724, 0.0000], [-3.1832, 3.2269, 1.1911, 0.6727, 0.2251, 0.6715, 0.0000], [-0.9598, -2.2864, 0.0093, 0.7506, 2.5709, 1.2145, 0.0000], [-2.6988, -2.7354, 0.8288, 0.7680, 1.8877, 0.2870, 0.0000], [3.2989, 0.2885, -0.0090, 0.7600, 3.8814, 2.1603, 0.0000]]) expected_gt_labels = np.array([ 6, 6, 4, 9, 11, 11, 10, 0, 15, 17, 17, 17, 3, 12, 4, 4, 14, 1, 0, 0, 0, 0, 0, 0, 5, 5, 5 ]) expected_pts_semantic_mask = np.array([0, 18, 18, 18, 18]) expected_pts_instance_mask = np.array([44, 22, 10, 10, 57]) original_classes = scannet_dataset.CLASSES assert scannet_dataset.CLASSES == class_names assert torch.allclose(points, expected_points, 1e-2) assert gt_bboxes_3d.tensor[:5].shape == (5, 7) assert torch.allclose(gt_bboxes_3d.tensor[:5], expected_gt_bboxes_3d, 1e-2) assert np.all(gt_labels.numpy() == expected_gt_labels) assert np.all(pts_semantic_mask.numpy() == expected_pts_semantic_mask) assert np.all(pts_instance_mask.numpy() == expected_pts_instance_mask) assert original_classes == class_names scannet_dataset = ScanNetDataset( root_path, ann_file, pipeline=None, classes=['cabinet', 'bed']) assert scannet_dataset.CLASSES != original_classes assert scannet_dataset.CLASSES == ['cabinet', 
'bed'] scannet_dataset = ScanNetDataset( root_path, ann_file, pipeline=None, classes=('cabinet', 'bed')) assert scannet_dataset.CLASSES != original_classes assert scannet_dataset.CLASSES == ('cabinet', 'bed') # Test load classes from file with tempfile.TemporaryDirectory() as tmpdir: path = tmpdir + 'classes.txt' with open(path, 'w') as f: f.write('cabinet\nbed\n') scannet_dataset = ScanNetDataset( root_path, ann_file, pipeline=None, classes=path) assert scannet_dataset.CLASSES != original_classes assert scannet_dataset.CLASSES == ['cabinet', 'bed'] def test_evaluate(): if not torch.cuda.is_available(): pytest.skip() root_path = './tests/data/scannet' ann_file = './tests/data/scannet/scannet_infos.pkl' scannet_dataset = ScanNetDataset(root_path, ann_file) results = [] pred_boxes = dict() pred_boxes['boxes_3d'] = DepthInstance3DBoxes( torch.tensor([[ 1.4813e+00, 3.5207e+00, 1.5704e+00, 1.7445e+00, 2.3196e-01, 5.7235e-01, 0.0000e+00 ], [ 2.9040e+00, -3.4803e+00, 1.1911e+00, 6.6078e-01, 1.7072e-01, 6.7154e-01, 0.0000e+00 ], [ 1.1466e+00, 2.1987e+00, 9.2576e-03, 5.4184e-01, 2.5346e+00, 1.2145e+00, 0.0000e+00 ], [ 2.9168e+00, 2.5016e+00, 8.2875e-01, 6.1697e-01, 1.8428e+00, 2.8697e-01, 0.0000e+00 ], [ -3.3114e+00, -1.3351e-02, -8.9524e-03, 4.4082e-01, 3.8582e+00, 2.1603e+00, 0.0000e+00 ], [ -2.0135e+00, -3.4857e+00, 9.3848e-01, 1.9911e+00, 2.1603e-01, 1.2767e+00, 0.0000e+00 ], [ -2.1945e+00, -3.1402e+00, -3.8165e-02, 1.4801e+00, 6.8676e-01, 1.0586e+00, 0.0000e+00 ], [ -2.7553e+00, 2.4055e+00, -2.9972e-02, 1.4764e+00, 1.4927e+00, 2.3380e+00, 0.0000e+00 ]])) pred_boxes['labels_3d'] = torch.tensor([6, 6, 4, 9, 11, 11]) pred_boxes['scores_3d'] = torch.tensor([0.5, 1.0, 1.0, 1.0, 1.0, 0.5]) results.append(pred_boxes) metric = [0.25, 0.5] ret_dict = scannet_dataset.evaluate(results, metric) assert abs(ret_dict['table_AP_0.25'] - 0.3333) < 0.01 assert abs(ret_dict['window_AP_0.25'] - 1.0) < 0.01 assert abs(ret_dict['counter_AP_0.25'] - 1.0) < 0.01 assert abs(ret_dict['curtain_AP_0.25'] - 1.0) < 0.01 # test evaluate with pipeline class_names = ('cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator', 'showercurtrain', 'toilet', 'sink', 'bathtub', 'garbagebin') eval_pipeline = [ dict( type='LoadPointsFromFile', coord_type='DEPTH', shift_height=False, load_dim=6, use_dim=[0, 1, 2]), dict(type='GlobalAlignment', rotation_axis=2), dict( type='DefaultFormatBundle3D', class_names=class_names, with_label=False), dict(type='Collect3D', keys=['points']) ] ret_dict = scannet_dataset.evaluate( results, metric, pipeline=eval_pipeline) assert abs(ret_dict['table_AP_0.25'] - 0.3333) < 0.01 assert abs(ret_dict['window_AP_0.25'] - 1.0) < 0.01 assert abs(ret_dict['counter_AP_0.25'] - 1.0) < 0.01 assert abs(ret_dict['curtain_AP_0.25'] - 1.0) < 0.01 def test_show(): tmp_dir = tempfile.TemporaryDirectory() temp_dir = tmp_dir.name root_path = './tests/data/scannet' ann_file = './tests/data/scannet/scannet_infos.pkl' scannet_dataset = ScanNetDataset(root_path, ann_file) boxes_3d = DepthInstance3DBoxes( torch.tensor([[ -2.4053e+00, 9.2295e-01, 8.0661e-02, 2.4054e+00, 2.1468e+00, 8.5990e-01, 0.0000e+00 ], [ -1.9341e+00, -2.0741e+00, 3.0698e-03, 3.2206e-01, 2.5322e-01, 3.5144e-01, 0.0000e+00 ], [ -3.6908e+00, 8.0684e-03, 2.6201e-01, 4.1515e-01, 7.6489e-01, 5.3585e-01, 0.0000e+00 ], [ 2.6332e+00, 8.5143e-01, -4.9964e-03, 3.0367e-01, 1.3448e+00, 1.8329e+00, 0.0000e+00 ], [ 2.0221e-02, 2.6153e+00, 1.5109e-02, 7.3335e-01, 1.0429e+00, 1.0251e+00, 0.0000e+00 
]])) scores_3d = torch.tensor( [1.2058e-04, 2.3012e-03, 6.2324e-06, 6.6139e-06, 6.7965e-05]) labels_3d = torch.tensor([0, 0, 0, 0, 0]) result = dict(boxes_3d=boxes_3d, scores_3d=scores_3d, labels_3d=labels_3d) results = [result] scannet_dataset.show(results, temp_dir, show=False) pts_file_path = osp.join(temp_dir, 'scene0000_00', 'scene0000_00_points.obj') gt_file_path = osp.join(temp_dir, 'scene0000_00', 'scene0000_00_gt.obj') pred_file_path = osp.join(temp_dir, 'scene0000_00', 'scene0000_00_pred.obj') mmcv.check_file_exist(pts_file_path) mmcv.check_file_exist(gt_file_path) mmcv.check_file_exist(pred_file_path) tmp_dir.cleanup() # show function with pipeline class_names = ('cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator', 'showercurtrain', 'toilet', 'sink', 'bathtub', 'garbagebin') eval_pipeline = [ dict( type='LoadPointsFromFile', coord_type='DEPTH', shift_height=False, load_dim=6, use_dim=[0, 1, 2]), dict(type='GlobalAlignment', rotation_axis=2), dict( type='DefaultFormatBundle3D', class_names=class_names, with_label=False), dict(type='Collect3D', keys=['points']) ] tmp_dir = tempfile.TemporaryDirectory() temp_dir = tmp_dir.name scannet_dataset.show(results, temp_dir, show=False, pipeline=eval_pipeline) pts_file_path = osp.join(temp_dir, 'scene0000_00', 'scene0000_00_points.obj') gt_file_path = osp.join(temp_dir, 'scene0000_00', 'scene0000_00_gt.obj') pred_file_path = osp.join(temp_dir, 'scene0000_00', 'scene0000_00_pred.obj') mmcv.check_file_exist(pts_file_path) mmcv.check_file_exist(gt_file_path) mmcv.check_file_exist(pred_file_path) tmp_dir.cleanup() def test_seg_getitem(): np.random.seed(0) root_path = './tests/data/scannet/' ann_file = './tests/data/scannet/scannet_infos.pkl' class_names = ('wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator', 'showercurtrain', 'toilet', 'sink', 'bathtub', 'otherfurniture') palette = [ [174, 199, 232], [152, 223, 138], [31, 119, 180], [255, 187, 120], [188, 189, 34], [140, 86, 75], [255, 152, 150], [214, 39, 40], [197, 176, 213], [148, 103, 189], [196, 156, 148], [23, 190, 207], [247, 182, 210], [219, 219, 141], [255, 127, 14], [158, 218, 229], [44, 160, 44], [112, 128, 144], [227, 119, 194], [82, 84, 163], ] scene_idxs = [0 for _ in range(20)] # test network inputs are (xyz, rgb, normalized_xyz) pipelines = [ dict( type='LoadPointsFromFile', coord_type='DEPTH', shift_height=False, use_color=True, load_dim=6, use_dim=[0, 1, 2, 3, 4, 5]), dict( type='LoadAnnotations3D', with_bbox_3d=False, with_label_3d=False, with_mask_3d=False, with_seg_3d=True), dict( type='PointSegClassMapping', valid_cat_ids=(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39), max_cat_id=40), dict( type='IndoorPatchPointSample', num_points=5, block_size=1.5, ignore_index=len(class_names), use_normalized_coord=True, enlarge_size=0.2, min_unique_num=None), dict(type='NormalizePointsColor', color_mean=None), dict(type='DefaultFormatBundle3D', class_names=class_names), dict( type='Collect3D', keys=['points', 'pts_semantic_mask'], meta_keys=['file_name', 'sample_idx']) ] scannet_dataset = ScanNetSegDataset( data_root=root_path, ann_file=ann_file, pipeline=pipelines, classes=None, palette=None, modality=None, test_mode=False, ignore_index=None, scene_idxs=scene_idxs) data = scannet_dataset[0] points = data['points']._data pts_semantic_mask = data['pts_semantic_mask']._data file_name = 
data['img_metas']._data['file_name'] sample_idx = data['img_metas']._data['sample_idx'] assert file_name == './tests/data/scannet/points/scene0000_00.bin' assert sample_idx == 'scene0000_00' expected_points = torch.tensor([[ 0.0000, 0.0000, 1.2427, 0.6118, 0.5529, 0.4471, -0.6462, -1.0046, 0.4280 ], [ 0.1553, -0.0074, 1.6077, 0.5882, 0.6157, 0.5569, -0.6001, -1.0068, 0.5537 ], [ 0.1518, 0.6016, 0.6548, 0.1490, 0.1059, 0.0431, -0.6012, -0.8309, 0.2255 ], [ -0.7494, 0.1033, 0.6756, 0.5216, 0.4353, 0.3333, -0.8687, -0.9748, 0.2327 ], [ -0.6836, -0.0203, 0.5884, 0.5765, 0.5020, 0.4510, -0.8491, -1.0105, 0.2027 ]]) expected_pts_semantic_mask = np.array([13, 13, 12, 2, 0]) original_classes = scannet_dataset.CLASSES original_palette = scannet_dataset.PALETTE assert scannet_dataset.CLASSES == class_names assert scannet_dataset.ignore_index == 20 assert torch.allclose(points, expected_points, 1e-2) assert np.all(pts_semantic_mask.numpy() == expected_pts_semantic_mask) assert original_classes == class_names assert original_palette == palette assert scannet_dataset.scene_idxs.dtype == np.int32 assert np.all(scannet_dataset.scene_idxs == np.array(scene_idxs)) # test network inputs are (xyz, rgb) np.random.seed(0) new_pipelines = copy.deepcopy(pipelines) new_pipelines[3] = dict( type='IndoorPatchPointSample', num_points=5, block_size=1.5, ignore_index=len(class_names), use_normalized_coord=False, enlarge_size=0.2, min_unique_num=None) scannet_dataset = ScanNetSegDataset( data_root=root_path, ann_file=ann_file, pipeline=new_pipelines, scene_idxs=scene_idxs) data = scannet_dataset[0] points = data['points']._data assert torch.allclose(points, expected_points[:, :6], 1e-2) # test network inputs are (xyz, normalized_xyz) np.random.seed(0) new_pipelines = copy.deepcopy(pipelines) new_pipelines[0] = dict( type='LoadPointsFromFile', coord_type='DEPTH', shift_height=False, use_color=False, load_dim=6, use_dim=[0, 1, 2]) new_pipelines.remove(new_pipelines[4]) scannet_dataset = ScanNetSegDataset( data_root=root_path, ann_file=ann_file, pipeline=new_pipelines, scene_idxs=scene_idxs) data = scannet_dataset[0] points = data['points']._data assert torch.allclose(points, expected_points[:, [0, 1, 2, 6, 7, 8]], 1e-2) # test network inputs are (xyz,) np.random.seed(0) new_pipelines = copy.deepcopy(pipelines) new_pipelines[0] = dict( type='LoadPointsFromFile', coord_type='DEPTH', shift_height=False, use_color=False, load_dim=6, use_dim=[0, 1, 2]) new_pipelines[3] = dict( type='IndoorPatchPointSample', num_points=5, block_size=1.5, ignore_index=len(class_names), use_normalized_coord=False, enlarge_size=0.2, min_unique_num=None) new_pipelines.remove(new_pipelines[4]) scannet_dataset = ScanNetSegDataset( data_root=root_path, ann_file=ann_file, pipeline=new_pipelines, scene_idxs=scene_idxs) data = scannet_dataset[0] points = data['points']._data assert torch.allclose(points, expected_points[:, :3], 1e-2) # test dataset with selected classes scannet_dataset = ScanNetSegDataset( data_root=root_path, ann_file=ann_file, pipeline=None, classes=['cabinet', 'chair'], scene_idxs=scene_idxs) label_map = {i: 20 for i in range(41)} label_map.update({3: 0, 5: 1}) assert scannet_dataset.CLASSES != original_classes assert scannet_dataset.CLASSES == ['cabinet', 'chair'] assert scannet_dataset.PALETTE == [palette[2], palette[4]] assert scannet_dataset.VALID_CLASS_IDS == [3, 5] assert scannet_dataset.label_map == label_map assert scannet_dataset.label2cat == {0: 'cabinet', 1: 'chair'} # test load classes from file with 
tempfile.TemporaryDirectory() as tmpdir: path = tmpdir + 'classes.txt' with open(path, 'w') as f: f.write('cabinet\nchair\n') scannet_dataset = ScanNetSegDataset( data_root=root_path, ann_file=ann_file, pipeline=None, classes=path, scene_idxs=scene_idxs) assert scannet_dataset.CLASSES != original_classes assert scannet_dataset.CLASSES == ['cabinet', 'chair'] assert scannet_dataset.PALETTE == [palette[2], palette[4]] assert scannet_dataset.VALID_CLASS_IDS == [3, 5] assert scannet_dataset.label_map == label_map assert scannet_dataset.label2cat == {0: 'cabinet', 1: 'chair'} # test scene_idxs in dataset # we should input scene_idxs in train mode with pytest.raises(NotImplementedError): scannet_dataset = ScanNetSegDataset( data_root=root_path, ann_file=ann_file, pipeline=None, scene_idxs=None) # test mode scannet_dataset = ScanNetSegDataset( data_root=root_path, ann_file=ann_file, pipeline=None, test_mode=True, scene_idxs=scene_idxs) assert np.all(scannet_dataset.scene_idxs == np.array([0])) def test_seg_evaluate(): if not torch.cuda.is_available(): pytest.skip() root_path = './tests/data/scannet' ann_file = './tests/data/scannet/scannet_infos.pkl' scannet_dataset = ScanNetSegDataset( data_root=root_path, ann_file=ann_file, test_mode=True) results = [] pred_sem_mask = dict( semantic_mask=torch.tensor([ 13, 5, 1, 2, 6, 2, 13, 1, 14, 2, 0, 0, 5, 5, 3, 0, 1, 14, 0, 0, 0, 18, 6, 15, 13, 0, 2, 4, 0, 3, 16, 6, 13, 5, 13, 0, 0, 0, 0, 1, 7, 3, 19, 12, 8, 0, 11, 0, 0, 1, 2, 13, 17, 1, 1, 1, 6, 2, 13, 19, 4, 17, 0, 14, 1, 7, 2, 1, 7, 2, 0, 5, 17, 5, 0, 0, 3, 6, 5, 11, 1, 13, 13, 2, 3, 1, 0, 13, 19, 1, 14, 5, 3, 1, 13, 1, 2, 3, 2, 1 ]).long()) results.append(pred_sem_mask) class_names = ('wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator', 'showercurtrain', 'toilet', 'sink', 'bathtub', 'otherfurniture') eval_pipeline = [ dict( type='LoadPointsFromFile', coord_type='DEPTH', shift_height=False, use_color=True, load_dim=6, use_dim=[0, 1, 2, 3, 4, 5]), dict( type='LoadAnnotations3D', with_bbox_3d=False, with_label_3d=False, with_mask_3d=False, with_seg_3d=True), dict( type='PointSegClassMapping', valid_cat_ids=(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39), max_cat_id=40), dict(type='DefaultFormatBundle3D', class_names=class_names), dict(type='Collect3D', keys=['points', 'pts_semantic_mask']) ] ret_dict = scannet_dataset.evaluate(results, pipeline=eval_pipeline) assert abs(ret_dict['miou'] - 0.5308) < 0.01 assert abs(ret_dict['acc'] - 0.8219) < 0.01 assert abs(ret_dict['acc_cls'] - 0.7649) < 0.01 def test_seg_show(): tmp_dir = tempfile.TemporaryDirectory() temp_dir = tmp_dir.name root_path = './tests/data/scannet' ann_file = './tests/data/scannet/scannet_infos.pkl' scannet_dataset = ScanNetSegDataset( data_root=root_path, ann_file=ann_file, scene_idxs=[0]) result = dict( semantic_mask=torch.tensor([ 13, 5, 1, 2, 6, 2, 13, 1, 14, 2, 0, 0, 5, 5, 3, 0, 1, 14, 0, 0, 0, 18, 6, 15, 13, 0, 2, 4, 0, 3, 16, 6, 13, 5, 13, 0, 0, 0, 0, 1, 7, 3, 19, 12, 8, 0, 11, 0, 0, 1, 2, 13, 17, 1, 1, 1, 6, 2, 13, 19, 4, 17, 0, 14, 1, 7, 2, 1, 7, 2, 0, 5, 17, 5, 0, 0, 3, 6, 5, 11, 1, 13, 13, 2, 3, 1, 0, 13, 19, 1, 14, 5, 3, 1, 13, 1, 2, 3, 2, 1 ]).long()) results = [result] scannet_dataset.show(results, temp_dir, show=False) pts_file_path = osp.join(temp_dir, 'scene0000_00', 'scene0000_00_points.obj') gt_file_path = osp.join(temp_dir, 'scene0000_00', 'scene0000_00_gt.obj') pred_file_path = osp.join(temp_dir, 
'scene0000_00', 'scene0000_00_pred.obj') mmcv.check_file_exist(pts_file_path) mmcv.check_file_exist(gt_file_path) mmcv.check_file_exist(pred_file_path) tmp_dir.cleanup() # test show with pipeline tmp_dir = tempfile.TemporaryDirectory() temp_dir = tmp_dir.name class_names = ('wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator', 'showercurtrain', 'toilet', 'sink', 'bathtub', 'otherfurniture') eval_pipeline = [ dict( type='LoadPointsFromFile', coord_type='DEPTH', shift_height=False, use_color=True, load_dim=6, use_dim=[0, 1, 2, 3, 4, 5]), dict( type='LoadAnnotations3D', with_bbox_3d=False, with_label_3d=False, with_mask_3d=False, with_seg_3d=True), dict( type='PointSegClassMapping', valid_cat_ids=(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39), max_cat_id=40), dict(type='DefaultFormatBundle3D', class_names=class_names), dict(type='Collect3D', keys=['points', 'pts_semantic_mask']) ] scannet_dataset.show(results, temp_dir, show=False, pipeline=eval_pipeline) pts_file_path = osp.join(temp_dir, 'scene0000_00', 'scene0000_00_points.obj') gt_file_path = osp.join(temp_dir, 'scene0000_00', 'scene0000_00_gt.obj') pred_file_path = osp.join(temp_dir, 'scene0000_00', 'scene0000_00_pred.obj') mmcv.check_file_exist(pts_file_path) mmcv.check_file_exist(gt_file_path) mmcv.check_file_exist(pred_file_path) tmp_dir.cleanup() def test_seg_format_results(): root_path = './tests/data/scannet' ann_file = './tests/data/scannet/scannet_infos.pkl' scannet_dataset = ScanNetSegDataset( data_root=root_path, ann_file=ann_file, test_mode=True) results = [] pred_sem_mask = dict( semantic_mask=torch.tensor([ 13, 5, 1, 2, 6, 2, 13, 1, 14, 2, 0, 0, 5, 5, 3, 0, 1, 14, 0, 0, 0, 18, 6, 15, 13, 0, 2, 4, 0, 3, 16, 6, 13, 5, 13, 0, 0, 0, 0, 1, 7, 3, 19, 12, 8, 0, 11, 0, 0, 1, 2, 13, 17, 1, 1, 1, 6, 2, 13, 19, 4, 17, 0, 14, 1, 7, 2, 1, 7, 2, 0, 5, 17, 5, 0, 0, 3, 6, 5, 11, 1, 13, 13, 2, 3, 1, 0, 13, 19, 1, 14, 5, 3, 1, 13, 1, 2, 3, 2, 1 ]).long()) results.append(pred_sem_mask) result_files, tmp_dir = scannet_dataset.format_results(results) expected_label = np.array([ 16, 6, 2, 3, 7, 3, 16, 2, 24, 3, 1, 1, 6, 6, 4, 1, 2, 24, 1, 1, 1, 36, 7, 28, 16, 1, 3, 5, 1, 4, 33, 7, 16, 6, 16, 1, 1, 1, 1, 2, 8, 4, 39, 14, 9, 1, 12, 1, 1, 2, 3, 16, 34, 2, 2, 2, 7, 3, 16, 39, 5, 34, 1, 24, 2, 8, 3, 2, 8, 3, 1, 6, 34, 6, 1, 1, 4, 7, 6, 12, 2, 16, 16, 3, 4, 2, 1, 16, 39, 2, 24, 6, 4, 2, 16, 2, 3, 4, 3, 2 ]) expected_txt_path = osp.join(tmp_dir.name, 'results', 'scene0000_00.txt') assert np.all(result_files[0]['seg_mask'] == expected_label) mmcv.check_file_exist(expected_txt_path) def test_instance_seg_getitem(): np.random.seed(0) root_path = './tests/data/scannet/' ann_file = './tests/data/scannet/scannet_infos.pkl' class_names = ('cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator', 'showercurtrain', 'toilet', 'sink', 'bathtub', 'garbagebin') train_pipeline = [ dict( type='LoadPointsFromFile', coord_type='DEPTH', shift_height=False, use_color=True, load_dim=6, use_dim=[0, 1, 2, 3, 4, 5]), dict( type='LoadAnnotations3D', with_bbox_3d=False, with_label_3d=False, with_mask_3d=True, with_seg_3d=True), dict( type='PointSegClassMapping', valid_cat_ids=(3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39), max_cat_id=40), dict(type='NormalizePointsColor', color_mean=None), dict(type='DefaultFormatBundle3D', class_names=class_names), dict( 
type='Collect3D', keys=['points', 'pts_semantic_mask', 'pts_instance_mask']) ] scannet_dataset = ScanNetInstanceSegDataset( data_root=root_path, ann_file=ann_file, pipeline=train_pipeline, classes=class_names, test_mode=False) expected_points = torch.tensor([[ -3.4742e+00, 7.8792e-01, 1.7397e+00, 3.3725e-01, 3.5294e-01, 3.0588e-01 ], [ 2.7216e+00, 3.4164e+00, 2.4572e+00, 6.6275e-01, 6.2745e-01, 5.1373e-01 ], [ 1.3404e+00, -1.4675e+00, -4.4059e-02, 3.8431e-01, 3.6078e-01, 3.5686e-01 ], [ -3.0335e+00, 2.7273e+00, 1.5181e+00, 2.3137e-01, 1.6078e-01, 8.2353e-02 ], [ -4.3207e-01, 1.8154e+00, 1.7455e-01, 4.0392e-01, 3.8039e-01, 4.1961e-01 ]]) data = scannet_dataset[0] points = data['points']._data[:5] pts_semantic_mask = data['pts_semantic_mask']._data[:5] pts_instance_mask = data['pts_instance_mask']._data[:5] expected_semantic_mask = np.array([11, 18, 18, 0, 4]) expected_instance_mask = np.array([6, 56, 10, 9, 35]) assert torch.allclose(points, expected_points, 1e-2) assert np.all(pts_semantic_mask.numpy() == expected_semantic_mask) assert np.all(pts_instance_mask.numpy() == expected_instance_mask) def test_instance_seg_evaluate(): root_path = './tests/data/scannet' ann_file = './tests/data/scannet/scannet_infos.pkl' class_names = ('cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator', 'showercurtrain', 'toilet', 'sink', 'bathtub', 'garbagebin') test_pipeline = [ dict( type='LoadPointsFromFile', coord_type='DEPTH', shift_height=False, use_color=True, load_dim=6, use_dim=[0, 1, 2, 3, 4, 5]), dict(type='NormalizePointsColor', color_mean=None), dict(type='DefaultFormatBundle3D', class_names=class_names), dict(type='Collect3D', keys=['points']) ] scannet_dataset = ScanNetInstanceSegDataset( data_root=root_path, ann_file=ann_file, pipeline=test_pipeline, test_mode=True) pred_mask = torch.tensor([ 1, -1, -1, -1, 7, 11, 2, -1, 1, 10, -1, -1, 5, -1, -1, -1, -1, 1, -1, -1, -1, -1, 0, -1, 1, -1, 12, -1, -1, -1, 8, 5, 1, 5, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, 8, -1, -1, -1, 0, 4, 3, -1, 9, -1, -1, 6, -1, -1, -1, -1, 13, -1, -1, 5, -1, 5, -1, -1, 9, 0, 5, -1, -1, 2, 3, 4, -1, -1, -1, 2, -1, -1, -1, 5, 9, -1, 1, -1, 4, 10, 4, -1 ]).long() pred_labels = torch.tensor( [4, 11, 11, 10, 0, 3, 12, 4, 14, 1, 0, 0, 0, 5, 5]).long() pred_scores = torch.tensor([.99 for _ in range(len(pred_labels))]) results = [ dict( instance_mask=pred_mask, instance_label=pred_labels, instance_score=torch.tensor(pred_scores)) ] eval_pipeline = [ dict( type='LoadPointsFromFile', coord_type='DEPTH', shift_height=False, use_color=True, load_dim=6, use_dim=[0, 1, 2, 3, 4, 5]), dict( type='LoadAnnotations3D', with_bbox_3d=False, with_label_3d=False, with_mask_3d=True, with_seg_3d=True), dict( type='PointSegClassMapping', valid_cat_ids=(3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39), max_cat_id=40), dict(type='NormalizePointsColor', color_mean=None), dict(type='DefaultFormatBundle3D', class_names=class_names), dict( type='Collect3D', keys=['points', 'pts_semantic_mask', 'pts_instance_mask']) ] # We add options here as default min_region_size # is much bigger than test instances. 
ret_dict = scannet_dataset.evaluate( results, pipeline=eval_pipeline, options=dict(min_region_sizes=np.array([1]))) assert abs(ret_dict['all_ap'] - 0.90625) < 0.001 assert abs(ret_dict['all_ap_50%'] - 0.90625) < 0.001 assert abs(ret_dict['all_ap_25%'] - 0.94444) < 0.001 assert abs(ret_dict['classes']['cabinet']['ap25%'] - 1.0) < 0.001 assert abs(ret_dict['classes']['cabinet']['ap50%'] - 0.65625) < 0.001 assert abs(ret_dict['classes']['door']['ap25%'] - 0.5) < 0.001 assert abs(ret_dict['classes']['door']['ap50%'] - 0.5) < 0.001 def test_instance_seg_evaluate_v2(): root_path = './tests/data/scannet' ann_file = './tests/data/scannet/scannet_infos.pkl' class_names = ('cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator', 'showercurtrain', 'toilet', 'sink', 'bathtub', 'garbagebin') test_pipeline = [ dict( type='LoadPointsFromFile', coord_type='DEPTH', shift_height=False, use_color=True, load_dim=6, use_dim=[0, 1, 2, 3, 4, 5]), dict(type='NormalizePointsColor', color_mean=None), dict(type='DefaultFormatBundle3D', class_names=class_names), dict(type='Collect3D', keys=['points']) ]
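In `test_instance_seg_evaluate` above, the predictions handed to `evaluate` are a flat per-point tensor of instance ids with `-1` marking unassigned points, plus one label and one score per instance. One common way to build such a flat id map from stacked per-instance boolean masks is sketched below; the shapes and masks are invented for illustration:

import torch

num_points = 8
# Hypothetical [num_instances, num_points] boolean masks, ordered lowest score first.
instance_masks = torch.tensor([
    [1, 1, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 1, 1, 0, 0, 0],
], dtype=torch.bool)

flat_ids = torch.full((num_points,), -1, dtype=torch.long)   # -1 = unassigned
for idx, mask in enumerate(instance_masks):
    flat_ids[mask] = idx        # higher-score masks, placed later, overwrite overlaps
print(flat_ids)                 # tensor([ 0,  0,  1,  1,  1, -1, -1, -1])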
scannet_dataset = ScanNetInstanceSegV2Dataset(
3
2023-12-21 12:50:35+00:00
16k
v3ucn/Bert-vits2-V2.2
train_ms.py
[ { "identifier": "config", "path": "config.py", "snippet": "class Resample_config:\nclass Preprocess_text_config:\nclass Bert_gen_config:\nclass Emo_gen_config:\nclass Train_ms_config:\nclass Webui_config:\nclass Server_config:\nclass Translate_config:\nclass Config:\n def __init__(self, in_dir: str, out_dir: str, sampling_rate: int = 44100):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n transcription_path: str,\n cleaned_path: str,\n train_path: str,\n val_path: str,\n config_path: str,\n val_per_lang: int = 5,\n max_val_total: int = 10000,\n clean: bool = True,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n config_path: str,\n num_processes: int = 2,\n device: str = \"cuda\",\n use_multi_device: bool = False,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n config_path: str,\n num_processes: int = 2,\n device: str = \"cuda\",\n use_multi_device: bool = False,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n config_path: str,\n env: Dict[str, any],\n base: Dict[str, any],\n model: str,\n num_workers: int,\n spec_cache: bool,\n keep_ckpts: int,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n device: str,\n model: str,\n config_path: str,\n language_identification_library: str,\n port: int = 7860,\n share: bool = False,\n debug: bool = False,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self, models: List[Dict[str, any]], port: int = 5000, device: str = \"cuda\"\n ):\n def from_dict(cls, data: Dict[str, any]):\n def __init__(self, app_key: str, secret_key: str):\n def from_dict(cls, data: Dict[str, any]):\n def __init__(self, config_path: str):" }, { "identifier": "TextAudioSpeakerLoader", "path": "data_utils.py", "snippet": "class TextAudioSpeakerLoader(torch.utils.data.Dataset):\n \"\"\"\n 1) loads audio, speaker_id, text pairs\n 2) normalizes text and converts them to sequences of integers\n 3) computes spectrograms from audio files.\n \"\"\"\n\n def __init__(self, audiopaths_sid_text, hparams):\n self.audiopaths_sid_text = load_filepaths_and_text(audiopaths_sid_text)\n self.max_wav_value = hparams.max_wav_value\n self.sampling_rate = hparams.sampling_rate\n self.filter_length = hparams.filter_length\n self.hop_length = hparams.hop_length\n self.win_length = hparams.win_length\n self.sampling_rate = hparams.sampling_rate\n self.spk_map = hparams.spk2id\n self.hparams = hparams\n\n self.use_mel_spec_posterior = getattr(\n hparams, \"use_mel_posterior_encoder\", False\n )\n if self.use_mel_spec_posterior:\n self.n_mel_channels = getattr(hparams, \"n_mel_channels\", 80)\n\n self.cleaned_text = getattr(hparams, \"cleaned_text\", False)\n\n self.add_blank = hparams.add_blank\n self.min_text_len = getattr(hparams, \"min_text_len\", 1)\n self.max_text_len = getattr(hparams, \"max_text_len\", 384)\n\n self.empty_emo = torch.squeeze(\n torch.load(\"empty_emo.npy\", map_location=\"cpu\"), dim=1\n )\n\n random.seed(1234)\n random.shuffle(self.audiopaths_sid_text)\n self._filter()\n\n def _filter(self):\n \"\"\"\n Filter text & store spec lengths\n \"\"\"\n # Store spectrogram lengths for Bucketing\n # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2)\n # spec_length = wav_length // hop_length\n\n audiopaths_sid_text_new = []\n lengths = []\n skipped = 0\n logger.info(\"Init dataset...\")\n for _id, spk, 
language, text, phones, tone, word2ph in tqdm(\n self.audiopaths_sid_text\n ):\n audiopath = f\"{_id}\"\n if self.min_text_len <= len(phones) and len(phones) <= self.max_text_len:\n phones = phones.split(\" \")\n tone = [int(i) for i in tone.split(\" \")]\n word2ph = [int(i) for i in word2ph.split(\" \")]\n audiopaths_sid_text_new.append(\n [audiopath, spk, language, text, phones, tone, word2ph]\n )\n lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length))\n else:\n skipped += 1\n logger.info(\n \"skipped: \"\n + str(skipped)\n + \", total: \"\n + str(len(self.audiopaths_sid_text))\n )\n self.audiopaths_sid_text = audiopaths_sid_text_new\n self.lengths = lengths\n\n def get_audio_text_speaker_pair(self, audiopath_sid_text):\n # separate filename, speaker_id and text\n audiopath, sid, language, text, phones, tone, word2ph = audiopath_sid_text\n\n bert, ja_bert, en_bert, phones, tone, language = self.get_text(\n text, word2ph, phones, tone, language, audiopath\n )\n\n spec, wav = self.get_audio(audiopath)\n sid = torch.LongTensor([int(self.spk_map[sid])])\n\n if np.random.rand() > 0.1:\n emo = torch.squeeze(\n torch.load(audiopath.replace(\".wav\", \".emo.npy\"), map_location=\"cpu\"),\n dim=1,\n )\n else:\n emo = self.empty_emo\n return (phones, spec, wav, sid, tone, language, bert, ja_bert, en_bert, emo)\n\n def get_audio(self, filename):\n audio, sampling_rate = load_wav_to_torch(filename)\n if sampling_rate != self.sampling_rate:\n raise ValueError(\n \"{} {} SR doesn't match target {} SR\".format(\n filename, sampling_rate, self.sampling_rate\n )\n )\n audio_norm = audio / self.max_wav_value\n audio_norm = audio_norm.unsqueeze(0)\n spec_filename = filename.replace(\".wav\", \".spec.pt\")\n if self.use_mel_spec_posterior:\n spec_filename = spec_filename.replace(\".spec.pt\", \".mel.pt\")\n try:\n spec = torch.load(spec_filename)\n except:\n if self.use_mel_spec_posterior:\n spec = mel_spectrogram_torch(\n audio_norm,\n self.filter_length,\n self.n_mel_channels,\n self.sampling_rate,\n self.hop_length,\n self.win_length,\n self.hparams.mel_fmin,\n self.hparams.mel_fmax,\n center=False,\n )\n else:\n spec = spectrogram_torch(\n audio_norm,\n self.filter_length,\n self.sampling_rate,\n self.hop_length,\n self.win_length,\n center=False,\n )\n spec = torch.squeeze(spec, 0)\n if config.train_ms_config.spec_cache:\n torch.save(spec, spec_filename)\n return spec, audio_norm\n\n def get_text(self, text, word2ph, phone, tone, language_str, wav_path):\n phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str)\n if self.add_blank:\n phone = commons.intersperse(phone, 0)\n tone = commons.intersperse(tone, 0)\n language = commons.intersperse(language, 0)\n for i in range(len(word2ph)):\n word2ph[i] = word2ph[i] * 2\n word2ph[0] += 1\n bert_path = wav_path.replace(\".wav\", \".bert.pt\")\n try:\n bert_ori = torch.load(bert_path)\n assert bert_ori.shape[-1] == len(phone)\n except Exception as e:\n logger.warning(\"Bert load Failed\")\n logger.warning(e)\n\n if language_str == \"ZH\":\n bert = bert_ori\n ja_bert = torch.rand(1024, len(phone))\n en_bert = torch.rand(1024, len(phone))\n elif language_str == \"JP\":\n bert = torch.rand(1024, len(phone))\n ja_bert = bert_ori\n en_bert = torch.rand(1024, len(phone))\n elif language_str == \"EN\":\n bert = torch.rand(1024, len(phone))\n ja_bert = torch.rand(1024, len(phone))\n en_bert = bert_ori\n phone = torch.LongTensor(phone)\n tone = torch.LongTensor(tone)\n language = torch.LongTensor(language)\n return bert, ja_bert, 
en_bert, phone, tone, language\n\n def get_sid(self, sid):\n sid = torch.LongTensor([int(sid)])\n return sid\n\n def __getitem__(self, index):\n return self.get_audio_text_speaker_pair(self.audiopaths_sid_text[index])\n\n def __len__(self):\n return len(self.audiopaths_sid_text)" }, { "identifier": "TextAudioSpeakerCollate", "path": "data_utils.py", "snippet": "class TextAudioSpeakerCollate:\n \"\"\"Zero-pads model inputs and targets\"\"\"\n\n def __init__(self, return_ids=False):\n self.return_ids = return_ids\n\n def __call__(self, batch):\n \"\"\"Collate's training batch from normalized text, audio and speaker identities\n PARAMS\n ------\n batch: [text_normalized, spec_normalized, wav_normalized, sid]\n \"\"\"\n # Right zero-pad all one-hot text sequences to max input length\n _, ids_sorted_decreasing = torch.sort(\n torch.LongTensor([x[1].size(1) for x in batch]), dim=0, descending=True\n )\n\n max_text_len = max([len(x[0]) for x in batch])\n max_spec_len = max([x[1].size(1) for x in batch])\n max_wav_len = max([x[2].size(1) for x in batch])\n\n text_lengths = torch.LongTensor(len(batch))\n spec_lengths = torch.LongTensor(len(batch))\n wav_lengths = torch.LongTensor(len(batch))\n sid = torch.LongTensor(len(batch))\n\n text_padded = torch.LongTensor(len(batch), max_text_len)\n tone_padded = torch.LongTensor(len(batch), max_text_len)\n language_padded = torch.LongTensor(len(batch), max_text_len)\n bert_padded = torch.FloatTensor(len(batch), 1024, max_text_len)\n ja_bert_padded = torch.FloatTensor(len(batch), 1024, max_text_len)\n en_bert_padded = torch.FloatTensor(len(batch), 1024, max_text_len)\n emo = torch.FloatTensor(len(batch), 512)\n\n spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len)\n wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)\n text_padded.zero_()\n tone_padded.zero_()\n language_padded.zero_()\n spec_padded.zero_()\n wav_padded.zero_()\n bert_padded.zero_()\n ja_bert_padded.zero_()\n en_bert_padded.zero_()\n emo.zero_()\n\n for i in range(len(ids_sorted_decreasing)):\n row = batch[ids_sorted_decreasing[i]]\n\n text = row[0]\n text_padded[i, : text.size(0)] = text\n text_lengths[i] = text.size(0)\n\n spec = row[1]\n spec_padded[i, :, : spec.size(1)] = spec\n spec_lengths[i] = spec.size(1)\n\n wav = row[2]\n wav_padded[i, :, : wav.size(1)] = wav\n wav_lengths[i] = wav.size(1)\n\n sid[i] = row[3]\n\n tone = row[4]\n tone_padded[i, : tone.size(0)] = tone\n\n language = row[5]\n language_padded[i, : language.size(0)] = language\n\n bert = row[6]\n bert_padded[i, :, : bert.size(1)] = bert\n\n ja_bert = row[7]\n ja_bert_padded[i, :, : ja_bert.size(1)] = ja_bert\n\n en_bert = row[8]\n en_bert_padded[i, :, : en_bert.size(1)] = en_bert\n\n emo[i, :] = row[9]\n\n return (\n text_padded,\n text_lengths,\n spec_padded,\n spec_lengths,\n wav_padded,\n wav_lengths,\n sid,\n tone_padded,\n language_padded,\n bert_padded,\n ja_bert_padded,\n en_bert_padded,\n emo,\n )" }, { "identifier": "DistributedBucketSampler", "path": "data_utils.py", "snippet": "class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler):\n \"\"\"\n Maintain similar input lengths in a batch.\n Length groups are specified by boundaries.\n Ex) boundaries = [b1, b2, b3] -> any batch is included either {x | b1 < length(x) <=b2} or {x | b2 < length(x) <= b3}.\n\n It removes samples which are not included in the boundaries.\n Ex) boundaries = [b1, b2, b3] -> any x s.t. 
length(x) <= b1 or length(x) > b3 are discarded.\n \"\"\"\n\n def __init__(\n self,\n dataset,\n batch_size,\n boundaries,\n num_replicas=None,\n rank=None,\n shuffle=True,\n ):\n super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)\n self.lengths = dataset.lengths\n self.batch_size = batch_size\n self.boundaries = boundaries\n\n self.buckets, self.num_samples_per_bucket = self._create_buckets()\n self.total_size = sum(self.num_samples_per_bucket)\n self.num_samples = self.total_size // self.num_replicas\n\n def _create_buckets(self):\n buckets = [[] for _ in range(len(self.boundaries) - 1)]\n for i in range(len(self.lengths)):\n length = self.lengths[i]\n idx_bucket = self._bisect(length)\n if idx_bucket != -1:\n buckets[idx_bucket].append(i)\n\n try:\n for i in range(len(buckets) - 1, 0, -1):\n if len(buckets[i]) == 0:\n buckets.pop(i)\n self.boundaries.pop(i + 1)\n assert all(len(bucket) > 0 for bucket in buckets)\n # When one bucket is not traversed\n except Exception as e:\n print(\"Bucket warning \", e)\n for i in range(len(buckets) - 1, -1, -1):\n if len(buckets[i]) == 0:\n buckets.pop(i)\n self.boundaries.pop(i + 1)\n\n num_samples_per_bucket = []\n for i in range(len(buckets)):\n len_bucket = len(buckets[i])\n total_batch_size = self.num_replicas * self.batch_size\n rem = (\n total_batch_size - (len_bucket % total_batch_size)\n ) % total_batch_size\n num_samples_per_bucket.append(len_bucket + rem)\n return buckets, num_samples_per_bucket\n\n def __iter__(self):\n # deterministically shuffle based on epoch\n g = torch.Generator()\n g.manual_seed(self.epoch)\n\n indices = []\n if self.shuffle:\n for bucket in self.buckets:\n indices.append(torch.randperm(len(bucket), generator=g).tolist())\n else:\n for bucket in self.buckets:\n indices.append(list(range(len(bucket))))\n\n batches = []\n for i in range(len(self.buckets)):\n bucket = self.buckets[i]\n len_bucket = len(bucket)\n if len_bucket == 0:\n continue\n ids_bucket = indices[i]\n num_samples_bucket = self.num_samples_per_bucket[i]\n\n # add extra samples to make it evenly divisible\n rem = num_samples_bucket - len_bucket\n ids_bucket = (\n ids_bucket\n + ids_bucket * (rem // len_bucket)\n + ids_bucket[: (rem % len_bucket)]\n )\n\n # subsample\n ids_bucket = ids_bucket[self.rank :: self.num_replicas]\n\n # batching\n for j in range(len(ids_bucket) // self.batch_size):\n batch = [\n bucket[idx]\n for idx in ids_bucket[\n j * self.batch_size : (j + 1) * self.batch_size\n ]\n ]\n batches.append(batch)\n\n if self.shuffle:\n batch_ids = torch.randperm(len(batches), generator=g).tolist()\n batches = [batches[i] for i in batch_ids]\n self.batches = batches\n\n assert len(self.batches) * self.batch_size == self.num_samples\n return iter(self.batches)\n\n def _bisect(self, x, lo=0, hi=None):\n if hi is None:\n hi = len(self.boundaries) - 1\n\n if hi > lo:\n mid = (hi + lo) // 2\n if self.boundaries[mid] < x and x <= self.boundaries[mid + 1]:\n return mid\n elif x <= self.boundaries[mid]:\n return self._bisect(x, lo, mid)\n else:\n return self._bisect(x, mid + 1, hi)\n else:\n return -1\n\n def __len__(self):\n return self.num_samples // self.batch_size" }, { "identifier": "SynthesizerTrn", "path": "models.py", "snippet": "class SynthesizerTrn(nn.Module):\n \"\"\"\n Synthesizer for Training\n \"\"\"\n\n def __init__(\n self,\n n_vocab,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n 
resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n n_speakers=256,\n gin_channels=256,\n use_sdp=True,\n n_flow_layer=4,\n n_layers_trans_flow=4,\n flow_share_parameter=False,\n use_transformer_flow=True,\n **kwargs\n ):\n super().__init__()\n self.n_vocab = n_vocab\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.n_speakers = n_speakers\n self.gin_channels = gin_channels\n self.n_layers_trans_flow = n_layers_trans_flow\n self.use_spk_conditioned_encoder = kwargs.get(\n \"use_spk_conditioned_encoder\", True\n )\n self.use_sdp = use_sdp\n self.use_noise_scaled_mas = kwargs.get(\"use_noise_scaled_mas\", False)\n self.mas_noise_scale_initial = kwargs.get(\"mas_noise_scale_initial\", 0.01)\n self.noise_scale_delta = kwargs.get(\"noise_scale_delta\", 2e-6)\n self.current_mas_noise_scale = self.mas_noise_scale_initial\n if self.use_spk_conditioned_encoder and gin_channels > 0:\n self.enc_gin_channels = gin_channels\n self.enc_p = TextEncoder(\n n_vocab,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n self.n_speakers,\n gin_channels=self.enc_gin_channels,\n )\n self.dec = Generator(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gin_channels=gin_channels,\n )\n self.enc_q = PosteriorEncoder(\n spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n if use_transformer_flow:\n self.flow = TransformerCouplingBlock(\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers_trans_flow,\n 5,\n p_dropout,\n n_flow_layer,\n gin_channels=gin_channels,\n share_parameter=flow_share_parameter,\n )\n else:\n self.flow = ResidualCouplingBlock(\n inter_channels,\n hidden_channels,\n 5,\n 1,\n n_flow_layer,\n gin_channels=gin_channels,\n )\n self.sdp = StochasticDurationPredictor(\n hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels\n )\n self.dp = DurationPredictor(\n hidden_channels, 256, 3, 0.5, gin_channels=gin_channels\n )\n\n if n_speakers >= 1:\n self.emb_g = nn.Embedding(n_speakers, gin_channels)\n else:\n self.ref_enc = ReferenceEncoder(spec_channels, gin_channels)\n\n def forward(\n self,\n x,\n x_lengths,\n y,\n y_lengths,\n sid,\n tone,\n language,\n bert,\n ja_bert,\n en_bert,\n emo=None,\n ):\n if self.n_speakers > 0:\n g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]\n else:\n g = self.ref_enc(y.transpose(1, 2)).unsqueeze(-1)\n x, m_p, logs_p, x_mask, loss_commit = self.enc_p(\n x, x_lengths, tone, language, bert, ja_bert, en_bert, emo, sid, g=g\n )\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n\n with torch.no_grad():\n # negative cross-entropy\n s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t]\n neg_cent1 = torch.sum(\n -0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True\n ) # [b, 1, t_s]\n 
neg_cent2 = torch.matmul(\n -0.5 * (z_p**2).transpose(1, 2), s_p_sq_r\n ) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]\n neg_cent3 = torch.matmul(\n z_p.transpose(1, 2), (m_p * s_p_sq_r)\n ) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]\n neg_cent4 = torch.sum(\n -0.5 * (m_p**2) * s_p_sq_r, [1], keepdim=True\n ) # [b, 1, t_s]\n neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4\n if self.use_noise_scaled_mas:\n epsilon = (\n torch.std(neg_cent)\n * torch.randn_like(neg_cent)\n * self.current_mas_noise_scale\n )\n neg_cent = neg_cent + epsilon\n\n attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)\n attn = (\n monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1))\n .unsqueeze(1)\n .detach()\n )\n\n w = attn.sum(2)\n\n l_length_sdp = self.sdp(x, x_mask, w, g=g)\n l_length_sdp = l_length_sdp / torch.sum(x_mask)\n\n logw_ = torch.log(w + 1e-6) * x_mask\n logw = self.dp(x, x_mask, g=g)\n l_length_dp = torch.sum((logw - logw_) ** 2, [1, 2]) / torch.sum(\n x_mask\n ) # for averaging\n\n l_length = l_length_dp + l_length_sdp\n\n # expand prior\n m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2)\n logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2)\n\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n o = self.dec(z_slice, g=g)\n return (\n o,\n l_length,\n attn,\n ids_slice,\n x_mask,\n y_mask,\n (z, z_p, m_p, logs_p, m_q, logs_q),\n (x, logw, logw_),\n g,\n loss_commit,\n )\n\n def infer(\n self,\n x,\n x_lengths,\n sid,\n tone,\n language,\n bert,\n ja_bert,\n en_bert,\n emo=None,\n noise_scale=0.667,\n length_scale=1,\n noise_scale_w=0.8,\n max_len=None,\n sdp_ratio=0,\n y=None,\n ):\n # x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, tone, language, bert)\n # g = self.gst(y)\n if self.n_speakers > 0:\n g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]\n else:\n g = self.ref_enc(y.transpose(1, 2)).unsqueeze(-1)\n x, m_p, logs_p, x_mask, _ = self.enc_p(\n x, x_lengths, tone, language, bert, ja_bert, en_bert, emo, sid, g=g\n )\n logw = self.sdp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w) * (\n sdp_ratio\n ) + self.dp(x, x_mask, g=g) * (1 - sdp_ratio)\n w = torch.exp(logw) * x_mask * length_scale\n w_ceil = torch.ceil(w)\n y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()\n y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(\n x_mask.dtype\n )\n attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)\n attn = commons.generate_path(w_ceil, attn_mask)\n\n m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(\n 1, 2\n ) # [b, t', t], [b, t, d] -> [b, d, t']\n logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(\n 1, 2\n ) # [b, t', t], [b, t, d] -> [b, d, t']\n\n z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale\n z = self.flow(z_p, y_mask, g=g, reverse=True)\n o = self.dec((z * y_mask)[:, :, :max_len], g=g)\n return o, attn, y_mask, (z, z_p, m_p, logs_p)" }, { "identifier": "MultiPeriodDiscriminator", "path": "models.py", "snippet": "class MultiPeriodDiscriminator(torch.nn.Module):\n def __init__(self, use_spectral_norm=False):\n super(MultiPeriodDiscriminator, self).__init__()\n periods = [2, 3, 5, 7, 11]\n\n discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]\n discs = discs + [\n DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods\n ]\n self.discriminators = nn.ModuleList(discs)\n\n def forward(self, y, y_hat):\n y_d_rs = []\n y_d_gs = []\n 
fmap_rs = []\n fmap_gs = []\n for i, d in enumerate(self.discriminators):\n y_d_r, fmap_r = d(y)\n y_d_g, fmap_g = d(y_hat)\n y_d_rs.append(y_d_r)\n y_d_gs.append(y_d_g)\n fmap_rs.append(fmap_r)\n fmap_gs.append(fmap_g)\n\n return y_d_rs, y_d_gs, fmap_rs, fmap_gs" }, { "identifier": "DurationDiscriminator", "path": "models.py", "snippet": "class DurationDiscriminator(nn.Module): # vits2\n def __init__(\n self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0\n ):\n super().__init__()\n\n self.in_channels = in_channels\n self.filter_channels = filter_channels\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.gin_channels = gin_channels\n\n self.drop = nn.Dropout(p_dropout)\n self.conv_1 = nn.Conv1d(\n in_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.norm_1 = modules.LayerNorm(filter_channels)\n self.conv_2 = nn.Conv1d(\n filter_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.norm_2 = modules.LayerNorm(filter_channels)\n self.dur_proj = nn.Conv1d(1, filter_channels, 1)\n\n self.pre_out_conv_1 = nn.Conv1d(\n 2 * filter_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.pre_out_norm_1 = modules.LayerNorm(filter_channels)\n self.pre_out_conv_2 = nn.Conv1d(\n filter_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.pre_out_norm_2 = modules.LayerNorm(filter_channels)\n\n if gin_channels != 0:\n self.cond = nn.Conv1d(gin_channels, in_channels, 1)\n\n self.output_layer = nn.Sequential(nn.Linear(filter_channels, 1), nn.Sigmoid())\n\n def forward_probability(self, x, x_mask, dur, g=None):\n dur = self.dur_proj(dur)\n x = torch.cat([x, dur], dim=1)\n x = self.pre_out_conv_1(x * x_mask)\n x = torch.relu(x)\n x = self.pre_out_norm_1(x)\n x = self.drop(x)\n x = self.pre_out_conv_2(x * x_mask)\n x = torch.relu(x)\n x = self.pre_out_norm_2(x)\n x = self.drop(x)\n x = x * x_mask\n x = x.transpose(1, 2)\n output_prob = self.output_layer(x)\n return output_prob\n\n def forward(self, x, x_mask, dur_r, dur_hat, g=None):\n x = torch.detach(x)\n if g is not None:\n g = torch.detach(g)\n x = x + self.cond(g)\n x = self.conv_1(x * x_mask)\n x = torch.relu(x)\n x = self.norm_1(x)\n x = self.drop(x)\n x = self.conv_2(x * x_mask)\n x = torch.relu(x)\n x = self.norm_2(x)\n x = self.drop(x)\n\n output_probs = []\n for dur in [dur_r, dur_hat]:\n output_prob = self.forward_probability(x, x_mask, dur, g)\n output_probs.append(output_prob)\n\n return output_probs" }, { "identifier": "generator_loss", "path": "losses.py", "snippet": "def generator_loss(disc_outputs):\n loss = 0\n gen_losses = []\n for dg in disc_outputs:\n dg = dg.float()\n l = torch.mean((1 - dg) ** 2)\n gen_losses.append(l)\n loss += l\n\n return loss, gen_losses" }, { "identifier": "discriminator_loss", "path": "losses.py", "snippet": "def discriminator_loss(disc_real_outputs, disc_generated_outputs):\n loss = 0\n r_losses = []\n g_losses = []\n for dr, dg in zip(disc_real_outputs, disc_generated_outputs):\n dr = dr.float()\n dg = dg.float()\n r_loss = torch.mean((1 - dr) ** 2)\n g_loss = torch.mean(dg**2)\n loss += r_loss + g_loss\n r_losses.append(r_loss.item())\n g_losses.append(g_loss.item())\n\n return loss, r_losses, g_losses" }, { "identifier": "feature_loss", "path": "losses.py", "snippet": "def feature_loss(fmap_r, fmap_g):\n loss = 0\n for dr, dg in zip(fmap_r, fmap_g):\n for rl, gl in zip(dr, dg):\n rl = rl.float().detach()\n gl = gl.float()\n loss += torch.mean(torch.abs(rl - gl))\n\n return loss * 
2" }, { "identifier": "kl_loss", "path": "losses.py", "snippet": "def kl_loss(z_p, logs_q, m_p, logs_p, z_mask):\n \"\"\"\n z_p, logs_q: [b, h, t_t]\n m_p, logs_p: [b, h, t_t]\n \"\"\"\n z_p = z_p.float()\n logs_q = logs_q.float()\n m_p = m_p.float()\n logs_p = logs_p.float()\n z_mask = z_mask.float()\n\n kl = logs_p - logs_q - 0.5\n kl += 0.5 * ((z_p - m_p) ** 2) * torch.exp(-2.0 * logs_p)\n kl = torch.sum(kl * z_mask)\n l = kl / torch.sum(z_mask)\n return l" }, { "identifier": "mel_spectrogram_torch", "path": "mel_processing.py", "snippet": "def mel_spectrogram_torch(\n y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False\n):\n if torch.min(y) < -1.0:\n print(\"min value is \", torch.min(y))\n if torch.max(y) > 1.0:\n print(\"max value is \", torch.max(y))\n\n global mel_basis, hann_window\n dtype_device = str(y.dtype) + \"_\" + str(y.device)\n fmax_dtype_device = str(fmax) + \"_\" + dtype_device\n wnsize_dtype_device = str(win_size) + \"_\" + dtype_device\n if fmax_dtype_device not in mel_basis:\n mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)\n mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(\n dtype=y.dtype, device=y.device\n )\n if wnsize_dtype_device not in hann_window:\n hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(\n dtype=y.dtype, device=y.device\n )\n\n y = torch.nn.functional.pad(\n y.unsqueeze(1),\n (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)),\n mode=\"reflect\",\n )\n y = y.squeeze(1)\n\n spec = torch.stft(\n y,\n n_fft,\n hop_length=hop_size,\n win_length=win_size,\n window=hann_window[wnsize_dtype_device],\n center=center,\n pad_mode=\"reflect\",\n normalized=False,\n onesided=True,\n return_complex=False,\n )\n\n spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)\n\n spec = torch.matmul(mel_basis[fmax_dtype_device], spec)\n spec = spectral_normalize_torch(spec)\n\n return spec" }, { "identifier": "spec_to_mel_torch", "path": "mel_processing.py", "snippet": "def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):\n global mel_basis\n dtype_device = str(spec.dtype) + \"_\" + str(spec.device)\n fmax_dtype_device = str(fmax) + \"_\" + dtype_device\n if fmax_dtype_device not in mel_basis:\n mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)\n mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(\n dtype=spec.dtype, device=spec.device\n )\n spec = torch.matmul(mel_basis[fmax_dtype_device], spec)\n spec = spectral_normalize_torch(spec)\n return spec" }, { "identifier": "symbols", "path": "text/symbols.py", "snippet": "" } ]
import platform
import os
import torch
import torch.distributed as dist
import logging
import argparse
import datetime
import gc
import commons
import utils
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.cuda.amp import autocast, GradScaler
from tqdm import tqdm
from config import config
from data_utils import (
    TextAudioSpeakerLoader,
    TextAudioSpeakerCollate,
    DistributedBucketSampler,
)
from models import (
    SynthesizerTrn,
    MultiPeriodDiscriminator,
    DurationDiscriminator,
)
from losses import generator_loss, discriminator_loss, feature_loss, kl_loss
from mel_processing import mel_spectrogram_torch, spec_to_mel_torch
from text.symbols import symbols
11,641
epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, eval_loader], logger, [writer, writer_eval], ) else: train_and_evaluate( rank, local_rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, None], None, None, ) scheduler_g.step() scheduler_d.step() if net_dur_disc is not None: scheduler_dur_disc.step() def train_and_evaluate( rank, local_rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers, ): net_g, net_d, net_dur_disc = nets optim_g, optim_d, optim_dur_disc = optims scheduler_g, scheduler_d, scheduler_dur_disc = schedulers train_loader, eval_loader = loaders if writers is not None: writer, writer_eval = writers train_loader.batch_sampler.set_epoch(epoch) global global_step net_g.train() net_d.train() if net_dur_disc is not None: net_dur_disc.train() for batch_idx, ( x, x_lengths, spec, spec_lengths, y, y_lengths, speakers, tone, language, bert, ja_bert, en_bert, emo, ) in enumerate(tqdm(train_loader)): if net_g.module.use_noise_scaled_mas: current_mas_noise_scale = ( net_g.module.mas_noise_scale_initial - net_g.module.noise_scale_delta * global_step ) net_g.module.current_mas_noise_scale = max(current_mas_noise_scale, 0.0) x, x_lengths = x.cuda(local_rank, non_blocking=True), x_lengths.cuda( local_rank, non_blocking=True ) spec, spec_lengths = spec.cuda( local_rank, non_blocking=True ), spec_lengths.cuda(local_rank, non_blocking=True) y, y_lengths = y.cuda(local_rank, non_blocking=True), y_lengths.cuda( local_rank, non_blocking=True ) speakers = speakers.cuda(local_rank, non_blocking=True) tone = tone.cuda(local_rank, non_blocking=True) language = language.cuda(local_rank, non_blocking=True) bert = bert.cuda(local_rank, non_blocking=True) ja_bert = ja_bert.cuda(local_rank, non_blocking=True) en_bert = en_bert.cuda(local_rank, non_blocking=True) emo = emo.cuda(local_rank, non_blocking=True) with autocast(enabled=hps.train.fp16_run): ( y_hat, l_length, attn, ids_slice, x_mask, z_mask, (z, z_p, m_p, logs_p, m_q, logs_q), (hidden_x, logw, logw_), g, loss_commit, ) = net_g( x, x_lengths, spec, spec_lengths, speakers, tone, language, bert, ja_bert, en_bert, emo, )
# flake8: noqa: E402 logging.getLogger("numba").setLevel(logging.WARNING) torch.backends.cuda.matmul.allow_tf32 = True torch.backends.cudnn.allow_tf32 = ( True # If encontered training problem,please try to disable TF32. ) torch.set_float32_matmul_precision("medium") torch.backends.cuda.sdp_kernel("flash") torch.backends.cuda.enable_flash_sdp(True) torch.backends.cuda.enable_mem_efficient_sdp( True ) # Not available if torch version is lower than 2.0 torch.backends.cuda.enable_math_sdp(True) global_step = 0 def run(): # 环境变量解析 envs = config.train_ms_config.env for env_name, env_value in envs.items(): if env_name not in os.environ.keys(): print("加载config中的配置{}".format(str(env_value))) os.environ[env_name] = str(env_value) print( "加载环境变量 \nMASTER_ADDR: {},\nMASTER_PORT: {},\nWORLD_SIZE: {},\nRANK: {},\nLOCAL_RANK: {}".format( os.environ["MASTER_ADDR"], os.environ["MASTER_PORT"], os.environ["WORLD_SIZE"], os.environ["RANK"], os.environ["LOCAL_RANK"], ) ) backend = "nccl" if platform.system() == "Windows": backend = "gloo" # If Windows,switch to gloo backend. dist.init_process_group( backend=backend, init_method="env://", timeout=datetime.timedelta(seconds=300), ) # Use torchrun instead of mp.spawn rank = dist.get_rank() local_rank = int(os.environ["LOCAL_RANK"]) n_gpus = dist.get_world_size() # 命令行/config.yml配置解析 # hps = utils.get_hparams() parser = argparse.ArgumentParser() # 非必要不建议使用命令行配置,请使用config.yml文件 parser.add_argument( "-c", "--config", type=str, default=config.train_ms_config.config_path, help="JSON file for configuration", ) parser.add_argument( "-m", "--model", type=str, help="数据集文件夹路径,请注意,数据不再默认放在/logs文件夹下。如果需要用命令行配置,请声明相对于根目录的路径", default=config.dataset_path, ) args = parser.parse_args() model_dir = os.path.join(args.model, config.train_ms_config.model) if not os.path.exists(model_dir): os.makedirs(model_dir) hps = utils.get_hparams_from_file(args.config) hps.model_dir = model_dir # 比较路径是否相同 if os.path.realpath(args.config) != os.path.realpath( config.train_ms_config.config_path ): with open(args.config, "r", encoding="utf-8") as f: data = f.read() with open(config.train_ms_config.config_path, "w", encoding="utf-8") as f: f.write(data) torch.manual_seed(hps.train.seed) torch.cuda.set_device(local_rank) global global_step if rank == 0: logger = utils.get_logger(hps.model_dir) logger.info(hps) utils.check_git_hash(hps.model_dir) writer = SummaryWriter(log_dir=hps.model_dir) writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval")) train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps.data) train_sampler = DistributedBucketSampler( train_dataset, hps.train.batch_size, [32, 300, 400, 500, 600, 700, 800, 900, 1000], num_replicas=n_gpus, rank=rank, shuffle=True, ) collate_fn = TextAudioSpeakerCollate() train_loader = DataLoader( train_dataset, num_workers=min(config.train_ms_config.num_workers, os.cpu_count() - 1), shuffle=False, pin_memory=True, collate_fn=collate_fn, batch_sampler=train_sampler, persistent_workers=True, prefetch_factor=4, ) # DataLoader config could be adjusted. 
if rank == 0: eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data) eval_loader = DataLoader( eval_dataset, num_workers=0, shuffle=False, batch_size=1, pin_memory=True, drop_last=False, collate_fn=collate_fn, ) if ( "use_noise_scaled_mas" in hps.model.keys() and hps.model.use_noise_scaled_mas is True ): print("Using noise scaled MAS for VITS2") mas_noise_scale_initial = 0.01 noise_scale_delta = 2e-6 else: print("Using normal MAS for VITS1") mas_noise_scale_initial = 0.0 noise_scale_delta = 0.0 if ( "use_duration_discriminator" in hps.model.keys() and hps.model.use_duration_discriminator is True ): print("Using duration discriminator for VITS2") net_dur_disc = DurationDiscriminator( hps.model.hidden_channels, hps.model.hidden_channels, 3, 0.1, gin_channels=hps.model.gin_channels if hps.data.n_speakers != 0 else 0, ).cuda(local_rank) if ( "use_spk_conditioned_encoder" in hps.model.keys() and hps.model.use_spk_conditioned_encoder is True ): if hps.data.n_speakers == 0: raise ValueError( "n_speakers must be > 0 when using spk conditioned encoder to train multi-speaker model" ) else: print("Using normal encoder for VITS1") net_g = SynthesizerTrn( len(symbols), hps.data.filter_length // 2 + 1, hps.train.segment_size // hps.data.hop_length, n_speakers=hps.data.n_speakers, mas_noise_scale_initial=mas_noise_scale_initial, noise_scale_delta=noise_scale_delta, **hps.model, ).cuda(local_rank) if getattr(hps.train, "freeze_ZH_bert", False): print("Freezing ZH bert encoder !!!") for param in net_g.enc_p.bert_proj.parameters(): param.requires_grad = False if getattr(hps.train, "freeze_EN_bert", False): print("Freezing EN bert encoder !!!") for param in net_g.enc_p.en_bert_proj.parameters(): param.requires_grad = False if getattr(hps.train, "freeze_JP_bert", False): print("Freezing JP bert encoder !!!") for param in net_g.enc_p.ja_bert_proj.parameters(): param.requires_grad = False net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(local_rank) optim_g = torch.optim.AdamW( filter(lambda p: p.requires_grad, net_g.parameters()), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps, ) optim_d = torch.optim.AdamW( net_d.parameters(), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps, ) if net_dur_disc is not None: optim_dur_disc = torch.optim.AdamW( net_dur_disc.parameters(), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps, ) else: optim_dur_disc = None net_g = DDP(net_g, device_ids=[local_rank], bucket_cap_mb=512) net_d = DDP(net_d, device_ids=[local_rank], bucket_cap_mb=512) dur_resume_lr = None if net_dur_disc is not None: net_dur_disc = DDP( net_dur_disc, device_ids=[local_rank], find_unused_parameters=True, bucket_cap_mb=512, ) # 下载底模 if config.train_ms_config.base["use_base_model"]: utils.download_checkpoint( hps.model_dir, config.train_ms_config.base, token=config.openi_token, mirror=config.mirror, ) try: if net_dur_disc is not None: _, _, dur_resume_lr, epoch_str = utils.load_checkpoint( utils.latest_checkpoint_path(hps.model_dir, "DUR_*.pth"), net_dur_disc, optim_dur_disc, skip_optimizer=hps.train.skip_optimizer if "skip_optimizer" in hps.train else True, ) _, optim_g, g_resume_lr, epoch_str = utils.load_checkpoint( utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, optim_g, skip_optimizer=hps.train.skip_optimizer if "skip_optimizer" in hps.train else True, ) _, optim_d, d_resume_lr, epoch_str = utils.load_checkpoint( utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d, optim_d, 
skip_optimizer=hps.train.skip_optimizer if "skip_optimizer" in hps.train else True, ) if not optim_g.param_groups[0].get("initial_lr"): optim_g.param_groups[0]["initial_lr"] = g_resume_lr if not optim_d.param_groups[0].get("initial_lr"): optim_d.param_groups[0]["initial_lr"] = d_resume_lr if not optim_dur_disc.param_groups[0].get("initial_lr"): optim_dur_disc.param_groups[0]["initial_lr"] = dur_resume_lr epoch_str = max(epoch_str, 1) # global_step = (epoch_str - 1) * len(train_loader) global_step = int( utils.get_steps(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth")) ) print( f"******************检测到模型存在,epoch为 {epoch_str},gloabl step为 {global_step}*********************" ) except Exception as e: print(e) epoch_str = 1 global_step = 0 scheduler_g = torch.optim.lr_scheduler.ExponentialLR( optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2 ) scheduler_d = torch.optim.lr_scheduler.ExponentialLR( optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2 ) if net_dur_disc is not None: if not optim_dur_disc.param_groups[0].get("initial_lr"): optim_dur_disc.param_groups[0]["initial_lr"] = dur_resume_lr scheduler_dur_disc = torch.optim.lr_scheduler.ExponentialLR( optim_dur_disc, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2 ) else: scheduler_dur_disc = None scaler = GradScaler(enabled=hps.train.fp16_run) for epoch in range(epoch_str, hps.train.epochs + 1): if rank == 0: train_and_evaluate( rank, local_rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, eval_loader], logger, [writer, writer_eval], ) else: train_and_evaluate( rank, local_rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, None], None, None, ) scheduler_g.step() scheduler_d.step() if net_dur_disc is not None: scheduler_dur_disc.step() def train_and_evaluate( rank, local_rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers, ): net_g, net_d, net_dur_disc = nets optim_g, optim_d, optim_dur_disc = optims scheduler_g, scheduler_d, scheduler_dur_disc = schedulers train_loader, eval_loader = loaders if writers is not None: writer, writer_eval = writers train_loader.batch_sampler.set_epoch(epoch) global global_step net_g.train() net_d.train() if net_dur_disc is not None: net_dur_disc.train() for batch_idx, ( x, x_lengths, spec, spec_lengths, y, y_lengths, speakers, tone, language, bert, ja_bert, en_bert, emo, ) in enumerate(tqdm(train_loader)): if net_g.module.use_noise_scaled_mas: current_mas_noise_scale = ( net_g.module.mas_noise_scale_initial - net_g.module.noise_scale_delta * global_step ) net_g.module.current_mas_noise_scale = max(current_mas_noise_scale, 0.0) x, x_lengths = x.cuda(local_rank, non_blocking=True), x_lengths.cuda( local_rank, non_blocking=True ) spec, spec_lengths = spec.cuda( local_rank, non_blocking=True ), spec_lengths.cuda(local_rank, non_blocking=True) y, y_lengths = y.cuda(local_rank, non_blocking=True), y_lengths.cuda( local_rank, non_blocking=True ) speakers = speakers.cuda(local_rank, non_blocking=True) tone = tone.cuda(local_rank, non_blocking=True) language = language.cuda(local_rank, non_blocking=True) bert = bert.cuda(local_rank, non_blocking=True) ja_bert = ja_bert.cuda(local_rank, non_blocking=True) en_bert = en_bert.cuda(local_rank, non_blocking=True) emo = emo.cuda(local_rank, non_blocking=True) with autocast(enabled=hps.train.fp16_run): ( y_hat, l_length, 
attn, ids_slice, x_mask, z_mask, (z, z_p, m_p, logs_p, m_q, logs_q), (hidden_x, logw, logw_), g, loss_commit, ) = net_g( x, x_lengths, spec, spec_lengths, speakers, tone, language, bert, ja_bert, en_bert, emo, )
mel = spec_to_mel_torch(
12
2023-12-18 04:54:46+00:00
16k
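The record above centers on the adversarial part of a Bert-VITS2-style training step: its context carries generator_loss, discriminator_loss, and feature_loss verbatim. Below is a minimal, self-contained sketch of those least-squares GAN formulas applied to dummy discriminator outputs, condensed to return only the scalar totals (the record's versions also return per-discriminator lists); the tensor shapes and the three-discriminator setup are illustrative assumptions, not values taken from the dataset.

import torch

def discriminator_loss(disc_real_outputs, disc_generated_outputs):
    # Least-squares GAN objective: real outputs are pushed toward 1, generated toward 0
    loss = 0.0
    for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
        loss = loss + torch.mean((1 - dr.float()) ** 2) + torch.mean(dg.float() ** 2)
    return loss

def generator_loss(disc_generated_outputs):
    # The generator is rewarded when the discriminator outputs move toward 1
    return sum(torch.mean((1 - dg.float()) ** 2) for dg in disc_generated_outputs)

def feature_loss(fmap_r, fmap_g):
    # L1 distance between real (detached) and generated feature maps, scaled by 2
    loss = 0.0
    for dr, dg in zip(fmap_r, fmap_g):
        for rl, gl in zip(dr, dg):
            loss = loss + torch.mean(torch.abs(rl.float().detach() - gl.float()))
    return loss * 2

# Dummy outputs for three sub-discriminators (shapes chosen only for illustration)
real = [torch.rand(2, 1, 128) for _ in range(3)]
fake = [torch.rand(2, 1, 128) for _ in range(3)]
fmap_r = [[torch.rand(2, 8, 64)] for _ in range(3)]
fmap_g = [[torch.rand(2, 8, 64)] for _ in range(3)]
print(discriminator_loss(real, fake).item(),
      generator_loss(fake).item(),
      feature_loss(fmap_r, fmap_g).item())

In the surrounding training loop, the record's next_line continuation (mel = spec_to_mel_torch() converts the linear spectrogram to a mel spectrogram for the reconstruction term that accompanies these adversarial losses.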
m-abr/FCPCodebase
scripts/gyms/Basic_Run.py
[ { "identifier": "Base_Agent", "path": "agent/Base_Agent.py", "snippet": "class Base_Agent():\n all_agents = []\n\n def __init__(self, host:str, agent_port:int, monitor_port:int, unum:int, robot_type:int, team_name:str, enable_log:bool=True,\n enable_draw:bool=True, apply_play_mode_correction:bool=True, wait_for_server:bool=True, hear_callback=None) -> None:\n\n self.radio = None # hear_message may be called during Server_Comm instantiation\n self.logger = Logger(enable_log, f\"{team_name}_{unum}\")\n self.world = World(robot_type, team_name, unum, apply_play_mode_correction, enable_draw, self.logger, host)\n self.world_parser = World_Parser(self.world, self.hear_message if hear_callback is None else hear_callback)\n self.scom = Server_Comm(host,agent_port,monitor_port,unum,robot_type,team_name,self.world_parser,self.world,Base_Agent.all_agents,wait_for_server)\n self.inv_kinematics = Inverse_Kinematics(self.world.robot)\n self.behavior = Behavior(self)\n self.path_manager = Path_Manager(self.world)\n self.radio = Radio(self.world, self.scom.commit_announcement)\n self.behavior.create_behaviors()\n Base_Agent.all_agents.append(self)\n\n @abstractmethod\n def think_and_send(self):\n pass\n\n def hear_message(self, msg:bytearray, direction, timestamp:float) -> None:\n if direction != \"self\" and self.radio is not None:\n self.radio.receive(msg)\n\n def terminate(self):\n # close shared monitor socket if this is the last agent on this thread\n self.scom.close(close_monitor_socket=(len(Base_Agent.all_agents)==1))\n Base_Agent.all_agents.remove(self)\n\n @staticmethod\n def terminate_all():\n for o in Base_Agent.all_agents:\n o.scom.close(True) # close shared monitor socket, if it exists\n Base_Agent.all_agents = []" }, { "identifier": "Step", "path": "behaviors/custom/Step/Step.py", "snippet": "class Step():\n\n def __init__(self, base_agent : Base_Agent) -> None:\n self.world = base_agent.world\n self.ik = base_agent.inv_kinematics\n self.description = \"Step (Skill-Set-Primitive)\"\n self.auto_head = True\n\n nao_specs = self.ik.NAO_SPECS\n self.leg_length = nao_specs[1] + nao_specs[3] # upper leg height + lower leg height\n\n feet_y_dev = nao_specs[0] * 1.2 # wider step\n sample_time = self.world.robot.STEPTIME\n max_ankle_z = nao_specs[5]\n\n # Initialize step generator with constants\n self.step_generator = Step_Generator(feet_y_dev, sample_time, max_ankle_z)\n\n\n def execute(self,reset, ts_per_step=7, z_span=0.03, z_max=0.8):\n\n lfy,lfz,rfy,rfz = self.step_generator.get_target_positions(reset, ts_per_step, z_span, self.leg_length * z_max)\n \n #----------------- Apply IK to each leg + Set joint targets\n \n # Left leg \n indices, self.values_l, error_codes = self.ik.leg((0,lfy,lfz), (0,0,0), True, dynamic_pose=False)\n for i in error_codes:\n print(f\"Joint {i} is out of range!\" if i!=-1 else \"Position is out of reach!\")\n\n self.world.robot.set_joints_target_position_direct(indices, self.values_l)\n\n # Right leg\n indices, self.values_r, error_codes = self.ik.leg((0,rfy,rfz), (0,0,0), False, dynamic_pose=False)\n for i in error_codes:\n print(f\"Joint {i} is out of range!\" if i!=-1 else \"Position is out of reach!\")\n\n self.world.robot.set_joints_target_position_direct(indices, self.values_r)\n\n # ----------------- Fixed arms\n\n indices = [14,16,18,20]\n values = np.array([-80,20,90,0])\n self.world.robot.set_joints_target_position_direct(indices,values)\n\n indices = [15,17,19,21]\n values = np.array([-80,20,90,0])\n 
self.world.robot.set_joints_target_position_direct(indices,values)\n\n return False\n \n\n def is_ready(self):\n ''' Returns True if Step Behavior is ready to start under current game/robot conditions '''\n return True" }, { "identifier": "Draw", "path": "world/commons/Draw.py", "snippet": "class Draw():\n _socket = None\n\n def __init__(self, is_enabled:bool, unum:int, host:str, port:int) -> None:\n self.enabled = is_enabled \n self._is_team_right = None\n self._unum = unum \n self._prefix = f'?{unum}_'.encode() # temporary prefix that should never be used in normal circumstances\n \n #Create one socket for all instances\n if Draw._socket is None:\n Draw._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM )\n Draw._socket.connect((host, port))\n Draw.clear_all()\n\n\n def set_team_side(self, is_right):\n ''' Called by world parser to switch side '''\n '''\n Generate an appropriate player ID\n RoboViz has a bug/feature: we send \"swap buffers for player: 'l_1' and RoboViz\n will swap every buffer that contains 'l_1' in the name, including \n 'l_10' and 'l_11'. To avoid that, we swap the separator to 'l-10', 'l-11'\n '''\n self._is_team_right = is_right\n self._prefix = f\"{'r' if is_right else 'l'}{'_' if self._unum < 10 else '-'}{self._unum}_\".encode() #e.g. b'l_5', b'l-10'\n\n\n @staticmethod\n def _send(msg, id, flush):\n ''' Private method to send message if RoboViz is accessible '''\n try:\n if flush:\n Draw._socket.send(msg + id + b'\\x00\\x00\\x00' + id + b'\\x00')\n else:\n Draw._socket.send(msg + id + b'\\x00')\n except ConnectionRefusedError:\n pass\n\n \n def circle(self, pos2d, radius, thickness, color:bytes, id:str, flush=True):\n ''' \n Draw circle\n\n Examples\n ----------\n Circle in 2D (z=0): circle((-1,2), 3, 2, Draw.Color.red, \"my_circle\")\n '''\n if not self.enabled: return\n assert type(color)==bytes, \"The RGB color must be a bytes object, e.g. red: b'\\xFF\\x00\\x00'\"\n assert not np.isnan(pos2d).any(), \"Argument 'pos2d' contains 'nan' values\"\n\n if self._is_team_right:\n pos2d = (-pos2d[0],-pos2d[1]) \n\n msg = b'\\x01\\x00' + (\n f'{f\"{pos2d[0] :.4f}\":.6s}'\n f'{f\"{pos2d[1] :.4f}\":.6s}'\n f'{f\"{radius :.4f}\":.6s}'\n f'{f\"{thickness :.4f}\":.6s}').encode() + color\n \n Draw._send(msg, self._prefix + id.encode(), flush)\n\n\n def line(self, p1, p2, thickness, color:bytes, id:str, flush=True):\n ''' \n Draw line\n\n Examples\n ----------\n Line in 3D: line((0,0,0), (0,0,2), 3, Draw.Color.red, \"my_line\") \n Line in 2D (z=0): line((0,0), (0,1), 3, Draw.Color.red, \"my_line\") \n '''\n if not self.enabled: return\n assert type(color)==bytes, \"The RGB color must be a bytes object, e.g. 
red: b'\\xFF\\x00\\x00'\"\n assert not np.isnan(p1).any(), \"Argument 'p1' contains 'nan' values\"\n assert not np.isnan(p2).any(), \"Argument 'p2' contains 'nan' values\"\n\n z1 = p1[2] if len(p1)==3 else 0\n z2 = p2[2] if len(p2)==3 else 0\n\n if self._is_team_right: \n p1 = (-p1[0],-p1[1],p1[2]) if len(p1)==3 else (-p1[0],-p1[1])\n p2 = (-p2[0],-p2[1],p2[2]) if len(p2)==3 else (-p2[0],-p2[1])\n\n msg = b'\\x01\\x01' + (\n f'{f\"{p1[0] :.4f}\":.6s}'\n f'{f\"{p1[1] :.4f}\":.6s}'\n f'{f\"{z1 :.4f}\":.6s}'\n f'{f\"{p2[0] :.4f}\":.6s}'\n f'{f\"{p2[1] :.4f}\":.6s}'\n f'{f\"{z2 :.4f}\":.6s}'\n f'{f\"{thickness :.4f}\":.6s}').encode() + color\n\n Draw._send(msg, self._prefix + id.encode(), flush)\n \n\n def point(self, pos, size, color:bytes, id:str, flush=True):\n ''' \n Draw point\n\n Examples\n ----------\n Point in 3D: point((1,1,1), 3, Draw.Color.red, \"my_point\")\n Point in 2D (z=0): point((1,1), 3, Draw.Color.red, \"my_point\")\n '''\n if not self.enabled: return\n assert type(color)==bytes, \"The RGB color must be a bytes object, e.g. red: b'\\xFF\\x00\\x00'\"\n assert not np.isnan(pos).any(), \"Argument 'pos' contains 'nan' values\"\n\n z = pos[2] if len(pos)==3 else 0\n\n if self._is_team_right: \n pos = (-pos[0],-pos[1],pos[2]) if len(pos)==3 else (-pos[0],-pos[1])\n\n msg = b'\\x01\\x02' + (\n f'{f\"{pos[0] :.4f}\":.6s}'\n f'{f\"{pos[1] :.4f}\":.6s}'\n f'{f\"{z :.4f}\":.6s}'\n f'{f\"{size :.4f}\":.6s}').encode() + color\n \n Draw._send(msg, self._prefix + id.encode(), flush)\n\n\n def sphere(self, pos, radius, color:bytes, id:str, flush=True):\n ''' \n Draw sphere\n\n Examples\n ----------\n Sphere in 3D: sphere((1,1,1), 3, Draw.Color.red, \"my_sphere\")\n Sphere in 2D (z=0): sphere((1,1), 3, Draw.Color.red, \"my_sphere\")\n '''\n if not self.enabled: return\n assert type(color)==bytes, \"The RGB color must be a bytes object, e.g. red: b'\\xFF\\x00\\x00'\"\n assert not np.isnan(pos).any(), \"Argument 'pos' contains 'nan' values\"\n\n z = pos[2] if len(pos)==3 else 0\n\n if self._is_team_right: \n pos = (-pos[0],-pos[1],pos[2]) if len(pos)==3 else (-pos[0],-pos[1])\n\n msg = b'\\x01\\x03' + (\n f'{f\"{pos[0] :.4f}\":.6s}'\n f'{f\"{pos[1] :.4f}\":.6s}'\n f'{f\"{z :.4f}\":.6s}'\n f'{f\"{radius :.4f}\":.6s}').encode() + color\n \n Draw._send(msg, self._prefix + id.encode(), flush)\n\n\n def polygon(self, vertices, color:bytes, alpha:int, id:str, flush=True):\n ''' \n Draw polygon\n\n Examples\n ----------\n Polygon in 3D: polygon(((0,0,0),(1,0,0),(0,1,0)), Draw.Color.red, 255, \"my_polygon\")\n '''\n if not self.enabled: return\n assert type(color)==bytes, \"The RGB color must be a bytes object, e.g. 
red: b'\\xFF\\x00\\x00'\"\n assert 0<=alpha<=255, \"The alpha channel (degree of opacity) must be in range [0,255]\"\n\n if self._is_team_right: \n vertices = [(-v[0],-v[1],v[2]) for v in vertices]\n\n msg = b'\\x01\\x04' + bytes([len(vertices)]) + color + alpha.to_bytes(1,'big')\n\n for v in vertices:\n msg += (\n f'{f\"{v[0] :.4f}\":.6s}'\n f'{f\"{v[1] :.4f}\":.6s}'\n f'{f\"{v[2] :.4f}\":.6s}').encode()\n \n Draw._send(msg, self._prefix + id.encode(), flush)\n\n\n def annotation(self, pos, text, color:bytes, id:str, flush=True):\n ''' \n Draw annotation\n\n Examples\n ----------\n Annotation in 3D: annotation((1,1,1), \"SOMEtext!\", Draw.Color.red, \"my_annotation\")\n Annotation in 2D (z=0): annotation((1,1), \"SOMEtext!\", Draw.Color.red, \"my_annotation\")\n '''\n if not self.enabled: return\n if type(text) != bytes: text = str(text).encode()\n assert type(color)==bytes, \"The RGB color must be a bytes object, e.g. red: b'\\xFF\\x00\\x00'\"\n z = pos[2] if len(pos)==3 else 0\n\n if self._is_team_right: \n pos = (-pos[0],-pos[1],pos[2]) if len(pos)==3 else (-pos[0],-pos[1])\n\n msg = b'\\x02\\x00' + (\n f'{f\"{pos[0] :.4f}\":.6s}'\n f'{f\"{pos[1] :.4f}\":.6s}'\n f'{f\"{z :.4f}\":.6s}').encode() + color + text + b'\\x00'\n \n Draw._send(msg, self._prefix + id.encode(), flush)\n\n \n def arrow(self, p1, p2, arrowhead_size, thickness, color:bytes, id:str, flush=True):\n ''' \n Draw arrow\n\n Examples\n ----------\n Arrow in 3D: arrow((0,0,0), (0,0,2), 0.1, 3, Draw.Color.red, \"my_arrow\")\n Arrow in 2D (z=0): arrow((0,0), (0,1), 0.1, 3, Draw.Color.red, \"my_arrow\")\n '''\n if not self.enabled: return\n assert type(color)==bytes, \"The RGB color must be a bytes object, e.g. red: b'\\xFF\\x00\\x00'\"\n\n # No need to invert sides, the called shapes will handle that\n if len(p1)==2: p1 = M.to_3d(p1) \n else: p1 = np.asarray(p1)\n if len(p2)==2: p2 = M.to_3d(p2) \n else: p2 = np.asarray(p2)\n\n vec = p2-p1\n vec_size = np.linalg.norm(vec)\n if vec_size == 0: return #return without warning/error\n if arrowhead_size > vec_size: arrowhead_size = vec_size\n\n ground_proj_perpendicular = np.array([ vec[1], -vec[0], 0 ])\n\n if np.all(ground_proj_perpendicular == 0): #vertical arrow\n ground_proj_perpendicular = np.array([ arrowhead_size/2, 0, 0 ])\n else:\n ground_proj_perpendicular *= arrowhead_size/2 / np.linalg.norm(ground_proj_perpendicular)\n\n head_start = p2 - vec * (arrowhead_size/vec_size)\n head_pt1 = head_start + ground_proj_perpendicular\n head_pt2 = head_start - ground_proj_perpendicular\n\n self.line(p1,p2,thickness,color,id,False)\n self.line(p2,head_pt1,thickness,color,id,False)\n self.line(p2,head_pt2,thickness,color,id,flush)\n\n\n def flush(self, id):\n ''' Flush specific drawing by ID '''\n if not self.enabled: return\n\n Draw._send(b'\\x00\\x00', self._prefix + id.encode(), False)\n\n def clear(self, id):\n ''' Clear specific drawing by ID '''\n if not self.enabled: return\n\n Draw._send(b'\\x00\\x00', self._prefix + id.encode(), True) #swap buffer twice\n\n\n def clear_player(self):\n ''' Clear all drawings made by this player '''\n if not self.enabled: return\n\n Draw._send(b'\\x00\\x00', self._prefix, True) #swap buffer twice\n\n\n @staticmethod\n def clear_all():\n ''' Clear all drawings of all players '''\n if Draw._socket is not None:\n Draw._send(b'\\x00\\x00\\x00\\x00\\x00',b'',False) #swap buffer twice using no id\n\n\n class Color():\n '''\n Based on X11 colors\n The names are restructured to make better suggestions\n '''\n pink_violet = b'\\xC7\\x15\\x85'\n 
pink_hot = b'\\xFF\\x14\\x93'\n pink_violet_pale = b'\\xDB\\x70\\x93'\n pink = b'\\xFF\\x69\\xB4'\n pink_pale = b'\\xFF\\xB6\\xC1'\n \n red_dark = b'\\x8B\\x00\\x00'\n red = b'\\xFF\\x00\\x00'\n red_brick = b'\\xB2\\x22\\x22'\n red_crimson = b'\\xDC\\x14\\x3C'\n red_indian = b'\\xCD\\x5C\\x5C'\n red_salmon = b'\\xFA\\x80\\x72'\n\n orange_red = b'\\xFF\\x45\\x00'\n orange = b'\\xFF\\x8C\\x00'\n orange_ligth = b'\\xFF\\xA5\\x00'\n\n yellow_gold = b'\\xFF\\xD7\\x00'\n yellow = b'\\xFF\\xFF\\x00'\n yellow_light = b'\\xBD\\xB7\\x6B'\n\n brown_maroon =b'\\x80\\x00\\x00'\n brown_dark = b'\\x8B\\x45\\x13'\n brown = b'\\xA0\\x52\\x2D'\n brown_gold = b'\\xB8\\x86\\x0B'\n brown_light = b'\\xCD\\x85\\x3F'\n brown_pale = b'\\xDE\\xB8\\x87'\n\n green_dark = b'\\x00\\x64\\x00' \n green = b'\\x00\\x80\\x00' \n green_lime = b'\\x32\\xCD\\x32' \n green_light = b'\\x00\\xFF\\x00' \n green_lawn = b'\\x7C\\xFC\\x00' \n green_pale = b'\\x90\\xEE\\x90' \n\n cyan_dark = b'\\x00\\x80\\x80' \n cyan_medium = b'\\x00\\xCE\\xD1' \n cyan = b'\\x00\\xFF\\xFF' \n cyan_light = b'\\xAF\\xEE\\xEE'\n\n blue_dark = b'\\x00\\x00\\x8B' \n blue = b'\\x00\\x00\\xFF' \n blue_royal = b'\\x41\\x69\\xE1' \n blue_medium = b'\\x1E\\x90\\xFF' \n blue_light = b'\\x00\\xBF\\xFF'\n blue_pale = b'\\x87\\xCE\\xEB'\n\n purple_violet = b'\\x94\\x00\\xD3' \n purple_magenta = b'\\xFF\\x00\\xFF' \n purple_light = b'\\xBA\\x55\\xD3' \n purple_pale = b'\\xDD\\xA0\\xDD'\n\n white = b'\\xFF\\xFF\\xFF'\n gray_10 = b'\\xE6\\xE6\\xE6'\n gray_20 = b'\\xCC\\xCC\\xCC'\n gray_30 = b'\\xB2\\xB2\\xB2' \n gray_40 = b'\\x99\\x99\\x99'\n gray_50 = b'\\x80\\x80\\x80'\n gray_60 = b'\\x66\\x66\\x66'\n gray_70 = b'\\x4C\\x4C\\x4C'\n gray_80 = b'\\x33\\x33\\x33'\n gray_90 = b'\\x1A\\x1A\\x1A'\n black = b'\\x00\\x00\\x00' \n\n @staticmethod\n def get(r,g,b):\n ''' Get RGB color (0-255) '''\n return bytes([int(r),int(g),int(b)])" }, { "identifier": "Server", "path": "scripts/commons/Server.py", "snippet": "class Server():\n def __init__(self, first_server_p, first_monitor_p, n_servers) -> None:\n try:\n import psutil\n self.check_running_servers(psutil, first_server_p, first_monitor_p, n_servers)\n except ModuleNotFoundError:\n print(\"Info: Cannot check if the server is already running, because the psutil module was not found\")\n \n self.first_server_p = first_server_p\n self.n_servers = n_servers\n self.rcss_processes = []\n\n # makes it easier to kill test servers without affecting train servers\n cmd = \"simspark\" if n_servers == 1 else \"rcssserver3d\"\n for i in range(n_servers):\n self.rcss_processes.append(\n subprocess.Popen((f\"{cmd} --agent-port {first_server_p+i} --server-port {first_monitor_p+i}\").split(),\n stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT, start_new_session=True)\n )\n\n def check_running_servers(self, psutil, first_server_p, first_monitor_p, n_servers):\n ''' Check if any server is running on chosen ports '''\n found = False\n p_list = [p for p in psutil.process_iter() if p.cmdline() and p.name() in [\"rcssserver3d\",\"simspark\"]]\n range1 = (first_server_p, first_server_p + n_servers)\n range2 = (first_monitor_p,first_monitor_p + n_servers)\n bad_processes = []\n\n for p in p_list: \n # currently ignoring remaining default port when only one of the ports is specified (uncommon scenario)\n ports = [int(arg) for arg in p.cmdline()[1:] if arg.isdigit()]\n if len(ports) == 0:\n ports = [3100,3200] # default server ports (changing this is unlikely)\n\n conflicts = [str(port) for port in ports if (\n (range1[0] <= port < range1[1]) or 
(range2[0] <= port < range2[1]) )]\n\n if len(conflicts)>0:\n if not found:\n print(\"\\nThere are already servers running on the same port(s)!\")\n found = True\n bad_processes.append(p)\n print(f\"Port(s) {','.join(conflicts)} already in use by \\\"{' '.join(p.cmdline())}\\\" (PID:{p.pid})\")\n\n if found:\n print()\n while True:\n inp = input(\"Enter 'kill' to kill these processes or ctrl+c to abort. \")\n if inp == \"kill\":\n for p in bad_processes:\n p.kill()\n return\n \n\n def kill(self):\n for p in self.rcss_processes:\n p.kill()\n print(f\"Killed {self.n_servers} rcssserver3d processes starting at {self.first_server_p}\")" }, { "identifier": "Train_Base", "path": "scripts/commons/Train_Base.py", "snippet": "class Train_Base():\n def __init__(self, script) -> None:\n '''\n When training with multiple environments (multiprocessing):\n The server port is incremented as follows:\n self.server_p, self.server_p+1, self.server_p+2, ...\n We add +1000 to the initial monitor port, so than we can have more than 100 environments:\n self.monitor_p+1000, self.monitor_p+1001, self.monitor_p+1002, ...\n When testing we use self.server_p and self.monitor_p\n '''\n\n args = script.args\n self.script = script\n self.ip = args.i\n self.server_p = args.p # (initial) server port\n self.monitor_p = args.m # monitor port when testing\n self.monitor_p_1000 = args.m + 1000 # initial monitor port when training\n self.robot_type = args.r\n self.team = args.t\n self.uniform = args.u\n self.cf_last_time = 0\n self.cf_delay = 0\n self.cf_target_period = World.STEPTIME # target simulation speed while testing (default: real-time)\n\n @staticmethod\n def prompt_user_for_model():\n\n gyms_logs_path = \"./scripts/gyms/logs/\"\n folders = [f for f in listdir(gyms_logs_path) if isdir(join(gyms_logs_path, f))]\n folders.sort(key=lambda f: os.path.getmtime(join(gyms_logs_path, f)), reverse=True) # sort by modification date\n\n while True:\n try:\n folder_name = UI.print_list(folders,prompt=\"Choose folder (ctrl+c to return): \")[1]\n except KeyboardInterrupt:\n print()\n return None # ctrl+c\n\n folder_dir = os.path.join(gyms_logs_path, folder_name)\n models = [m[:-4] for m in listdir(folder_dir) if isfile(join(folder_dir, m)) and m.endswith(\".zip\")]\n\n if not models:\n print(\"The chosen folder does not contain any .zip file!\")\n continue\n\n models.sort(key=lambda m: os.path.getmtime(join(folder_dir, m+\".zip\")), reverse=True) # sort by modification date\n \n try:\n model_name = UI.print_list(models,prompt=\"Choose model (ctrl+c to return): \")[1]\n break\n except KeyboardInterrupt:\n print()\n\n return {\"folder_dir\":folder_dir, \"folder_name\":folder_name, \"model_file\":os.path.join(folder_dir, model_name+\".zip\")}\n\n\n def control_fps(self, read_input = False):\n ''' Add delay to control simulation speed '''\n\n if read_input:\n speed = input()\n if speed == '':\n self.cf_target_period = 0\n print(f\"Changed simulation speed to MAX\")\n else:\n if speed == '0':\n inp = input(\"Paused. 
Set new speed or '' to use previous speed:\")\n if inp != '':\n speed = inp \n\n try:\n speed = int(speed)\n assert speed >= 0\n self.cf_target_period = World.STEPTIME * 100 / speed\n print(f\"Changed simulation speed to {speed}%\")\n except:\n print(\"\"\"Train_Base.py: \n Error: To control the simulation speed, enter a non-negative integer.\n To disable this control module, use test_model(..., enable_FPS_control=False) in your gym environment.\"\"\")\n\n now = time.time()\n period = now - self.cf_last_time\n self.cf_last_time = now\n self.cf_delay += (self.cf_target_period - period)*0.9\n if self.cf_delay > 0:\n time.sleep(self.cf_delay)\n else:\n self.cf_delay = 0\n\n\n def test_model(self, model:BaseAlgorithm, env, log_path:str=None, model_path:str=None, max_episodes=0, enable_FPS_control=True, verbose=1):\n '''\n Test model and log results\n\n Parameters\n ----------\n model : BaseAlgorithm\n Trained model \n env : Env\n Gym-like environment\n log_path : str\n Folder where statistics file is saved, default is `None` (no file is saved)\n model_path : str\n Folder where it reads evaluations.npz to plot it and create evaluations.csv, default is `None` (no plot, no csv)\n max_episodes : int\n Run tests for this number of episodes\n Default is 0 (run until user aborts)\n verbose : int\n 0 - no output (except if enable_FPS_control=True)\n 1 - print episode statistics\n '''\n\n if model_path is not None:\n assert os.path.isdir(model_path), f\"{model_path} is not a valid path\"\n self.display_evaluations(model_path)\n\n if log_path is not None:\n assert os.path.isdir(log_path), f\"{log_path} is not a valid path\"\n\n # If file already exists, don't overwrite\n if os.path.isfile(log_path + \"/test.csv\"):\n for i in range(1000):\n p = f\"{log_path}/test_{i:03}.csv\"\n if not os.path.isfile(p):\n log_path = p\n break\n else:\n log_path += \"/test.csv\"\n \n with open(log_path, 'w') as f:\n f.write(\"reward,ep. length,rew. cumulative avg., ep. len. cumulative avg.\\n\")\n print(\"Train statistics are saved to:\", log_path)\n\n if enable_FPS_control: # control simulation speed (using non blocking user input)\n print(\"\\nThe simulation speed can be changed by sending a non-negative integer\\n\"\n \"(e.g. 
'50' sets speed to 50%, '0' pauses the simulation, '' sets speed to MAX)\\n\")\n\n ep_reward = 0\n ep_length = 0\n rewards_sum = 0\n reward_min = math.inf\n reward_max = -math.inf\n ep_lengths_sum = 0\n ep_no = 0\n\n obs = env.reset()\n while True:\n action, _states = model.predict(obs, deterministic=True)\n obs, reward, done, info = env.step(action)\n ep_reward += reward\n ep_length += 1\n\n if enable_FPS_control: # control simulation speed (using non blocking user input)\n self.control_fps(select.select([sys.stdin], [], [], 0)[0]) \n\n if done:\n obs = env.reset()\n rewards_sum += ep_reward\n ep_lengths_sum += ep_length\n reward_max = max(ep_reward, reward_max)\n reward_min = min(ep_reward, reward_min)\n ep_no += 1\n avg_ep_lengths = ep_lengths_sum/ep_no\n avg_rewards = rewards_sum/ep_no\n\n if verbose > 0:\n print( f\"\\rEpisode: {ep_no:<3} Ep.Length: {ep_length:<4.0f} Reward: {ep_reward:<6.2f} \\n\",\n end=f\"--AVERAGE-- Ep.Length: {avg_ep_lengths:<4.0f} Reward: {avg_rewards:<6.2f} (Min: {reward_min:<6.2f} Max: {reward_max:<6.2f})\", flush=True)\n \n if log_path is not None:\n with open(log_path, 'a') as f:\n writer = csv.writer(f)\n writer.writerow([ep_reward, ep_length, avg_rewards, avg_ep_lengths])\n \n if ep_no == max_episodes:\n return\n\n ep_reward = 0\n ep_length = 0\n\n def learn_model(self, model:BaseAlgorithm, total_steps:int, path:str, eval_env=None, eval_freq=None, eval_eps=5, save_freq=None, backup_env_file=None, export_name=None):\n '''\n Learn Model for a specific number of time steps\n\n Parameters\n ----------\n model : BaseAlgorithm\n Model to train\n total_steps : int\n The total number of samples (env steps) to train on\n path : str\n Path where the trained model is saved\n If the path already exists, an incrementing number suffix is added\n eval_env : Env\n Environment to periodically test the model\n Default is None (no periodical evaluation)\n eval_freq : int\n Evaluate the agent every X steps\n Default is None (no periodical evaluation)\n eval_eps : int\n Evaluate the agent for X episodes (both eval_env and eval_freq must be defined)\n Default is 5\n save_freq : int\n Saves model at every X steps\n Default is None (no periodical checkpoint)\n backup_gym_file : str\n Generates backup of environment file in model's folder\n Default is None (no backup)\n export_name : str\n If export_name and save_freq are defined, a model is exported every X steps\n Default is None (no export)\n\n Returns\n -------\n model_path : str\n Directory where model was actually saved (considering incremental suffix)\n\n Notes\n -----\n If `eval_env` and `eval_freq` were specified:\n - The policy will be evaluated in `eval_env` every `eval_freq` steps\n - Evaluation results will be saved in `path` and shown at the end of training\n - Every time the results improve, the model is saved\n '''\n\n start = time.time()\n start_date = datetime.now().strftime(\"%d/%m/%Y %H:%M:%S\")\n\n # If path already exists, add suffix to avoid overwriting\n if os.path.isdir(path):\n for i in count():\n p = path.rstrip(\"/\")+f'_{i:03}/'\n if not os.path.isdir(p):\n path = p\n break\n os.makedirs(path)\n\n # Backup environment file\n if backup_env_file is not None:\n backup_file = os.path.join(path, os.path.basename(backup_env_file))\n copy(backup_env_file, backup_file)\n\n evaluate = bool(eval_env is not None and eval_freq is not None)\n\n # Create evaluation callback\n eval_callback = None if not evaluate else EvalCallback(eval_env, n_eval_episodes=eval_eps, eval_freq=eval_freq, log_path=path,\n 
best_model_save_path=path, deterministic=True, render=False)\n\n # Create custom callback to display evaluations\n custom_callback = None if not evaluate else Cyclic_Callback(eval_freq, lambda:self.display_evaluations(path,True))\n\n # Create checkpoint callback\n checkpoint_callback = None if save_freq is None else CheckpointCallback(save_freq=save_freq, save_path=path, name_prefix=\"model\", verbose=1)\n\n # Create custom callback to export checkpoint models\n export_callback = None if save_freq is None or export_name is None else Export_Callback(save_freq, path, export_name)\n\n callbacks = CallbackList([c for c in [eval_callback, custom_callback, checkpoint_callback, export_callback] if c is not None])\n\n model.learn( total_timesteps=total_steps, callback=callbacks )\n model.save( os.path.join(path, \"last_model\") )\n\n # Display evaluations if they exist\n if evaluate:\n self.display_evaluations(path)\n\n # Display timestamps + Model path\n end_date = datetime.now().strftime('%d/%m/%Y %H:%M:%S')\n duration = timedelta(seconds=int(time.time()-start))\n print(f\"Train start: {start_date}\")\n print(f\"Train end: {end_date}\")\n print(f\"Train duration: {duration}\")\n print(f\"Model path: {path}\")\n \n # Append timestamps to backup environment file\n if backup_env_file is not None:\n with open(backup_file, 'a') as f:\n f.write(f\"\\n# Train start: {start_date}\\n\")\n f.write( f\"# Train end: {end_date}\\n\")\n f.write( f\"# Train duration: {duration}\")\n\n return path\n\n def display_evaluations(self, path, save_csv=False):\n\n eval_npz = os.path.join(path, \"evaluations.npz\")\n\n if not os.path.isfile(eval_npz):\n return\n\n console_width = 80\n console_height = 18\n symb_x = \"\\u2022\"\n symb_o = \"\\u007c\"\n symb_xo = \"\\u237f\"\n\n with np.load(eval_npz) as data:\n time_steps = data[\"timesteps\"]\n results_raw = np.mean(data[\"results\"],axis=1)\n ep_lengths_raw = np.mean(data[\"ep_lengths\"],axis=1)\n sample_no = len(results_raw)\n\n xvals = np.linspace(0, sample_no-1, 80)\n results = np.interp(xvals, range(sample_no), results_raw)\n ep_lengths = np.interp(xvals, range(sample_no), ep_lengths_raw)\n\n results_limits = np.min(results), np.max(results)\n ep_lengths_limits = np.min(ep_lengths), np.max(ep_lengths)\n\n results_discrete = np.digitize(results, np.linspace(results_limits[0]-1e-5, results_limits[1]+1e-5, console_height+1))-1\n ep_lengths_discrete = np.digitize(ep_lengths, np.linspace(0, ep_lengths_limits[1]+1e-5, console_height+1))-1\n\n matrix = np.zeros((console_height, console_width, 2), int)\n matrix[results_discrete[0] ][0][0] = 1 # draw 1st column\n matrix[ep_lengths_discrete[0]][0][1] = 1 # draw 1st column\n rng = [[results_discrete[0], results_discrete[0]], [ep_lengths_discrete[0], ep_lengths_discrete[0]]]\n\n # Create continuous line for both plots\n for k in range(2):\n for i in range(1,console_width):\n x = [results_discrete, ep_lengths_discrete][k][i]\n if x > rng[k][1]:\n rng[k] = [rng[k][1]+1, x]\n elif x < rng[k][0]:\n rng[k] = [x, rng[k][0]-1]\n else:\n rng[k] = [x,x]\n for j in range(rng[k][0],rng[k][1]+1):\n matrix[j][i][k] = 1\n\n print(f'{\"-\"*console_width}')\n for l in reversed(range(console_height)):\n for c in range(console_width):\n if np.all(matrix[l][c] == 0): print(end=\" \")\n elif np.all(matrix[l][c] == 1): print(end=symb_xo)\n elif matrix[l][c][0] == 1: print(end=symb_x)\n else: print(end=symb_o)\n print()\n print(f'{\"-\"*console_width}')\n print(f\"({symb_x})-reward min:{results_limits[0]:11.2f} max:{results_limits[1]:11.2f}\")\n 
print(f\"({symb_o})-ep. length min:{ep_lengths_limits[0]:11.0f} max:{ep_lengths_limits[1]:11.0f} {time_steps[-1]/1000:15.0f}k steps\")\n print(f'{\"-\"*console_width}')\n\n # save CSV\n if save_csv:\n eval_csv = os.path.join(path, \"evaluations.csv\")\n with open(eval_csv, 'a+') as f:\n writer = csv.writer(f)\n if sample_no == 1:\n writer.writerow([\"time_steps\", \"reward ep.\", \"length\"])\n writer.writerow([time_steps[-1],results_raw[-1],ep_lengths_raw[-1]])\n\n\n def generate_slot_behavior(self, path, slots, auto_head:bool, XML_name):\n '''\n Function that generates the XML file for the optimized slot behavior, overwriting previous files\n '''\n\n file = os.path.join( path, XML_name )\n\n # create the file structure\n auto_head = '1' if auto_head else '0'\n EL_behavior = ET.Element('behavior',{'description':'Add description to XML file', \"auto_head\":auto_head})\n\n for i,s in enumerate(slots):\n EL_slot = ET.SubElement(EL_behavior, 'slot', {'name':str(i), 'delta':str(s[0]/1000)})\n for j in s[1]: # go through all joint indices\n ET.SubElement(EL_slot, 'move', {'id':str(j), 'angle':str(s[2][j])})\n\n # create XML file\n xml_rough = ET.tostring( EL_behavior, 'utf-8' )\n xml_pretty = minidom.parseString(xml_rough).toprettyxml(indent=\" \")\n with open(file, \"w\") as x:\n x.write(xml_pretty)\n \n print(file, \"was created!\")\n\n @staticmethod\n def linear_schedule(initial_value: float) -> Callable[[float], float]:\n '''\n Linear learning rate schedule\n\n Parameters\n ----------\n initial_value : float\n Initial learning rate\n \n Returns\n -------\n schedule : Callable[[float], float]\n schedule that computes current learning rate depending on remaining progress\n '''\n def func(progress_remaining: float) -> float:\n '''\n Compute learning rate according to current progress\n\n Parameters\n ----------\n progress_remaining : float\n Progress will decrease from 1 (beginning) to 0\n \n Returns\n -------\n learning_rate : float\n Learning rate according to current progress\n '''\n return progress_remaining * initial_value\n\n return func\n\n @staticmethod\n def export_model(input_file, output_file, add_sufix=True):\n '''\n Export model weights to binary file\n\n Parameters\n ----------\n input_file : str\n Input file, compatible with algorithm\n output_file : str\n Output file, including directory\n add_sufix : bool\n If true, a suffix is appended to the file name: output_file + \"_{index}.pkl\"\n '''\n\n # If file already exists, don't overwrite\n if add_sufix:\n for i in count():\n f = f\"{output_file}_{i:03}.pkl\"\n if not os.path.isfile(f):\n output_file = f\n break\n \n model = PPO.load(input_file)\n weights = model.policy.state_dict() # dictionary containing network layers\n\n w = lambda name : weights[name].detach().cpu().numpy() # extract weights from policy\n\n var_list = []\n for i in count(0,2): # add hidden layers (step=2 because that's how SB3 works)\n if f\"mlp_extractor.policy_net.{i}.bias\" not in weights:\n break\n var_list.append([w(f\"mlp_extractor.policy_net.{i}.bias\"), w(f\"mlp_extractor.policy_net.{i}.weight\"), \"tanh\"])\n\n var_list.append( [w(\"action_net.bias\"), w(\"action_net.weight\"), \"none\"] ) # add final layer\n \n with open(output_file,\"wb\") as f:\n pickle.dump(var_list, f, protocol=4) # protocol 4 is backward compatible with Python 3.4" } ]
from agent.Base_Agent import Base_Agent as Agent
from behaviors.custom.Step.Step import Step
from world.commons.Draw import Draw
from stable_baselines3 import PPO
from stable_baselines3.common.vec_env import SubprocVecEnv
from scripts.commons.Server import Server
from scripts.commons.Train_Base import Train_Base
from time import sleep
import os, gym
import numpy as np
10,926
'''
Objective:
Learn how to run forward using step primitive
----------
- class Basic_Run: implements an OpenAI custom gym
- class Train: implements algorithms to train a new model or test an existing model
'''

class Basic_Run(gym.Env):
    def __init__(self, ip, server_p, monitor_p, r_type, enable_draw) -> None:

        self.robot_type = r_type

        # Args: Server IP, Agent Port, Monitor Port, Uniform No., Robot Type, Team Name, Enable Log, Enable Draw
        self.player = Agent(ip, server_p, monitor_p, 1, self.robot_type, "Gym", True, enable_draw)
        self.step_counter = 0  # to limit episode size
'''
Objective:
Learn how to run forward using step primitive
----------
- class Basic_Run: implements an OpenAI custom gym
- class Train: implements algorithms to train a new model or test an existing model
'''

class Basic_Run(gym.Env):
    def __init__(self, ip, server_p, monitor_p, r_type, enable_draw) -> None:

        self.robot_type = r_type

        # Args: Server IP, Agent Port, Monitor Port, Uniform No., Robot Type, Team Name, Enable Log, Enable Draw
        self.player = Agent(ip, server_p, monitor_p, 1, self.robot_type, "Gym", True, enable_draw)
        self.step_counter = 0  # to limit episode size
self.step_obj : Step = self.player.behavior.get_custom_behavior_object("Step") # Step behavior object
1
2023-12-16 23:40:23+00:00
16k
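This record's import block pairs the simulator-backed gym environment with stable-baselines3's PPO and SubprocVecEnv. Below is a minimal, self-contained sketch of that vectorized-training pattern using a stand-in environment instead of Basic_Run; the observation/action sizes, the placeholder reward, and the PPO hyperparameters are illustrative assumptions, and it presumes an SB3 version that accepts the classic gym API used in these imports.

import gym
import numpy as np
from stable_baselines3 import PPO
from stable_baselines3.common.vec_env import SubprocVecEnv

class TinyRunEnv(gym.Env):
    # Stand-in with the same Box-observation / Box-action layout idea as Basic_Run (sizes are placeholders)
    def __init__(self):
        self.observation_space = gym.spaces.Box(-np.inf, np.inf, shape=(70,), dtype=np.float32)
        self.action_space = gym.spaces.Box(-1.0, 1.0, shape=(22,), dtype=np.float32)
        self.step_counter = 0

    def reset(self):
        self.step_counter = 0
        return np.zeros(70, dtype=np.float32)

    def step(self, action):
        self.step_counter += 1
        obs = np.zeros(70, dtype=np.float32)
        reward = float(np.clip(action, -1, 1)[0])  # placeholder reward, not the real forward-progress reward
        done = self.step_counter >= 64             # episode-size limit, mirroring step_counter in Basic_Run
        return obs, reward, done, {}

if __name__ == "__main__":
    # SubprocVecEnv takes a list of env factories and runs one subprocess per environment
    env = SubprocVecEnv([TinyRunEnv for _ in range(4)])
    model = PPO("MlpPolicy", env, n_steps=128, batch_size=64, verbose=0)
    model.learn(total_timesteps=2048)
    env.close()

In the repository itself, a Train class built on Train_Base (shown in this record's context) wires the real environment, server ports, and evaluation callbacks around this same PPO/SubprocVecEnv pattern.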
Angryrou/udao
udao/optimization/tests/moo/test_parallel_progressive_frontier.py
[ { "identifier": "DataProcessor", "path": "udao/data/handler/data_processor.py", "snippet": "class DataProcessor(Generic[IT]):\n \"\"\"\n Parameters\n ----------\n iterator_cls: Type[BaseDatasetIterator]\n Dataset iterator class type.\n\n feature_extractors: Mapping[str, Tuple[FeatureExtractorType, Any]]\n Dict that links a feature name to tuples of the form (Extractor, args)\n where Extractor implements FeatureExtractor and args are the arguments\n to be passed at initialization.\n N.B.: Feature names must match the iterator's parameters.\n\n If Extractor is a StaticExtractor, the features are extracted\n independently of the split.\n\n If Extractor is a TrainedExtractor, the extractor is first fitted\n on the train split and then applied to the other splits.\n\n feature_preprocessors: Optional[Mapping[str, List[FeaturePreprocessor]]]\n Dict that links a feature name to a list of tuples of the form (Processor, args)\n where Processor implements FeatureProcessor and args are the arguments\n to be passed at initialization.\n This allows to apply a series of processors to different features, e.g.\n to normalize the features.\n N.B.: Feature names must match the iterator's parameters.\n If Processor is a StaticExtractor, the features are processed\n independently of the split.\n\n If Extractor is a TrainedExtractor, the processor is first fitted\n on the train split and then applied to the other splits\n (typically for normalization).\n\n tensors_dtype: Optional[th.dtype]\n Data type of the tensors returned by the iterator, by default None\n \"\"\"\n\n def __init__(\n self,\n iterator_cls: Type[IT],\n feature_extractors: Dict[str, FeatureExtractor],\n feature_preprocessors: Optional[\n Mapping[\n str,\n Sequence[FeaturePreprocessor],\n ]\n ] = None,\n tensors_dtype: Optional[th.dtype] = None,\n ) -> None:\n self.iterator_cls = iterator_cls\n self.feature_extractors = feature_extractors\n self.feature_processors = feature_preprocessors or {}\n\n def _apply_processing_function(\n self,\n function: Callable[..., BaseContainer],\n data: Union[DataFrame, BaseContainer],\n split: DatasetType,\n is_trained: bool,\n ) -> BaseContainer:\n if is_trained:\n features = function(data, split=split)\n else:\n features = function(data)\n\n return features\n\n def extract_features(\n self, data: DataFrame, split: DatasetType\n ) -> Dict[str, BaseContainer]:\n \"\"\"Extract features for the different splits of the data.\n\n Returns\n -------\n DataHandler\n self\n\n Raises\n ------\n ValueError\n Expects data to be split before extracting features.\n \"\"\"\n features: Dict[str, BaseContainer] = {}\n for name, extractor in self.feature_extractors.items():\n features[name] = self._apply_processing_function(\n extractor.extract_features,\n data,\n split=split,\n is_trained=extractor.trained,\n )\n for preprocessor in self.feature_processors.get(name, []):\n features[name] = self._apply_processing_function(\n preprocessor.preprocess,\n features[name],\n split=split,\n is_trained=preprocessor.trained,\n )\n\n return features\n\n def make_iterator(self, data: DataFrame, keys: Sequence, split: DatasetType) -> IT:\n return self.iterator_cls(keys, **self.extract_features(data, split=split))\n\n def inverse_transform(\n self, container: TabularContainer, pipeline_name: str\n ) -> DataFrame:\n \"\"\"Inverse transform the data to the original format.\n\n Parameters\n ----------\n container: TabularContainer\n Data to be inverse transformed.\n pipeline_name: str\n Name of the feature pipeline to be inverse transformed.\n 
Returns\n -------\n DataFrame\n Inverse transformed data.\n \"\"\"\n\n extractor = self.feature_extractors[pipeline_name]\n if not isinstance(extractor, TabularFeatureExtractor):\n raise ValueError(\n \"Only TabularFeatureExtractor supports\"\n \"transforming back to original dataframe.\"\n )\n preprocessors = self.feature_processors.get(pipeline_name, [])\n\n for preprocessor in preprocessors[::-1]:\n if not hasattr(preprocessor, \"inverse_transform\"):\n raise ValueError(\n f\"Feature preprocessor {pipeline_name} does \"\n \"not have an inverse transform method.\"\n )\n container = preprocessor.inverse_transform(container) # type: ignore\n df = cast(TabularContainer, container).data\n return df" }, { "identifier": "set_deterministic_torch", "path": "udao/model/utils/utils.py", "snippet": "def set_deterministic_torch(seed: int = 0) -> None:\n \"\"\"\n Set seeds and configurations to enable deterministic behavior in PyTorch.\n\n Parameters\n ----------\n seed : int\n Random seed to use.\n \"\"\"\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n\n if torch.cuda.is_available():\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n torch.use_deterministic_algorithms(True) # type: ignore" }, { "identifier": "MOProblem", "path": "udao/optimization/concepts/problem.py", "snippet": "class MOProblem(BaseProblem):\n \"\"\"Multi-objective optimization problem.\"\"\"\n\n def __init__(\n self,\n objectives: Sequence[Objective],\n variables: Dict[str, Variable],\n constraints: Sequence[Constraint],\n data_processor: Optional[DataProcessor] = None,\n input_parameters: Optional[Dict[str, Any]] = None,\n ) -> None:\n self.objectives = objectives\n super().__init__(\n variables,\n constraints,\n data_processor=data_processor,\n input_parameters=input_parameters,\n )\n\n def __repr__(self) -> str:\n return (\n f\"MOProblem(objectives={self.objectives}, \"\n f\"variables={self.variables}, \"\n f\"constraints={self.constraints}, \"\n f\"input_parameters={self.input_parameters})\"\n )" }, { "identifier": "ParallelProgressiveFrontier", "path": "udao/optimization/moo/progressive_frontier/parallel_progressive_frontier.py", "snippet": "class ParallelProgressiveFrontier(BaseProgressiveFrontier):\n @dataclass\n class Params(BaseProgressiveFrontier.Params):\n processes: int = 1\n \"\"\"Processes to use for parallel processing\"\"\"\n n_grids: int = 2\n \"\"\"Number of splits per objective\"\"\"\n max_iters: int = 10\n \"\"\"Number of iterations to explore the space\"\"\"\n\n def __init__(\n self,\n solver: SOSolver,\n params: Params,\n ) -> None:\n super().__init__(\n solver,\n params,\n )\n self.processes = params.processes\n self.n_grids = params.n_grids\n self.max_iters = params.max_iters\n\n def solve(\n self,\n problem: MOProblem,\n seed: Optional[int] = None,\n ) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n solve MOO by PF-AP (Progressive Frontier - Approximation Parallel)\n\n Parameters\n ----------\n problem : MOProblem\n MOO problem to be solved\n Returns\n -------\n po_objs: ndarray\n Pareto optimal objective values, of shape\n (n_solutions, n_objs)\n po_vars: ndarray\n corresponding variables of Pareto solutions, of shape\n (n_solutions, n_vars)\n \"\"\"\n # create initial rectangle\n # get initial plans/form a intial hyperrectangle\n\n plans: List[Point] = []\n n_objs = len(problem.objectives)\n\n all_objs_list: List[np.ndarray] = []\n all_vars_list: List[Dict] = []\n for i in range(n_objs):\n anchor_point = self.get_anchor_point(problem=problem, obj_ind=i, seed=seed)\n 
if anchor_point.vars is None:\n raise Exception(\"This should not happen.\")\n plans.append(anchor_point)\n all_objs_list.append(anchor_point.objs)\n all_vars_list.append(anchor_point.vars)\n logger.debug(f\"the initial plans are: {plans}\")\n if n_objs < 2 or n_objs > 3:\n raise Exception(f\"{n_objs} objectives are not supported for now!\")\n\n for i in range(self.max_iters):\n # choose the cell with max volume to explore\n max_volume = -1\n input_ind = -1\n for i in range(len(all_objs_list) - 1):\n current_volume = abs(\n np.prod(np.array(all_objs_list)[i] - np.array(all_objs_list)[i + 1])\n )\n logger.debug(f\"volume {current_volume}\")\n if current_volume > max_volume:\n max_volume = current_volume\n input_ind = i\n\n plan = [\n Point(objs=np.array(all_objs_list)[input_ind]),\n Point(objs=np.array(all_objs_list)[input_ind + 1]),\n ]\n utopia, nadir = self.get_utopia_and_nadir(plan)\n if utopia is None or nadir is None:\n raise NoSolutionError(\"Cannot find utopia/nadir points\")\n # create uniform n_grids ^ (n_objs) grid cells based on the rectangle\n grid_cells_list = self._create_grid_cells(\n utopia, nadir, self.n_grids, n_objs\n )\n\n obj_bound_cells = []\n for cell in grid_cells_list:\n obj_bound_dict = self._form_obj_bounds_dict(\n problem, cell.utopia, cell.nadir\n )\n obj_bound_cells.append(obj_bound_dict)\n\n logger.debug(f\"the cells are: {obj_bound_cells}\")\n ret_list = self.parallel_soo(\n problem=problem,\n objective=problem.objectives[self.opt_obj_ind],\n cell_list=obj_bound_cells,\n seed=seed,\n )\n\n po_objs_list: List[np.ndarray] = []\n po_vars_list: List[Dict] = []\n for soo_obj, soo_vars in ret_list:\n if soo_obj is None:\n logger.debug(\"This is an empty area!\")\n continue\n else:\n po_objs_list.append(self._compute_objectives(problem, soo_vars))\n if soo_vars is None:\n raise Exception(\"Unexpected vars None for objective value.\")\n po_vars_list.append(soo_vars)\n\n logger.debug(f\"the po_objs_list is: {po_objs_list}\")\n logger.debug(f\"the po_vars_list is: {po_vars_list}\")\n all_objs_list.extend(po_objs_list)\n all_vars_list.extend(po_vars_list)\n logger.debug(f\"the all_objs_list is: {all_objs_list}\")\n all_objs, all_vars = moo_ut.summarize_ret(all_objs_list, all_vars_list)\n all_objs_list = all_objs.tolist() if all_objs is not None else []\n all_vars_list = all_vars.tolist() if all_vars is not None else []\n\n return np.array(all_objs_list), np.array(all_vars_list)\n\n def _solve_wrapper(\n self, problem: SOProblem, seed: Optional[int] = None\n ) -> Optional[Tuple[float, Dict[str, Any]]]:\n \"\"\"Handle exceptions in solver call for parallel processing.\"\"\"\n try:\n return self.solver.solve(problem, seed=seed)\n except NoSolutionError:\n logger.debug(f\"This is an empty area! 
{problem}\")\n return None\n\n def parallel_soo(\n self,\n problem: MOProblem,\n objective: Objective,\n cell_list: List[Dict[str, Any]],\n seed: Optional[int] = None,\n ) -> List[Tuple[float, Dict[str, Any]]]:\n \"\"\"Parallel calls to SOO Solver for each cell in cell_list, returns a\n candidate tuple (objective_value, variables) for each cell.\n\n Parameters\n ----------\n objective : Objective\n Objective to be optimized\n cell_list : List[Dict[str, Any]]\n List of cells to be optimized\n (a cell is a dict of bounds for each objective)\n input_parameters : Optional[Dict[str, Any]], optional\n Fixed parameters to be passed , by default None\n\n Returns\n -------\n List[Tuple[float, Dict[str, Any]]]\n List of candidate tuples (objective_value, variables)\n \"\"\"\n # generate the list of input parameters for constraint_so_opt\n args_list: List[Tuple[SOProblem, Optional[int]]] = []\n for obj_bounds_dict in cell_list:\n so_problem = self._so_problem_from_bounds_dict(\n problem, obj_bounds_dict, objective\n )\n args_list.append((so_problem, seed))\n\n if th.cuda.is_available():\n th.multiprocessing.set_start_method(\"spawn\", force=True)\n if self.processes == 1:\n ret_list = [self._solve_wrapper(*args) for args in args_list]\n else:\n # call self.constraint_so_opt parallely\n with Pool(processes=self.processes) as pool:\n ret_list = pool.starmap(self._solve_wrapper, args_list)\n return [res for res in ret_list if res is not None]\n\n @staticmethod\n def _create_grid_cells(\n utopia: Point, nadir: Point, n_grids: int, n_objs: int\n ) -> List[Rectangle]:\n \"\"\"\n Create cells used in Progressive Frontier(PF)-Approximation\n Parallel (AP) algorithm\n\n Parameters\n ----------\n utopia: Point\n the utopia point\n nadir: Point\n the nadir point\n n_grids: int\n the number of grids per objective\n n_objs: int\n the number of objectives\n\n Returns\n -------\n List[Rectangle]\n The rectangles in which to perform optimization.\n \"\"\"\n grids_per_var = np.linspace(\n utopia.objs, nadir.objs, num=n_grids + 1, endpoint=True\n )\n objs_list = [grids_per_var[:, i] for i in range(n_objs)]\n\n ## generate cartesian product of indices for grids\n grids_inds_per_var = np.linspace(0, n_grids - 1, num=n_grids, endpoint=True)\n x = np.tile(grids_inds_per_var, (n_objs, 1))\n grids_inds = np.array([list(i) for i in itertools.product(*x)]).astype(int)\n\n grid_cell_list = []\n for grid_ind in grids_inds:\n sub_u_objs = np.array([objs_list[i][id] for i, id in enumerate(grid_ind)])\n sub_u_point = Point(sub_u_objs)\n sub_nadir_objs = np.array(\n [objs_list[i][id + 1] for i, id in enumerate(grid_ind)]\n )\n sub_nadir_point = Point(sub_nadir_objs)\n assert all((sub_nadir_objs - sub_u_objs) >= 0)\n cell = Rectangle(sub_u_point, sub_nadir_point)\n grid_cell_list.append(cell)\n if len(grid_cell_list) != (n_grids**n_objs):\n raise Exception(\n f\"Unexpected: the number of grid cells is\"\n f\"not equal to {n_grids**n_objs}\"\n )\n\n return grid_cell_list" }, { "identifier": "MOGD", "path": "udao/optimization/soo/mogd.py", "snippet": "class MOGD(SOSolver):\n \"\"\"MOGD solver for single-objective optimization.\n\n Performs gradient descent on input variables by minimizing an\n objective loss and a constraint loss.\n \"\"\"\n\n @dataclass\n class Params:\n learning_rate: float\n \"\"\"learning rate of Adam optimizer applied to input variables\"\"\"\n max_iters: int\n \"\"\"maximum number of iterations for a single local search\"\"\"\n patience: int\n \"\"\"maximum number of iterations without improvement\"\"\"\n 
multistart: int\n \"\"\"number of random starts for gradient descent\"\"\"\n objective_stress: float = 10.0\n \"\"\"stress term for objective functions\"\"\"\n constraint_stress: float = 1e5\n \"\"\"stress term for constraint functions\"\"\"\n strict_rounding: bool = False\n \"\"\"whether strictly rounding integer variables at each iteration. \"\"\"\n batch_size: int = 1\n \"\"\"batch size for gradient descent\"\"\"\n device: Optional[th.device] = field(default_factory=get_default_device)\n \"\"\"device on which to perform torch operations, by default available device.\"\"\"\n dtype: th.dtype = th.float32\n \"\"\"type of the tensors\"\"\"\n\n def __init__(self, params: Params) -> None:\n super().__init__()\n self.lr = params.learning_rate\n self.max_iter = params.max_iters\n self.patience = params.patience\n self.multistart = params.multistart\n self.objective_stress = params.objective_stress\n self.constraint_stress = params.constraint_stress\n self.strict_rounding = params.strict_rounding\n self.batch_size = params.batch_size\n self.device = params.device\n self.dtype = params.dtype\n\n def _get_unprocessed_input_values(\n self,\n numeric_variables: Dict[str, co.NumericVariable],\n input_parameters: Optional[Dict[str, Any]] = None,\n seed: Optional[int] = None,\n ) -> Tuple[Dict[str, th.Tensor], Dict[str, Any]]:\n \"\"\"\n\n Parameters\n ----------\n numeric_variables : Dict[str, co.NumericVariable]\n Numeric variables for which to get random values\n input_parameters : Optional[Dict[str, Any]], optional\n Non decision parts of the input, by default None\n seed : Optional[int], optional\n Random seed, by default None\n\n Returns\n -------\n Tuple[Dict[str, th.Tensor], Dict[str, Any]]\n - random values as a tensor for each numeric variable\n - input parameters valuies\n \"\"\"\n numeric_values: Dict[str, np.ndarray] = {}\n\n for i, (name, variable) in enumerate(numeric_variables.items()):\n numeric_values[name] = co.variable.get_random_variable_values(\n variable, self.batch_size, seed=seed + i if seed is not None else None\n )\n return derive_unprocessed_input(\n input_variables=numeric_values,\n input_parameters=input_parameters,\n device=self.device,\n )\n\n def _get_processed_input_values(\n self,\n numeric_variables: Dict[str, co.NumericVariable],\n data_processor: DataProcessor,\n input_parameters: Optional[Dict[str, Any]] = None,\n seed: Optional[int] = None,\n ) -> Tuple[UdaoInput, UdaoItemShape, Callable[[th.Tensor], TabularContainer]]:\n \"\"\"Get random values for numeric variables\n\n Parameters\n ----------\n numeric_variables : Dict[str, co.NumericVariable]\n Numeric variables on which to apply gradients\n data_processor : DataProcessor\n Data processor to process input variables\n input_parameters : Optional[Dict[str, Any]], optional\n Non decision parts of the input, by default None\n\n Returns\n -------\n Tuple[UdaoInput, UdaoInputShape, Callable[[th.Tensor], TabularContainer]]\n - random values for numeric variables\n - shape of the input\n - function to convert a tensor to a TabularContainer\n \"\"\"\n numeric_values: Dict[str, np.ndarray] = {}\n\n for i, (name, variable) in enumerate(numeric_variables.items()):\n numeric_values[name] = co.variable.get_random_variable_values(\n variable, self.batch_size, seed=seed + i if seed is not None else None\n )\n input_data, iterator = derive_processed_input(\n data_processor=data_processor,\n input_parameters=input_parameters or {},\n input_variables=numeric_values,\n device=self.device,\n )\n make_tabular_container = cast(\n 
UdaoIterator, iterator\n ).get_tabular_features_container\n\n input_data_shape = iterator.shape\n\n return (\n input_data,\n input_data_shape,\n make_tabular_container,\n )\n\n def _get_unprocessed_input_bounds(\n self,\n numeric_variables: Dict[str, co.NumericVariable],\n ) -> Tuple[Dict[str, float], Dict[str, float]]:\n \"\"\"\n\n Parameters\n ----------\n numeric_variables : Dict[str, co.NumericVariable]\n Variables for which to get bounds\n\n Returns\n -------\n Tuple[Dict[str, float], Dict[str, float]]\n - lower bounds of numeric variables\n - upper bounds of numeric variables\n \"\"\"\n lower_numeric_values = {\n name: variable.lower for name, variable in numeric_variables.items()\n }\n upper_numeric_values = {\n name: variable.upper for name, variable in numeric_variables.items()\n }\n return lower_numeric_values, upper_numeric_values\n\n def _get_processed_input_bounds(\n self,\n numeric_variables: Dict[str, co.NumericVariable],\n data_processor: DataProcessor,\n input_parameters: Optional[Dict[str, Any]] = None,\n ) -> Tuple[UdaoInput, UdaoInput]:\n \"\"\"Get bounds of numeric variables\n\n Parameters\n ----------\n numeric_variables : Dict[str, co.NumericVariable]\n Numeric variables on which to apply gradients\n data_processor : DataProcessor\n Data processor to process input variables\n input_parameters : Optional[Dict[str, Any]], optional\n Input parameters, by default None\n\n Returns\n -------\n Tuple[UdaoInput, UdaoInput]\n Lower and upper bounds of numeric\n variables in the form of a UdaoInput\n \"\"\"\n lower_numeric_values = {\n name: variable.lower for name, variable in numeric_variables.items()\n }\n upper_numeric_values = {\n name: variable.upper for name, variable in numeric_variables.items()\n }\n lower_input, _ = derive_processed_input(\n data_processor=data_processor,\n input_parameters=input_parameters,\n input_variables=lower_numeric_values,\n )\n upper_input, _ = derive_processed_input(\n data_processor=data_processor,\n input_parameters=input_parameters,\n input_variables=upper_numeric_values,\n )\n if self.device:\n return lower_input.to(self.device), upper_input.to(self.device)\n else:\n return lower_input, upper_input\n\n def _gradient_descent(\n self,\n problem: co.SOProblem,\n input_data: Union[UdaoInput, Dict],\n optimizer: th.optim.Optimizer,\n ) -> Tuple[int, float, float]:\n \"\"\"Perform a gradient descent step on input variables\n\n Parameters\n ----------\n problem : co.SOProblem\n Single-objective optimization problem\n input_data : Union[UdaoInput, Dict]\n Input data - can have different types depending on whether\n the input variables are processed or not.\n - UdaoInput: the naive input\n - Dict: {\"input_variables\": ..., \"input_parameters\": ...}\n\n optimizer : th.optim.Optimizer\n PyTorch optimizer\n\n Returns\n -------\n Tuple[int, float, float]\n - index of minimum loss\n - minimum loss\n - objective value at minimum loss\n\n Raises\n ------\n UncompliantSolutionError\n If no solution within bounds is found\n \"\"\"\n # Compute objective, constraints and corresponding losses\n\n loss_meta = self._compute_loss(problem, input_data)\n sum_loss = loss_meta[\"sum_loss\"]\n min_loss = loss_meta[\"min_loss\"]\n min_loss_id = loss_meta[\"min_loss_id\"]\n best_obj = loss_meta[\"best_obj\"]\n is_within_constraint = loss_meta[\"is_within_constraint\"]\n\n optimizer.zero_grad()\n sum_loss.backward() # type: ignore\n optimizer.step()\n\n if is_within_constraint and (\n self.within_objective_bounds(best_obj, problem.objective)\n ):\n return 
min_loss_id, min_loss, best_obj\n else:\n raise UncompliantSolutionError(\"No solution within bounds found!\")\n\n def _log_success(\n self,\n problem: co.SOProblem,\n iter: int,\n best_obj: float,\n best_iter: int,\n best_feature_input: Any,\n ) -> None:\n logger.debug(\n f\"Finished at iteration {iter}, best local {problem.objective.name} \"\n f\"found {best_obj:.5f}\"\n f\" \\nat iteration {best_iter},\"\n f\" \\nwith vars: {best_feature_input}, for \"\n f\"objective {problem.objective} and constraints {problem.constraints}\"\n )\n\n def _log_failure(\n self,\n problem: co.SOProblem,\n iter: int,\n ) -> None:\n logger.debug(\n f\"Finished at iteration {iter}, no valid {problem.objective.name}\"\n f\" found for input parameters {problem.input_parameters} with \"\n f\"objective {problem.objective} and constraints {problem.constraints}\"\n )\n\n def _unprocessed_single_start_opt(\n self,\n problem: co.SOProblem,\n seed: Optional[int] = None,\n ) -> Tuple[float, Dict[str, float], float]:\n \"\"\"Perform a single start optimization, in the case where\n no data processor is defined.\n The input variables are transformed to a dictionary of tensors and are\n optimized directly, by being passed to the objective function along\n with the input parameters.\n \"\"\"\n best_iter: Optional[int] = None\n best_loss = np.inf\n best_obj: Optional[float] = None\n best_feature_input: Optional[Dict[str, th.Tensor]] = None\n\n (\n input_variable_values,\n input_parameter_values,\n ) = self._get_unprocessed_input_values(\n cast(Dict[str, co.NumericVariable], problem.variables),\n input_parameters=problem.input_parameters,\n seed=seed,\n )\n lower_input, upper_input = self._get_unprocessed_input_bounds(\n cast(Dict[str, co.NumericVariable], problem.variables)\n )\n for name in input_variable_values:\n input_variable_values[name].requires_grad_(True)\n optimizer = optim.Adam([t for t in input_variable_values.values()], lr=self.lr)\n i = 0\n while i < self.max_iter:\n with th.no_grad():\n input_variable_values_backup = {\n k: v.detach().clone() for k, v in input_variable_values.items()\n }\n try:\n min_loss_id, min_loss, local_best_obj = self._gradient_descent(\n problem,\n {\n \"input_variables\": input_variable_values,\n \"input_parameters\": input_parameter_values,\n },\n optimizer=optimizer,\n )\n except UncompliantSolutionError:\n pass\n else:\n if min_loss < best_loss:\n best_loss = min_loss\n best_obj = local_best_obj\n best_feature_input = {\n k: v[min_loss_id].reshape(1, -1)\n for k, v in input_variable_values_backup.items()\n }\n best_iter = i\n\n with th.no_grad():\n # Update input_variable_values with constrained values\n for k in input_variable_values:\n input_variable_values[k].data = th.clip(\n input_variable_values[k].data,\n lower_input[k],\n upper_input[k],\n )\n\n if self.strict_rounding:\n # Round all integer variables at each iteration\n for k in input_variable_values:\n if isinstance(problem.variables[k], co.IntegerVariable):\n input_variable_values[k].data = input_variable_values[\n k\n ].data.round()\n\n if best_iter is not None and i > best_iter + self.patience:\n break\n i += 1\n\n if best_iter is None or best_obj is None or best_feature_input is None:\n self._log_failure(problem, i)\n raise NoSolutionError\n\n if not self.strict_rounding:\n for k in best_feature_input:\n if isinstance(problem.variables[k], co.IntegerVariable):\n best_feature_input[k].data = best_feature_input[k].data.round()\n loss_meta = self._compute_loss(\n problem,\n {\n \"input_variables\": best_feature_input,\n 
\"input_parameters\": input_parameter_values,\n },\n )\n best_loss = loss_meta[\"min_loss\"]\n best_obj = loss_meta[\"best_obj\"]\n is_within_constraint = loss_meta[\"is_within_constraint\"]\n if (\n best_obj is None\n or not is_within_constraint\n or not self.within_objective_bounds(best_obj, problem.objective)\n ):\n self._log_failure(problem, i)\n raise NoSolutionError\n\n best_raw_vars = {\n name: best_feature_input[name]\n .cpu()\n .numpy()\n .squeeze()\n .tolist() # turn np.ndarray to float\n for name in problem.variables\n }\n self._log_success(problem, i, best_obj, best_iter, best_raw_vars)\n return best_obj, best_raw_vars, best_loss\n\n def _processed_single_start_opt(\n self,\n problem: co.SOProblem,\n seed: Optional[int] = None,\n ) -> Tuple[float, Dict[str, float], float]:\n \"\"\"Perform a single start optimization, in the case where\n a data processor is defined.\n\n input variables and parameters are processed by the data processor.\n Gradient descent is performed on the processed input variables.\n Variables are then inverse transformed to get the raw variables.\n \"\"\"\n if not problem.data_processor:\n raise Exception(\"Data processor is not defined!\")\n best_iter: Optional[int] = None\n best_loss = np.inf\n best_obj: Optional[float] = None\n best_feature_input: Optional[th.Tensor] = None\n # Random numeric variables and their characteristics\n (\n input_data,\n input_data_shape,\n make_tabular_container,\n ) = self._get_processed_input_values(\n cast(Dict[str, co.NumericVariable], problem.variables),\n data_processor=problem.data_processor,\n input_parameters=problem.input_parameters,\n seed=seed,\n )\n # Bounds of numeric variables\n lower_input, upper_input = self._get_processed_input_bounds(\n cast(Dict[str, co.NumericVariable], problem.variables),\n data_processor=problem.data_processor,\n input_parameters=problem.input_parameters,\n )\n # Indices of numeric variables on which to apply gradients\n mask = th.tensor(\n [i in problem.variables for i in input_data_shape.feature_names],\n device=self.device,\n )\n grad_indices = th.nonzero(mask, as_tuple=False).squeeze()\n input_vars_subvector = input_data.features[:, grad_indices].clone().detach()\n input_vars_subvector.requires_grad_(True)\n\n optimizer = optim.Adam([input_vars_subvector], lr=self.lr)\n i = 0\n while i < self.max_iter:\n input_data.features = input_data.features.clone().detach()\n input_data.features[:, grad_indices] = input_vars_subvector\n try:\n min_loss_id, min_loss, local_best_obj = self._gradient_descent(\n problem,\n input_data,\n optimizer=optimizer,\n )\n except UncompliantSolutionError:\n pass\n else:\n if min_loss < best_loss:\n best_loss = min_loss\n best_obj = local_best_obj\n best_feature_input = (\n input_data.features.detach()[min_loss_id].clone().reshape(1, -1)\n )\n best_iter = i\n\n with th.no_grad():\n # Update input_vars_subvector with constrained values\n input_vars_subvector.data = th.clip(\n input_vars_subvector.data,\n # Use .data to avoid gradient tracking during update\n lower_input.features[0, grad_indices],\n upper_input.features[0, grad_indices],\n )\n\n if self.strict_rounding:\n # Round all integer variables at each iteration\n input_data.features[:, grad_indices] = input_vars_subvector.data\n feature_container = make_tabular_container(\n input_data.features.detach()\n )\n best_raw_df = problem.data_processor.inverse_transform(\n feature_container, \"tabular_features\"\n )\n numeric_values: Dict[str, np.ndarray] = {\n name: best_raw_df[[name]].values.round()[:, 0]\n if 
isinstance(variable, co.IntegerVariable)\n else best_raw_df[[name]].values[:, 0]\n for name, variable in problem.variables.items()\n }\n input_data_raw, _ = derive_processed_input(\n data_processor=problem.data_processor,\n input_parameters=problem.input_parameters or {},\n input_variables=numeric_values,\n device=self.device,\n )\n input_vars_subvector.data = input_data_raw.features[:, grad_indices]\n\n if best_iter is not None and i > best_iter + self.patience:\n break\n i += 1\n\n if best_iter is None or best_obj is None or best_feature_input is None:\n self._log_failure(problem, i)\n raise NoSolutionError\n\n with th.no_grad():\n best_feature_input = cast(th.Tensor, best_feature_input)\n feature_container = make_tabular_container(best_feature_input)\n best_raw_df = problem.data_processor.inverse_transform(\n feature_container, \"tabular_features\"\n )\n if not self.strict_rounding:\n best_raw_vars: Dict[str, Any] = {\n name: best_raw_df[[name]].values.round()[:, 0]\n if isinstance(variable, co.IntegerVariable)\n else best_raw_df[[name]].values[:, 0]\n for name, variable in problem.variables.items()\n }\n input_data_best_raw, _ = derive_processed_input(\n data_processor=problem.data_processor,\n input_parameters=problem.input_parameters or {},\n input_variables=best_raw_vars,\n device=self.device,\n )\n loss_meta = self._compute_loss(problem, input_data_best_raw)\n best_loss = loss_meta[\"min_loss\"]\n best_obj = loss_meta[\"best_obj\"]\n is_within_constraint = loss_meta[\"is_within_constraint\"]\n if (\n best_obj is None\n or not is_within_constraint\n or not self.within_objective_bounds(best_obj, problem.objective)\n ):\n self._log_failure(problem, i)\n raise NoSolutionError\n else:\n best_raw_vars = {\n name: best_raw_df[[name]]\n .values.squeeze()\n .tolist() # turn np.ndarray to float\n for name in problem.variables\n }\n self._log_success(problem, i, best_obj, best_iter, best_raw_vars)\n return best_obj, best_raw_vars, best_loss\n\n def _single_start_opt(\n self,\n problem: co.SOProblem,\n seed: Optional[int] = None,\n ) -> Tuple[float, Dict[str, float], float]:\n \"\"\"Perform a single start optimization.\n Categorical variables are fixed to the values in input_parameters.\n (a grid search of categorical variables is performed in solve)\n This is where gradient descent is performed.\n\n Parameters\n ----------\n numeric_variables : Dict[str, co.NumericVariable]\n Numeric variables on which to apply gradients\n objective : co.Objective\n Objective to be optimized\n constraints : Sequence[co.Constraint]\n Constraints to be satisfied\n input_parameters : Optional[Dict[str, Any]], optional\n Non decision parts of the input, by default None\n seed: int, by default None\n random seed\n\n Returns\n -------\n Tuple[float, Dict[str, float], flat]\n - objective value\n - variables\n - best loss value\n\n Raises\n ------\n NoSolutionError\n No valid solution is found\n \"\"\"\n\n if not problem.data_processor:\n return self._unprocessed_single_start_opt(problem, seed=seed)\n else:\n return self._processed_single_start_opt(problem, seed=seed)\n\n def solve(\n self, problem: co.SOProblem, seed: Optional[int] = None\n ) -> Tuple[float, Dict[str, float]]:\n if seed is not None:\n th.manual_seed(seed)\n if self.device:\n for constraint in problem.constraints:\n constraint.to(self.device)\n problem.objective.to(self.device)\n\n categorical_variables = [\n name\n for name, variable in problem.variables.items()\n if isinstance(variable, co.EnumVariable)\n ]\n numeric_variables = {\n name: 
variable\n for name, variable in problem.variables.items()\n if isinstance(variable, co.NumericVariable)\n }\n\n meshed_categorical_vars = self.get_meshed_categorical_vars(problem.variables)\n\n if meshed_categorical_vars is None:\n meshed_categorical_vars = np.array([0])\n\n best_loss_list: List[float] = []\n obj_list: List[float] = []\n vars_list: List[Dict] = []\n for i in range(self.multistart):\n for categorical_cell in meshed_categorical_vars:\n categorical_values = {\n name: categorical_cell[ind]\n for ind, name in enumerate(categorical_variables)\n } # from {id: value} to {name: value}\n fixed_values = {\n **categorical_values,\n **(problem.input_parameters or {}),\n }\n try:\n (\n obj_pred,\n best_raw_vars,\n best_loss,\n ) = self._single_start_opt(\n co.SOProblem(\n variables=numeric_variables, # type: ignore\n input_parameters=fixed_values,\n objective=problem.objective,\n constraints=problem.constraints or [],\n data_processor=problem.data_processor,\n ),\n seed=seed + i if seed is not None else None,\n )\n except NoSolutionError:\n continue\n else:\n best_loss_list.append(best_loss)\n obj_list.append(obj_pred)\n vars_list.append(best_raw_vars)\n if not obj_list:\n raise NoSolutionError(\"No valid solutions and variables found!\")\n\n idx = np.argmin(best_loss_list)\n vars_cand = vars_list[idx]\n if vars_cand is not None:\n obj_cand = obj_list[idx]\n if obj_cand is None:\n raise Exception(f\"Unexpected objs_list[{idx}] is None.\")\n else:\n raise NoSolutionError(\"No valid solutions and variables found!\")\n\n return obj_cand, vars_cand\n\n ##################\n ## _loss ##\n ##################\n def constraints_loss(\n self, constraint_values: List[th.Tensor], constraints: Sequence[co.Constraint]\n ) -> th.Tensor:\n \"\"\"\n compute loss of the values of each constraint function fixme: double-check\n\n Parameters\n ----------\n constraint_values : List[th.Tensor]\n values of each constraint function\n constraints : Sequence[co.Constraint]\n constraint functions\n\n Returns\n -------\n th.Tensor\n loss of the values of each constraint function\n\n \"\"\"\n\n # vars: a tensor\n # get loss for constraint functions defined in the problem setting\n total_loss = th.zeros_like(\n constraint_values[0], device=self.device, dtype=self.dtype\n )\n for i, (constraint_value, constraint) in enumerate(\n zip(constraint_values, constraints)\n ):\n stress = (\n self.objective_stress\n if isinstance(constraint, co.Objective)\n else self.constraint_stress\n )\n constraint_violation = th.zeros_like(\n constraint_values[0], device=self.device, dtype=self.dtype\n )\n if constraint.upper is not None and constraint.lower is not None:\n if constraint.upper == constraint.lower:\n constraint_violation = th.abs(constraint_value - constraint.upper)\n else:\n normed_constraint = (constraint_value - constraint.lower) / (\n constraint.upper - constraint.lower\n )\n constraint_violation = th.where(\n (normed_constraint < 0) | (normed_constraint > 1),\n (normed_constraint - 0.5),\n 0,\n )\n elif constraint.lower is not None:\n constraint_violation = th.relu(constraint.lower - constraint_value)\n elif constraint.upper is not None:\n constraint_violation = th.relu(constraint_value - constraint.upper)\n total_loss += (\n constraint_violation**2 + stress * (constraint_violation > 0).float()\n )\n\n return total_loss\n\n def objective_loss(\n self, objective_value: th.Tensor, objective: co.Objective\n ) -> th.Tensor:\n \"\"\"Compute the objective loss for a given objective value:\n - if no bounds are specified, use 
the squared objective value\n - if both bounds are specified, use the squared normalized\n objective value if it is within the bounds, otherwise\n add a stress term to a squared distance to middle of the bounds\n\n Parameters\n ----------\n objective_value : th.Tensor\n Tensor of objective values\n objective : co.Objective\n Objective function\n\n Returns\n -------\n th.Tensor\n Tensor of objective losses\n\n Raises\n ------\n NotImplementedError\n If only one bound is specified for the objective\n\n \"\"\"\n\n if objective.upper is None and objective.lower is None:\n loss = (\n th.sign(objective_value) * (objective_value**2) * objective.direction\n )\n elif objective.upper is not None and objective.lower is not None:\n norm_cst_obj_pred = (objective_value - objective.lower) / (\n objective.upper - objective.lower\n ) # scaled\n loss = th.where(\n (norm_cst_obj_pred < 0) | (norm_cst_obj_pred > 1),\n (norm_cst_obj_pred - 0.5) ** 2 + self.objective_stress,\n norm_cst_obj_pred * objective.direction,\n )\n else:\n raise NotImplementedError(\"Objective with only one bound is not supported\")\n return loss\n\n def _obj_forward(\n self,\n optimization_element: co.Constraint,\n input_data: Union[UdaoInput, Dict],\n ) -> th.Tensor:\n if isinstance(input_data, UdaoInput):\n return optimization_element.function(input_data) # type: ignore\n else:\n # Dict when unprocessed inputs\n return optimization_element.function(**input_data)\n\n def _compute_loss(\n self, problem: co.SOProblem, input_data: Union[UdaoInput, Dict]\n ) -> Dict[str, Any]:\n obj_output = self._obj_forward(problem.objective, input_data)\n objective_loss = self.objective_loss(obj_output, problem.objective)\n constraint_loss = th.zeros_like(objective_loss, device=self.device)\n\n if problem.constraints:\n const_outputs = [\n self._obj_forward(constraint, input_data)\n for constraint in problem.constraints\n ]\n constraint_loss = self.constraints_loss(const_outputs, problem.constraints)\n\n loss = objective_loss + constraint_loss\n min_loss_id = int(th.argmin(loss).cpu().item())\n\n return {\n \"sum_loss\": th.sum(loss),\n \"min_loss\": th.min(loss).cpu().item(),\n \"min_loss_id\": min_loss_id,\n \"best_obj\": obj_output[min_loss_id].cpu().item(),\n \"is_within_constraint\": bool((constraint_loss[min_loss_id] == 0).item()),\n }\n\n ##################\n ## _get (vars) ##\n ##################\n\n def get_meshed_categorical_vars(\n self, variables: Dict[str, co.Variable]\n ) -> Optional[np.ndarray]:\n \"\"\"\n Get combinations of all categorical (binary, enum) variables\n\n Parameters\n ----------\n variables : Dict[str, co.Variable]\n Variables to be optimized\n\n Returns\n -------\n Optional[np.ndarray]\n Combinations of all categorical variables\n of shape (n_samples, n_vars)\n \"\"\"\n cv_value_list = [\n variable.values\n for variable in variables.values()\n if isinstance(variable, co.EnumVariable)\n ]\n if not cv_value_list:\n return None\n meshed_cv_value_list = [x_.reshape(-1, 1) for x_ in np.meshgrid(*cv_value_list)]\n meshed_cv_value = np.concatenate(meshed_cv_value_list, axis=1)\n return meshed_cv_value\n\n ##################\n ## _check ##\n ##################\n\n @staticmethod\n def within_objective_bounds(obj_value: float, objective: co.Objective) -> bool:\n \"\"\"\n check whether violating the objective value var_ranges\n :param pred_dict: dict, keys are objective names,\n values are objective values\n :param obj_bounds: dict, keys are objective names,\n values are lower and upper var_ranges of each objective value\n :return: 
True or False\n \"\"\"\n within_bounds = True\n if objective.upper is not None:\n within_bounds = obj_value <= objective.upper\n if objective.lower is not None:\n within_bounds = within_bounds and obj_value >= objective.lower\n return within_bounds" }, { "identifier": "Point", "path": "udao/optimization/utils/moo_utils.py", "snippet": "class Point:\n def __init__(self, objs: np.ndarray, vars: Optional[Dict] = None) -> None:\n \"\"\"\n A point in the objective space.\n Variables are optional, and are not specified for imaginary points\n (e.g., utopia and nadir)\n\n Parameters\n ----------\n objs : np.ndarray\n Array of objective values of shape (n_objs,)\n vars :np.ndarray, optional\n Array of variable values of shape (n_vars,), by default None\n \"\"\"\n self.objs = objs\n self.vars = vars\n self.n_objs = objs.shape[0]\n\n def __repr__(self) -> str:\n return f\"Point(objs={self.objs}, vars={self.vars})\"\n\n def __eq__(self, other: \"Point\") -> bool: # type: ignore\n return bool(np.all(self.objs == other.objs) and np.all(self.vars == other.vars))" }, { "identifier": "Rectangle", "path": "udao/optimization/utils/moo_utils.py", "snippet": "class Rectangle:\n def __init__(self, utopia: Point, nadir: Point) -> None:\n \"\"\"\n\n Parameters\n ----------\n utopia : Points\n utopia point\n nadir : Points\n nadir point\n \"\"\"\n\n self.upper_bounds = nadir.objs\n self.lower_bounds = utopia.objs\n self.n_objs = nadir.objs.shape[0]\n self.volume = self.cal_volume(nadir.objs, utopia.objs)\n self.neg_vol = -self.volume\n self.utopia = utopia\n self.nadir = nadir\n\n def __repr__(self) -> str:\n return f\"Rectangle(utopia={self.utopia}, nadir={self.nadir})\"\n\n def cal_volume(self, upper_bounds: np.ndarray, lower_bounds: np.ndarray) -> float:\n \"\"\"\n Calculate the volume of the hyper_rectangle\n\n Parameters\n ----------\n upper_bounds : np.ndarray(\n Array of upper bounds of the hyper_rectangle, of shape (n_objs,)\n lower_bounds : np.ndarrays\n Array of lower bounds of the hyper_rectangle of shape (n_objs,)\n\n Returns\n -------\n float\n volume of the hyper_rectangle\n \"\"\"\n volume = np.abs(np.prod(upper_bounds - lower_bounds))\n return volume\n\n # Override the `__lt__()` function to make `Rectangles`\n # class work with min-heap (referred from VLDB2022)\n def __lt__(self, other: \"Rectangle\") -> bool:\n return self.neg_vol < other.neg_vol\n\n def __eq__(self, other: \"Rectangle\") -> bool: # type: ignore\n return bool(\n np.all(self.upper_bounds == other.upper_bounds)\n and np.all(self.lower_bounds == other.lower_bounds)\n )" } ]
from typing import cast from ....data.handler.data_processor import DataProcessor from ....model.utils.utils import set_deterministic_torch from ...concepts.problem import MOProblem from ...moo.progressive_frontier import ParallelProgressiveFrontier from ...soo.mogd import MOGD from ...utils.moo_utils import Point, Rectangle import numpy as np import pytest import torch as th
12,177
@pytest.fixture def ppf(data_processor: DataProcessor, mogd: MOGD) -> ParallelProgressiveFrontier: ppf = ParallelProgressiveFrontier( params=ParallelProgressiveFrontier.Params( processes=1, n_grids=2, max_iters=4, ), solver=mogd, ) return ppf class TestParallelProgressiveFrontier: def test_create_grid_cells(self, ppf: ParallelProgressiveFrontier) -> None: utopia = Point(np.array([0, 2, 0])) nadir = Point(np.array([4, 10, 1])) grid_rectangles = ppf._create_grid_cells(utopia, nadir, 2, 3) assert len(grid_rectangles) == 8 expected = [ Rectangle( utopia=Point(objs=np.array([0.0, 2.0, 0.0])), nadir=Point(objs=np.array([2.0, 6.0, 0.5])), ), Rectangle( utopia=Point(objs=np.array([0.0, 2.0, 0.5])), nadir=Point(objs=np.array([2.0, 6.0, 1.0])), ), Rectangle( utopia=Point(objs=np.array([0.0, 6.0, 0.0])), nadir=Point(objs=np.array([2.0, 10.0, 0.5])), ), Rectangle( utopia=Point(objs=np.array([0.0, 6.0, 0.5])), nadir=Point(objs=np.array([2.0, 10.0, 1.0])), ), Rectangle( utopia=Point(objs=np.array([2.0, 2.0, 0.0])), nadir=Point(objs=np.array([4.0, 6.0, 0.5])), ), Rectangle( utopia=Point(objs=np.array([2.0, 2.0, 0.5])), nadir=Point(objs=np.array([4.0, 6.0, 1.0])), ), Rectangle( utopia=Point(objs=np.array([2.0, 6.0, 0.0])), nadir=Point(objs=np.array([4.0, 10.0, 0.5])), ), Rectangle( utopia=Point(objs=np.array([2.0, 6.0, 0.5])), nadir=Point(objs=np.array([4.0, 10.0, 1.0])), ), ] for i, rect in enumerate(expected): assert rect == grid_rectangles[i] def test_solve_with_two_objectives(
@pytest.fixture def ppf(data_processor: DataProcessor, mogd: MOGD) -> ParallelProgressiveFrontier: ppf = ParallelProgressiveFrontier( params=ParallelProgressiveFrontier.Params( processes=1, n_grids=2, max_iters=4, ), solver=mogd, ) return ppf class TestParallelProgressiveFrontier: def test_create_grid_cells(self, ppf: ParallelProgressiveFrontier) -> None: utopia = Point(np.array([0, 2, 0])) nadir = Point(np.array([4, 10, 1])) grid_rectangles = ppf._create_grid_cells(utopia, nadir, 2, 3) assert len(grid_rectangles) == 8 expected = [ Rectangle( utopia=Point(objs=np.array([0.0, 2.0, 0.0])), nadir=Point(objs=np.array([2.0, 6.0, 0.5])), ), Rectangle( utopia=Point(objs=np.array([0.0, 2.0, 0.5])), nadir=Point(objs=np.array([2.0, 6.0, 1.0])), ), Rectangle( utopia=Point(objs=np.array([0.0, 6.0, 0.0])), nadir=Point(objs=np.array([2.0, 10.0, 0.5])), ), Rectangle( utopia=Point(objs=np.array([0.0, 6.0, 0.5])), nadir=Point(objs=np.array([2.0, 10.0, 1.0])), ), Rectangle( utopia=Point(objs=np.array([2.0, 2.0, 0.0])), nadir=Point(objs=np.array([4.0, 6.0, 0.5])), ), Rectangle( utopia=Point(objs=np.array([2.0, 2.0, 0.5])), nadir=Point(objs=np.array([4.0, 6.0, 1.0])), ), Rectangle( utopia=Point(objs=np.array([2.0, 6.0, 0.0])), nadir=Point(objs=np.array([4.0, 10.0, 0.5])), ), Rectangle( utopia=Point(objs=np.array([2.0, 6.0, 0.5])), nadir=Point(objs=np.array([4.0, 10.0, 1.0])), ), ] for i, rect in enumerate(expected): assert rect == grid_rectangles[i] def test_solve_with_two_objectives(
self, ppf: ParallelProgressiveFrontier, two_obj_problem: MOProblem
2
2023-12-20 09:10:42+00:00
16k
Azure-Samples/functions-python-web-crawler
.venv/Lib/site-packages/urllib3/connection.py
[ { "identifier": "HTTPHeaderDict", "path": ".venv/Lib/site-packages/urllib3/_collections.py", "snippet": "class HTTPHeaderDict(typing.MutableMapping[str, str]):\n \"\"\"\n :param headers:\n An iterable of field-value pairs. Must not contain multiple field names\n when compared case-insensitively.\n\n :param kwargs:\n Additional field-value pairs to pass in to ``dict.update``.\n\n A ``dict`` like container for storing HTTP Headers.\n\n Field names are stored and compared case-insensitively in compliance with\n RFC 7230. Iteration provides the first case-sensitive key seen for each\n case-insensitive pair.\n\n Using ``__setitem__`` syntax overwrites fields that compare equal\n case-insensitively in order to maintain ``dict``'s api. For fields that\n compare equal, instead create a new ``HTTPHeaderDict`` and use ``.add``\n in a loop.\n\n If multiple fields that are equal case-insensitively are passed to the\n constructor or ``.update``, the behavior is undefined and some will be\n lost.\n\n >>> headers = HTTPHeaderDict()\n >>> headers.add('Set-Cookie', 'foo=bar')\n >>> headers.add('set-cookie', 'baz=quxx')\n >>> headers['content-length'] = '7'\n >>> headers['SET-cookie']\n 'foo=bar, baz=quxx'\n >>> headers['Content-Length']\n '7'\n \"\"\"\n\n _container: typing.MutableMapping[str, list[str]]\n\n def __init__(self, headers: ValidHTTPHeaderSource | None = None, **kwargs: str):\n super().__init__()\n self._container = {} # 'dict' is insert-ordered\n if headers is not None:\n if isinstance(headers, HTTPHeaderDict):\n self._copy_from(headers)\n else:\n self.extend(headers)\n if kwargs:\n self.extend(kwargs)\n\n def __setitem__(self, key: str, val: str) -> None:\n # avoid a bytes/str comparison by decoding before httplib\n if isinstance(key, bytes):\n key = key.decode(\"latin-1\")\n self._container[key.lower()] = [key, val]\n\n def __getitem__(self, key: str) -> str:\n val = self._container[key.lower()]\n return \", \".join(val[1:])\n\n def __delitem__(self, key: str) -> None:\n del self._container[key.lower()]\n\n def __contains__(self, key: object) -> bool:\n if isinstance(key, str):\n return key.lower() in self._container\n return False\n\n def setdefault(self, key: str, default: str = \"\") -> str:\n return super().setdefault(key, default)\n\n def __eq__(self, other: object) -> bool:\n maybe_constructable = ensure_can_construct_http_header_dict(other)\n if maybe_constructable is None:\n return False\n else:\n other_as_http_header_dict = type(self)(maybe_constructable)\n\n return {k.lower(): v for k, v in self.itermerged()} == {\n k.lower(): v for k, v in other_as_http_header_dict.itermerged()\n }\n\n def __ne__(self, other: object) -> bool:\n return not self.__eq__(other)\n\n def __len__(self) -> int:\n return len(self._container)\n\n def __iter__(self) -> typing.Iterator[str]:\n # Only provide the originally cased names\n for vals in self._container.values():\n yield vals[0]\n\n def discard(self, key: str) -> None:\n try:\n del self[key]\n except KeyError:\n pass\n\n def add(self, key: str, val: str, *, combine: bool = False) -> None:\n \"\"\"Adds a (name, value) pair, doesn't overwrite the value if it already\n exists.\n\n If this is called with combine=True, instead of adding a new header value\n as a distinct item during iteration, this will instead append the value to\n any existing header value with a comma. 
If no existing header value exists\n for the key, then the value will simply be added, ignoring the combine parameter.\n\n >>> headers = HTTPHeaderDict(foo='bar')\n >>> headers.add('Foo', 'baz')\n >>> headers['foo']\n 'bar, baz'\n >>> list(headers.items())\n [('foo', 'bar'), ('foo', 'baz')]\n >>> headers.add('foo', 'quz', combine=True)\n >>> list(headers.items())\n [('foo', 'bar, baz, quz')]\n \"\"\"\n # avoid a bytes/str comparison by decoding before httplib\n if isinstance(key, bytes):\n key = key.decode(\"latin-1\")\n key_lower = key.lower()\n new_vals = [key, val]\n # Keep the common case aka no item present as fast as possible\n vals = self._container.setdefault(key_lower, new_vals)\n if new_vals is not vals:\n # if there are values here, then there is at least the initial\n # key/value pair\n assert len(vals) >= 2\n if combine:\n vals[-1] = vals[-1] + \", \" + val\n else:\n vals.append(val)\n\n def extend(self, *args: ValidHTTPHeaderSource, **kwargs: str) -> None:\n \"\"\"Generic import function for any type of header-like object.\n Adapted version of MutableMapping.update in order to insert items\n with self.add instead of self.__setitem__\n \"\"\"\n if len(args) > 1:\n raise TypeError(\n f\"extend() takes at most 1 positional arguments ({len(args)} given)\"\n )\n other = args[0] if len(args) >= 1 else ()\n\n if isinstance(other, HTTPHeaderDict):\n for key, val in other.iteritems():\n self.add(key, val)\n elif isinstance(other, typing.Mapping):\n for key, val in other.items():\n self.add(key, val)\n elif isinstance(other, typing.Iterable):\n other = typing.cast(typing.Iterable[typing.Tuple[str, str]], other)\n for key, value in other:\n self.add(key, value)\n elif hasattr(other, \"keys\") and hasattr(other, \"__getitem__\"):\n # THIS IS NOT A TYPESAFE BRANCH\n # In this branch, the object has a `keys` attr but is not a Mapping or any of\n # the other types indicated in the method signature. We do some stuff with\n # it as though it partially implements the Mapping interface, but we're not\n # doing that stuff safely AT ALL.\n for key in other.keys():\n self.add(key, other[key])\n\n for key, value in kwargs.items():\n self.add(key, value)\n\n @typing.overload\n def getlist(self, key: str) -> list[str]:\n ...\n\n @typing.overload\n def getlist(self, key: str, default: _DT) -> list[str] | _DT:\n ...\n\n def getlist(\n self, key: str, default: _Sentinel | _DT = _Sentinel.not_passed\n ) -> list[str] | _DT:\n \"\"\"Returns a list of all the values for the named field. 
Returns an\n empty list if the key doesn't exist.\"\"\"\n try:\n vals = self._container[key.lower()]\n except KeyError:\n if default is _Sentinel.not_passed:\n # _DT is unbound; empty list is instance of List[str]\n return []\n # _DT is bound; default is instance of _DT\n return default\n else:\n # _DT may or may not be bound; vals[1:] is instance of List[str], which\n # meets our external interface requirement of `Union[List[str], _DT]`.\n return vals[1:]\n\n def _prepare_for_method_change(self) -> Self:\n \"\"\"\n Remove content-specific header fields before changing the request\n method to GET or HEAD according to RFC 9110, Section 15.4.\n \"\"\"\n content_specific_headers = [\n \"Content-Encoding\",\n \"Content-Language\",\n \"Content-Location\",\n \"Content-Type\",\n \"Content-Length\",\n \"Digest\",\n \"Last-Modified\",\n ]\n for header in content_specific_headers:\n self.discard(header)\n return self\n\n # Backwards compatibility for httplib\n getheaders = getlist\n getallmatchingheaders = getlist\n iget = getlist\n\n # Backwards compatibility for http.cookiejar\n get_all = getlist\n\n def __repr__(self) -> str:\n return f\"{type(self).__name__}({dict(self.itermerged())})\"\n\n def _copy_from(self, other: HTTPHeaderDict) -> None:\n for key in other:\n val = other.getlist(key)\n self._container[key.lower()] = [key, *val]\n\n def copy(self) -> HTTPHeaderDict:\n clone = type(self)()\n clone._copy_from(self)\n return clone\n\n def iteritems(self) -> typing.Iterator[tuple[str, str]]:\n \"\"\"Iterate over all header lines, including duplicate ones.\"\"\"\n for key in self:\n vals = self._container[key.lower()]\n for val in vals[1:]:\n yield vals[0], val\n\n def itermerged(self) -> typing.Iterator[tuple[str, str]]:\n \"\"\"Iterate over all headers, merging duplicate ones together.\"\"\"\n for key in self:\n val = self._container[key.lower()]\n yield val[0], \", \".join(val[1:])\n\n def items(self) -> HTTPHeaderDictItemView: # type: ignore[override]\n return HTTPHeaderDictItemView(self)\n\n def _has_value_for_header(self, header_name: str, potential_value: str) -> bool:\n if header_name in self:\n return potential_value in self._container[header_name.lower()][1:]\n return False\n\n def __ior__(self, other: object) -> HTTPHeaderDict:\n # Supports extending a header dict in-place using operator |=\n # combining items with add instead of __setitem__\n maybe_constructable = ensure_can_construct_http_header_dict(other)\n if maybe_constructable is None:\n return NotImplemented\n self.extend(maybe_constructable)\n return self\n\n def __or__(self, other: object) -> HTTPHeaderDict:\n # Supports merging header dicts using operator |\n # combining items with add instead of __setitem__\n maybe_constructable = ensure_can_construct_http_header_dict(other)\n if maybe_constructable is None:\n return NotImplemented\n result = self.copy()\n result.extend(maybe_constructable)\n return result\n\n def __ror__(self, other: object) -> HTTPHeaderDict:\n # Supports merging header dicts using operator | when other is on left side\n # combining items with add instead of __setitem__\n maybe_constructable = ensure_can_construct_http_header_dict(other)\n if maybe_constructable is None:\n return NotImplemented\n result = type(self)(maybe_constructable)\n result.extend(self)\n return result" }, { "identifier": "assert_header_parsing", "path": ".venv/Lib/site-packages/urllib3/util/response.py", "snippet": "def assert_header_parsing(headers: httplib.HTTPMessage) -> None:\n \"\"\"\n Asserts whether all headers have been 
successfully parsed.\n Extracts encountered errors from the result of parsing headers.\n\n Only works on Python 3.\n\n :param http.client.HTTPMessage headers: Headers to verify.\n\n :raises urllib3.exceptions.HeaderParsingError:\n If parsing errors are found.\n \"\"\"\n\n # This will fail silently if we pass in the wrong kind of parameter.\n # To make debugging easier add an explicit check.\n if not isinstance(headers, httplib.HTTPMessage):\n raise TypeError(f\"expected httplib.Message, got {type(headers)}.\")\n\n unparsed_data = None\n\n # get_payload is actually email.message.Message.get_payload;\n # we're only interested in the result if it's not a multipart message\n if not headers.is_multipart():\n payload = headers.get_payload()\n\n if isinstance(payload, (bytes, str)):\n unparsed_data = payload\n\n # httplib is assuming a response body is available\n # when parsing headers even when httplib only sends\n # header data to parse_headers() This results in\n # defects on multipart responses in particular.\n # See: https://github.com/urllib3/urllib3/issues/800\n\n # So we ignore the following defects:\n # - StartBoundaryNotFoundDefect:\n # The claimed start boundary was never found.\n # - MultipartInvariantViolationDefect:\n # A message claimed to be a multipart but no subparts were found.\n defects = [\n defect\n for defect in headers.defects\n if not isinstance(\n defect, (StartBoundaryNotFoundDefect, MultipartInvariantViolationDefect)\n )\n ]\n\n if defects or unparsed_data:\n raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)" }, { "identifier": "_DEFAULT_TIMEOUT", "path": ".venv/Lib/site-packages/urllib3/util/timeout.py", "snippet": "_DEFAULT_TIMEOUT: Final[_TYPE_DEFAULT] = _TYPE_DEFAULT.token" }, { "identifier": "_TYPE_TIMEOUT", "path": ".venv/Lib/site-packages/urllib3/util/timeout.py", "snippet": "_TYPE_TIMEOUT = typing.Optional[typing.Union[float, _TYPE_DEFAULT]]" }, { "identifier": "Timeout", "path": ".venv/Lib/site-packages/urllib3/util/timeout.py", "snippet": "class Timeout:\n \"\"\"Timeout configuration.\n\n Timeouts can be defined as a default for a pool:\n\n .. code-block:: python\n\n import urllib3\n\n timeout = urllib3.util.Timeout(connect=2.0, read=7.0)\n\n http = urllib3.PoolManager(timeout=timeout)\n\n resp = http.request(\"GET\", \"https://example.com/\")\n\n print(resp.status)\n\n Or per-request (which overrides the default for the pool):\n\n .. code-block:: python\n\n response = http.request(\"GET\", \"https://example.com/\", timeout=Timeout(10))\n\n Timeouts can be disabled by setting all the parameters to ``None``:\n\n .. code-block:: python\n\n no_timeout = Timeout(connect=None, read=None)\n response = http.request(\"GET\", \"https://example.com/\", timeout=no_timeout)\n\n\n :param total:\n This combines the connect and read timeouts into one; the read timeout\n will be set to the time leftover from the connect attempt. In the\n event that both a connect timeout and a total are specified, or a read\n timeout and a total are specified, the shorter timeout will be applied.\n\n Defaults to None.\n\n :type total: int, float, or None\n\n :param connect:\n The maximum amount of time (in seconds) to wait for a connection\n attempt to a server to succeed. 
Omitting the parameter will default the\n connect timeout to the system default, probably `the global default\n timeout in socket.py\n <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.\n None will set an infinite timeout for connection attempts.\n\n :type connect: int, float, or None\n\n :param read:\n The maximum amount of time (in seconds) to wait between consecutive\n read operations for a response from the server. Omitting the parameter\n will default the read timeout to the system default, probably `the\n global default timeout in socket.py\n <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.\n None will set an infinite timeout.\n\n :type read: int, float, or None\n\n .. note::\n\n Many factors can affect the total amount of time for urllib3 to return\n an HTTP response.\n\n For example, Python's DNS resolver does not obey the timeout specified\n on the socket. Other factors that can affect total request time include\n high CPU load, high swap, the program running at a low priority level,\n or other behaviors.\n\n In addition, the read and total timeouts only measure the time between\n read operations on the socket connecting the client and the server,\n not the total amount of time for the request to return a complete\n response. For most requests, the timeout is raised because the server\n has not sent the first byte in the specified time. This is not always\n the case; if a server streams one byte every fifteen seconds, a timeout\n of 20 seconds will not trigger, even though the request will take\n several minutes to complete.\n\n If your goal is to cut off any request after a set amount of wall clock\n time, consider having a second \"watcher\" thread to cut off a slow\n request.\n \"\"\"\n\n #: A sentinel object representing the default timeout value\n DEFAULT_TIMEOUT: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT\n\n def __init__(\n self,\n total: _TYPE_TIMEOUT = None,\n connect: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,\n read: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,\n ) -> None:\n self._connect = self._validate_timeout(connect, \"connect\")\n self._read = self._validate_timeout(read, \"read\")\n self.total = self._validate_timeout(total, \"total\")\n self._start_connect: float | None = None\n\n def __repr__(self) -> str:\n return f\"{type(self).__name__}(connect={self._connect!r}, read={self._read!r}, total={self.total!r})\"\n\n # __str__ provided for backwards compatibility\n __str__ = __repr__\n\n @staticmethod\n def resolve_default_timeout(timeout: _TYPE_TIMEOUT) -> float | None:\n return getdefaulttimeout() if timeout is _DEFAULT_TIMEOUT else timeout\n\n @classmethod\n def _validate_timeout(cls, value: _TYPE_TIMEOUT, name: str) -> _TYPE_TIMEOUT:\n \"\"\"Check that a timeout attribute is valid.\n\n :param value: The timeout value to validate\n :param name: The name of the timeout attribute to validate. This is\n used to specify in error messages.\n :return: The validated and casted version of the given value.\n :raises ValueError: If it is a numeric value less than or equal to\n zero, or the type is not an integer, float, or None.\n \"\"\"\n if value is None or value is _DEFAULT_TIMEOUT:\n return value\n\n if isinstance(value, bool):\n raise ValueError(\n \"Timeout cannot be a boolean value. 
It must \"\n \"be an int, float or None.\"\n )\n try:\n float(value)\n except (TypeError, ValueError):\n raise ValueError(\n \"Timeout value %s was %s, but it must be an \"\n \"int, float or None.\" % (name, value)\n ) from None\n\n try:\n if value <= 0:\n raise ValueError(\n \"Attempted to set %s timeout to %s, but the \"\n \"timeout cannot be set to a value less \"\n \"than or equal to 0.\" % (name, value)\n )\n except TypeError:\n raise ValueError(\n \"Timeout value %s was %s, but it must be an \"\n \"int, float or None.\" % (name, value)\n ) from None\n\n return value\n\n @classmethod\n def from_float(cls, timeout: _TYPE_TIMEOUT) -> Timeout:\n \"\"\"Create a new Timeout from a legacy timeout value.\n\n The timeout value used by httplib.py sets the same timeout on the\n connect(), and recv() socket requests. This creates a :class:`Timeout`\n object that sets the individual timeouts to the ``timeout`` value\n passed to this function.\n\n :param timeout: The legacy timeout value.\n :type timeout: integer, float, :attr:`urllib3.util.Timeout.DEFAULT_TIMEOUT`, or None\n :return: Timeout object\n :rtype: :class:`Timeout`\n \"\"\"\n return Timeout(read=timeout, connect=timeout)\n\n def clone(self) -> Timeout:\n \"\"\"Create a copy of the timeout object\n\n Timeout properties are stored per-pool but each request needs a fresh\n Timeout object to ensure each one has its own start/stop configured.\n\n :return: a copy of the timeout object\n :rtype: :class:`Timeout`\n \"\"\"\n # We can't use copy.deepcopy because that will also create a new object\n # for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to\n # detect the user default.\n return Timeout(connect=self._connect, read=self._read, total=self.total)\n\n def start_connect(self) -> float:\n \"\"\"Start the timeout clock, used during a connect() attempt\n\n :raises urllib3.exceptions.TimeoutStateError: if you attempt\n to start a timer that has been started already.\n \"\"\"\n if self._start_connect is not None:\n raise TimeoutStateError(\"Timeout timer has already been started.\")\n self._start_connect = time.monotonic()\n return self._start_connect\n\n def get_connect_duration(self) -> float:\n \"\"\"Gets the time elapsed since the call to :meth:`start_connect`.\n\n :return: Elapsed time in seconds.\n :rtype: float\n :raises urllib3.exceptions.TimeoutStateError: if you attempt\n to get duration for a timer that hasn't been started.\n \"\"\"\n if self._start_connect is None:\n raise TimeoutStateError(\n \"Can't get connect duration for timer that has not started.\"\n )\n return time.monotonic() - self._start_connect\n\n @property\n def connect_timeout(self) -> _TYPE_TIMEOUT:\n \"\"\"Get the value to use when setting a connection timeout.\n\n This will be a positive float or integer, the value None\n (never timeout), or the default system timeout.\n\n :return: Connect timeout.\n :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None\n \"\"\"\n if self.total is None:\n return self._connect\n\n if self._connect is None or self._connect is _DEFAULT_TIMEOUT:\n return self.total\n\n return min(self._connect, self.total) # type: ignore[type-var]\n\n @property\n def read_timeout(self) -> float | None:\n \"\"\"Get the value for the read timeout.\n\n This assumes some time has elapsed in the connection timeout and\n computes the read timeout appropriately.\n\n If self.total is set, the read timeout is dependent on the amount of\n time taken by the connect timeout. 
If the connection time has not been\n established, a :exc:`~urllib3.exceptions.TimeoutStateError` will be\n raised.\n\n :return: Value to use for the read timeout.\n :rtype: int, float or None\n :raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect`\n has not yet been called on this object.\n \"\"\"\n if (\n self.total is not None\n and self.total is not _DEFAULT_TIMEOUT\n and self._read is not None\n and self._read is not _DEFAULT_TIMEOUT\n ):\n # In case the connect timeout has not yet been established.\n if self._start_connect is None:\n return self._read\n return max(0, min(self.total - self.get_connect_duration(), self._read))\n elif self.total is not None and self.total is not _DEFAULT_TIMEOUT:\n return max(0, self.total - self.get_connect_duration())\n else:\n return self.resolve_default_timeout(self._read)" }, { "identifier": "to_str", "path": ".venv/Lib/site-packages/urllib3/util/util.py", "snippet": "def to_str(\n x: str | bytes, encoding: str | None = None, errors: str | None = None\n) -> str:\n if isinstance(x, str):\n return x\n elif not isinstance(x, bytes):\n raise TypeError(f\"not expecting type {type(x).__name__}\")\n if encoding or errors:\n return x.decode(encoding or \"utf-8\", errors=errors or \"strict\")\n return x.decode()" }, { "identifier": "wait_for_read", "path": ".venv/Lib/site-packages/urllib3/util/wait.py", "snippet": "def wait_for_read(sock: socket.socket, timeout: float | None = None) -> bool:\n \"\"\"Waits for reading to be available on a given socket.\n Returns True if the socket is readable, or False if the timeout expired.\n \"\"\"\n return wait_for_socket(sock, read=True, timeout=timeout)" }, { "identifier": "_TYPE_BODY", "path": ".venv/Lib/site-packages/urllib3/_base_connection.py", "snippet": "_TYPE_BODY = typing.Union[bytes, typing.IO[typing.Any], typing.Iterable[bytes], str]" }, { "identifier": "ProxyConfig", "path": ".venv/Lib/site-packages/urllib3/_base_connection.py", "snippet": "class ProxyConfig(typing.NamedTuple):\n ssl_context: ssl.SSLContext | None\n use_forwarding_for_https: bool\n assert_hostname: None | str | Literal[False]\n assert_fingerprint: str | None" }, { "identifier": "_ResponseOptions", "path": ".venv/Lib/site-packages/urllib3/_base_connection.py", "snippet": "class _ResponseOptions(typing.NamedTuple):\n # TODO: Remove this in favor of a better\n # HTTP request/response lifecycle tracking.\n request_method: str\n request_url: str\n preload_content: bool\n decode_content: bool\n enforce_content_length: bool" }, { "identifier": "__version__", "path": ".venv/Lib/site-packages/urllib3/_version.py", "snippet": "" }, { "identifier": "ConnectTimeoutError", "path": ".venv/Lib/site-packages/urllib3/exceptions.py", "snippet": "class ConnectTimeoutError(TimeoutError):\n \"\"\"Raised when a socket timeout occurs while connecting to a server\"\"\"" }, { "identifier": "HeaderParsingError", "path": ".venv/Lib/site-packages/urllib3/exceptions.py", "snippet": "class HeaderParsingError(HTTPError):\n \"\"\"Raised by assert_header_parsing, but we convert it to a log.warning statement.\"\"\"\n\n def __init__(\n self, defects: list[MessageDefect], unparsed_data: bytes | str | None\n ) -> None:\n message = f\"{defects or 'Unknown'}, unparsed data: {unparsed_data!r}\"\n super().__init__(message)" }, { "identifier": "NameResolutionError", "path": ".venv/Lib/site-packages/urllib3/exceptions.py", "snippet": "class NameResolutionError(NewConnectionError):\n \"\"\"Raised when host name resolution fails.\"\"\"\n\n def __init__(self, host: str, 
conn: HTTPConnection, reason: socket.gaierror):\n message = f\"Failed to resolve '{host}' ({reason})\"\n super().__init__(conn, message)" }, { "identifier": "NewConnectionError", "path": ".venv/Lib/site-packages/urllib3/exceptions.py", "snippet": "class NewConnectionError(ConnectTimeoutError, HTTPError):\n \"\"\"Raised when we fail to establish a new connection. Usually ECONNREFUSED.\"\"\"\n\n def __init__(self, conn: HTTPConnection, message: str) -> None:\n self.conn = conn\n super().__init__(f\"{conn}: {message}\")\n\n @property\n def pool(self) -> HTTPConnection:\n warnings.warn(\n \"The 'pool' property is deprecated and will be removed \"\n \"in urllib3 v2.1.0. Use 'conn' instead.\",\n DeprecationWarning,\n stacklevel=2,\n )\n\n return self.conn" }, { "identifier": "ProxyError", "path": ".venv/Lib/site-packages/urllib3/exceptions.py", "snippet": "class ProxyError(HTTPError):\n \"\"\"Raised when the connection to a proxy fails.\"\"\"\n\n # The original error is also available as __cause__.\n original_error: Exception\n\n def __init__(self, message: str, error: Exception) -> None:\n super().__init__(message, error)\n self.original_error = error" }, { "identifier": "SystemTimeWarning", "path": ".venv/Lib/site-packages/urllib3/exceptions.py", "snippet": "class SystemTimeWarning(SecurityWarning):\n \"\"\"Warned when system time is suspected to be wrong\"\"\"" }, { "identifier": "connection", "path": ".venv/Lib/site-packages/urllib3/util/connection.py", "snippet": "_TYPE_SOCKET_OPTIONS = typing.Sequence[typing.Tuple[int, int, typing.Union[int, bytes]]]\nHAS_IPV6 = _has_ipv6(\"::1\")\ndef is_connection_dropped(conn: BaseHTTPConnection) -> bool: # Platform-specific\ndef create_connection(\n address: tuple[str, int],\n timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,\n source_address: tuple[str, int] | None = None,\n socket_options: _TYPE_SOCKET_OPTIONS | None = None,\n) -> socket.socket:\ndef _set_socket_options(\n sock: socket.socket, options: _TYPE_SOCKET_OPTIONS | None\n) -> None:\ndef allowed_gai_family() -> socket.AddressFamily:\ndef _has_ipv6(host: str) -> bool:" }, { "identifier": "ssl_", "path": ".venv/Lib/site-packages/urllib3/util/ssl_.py", "snippet": "HAS_NEVER_CHECK_COMMON_NAME = False\nIS_PYOPENSSL = False\nALPN_PROTOCOLS = [\"http/1.1\"]\n_TYPE_VERSION_INFO = typing.Tuple[int, int, int, str, int]\nHASHFUNC_MAP = {32: md5, 40: sha1, 64: sha256}\n_SSL_VERSION_TO_TLS_VERSION: dict[int, int] = {}\n HAS_NEVER_CHECK_COMMON_NAME = False\n OP_NO_COMPRESSION = 0x20000 # type: ignore[assignment]\n OP_NO_TICKET = 0x4000 # type: ignore[assignment]\n PROTOCOL_TLS_CLIENT = 16 # type: ignore[assignment]\n_TYPE_PEER_CERT_RET = typing.Union[\"_TYPE_PEER_CERT_RET_DICT\", bytes, None]\ndef _is_bpo_43522_fixed(\n implementation_name: str,\n version_info: _TYPE_VERSION_INFO,\n pypy_version_info: _TYPE_VERSION_INFO | None,\n) -> bool:\ndef _is_has_never_check_common_name_reliable(\n openssl_version: str,\n openssl_version_number: int,\n implementation_name: str,\n version_info: _TYPE_VERSION_INFO,\n pypy_version_info: _TYPE_VERSION_INFO | None,\n) -> bool:\ndef assert_fingerprint(cert: bytes | None, fingerprint: str) -> None:\ndef resolve_cert_reqs(candidate: None | int | str) -> VerifyMode:\ndef resolve_ssl_version(candidate: None | int | str) -> int:\ndef create_urllib3_context(\n ssl_version: int | None = None,\n cert_reqs: int | None = None,\n options: int | None = None,\n ciphers: str | None = None,\n ssl_minimum_version: int | None = None,\n ssl_maximum_version: int | None = None,\n) -> 
ssl.SSLContext:\ndef ssl_wrap_socket(\n sock: socket.socket,\n keyfile: str | None = ...,\n certfile: str | None = ...,\n cert_reqs: int | None = ...,\n ca_certs: str | None = ...,\n server_hostname: str | None = ...,\n ssl_version: int | None = ...,\n ciphers: str | None = ...,\n ssl_context: ssl.SSLContext | None = ...,\n ca_cert_dir: str | None = ...,\n key_password: str | None = ...,\n ca_cert_data: None | str | bytes = ...,\n tls_in_tls: Literal[False] = ...,\n) -> ssl.SSLSocket:\ndef ssl_wrap_socket(\n sock: socket.socket,\n keyfile: str | None = ...,\n certfile: str | None = ...,\n cert_reqs: int | None = ...,\n ca_certs: str | None = ...,\n server_hostname: str | None = ...,\n ssl_version: int | None = ...,\n ciphers: str | None = ...,\n ssl_context: ssl.SSLContext | None = ...,\n ca_cert_dir: str | None = ...,\n key_password: str | None = ...,\n ca_cert_data: None | str | bytes = ...,\n tls_in_tls: bool = ...,\n) -> ssl.SSLSocket | SSLTransportType:\ndef ssl_wrap_socket(\n sock: socket.socket,\n keyfile: str | None = None,\n certfile: str | None = None,\n cert_reqs: int | None = None,\n ca_certs: str | None = None,\n server_hostname: str | None = None,\n ssl_version: int | None = None,\n ciphers: str | None = None,\n ssl_context: ssl.SSLContext | None = None,\n ca_cert_dir: str | None = None,\n key_password: str | None = None,\n ca_cert_data: None | str | bytes = None,\n tls_in_tls: bool = False,\n) -> ssl.SSLSocket | SSLTransportType:\ndef is_ipaddress(hostname: str | bytes) -> bool:\ndef _is_key_file_encrypted(key_file: str) -> bool:\ndef _ssl_wrap_socket_impl(\n sock: socket.socket,\n ssl_context: ssl.SSLContext,\n tls_in_tls: bool,\n server_hostname: str | None = None,\n) -> ssl.SSLSocket | SSLTransportType:\n class _TYPE_PEER_CERT_RET_DICT(TypedDict, total=False):" }, { "identifier": "SKIP_HEADER", "path": ".venv/Lib/site-packages/urllib3/util/request.py", "snippet": "SKIP_HEADER = \"@@@SKIP_HEADER@@@\"" }, { "identifier": "SKIPPABLE_HEADERS", "path": ".venv/Lib/site-packages/urllib3/util/request.py", "snippet": "SKIPPABLE_HEADERS = frozenset([\"accept-encoding\", \"host\", \"user-agent\"])" }, { "identifier": "body_to_chunks", "path": ".venv/Lib/site-packages/urllib3/util/request.py", "snippet": "def body_to_chunks(\n body: typing.Any | None, method: str, blocksize: int\n) -> ChunksAndContentLength:\n \"\"\"Takes the HTTP request method, body, and blocksize and\n transforms them into an iterable of chunks to pass to\n socket.sendall() and an optional 'Content-Length' header.\n\n A 'Content-Length' of 'None' indicates the length of the body\n can't be determined so should use 'Transfer-Encoding: chunked'\n for framing instead.\n \"\"\"\n\n chunks: typing.Iterable[bytes] | None\n content_length: int | None\n\n # No body, we need to make a recommendation on 'Content-Length'\n # based on whether that request method is expected to have\n # a body or not.\n if body is None:\n chunks = None\n if method.upper() not in _METHODS_NOT_EXPECTING_BODY:\n content_length = 0\n else:\n content_length = None\n\n # Bytes or strings become bytes\n elif isinstance(body, (str, bytes)):\n chunks = (to_bytes(body),)\n content_length = len(chunks[0])\n\n # File-like object, TODO: use seek() and tell() for length?\n elif hasattr(body, \"read\"):\n\n def chunk_readable() -> typing.Iterable[bytes]:\n nonlocal body, blocksize\n encode = isinstance(body, io.TextIOBase)\n while True:\n datablock = body.read(blocksize)\n if not datablock:\n break\n if encode:\n datablock = 
datablock.encode(\"iso-8859-1\")\n yield datablock\n\n chunks = chunk_readable()\n content_length = None\n\n # Otherwise we need to start checking via duck-typing.\n else:\n try:\n # Check if the body implements the buffer API.\n mv = memoryview(body)\n except TypeError:\n try:\n # Check if the body is an iterable\n chunks = iter(body)\n content_length = None\n except TypeError:\n raise TypeError(\n f\"'body' must be a bytes-like object, file-like \"\n f\"object, or iterable. Instead was {body!r}\"\n ) from None\n else:\n # Since it implements the buffer API can be passed directly to socket.sendall()\n chunks = (body,)\n content_length = mv.nbytes\n\n return ChunksAndContentLength(chunks=chunks, content_length=content_length)" }, { "identifier": "assert_fingerprint", "path": ".venv/Lib/site-packages/urllib3/util/ssl_.py", "snippet": "def assert_fingerprint(cert: bytes | None, fingerprint: str) -> None:\n \"\"\"\n Checks if given fingerprint matches the supplied certificate.\n\n :param cert:\n Certificate as bytes object.\n :param fingerprint:\n Fingerprint as string of hexdigits, can be interspersed by colons.\n \"\"\"\n\n if cert is None:\n raise SSLError(\"No certificate for the peer.\")\n\n fingerprint = fingerprint.replace(\":\", \"\").lower()\n digest_length = len(fingerprint)\n hashfunc = HASHFUNC_MAP.get(digest_length)\n if not hashfunc:\n raise SSLError(f\"Fingerprint of invalid length: {fingerprint}\")\n\n # We need encode() here for py32; works on py2 and p33.\n fingerprint_bytes = unhexlify(fingerprint.encode())\n\n cert_digest = hashfunc(cert).digest()\n\n if not hmac.compare_digest(cert_digest, fingerprint_bytes):\n raise SSLError(\n f'Fingerprints did not match. Expected \"{fingerprint}\", got \"{cert_digest.hex()}\"'\n )" }, { "identifier": "create_urllib3_context", "path": ".venv/Lib/site-packages/urllib3/util/ssl_.py", "snippet": "def create_urllib3_context(\n ssl_version: int | None = None,\n cert_reqs: int | None = None,\n options: int | None = None,\n ciphers: str | None = None,\n ssl_minimum_version: int | None = None,\n ssl_maximum_version: int | None = None,\n) -> ssl.SSLContext:\n \"\"\"Creates and configures an :class:`ssl.SSLContext` instance for use with urllib3.\n\n :param ssl_version:\n The desired protocol version to use. This will default to\n PROTOCOL_SSLv23 which will negotiate the highest protocol that both\n the server and your installation of OpenSSL support.\n\n This parameter is deprecated instead use 'ssl_minimum_version'.\n :param ssl_minimum_version:\n The minimum version of TLS to be used. Use the 'ssl.TLSVersion' enum for specifying the value.\n :param ssl_maximum_version:\n The maximum version of TLS to be used. Use the 'ssl.TLSVersion' enum for specifying the value.\n Not recommended to set to anything other than 'ssl.TLSVersion.MAXIMUM_SUPPORTED' which is the\n default value.\n :param cert_reqs:\n Whether to require the certificate verification. This defaults to\n ``ssl.CERT_REQUIRED``.\n :param options:\n Specific OpenSSL options. These default to ``ssl.OP_NO_SSLv2``,\n ``ssl.OP_NO_SSLv3``, ``ssl.OP_NO_COMPRESSION``, and ``ssl.OP_NO_TICKET``.\n :param ciphers:\n Which cipher suites to allow the server to select. 
Defaults to either system configured\n ciphers if OpenSSL 1.1.1+, otherwise uses a secure default set of ciphers.\n :returns:\n Constructed SSLContext object with specified options\n :rtype: SSLContext\n \"\"\"\n if SSLContext is None:\n raise TypeError(\"Can't create an SSLContext object without an ssl module\")\n\n # This means 'ssl_version' was specified as an exact value.\n if ssl_version not in (None, PROTOCOL_TLS, PROTOCOL_TLS_CLIENT):\n # Disallow setting 'ssl_version' and 'ssl_minimum|maximum_version'\n # to avoid conflicts.\n if ssl_minimum_version is not None or ssl_maximum_version is not None:\n raise ValueError(\n \"Can't specify both 'ssl_version' and either \"\n \"'ssl_minimum_version' or 'ssl_maximum_version'\"\n )\n\n # 'ssl_version' is deprecated and will be removed in the future.\n else:\n # Use 'ssl_minimum_version' and 'ssl_maximum_version' instead.\n ssl_minimum_version = _SSL_VERSION_TO_TLS_VERSION.get(\n ssl_version, TLSVersion.MINIMUM_SUPPORTED\n )\n ssl_maximum_version = _SSL_VERSION_TO_TLS_VERSION.get(\n ssl_version, TLSVersion.MAXIMUM_SUPPORTED\n )\n\n # This warning message is pushing users to use 'ssl_minimum_version'\n # instead of both min/max. Best practice is to only set the minimum version and\n # keep the maximum version to be it's default value: 'TLSVersion.MAXIMUM_SUPPORTED'\n warnings.warn(\n \"'ssl_version' option is deprecated and will be \"\n \"removed in urllib3 v2.1.0. Instead use 'ssl_minimum_version'\",\n category=DeprecationWarning,\n stacklevel=2,\n )\n\n # PROTOCOL_TLS is deprecated in Python 3.10 so we always use PROTOCOL_TLS_CLIENT\n context = SSLContext(PROTOCOL_TLS_CLIENT)\n\n if ssl_minimum_version is not None:\n context.minimum_version = ssl_minimum_version\n else: # Python <3.10 defaults to 'MINIMUM_SUPPORTED' so explicitly set TLSv1.2 here\n context.minimum_version = TLSVersion.TLSv1_2\n\n if ssl_maximum_version is not None:\n context.maximum_version = ssl_maximum_version\n\n # Unless we're given ciphers defer to either system ciphers in\n # the case of OpenSSL 1.1.1+ or use our own secure default ciphers.\n if ciphers:\n context.set_ciphers(ciphers)\n\n # Setting the default here, as we may have no ssl module on import\n cert_reqs = ssl.CERT_REQUIRED if cert_reqs is None else cert_reqs\n\n if options is None:\n options = 0\n # SSLv2 is easily broken and is considered harmful and dangerous\n options |= OP_NO_SSLv2\n # SSLv3 has several problems and is now dangerous\n options |= OP_NO_SSLv3\n # Disable compression to prevent CRIME attacks for OpenSSL 1.0+\n # (issue #309)\n options |= OP_NO_COMPRESSION\n # TLSv1.2 only. Unless set explicitly, do not request tickets.\n # This may save some bandwidth on wire, and although the ticket is encrypted,\n # there is a risk associated with it being on wire,\n # if the server is not rotating its ticketing keys properly.\n options |= OP_NO_TICKET\n\n context.options |= options\n\n # Enable post-handshake authentication for TLS 1.3, see GH #1634. PHA is\n # necessary for conditional client cert authentication with TLS 1.3.\n # The attribute is None for OpenSSL <= 1.1.0 or does not exist in older\n # versions of Python. 
We only enable if certificate verification is enabled to work\n # around Python issue #37428\n # See: https://bugs.python.org/issue37428\n if (\n cert_reqs == ssl.CERT_REQUIRED\n and getattr(context, \"post_handshake_auth\", None) is not None\n ):\n context.post_handshake_auth = True\n\n # The order of the below lines setting verify_mode and check_hostname\n # matter due to safe-guards SSLContext has to prevent an SSLContext with\n # check_hostname=True, verify_mode=NONE/OPTIONAL.\n # We always set 'check_hostname=False' for pyOpenSSL so we rely on our own\n # 'ssl.match_hostname()' implementation.\n if cert_reqs == ssl.CERT_REQUIRED and not IS_PYOPENSSL:\n context.verify_mode = cert_reqs\n context.check_hostname = True\n else:\n context.check_hostname = False\n context.verify_mode = cert_reqs\n\n try:\n context.hostname_checks_common_name = False\n except AttributeError: # Defensive: for CPython < 3.8.9 and 3.9.3; for PyPy < 7.3.8\n pass\n\n # Enable logging of TLS session keys via defacto standard environment variable\n # 'SSLKEYLOGFILE', if the feature is available (Python 3.8+). Skip empty values.\n if hasattr(context, \"keylog_filename\"):\n sslkeylogfile = os.environ.get(\"SSLKEYLOGFILE\")\n if sslkeylogfile:\n context.keylog_filename = sslkeylogfile\n\n return context" }, { "identifier": "is_ipaddress", "path": ".venv/Lib/site-packages/urllib3/util/ssl_.py", "snippet": "def is_ipaddress(hostname: str | bytes) -> bool:\n \"\"\"Detects whether the hostname given is an IPv4 or IPv6 address.\n Also detects IPv6 addresses with Zone IDs.\n\n :param str hostname: Hostname to examine.\n :return: True if the hostname is an IP address, False otherwise.\n \"\"\"\n if isinstance(hostname, bytes):\n # IDN A-label bytes are ASCII compatible.\n hostname = hostname.decode(\"ascii\")\n return bool(_IPV4_RE.match(hostname) or _BRACELESS_IPV6_ADDRZ_RE.match(hostname))" }, { "identifier": "resolve_cert_reqs", "path": ".venv/Lib/site-packages/urllib3/util/ssl_.py", "snippet": "def resolve_cert_reqs(candidate: None | int | str) -> VerifyMode:\n \"\"\"\n Resolves the argument to a numeric constant, which can be passed to\n the wrap_socket function/method from the ssl module.\n Defaults to :data:`ssl.CERT_REQUIRED`.\n If given a string it is assumed to be the name of the constant in the\n :mod:`ssl` module or its abbreviation.\n (So you can specify `REQUIRED` instead of `CERT_REQUIRED`.\n If it's neither `None` nor a string we assume it is already the numeric\n constant which can directly be passed to wrap_socket.\n \"\"\"\n if candidate is None:\n return CERT_REQUIRED\n\n if isinstance(candidate, str):\n res = getattr(ssl, candidate, None)\n if res is None:\n res = getattr(ssl, \"CERT_\" + candidate)\n return res # type: ignore[no-any-return]\n\n return candidate # type: ignore[return-value]" }, { "identifier": "resolve_ssl_version", "path": ".venv/Lib/site-packages/urllib3/util/ssl_.py", "snippet": "def resolve_ssl_version(candidate: None | int | str) -> int:\n \"\"\"\n like resolve_cert_reqs\n \"\"\"\n if candidate is None:\n return PROTOCOL_TLS\n\n if isinstance(candidate, str):\n res = getattr(ssl, candidate, None)\n if res is None:\n res = getattr(ssl, \"PROTOCOL_\" + candidate)\n return typing.cast(int, res)\n\n return candidate" }, { "identifier": "ssl_wrap_socket", "path": ".venv/Lib/site-packages/urllib3/util/ssl_.py", "snippet": "@typing.overload\ndef ssl_wrap_socket(\n sock: socket.socket,\n keyfile: str | None = ...,\n certfile: str | None = ...,\n cert_reqs: int | None = ...,\n ca_certs: str 
| None = ...,\n server_hostname: str | None = ...,\n ssl_version: int | None = ...,\n ciphers: str | None = ...,\n ssl_context: ssl.SSLContext | None = ...,\n ca_cert_dir: str | None = ...,\n key_password: str | None = ...,\n ca_cert_data: None | str | bytes = ...,\n tls_in_tls: Literal[False] = ...,\n) -> ssl.SSLSocket:\n ..." }, { "identifier": "CertificateError", "path": ".venv/Lib/site-packages/urllib3/util/ssl_match_hostname.py", "snippet": "class CertificateError(ValueError):\n pass" }, { "identifier": "match_hostname", "path": ".venv/Lib/site-packages/urllib3/util/ssl_match_hostname.py", "snippet": "def match_hostname(\n cert: _TYPE_PEER_CERT_RET_DICT | None,\n hostname: str,\n hostname_checks_common_name: bool = False,\n) -> None:\n \"\"\"Verify that *cert* (in decoded format as returned by\n SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125\n rules are followed, but IP addresses are not accepted for *hostname*.\n\n CertificateError is raised on failure. On success, the function\n returns nothing.\n \"\"\"\n if not cert:\n raise ValueError(\n \"empty or no certificate, match_hostname needs a \"\n \"SSL socket or SSL context with either \"\n \"CERT_OPTIONAL or CERT_REQUIRED\"\n )\n try:\n # Divergence from upstream: ipaddress can't handle byte str\n #\n # The ipaddress module shipped with Python < 3.9 does not support\n # scoped IPv6 addresses so we unconditionally strip the Zone IDs for\n # now. Once we drop support for Python 3.9 we can remove this branch.\n if \"%\" in hostname:\n host_ip = ipaddress.ip_address(hostname[: hostname.rfind(\"%\")])\n else:\n host_ip = ipaddress.ip_address(hostname)\n\n except ValueError:\n # Not an IP address (common case)\n host_ip = None\n dnsnames = []\n san: tuple[tuple[str, str], ...] = cert.get(\"subjectAltName\", ())\n key: str\n value: str\n for key, value in san:\n if key == \"DNS\":\n if host_ip is None and _dnsname_match(value, hostname):\n return\n dnsnames.append(value)\n elif key == \"IP Address\":\n if host_ip is not None and _ipaddress_match(value, host_ip):\n return\n dnsnames.append(value)\n\n # We only check 'commonName' if it's enabled and we're not verifying\n # an IP address. IP addresses aren't valid within 'commonName'.\n if hostname_checks_common_name and host_ip is None and not dnsnames:\n for sub in cert.get(\"subject\", ()):\n for key, value in sub:\n if key == \"commonName\":\n if _dnsname_match(value, hostname):\n return\n dnsnames.append(value)\n\n if len(dnsnames) > 1:\n raise CertificateError(\n \"hostname %r \"\n \"doesn't match either of %s\" % (hostname, \", \".join(map(repr, dnsnames)))\n )\n elif len(dnsnames) == 1:\n raise CertificateError(f\"hostname {hostname!r} doesn't match {dnsnames[0]!r}\")\n else:\n raise CertificateError(\"no appropriate subjectAltName fields were found\")" }, { "identifier": "Url", "path": ".venv/Lib/site-packages/urllib3/util/url.py", "snippet": "class Url(\n typing.NamedTuple(\n \"Url\",\n [\n (\"scheme\", typing.Optional[str]),\n (\"auth\", typing.Optional[str]),\n (\"host\", typing.Optional[str]),\n (\"port\", typing.Optional[int]),\n (\"path\", typing.Optional[str]),\n (\"query\", typing.Optional[str]),\n (\"fragment\", typing.Optional[str]),\n ],\n )\n):\n \"\"\"\n Data structure for representing an HTTP URL. Used as a return value for\n :func:`parse_url`. 
Both the scheme and host are normalized as they are\n both case-insensitive according to RFC 3986.\n \"\"\"\n\n def __new__( # type: ignore[no-untyped-def]\n cls,\n scheme: str | None = None,\n auth: str | None = None,\n host: str | None = None,\n port: int | None = None,\n path: str | None = None,\n query: str | None = None,\n fragment: str | None = None,\n ):\n if path and not path.startswith(\"/\"):\n path = \"/\" + path\n if scheme is not None:\n scheme = scheme.lower()\n return super().__new__(cls, scheme, auth, host, port, path, query, fragment)\n\n @property\n def hostname(self) -> str | None:\n \"\"\"For backwards-compatibility with urlparse. We're nice like that.\"\"\"\n return self.host\n\n @property\n def request_uri(self) -> str:\n \"\"\"Absolute path including the query string.\"\"\"\n uri = self.path or \"/\"\n\n if self.query is not None:\n uri += \"?\" + self.query\n\n return uri\n\n @property\n def authority(self) -> str | None:\n \"\"\"\n Authority component as defined in RFC 3986 3.2.\n This includes userinfo (auth), host and port.\n\n i.e.\n userinfo@host:port\n \"\"\"\n userinfo = self.auth\n netloc = self.netloc\n if netloc is None or userinfo is None:\n return netloc\n else:\n return f\"{userinfo}@{netloc}\"\n\n @property\n def netloc(self) -> str | None:\n \"\"\"\n Network location including host and port.\n\n If you need the equivalent of urllib.parse's ``netloc``,\n use the ``authority`` property instead.\n \"\"\"\n if self.host is None:\n return None\n if self.port:\n return f\"{self.host}:{self.port}\"\n return self.host\n\n @property\n def url(self) -> str:\n \"\"\"\n Convert self into a url\n\n This function should more or less round-trip with :func:`.parse_url`. The\n returned url may not be exactly the same as the url inputted to\n :func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls\n with a blank port will have : removed).\n\n Example:\n\n .. code-block:: python\n\n import urllib3\n\n U = urllib3.util.parse_url(\"https://google.com/mail/\")\n\n print(U.url)\n # \"https://google.com/mail/\"\n\n print( urllib3.util.Url(\"https\", \"username:password\",\n \"host.com\", 80, \"/path\", \"query\", \"fragment\"\n ).url\n )\n # \"https://username:[email protected]:80/path?query#fragment\"\n \"\"\"\n scheme, auth, host, port, path, query, fragment = self\n url = \"\"\n\n # We use \"is not None\" we want things to happen with empty strings (or 0 port)\n if scheme is not None:\n url += scheme + \"://\"\n if auth is not None:\n url += auth + \"@\"\n if host is not None:\n url += host\n if port is not None:\n url += \":\" + str(port)\n if path is not None:\n url += path\n if query is not None:\n url += \"?\" + query\n if fragment is not None:\n url += \"#\" + fragment\n\n return url\n\n def __str__(self) -> str:\n return self.url" } ]
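The context snippets above document, among other helpers, urllib3's Url named tuple and its rough round-tripping with parse_url. As an illustrative aside only (the URL below is a made-up placeholder, not taken from this record), parsing and reassembling a URL might look like:

from urllib3.util import parse_url

# Parse a URL into its named components (scheme, auth, host, port, path, query, fragment).
u = parse_url("https://user:[email protected]:8443/mail/?q=1#frag")
print(u.scheme, u.host, u.port)  # https example.com 8443
print(u.request_uri)             # /mail/?q=1 (absolute path plus query string)
print(u.url)                     # reassembled URL, roughly round-tripping the input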
import datetime
import logging
import os
import re
import socket
import sys
import typing
import warnings
import ssl
from http.client import HTTPConnection as _HTTPConnection
from http.client import HTTPException as HTTPException # noqa: F401
from http.client import ResponseNotReady
from socket import timeout as SocketTimeout
from typing import Literal
from .response import HTTPResponse
from .util.ssl_ import _TYPE_PEER_CERT_RET_DICT
from .util.ssltransport import SSLTransport
from ._collections import HTTPHeaderDict
from .util.response import assert_header_parsing
from .util.timeout import _DEFAULT_TIMEOUT, _TYPE_TIMEOUT, Timeout
from .util.util import to_str
from .util.wait import wait_for_read
from ._base_connection import _TYPE_BODY
from ._base_connection import ProxyConfig as ProxyConfig
from ._base_connection import _ResponseOptions as _ResponseOptions
from ._version import __version__
from .exceptions import (
    ConnectTimeoutError,
    HeaderParsingError,
    NameResolutionError,
    NewConnectionError,
    ProxyError,
    SystemTimeWarning,
)
from .util import SKIP_HEADER, SKIPPABLE_HEADERS, connection, ssl_
from .util.request import body_to_chunks
from .util.ssl_ import assert_fingerprint as _assert_fingerprint
from .util.ssl_ import (
    create_urllib3_context,
    is_ipaddress,
    resolve_cert_reqs,
    resolve_ssl_version,
    ssl_wrap_socket,
)
from .util.ssl_match_hostname import CertificateError, match_hostname
from .util.url import Url
from .response import HTTPResponse
13,844
from __future__ import annotations if typing.TYPE_CHECKING: try: # Compiled with SSL? BaseSSLError = ssl.SSLError except (ImportError, AttributeError): ssl = None # type: ignore[assignment] class BaseSSLError(BaseException): # type: ignore[no-redef] pass # Not a no-op, we're adding this to the namespace so it can be imported. ConnectionError = ConnectionError BrokenPipeError = BrokenPipeError log = logging.getLogger(__name__) port_by_scheme = {"http": 80, "https": 443} # When it comes time to update this value as a part of regular maintenance # (ie test_recent_date is failing) update it to ~6 months before the current date. RECENT_DATE = datetime.date(2022, 1, 1) _CONTAINS_CONTROL_CHAR_RE = re.compile(r"[^-!#$%&'*+.^_`|~0-9a-zA-Z]") _HAS_SYS_AUDIT = hasattr(sys, "audit") class HTTPConnection(_HTTPConnection): """ Based on :class:`http.client.HTTPConnection` but provides an extra constructor backwards-compatibility layer between older and newer Pythons. Additional keyword parameters are used to configure attributes of the connection. Accepted parameters include: - ``source_address``: Set the source address for the current connection. - ``socket_options``: Set specific options on the underlying socket. If not specified, then defaults are loaded from ``HTTPConnection.default_socket_options`` which includes disabling Nagle's algorithm (sets TCP_NODELAY to 1) unless the connection is behind a proxy. For example, if you wish to enable TCP Keep Alive in addition to the defaults, you might pass: .. code-block:: python HTTPConnection.default_socket_options + [ (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1), ] Or you may want to disable the defaults by passing an empty list (e.g., ``[]``). """ default_port: typing.ClassVar[int] = port_by_scheme["http"] # type: ignore[misc] #: Disable Nagle's algorithm by default. #: ``[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]``
from __future__ import annotations if typing.TYPE_CHECKING: try: # Compiled with SSL? BaseSSLError = ssl.SSLError except (ImportError, AttributeError): ssl = None # type: ignore[assignment] class BaseSSLError(BaseException): # type: ignore[no-redef] pass # Not a no-op, we're adding this to the namespace so it can be imported. ConnectionError = ConnectionError BrokenPipeError = BrokenPipeError log = logging.getLogger(__name__) port_by_scheme = {"http": 80, "https": 443} # When it comes time to update this value as a part of regular maintenance # (ie test_recent_date is failing) update it to ~6 months before the current date. RECENT_DATE = datetime.date(2022, 1, 1) _CONTAINS_CONTROL_CHAR_RE = re.compile(r"[^-!#$%&'*+.^_`|~0-9a-zA-Z]") _HAS_SYS_AUDIT = hasattr(sys, "audit") class HTTPConnection(_HTTPConnection): """ Based on :class:`http.client.HTTPConnection` but provides an extra constructor backwards-compatibility layer between older and newer Pythons. Additional keyword parameters are used to configure attributes of the connection. Accepted parameters include: - ``source_address``: Set the source address for the current connection. - ``socket_options``: Set specific options on the underlying socket. If not specified, then defaults are loaded from ``HTTPConnection.default_socket_options`` which includes disabling Nagle's algorithm (sets TCP_NODELAY to 1) unless the connection is behind a proxy. For example, if you wish to enable TCP Keep Alive in addition to the defaults, you might pass: .. code-block:: python HTTPConnection.default_socket_options + [ (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1), ] Or you may want to disable the defaults by passing an empty list (e.g., ``[]``). """ default_port: typing.ClassVar[int] = port_by_scheme["http"] # type: ignore[misc] #: Disable Nagle's algorithm by default. #: ``[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]``
default_socket_options: typing.ClassVar[connection._TYPE_SOCKET_OPTIONS] = [
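The HTTPConnection docstring in the code above suggests extending default_socket_options to enable TCP keep-alive on top of the Nagle-disabling defaults. A minimal sketch of that usage follows; the host is a placeholder, and passing socket_options through a connection pool is an assumption rather than something shown in the snippet:

import socket

from urllib3 import HTTPConnectionPool
from urllib3.connection import HTTPConnection

# Start from the defaults (which set TCP_NODELAY) and add TCP keep-alive,
# as the HTTPConnection docstring suggests.
keepalive_options = HTTPConnection.default_socket_options + [
    (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),
]

# "example.com" is a placeholder host; socket_options is forwarded to each new connection.
pool = HTTPConnectionPool("example.com", port=80, socket_options=keepalive_options)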
17
2023-12-16 04:12:01+00:00
16k
YaoFANGUK/video-subtitle-remover
backend/scenedetect/scene_manager.py
[ { "identifier": "SimpleTableCell", "path": "backend/scenedetect/_thirdparty/simpletable.py", "snippet": "class SimpleTableCell(object):\n \"\"\"A table class to create table cells.\n\n Example:\n cell = SimpleTableCell('Hello, world!')\n \"\"\"\n\n def __init__(self, text, header=False):\n \"\"\"Table cell constructor.\n\n Keyword arguments:\n text -- text to be displayed\n header -- flag to indicate this cell is a header cell.\n \"\"\"\n self.text = text\n self.header = header\n\n def __str__(self):\n \"\"\"Return the HTML code for the table cell.\"\"\"\n if self.header:\n return '<th>%s</th>' % (self.text)\n else:\n return '<td>%s</td>' % (self.text)" }, { "identifier": "SimpleTableImage", "path": "backend/scenedetect/_thirdparty/simpletable.py", "snippet": "class SimpleTableImage(object):\n \"\"\"A table class to create table cells with an image.\n\n Example:\n cell = SimpleTableImage('images/image_1.jpg')\n \"\"\"\n\n def __init__(self, image_file, width=None, height=None):\n \"\"\"Table cell constructor.\n\n Keyword arguments:\n image_file -- relative filepath to image file to display.\n width -- (optional) width of the image in pixels\n height -- (optional) height of the image in pixels\n \"\"\"\n self.image_file = image_file\n if width:\n self.width = round(width)\n else:\n self.width = width\n if height:\n self.height = round(height)\n else:\n self.height = height\n\n def __str__(self):\n \"\"\"Return the HTML code for the table cell with the image.\"\"\"\n safe_filename = quote(self.image_file)\n output = '<a href=\"%s\" target=\"_blank\">' % (safe_filename)\n output += '<img src=\"%s\"' % (safe_filename)\n if self.height:\n output += ' height=\"%s\"' % (self.height)\n if self.width:\n output += ' width=\"%s\"' % (self.width)\n output += '></a>'\n\n return output" }, { "identifier": "SimpleTableRow", "path": "backend/scenedetect/_thirdparty/simpletable.py", "snippet": "class SimpleTableRow(object):\n \"\"\"A table class to create table rows, populated by table cells.\n\n Example:\n # Row from list\n row = SimpleTableRow(['Hello,', 'world!'])\n\n # Row from SimpleTableCell\n cell1 = SimpleTableCell('Hello,')\n cell2 = SimpleTableCell('world!')\n row = SimpleTableRow([cell1, cell2])\n \"\"\"\n\n def __init__(self, cells=None, header=False):\n \"\"\"Table row constructor.\n\n Keyword arguments:\n cells -- iterable of SimpleTableCell (default None)\n header -- flag to indicate this row is a header row.\n if the cells are SimpleTableCell, it is the programmer's\n responsibility to verify whether it was created with the\n header flag set to True.\n \"\"\"\n cells = cells or []\n if isinstance(cells[0], SimpleTableCell):\n self.cells = cells\n else:\n self.cells = [SimpleTableCell(cell, header=header) for cell in cells]\n\n self.header = header\n\n def __str__(self):\n \"\"\"Return the HTML code for the table row and its cells as a string.\"\"\"\n row = []\n\n row.append('<tr>')\n\n for cell in self.cells:\n row.append(str(cell))\n\n row.append('</tr>')\n\n return '\\n'.join(row)\n\n def __iter__(self):\n \"\"\"Iterate through row cells\"\"\"\n for cell in self.cells:\n yield cell\n\n def add_cell(self, cell):\n \"\"\"Add a SimpleTableCell object to the list of cells.\"\"\"\n self.cells.append(cell)\n\n def add_cells(self, cells):\n \"\"\"Add a list of SimpleTableCell objects to the list of cells.\"\"\"\n for cell in cells:\n self.cells.append(cell)" }, { "identifier": "SimpleTable", "path": "backend/scenedetect/_thirdparty/simpletable.py", "snippet": "class SimpleTable(object):\n 
\"\"\"A table class to create HTML tables, populated by HTML table rows.\n\n Example:\n # Table from lists\n table = SimpleTable([['Hello,', 'world!'], ['How', 'are', 'you?']])\n\n # Table with header row\n table = SimpleTable([['Hello,', 'world!'], ['How', 'are', 'you?']],\n header_row=['Header1', 'Header2', 'Header3'])\n\n # Table from SimpleTableRow\n rows = SimpleTableRow(['Hello,', 'world!'])\n table = SimpleTable(rows)\n \"\"\"\n\n def __init__(self, rows=None, header_row=None, css_class=None):\n \"\"\"Table constructor.\n\n Keyword arguments:\n rows -- iterable of SimpleTableRow\n header_row -- row that will be displayed at the beginning of the table.\n if this row is SimpleTableRow, it is the programmer's\n responsibility to verify whether it was created with the\n header flag set to True.\n css_class -- table CSS class\n \"\"\"\n rows = rows or []\n if isinstance(rows[0], SimpleTableRow):\n self.rows = rows\n else:\n self.rows = [SimpleTableRow(row) for row in rows]\n\n if header_row is None:\n self.header_row = None\n elif isinstance(header_row, SimpleTableRow):\n self.header_row = header_row\n else:\n self.header_row = SimpleTableRow(header_row, header=True)\n\n self.css_class = css_class\n\n def __str__(self):\n \"\"\"Return the HTML code for the table as a string.\"\"\"\n table = []\n\n if self.css_class:\n table.append('<table class=%s>' % self.css_class)\n else:\n table.append('<table>')\n\n if self.header_row:\n table.append(str(self.header_row))\n\n for row in self.rows:\n table.append(str(row))\n\n table.append('</table>')\n\n return '\\n'.join(table)\n\n def __iter__(self):\n \"\"\"Iterate through table rows\"\"\"\n for row in self.rows:\n yield row\n\n def add_row(self, row):\n \"\"\"Add a SimpleTableRow object to the list of rows.\"\"\"\n self.rows.append(row)\n\n def add_rows(self, rows):\n \"\"\"Add a list of SimpleTableRow objects to the list of rows.\"\"\"\n for row in rows:\n self.rows.append(row)" }, { "identifier": "HTMLPage", "path": "backend/scenedetect/_thirdparty/simpletable.py", "snippet": "class HTMLPage(object):\n \"\"\"A class to create HTML pages containing CSS and tables.\"\"\"\n\n def __init__(self, tables=None, css=None, encoding=\"utf-8\"):\n \"\"\"HTML page constructor.\n\n Keyword arguments:\n tables -- List of SimpleTable objects\n css -- Cascading Style Sheet specification that is appended before the\n table string\n encoding -- Characters encoding. 
Default: UTF-8\n \"\"\"\n self.tables = tables or []\n self.css = css\n self.encoding = encoding\n\n def __str__(self):\n \"\"\"Return the HTML page as a string.\"\"\"\n page = []\n\n if self.css:\n page.append('<style type=\"text/css\">\\n%s\\n</style>' % self.css)\n\n # Set encoding\n page.append('<meta http-equiv=\"Content-Type\" content=\"text/html;'\n 'charset=%s\">' % self.encoding)\n\n for table in self.tables:\n page.append(str(table))\n page.append('<br />')\n\n return '\\n'.join(page)\n\n def __iter__(self):\n \"\"\"Iterate through tables\"\"\"\n for table in self.tables:\n yield table\n\n def save(self, filename):\n \"\"\"Save HTML page to a file using the proper encoding\"\"\"\n with codecs.open(filename, 'w', self.encoding) as outfile:\n for line in str(self):\n outfile.write(line)\n\n def add_table(self, table):\n \"\"\"Add a SimpleTable to the page list of tables\"\"\"\n self.tables.append(table)" }, { "identifier": "tqdm", "path": "backend/scenedetect/platform.py", "snippet": "class FakeTqdmObject:\nclass FakeTqdmLoggingRedirect:\nclass CommandTooLong(Exception):\nclass Template(string.Template):\n def __init__(self, **kawrgs):\n def update(self, n=1):\n def close(self):\n def set_description(self, desc=None, refresh=True):\n def __init__(self, **kawrgs):\n def __enter__(self):\n def __exit__(self, type, value, traceback):\ndef get_cv2_imwrite_params() -> Dict[str, Union[int, None]]:\n def _get_cv2_param(param_name: str) -> Union[int, None]:\ndef get_file_name(file_path: AnyStr, include_extension=True) -> AnyStr:\ndef get_and_create_path(file_path: AnyStr, output_directory: Optional[AnyStr] = None) -> AnyStr:\ndef init_logger(log_level: int = logging.INFO,\n show_stdout: bool = False,\n log_file: Optional[str] = None):\ndef invoke_command(args: List[str]) -> int:\ndef get_ffmpeg_path() -> Optional[str]:\ndef get_ffmpeg_version() -> Optional[str]:\ndef get_mkvmerge_version() -> Optional[str]:\ndef get_system_version_info() -> str:\n INFO_TEMPLATE = '[PySceneDetect] %(message)s'\n DEBUG_TEMPLATE = '%(levelname)s: %(module)s.%(funcName)s(): %(message)s'" }, { "identifier": "FrameTimecode", "path": "backend/scenedetect/frame_timecode.py", "snippet": "class FrameTimecode:\n \"\"\"Object for frame-based timecodes, using the video framerate to compute back and\n forth between frame number and seconds/timecode.\n\n A timecode is valid only if it complies with one of the following three types/formats:\n\n 1. Timecode as `str` in the form 'HH:MM:SS[.nnn]' (`'01:23:45'` or `'01:23:45.678'`)\n 2. Number of seconds as `float`, or `str` in form 'Ss' or 'S.SSSs' (`'2s'` or `'2.3456s'`)\n 3. Exact number of frames as `int`, or `str` in form NNNNN (`123` or `'123'`)\n \"\"\"\n\n def __init__(self,\n timecode: Union[int, float, str, 'FrameTimecode'] = None,\n fps: Union[int, float, str, 'FrameTimecode'] = None):\n \"\"\"\n Arguments:\n timecode: A frame number (int), number of seconds (float), or timecode (str in\n the form `'HH:MM:SS'` or `'HH:MM:SS.nnn'`).\n fps: The framerate or FrameTimecode to use as a time base for all arithmetic.\n Raises:\n TypeError: Thrown if either `timecode` or `fps` are unsupported types.\n ValueError: Thrown when specifying a negative timecode or framerate.\n \"\"\"\n # The following two properties are what is used to keep track of time\n # in a frame-specific manner. 
Note that once the framerate is set,\n # the value should never be modified (only read if required).\n # TODO(v1.0): Make these actual @properties.\n self.framerate = None\n self.frame_num = None\n\n # Copy constructor. Only the timecode argument is used in this case.\n if isinstance(timecode, FrameTimecode):\n self.framerate = timecode.framerate\n self.frame_num = timecode.frame_num\n if fps is not None:\n raise TypeError('Framerate cannot be overwritten when copying a FrameTimecode.')\n else:\n # Ensure other arguments are consistent with API.\n if fps is None:\n raise TypeError('Framerate (fps) is a required argument.')\n if isinstance(fps, FrameTimecode):\n fps = fps.framerate\n\n # Process the given framerate, if it was not already set.\n if not isinstance(fps, (int, float)):\n raise TypeError('Framerate must be of type int/float.')\n if (isinstance(fps, int) and not fps > 0) or (isinstance(fps, float)\n and not fps >= MAX_FPS_DELTA):\n raise ValueError('Framerate must be positive and greater than zero.')\n self.framerate = float(fps)\n\n # Process the timecode value, storing it as an exact number of frames.\n if isinstance(timecode, str):\n self.frame_num = self._parse_timecode_string(timecode)\n else:\n self.frame_num = self._parse_timecode_number(timecode)\n\n # TODO(v1.0): Add a `frame` property to replace the existing one and deprecate this getter.\n def get_frames(self) -> int:\n \"\"\"Get the current time/position in number of frames. This is the\n equivalent of accessing the self.frame_num property (which, along\n with the specified framerate, forms the base for all of the other\n time measurement calculations, e.g. the :meth:`get_seconds` method).\n\n If using to compare a :class:`FrameTimecode` with a frame number,\n you can do so directly against the object (e.g. ``FrameTimecode(10, 10.0) <= 10``).\n\n Returns:\n int: The current time in frames (the current frame number).\n \"\"\"\n return self.frame_num\n\n # TODO(v1.0): Add a `framerate` property to replace the existing one and deprecate this getter.\n def get_framerate(self) -> float:\n \"\"\"Get Framerate: Returns the framerate used by the FrameTimecode object.\n\n Returns:\n float: Framerate of the current FrameTimecode object, in frames per second.\n \"\"\"\n return self.framerate\n\n def equal_framerate(self, fps) -> bool:\n \"\"\"Equal Framerate: Determines if the passed framerate is equal to that of this object.\n\n Arguments:\n fps: Framerate to compare against within the precision constant defined in this module\n (see :data:`MAX_FPS_DELTA`).\n Returns:\n bool: True if passed fps matches the FrameTimecode object's framerate, False otherwise.\n\n \"\"\"\n return math.fabs(self.framerate - fps) < MAX_FPS_DELTA\n\n # TODO(v1.0): Add a `seconds` property to replace this and deprecate the existing one.\n def get_seconds(self) -> float:\n \"\"\"Get the frame's position in number of seconds.\n\n If using to compare a :class:`FrameTimecode` with a frame number,\n you can do so directly against the object (e.g. 
``FrameTimecode(10, 10.0) <= 1.0``).\n\n Returns:\n float: The current time/position in seconds.\n \"\"\"\n return float(self.frame_num) / self.framerate\n\n # TODO(v1.0): Add a `timecode` property to replace this and deprecate the existing one.\n def get_timecode(self, precision: int = 3, use_rounding: bool = True) -> str:\n \"\"\"Get a formatted timecode string of the form HH:MM:SS[.nnn].\n\n Args:\n precision: The number of decimal places to include in the output ``[.nnn]``.\n use_rounding: Rounds the output to the desired precision. If False, the value\n will be truncated to the specified precision.\n\n Returns:\n str: The current time in the form ``\"HH:MM:SS[.nnn]\"``.\n \"\"\"\n # Compute hours and minutes based off of seconds, and update seconds.\n secs = self.get_seconds()\n base = 60.0 * 60.0\n hrs = int(secs / base)\n secs -= (hrs * base)\n base = 60.0\n mins = int(secs / base)\n secs -= (mins * base)\n # Convert seconds into string based on required precision.\n if precision > 0:\n if use_rounding:\n secs = round(secs, precision)\n msec = format(secs, '.%df' % precision)[-precision:]\n secs = '%02d.%s' % (int(secs), msec)\n else:\n secs = '%02d' % int(round(secs, 0)) if use_rounding else '%02d' % int(secs)\n # Return hours, minutes, and seconds as a formatted timecode string.\n return '%02d:%02d:%s' % (hrs, mins, secs)\n\n # TODO(v1.0): Add a `previous` property to replace the existing one and deprecate this getter.\n def previous_frame(self) -> 'FrameTimecode':\n \"\"\"Return a new FrameTimecode for the previous frame (or 0 if on frame 0).\"\"\"\n new_timecode = FrameTimecode(self)\n new_timecode.frame_num = max(0, new_timecode.frame_num - 1)\n return new_timecode\n\n def _seconds_to_frames(self, seconds: float) -> int:\n \"\"\"Convert the passed value seconds to the nearest number of frames using\n the current FrameTimecode object's FPS (self.framerate).\n\n Returns:\n Integer number of frames the passed number of seconds represents using\n the current FrameTimecode's framerate property.\n \"\"\"\n return round(seconds * self.framerate)\n\n def _parse_timecode_number(self, timecode: Union[int, float]) -> int:\n \"\"\" Parse a timecode number, storing it as the exact number of frames.\n Can be passed as frame number (int), seconds (float)\n\n Raises:\n TypeError, ValueError\n \"\"\"\n # Process the timecode value, storing it as an exact number of frames.\n # Exact number of frames N\n if isinstance(timecode, int):\n if timecode < 0:\n raise ValueError('Timecode frame number must be positive and greater than zero.')\n return timecode\n # Number of seconds S\n elif isinstance(timecode, float):\n if timecode < 0.0:\n raise ValueError('Timecode value must be positive and greater than zero.')\n return self._seconds_to_frames(timecode)\n # FrameTimecode\n elif isinstance(timecode, FrameTimecode):\n return timecode.frame_num\n elif timecode is None:\n raise TypeError('Timecode/frame number must be specified!')\n else:\n raise TypeError('Timecode format/type unrecognized.')\n\n def _parse_timecode_string(self, timecode_string: str) -> int:\n \"\"\"Parses a string based on the three possible forms (in timecode format,\n as an integer number of frames, or floating-point seconds, ending with 's').\n\n Requires that the `framerate` property is set before calling this method.\n Assuming a framerate of 30.0 FPS, the strings '00:05:00.000', '00:05:00',\n '9000', '300s', and '300.0s' are all possible valid values, all representing\n a period of time equal to 5 minutes, 300 seconds, or 9000 
frames (at 30 FPS).\n\n Raises:\n TypeError, ValueError\n \"\"\"\n if self.framerate is None:\n raise TypeError('self.framerate must be set before calling _parse_timecode_string.')\n # Number of seconds S\n if timecode_string.endswith('s'):\n secs = timecode_string[:-1]\n if not secs.replace('.', '').isdigit():\n raise ValueError('All characters in timecode seconds string must be digits.')\n secs = float(secs)\n if secs < 0.0:\n raise ValueError('Timecode seconds value must be positive.')\n return self._seconds_to_frames(secs)\n # Exact number of frames N\n elif timecode_string.isdigit():\n timecode = int(timecode_string)\n if timecode < 0:\n raise ValueError('Timecode frame number must be positive.')\n return timecode\n # Standard timecode in string format 'HH:MM:SS[.nnn]'\n else:\n tc_val = timecode_string.split(':')\n if not (len(tc_val) == 3 and tc_val[0].isdigit() and tc_val[1].isdigit()\n and tc_val[2].replace('.', '').isdigit()):\n raise ValueError('Unrecognized or improperly formatted timecode string.')\n hrs, mins = int(tc_val[0]), int(tc_val[1])\n secs = float(tc_val[2]) if '.' in tc_val[2] else int(tc_val[2])\n if not (hrs >= 0 and mins >= 0 and secs >= 0 and mins < 60 and secs < 60):\n raise ValueError('Invalid timecode range (values outside allowed range).')\n secs += (((hrs * 60.0) + mins) * 60.0)\n return self._seconds_to_frames(secs)\n\n def __iadd__(self, other: Union[int, float, str, 'FrameTimecode']) -> 'FrameTimecode':\n if isinstance(other, int):\n self.frame_num += other\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n self.frame_num += other.frame_num\n else:\n raise ValueError('FrameTimecode instances require equal framerate for addition.')\n # Check if value to add is in number of seconds.\n elif isinstance(other, float):\n self.frame_num += self._seconds_to_frames(other)\n elif isinstance(other, str):\n self.frame_num += self._parse_timecode_string(other)\n else:\n raise TypeError('Unsupported type for performing addition with FrameTimecode.')\n if self.frame_num < 0: # Required to allow adding negative seconds/frames.\n self.frame_num = 0\n return self\n\n def __add__(self, other: Union[int, float, str, 'FrameTimecode']) -> 'FrameTimecode':\n to_return = FrameTimecode(timecode=self)\n to_return += other\n return to_return\n\n def __isub__(self, other: Union[int, float, str, 'FrameTimecode']) -> 'FrameTimecode':\n if isinstance(other, int):\n self.frame_num -= other\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n self.frame_num -= other.frame_num\n else:\n raise ValueError('FrameTimecode instances require equal framerate for subtraction.')\n # Check if value to add is in number of seconds.\n elif isinstance(other, float):\n self.frame_num -= self._seconds_to_frames(other)\n elif isinstance(other, str):\n self.frame_num -= self._parse_timecode_string(other)\n else:\n raise TypeError('Unsupported type for performing subtraction with FrameTimecode: %s' %\n type(other))\n if self.frame_num < 0:\n self.frame_num = 0\n return self\n\n def __sub__(self, other: Union[int, float, str, 'FrameTimecode']) -> 'FrameTimecode':\n to_return = FrameTimecode(timecode=self)\n to_return -= other\n return to_return\n\n def __eq__(self, other: Union[int, float, str, 'FrameTimecode']) -> 'FrameTimecode':\n if isinstance(other, int):\n return self.frame_num == other\n elif isinstance(other, float):\n return self.get_seconds() == other\n elif isinstance(other, str):\n return self.frame_num == 
self._parse_timecode_string(other)\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n return self.frame_num == other.frame_num\n else:\n raise TypeError(\n 'FrameTimecode objects must have the same framerate to be compared.')\n elif other is None:\n return False\n else:\n raise TypeError('Unsupported type for performing == with FrameTimecode: %s' %\n type(other))\n\n def __ne__(self, other: Union[int, float, str, 'FrameTimecode']) -> bool:\n return not self == other\n\n def __lt__(self, other: Union[int, float, str, 'FrameTimecode']) -> bool:\n if isinstance(other, int):\n return self.frame_num < other\n elif isinstance(other, float):\n return self.get_seconds() < other\n elif isinstance(other, str):\n return self.frame_num < self._parse_timecode_string(other)\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n return self.frame_num < other.frame_num\n else:\n raise TypeError(\n 'FrameTimecode objects must have the same framerate to be compared.')\n else:\n raise TypeError('Unsupported type for performing < with FrameTimecode: %s' %\n type(other))\n\n def __le__(self, other: Union[int, float, str, 'FrameTimecode']) -> bool:\n if isinstance(other, int):\n return self.frame_num <= other\n elif isinstance(other, float):\n return self.get_seconds() <= other\n elif isinstance(other, str):\n return self.frame_num <= self._parse_timecode_string(other)\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n return self.frame_num <= other.frame_num\n else:\n raise TypeError(\n 'FrameTimecode objects must have the same framerate to be compared.')\n else:\n raise TypeError('Unsupported type for performing <= with FrameTimecode: %s' %\n type(other))\n\n def __gt__(self, other: Union[int, float, str, 'FrameTimecode']) -> bool:\n if isinstance(other, int):\n return self.frame_num > other\n elif isinstance(other, float):\n return self.get_seconds() > other\n elif isinstance(other, str):\n return self.frame_num > self._parse_timecode_string(other)\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n return self.frame_num > other.frame_num\n else:\n raise TypeError(\n 'FrameTimecode objects must have the same framerate to be compared.')\n else:\n raise TypeError('Unsupported type for performing > with FrameTimecode: %s' %\n type(other))\n\n def __ge__(self, other: Union[int, float, str, 'FrameTimecode']) -> bool:\n if isinstance(other, int):\n return self.frame_num >= other\n elif isinstance(other, float):\n return self.get_seconds() >= other\n elif isinstance(other, str):\n return self.frame_num >= self._parse_timecode_string(other)\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n return self.frame_num >= other.frame_num\n else:\n raise TypeError(\n 'FrameTimecode objects must have the same framerate to be compared.')\n else:\n raise TypeError('Unsupported type for performing >= with FrameTimecode: %s' %\n type(other))\n\n # TODO(v1.0): __int__ and __float__ should be removed. 
Mark as deprecated, and indicate\n # need to use relevant property instead.\n\n def __int__(self) -> int:\n return self.frame_num\n\n def __float__(self) -> float:\n return self.get_seconds()\n\n def __str__(self) -> str:\n return self.get_timecode()\n\n def __repr__(self) -> str:\n return '%s [frame=%d, fps=%.3f]' % (self.get_timecode(), self.frame_num, self.framerate)\n\n def __hash__(self) -> int:\n return self.frame_num" }, { "identifier": "VideoStream", "path": "backend/scenedetect/video_stream.py", "snippet": "class VideoStream(ABC):\n \"\"\" Interface which all video backends must implement. \"\"\"\n\n #\n # Default Implementations\n #\n\n @property\n def base_timecode(self) -> FrameTimecode:\n \"\"\"FrameTimecode object to use as a time base.\"\"\"\n return FrameTimecode(timecode=0, fps=self.frame_rate)\n\n #\n # Abstract Static Methods\n #\n\n @staticmethod\n @abstractmethod\n def BACKEND_NAME() -> str:\n \"\"\"Unique name used to identify this backend. Should be a static property in derived\n classes (`BACKEND_NAME = 'backend_identifier'`).\"\"\"\n raise NotImplementedError\n\n #\n # Abstract Properties\n #\n\n @property\n @abstractmethod\n def path(self) -> Union[bytes, str]:\n \"\"\"Video or device path.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def name(self) -> Union[bytes, str]:\n \"\"\"Name of the video, without extension, or device.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def is_seekable(self) -> bool:\n \"\"\"True if seek() is allowed, False otherwise.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def frame_rate(self) -> float:\n \"\"\"Frame rate in frames/sec.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def duration(self) -> Optional[FrameTimecode]:\n \"\"\"Duration of the stream as a FrameTimecode, or None if non terminating.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def frame_size(self) -> Tuple[int, int]:\n \"\"\"Size of each video frame in pixels as a tuple of (width, height).\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def aspect_ratio(self) -> float:\n \"\"\"Pixel aspect ratio as a float (1.0 represents square pixels).\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def position(self) -> FrameTimecode:\n \"\"\"Current position within stream as FrameTimecode.\n\n This can be interpreted as presentation time stamp, thus frame 1 corresponds\n to the presentation time 0. Returns 0 even if `frame_number` is 1.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def position_ms(self) -> float:\n \"\"\"Current position within stream as a float of the presentation time in\n milliseconds. The first frame has a PTS of 0.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def frame_number(self) -> int:\n \"\"\"Current position within stream as the frame number.\n\n Will return 0 until the first frame is `read`.\"\"\"\n raise NotImplementedError\n\n #\n # Abstract Methods\n #\n\n @abstractmethod\n def read(self, decode: bool = True, advance: bool = True) -> Union[ndarray, bool]:\n \"\"\"Read and decode the next frame as a numpy.ndarray. Returns False when video ends.\n\n Arguments:\n decode: Decode and return the frame.\n advance: Seek to the next frame. 
If False, will return the current (last) frame.\n\n Returns:\n If decode = True, the decoded frame (numpy.ndarray), or False (bool) if end of video.\n If decode = False, a bool indicating if advancing to the the next frame succeeded.\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def reset(self) -> None:\n \"\"\" Close and re-open the VideoStream (equivalent to seeking back to beginning). \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def seek(self, target: Union[FrameTimecode, float, int]) -> None:\n \"\"\"Seek to the given timecode. If given as a frame number, represents the current seek\n pointer (e.g. if seeking to 0, the next frame decoded will be the first frame of the video).\n\n For 1-based indices (first frame is frame #1), the target frame number needs to be converted\n to 0-based by subtracting one. For example, if we want to seek to the first frame, we call\n seek(0) followed by read(). If we want to seek to the 5th frame, we call seek(4) followed\n by read(), at which point frame_number will be 5.\n\n May not be supported on all backend types or inputs (e.g. cameras).\n\n Arguments:\n target: Target position in video stream to seek to.\n If float, interpreted as time in seconds.\n If int, interpreted as frame number.\n Raises:\n SeekError: An error occurs while seeking, or seeking is not supported.\n ValueError: `target` is not a valid value (i.e. it is negative).\n \"\"\"\n raise NotImplementedError" }, { "identifier": "SceneDetector", "path": "backend/scenedetect/scene_detector.py", "snippet": "class SceneDetector:\n \"\"\" Base class to inherit from when implementing a scene detection algorithm.\n\n This API is not yet stable and subject to change.\n\n This represents a \"dense\" scene detector, which returns a list of frames where\n the next scene/shot begins in a video.\n\n Also see the implemented scene detectors in the scenedetect.detectors module\n to get an idea of how a particular detector can be created.\n \"\"\"\n # TODO(v0.7): Make this a proper abstract base class.\n\n stats_manager: Optional[StatsManager] = None\n \"\"\"Optional :class:`StatsManager <scenedetect.stats_manager.StatsManager>` to\n use for caching frame metrics to and from.\"\"\"\n\n # TODO(v1.0): Remove - this is a rarely used case for what is now a neglegible performance gain.\n def is_processing_required(self, frame_num: int) -> bool:\n \"\"\"[DEPRECATED] DO NOT USE\n\n Test if all calculations for a given frame are already done.\n\n Returns:\n False if the SceneDetector has assigned _metric_keys, and the\n stats_manager property is set to a valid StatsManager object containing\n the required frame metrics/calculations for the given frame - thus, not\n needing the frame to perform scene detection.\n\n True otherwise (i.e. 
the frame_img passed to process_frame is required\n to be passed to process_frame for the given frame_num).\n \"\"\"\n metric_keys = self.get_metrics()\n return not metric_keys or not (self.stats_manager is not None\n and self.stats_manager.metrics_exist(frame_num, metric_keys))\n\n def stats_manager_required(self) -> bool:\n \"\"\"Stats Manager Required: Prototype indicating if detector requires stats.\n\n Returns:\n True if a StatsManager is required for the detector, False otherwise.\n \"\"\"\n return False\n\n def get_metrics(self) -> List[str]:\n \"\"\"Get Metrics: Get a list of all metric names/keys used by the detector.\n\n Returns:\n List of strings of frame metric key names that will be used by\n the detector when a StatsManager is passed to process_frame.\n \"\"\"\n return []\n\n def process_frame(self, frame_num: int, frame_img: Optional[numpy.ndarray]) -> List[int]:\n \"\"\"Process Frame: Computes/stores metrics and detects any scene changes.\n\n Prototype method, no actual detection.\n\n Returns:\n List of frame numbers of cuts to be added to the cutting list.\n \"\"\"\n return []\n\n def post_process(self, frame_num: int) -> List[int]:\n \"\"\"Post Process: Performs any processing after the last frame has been read.\n\n Prototype method, no actual detection.\n\n Returns:\n List of frame numbers of cuts to be added to the cutting list.\n \"\"\"\n return []\n\n @property\n def event_buffer_length(self) -> int:\n \"\"\"The amount of frames a given event can be buffered for, in time. Represents maximum\n amount any event can be behind `frame_number` in the result of :meth:`process_frame`.\n \"\"\"\n return 0" }, { "identifier": "SparseSceneDetector", "path": "backend/scenedetect/scene_detector.py", "snippet": "class SparseSceneDetector(SceneDetector):\n \"\"\"Base class to inherit from when implementing a sparse scene detection algorithm.\n\n This class will be removed in v1.0 and should not be used.\n\n Unlike dense detectors, sparse detectors scene_detect \"events\" and return a *pair* of frames,\n as opposed to just a single cut.\n\n An example of a SparseSceneDetector is the MotionDetector.\n \"\"\"\n\n def process_frame(self, frame_num: int, frame_img: numpy.ndarray) -> List[Tuple[int, int]]:\n \"\"\"Process Frame: Computes/stores metrics and detects any scene changes.\n\n Prototype method, no actual detection.\n\n Returns:\n List of frame pairs representing individual scenes\n to be added to the output scene list directly.\n \"\"\"\n return []\n\n def post_process(self, frame_num: int) -> List[Tuple[int, int]]:\n \"\"\"Post Process: Performs any processing after the last frame has been read.\n\n Prototype method, no actual detection.\n\n Returns:\n List of frame pairs representing individual scenes\n to be added to the output scene list directly.\n \"\"\"\n return []" }, { "identifier": "StatsManager", "path": "backend/scenedetect/stats_manager.py", "snippet": "class StatsManager:\n \"\"\"Provides a key-value store for frame metrics/calculations which can be used\n for two-pass detection algorithms, as well as saving stats to a CSV file.\n\n Analyzing a statistics CSV file is also very useful for finding the optimal\n algorithm parameters for certain detection methods. Additionally, the data\n may be plotted by a graphing module (e.g. matplotlib) by obtaining the\n metric of interest for a series of frames by iteratively calling get_metrics(),\n after having called the detect_scenes(...) 
method on the SceneManager object\n which owns the given StatsManager instance.\n\n Only metrics consisting of `float` or `int` should be used currently.\n \"\"\"\n\n def __init__(self, base_timecode: FrameTimecode = None):\n \"\"\"Initialize a new StatsManager.\n\n Arguments:\n base_timecode: Timecode associated with this object. Must not be None (default value\n will be removed in a future release).\n \"\"\"\n # Frame metrics is a dict of frame (int): metric_dict (Dict[str, float])\n # of each frame metric key and the value it represents (usually float).\n self._frame_metrics: Dict[FrameTimecode, Dict[str, float]] = dict()\n self._registered_metrics: Set[str] = set() # Set of frame metric keys.\n self._loaded_metrics: Set[str] = set() # Metric keys loaded from stats file.\n self._metrics_updated: bool = False # Flag indicating if metrics require saving.\n self._base_timecode: Optional[FrameTimecode] = base_timecode # Used for timing calculations.\n\n def register_metrics(self, metric_keys: Iterable[str]) -> None:\n \"\"\"Register a list of metric keys that will be used by the detector.\n\n Used to ensure that multiple detector keys don't overlap.\n\n Raises:\n FrameMetricRegistered: A particular metric_key has already been registered/added\n to the StatsManager. Only if the StatsManager is being used for read-only\n access (i.e. all frames in the video have already been processed for the given\n metric_key in the exception) is this behavior desirable.\n \"\"\"\n for metric_key in metric_keys:\n if metric_key not in self._registered_metrics:\n self._registered_metrics.add(metric_key)\n else:\n raise FrameMetricRegistered(metric_key)\n\n # TODO(v1.0): Change frame_number to a FrameTimecode now that it is just a hash and will\n # be required for VFR support.\n def get_metrics(self, frame_number: int, metric_keys: Iterable[str]) -> List[Any]:\n \"\"\"Return the requested statistics/metrics for a given frame.\n\n Arguments:\n frame_number (int): Frame number to retrieve metrics for.\n metric_keys (List[str]): A list of metric keys to look up.\n\n Returns:\n A list containing the requested frame metrics for the given frame number\n in the same order as the input list of metric keys. 
If a metric could\n not be found, None is returned for that particular metric.\n \"\"\"\n return [self._get_metric(frame_number, metric_key) for metric_key in metric_keys]\n\n def set_metrics(self, frame_number: int, metric_kv_dict: Dict[str, Any]) -> None:\n \"\"\" Set Metrics: Sets the provided statistics/metrics for a given frame.\n\n Arguments:\n frame_number: Frame number to retrieve metrics for.\n metric_kv_dict: A dict mapping metric keys to the\n respective integer/floating-point metric values to set.\n \"\"\"\n for metric_key in metric_kv_dict:\n self._set_metric(frame_number, metric_key, metric_kv_dict[metric_key])\n\n def metrics_exist(self, frame_number: int, metric_keys: Iterable[str]) -> bool:\n \"\"\" Metrics Exist: Checks if the given metrics/stats exist for the given frame.\n\n Returns:\n bool: True if the given metric keys exist for the frame, False otherwise.\n \"\"\"\n return all([self._metric_exists(frame_number, metric_key) for metric_key in metric_keys])\n\n def is_save_required(self) -> bool:\n \"\"\" Is Save Required: Checks if the stats have been updated since loading.\n\n Returns:\n bool: True if there are frame metrics/statistics not yet written to disk,\n False otherwise.\n \"\"\"\n return self._metrics_updated\n\n def save_to_csv(self,\n csv_file: Union[str, bytes, TextIO],\n base_timecode: Optional[FrameTimecode] = None,\n force_save=True) -> None:\n \"\"\" Save To CSV: Saves all frame metrics stored in the StatsManager to a CSV file.\n\n Arguments:\n csv_file: A file handle opened in write mode (e.g. open('...', 'w')) or a path as str.\n base_timecode: [DEPRECATED] DO NOT USE. For backwards compatibility.\n force_save: If True, writes metrics out even if an update is not required.\n\n Raises:\n OSError: If `path` cannot be opened or a write failure occurs.\n \"\"\"\n # TODO(v0.7): Replace with DeprecationWarning that `base_timecode` will be removed in v0.8.\n if base_timecode is not None:\n logger.error('base_timecode is deprecated.')\n\n # Ensure we need to write to the file, and that we have data to do so with.\n if not ((self.is_save_required() or force_save) and self._registered_metrics\n and self._frame_metrics):\n logger.info(\"No metrics to save.\")\n return\n\n assert self._base_timecode is not None\n\n # If we get a path instead of an open file handle, recursively call ourselves\n # again but with file handle instead of path.\n if isinstance(csv_file, (str, bytes)):\n with open(csv_file, 'w') as file:\n self.save_to_csv(csv_file=file, force_save=force_save)\n return\n\n csv_writer = csv.writer(csv_file, lineterminator='\\n')\n metric_keys = sorted(list(self._registered_metrics.union(self._loaded_metrics)))\n csv_writer.writerow([COLUMN_NAME_FRAME_NUMBER, COLUMN_NAME_TIMECODE] + metric_keys)\n frame_keys = sorted(self._frame_metrics.keys())\n logger.info(\"Writing %d frames to CSV...\", len(frame_keys))\n for frame_key in frame_keys:\n frame_timecode = self._base_timecode + frame_key\n csv_writer.writerow(\n [frame_timecode.get_frames() +\n 1, frame_timecode.get_timecode()] +\n [str(metric) for metric in self.get_metrics(frame_key, metric_keys)])\n\n @staticmethod\n def valid_header(row: List[str]) -> bool:\n \"\"\"Check that the given CSV row is a valid header for a statsfile.\n\n Arguments:\n row: A row decoded from the CSV reader.\n\n Returns:\n True if `row` is a valid statsfile header, False otherwise.\n \"\"\"\n if not row or not len(row) >= 2:\n return False\n if row[0] != COLUMN_NAME_FRAME_NUMBER or row[1] != COLUMN_NAME_TIMECODE:\n return 
False\n return True\n\n # TODO(v1.0): Remove.\n def load_from_csv(self, csv_file: Union[str, bytes, TextIO]) -> Optional[int]:\n \"\"\"[DEPRECATED] DO NOT USE\n\n Load all metrics stored in a CSV file into the StatsManager instance. Will be removed in a\n future release after becoming a no-op.\n\n Arguments:\n csv_file: A file handle opened in read mode (e.g. open('...', 'r')) or a path as str.\n\n Returns:\n int or None: Number of frames/rows read from the CSV file, or None if the\n input file was blank or could not be found.\n\n Raises:\n StatsFileCorrupt: Stats file is corrupt and can't be loaded, or wrong file\n was specified.\n \"\"\"\n # TODO: Make this an error, then make load_from_csv() a no-op, and finally, remove it.\n logger.warning(\"load_from_csv() is deprecated and will be removed in a future release.\")\n\n # If we get a path instead of an open file handle, check that it exists, and if so,\n # recursively call ourselves again but with file set instead of path.\n if isinstance(csv_file, (str, bytes)):\n if os.path.exists(csv_file):\n with open(csv_file, 'r') as file:\n return self.load_from_csv(csv_file=file)\n # Path doesn't exist.\n return None\n\n # If we get here, file is a valid file handle in read-only text mode.\n csv_reader = csv.reader(csv_file, lineterminator='\\n')\n num_cols = None\n num_metrics = None\n num_frames = None\n # First Row: Frame Num, Timecode, [metrics...]\n try:\n row = next(csv_reader)\n # Backwards compatibility for previous versions of statsfile\n # which included an additional header row.\n if not self.valid_header(row):\n row = next(csv_reader)\n except StopIteration:\n # If the file is blank or we couldn't decode anything, assume the file was empty.\n return None\n if not self.valid_header(row):\n raise StatsFileCorrupt()\n num_cols = len(row)\n num_metrics = num_cols - 2\n if not num_metrics > 0:\n raise StatsFileCorrupt('No metrics defined in CSV file.')\n self._loaded_metrics = row[2:]\n num_frames = 0\n for row in csv_reader:\n metric_dict = {}\n if not len(row) == num_cols:\n raise StatsFileCorrupt('Wrong number of columns detected in stats file row.')\n for i, metric_str in enumerate(row[2:]):\n if metric_str and metric_str != 'None':\n try:\n metric_dict[self._loaded_metrics[i]] = float(metric_str)\n except ValueError:\n raise StatsFileCorrupt('Corrupted value in stats file: %s' %\n metric_str) from ValueError\n frame_number = int(row[0])\n # Switch from 1-based to 0-based frame numbers.\n if frame_number > 0:\n frame_number -= 1\n self.set_metrics(frame_number, metric_dict)\n num_frames += 1\n logger.info('Loaded %d metrics for %d frames.', num_metrics, num_frames)\n self._metrics_updated = False\n return num_frames\n\n def _get_metric(self, frame_number: int, metric_key: str) -> Optional[Any]:\n if self._metric_exists(frame_number, metric_key):\n return self._frame_metrics[frame_number][metric_key]\n return None\n\n def _set_metric(self, frame_number: int, metric_key: str, metric_value: Any) -> None:\n self._metrics_updated = True\n if not frame_number in self._frame_metrics:\n self._frame_metrics[frame_number] = dict()\n self._frame_metrics[frame_number][metric_key] = metric_value\n\n def _metric_exists(self, frame_number: int, metric_key: str) -> bool:\n return (frame_number in self._frame_metrics\n and metric_key in self._frame_metrics[frame_number])" }, { "identifier": "FrameMetricRegistered", "path": "backend/scenedetect/stats_manager.py", "snippet": "class FrameMetricRegistered(Exception):\n \"\"\" Raised when attempting to 
register a frame metric key which has\n already been registered. \"\"\"\n\n def __init__(self,\n metric_key: str,\n message: str = \"Attempted to re-register frame metric key.\"):\n super().__init__(message)\n self.metric_key = metric_key" } ]
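The StatsManager snippet above documents a simple key-value store for per-frame metrics. A minimal usage sketch, assuming the vendored import path used elsewhere in this record and a hypothetical metric key:

from backend.scenedetect.stats_manager import StatsManager

# Register a (hypothetical) metric key, store a value for frame 0, and read it back.
stats = StatsManager()
stats.register_metrics(["content_val"])
stats.set_metrics(0, {"content_val": 12.5})
assert stats.metrics_exist(0, ["content_val"])
print(stats.get_metrics(0, ["content_val"]))  # [12.5]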
import csv import threading import queue import logging import math import sys import cv2 import numpy as np from enum import Enum from typing import Iterable, List, Tuple, Optional, Dict, Callable, Union, TextIO from backend.scenedetect._thirdparty.simpletable import (SimpleTableCell, SimpleTableImage, SimpleTableRow, SimpleTable, HTMLPage) from backend.scenedetect.platform import (tqdm, get_and_create_path, get_cv2_imwrite_params, Template) from backend.scenedetect.frame_timecode import FrameTimecode from backend.scenedetect.video_stream import VideoStream from backend.scenedetect.scene_detector import SceneDetector, SparseSceneDetector from backend.scenedetect.stats_manager import StatsManager, FrameMetricRegistered
12,856
:mod:`SceneDetector <scenedetect.detectors>` over the frames of a video (:mod:`VideoStream <scenedetect.video_stream>`). Video decoding is done in a separate thread to improve performance. This module also contains other helper functions (e.g. :func:`save_images`) which can be used to process the resulting scene list. =============================================================== Usage =============================================================== The following example shows basic usage of a :class:`SceneManager`: .. code:: python from scenedetect import open_video, SceneManager, ContentDetector video = open_video(video_path) scene_manager = SceneManager() scene_manager.add_detector(ContentDetector()) # Detect all scenes in video from current position to end. scene_manager.detect_scenes(video) # `get_scene_list` returns a list of start/end timecode pairs # for each scene that was found. scenes = scene_manager.get_scene_list() An optional callback can also be invoked on each detected scene, for example: .. code:: python from scenedetect import open_video, SceneManager, ContentDetector # Callback to invoke on the first frame of every new scene detection. def on_new_scene(frame_img: numpy.ndarray, frame_num: int): print("New scene found at frame %d." % frame_num) video = open_video(test_video_file) scene_manager = SceneManager() scene_manager.add_detector(ContentDetector()) scene_manager.detect_scenes(video=video, callback=on_new_scene) To use a `SceneManager` with a webcam/device or existing `cv2.VideoCapture` device, use the :class:`VideoCaptureAdapter <scenedetect.backends.opencv.VideoCaptureAdapter>` instead of `open_video`. ======================================================================= Storing Per-Frame Statistics ======================================================================= `SceneManager` can use an optional :class:`StatsManager <scenedetect.stats_manager.StatsManager>` to save frame statistics to disk: .. code:: python from scenedetect import open_video, ContentDetector, SceneManager, StatsManager video = open_video(test_video_file) scene_manager = SceneManager(stats_manager=StatsManager()) scene_manager.add_detector(ContentDetector()) scene_manager.detect_scenes(video=video) scene_list = scene_manager.get_scene_list() print_scenes(scene_list=scene_list) # Save per-frame statistics to disk. scene_manager.stats_manager.save_to_csv(csv_file=STATS_FILE_PATH) The statsfile can be used to find a better threshold for certain inputs, or perform statistical analysis of the video. """ logger = logging.getLogger('pyscenedetect') # TODO: This value can and should be tuned for performance improvements as much as possible, # until accuracy falls, on a large enough dataset. This has yet to be done, but the current # value doesn't seem to have caused any issues at least. DEFAULT_MIN_WIDTH: int = 256 """The default minimum width a frame will be downscaled to when calculating a downscale factor.""" MAX_FRAME_QUEUE_LENGTH: int = 4 """Maximum number of decoded frames which can be buffered while waiting to be processed.""" PROGRESS_BAR_DESCRIPTION = 'Detected: %d | Progress' """Template to use for progress bar.""" class Interpolation(Enum): """Interpolation method used for image resizing. Based on constants defined in OpenCV.""" NEAREST = cv2.INTER_NEAREST """Nearest neighbor interpolation.""" LINEAR = cv2.INTER_LINEAR """Bilinear interpolation.""" CUBIC = cv2.INTER_CUBIC """Bicubic interpolation.""" AREA = cv2.INTER_AREA """Pixel area relation resampling. 
Provides moire'-free downscaling.""" LANCZOS4 = cv2.INTER_LANCZOS4 """Lanczos interpolation over 8x8 neighborhood.""" def compute_downscale_factor(frame_width: int, effective_width: int = DEFAULT_MIN_WIDTH) -> int: """Get the optimal default downscale factor based on a video's resolution (currently only the width in pixels is considered). The resulting effective width of the video will be between frame_width and 1.5 * frame_width pixels (e.g. if frame_width is 200, the range of effective widths will be between 200 and 300). Arguments: frame_width: Actual width of the video frame in pixels. effective_width: Desired minimum width in pixels. Returns: int: The default downscale factor to use to achieve at least the target effective_width. """ assert not (frame_width < 1 or effective_width < 1) if frame_width < effective_width: return 1 return frame_width // effective_width def get_scenes_from_cuts(
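The code above stops at the `get_scenes_from_cuts(` signature that this record asks to be completed. The preceding `compute_downscale_factor` helper and `Interpolation` enum are self-contained, so a small sketch of their intended use (assumed to run in the same module, on a synthetic frame) could look like:

import cv2
import numpy as np

# Synthetic 1080p frame; only its width matters for the downscale factor.
frame = np.zeros((1080, 1920, 3), dtype=np.uint8)
factor = compute_downscale_factor(frame_width=frame.shape[1])  # 1920 // 256 == 7
small = cv2.resize(
    frame,
    (frame.shape[1] // factor, frame.shape[0] // factor),  # 274 x 154
    interpolation=Interpolation.AREA.value,
)
print(factor, small.shape)  # 7 (154, 274, 3)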
# -*- coding: utf-8 -*- # # PySceneDetect: Python-Based Video Scene Detector # ------------------------------------------------------------------- # [ Site: https://scenedetect.com ] # [ Docs: https://scenedetect.com/docs/ ] # [ Github: https://github.com/Breakthrough/PySceneDetect/ ] # # Copyright (C) 2014-2023 Brandon Castellano <http://www.bcastell.com>. # PySceneDetect is licensed under the BSD 3-Clause License; see the # included LICENSE file, or visit one of the above pages for details. # """``scenedetect.scene_manager`` Module This module implements :class:`SceneManager`, coordinates running a :mod:`SceneDetector <scenedetect.detectors>` over the frames of a video (:mod:`VideoStream <scenedetect.video_stream>`). Video decoding is done in a separate thread to improve performance. This module also contains other helper functions (e.g. :func:`save_images`) which can be used to process the resulting scene list. =============================================================== Usage =============================================================== The following example shows basic usage of a :class:`SceneManager`: .. code:: python from scenedetect import open_video, SceneManager, ContentDetector video = open_video(video_path) scene_manager = SceneManager() scene_manager.add_detector(ContentDetector()) # Detect all scenes in video from current position to end. scene_manager.detect_scenes(video) # `get_scene_list` returns a list of start/end timecode pairs # for each scene that was found. scenes = scene_manager.get_scene_list() An optional callback can also be invoked on each detected scene, for example: .. code:: python from scenedetect import open_video, SceneManager, ContentDetector # Callback to invoke on the first frame of every new scene detection. def on_new_scene(frame_img: numpy.ndarray, frame_num: int): print("New scene found at frame %d." % frame_num) video = open_video(test_video_file) scene_manager = SceneManager() scene_manager.add_detector(ContentDetector()) scene_manager.detect_scenes(video=video, callback=on_new_scene) To use a `SceneManager` with a webcam/device or existing `cv2.VideoCapture` device, use the :class:`VideoCaptureAdapter <scenedetect.backends.opencv.VideoCaptureAdapter>` instead of `open_video`. ======================================================================= Storing Per-Frame Statistics ======================================================================= `SceneManager` can use an optional :class:`StatsManager <scenedetect.stats_manager.StatsManager>` to save frame statistics to disk: .. code:: python from scenedetect import open_video, ContentDetector, SceneManager, StatsManager video = open_video(test_video_file) scene_manager = SceneManager(stats_manager=StatsManager()) scene_manager.add_detector(ContentDetector()) scene_manager.detect_scenes(video=video) scene_list = scene_manager.get_scene_list() print_scenes(scene_list=scene_list) # Save per-frame statistics to disk. scene_manager.stats_manager.save_to_csv(csv_file=STATS_FILE_PATH) The statsfile can be used to find a better threshold for certain inputs, or perform statistical analysis of the video. """ logger = logging.getLogger('pyscenedetect') # TODO: This value can and should be tuned for performance improvements as much as possible, # until accuracy falls, on a large enough dataset. This has yet to be done, but the current # value doesn't seem to have caused any issues at least. 
DEFAULT_MIN_WIDTH: int = 256 """The default minimum width a frame will be downscaled to when calculating a downscale factor.""" MAX_FRAME_QUEUE_LENGTH: int = 4 """Maximum number of decoded frames which can be buffered while waiting to be processed.""" PROGRESS_BAR_DESCRIPTION = 'Detected: %d | Progress' """Template to use for progress bar.""" class Interpolation(Enum): """Interpolation method used for image resizing. Based on constants defined in OpenCV.""" NEAREST = cv2.INTER_NEAREST """Nearest neighbor interpolation.""" LINEAR = cv2.INTER_LINEAR """Bilinear interpolation.""" CUBIC = cv2.INTER_CUBIC """Bicubic interpolation.""" AREA = cv2.INTER_AREA """Pixel area relation resampling. Provides moire'-free downscaling.""" LANCZOS4 = cv2.INTER_LANCZOS4 """Lanczos interpolation over 8x8 neighborhood.""" def compute_downscale_factor(frame_width: int, effective_width: int = DEFAULT_MIN_WIDTH) -> int: """Get the optimal default downscale factor based on a video's resolution (currently only the width in pixels is considered). The resulting effective width of the video will be between frame_width and 1.5 * frame_width pixels (e.g. if frame_width is 200, the range of effective widths will be between 200 and 300). Arguments: frame_width: Actual width of the video frame in pixels. effective_width: Desired minimum width in pixels. Returns: int: The default downscale factor to use to achieve at least the target effective_width. """ assert not (frame_width < 1 or effective_width < 1) if frame_width < effective_width: return 1 return frame_width // effective_width def get_scenes_from_cuts(
cut_list: Iterable[FrameTimecode],
6
2023-10-25 02:50:01+00:00
16k
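The VideoStream.seek contract quoted in this record's context uses 0-based frame indices. A hedged sketch, mirroring the `open_video` import from the docstring examples above (the copy vendored in this record lives under backend.scenedetect) and using a hypothetical file path:

from scenedetect import open_video

video = open_video("video.mp4")  # hypothetical local file
video.seek(4)                    # 0-based: position at the 5th frame
frame = video.read()             # decode it; frame_number is now 5, per the docstring above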
EulerSearch/embedding_studio
embedding_studio/models/plugin.py
[ { "identifier": "ClickstreamParser", "path": "embedding_studio/embeddings/data/clickstream/parsers/parser.py", "snippet": "class ClickstreamParser(object):\n # TODO: annotate types precisely\n def __init__(\n self,\n query_item_type: type,\n search_result_type: type,\n meta_type: type,\n event_type: type,\n ):\n self.query_item_type = query_item_type\n self.search_result_type = search_result_type\n self.meta_type = meta_type\n self.event_type = event_type\n\n def parse(self, session_data: Dict) -> RawClickstreamSession:\n return RawClickstreamSession.from_dict(\n session_data,\n self.query_item_type,\n self.search_result_type,\n self.meta_type,\n self.event_type,\n )\n\n # TODO: merge schemas\n def parse_from_mongo(\n self, session_data: SessionWithEvents\n ) -> RawClickstreamSession:\n return RawClickstreamSession.from_mongo(\n session_data,\n self.query_item_type,\n self.search_result_type,\n self.meta_type,\n self.event_type,\n )" }, { "identifier": "QueryRetriever", "path": "embedding_studio/embeddings/data/clickstream/query_retriever.py", "snippet": "class QueryRetriever(object):\n \"\"\"As we can't exactly predict a schema of storing queries:\n 1. As text exceptly in clickstream service\n 2. As ID of a record with a text\n 3. As a path to an image\n\n We provide an ability to use any query item. So, a user can specify any.\n\n \"\"\"\n\n def setup(self, clickstream_sessions: List[ClickstreamSession]):\n pass\n\n def __call__(self, query: QueryItem):\n return query" }, { "identifier": "ClickstreamSessionsSplitter", "path": "embedding_studio/embeddings/data/clickstream/splitter.py", "snippet": "class ClickstreamSessionsSplitter:\n def __init__(\n self,\n test_size_ratio: float = 0.2,\n shuffle: bool = True,\n random_state: Optional[int] = None,\n ):\n \"\"\"Generate train / test clickstream sessions split.\n\n :param test_size_ratio: ratio of test split size (default: 0.2)\n :param shuffle: to shuffle or not paired clickstream sessions (default: True)\n :param random_state: random state to sklearn splitter (default: None)\n \"\"\"\n if (\n not isinstance(test_size_ratio, float)\n or test_size_ratio <= 0\n or test_size_ratio >= 1.0\n ):\n raise ValueError(\n f\"test_size_ration is a numeric value in range (0.0, 1.0)\"\n )\n\n if test_size_ratio >= 0.5:\n logger.warning(\n \"test_size_ration is larger than 0.5. 
It's unusual for ML to have test size > train size.\"\n )\n\n self._test_size_ratio = test_size_ratio\n\n if not isinstance(shuffle, bool):\n raise ValueError(\"shuffle should be boolean\")\n self._shuffle = shuffle\n self._random_state = random_state\n\n @property\n def shuffle(self) -> bool:\n return self._shuffle\n\n def split(self, sessions: List[ClickstreamSession]) -> DatasetDict:\n \"\"\"Split clickstream sessions.\n\n :param sessions: sessions to be split\n :return: train / test splits accordingly (PairedClickstreamDataset)\n \"\"\"\n # Get all IDs\n all_result_ids: Set[str] = set()\n for session in sessions:\n all_result_ids.update(session.results)\n\n if len(all_result_ids) == 0:\n raise ValueError(\"Sessions list is empty\")\n\n # Ensure a minimum number of unique result IDs in each set\n min_unique_test_sessions: int = int(\n self._test_size_ratio * len(sessions)\n )\n\n # Split the result IDs into train and test sets\n train_result_ids, test_result_ids = train_test_split(\n list(all_result_ids),\n test_size=self._test_size_ratio,\n random_state=self._random_state,\n )\n test_result_ids: Set[str] = set(test_result_ids)\n\n # Split sessions into train and test based on result IDs\n train_sessions: List[ClickstreamSession] = []\n test_sessions: List[ClickstreamSession] = []\n\n for session in sessions:\n if len(session.results) == 0:\n continue\n\n if (\n len(set(session.results) & test_result_ids)\n / len(session.results)\n <= 0.5\n ):\n # If less than 50% of result IDs intersect with the test set, add to the train set\n train_sessions.append(session)\n else:\n test_sessions.append(session)\n\n if len(test_sessions) < min_unique_test_sessions:\n logger.warning(\n f\"Clickstream sessions intersects highly, so they are not split well\"\n )\n random_train_session_indexess: List[int] = random.choices(\n list(range(len(train_sessions))),\n k=min_unique_test_sessions - len(test_sessions),\n )\n for i in reversed(sorted(random_train_session_indexess)):\n test_sessions.append(train_sessions.pop(i))\n\n if len(test_sessions) + len(train_sessions) < len(sessions):\n missed_sessions_count = len(sessions) - (\n len(test_sessions) + len(train_sessions)\n )\n logger.warning(\n f\"Clickstream sessions weren't split correctly, add {missed_sessions_count} more sessions to the train split.\"\n )\n\n for session in sessions:\n if (\n session not in train_sessions\n and session not in test_sessions\n ):\n train_sessions.append(session)\n\n return DatasetDict(\n {\n \"train\": PairedClickstreamDataset(\n train_sessions, self.shuffle\n ),\n \"test\": PairedClickstreamDataset(test_sessions, self.shuffle),\n }\n )" }, { "identifier": "DataLoader", "path": "embedding_studio/embeddings/data/loaders/data_loader.py", "snippet": "class DataLoader(ABC):\n def __init__(self, **kwargs):\n pass\n\n @abstractmethod\n def load(self, items_data: List[ItemMeta]) -> Dataset:\n raise NotImplemented" }, { "identifier": "RankingData", "path": "embedding_studio/embeddings/data/ranking_data.py", "snippet": "class RankingData:\n def __init__(self, clickstream: DatasetDict, items: DatasetDict):\n self.clickstream = clickstream\n self.items = items" }, { "identifier": "ItemStorageProducer", "path": "embedding_studio/embeddings/data/storages/producer.py", "snippet": "class ItemStorageProducer:\n def __init__(\n self,\n preprocessor: ItemsDatasetDictPreprocessor,\n id_field_name: Optional[str] = None,\n ):\n \"\"\"Preprocess and split dataset with train/test clickstream sessions.\n\n :param preprocessor: items dataset dict 
preprocessing\n :param id_field_name: specified field name ID (default: None)\n \"\"\"\n self.preprocessor = preprocessor\n self._id_field_name = (\n id_field_name\n if id_field_name is not None\n else preprocessor.get_id_field_name()\n )\n\n @property\n def id_field_name(self) -> str:\n return self._id_field_name\n\n def _preprocess(self, dataset: DatasetDict) -> DatasetDict:\n logger.debug(\"Prerprocess a dataset\")\n return self.preprocessor.convert(dataset)\n\n def __call__(\n self,\n dataset: Union[Dataset, DatasetDict],\n clickstream_dataset: DatasetDict,\n ) -> DatasetDict:\n \"\"\"Split dataset with train_clickstream / test_clickstream\n\n :param dataset: dataset to be split\n :param clickstream_dataset: train /test clickstream sessions (PairedClickstreamDataset)\n :return: split dataset\n \"\"\"\n\n if not (\n isinstance(clickstream_dataset[\"train\"], PairedClickstreamDataset)\n and isinstance(\n clickstream_dataset[\"test\"], PairedClickstreamDataset\n )\n ):\n raise ValueError(\n \"clickstream_dataset values should be instances of PairedClickstreamDataset\"\n )\n\n if isinstance(dataset, Dataset):\n train_ids: Set[str] = clickstream_dataset[\n \"train\"\n ].irrelevant_ids.union(\n clickstream_dataset[\"train\"].not_irrelevant_ids\n )\n\n if len(train_ids) == 0:\n raise ValueError(\"Train clickstream is empty\")\n\n test_ids: Set[str] = clickstream_dataset[\n \"test\"\n ].irrelevant_ids.union(\n clickstream_dataset[\"test\"].not_irrelevant_ids\n )\n\n if len(test_ids) == 0:\n raise ValueError(\"Train clickstream is empty\")\n\n split_dataset: DatasetDict = DatasetDict(\n {\n \"train\": dataset.filter(\n lambda example: example[self.id_field_name]\n in train_ids\n ),\n \"test\": dataset.filter(\n lambda example: example[self.id_field_name] in test_ids\n ),\n }\n )\n\n else:\n logger.warning(f\"Provided dataset is already split\")\n split_dataset: DatasetDict = dataset\n\n return self._preprocess(split_dataset)" }, { "identifier": "DatasetFieldsNormalizer", "path": "embedding_studio/embeddings/data/utils/fields_normalizer.py", "snippet": "class DatasetFieldsNormalizer:\n ID_FIELD_NAME = \"item_id\"\n ITEM_FIELD_NAME = \"item\"\n\n def __init__(self, item_field_name: str, id_field_name: str):\n \"\"\"Unify column names in DatasetDict, so it can be used in fine-tuning script.\n A dataset should have ID column, related to ID in clickstream.\n\n :param item_field_name: name of column with items.\n :param id_field_name: name of ID column\n \"\"\"\n if not id_field_name:\n raise ValueError(\"id_field_name should be non-empty string\")\n self.id_field_name = id_field_name\n\n if not item_field_name:\n raise ValueError(\"item_field_name should be non-empty string\")\n self.item_field_name = item_field_name\n\n def __call__(self, dataset: DatasetDict) -> DatasetDict:\n id_normalizer = (\n lambda id_value: str(id_value.item())\n if (\n isinstance(id_value, Tensor)\n or isinstance(id_value, FloatTensor)\n )\n else str(id_value)\n )\n for key in dataset.keys():\n if (\n DatasetFieldsNormalizer.ID_FIELD_NAME\n not in dataset.column_names[key]\n ):\n dataset = dataset.rename_column(\n self.id_field_name, DatasetFieldsNormalizer.ID_FIELD_NAME\n )\n else:\n logger.warning(\n f\"Dataset {key} split already has {DatasetFieldsNormalizer.ID_FIELD_NAME} field\"\n )\n\n if (\n DatasetFieldsNormalizer.ITEM_FIELD_NAME\n not in dataset.column_names[key]\n ):\n dataset = dataset.rename_column(\n self.item_field_name,\n DatasetFieldsNormalizer.ITEM_FIELD_NAME,\n )\n else:\n logger.warning(\n f\"Dataset 
{key} split already has {DatasetFieldsNormalizer.ITEM_FIELD_NAME} field\"\n )\n\n return dataset.map(\n lambda example: {\n DatasetFieldsNormalizer.ID_FIELD_NAME: id_normalizer(\n example[DatasetFieldsNormalizer.ID_FIELD_NAME]\n )\n }\n )" }, { "identifier": "ExperimentsManager", "path": "embedding_studio/workers/fine_tuning/experiments/experiments_tracker.py", "snippet": "class ExperimentsManager:\n def __init__(\n self,\n tracking_uri: str,\n main_metric: str,\n accumulators: List[MetricsAccumulator],\n is_loss: bool = False,\n n_top_runs: int = 10,\n requirements: Optional[str] = None,\n retry_config: Optional[RetryConfig] = None,\n ):\n \"\"\"Wrapper over mlflow package to manage certain fine-tuning experiments.\n\n :param tracking_uri: url of MLFlow server\n :param main_metric: name of main metric that will be used to find best model\n :param accumulators: accumulators of metrics to be logged\n :param is_loss: is main metric loss (if True, then best quality is minimal) (default: False)\n :param n_top_runs: how many hyper params group consider to be used in following tuning steps (default: 10)\n :param requirements: extra requirements to be passed to mlflow.pytorch.log_model (default: None)\n :param retry_config: retry policy (default: None)\n \"\"\"\n if not isinstance(tracking_uri, str) or len(tracking_uri) == 0:\n raise ValueError(\n f\"MLFlow tracking URI value should be a not empty string\"\n )\n mlflow.set_tracking_uri(tracking_uri)\n self._tracking_uri = tracking_uri\n if self._tracking_uri.endswith(\"/\"):\n self._tracking_uri = self._tracking_uri[:-1]\n\n self.retry_config = (\n retry_config\n if retry_config\n else ExperimentsManager._get_default_retry_config()\n )\n self.attempt_exception_types = [RestException]\n\n if not isinstance(main_metric, str) or len(main_metric) == 0:\n raise ValueError(f\"main_metric value should be a not empty string\")\n self.main_metric = main_metric\n self._metric_field = f\"metrics.{self.main_metric}\"\n\n self._n_top_runs = n_top_runs\n self._is_loss = is_loss\n\n if len(accumulators) == 0:\n logger.warning(\n \"No accumulators were provided, there will be no metrics logged except loss\"\n )\n self._accumulators = accumulators\n\n self._requirements: List[str] = (\n _get_base_requirements() if requirements is None else requirements\n )\n\n self._iteration_experiment = None\n self._tuning_iteration = None\n self._tuning_iteration_id = None\n\n self._run = None\n self._run_params = None\n self._run_id = None\n\n def _check_artifact_exists(self, run_id, artifact_path):\n client = mlflow.MlflowClient()\n artifacts = client.list_artifacts(run_id, path=artifact_path)\n return any(artifact.path == artifact_path for artifact in artifacts)\n\n @staticmethod\n def _get_default_retry_config() -> RetryConfig:\n default_retry_params = RetryParams(\n max_attempts=settings.DEFAULT_MAX_ATTEMPTS,\n wait_time_seconds=settings.DEFAULT_WAIT_TIME_SECONDS,\n )\n\n config = RetryConfig(default_params=default_retry_params)\n config[\"log_metric\"] = RetryParams(\n max_attempts=settings.MLFLOW_LOG_METRIC_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_LOG_METRIC_WAIT_TIME_SECONDS,\n )\n config[\"log_param\"] = RetryParams(\n max_attempts=settings.MLFLOW_LOG_PARAM_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_LOG_PARAM_WAIT_TIME_SECONDS,\n )\n config[\"log_model\"] = RetryParams(\n max_attempts=settings.MLFLOW_LOG_MODEL_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_LOG_MODEL_WAIT_TIME_SECONDS,\n )\n config[\"load_model\"] = RetryParams(\n 
max_attempts=settings.MLFLOW_LOAD_MODEL_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_LOAD_MODEL_WAIT_TIME_SECONDS,\n )\n config[\"delete_model\"] = RetryParams(\n max_attempts=settings.MLFLOW_DELETE_MODEL_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_DELETE_MODEL_WAIT_TIME_SECONDS,\n )\n config[\"search_runs\"] = RetryParams(\n max_attempts=settings.MLFLOW_SEARCH_RUNS_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_SEARCH_RUNS_WAIT_TIME_SECONDS,\n )\n config[\"end_run\"] = RetryParams(\n max_attempts=settings.MLFLOW_END_RUN_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_END_RUN_WAIT_TIME_SECONDS,\n )\n config[\"get_run\"] = RetryParams(\n max_attempts=settings.MLFLOW_GET_RUN_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_GET_RUN_WAIT_TIME_SECONDS,\n )\n config[\"search_experiments\"] = RetryParams(\n max_attempts=settings.MLFLOW_SEARCH_EXPERIMENTS_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_SEARCH_EXPERIMENTS_WAIT_TIME_SECONDS,\n )\n config[\"delete_experiment\"] = RetryParams(\n max_attempts=settings.MLFLOW_DELETE_EXPERIMENT_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_DELETE_EXPERIMENT_WAIT_TIME_SECONDS,\n )\n config[\"create_experiment\"] = RetryParams(\n max_attempts=settings.MLFLOW_CREATE_EXPERIMENT_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_CREATE_EXPERIMENT_WAIT_TIME_SECONDS,\n )\n config[\"get_experiment\"] = RetryParams(\n max_attempts=settings.MLFLOW_GET_EXPERIMENT_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_GET_EXPERIMENT_WAIT_TIME_SECONDS,\n )\n\n return config\n\n @property\n def is_loss(self) -> bool:\n return self._is_loss\n\n def __del__(self):\n self.finish_run()\n self.finish_iteration()\n\n def is_retryable_error(self, e: Exception) -> bool:\n return False\n\n def _get_model_exists_filter(self) -> str:\n return \"metrics.model_uploaded = 1\"\n\n def _get_artifact_url(self, run_id: str, artifact_path: str) -> str:\n return (\n f\"{self._tracking_uri}/get-artifact?path=\"\n f'{urllib.parse.quote(artifact_path, safe=\"\")}&run_uuid={run_id}'\n )\n\n @retry_method(name=\"log_model\")\n def upload_initial_model(self, model: EmbeddingsModelInterface):\n \"\"\"Upload the very first, initial model to the mlflow server\n\n :param model: model to be uploaded\n \"\"\"\n self.finish_iteration()\n experiment_id = get_experiment_id_by_name(INITIAL_EXPERIMENT_NAME)\n if experiment_id is None:\n logger.info(\n f\"Can't find any active iteration with name: {INITIAL_EXPERIMENT_NAME}\"\n )\n try:\n logger.info(\"Create initial experiment\")\n mlflow.create_experiment(INITIAL_EXPERIMENT_NAME)\n except MlflowException as e:\n if \"Cannot set a deleted experiment\" in str(e):\n logger.error(\n f\"Creation of initial experiment is failed: experiment with the same name {INITIAL_EXPERIMENT_NAME} is deleted, but not archived\"\n )\n experiments = mlflow.search_experiments(\n view_type=mlflow.entities.ViewType.ALL\n )\n deleted_experiment_id = None\n\n for exp in experiments:\n if exp.name == INITIAL_EXPERIMENT_NAME:\n deleted_experiment_id = exp.experiment_id\n break\n\n logger.info(\n f\"Restore deleted experiment with the same name: {INITIAL_EXPERIMENT_NAME}\"\n )\n mlflow.tracking.MlflowClient().restore_experiment(\n deleted_experiment_id\n )\n logger.info(\n f\"Archive deleted experiment with the same name: {INITIAL_EXPERIMENT_NAME}\"\n )\n mlflow.tracking.MlflowClient().rename_experiment(\n deleted_experiment_id,\n INITIAL_EXPERIMENT_NAME + \"_archive\",\n )\n logger.info(\n f\"Delete archived experiment with the same name: {INITIAL_EXPERIMENT_NAME}\"\n )\n 
mlflow.delete_experiment(deleted_experiment_id)\n logger.info(f\"Create initial experiment\")\n mlflow.create_experiment(INITIAL_EXPERIMENT_NAME)\n else:\n raise e\n\n with mlflow.start_run(\n experiment_id=get_experiment_id_by_name(INITIAL_EXPERIMENT_NAME),\n run_name=INITIAL_RUN_NAME,\n ) as run:\n logger.info(\n f\"Upload initial model to {INITIAL_EXPERIMENT_NAME} / {INITIAL_RUN_NAME}\"\n )\n if self._check_artifact_exists(\n get_run_id_by_name(\n get_experiment_id_by_name(INITIAL_EXPERIMENT_NAME),\n INITIAL_RUN_NAME,\n ),\n \"model\",\n ):\n logger.info(\"Model is already uploaded\")\n return\n\n mlflow.pytorch.log_model(\n model, \"model\", pip_requirements=self._requirements\n )\n logger.info(\"Uploading is finished\")\n\n @retry_method(name=\"load_model\")\n def download_initial_model(self) -> EmbeddingsModelInterface:\n \"\"\"Download initial model.\n\n :return: initial embeddings model\n \"\"\"\n model_uri: str = f\"runs:/{get_run_id_by_name(get_experiment_id_by_name(INITIAL_EXPERIMENT_NAME), INITIAL_RUN_NAME)}/model\"\n logger.info(f\"Download the model from {model_uri}\")\n model = mlflow.pytorch.load_model(model_uri)\n logger.info(\"Downloading is finished\")\n return model\n\n @retry_method(name=\"search_runs\")\n def get_top_params(self) -> Optional[List[FineTuningParams]]:\n \"\"\"Get top N previous fine-tuning iteration best params\n\n :return: fine-tuning iteration params\n \"\"\"\n initial_id: Optional[str] = get_experiment_id_by_name(\n INITIAL_EXPERIMENT_NAME\n )\n last_session_id: Optional[str] = self.get_previous_iteration_id()\n if initial_id == last_session_id:\n logger.warning(\n \"Can't retrieve top params, no previous iteration in history\"\n )\n return None\n\n else:\n runs: pd.DataFrame = mlflow.search_runs(\n experiment_ids=[last_session_id],\n filter_string=self._get_model_exists_filter(),\n )\n runs = runs[runs.status == \"FINISHED\"] # and only finished ones\n if runs.shape[0] == 0:\n logger.warning(\n \"Can't retrieve top params, no previous iteration's finished runs with uploaded model in history\"\n )\n return None\n\n # Get the indices that would sort the DataFrame based on the specified parameter\n sorted_indices: np.ndarray = np.argsort(\n runs[self._metric_field].values\n )\n if not self.is_loss:\n sorted_indices = sorted_indices[\n ::-1\n ] # Use [::-1] to sort in descending order\n\n # Extract the top N rows based on the sorted indices\n top_n_rows: np.ndarray = runs.iloc[\n sorted_indices[: self._n_top_runs]\n ]\n\n # Define a mapping dictionary to remove the \"params.\" prefix\n column_mapping: Dict[str, str] = {\n col: col.replace(\"params.\", \"\") for col in top_n_rows.columns\n }\n\n # Rename the columns\n top_n_rows: np.ndarray = top_n_rows.rename(\n columns=column_mapping\n ).to_dict(orient=\"records\")\n\n return [FineTuningParams(**row) for row in top_n_rows]\n\n def _get_best_previous_run_id(self) -> Tuple[Optional[str], bool]:\n initial_id: Optional[str] = get_experiment_id_by_name(\n INITIAL_EXPERIMENT_NAME\n )\n last_session_id: Optional[str] = self.get_previous_iteration_id()\n if initial_id == last_session_id or last_session_id is None:\n return None, True\n else:\n run_id, _ = self._get_best_quality(last_session_id)\n return run_id, False\n\n def _get_best_current_run_id(self) -> Tuple[Optional[str], bool]:\n initial_id: Optional[str] = get_experiment_id_by_name(\n INITIAL_EXPERIMENT_NAME\n )\n if (\n initial_id == self._tuning_iteration_id\n or self._tuning_iteration_id is None\n ):\n return None, True\n else:\n run_id, _ = 
self._get_best_quality(self._tuning_iteration_id)\n return run_id, False\n\n @retry_method(name=\"load_model\")\n def get_last_model_url(self) -> Optional[str]:\n run_id, is_initial = self._get_best_previous_run_id()\n if is_initial:\n logger.warning(\n \"Can't get the best model URL, no previous iteration in history\"\n )\n return None\n else:\n if run_id is None:\n logger.warning(\n \"Can't get the best model URL, no previous iterations \"\n \"finished runs with uploaded model in history\"\n )\n return None\n path = MODEL_ARTIFACT_PATH\n return self._get_artifact_url(run_id, path)\n\n @retry_method(name=\"load_model\")\n def get_current_model_url(self) -> Optional[str]:\n run_id, is_initial = self._get_best_current_run_id()\n if is_initial:\n logger.warning(\n \"Can't get the best model URL, current run is initial\"\n )\n return None\n\n if run_id is None:\n logger.warning(\n \"Can't get the best model URL, no iterations \"\n \"finished runs with uploaded model in history\"\n )\n return None\n path = MODEL_ARTIFACT_PATH\n return self._get_artifact_url(run_id, path)\n\n @retry_method(name=\"load_model\")\n def get_last_model(self) -> EmbeddingsModelInterface:\n \"\"\"Get previous iteration best embedding model.\n\n :return: best embedding model\n \"\"\"\n run_id, is_initial = self._get_best_previous_run_id()\n if is_initial:\n logger.warning(\n \"Download initial model, no previous iteration in history\"\n )\n return self.download_initial_model()\n\n else:\n if run_id is None:\n logger.warning(\n \"Download initial model, no previous iteration's \"\n \"finished runs with uploaded model in history\"\n )\n return self.download_initial_model()\n else:\n model_uri: str = f\"runs:/{run_id}/model\"\n logger.info(f\"Download the model from {model_uri}\")\n model = mlflow.pytorch.load_model(model_uri)\n logger.info(\"Downloading is finished\")\n return model\n\n @retry_method(name=\"load_model\")\n def get_current_model(self) -> Optional[EmbeddingsModelInterface]:\n \"\"\"Get current iteration best embedding model.\n\n :return: best embedding model\n \"\"\"\n if self._tuning_iteration is None:\n logger.error(\"No current iteration, can't get any model\")\n return\n\n if self._tuning_iteration == INITIAL_EXPERIMENT_NAME:\n logger.info(\"Download initial model\")\n return self.download_initial_model()\n\n run_id, is_initial = self._get_best_current_run_id()\n model_uri: str = f\"runs:/{run_id}/model\"\n logger.info(f\"Download the model from {model_uri}\")\n model = mlflow.pytorch.load_model(model_uri)\n logger.info(\"Downloading is finished\")\n return model\n\n @retry_method(name=\"search_experiments\")\n def get_previous_iteration_id(self) -> Optional[str]:\n if (\n self._tuning_iteration == INITIAL_EXPERIMENT_NAME\n or self._tuning_iteration is None\n ):\n logger.warning(\n f\"Can't find previous iteration - no current iteration was setup\"\n )\n return None\n\n plugin_name = f\"{self._tuning_iteration.plugin_name}\"\n experiments: List[Experiment] = [\n e\n for e in mlflow.search_experiments()\n if (\n e.name.startswith(EXPERIMENT_PREFIX)\n and e.name.find(plugin_name) != -1\n and e.name != str(self._tuning_iteration)\n )\n ]\n if len(experiments) == 0:\n logger.warning(\"No iteration found\")\n return None\n else:\n return max(\n experiments, key=lambda exp: exp.creation_time\n ).experiment_id\n\n @retry_method(name=\"delete_experiment\")\n def delete_previous_iteration(self):\n experiment_id: Optional[str] = self.get_previous_iteration_id()\n\n logger.info(\"Delete models of previous 
iteration.\")\n runs = mlflow.search_runs(\n experiment_ids=[experiment_id],\n filter_string=self._get_model_exists_filter(),\n )\n runs = runs[runs.status == \"FINISHED\"]\n run_ids = runs[\"run_id\"].tolist()\n\n for run_id in run_ids:\n self.delete_model(run_id, experiment_id)\n\n if experiment_id is not None:\n logger.info(\n f\"Iteration with ID {experiment_id} is going to be deleted\"\n )\n mlflow.tracking.MlflowClient().rename_experiment(\n experiment_id, INITIAL_EXPERIMENT_NAME + \"_archive\"\n )\n mlflow.delete_experiment(experiment_id)\n else:\n logger.warning(\n \"Can't delete a previous iteration, no previous iteration in history\"\n )\n\n @retry_method(name=\"create_experiment\")\n def set_iteration(self, iteration: FineTuningIteration):\n \"\"\"Start a new fine-tuning session.\n\n :param iteration: fine-tuning iteration info\n \"\"\"\n if self._tuning_iteration == INITIAL_EXPERIMENT_NAME:\n self.finish_iteration()\n\n logger.info(\"Start a new fine-tuning iterations\")\n\n self._tuning_iteration = iteration\n self._tuning_iteration_id = get_experiment_id_by_name(str(iteration))\n if self._tuning_iteration_id is None:\n self._tuning_iteration_id = mlflow.create_experiment(\n str(iteration)\n )\n\n self._iteration_experiment = mlflow.set_experiment(\n experiment_id=self._tuning_iteration_id\n )\n\n @retry_method(name=\"start_run\")\n def set_run(self, params: FineTuningParams) -> bool:\n \"\"\"Start a new run with provided fine-tuning params\n\n :param params: provided fine-tuning params\n :return: True if it's a finished run (otherwise False)\n \"\"\"\n convert_value = (\n lambda value: \", \".join(map(str, value))\n if isinstance(value, list)\n else value\n )\n\n if self._tuning_iteration == INITIAL_EXPERIMENT_NAME:\n # TODO: implement exception\n raise ValueError(\"You can't start run for initial iteration\")\n\n if self._run is not None:\n self.finish_run()\n\n logger.info(\n f\"Start a new run for iteration {self._tuning_iteration_id} with params:\\n\\t{str(params)}\"\n )\n\n self._run_params = params\n run_name: str = self._run_params.id\n self._run_id = get_run_id_by_name(self._tuning_iteration_id, run_name)\n\n self._run = mlflow.start_run(\n self._run_id, self._tuning_iteration_id, run_name\n )\n if self._run_id is None:\n self._run_id = self._run.info.run_id\n for key, value in dict(self._tuning_iteration).items():\n mlflow.log_param(key, convert_value(value))\n\n for key, value in dict(self._run_params).items():\n mlflow.log_param(key, convert_value(value))\n\n mlflow.log_metric(\"model_uploaded\", 0)\n\n return False\n else:\n return self._run.info.status == \"FINISHED\"\n\n @retry_method(name=\"search_runs\")\n def model_is_uploaded(self) -> bool:\n runs: pd.DataFrame = mlflow.search_runs(\n experiment_ids=[self._tuning_iteration_id],\n filter_string=self._get_model_exists_filter(),\n )\n runs = runs[runs[\"run_id\"] == self._run_id]\n return runs.shape[0] > 0\n\n @retry_method(name=\"get_experiment\")\n def finish_iteration(self):\n logger.info(f\"Finish current iteration {self._tuning_iteration_id}\")\n self._tuning_iteration = INITIAL_EXPERIMENT_NAME\n self._tuning_iteration_id = get_experiment_id_by_name(\n INITIAL_EXPERIMENT_NAME\n )\n\n if self._tuning_iteration_id is None:\n self._iteration_experiment = mlflow.set_experiment(\n experiment_name=INITIAL_EXPERIMENT_NAME\n )\n self._tuning_iteration_id = (\n self._iteration_experiment.experiment_id\n )\n else:\n self._iteration_experiment = mlflow.set_experiment(\n experiment_id=self._tuning_iteration_id\n )\n\n 
logger.info(f\"Current iteration is finished\")\n\n @retry_method(name=\"end_run\")\n def finish_run(self):\n logger.info(\n f\"Finish current run {self._tuning_iteration_id} / {self._run_id}\"\n )\n for accumulator in self._accumulators:\n accumulator.clear()\n\n mlflow.end_run()\n\n # Set params to default None\n self._run = None\n self._run_params = None\n self._run_id = None\n\n logger.info(f\"Current run is finished\")\n\n @retry_method(name=\"log_param\")\n def _set_model_as_deleted(self, run_id: str, experiment_id: str):\n with mlflow.start_run(\n run_id=run_id, experiment_id=experiment_id\n ) as run:\n mlflow.log_metric(\"model_deleted\", 1)\n mlflow.log_metric(\"model_uploaded\", 0)\n\n @retry_method(name=\"delete_model\")\n def _delete_model(self, run_id: str, experiment_id: str) -> bool:\n logger.warning(\n f\"Unable to delete a model for run {run_id}, MLFlow has no such functionality, please implement on your own.\"\n )\n return False\n\n @retry_method(name=\"get_run\")\n def delete_model(self, run_id: str, experiment_id: Optional[str] = None):\n experiment_id = (\n self._tuning_iteration_id\n if experiment_id is None\n else experiment_id\n )\n if experiment_id is None:\n raise ValueError(\n f\"No iteration was initialized, unable to delete model.\"\n )\n\n if experiment_id == INITIAL_EXPERIMENT_NAME:\n raise ValueError(f\"Initial model can't be deleted.\")\n\n run_info = None\n try:\n run_info = mlflow.get_run(run_id=run_id)\n except RestException as e:\n if e.get_http_status_code() == 404:\n logger.exception(f\"Run with ID {run_id} doesn't exist.\")\n else:\n raise e\n\n if run_info is not None:\n runs: pd.DataFrame = mlflow.search_runs(\n filter_string=self._get_model_exists_filter()\n )\n runs = runs[runs[\"run_id\"] == run_id]\n if runs.shape[0] == 0:\n logger.warning(\n f\"Run {run_id} has no model being uploaded. 
Nothing to delete\"\n )\n\n else:\n deleted = None\n try:\n deleted = self._delete_model(run_id, experiment_id)\n except MaxAttemptsReachedException:\n pass\n\n if deleted:\n self._set_model_as_deleted(run_id, experiment_id)\n\n @retry_method(name=\"log_model\")\n def save_model(\n self, model: EmbeddingsModelInterface, best_only: bool = True\n ):\n \"\"\"Save fine-tuned embedding model\n\n :param model: model to be saved\n :param best_only: save only if it's the best (default: True)\n \"\"\"\n if self._tuning_iteration == INITIAL_EXPERIMENT_NAME:\n raise ValueError(\n f\"Can't save not initial model for {INITIAL_EXPERIMENT_NAME} experiment\"\n )\n\n if self._run_id is None:\n raise ValueError(\"There is no current Run\")\n\n logger.info(\n f\"Save model for {self._tuning_iteration_id} / {self._run_id}\"\n )\n if not best_only:\n mlflow.pytorch.log_model(\n model, \"model\", pip_requirements=self._requirements\n )\n mlflow.log_metric(\"model_uploaded\", 1)\n logger.info(\"Upload is finished\")\n else:\n current_quality = self.get_quality()\n best_run_id, best_quality = self.get_best_quality()\n\n if best_run_id is None or (\n current_quality <= best_quality\n if self.is_loss\n else current_quality >= best_quality\n ):\n mlflow.pytorch.log_model(\n model, \"model\", pip_requirements=self._requirements\n )\n mlflow.log_metric(\"model_uploaded\", 1)\n logger.info(\"Upload is finished\")\n\n if best_run_id is not None:\n self.delete_model(best_run_id)\n else:\n logger.info(\"Not the best run - ignore saving\")\n\n @retry_method(name=\"log_metric\")\n def save_metric(self, metric_value: MetricValue):\n \"\"\"Accumulate and save metric value\n\n :param metric_value: value to be logged\n \"\"\"\n for accumulator in self._accumulators:\n for name, value in accumulator.accumulate(metric_value):\n mlflow.log_metric(name, value)\n\n @retry_method(name=\"search_runs\")\n def get_quality(self) -> float:\n \"\"\"Current run quality value\n\n :return: quality value\n \"\"\"\n if self._tuning_iteration == INITIAL_EXPERIMENT_NAME:\n raise ValueError(\n f\"No metrics for {INITIAL_EXPERIMENT_NAME} experiment\"\n )\n\n if self._run_id is None:\n raise ValueError(\"There is no current Run\")\n\n runs: pd.DataFrame = mlflow.search_runs(\n experiment_ids=[self._tuning_iteration_id]\n )\n quality: np.ndarray = runs[runs.run_id == self._run_id][\n self._metric_field\n ]\n return float(quality) if quality.shape[0] == 1 else float(quality[0])\n\n @retry_method(name=\"search_runs\")\n def _get_best_quality(\n self, experiment_id: str\n ) -> Tuple[Optional[str], float]:\n runs: pd.DataFrame = mlflow.search_runs(\n experiment_ids=[experiment_id],\n filter_string=self._get_model_exists_filter(),\n )\n runs = runs[runs.status == \"FINISHED\"] # and not finished ones\n if runs.shape[0] == 0:\n logger.warning(\n \"No finished experiments found with model uploaded, except initial\"\n )\n return None, 0.0\n\n else:\n value: float = (\n runs[self._metric_field].min()\n if self.is_loss\n else runs[self._metric_field].max()\n )\n best: pd.DataFrame = runs[runs[self._metric_field] == value][\n [\"run_id\", self._metric_field]\n ]\n return list(best.itertuples(index=False, name=None))[0]\n\n def get_best_quality(self) -> Tuple[str, float]:\n \"\"\"Get current fine-tuning iteration best quality\n\n :return: run_id and best metric value\n \"\"\"\n if self._tuning_iteration == INITIAL_EXPERIMENT_NAME:\n raise ValueError(\n f\"No metrics for {INITIAL_EXPERIMENT_NAME} experiment\"\n )\n\n return 
self._get_best_quality(self._tuning_iteration_id)" }, { "identifier": "FineTuningSettings", "path": "embedding_studio/workers/fine_tuning/experiments/finetuning_settings.py", "snippet": "class FineTuningSettings(BaseModel):\n \"\"\"\n\n :param loss_func: loss object for a ranking task\n :param metric_calculators: list of trackable metrics calculators (default: None)\n by default only DistanceShift metric\n :param ranker: ranking function (query, items) -> ranks (defult: cosine similarity)\n :param is_similarity: is ranking function similarity like or distance (default: True)\n :param confidence_calculator: function to calculate results confidences (default: dummy_confidences)\n :param step_size: optimizer steps (default: 500)\n :param gamma: optimizers gamma (default: 0.9)\n :param num_epochs: num of training epochs (default: 10)\n :param batch_size: count of sessions in a batch (default: 1)\n :param test_each_n_sessions: frequency of validation, if value in range [0, 1] - used as ratio (default: -1)\n \"\"\"\n\n loss_func: RankingLossInterface\n metric_calculators: Optional[List[MetricCalculator]] = None\n ranker: Optional[\n Callable[[FloatTensor, FloatTensor], FloatTensor]\n ] = COSINE_SIMILARITY\n is_similarity: Optional[bool] = True\n confidence_calculator: Optional[Callable] = dummy_confidences\n step_size: Optional[int] = 500\n gamma: Optional[float] = 0.9\n num_epochs: Optional[int] = 10\n batch_size: Optional[int] = 1\n test_each_n_sessions: Optional[Union[float, int]] = -1\n\n class Config:\n arbitrary_types_allowed = True" }, { "identifier": "MetricsAccumulator", "path": "embedding_studio/workers/fine_tuning/experiments/metrics_accumulator.py", "snippet": "class MetricsAccumulator:\n def __init__(\n self,\n name: str,\n calc_mean: bool = False,\n calc_sliding: bool = False,\n calc_min: bool = False,\n calc_max: bool = False,\n window_size: int = 10,\n ):\n \"\"\"Accumulator of metric values + calculator of aggregations like mean, max, min, sliding_mean.\n\n :param name: metric name (metrics with other name will be ignored)\n :param calc_mean: should accumulator calculate mean value (default: False)\n :param calc_sliding: should accumulator calculate sliding mean value (default: False)\n :param calc_min: should accumulator calculate min value (default: False)\n :param calc_max: should accumulator calculate max value (default: False)\n :param window_size: size of sliding window (default: 10)\n \"\"\"\n if not isinstance(name, str) or len(name) == 0:\n raise ValueError(\"MetricsAccumulator's name should not be empty\")\n\n self._name = name\n\n if not isinstance(calc_mean, bool):\n raise ValueError(\"calc_mean value should be bool\")\n self._calc_mean = calc_mean\n\n if not isinstance(calc_sliding, bool):\n raise ValueError(\"calc_sliding value should be bool\")\n self._calc_sliding = calc_sliding\n\n if not isinstance(calc_min, bool):\n raise ValueError(\"calc_min value should be bool\")\n self._calc_min = calc_min\n\n if not isinstance(calc_max, bool):\n raise ValueError(\"calc_max value should be bool\")\n self._calc_max = calc_max\n\n if not isinstance(window_size, int) or window_size <= 1:\n raise ValueError(\n \"window_size value should be integer with value more than 1\"\n )\n\n self._window_size = window_size\n self._values = []\n\n @property\n def name(self) -> str:\n return self._name\n\n def clear(self):\n \"\"\"Clear accumulator\"\"\"\n self._values = []\n\n def accumulate(self, value: MetricValue) -> List[Tuple[str, float]]:\n \"\"\"Add metric value to an 
accumulator.\n\n :param value: metric to be accumulated\n :return: aggregations\n \"\"\"\n if self.name == value.name:\n self._values.append(value.value)\n\n return self.aggregate()\n\n return []\n\n def aggregate(self) -> List[Tuple[str, float]]:\n \"\"\"Aggregate accumulated metrics\n\n :return: metric aggregations (last, mean, sliding, min, max)\n \"\"\"\n aggregations: List[Tuple[str, float]] = []\n if len(self._values) > 0:\n aggregations.append((self.name, self._values[-1]))\n if self._calc_mean:\n aggregations.append(\n (f\"mean_{self.name}\", float(np.mean(self._values)))\n )\n\n if self._calc_sliding:\n slide_value = float(\n np.mean(self._values)\n if len(self._values) < self._window_size\n else np.mean(self._values[-self._window_size :])\n )\n aggregations.append((f\"sliding_{self.name}\", slide_value))\n\n if self._calc_min:\n aggregations.append((f\"min_{self.name}\", np.min(self._values)))\n\n if self._calc_max:\n aggregations.append((f\"max_{self.name}\", np.max(self._values)))\n\n return aggregations" } ]
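The MetricsAccumulator snippet that closes the context above only shows that accumulated values expose .name and .value; the MetricValue constructor and its import location are assumptions here. A minimal sketch:

from embedding_studio.workers.fine_tuning.experiments.metrics_accumulator import (
    MetricsAccumulator,
    MetricValue,  # assumed to live in the same module as MetricsAccumulator
)

acc = MetricsAccumulator("loss", calc_mean=True, calc_sliding=True, window_size=3)
for loss in [0.9, 0.7, 0.6, 0.5]:
    aggregations = acc.accumulate(MetricValue(name="loss", value=loss))

# Last call returns e.g. [("loss", 0.5), ("mean_loss", 0.675), ("sliding_loss", ~0.6)]
print(aggregations)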
from dataclasses import dataclass from typing import Any, Dict, List, Optional from pydantic import BaseModel from embedding_studio.embeddings.data.clickstream.parsers.parser import ( ClickstreamParser, ) from embedding_studio.embeddings.data.clickstream.query_retriever import ( QueryRetriever, ) from embedding_studio.embeddings.data.clickstream.splitter import ( ClickstreamSessionsSplitter, ) from embedding_studio.embeddings.data.loaders.data_loader import DataLoader from embedding_studio.embeddings.data.ranking_data import RankingData from embedding_studio.embeddings.data.storages.producer import ( ItemStorageProducer, ) from embedding_studio.embeddings.data.utils.fields_normalizer import ( DatasetFieldsNormalizer, ) from embedding_studio.workers.fine_tuning.experiments.experiments_tracker import ( ExperimentsManager, ) from embedding_studio.workers.fine_tuning.experiments.finetuning_settings import ( FineTuningSettings, ) from embedding_studio.workers.fine_tuning.experiments.metrics_accumulator import ( MetricsAccumulator, )
10,875
class PluginMeta(BaseModel): name: str version: str = "1.0.0" description: Optional[str] = None @dataclass class FineTuningBuilder: data_loader: DataLoader query_retriever: QueryRetriever clickstream_parser: ClickstreamParser clickstream_sessions_splitter: ClickstreamSessionsSplitter
class PluginMeta(BaseModel): name: str version: str = "1.0.0" description: Optional[str] = None @dataclass class FineTuningBuilder: data_loader: DataLoader query_retriever: QueryRetriever clickstream_parser: ClickstreamParser clickstream_sessions_splitter: ClickstreamSessionsSplitter
dataset_fields_normalizer: DatasetFieldsNormalizer
6
2023-10-31 00:33:13+00:00
16k
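The context field of the record above carries the complete MetricsAccumulator class (embedding_studio/workers/fine_tuning/experiments/metrics_accumulator.py), and the record's import block imports it directly. A minimal usage sketch of that class, assuming the MetricValue objects passed to accumulate() expose .name and .value attributes as the method reads them (MetricValue itself is not shown in this record, so a hypothetical stand-in dataclass is used here):

from dataclasses import dataclass

from embedding_studio.workers.fine_tuning.experiments.metrics_accumulator import (
    MetricsAccumulator,
)


@dataclass
class MetricValue:  # stand-in with the attribute shape accumulate() expects; not taken from the record
    name: str
    value: float


acc = MetricsAccumulator("train_loss", calc_mean=True, calc_sliding=True, window_size=3)
for v in (0.9, 0.7, 0.6, 0.5):
    aggregations = acc.accumulate(MetricValue("train_loss", v))

# After the last call, aggregations holds the latest value plus the requested
# aggregates: ("train_loss", 0.5), ("mean_train_loss", 0.675), ("sliding_train_loss", 0.6).
# Values accumulated under a different metric name are ignored and an empty list is returned.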
masked-spacetime-hashing/msth
MSTH/SpaceTimeHashing/trainer.py
[ { "identifier": "ExperimentConfig", "path": "nerfstudio/configs/experiment_config.py", "snippet": "class ExperimentConfig(InstantiateConfig):\n \"\"\"Full config contents for running an experiment. Any experiment types (like training) will be\n subclassed from this, and must have their _target field defined accordingly.\"\"\"\n\n output_dir: Path = Path(\"outputs\")\n \"\"\"relative or absolute output directory to save all checkpoints and logging\"\"\"\n method_name: Optional[str] = None\n \"\"\"Method name. Required to set in python or via cli\"\"\"\n experiment_name: Optional[str] = None\n \"\"\"Experiment name. If None, will automatically be set to dataset name\"\"\"\n timestamp: str = \"{timestamp}\"\n \"\"\"Experiment timestamp.\"\"\"\n machine: MachineConfig = MachineConfig()\n \"\"\"Machine configuration\"\"\"\n logging: LoggingConfig = LoggingConfig()\n \"\"\"Logging configuration\"\"\"\n viewer: ViewerConfig = ViewerConfig()\n \"\"\"Viewer configuration\"\"\"\n pipeline: VanillaPipelineConfig = VanillaPipelineConfig()\n \"\"\"Pipeline configuration\"\"\"\n optimizers: Dict[str, Any] = to_immutable_dict(\n {\n \"fields\": {\n \"optimizer\": OptimizerConfig(),\n \"scheduler\": SchedulerConfig(),\n }\n }\n )\n \"\"\"Dictionary of optimizer groups and their schedulers\"\"\"\n vis: Literal[\"viewer\", \"wandb\", \"tensorboard\", \"viewer+wandb\", \"viewer+tensorboard\"] = \"wandb\"\n \"\"\"Which visualizer to use.\"\"\"\n data: Optional[Path] = None\n \"\"\"Alias for --pipeline.datamanager.data\"\"\"\n relative_model_dir: Path = Path(\"nerfstudio_models/\")\n \"\"\"Relative path to save all checkpoints.\"\"\"\n\n def is_viewer_enabled(self) -> bool:\n \"\"\"Checks if a viewer is enabled.\"\"\"\n return (\"viewer\" == self.vis) | (\"viewer+wandb\" == self.vis) | (\"viewer+tensorboard\" == self.vis)\n\n def is_wandb_enabled(self) -> bool:\n \"\"\"Checks if wandb is enabled.\"\"\"\n return (\"wandb\" == self.vis) | (\"viewer+wandb\" == self.vis)\n\n def is_tensorboard_enabled(self) -> bool:\n \"\"\"Checks if tensorboard is enabled.\"\"\"\n return (\"tensorboard\" == self.vis) | (\"viewer+tensorboard\" == self.vis)\n\n def set_timestamp(self) -> None:\n \"\"\"Dynamically set the experiment timestamp\"\"\"\n if self.timestamp == \"{timestamp}\":\n self.timestamp = datetime.now().strftime(\"%Y-%m-%d_%H%M%S\")\n\n def set_experiment_name(self) -> None:\n \"\"\"Dynamically set the experiment name\"\"\"\n if self.experiment_name is None:\n datapath = self.pipeline.datamanager.data\n if datapath is not None:\n datapath = datapath.parent if datapath.is_file() else datapath\n self.experiment_name = str(datapath.stem)\n else:\n self.experiment_name = \"unnamed\"\n\n def get_base_dir(self) -> Path:\n \"\"\"Retrieve the base directory to set relative paths\"\"\"\n # check the experiment and method names\n assert self.method_name is not None, \"Please set method name in config or via the cli\"\n self.set_experiment_name()\n return Path(f\"{self.output_dir}/{self.experiment_name}/{self.method_name}/{self.timestamp}\")\n\n def get_checkpoint_dir(self) -> Path:\n \"\"\"Retrieve the checkpoint directory\"\"\"\n return Path(self.get_base_dir() / self.relative_model_dir)\n\n def print_to_terminal(self) -> None:\n \"\"\"Helper to pretty print config to terminal\"\"\"\n CONSOLE.rule(\"Config\")\n CONSOLE.print(self)\n CONSOLE.rule(\"\")\n\n def save_config(self) -> None:\n \"\"\"Save config to base directory\"\"\"\n base_dir = self.get_base_dir()\n assert base_dir is not None\n base_dir.mkdir(parents=True, 
exist_ok=True)\n config_yaml_path = base_dir / \"config.yml\"\n CONSOLE.log(f\"Saving config to: {config_yaml_path}\")\n config_yaml_path.write_text(yaml.dump(self), \"utf8\")" }, { "identifier": "TrainingCallback", "path": "nerfstudio/engine/callbacks.py", "snippet": "class TrainingCallback:\n \"\"\"Callback class used during training.\n The function 'func' with 'args' and 'kwargs' will be called every 'update_every_num_iters' training iterations,\n including at iteration 0. The function is called after the training iteration.\n\n Args:\n where_to_run: List of locations for when to run callback (before/after iteration)\n func: The function that will be called.\n update_every_num_iters: How often to call the function `func`.\n iters: Tuple of iteration steps to perform callback\n args: args for the function 'func'.\n kwargs: kwargs for the function 'func'.\n \"\"\"\n\n def __init__(\n self,\n where_to_run: List[TrainingCallbackLocation],\n func: Callable,\n update_every_num_iters: Optional[int] = None,\n iters: Optional[Tuple[int, ...]] = None,\n args: Optional[List] = None,\n kwargs: Optional[Dict] = None,\n ):\n assert (\n \"step\" in signature(func).parameters.keys()\n ), f\"'step: int' must be an argument in the callback function 'func': {func.__name__}\"\n self.where_to_run = where_to_run\n self.update_every_num_iters = update_every_num_iters\n self.iters = iters\n self.func = func\n self.args = args if args is not None else []\n self.kwargs = kwargs if kwargs is not None else {}\n\n def run_callback(self, step: int) -> None:\n \"\"\"Callback to run after training step\n\n Args:\n step: current iteration step\n \"\"\"\n if self.update_every_num_iters is not None:\n if step % self.update_every_num_iters == 0:\n self.func(*self.args, **self.kwargs, step=step)\n elif self.iters is not None:\n if step in self.iters:\n self.func(*self.args, **self.kwargs, step=step)\n\n def run_callback_at_location(self, step: int, location: TrainingCallbackLocation) -> None:\n \"\"\"Runs the callback if it's supposed to be run at the given location.\n\n Args:\n step: current iteration step\n location: when to run callback (before/after iteration)\n \"\"\"\n if location in self.where_to_run:\n self.run_callback(step=step)" }, { "identifier": "TrainingCallbackAttributes", "path": "nerfstudio/engine/callbacks.py", "snippet": "class TrainingCallbackAttributes:\n \"\"\"Attributes that can be used to configure training callbacks.\n The callbacks can be specified in the Dataloader or Model implementations.\n Instead of providing access to the entire Trainer object, we only provide these attributes.\n This should be least prone to errors and fairly clean from a user perspective.\"\"\"\n\n # TODO(ethan): type this without circular imports\n optimizers: Optional[InitVar]\n \"\"\"optimizers for training\"\"\"\n grad_scaler: Optional[InitVar]\n \"\"\"gradient scalers\"\"\"\n pipeline: Optional[InitVar]\n \"\"\"reference to training pipeline\"\"\"" }, { "identifier": "TrainingCallbackLocation", "path": "nerfstudio/engine/callbacks.py", "snippet": "class TrainingCallbackLocation(Enum):\n \"\"\"Enum for specifying where the training callback should be run.\"\"\"\n\n BEFORE_TRAIN_ITERATION = auto()\n AFTER_TRAIN_ITERATION = auto()" }, { "identifier": "Optimizers", "path": "nerfstudio/engine/optimizers.py", "snippet": "class Optimizers:\n \"\"\"A set of optimizers.\n\n Args:\n config: The optimizer configuration object.\n param_groups: A dictionary of parameter groups to optimize.\n \"\"\"\n\n def __init__(self, config: 
Dict[str, Any], param_groups: Dict[str, List[Parameter]]) -> None:\n self.config = config\n self.optimizers = {}\n self.schedulers = {}\n self.parameters = {}\n for param_group_name, params in param_groups.items():\n lr_init = config[param_group_name][\"optimizer\"].lr\n self.optimizers[param_group_name] = config[param_group_name][\"optimizer\"].setup(params=params)\n self.parameters[param_group_name] = params\n if config[param_group_name][\"scheduler\"]:\n self.schedulers[param_group_name] = (\n config[param_group_name][\"scheduler\"]\n .setup()\n .get_scheduler(optimizer=self.optimizers[param_group_name], lr_init=lr_init)\n )\n\n def optimizer_step(self, param_group_name: str) -> None:\n \"\"\"Fetch and step corresponding optimizer.\n\n Args:\n param_group_name: name of optimizer to step forward\n \"\"\"\n self.optimizers[param_group_name].step()\n\n def scheduler_step(self, param_group_name: str) -> None:\n \"\"\"Fetch and step corresponding scheduler.\n\n Args:\n param_group_name: name of scheduler to step forward\n \"\"\"\n if self.config.param_group_name.scheduler: # type: ignore\n self.schedulers[param_group_name].step()\n\n def zero_grad_all(self) -> None:\n \"\"\"Zero the gradients for all optimizer parameters.\"\"\"\n for _, optimizer in self.optimizers.items():\n optimizer.zero_grad()\n\n def optimizer_scaler_step_all(self, grad_scaler: GradScaler) -> None:\n \"\"\"Take an optimizer step using a grad scaler.\n\n Args:\n grad_scaler: GradScaler to use\n \"\"\"\n for param_group, optimizer in self.optimizers.items():\n max_norm = self.config[param_group][\"optimizer\"].max_norm\n if max_norm is not None:\n grad_scaler.unscale_(optimizer)\n torch.nn.utils.clip_grad_norm_(self.parameters[param_group], max_norm)\n grad_scaler.step(optimizer)\n\n def optimizer_step_all(self) -> None:\n \"\"\"Run step for all optimizers.\"\"\"\n for param_group, optimizer in self.optimizers.items():\n # note that they key is the parameter name\n max_norm = self.config[param_group][\"optimizer\"].max_norm\n if max_norm is not None:\n torch.nn.utils.clip_grad_norm_(self.parameters[param_group], max_norm)\n optimizer.step()\n\n def scheduler_step_all(self, step: int) -> None:\n \"\"\"Run step for all schedulers.\n\n Args:\n step: the current step\n \"\"\"\n for param_group_name, scheduler in self.schedulers.items():\n scheduler.step()\n # TODO(ethan): clean this up. 
why is there indexing into a list?\n lr = scheduler.get_last_lr()[0]\n writer.put_scalar(name=f\"learning_rate/{param_group_name}\", scalar=lr, step=step)\n\n def load_optimizers(self, loaded_state: Dict[str, Any]) -> None:\n \"\"\"Helper to load the optimizer state from previous checkpoint\n\n Args:\n loaded_state: the state from the previous checkpoint\n \"\"\"\n for k, v in loaded_state.items():\n self.optimizers[k].load_state_dict(v)" }, { "identifier": "VanillaPipeline", "path": "nerfstudio/pipelines/base_pipeline.py", "snippet": "class VanillaPipeline(Pipeline):\n \"\"\"The pipeline class for the vanilla nerf setup of multiple cameras for one or a few scenes.\n\n config: configuration to instantiate pipeline\n device: location to place model and data\n test_mode:\n 'val': loads train/val datasets into memory\n 'test': loads train/test dataset into memory\n 'inference': does not load any dataset into memory\n world_size: total number of machines available\n local_rank: rank of current machine\n\n Attributes:\n datamanager: The data manager that will be used\n model: The model that will be used\n \"\"\"\n\n def __init__(\n self,\n config: VanillaPipelineConfig,\n device: str,\n test_mode: Literal[\"test\", \"val\", \"inference\"] = \"val\",\n world_size: int = 1,\n local_rank: int = 0,\n ):\n super().__init__()\n self.config = config\n self.test_mode = test_mode\n self.datamanager: VanillaDataManager = config.datamanager.setup(\n device=device, test_mode=test_mode, world_size=world_size, local_rank=local_rank\n )\n self.datamanager.to(device)\n # TODO(ethan): get rid of scene_bounds from the model\n assert self.datamanager.train_dataset is not None, \"Missing input dataset\"\n\n self._model = config.model.setup(\n scene_box=self.datamanager.train_dataset.scene_box,\n num_train_data=len(self.datamanager.train_dataset),\n metadata=self.datamanager.train_dataset.metadata,\n )\n self.model.to(device)\n\n self.world_size = world_size\n if world_size > 1:\n self._model = typing.cast(Model, DDP(self._model, device_ids=[local_rank], find_unused_parameters=True))\n dist.barrier(device_ids=[local_rank])\n\n @property\n def device(self):\n \"\"\"Returns the device that the model is on.\"\"\"\n return self.model.device\n\n @profiler.time_function\n def get_train_loss_dict(self, step: int):\n \"\"\"This function gets your training loss dict. 
This will be responsible for\n getting the next batch of data from the DataManager and interfacing with the\n Model class, feeding the data to the model's forward function.\n\n Args:\n step: current iteration step to update sampler if using DDP (distributed)\n \"\"\"\n ray_bundle, batch = self.datamanager.next_train(step)\n model_outputs = self.model(ray_bundle)\n metrics_dict = self.model.get_metrics_dict(model_outputs, batch)\n\n if self.config.datamanager.camera_optimizer is not None:\n camera_opt_param_group = self.config.datamanager.camera_optimizer.param_group\n if camera_opt_param_group in self.datamanager.get_param_groups():\n # Report the camera optimization metrics\n metrics_dict[\"camera_opt_translation\"] = (\n self.datamanager.get_param_groups()[camera_opt_param_group][0].data[:, :3].norm()\n )\n metrics_dict[\"camera_opt_rotation\"] = (\n self.datamanager.get_param_groups()[camera_opt_param_group][0].data[:, 3:].norm()\n )\n\n loss_dict = self.model.get_loss_dict(model_outputs, batch, metrics_dict)\n\n return model_outputs, loss_dict, metrics_dict\n\n def forward(self):\n \"\"\"Blank forward method\n\n This is an nn.Module, and so requires a forward() method normally, although in our case\n we do not need a forward() method\"\"\"\n raise NotImplementedError\n\n @profiler.time_function\n def get_eval_loss_dict(self, step: int):\n \"\"\"This function gets your evaluation loss dict. It needs to get the data\n from the DataManager and feed it to the model's forward function\n\n Args:\n step: current iteration step\n \"\"\"\n self.eval()\n ray_bundle, batch = self.datamanager.next_eval(step)\n model_outputs = self.model(ray_bundle)\n metrics_dict = self.model.get_metrics_dict(model_outputs, batch)\n loss_dict = self.model.get_loss_dict(model_outputs, batch, metrics_dict)\n self.train()\n return model_outputs, loss_dict, metrics_dict\n\n @profiler.time_function\n def get_eval_image_metrics_and_images(self, step: int):\n \"\"\"This function gets your evaluation loss dict. 
It needs to get the data\n from the DataManager and feed it to the model's forward function\n\n Args:\n step: current iteration step\n \"\"\"\n self.eval()\n image_idx, camera_ray_bundle, batch = self.datamanager.next_eval_image(step)\n # print(camera_ray_bundle.shape)\n # print(batch.keys())\n # print(batch[\"image_idx\"].shape)\n # print(batch[\"image\"].shape)\n outputs = self.model.get_outputs_for_camera_ray_bundle(camera_ray_bundle)\n metrics_dict, images_dict = self.model.get_image_metrics_and_images(outputs, batch)\n assert \"image_idx\" not in metrics_dict\n metrics_dict[\"image_idx\"] = image_idx\n assert \"num_rays\" not in metrics_dict\n metrics_dict[\"num_rays\"] = len(camera_ray_bundle)\n self.train()\n return metrics_dict, images_dict\n\n @profiler.time_function\n def get_average_eval_image_metrics(self, step: Optional[int] = None):\n \"\"\"Iterate over all the images in the eval dataset and get the average.\n\n Returns:\n metrics_dict: dictionary of metrics\n \"\"\"\n self.eval()\n metrics_dict_list = []\n num_images = len(self.datamanager.fixed_indices_eval_dataloader)\n with Progress(\n TextColumn(\"[progress.description]{task.description}\"),\n BarColumn(),\n TimeElapsedColumn(),\n MofNCompleteColumn(),\n transient=True,\n ) as progress:\n task = progress.add_task(\"[green]Evaluating all eval images...\", total=num_images)\n for camera_ray_bundle, batch in self.datamanager.fixed_indices_eval_dataloader:\n # time this the following line\n inner_start = time()\n height, width = camera_ray_bundle.shape\n num_rays = height * width\n outputs = self.model.get_outputs_for_camera_ray_bundle(camera_ray_bundle)\n metrics_dict, _ = self.model.get_image_metrics_and_images(outputs, batch)\n assert \"num_rays_per_sec\" not in metrics_dict\n metrics_dict[\"num_rays_per_sec\"] = num_rays / (time() - inner_start)\n fps_str = \"fps\"\n assert fps_str not in metrics_dict\n metrics_dict[fps_str] = metrics_dict[\"num_rays_per_sec\"] / (height * width)\n metrics_dict_list.append(metrics_dict)\n progress.advance(task)\n # average the metrics list\n metrics_dict = {}\n for key in metrics_dict_list[0].keys():\n metrics_dict[key] = float(\n torch.mean(torch.tensor([metrics_dict[key] for metrics_dict in metrics_dict_list]))\n )\n self.train()\n return metrics_dict\n\n def load_pipeline(self, loaded_state: Dict[str, Any], step: int) -> None:\n \"\"\"Load the checkpoint from the given path\n\n Args:\n loaded_state: pre-trained model state dict\n step: training step of the loaded checkpoint\n \"\"\"\n state = {key.replace(\"module.\", \"\"): value for key, value in loaded_state.items()}\n self._model.update_to_step(step)\n self.load_state_dict(state, strict=True)\n\n def get_training_callbacks(\n self, training_callback_attributes: TrainingCallbackAttributes\n ) -> List[TrainingCallback]:\n \"\"\"Returns the training callbacks from both the Dataloader and the Model.\"\"\"\n datamanager_callbacks = self.datamanager.get_training_callbacks(training_callback_attributes)\n model_callbacks = self.model.get_training_callbacks(training_callback_attributes)\n callbacks = datamanager_callbacks + model_callbacks\n return callbacks\n\n def get_param_groups(self) -> Dict[str, List[Parameter]]:\n \"\"\"Get the param groups for the pipeline.\n\n Returns:\n A list of dictionaries containing the pipeline's param groups.\n \"\"\"\n datamanager_params = self.datamanager.get_param_groups()\n model_params = self.model.get_param_groups()\n # TODO(ethan): assert that key names don't overlap\n return {**datamanager_params, 
**model_params}" }, { "identifier": "profiler", "path": "nerfstudio/utils/profiler.py", "snippet": "CONSOLE = Console(width=120)\nPROFILER = []\ndef time_function(func: Callable) -> Callable:\n def wrapper(*args, **kwargs):\ndef flush_profiler(config: cfg.LoggingConfig):\ndef setup_profiler(config: cfg.LoggingConfig):\n def __init__(self, config: cfg.LoggingConfig):\n def update_time(self, func_name: str, start_time: float, end_time: float):\n def print_profile(self):\nclass Profiler:" }, { "identifier": "writer", "path": "nerfstudio/utils/writer.py", "snippet": "CONSOLE = Console(width=120)\nEVENT_WRITERS = []\nEVENT_STORAGE = []\nGLOBAL_BUFFER = {}\n ITER_TRAIN_TIME = \"Train Iter (time)\"\n TOTAL_TRAIN_TIME = \"Train Total (time)\"\n ITER_VIS_TIME = \"Viewer Rendering (time)\"\n ETA = \"ETA (time)\"\n TRAIN_RAYS_PER_SEC = \"Train Rays / Sec\"\n TEST_RAYS_PER_SEC = \"Test Rays / Sec\"\n VIS_RAYS_PER_SEC = \"Vis Rays / Sec\"\n CURR_TEST_PSNR = \"Test PSNR\"\n TRAIN_LOSS = \"Train Loss\"\n TRAIN_PSNR = \"Train PSNR\"\n STATIC_TOTAL_TRAIN_TIME = \"Static Train (time)\"\n DYNAMIC_TOTAL_TRAIN_TIME = \"Dynamic Train (time)\"\n STATIC_ITER_TRAIN_TIME = \"Static Train Iter (time)\"\n DYNAMIC_ITER_TRAIN_TIME = \"Dynamic Train Iter (time)\"\n IMAGE = \"write_image\"\n SCALAR = \"write_scalar\"\n DICT = \"write_scalar_dict\"\n CONFIG = \"write_config\"\nclass EventName(enum.Enum):\nclass EventType(enum.Enum):\nclass Writer:\nclass TimeWriter:\nclass WandbWriter(Writer):\nclass TensorboardWriter(Writer):\nclass LocalWriter:\ndef put_image(name, image: TensorType[\"H\", \"W\", \"C\"], step: int):\ndef put_scalar(name: str, scalar: Any, step: int):\ndef put_dict(name: str, scalar_dict: Dict[str, Any], step: int):\ndef put_config(name: str, config_dict: Dict[str, Any], step: int):\ndef put_time(name: str, duration: float, step: int, avg_over_steps: bool = True, update_eta: bool = False):\ndef write_out_storage():\ndef setup_local_writer(config: cfg.LoggingConfig, max_iter: int, banner_messages: Optional[List[str]] = None) -> None:\ndef setup_event_writer(is_wandb_enabled: bool, is_tensorboard_enabled: bool, log_dir: Path, name: str) -> None:\n def write_image(self, name: str, image: TensorType[\"H\", \"W\", \"C\"], step: int) -> None:\n def write_scalar(self, name: str, scalar: Union[float, torch.Tensor], step: int) -> None:\n def write_scalar_dict(self, name: str, scalar_dict: Dict[str, Any], step: int) -> None:\n def __init__(self, writer, name, step=None, write=True):\n def __enter__(self):\n def __exit__(self, *args):\n def __init__(self, log_dir: Path, name: str = None):\n def write_image(self, name: str, image: TensorType[\"H\", \"W\", \"C\"], step: int) -> None:\n def write_scalar(self, name: str, scalar: Union[float, torch.Tensor], step: int) -> None:\n def write_config(self, name: str, config_dict: Dict[str, Any], step: int):\n def __init__(self, log_dir: Path):\n def write_image(self, name: str, image: TensorType[\"H\", \"W\", \"C\"], step: int) -> None:\n def write_scalar(self, name: str, scalar: Union[float, torch.Tensor], step: int) -> None:\n def write_config(self, name: str, config_dict: Dict[str, Any], step: int): # pylint: disable=unused-argument\ndef _cursorup(x: int):\ndef _format_time(seconds):\n def __init__(self, config: cfg.LocalWriterConfig, banner_messages: Optional[List[str]] = None):\n def write_stats_log(self, step: int) -> None:\n def write_config(self, name: str, config_dict: Dict[str, Any], step: int):\n def _consolidate_events(self):\n def _update_header(self, 
latest_map, new_key):\n def _print_stats(self, latest_map, padding=\" \"):" }, { "identifier": "check_eval_enabled", "path": "nerfstudio/utils/decorators.py", "snippet": "def check_eval_enabled(func: Callable) -> Callable:\n \"\"\"Decorator: check if evaluation step is enabled\"\"\"\n\n def wrapper(self, *args, **kwargs):\n ret = None\n if self.config.is_wandb_enabled() or self.config.is_tensorboard_enabled():\n ret = func(self, *args, **kwargs)\n return ret\n\n return wrapper" }, { "identifier": "check_main_thread", "path": "nerfstudio/utils/decorators.py", "snippet": "def check_main_thread(func: Callable) -> Callable:\n \"\"\"Decorator: check if you are on main thread\"\"\"\n\n def wrapper(*args, **kwargs):\n ret = None\n if comms.is_main_process():\n ret = func(*args, **kwargs)\n return ret\n\n return wrapper" }, { "identifier": "check_viewer_enabled", "path": "nerfstudio/utils/decorators.py", "snippet": "def check_viewer_enabled(func: Callable) -> Callable:\n \"\"\"Decorator: check if viewer is enabled and only run on main process\"\"\"\n\n def wrapper(self, *args, **kwargs):\n ret = None\n if self.config.is_viewer_enabled() and comms.is_main_process():\n ret = func(self, *args, **kwargs)\n return ret\n\n return wrapper" }, { "identifier": "step_check", "path": "nerfstudio/utils/misc.py", "snippet": "def step_check(step, step_size, run_at_zero=False) -> bool:\n \"\"\"Returns true based on current step and step interval.\"\"\"\n if step_size == 0:\n return False\n return (run_at_zero or step != 0) and step % step_size == 0" }, { "identifier": "EventName", "path": "nerfstudio/utils/writer.py", "snippet": "class EventName(enum.Enum):\n \"\"\"Names of possible events that can be logged via Local Writer for convenience.\n see config/logging/default_logging.yaml\"\"\"\n\n ITER_TRAIN_TIME = \"Train Iter (time)\"\n TOTAL_TRAIN_TIME = \"Train Total (time)\"\n ITER_VIS_TIME = \"Viewer Rendering (time)\"\n ETA = \"ETA (time)\"\n TRAIN_RAYS_PER_SEC = \"Train Rays / Sec\"\n TEST_RAYS_PER_SEC = \"Test Rays / Sec\"\n VIS_RAYS_PER_SEC = \"Vis Rays / Sec\"\n CURR_TEST_PSNR = \"Test PSNR\"\n TRAIN_LOSS = \"Train Loss\"\n TRAIN_PSNR = \"Train PSNR\"\n STATIC_TOTAL_TRAIN_TIME = \"Static Train (time)\"\n DYNAMIC_TOTAL_TRAIN_TIME = \"Dynamic Train (time)\"\n STATIC_ITER_TRAIN_TIME = \"Static Train Iter (time)\"\n DYNAMIC_ITER_TRAIN_TIME = \"Dynamic Train Iter (time)\"" }, { "identifier": "TimeWriter", "path": "nerfstudio/utils/writer.py", "snippet": "class TimeWriter:\n \"\"\"Timer context manager that calculates duration around wrapped functions\"\"\"\n\n def __init__(self, writer, name, step=None, write=True):\n self.writer = writer\n self.name = name\n self.step = step\n self.write = write\n\n self.start: float = 0.0\n self.duration: float = 0.0\n\n def __enter__(self):\n self.start = time()\n return self\n\n def __exit__(self, *args):\n self.duration = time() - self.start\n update_step = self.step is not None\n if self.write:\n self.writer.put_time(\n name=self.name,\n duration=self.duration,\n step=self.step if update_step else GLOBAL_BUFFER[\"max_iter\"],\n avg_over_steps=update_step,\n update_eta=self.name == EventName.ITER_TRAIN_TIME,\n )" }, { "identifier": "viewer_utils", "path": "nerfstudio/viewer/server/viewer_utils.py", "snippet": "CONSOLE = Console(width=120)\n INIT = \"init\"\n RGB = \"rgb\"\n RGB_FINE = \"rgb_fine\"\n ACCUMULATION = \"accumulation\"\n ACCUMULATION_FINE = \"accumulation_fine\"\n DEFAULT = \"default\"\n TURBO = \"turbo\"\n VIRIDIS = \"viridis\"\n MAGMA = \"magma\"\n INFERNO = 
\"inferno\"\n CIVIDIS = \"cividis\"\ndef get_viewer_version() -> str:\ndef setup_viewer(config: cfg.ViewerConfig, log_filename: Path, datapath: Path):\n def __init__(self, func):\n def __enter__(self):\n def __exit__(self, ext_type, exc_value, traceback):\n def __init__(self, state: \"ViewerState\", graph: Model, camera_ray_bundle: RayBundle):\n def run(self):\n def join(self, timeout=None):\n def __init__(self, state):\n def run(self):\n def __init__(self, config: cfg.ViewerConfig, log_filename: Path, datapath: Path):\n def _pick_drawn_image_idxs(self, total_num: int) -> list[int]:\n def init_scene(self, dataset: InputDataset, start_train=True) -> None:\n def _check_camera_path_payload(self, trainer, step: int):\n def _check_populate_paths_payload(self, trainer, step: int):\n def _check_webrtc_offer(self):\n def loop_in_thread(loop):\n def _update_render_aabb(self, graph):\n def update_scene(self, trainer, step: int, graph: Model, num_rays_per_batch: int) -> None:\n def check_interrupt(self, frame, event, arg): # pylint: disable=unused-argument\n def _get_camera_object(self):\n def _apply_colormap(self, outputs: Dict[str, Any], colors: torch.Tensor = None, eps=1e-6):\n async def send_webrtc_answer(self, data):\n def set_image(self, image):\n def _send_output_to_viewer(self, outputs: Dict[str, Any], colors: torch.Tensor = None):\n def _update_viewer_stats(self, render_time: float, num_rays: int, image_height: int, image_width: int) -> None:\n def _calculate_image_res(self, camera_object, is_training: bool) -> Optional[Tuple[int, int]]:\n def _process_invalid_output(self, output_type: str) -> str:\n def _render_image_in_viewer(self, camera_object, graph: Model, is_training: bool) -> None:\n def _calculate_rendering_fps(self, camera_object, is_training: bool):\nclass OutputTypes(str, enum.Enum):\nclass ColormapTypes(str, enum.Enum):\nclass IOChangeException(Exception):\nclass SetTrace:\nclass RenderThread(threading.Thread):\nclass CheckThread(threading.Thread):\nclass ViewerState:" }, { "identifier": "Timer", "path": "MSTH/utils.py", "snippet": "class Timer:\n recorder = defaultdict(list)\n\n def __init__(self, des=\"\", verbose=True, record=False) -> None:\n self.des = des\n self.verbose = verbose\n self.record = record\n\n def __enter__(self):\n return self\n self.start = time.time()\n self.start_cuda = torch.cuda.Event(enable_timing=True)\n self.end_cuda = torch.cuda.Event(enable_timing=True)\n self.start_cuda.record()\n return self\n\n def __exit__(self, *args):\n return\n self.end = time.time()\n self.end_cuda.record()\n self.interval = self.end - self.start\n if self.verbose:\n torch.cuda.synchronize()\n print(f\"[cudasync]{self.des} consuming {self.start_cuda.elapsed_time(self.end_cuda)/1000.:.8f}\")\n\n print(f\"{self.des} consuming {self.interval:.8f}\")\n if self.record:\n Timer.recorder[self.des].append(self.interval)\n\n @staticmethod\n def show_recorder():\n pprint(Timer.recorder)" }, { "identifier": "VideoPipeline", "path": "MSTH/video_pipeline.py", "snippet": "def module_wrapper(ddp_or_model: Union[DDP, Model]) -> Model:\n def __init__(\n self,\n config: VanillaPipelineConfig,\n device: str,\n test_mode: Literal[\"test\", \"val\", \"inference\"] = \"val\",\n world_size: int = 1,\n local_rank: int = 0,\n ):\n def device(self):\n def get_static_train_loss_dict(self, step: int):\n def get_dynamic_train_loss_dict(self, step: int):\n def hash_reinitialize(self, step: int, std: float):\n def set_static(self, step: int):\n def get_static_eval_loss_dict(self, step: int):\n def 
get_dynamic_eval_loss_dict(self, step: int):\n def get_eval_image_metrics_and_images(self, step: int):\n def get_cur_frame_eval_mask(self):\n def get_average_eval_image_metrics(self, step: Optional[int] = None):\n def load_pipeline(self, loaded_state: Dict[str, Any], step: int) -> None:\n def get_training_callbacks(\n self, training_callback_attributes: TrainingCallbackAttributes\n ) -> List[TrainingCallback]:\n def get_param_groups(self) -> Dict[str, List[Parameter]]:\n def tick(self):\n def cur_frame(self):\n def num_dynamic_rays(self):\n def num_static_rays(self):\n def get_eval_last_frame(self):\n def get_metric(self, image, rgb):\n def __init__(\n self,\n config: SpaceTimePipelineConfig,\n device: str,\n test_mode: Literal[\"test\", \"val\", \"inference\"] = \"val\",\n world_size: int = 1,\n local_rank: int = 0,\n ):\n def device(self):\n def get_train_loss_dict(self, step: int):\n def get_eval_loss_dict(self, step: int):\n def get_eval_image_metrics_and_images(self, step: int, interval=10, use_fast=False):\n def get_eval_image_metrics_and_images_fast(self, step: int, interval=10, thresh=0.9):\n def get_average_eval_image_metrics(self, step: Optional[int] = None):\n def get_eval_video(self, num_frames=None):\n def render_from_cameras(\n self,\n near=1.0,\n far=5.0,\n num_frames=None,\n cameras=None,\n save_path=None,\n fps=None,\n offset=None,\n render_depth=True,\n ):\n def get_param_groups(self) -> Dict[str, List[Parameter]]:\n def get_training_callbacks(\n self, training_callback_attributes: TrainingCallbackAttributes\n ) -> List[TrainingCallback]:\n def mock_eval(self):\n def load_pipeline(self, loaded_state: Dict[str, Any], step: int) -> None:\nclass VideoPipelineConfig(cfg.InstantiateConfig):\nclass VideoPipeline(Pipeline):\nclass SpaceTimePipelineConfig(cfg.InstantiateConfig):\nclass SpaceTimePipeline(Pipeline):" }, { "identifier": "Trainer", "path": "nerfstudio/engine/trainer.py", "snippet": "class Trainer:\n \"\"\"Trainer class\n\n Args:\n config: The configuration object.\n local_rank: Local rank of the process.\n world_size: World size of the process.\n\n Attributes:\n config: The configuration object.\n local_rank: Local rank of the process.\n world_size: World size of the process.\n device: The device to run the training on.\n pipeline: The pipeline object.\n optimizers: The optimizers object.\n callbacks: The callbacks object.\n \"\"\"\n\n pipeline: VanillaPipeline\n optimizers: Optimizers\n callbacks: List[TrainingCallback]\n\n def __init__(self, config: TrainerConfig, local_rank: int = 0, world_size: int = 1) -> None:\n self.config = config\n self.local_rank = local_rank\n self.world_size = world_size\n self.device: TORCH_DEVICE = \"cpu\" if world_size == 0 else f\"cuda:{local_rank}\"\n self.mixed_precision: bool = self.config.mixed_precision\n if self.device == \"cpu\":\n self.mixed_precision = False\n CONSOLE.print(\"Mixed precision is disabled for CPU training.\")\n self._start_step: int = 0\n # optimizers\n self.grad_scaler = GradScaler(enabled=self.mixed_precision)\n\n self.base_dir: Path = config.get_base_dir()\n # directory to save checkpoints\n self.checkpoint_dir: Path = config.get_checkpoint_dir()\n CONSOLE.log(f\"Saving checkpoints to: {self.checkpoint_dir}\")\n\n self.viewer_state = None\n\n def setup(self, test_mode: Literal[\"test\", \"val\", \"inference\"] = \"val\") -> None:\n \"\"\"Setup the Trainer by calling other setup functions.\n\n Args:\n test_mode:\n 'val': loads train/val datasets into memory\n 'test': loads train/test datasets into memory\n 
'inference': does not load any dataset into memory\n \"\"\"\n self.pipeline = self.config.pipeline.setup(\n device=self.device, test_mode=test_mode, world_size=self.world_size, local_rank=self.local_rank\n )\n self.optimizers = self.setup_optimizers()\n\n self._load_checkpoint()\n\n self.callbacks = self.pipeline.get_training_callbacks(\n TrainingCallbackAttributes(\n optimizers=self.optimizers, # type: ignore\n grad_scaler=self.grad_scaler, # type: ignore\n pipeline=self.pipeline, # type: ignore\n )\n )\n\n # set up viewer if enabled\n viewer_log_path = self.base_dir / self.config.viewer.relative_log_filename\n self.viewer_state, banner_messages = None, None\n if self.config.is_viewer_enabled() and self.local_rank == 0:\n datapath = self.pipeline.datamanager.get_datapath()\n if datapath is None:\n datapath = self.base_dir\n self.viewer_state, banner_messages = viewer_utils.setup_viewer(\n self.config.viewer, log_filename=viewer_log_path, datapath=datapath\n )\n self._check_viewer_warnings()\n # set up writers/profilers if enabled\n writer_log_path = self.base_dir / self.config.logging.relative_log_dir\n writer.setup_event_writer(\n self.config.is_wandb_enabled(),\n self.config.is_tensorboard_enabled(),\n log_dir=writer_log_path,\n name=self.config.wandb_name,\n )\n writer.setup_local_writer(\n self.config.logging, max_iter=self.config.max_num_iterations, banner_messages=banner_messages\n )\n writer.put_config(name=\"config\", config_dict=dataclasses.asdict(self.config), step=0)\n profiler.setup_profiler(self.config.logging)\n\n def setup_optimizers(self) -> Optimizers:\n \"\"\"Helper to set up the optimizers\n\n Returns:\n The optimizers object given the trainer config.\n \"\"\"\n optimizer_config = self.config.optimizers.copy()\n param_groups = self.pipeline.get_param_groups()\n camera_optimizer_config = self.config.pipeline.datamanager.camera_optimizer\n if camera_optimizer_config is not None and camera_optimizer_config.mode != \"off\":\n assert camera_optimizer_config.param_group not in optimizer_config\n optimizer_config[camera_optimizer_config.param_group] = {\n \"optimizer\": camera_optimizer_config.optimizer,\n \"scheduler\": camera_optimizer_config.scheduler,\n }\n return Optimizers(optimizer_config, param_groups)\n\n def train(self) -> None:\n \"\"\"Train the model.\"\"\"\n assert self.pipeline.datamanager.train_dataset is not None, \"Missing DatsetInputs\"\n\n self.pipeline.datamanager.train_dataparser_outputs.save_dataparser_transform(\n self.base_dir / \"dataparser_transforms.json\"\n )\n\n self._init_viewer_state()\n with TimeWriter(writer, EventName.TOTAL_TRAIN_TIME):\n num_iterations = self.config.max_num_iterations\n step = 0\n for step in range(self._start_step, self._start_step + num_iterations):\n with TimeWriter(writer, EventName.ITER_TRAIN_TIME, step=step) as train_t:\n self.pipeline.train()\n\n # training callbacks before the training iteration\n for callback in self.callbacks:\n callback.run_callback_at_location(\n step, location=TrainingCallbackLocation.BEFORE_TRAIN_ITERATION\n )\n\n # time the forward pass\n loss, loss_dict, metrics_dict = self.train_iteration(step)\n\n # training callbacks after the training iteration\n for callback in self.callbacks:\n callback.run_callback_at_location(step, location=TrainingCallbackLocation.AFTER_TRAIN_ITERATION)\n\n # Skip the first two steps to avoid skewed timings that break the viewer rendering speed estimate.\n if step > 1:\n writer.put_time(\n name=EventName.TRAIN_RAYS_PER_SEC,\n 
duration=self.pipeline.datamanager.get_train_rays_per_batch() / train_t.duration,\n step=step,\n avg_over_steps=True,\n )\n\n self._update_viewer_state(step)\n\n # a batch of train rays\n if step_check(step, self.config.logging.steps_per_log, run_at_zero=True):\n writer.put_scalar(name=\"Train Loss\", scalar=loss, step=step)\n writer.put_dict(name=\"Train Loss Dict\", scalar_dict=loss_dict, step=step)\n writer.put_dict(name=\"Train Metrics Dict\", scalar_dict=metrics_dict, step=step)\n\n # Do not perform evaluation if there are no validation images\n if self.pipeline.datamanager.eval_dataset:\n self.eval_iteration(step)\n\n if step_check(step, self.config.steps_per_save):\n self.save_checkpoint(step)\n\n writer.write_out_storage()\n\n # save checkpoint at the end of training\n # self.save_checkpoint(step)\n\n # write out any remaining events (e.g., total train time)\n writer.write_out_storage()\n\n CONSOLE.rule()\n CONSOLE.print(\"[bold green]:tada: :tada: :tada: Training Finished :tada: :tada: :tada:\", justify=\"center\")\n if not self.config.viewer.quit_on_train_completion:\n CONSOLE.print(\"Use ctrl+c to quit\", justify=\"center\")\n self._always_render(step)\n\n @check_main_thread\n def _always_render(self, step: int) -> None:\n if self.viewer_state is not None:\n while True:\n self.viewer_state.vis[\"renderingState/isTraining\"].write(False)\n self._update_viewer_state(step)\n\n @check_main_thread\n def _check_viewer_warnings(self) -> None:\n \"\"\"Helper to print out any warnings regarding the way the viewer/loggers are enabled\"\"\"\n if (\n self.config.is_viewer_enabled()\n and not self.config.is_tensorboard_enabled()\n and not self.config.is_wandb_enabled()\n ):\n string: str = (\n \"[NOTE] Not running eval iterations since only viewer is enabled.\\n\"\n \"Use [yellow]--vis {wandb, tensorboard, viewer+wandb, viewer+tensorboard}[/yellow] to run with eval.\"\n )\n CONSOLE.print(f\"{string}\")\n\n @check_viewer_enabled\n def _init_viewer_state(self) -> None:\n \"\"\"Initializes viewer scene with given train dataset\"\"\"\n assert self.viewer_state and self.pipeline.datamanager.train_dataset\n self.viewer_state.init_scene(\n dataset=self.pipeline.datamanager.train_dataset,\n start_train=self.config.viewer.start_train,\n )\n if not self.config.viewer.start_train:\n self._always_render(self._start_step)\n\n @check_viewer_enabled\n def _update_viewer_state(self, step: int) -> None:\n \"\"\"Updates the viewer state by rendering out scene with current pipeline\n Returns the time taken to render scene.\n\n Args:\n step: current train step\n \"\"\"\n assert self.viewer_state is not None\n with TimeWriter(writer, EventName.ITER_VIS_TIME, step=step) as _:\n num_rays_per_batch: int = self.pipeline.datamanager.get_train_rays_per_batch()\n try:\n self.viewer_state.update_scene(self, step, self.pipeline.model, num_rays_per_batch)\n except RuntimeError:\n time.sleep(0.03) # sleep to allow buffer to reset\n assert self.viewer_state.vis is not None\n self.viewer_state.vis[\"renderingState/log_errors\"].write(\n \"Error: GPU out of memory. 
Reduce resolution to prevent viewer from crashing.\"\n )\n\n @check_viewer_enabled\n def _update_viewer_rays_per_sec(self, train_t: TimeWriter, vis_t: TimeWriter, step: int) -> None:\n \"\"\"Performs update on rays/sec calculation for training\n\n Args:\n train_t: timer object carrying time to execute total training iteration\n vis_t: timer object carrying time to execute visualization step\n step: current step\n \"\"\"\n train_num_rays_per_batch: int = self.pipeline.datamanager.get_train_rays_per_batch()\n writer.put_time(\n name=EventName.TRAIN_RAYS_PER_SEC,\n duration=train_num_rays_per_batch / (train_t.duration - vis_t.duration),\n step=step,\n avg_over_steps=True,\n )\n\n def _load_checkpoint(self) -> None:\n \"\"\"Helper function to load pipeline and optimizer from prespecified checkpoint\"\"\"\n load_dir: Path = self.config.load_dir\n if load_dir is not None:\n load_step = self.config.load_step\n if load_step is None:\n print(\"Loading latest checkpoint from load_dir\")\n # NOTE: this is specific to the checkpoint name format\n load_step = sorted(int(x[x.find(\"-\") + 1 : x.find(\".\")]) for x in os.listdir(load_dir))[-1]\n load_path: Path = load_dir / f\"step-{load_step:09d}.ckpt\"\n assert load_path.exists(), f\"Checkpoint {load_path} does not exist\"\n loaded_state = torch.load(load_path, map_location=\"cpu\")\n self._start_step = loaded_state[\"step\"] + 1\n # load the checkpoints for pipeline, optimizers, and gradient scalar\n self.pipeline.load_pipeline(loaded_state[\"pipeline\"], loaded_state[\"step\"])\n self.optimizers.load_optimizers(loaded_state[\"optimizers\"])\n self.grad_scaler.load_state_dict(loaded_state[\"scalers\"])\n CONSOLE.print(f\"done loading checkpoint from {load_path}\")\n else:\n CONSOLE.print(\"No checkpoints to load, training from scratch\")\n\n @check_main_thread\n def save_checkpoint(self, step: int) -> None:\n \"\"\"Save the model and optimizers\n\n Args:\n step: number of steps in training for given checkpoint\n \"\"\"\n # possibly make the checkpoint directory\n if not self.checkpoint_dir.exists():\n self.checkpoint_dir.mkdir(parents=True, exist_ok=True)\n # save the checkpoint\n ckpt_path: Path = self.checkpoint_dir / f\"step-{step:09d}.ckpt\"\n torch.save(\n {\n \"step\": step,\n \"pipeline\": self.pipeline.module.state_dict() # type: ignore\n if hasattr(self.pipeline, \"module\")\n else self.pipeline.state_dict(),\n \"optimizers\": {k: v.state_dict() for (k, v) in self.optimizers.optimizers.items()},\n \"scalers\": self.grad_scaler.state_dict(),\n },\n ckpt_path,\n )\n # possibly delete old checkpoints\n if self.config.save_only_latest_checkpoint:\n # delete everything else in the checkpoint folder\n for f in self.checkpoint_dir.glob(\"*\"):\n if f != ckpt_path:\n f.unlink()\n\n @profiler.time_function\n def train_iteration(self, step: int) -> TRAIN_INTERATION_OUTPUT:\n \"\"\"Run one iteration with a batch of inputs. 
Returns dictionary of model losses.\n\n Args:\n step: Current training step.\n \"\"\"\n self.optimizers.zero_grad_all()\n cpu_or_cuda_str: str = self.device.split(\":\")[0]\n with torch.autocast(device_type=cpu_or_cuda_str, enabled=self.mixed_precision):\n _, loss_dict, metrics_dict = self.pipeline.get_train_loss_dict(step=step)\n loss = functools.reduce(torch.add, loss_dict.values())\n self.grad_scaler.scale(loss).backward() # type: ignore\n self.optimizers.optimizer_scaler_step_all(self.grad_scaler)\n\n if self.config.log_gradients:\n total_grad = 0\n for tag, value in self.pipeline.model.named_parameters():\n assert tag != \"Total\"\n if value.grad is not None:\n grad = value.grad.norm()\n metrics_dict[f\"Gradients/{tag}\"] = grad\n total_grad += grad\n\n metrics_dict[\"Gradients/Total\"] = total_grad\n\n self.grad_scaler.update()\n self.optimizers.scheduler_step_all(step)\n\n # Merging loss and metrics dict into a single output.\n return loss, loss_dict, metrics_dict\n\n @check_eval_enabled\n @profiler.time_function\n def eval_iteration(self, step: int) -> None:\n \"\"\"Run one iteration with different batch/image/all image evaluations depending on step size.\n\n Args:\n step: Current training step.\n \"\"\"\n # a batch of eval rays\n if step_check(step, self.config.steps_per_eval_batch):\n _, eval_loss_dict, eval_metrics_dict = self.pipeline.get_eval_loss_dict(step=step)\n eval_loss = functools.reduce(torch.add, eval_loss_dict.values())\n writer.put_scalar(name=\"Eval Loss\", scalar=eval_loss, step=step)\n writer.put_dict(name=\"Eval Loss Dict\", scalar_dict=eval_loss_dict, step=step)\n writer.put_dict(name=\"Eval Metrics Dict\", scalar_dict=eval_metrics_dict, step=step)\n\n # one eval image\n if step_check(step, self.config.steps_per_eval_image):\n with TimeWriter(writer, EventName.TEST_RAYS_PER_SEC, write=False) as test_t:\n metrics_dict, images_dict = self.pipeline.get_eval_image_metrics_and_images(step=step)\n writer.put_time(\n name=EventName.TEST_RAYS_PER_SEC,\n duration=metrics_dict[\"num_rays\"] / test_t.duration,\n step=step,\n avg_over_steps=True,\n )\n writer.put_dict(name=\"Eval Images Metrics\", scalar_dict=metrics_dict, step=step)\n group = \"Eval Images\"\n for image_name, image in images_dict.items():\n writer.put_image(name=group + \"/\" + image_name, image=image, step=step)\n\n # all eval images\n if step_check(step, self.config.steps_per_eval_all_images):\n metrics_dict = self.pipeline.get_average_eval_image_metrics(step=step)\n writer.put_dict(name=\"Eval Images Metrics Dict (all images)\", scalar_dict=metrics_dict, step=step)" }, { "identifier": "TrainerConfig", "path": "nerfstudio/engine/trainer.py", "snippet": "class TrainerConfig(ExperimentConfig):\n \"\"\"Configuration for training regimen\"\"\"\n\n _target: Type = field(default_factory=lambda: Trainer)\n \"\"\"target class to instantiate\"\"\"\n steps_per_save: int = 1000\n \"\"\"Number of steps between saves.\"\"\"\n steps_per_eval_batch: int = 500\n \"\"\"Number of steps between randomly sampled batches of rays.\"\"\"\n steps_per_eval_image: int = 500\n \"\"\"Number of steps between single eval images.\"\"\"\n steps_per_eval_all_images: int = 25000\n \"\"\"Number of steps between eval all images.\"\"\"\n max_num_iterations: int = 1000000\n \"\"\"Maximum number of iterations to run.\"\"\"\n mixed_precision: bool = False\n \"\"\"Whether or not to use mixed precision for training.\"\"\"\n save_only_latest_checkpoint: bool = True\n \"\"\"Whether to only save the latest checkpoint or all checkpoints.\"\"\"\n # 
optional parameters if we want to resume training\n load_dir: Optional[Path] = None\n \"\"\"Optionally specify a pre-trained model directory to load from.\"\"\"\n load_step: Optional[int] = None\n \"\"\"Optionally specify model step to load from; if none, will find most recent model in load_dir.\"\"\"\n load_config: Optional[Path] = None\n \"\"\"Path to config YAML file.\"\"\"\n log_gradients: bool = False\n \"\"\"Optionally log gradients during training\"\"\"\n\n \"\"\" feng add \n load_pretrain_or_resume:\n resume:\n load all parameters including network, \n pretrain: \n \"\"\"\n load_pretrain_or_resume: str = \"resume\"\n wandb_name: str = \"none\"\n \"\"\" /feng add \"\"\"" } ]
import dataclasses import functools import os import time import numpy as np import torch import yappi import wandb from dataclasses import dataclass, field from pathlib import Path from typing import Dict, List, Optional, Tuple, Type, Union from rich.console import Console from torch.cuda.amp.grad_scaler import GradScaler from typing_extensions import Literal from nerfstudio.configs.experiment_config import ExperimentConfig from nerfstudio.engine.callbacks import ( TrainingCallback, TrainingCallbackAttributes, TrainingCallbackLocation, ) from nerfstudio.engine.optimizers import Optimizers from nerfstudio.pipelines.base_pipeline import VanillaPipeline from nerfstudio.utils import profiler, writer from nerfstudio.utils.decorators import ( check_eval_enabled, check_main_thread, check_viewer_enabled, ) from nerfstudio.utils.misc import step_check from nerfstudio.utils.writer import EventName, TimeWriter from nerfstudio.viewer.server import viewer_utils from MSTH.utils import Timer from MSTH.video_pipeline import ( VideoPipeline, VideoPipelineConfig, SpaceTimeDataManagerConfig, SpaceTimePipelineConfig, SpaceTimePipeline, ) from nerfstudio.engine.trainer import Trainer, TrainerConfig
12,835
from __future__ import annotations CONSOLE = Console(width=120) TRAIN_INTERATION_OUTPUT = Tuple[ # pylint: disable=invalid-name torch.Tensor, Dict[str, torch.Tensor], Dict[str, torch.Tensor] ] TORCH_DEVICE = Union[torch.device, str] # pylint: disable=invalid-name @dataclass class SpaceTimeHashingTrainerConfig(TrainerConfig): """Configuration for training regimen""" _target: Type = field(default_factory=lambda: SpaceTimeHashingTrainer) pipeline: SpaceTimePipelineConfig """target class to instantiate""" steps_per_save: int = 1000 """Number of steps between saves.""" steps_per_eval_batch: int = 500 """Number of steps between randomly sampled batches of rays.""" steps_per_eval_image: int = 2000 """Number of steps between single eval images.""" steps_per_eval_all_images: int = 25000 """Number of steps between eval all images.""" max_num_iterations: int = 1000000 """Maximum number of iterations to run.""" mixed_precision: bool = False """Whether or not to use mixed precision for training.""" save_only_latest_checkpoint: bool = True """Whether to only save the latest checkpoint or all checkpoints.""" # optional parameters if we want to resume training load_dir: Optional[Path] = None """Optionally specify a pre-trained model directory to load from.""" load_step: Optional[int] = None """Optionally specify model step to load from; if none, will find most recent model in load_dir.""" load_config: Optional[Path] = None """Path to config YAML file.""" log_gradients: bool = False """Optionally log gradients during training""" wandb_name: str = "none" steps_full_video: int = 10000000000 eval_total_frames: Optional[int] = None save_eval_video: bool = False render_camera_offset: Optional[List[float]] = None class SpaceTimeHashingTrainer(Trainer): config: SpaceTimeHashingTrainerConfig pipeline: SpaceTimePipeline optimizers: Optimizers
from __future__ import annotations CONSOLE = Console(width=120) TRAIN_INTERATION_OUTPUT = Tuple[ # pylint: disable=invalid-name torch.Tensor, Dict[str, torch.Tensor], Dict[str, torch.Tensor] ] TORCH_DEVICE = Union[torch.device, str] # pylint: disable=invalid-name @dataclass class SpaceTimeHashingTrainerConfig(TrainerConfig): """Configuration for training regimen""" _target: Type = field(default_factory=lambda: SpaceTimeHashingTrainer) pipeline: SpaceTimePipelineConfig """target class to instantiate""" steps_per_save: int = 1000 """Number of steps between saves.""" steps_per_eval_batch: int = 500 """Number of steps between randomly sampled batches of rays.""" steps_per_eval_image: int = 2000 """Number of steps between single eval images.""" steps_per_eval_all_images: int = 25000 """Number of steps between eval all images.""" max_num_iterations: int = 1000000 """Maximum number of iterations to run.""" mixed_precision: bool = False """Whether or not to use mixed precision for training.""" save_only_latest_checkpoint: bool = True """Whether to only save the latest checkpoint or all checkpoints.""" # optional parameters if we want to resume training load_dir: Optional[Path] = None """Optionally specify a pre-trained model directory to load from.""" load_step: Optional[int] = None """Optionally specify model step to load from; if none, will find most recent model in load_dir.""" load_config: Optional[Path] = None """Path to config YAML file.""" log_gradients: bool = False """Optionally log gradients during training""" wandb_name: str = "none" steps_full_video: int = 10000000000 eval_total_frames: Optional[int] = None save_eval_video: bool = False render_camera_offset: Optional[List[float]] = None class SpaceTimeHashingTrainer(Trainer): config: SpaceTimeHashingTrainerConfig pipeline: SpaceTimePipeline optimizers: Optimizers
callbacks: List[TrainingCallback]
1
2023-10-26 04:39:15+00:00
16k
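Most of this record's context comes from nerfstudio's training utilities; the smallest of them, step_check (nerfstudio/utils/misc.py), is what the Trainer snippet uses to gate periodic checkpointing, logging and evaluation. A self-contained sketch of that gating pattern, with an illustrative loop and print that are not taken from the repository:

from nerfstudio.utils.misc import step_check

# step_check(step, step_size) is True on every step_size-th step; it is never True
# at step 0 unless run_at_zero=True, and never True when step_size == 0.
for step in (0, 500, 999, 1000, 2000):
    if step_check(step, 1000):
        print(f"would save a checkpoint at step {step}")  # fires at 1000 and 2000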
Trustworthy-AI-Group/TransferAttack
transferattack/model_related/ghost.py
[ { "identifier": "Attack", "path": "transferattack/attack.py", "snippet": "class Attack(object):\n \"\"\"\n Base class for all attacks.\n \"\"\"\n def __init__(self, attack, model_name, epsilon, targeted, random_start, norm, loss, device=None):\n \"\"\"\n Initialize the hyperparameters\n\n Arguments:\n attack (str): the name of attack.\n model_name (str): the name of surrogate model for attack.\n epsilon (float): the perturbation budget.\n targeted (bool): targeted/untargeted attack.\n random_start (bool): whether using random initialization for delta.\n norm (str): the norm of perturbation, l2/linfty.\n loss (str): the loss function.\n device (torch.device): the device for data. If it is None, the device would be same as model\n \"\"\"\n if norm not in ['l2', 'linfty']:\n raise Exception(\"Unsupported norm {}\".format(norm))\n self.attack = attack\n self.model = self.load_model(model_name)\n self.epsilon = epsilon\n self.targeted = targeted\n self.random_start = random_start\n self.norm = norm\n if isinstance(self.model, EnsembleModel):\n self.device = self.model.device\n else:\n self.device = next(self.model.parameters()).device if device is None else device\n self.loss = self.loss_function(loss)\n\n def load_model(self, model_name):\n \"\"\"\n The model Loading stage, which should be overridden when surrogate model is customized (e.g., DSM, SETR, etc.)\n Prioritize the model in torchvision.models, then timm.models\n\n Arguments:\n model_name (str/list): the name of surrogate model in model_list in utils.py\n\n Returns:\n model (torch.nn.Module): the surrogate model wrapped by wrap_model in utils.py\n \"\"\"\n def load_single_model(model_name):\n if model_name in models.__dict__.keys():\n print('=> Loading model {} from torchvision.models'.format(model_name))\n model = models.__dict__[model_name](weights=\"DEFAULT\")\n elif model_name in timm.list_models():\n print('=> Loading model {} from timm.models'.format(model_name))\n model = timm.create_model(model_name, pretrained=True)\n else:\n raise ValueError('Model {} not supported'.format(model_name))\n return wrap_model(model.eval().cuda())\n\n if isinstance(model_name, list):\n return EnsembleModel([load_single_model(name) for name in model_name])\n else:\n return load_single_model(model_name)\n\n def forward(self, data, label, **kwargs):\n \"\"\"\n The general attack procedure\n\n Arguments:\n data (N, C, H, W): tensor for input images\n labels (N,): tensor for ground-truth labels if untargetd\n labels (2,N): tensor for [ground-truth, targeted labels] if targeted\n \"\"\"\n if self.targeted:\n assert len(label) == 2\n label = label[1] # the second element is the targeted label tensor\n data = data.clone().detach().to(self.device)\n label = label.clone().detach().to(self.device)\n\n # Initialize adversarial perturbation\n delta = self.init_delta(data)\n\n momentum = 0\n for _ in range(self.epoch):\n # Obtain the output\n logits = self.get_logits(self.transform(data+delta, momentum=momentum))\n\n # Calculate the loss\n loss = self.get_loss(logits, label)\n\n # Calculate the gradients\n grad = self.get_grad(loss, delta)\n\n # Calculate the momentum\n momentum = self.get_momentum(grad, momentum)\n\n # Update adversarial perturbation\n delta = self.update_delta(delta, data, momentum, self.alpha)\n\n return delta.detach()\n\n def get_logits(self, x, **kwargs):\n \"\"\"\n The inference stage, which should be overridden when the attack need to change the models (e.g., ensemble-model attack, ghost, etc.) or the input (e.g. 
DIM, SIM, etc.)\n \"\"\"\n return self.model(x)\n\n def get_loss(self, logits, label):\n \"\"\"\n The loss calculation, which should be overrideen when the attack change the loss calculation (e.g., ATA, etc.)\n \"\"\"\n # Calculate the loss\n return -self.loss(logits, label) if self.targeted else self.loss(logits, label)\n\n\n def get_grad(self, loss, delta, **kwargs):\n \"\"\"\n The gradient calculation, which should be overridden when the attack need to tune the gradient (e.g., TIM, variance tuning, enhanced momentum, etc.)\n \"\"\"\n return torch.autograd.grad(loss, delta, retain_graph=False, create_graph=False)[0]\n\n def get_momentum(self, grad, momentum, **kwargs):\n \"\"\"\n The momentum calculation\n \"\"\"\n return momentum * self.decay + grad / (grad.abs().mean(dim=(1,2,3), keepdim=True))\n\n def init_delta(self, data, **kwargs):\n delta = torch.zeros_like(data).to(self.device)\n if self.random_start:\n if self.norm == 'linfty':\n delta.uniform_(-self.epsilon, self.epsilon)\n else:\n delta.normal_(-self.epsilon, self.epsilon)\n d_flat = delta.view(delta.size(0), -1)\n n = d_flat.norm(p=2, dim=10).view(delta.size(0), 1, 1, 1)\n r = torch.zeros_like(data).uniform_(0,1).to(self.device)\n delta *= r/n*self.epsilon\n delta = clamp(delta, img_min-data, img_max-data)\n delta.requires_grad = True\n return delta\n\n def update_delta(self, delta, data, grad, alpha, **kwargs):\n if self.norm == 'linfty':\n delta = torch.clamp(delta + alpha * grad.sign(), -self.epsilon, self.epsilon)\n else:\n grad_norm = torch.norm(grad.view(grad.size(0), -1), dim=1).view(-1, 1, 1, 1)\n scaled_grad = grad / (grad_norm + 1e-20)\n delta = (delta + scaled_grad * alpha).view(delta.size(0), -1).renorm(p=2, dim=0, maxnorm=self.epsilon).view_as(delta)\n delta = clamp(delta, img_min-data, img_max-data)\n return delta\n\n def loss_function(self, loss):\n \"\"\"\n Get the loss function\n \"\"\"\n if loss == 'crossentropy':\n return nn.CrossEntropyLoss()\n else:\n raise Exception(\"Unsupported loss {}\".format(loss))\n\n def transform(self, data, **kwargs):\n return data\n\n def __call__(self, *input, **kwargs):\n self.model.eval()\n return self.forward(*input, **kwargs)" }, { "identifier": "ghost_resnet101", "path": "transferattack/model_related/ghost_networks/resnet.py", "snippet": "@register_model\n@handle_legacy_interface(weights=(\"pretrained\", ResNet101_Weights.IMAGENET1K_V1))\ndef ghost_resnet101(*, ghost_random_range=0.16, weights: Optional[ResNet101_Weights] = None, progress: bool = True, **kwargs: Any) -> GhostResNet:\n \"\"\"ResNet-101 from `Deep Residual Learning for Image Recognition <https://arxiv.org/pdf/1512.03385.pdf>`__.\n\n .. note::\n The bottleneck of TorchVision places the stride for downsampling to the second 3x3\n convolution while the original paper places it to the first 1x1 convolution.\n This variant improves the accuracy and is known as `ResNet V1.5\n <https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch>`_.\n\n Args:\n weights (:class:`~torchvision.models.ResNet101_Weights`, optional): The\n pretrained weights to use. See\n :class:`~torchvision.models.ResNet101_Weights` below for\n more details, and possible values. By default, no pre-trained\n weights are used.\n progress (bool, optional): If True, displays a progress bar of the\n download to stderr. Default is True.\n **kwargs: parameters passed to the ``torchvision.models.resnet.ResNet``\n base class. 
Please refer to the `source code\n <https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py>`_\n for more details about this class.\n\n .. autoclass:: torchvision.models.ResNet101_Weights\n :members:\n \"\"\"\n weights = ResNet101_Weights.verify(weights)\n\n return _resnet(GhostBottleneck, ghost_random_range, [3, 4, 23, 3], weights, progress, **kwargs)" }, { "identifier": "ghost_resnet152", "path": "transferattack/model_related/ghost_networks/resnet.py", "snippet": "@register_model\n@handle_legacy_interface(weights=(\"pretrained\", ResNet152_Weights.IMAGENET1K_V1))\ndef ghost_resnet152(*, ghost_random_range=0.12, weights: Optional[ResNet152_Weights] = None, progress: bool = True, **kwargs: Any) -> GhostResNet:\n \"\"\"ResNet-152 from `Deep Residual Learning for Image Recognition <https://arxiv.org/pdf/1512.03385.pdf>`__.\n\n .. note::\n The bottleneck of TorchVision places the stride for downsampling to the second 3x3\n convolution while the original paper places it to the first 1x1 convolution.\n This variant improves the accuracy and is known as `ResNet V1.5\n <https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch>`_.\n\n Args:\n weights (:class:`~torchvision.models.ResNet152_Weights`, optional): The\n pretrained weights to use. See\n :class:`~torchvision.models.ResNet152_Weights` below for\n more details, and possible values. By default, no pre-trained\n weights are used.\n progress (bool, optional): If True, displays a progress bar of the\n download to stderr. Default is True.\n **kwargs: parameters passed to the ``torchvision.models.resnet.ResNet``\n base class. Please refer to the `source code\n <https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py>`_\n for more details about this class.\n\n .. autoclass:: torchvision.models.ResNet152_Weights\n :members:\n \"\"\"\n weights = ResNet152_Weights.verify(weights)\n\n return _resnet(GhostBottleneck, ghost_random_range, [3, 8, 36, 3], weights, progress, **kwargs)" }, { "identifier": "MIFGSM", "path": "transferattack/gradient/mifgsm.py", "snippet": "class MIFGSM(Attack):\n \"\"\"\n MI-FGSM Attack\n 'Boosting Adversarial Attacks with Momentum (CVPR 2018)'(https://arxiv.org/abs/1710.06081)\n\n Arguments:\n model_name (str): the name of surrogate model for attack.\n epsilon (float): the perturbation budget.\n alpha (float): the step size.\n epoch (int): the number of iterations.\n decay (float): the decay factor for momentum calculation.\n targeted (bool): targeted/untargeted attack.\n random_start (bool): whether using random initialization for delta.\n norm (str): the norm of perturbation, l2/linfty.\n loss (str): the loss function.\n device (torch.device): the device for data. 
If it is None, the device would be same as model\n\n Official arguments:\n epsilon=16/255, alpha=epsilon/epoch=1.6/255, epoch=10, decay=1.\n\n Example script:\n python main.py --attack mifgsm --output_dir adv_data/mifgsm/resnet18\n \"\"\"\n\n def __init__(self, model_name, epsilon=16/255, alpha=1.6/255, epoch=10, decay=1., targeted=False, random_start=False,\n norm='linfty', loss='crossentropy', device=None, attack='MI-FGSM', **kwargs):\n super().__init__(attack, model_name, epsilon, targeted, random_start, norm, loss, device)\n self.alpha = alpha\n self.epoch = epoch\n self.decay = decay" }, { "identifier": "NIFGSM", "path": "transferattack/gradient/nifgsm.py", "snippet": "class NIFGSM(MIFGSM):\n \"\"\"\n NI-FGSM Attack\n 'Nesterov Accelerated Gradient and Scale Invariance for Adversarial Attacks (ICLR 2020)'(https://arxiv.org/abs/1908.06281)\n\n Arguments:\n model_name (str): the name of surrogate model for attack.\n epsilon (float): the perturbation budget.\n alpha (float): the step size.\n epoch (int): the number of iterations.\n decay (float): the decay factor for momentum calculation.\n targeted (bool): targeted/untargeted attack.\n random_start (bool): whether using random initialization for delta.\n norm (str): the norm of perturbation, l2/linfty.\n loss (str): the loss function.\n device (torch.device): the device for data. If it is None, the device would be same as model\n\n Official arguments:\n epsilon=16/255, alpha=epsilon/epoch=1.6/255, epoch=10, decay=1.\n \"\"\"\n\n def __init__(self, model_name, epsilon=16/255, alpha=1.6/255, epoch=10, decay=1., targeted=False, random_start=False,\n norm='linfty', loss='crossentropy', device=None, attack='NI-FGSM', **kwargs):\n super().__init__(model_name, epsilon, alpha, epoch, decay, targeted, random_start, norm, loss, device, attack)\n\n def transform(self, x, momentum, **kwargs):\n \"\"\"\n look ahead for NI-FGSM\n \"\"\"\n return x + self.alpha*self.decay*momentum" }, { "identifier": "VMIFGSM", "path": "transferattack/gradient/vmifgsm.py", "snippet": "class VMIFGSM(Attack):\n \"\"\"\n VMI-FGSM Attack\n 'Enhancing the transferability of adversarial attacks through variance tuning (CVPR 2021)'(https://arxiv.org/abs/2103.15571)\n\n Arguments:\n model_name (str): the name of surrogate model for attack.\n epsilon (float): the perturbation budget.\n alpha (float): the step size.\n beta (float): the relative value for the neighborhood.\n num_neighbor (int): the number of samples for estimating the gradient variance.\n epoch (int): the number of iterations.\n decay (float): the decay factor for momentum calculation.\n targeted (bool): targeted/untargeted attack.\n random_start (bool): whether using random initialization for delta.\n norm (str): the norm of perturbation, l2/linfty.\n loss (str): the loss function.\n device (torch.device): the device for data. 
If it is None, the device would be same as model\n \n Official arguments:\n epsilon=16/255, alpha=epsilon/epoch=1.6/255, beta=1.5, num_neighbor=20, epoch=10, decay=1.\n \"\"\"\n \n def __init__(self, model_name, epsilon=16/255, alpha=1.6/255, beta=1.5, num_neighbor=20, epoch=10, decay=1., targeted=False, \n random_start=False, norm='linfty', loss='crossentropy', device=None, attack='VMI-FGSM', **kwargs):\n super().__init__(attack, model_name, epsilon, targeted, random_start, norm, loss, device)\n self.alpha = alpha\n self.radius = beta * epsilon\n self.epoch = epoch\n self.decay = decay\n self.num_neighbor = num_neighbor\n\n def get_variance(self, data, delta, label, cur_grad, momentum, **kwargs):\n \"\"\"\n Calculate the gradient variance \n \"\"\"\n grad = 0\n for _ in range(self.num_neighbor):\n # Obtain the output\n # This is inconsistent for transform!\n logits = self.get_logits(self.transform(data+delta+torch.zeros_like(delta).uniform_(-self.radius, self.radius).to(self.device), momentum=momentum))\n\n # Calculate the loss\n loss = self.get_loss(logits, label)\n\n # Calculate the gradients\n grad += self.get_grad(loss, delta)\n\n return grad / self.num_neighbor - cur_grad\n\n def forward(self, data, label, **kwargs):\n \"\"\"\n The attack procedure for VMI-FGSM\n\n Arguments:\n data: (N, C, H, W) tensor for input images\n labels: (N,) tensor for ground-truth labels if untargetd, otherwise targeted labels\n \"\"\"\n if self.targeted:\n assert len(label) == 2\n label = label[1] # the second element is the targeted label tensor\n data = data.clone().detach().to(self.device)\n label = label.clone().detach().to(self.device)\n\n # Initialize adversarial perturbation\n delta = self.init_delta(data)\n\n momentum, variance = 0, 0\n for _ in range(self.epoch):\n # Obtain the output\n logits = self.get_logits(self.transform(data+delta, momentum=momentum))\n\n # Calculate the loss\n loss = self.get_loss(logits, label)\n\n # Calculate the gradients\n grad = self.get_grad(loss, delta)\n\n # Calculate the momentum\n momentum = self.get_momentum(grad+variance, momentum)\n\n # Calculate the variance\n variance = self.get_variance(data, delta, label, grad, momentum)\n\n # Update adversarial perturbation\n delta = self.update_delta(delta, data, momentum, self.alpha)\n\n return delta.detach()" }, { "identifier": "DIM", "path": "transferattack/input_transformation/dim.py", "snippet": "class DIM(MIFGSM):\n \"\"\"\n DIM Attack\n 'Improving Transferability of Adversarial Examples with Input Diversity (CVPR 2019)'(https://arxiv.org/abs/1803.06978)\n\n Arguments:\n model_name (str): the name of surrogate model for attack.\n epsilon (float): the perturbation budget.\n alpha (float): the step size.\n epoch (int): the number of iterations.\n decay (float): the decay factor for momentum calculation.\n resize_rate (float): the relative size of the resized image\n diversity_prob (float): the probability for transforming the input image\n targeted (bool): targeted/untargeted attack.\n random_start (bool): whether using random initialization for delta.\n norm (str): the norm of perturbation, l2/linfty.\n loss (str): the loss function.\n device (torch.device): the device for data. 
If it is None, the device would be same as model\n \n Official arguments:\n epsilon=16/255, alpha=epsilon/epoch=1.6/255, epoch=10, decay=1, resize_rate=1.1, diversity_prob=0.5\n \"\"\"\n \n def __init__(self, model_name, epsilon=16/255, alpha=1.6/255, epoch=10, decay=1., resize_rate=1.1, diversity_prob=0.5, targeted=False, \n random_start=False, norm='linfty', loss='crossentropy', device=None, attack='DIM', **kwargs):\n super().__init__(model_name, epsilon, alpha, epoch, decay, targeted, random_start, norm, loss, device, attack)\n if resize_rate < 1:\n raise Exception(\"Error! The resize rate should be larger than 1.\")\n self.resize_rate = resize_rate\n self.diversity_prob = diversity_prob\n \n def transform(self, x, **kwargs):\n \"\"\"\n Random transform the input images\n \"\"\"\n # do not transform the input image\n if torch.rand(1) > self.diversity_prob:\n return x\n \n img_size = x.shape[-1]\n img_resize = int(img_size * self.resize_rate)\n\n # resize the input image to random size\n rnd = torch.randint(low=min(img_size, img_resize), high=max(img_size, img_resize), size=(1,), dtype=torch.int32)\n rescaled = F.interpolate(x, size=[rnd, rnd], mode='bilinear', align_corners=False)\n\n # randomly add padding\n h_rem = img_resize - rnd\n w_rem = img_resize - rnd\n pad_top = torch.randint(low=0, high=h_rem.item(), size=(1,), dtype=torch.int32)\n pad_bottom = h_rem - pad_top\n pad_left = torch.randint(low=0, high=w_rem.item(), size=(1,), dtype=torch.int32)\n pad_right = w_rem - pad_left\n\n padded = F.pad(rescaled, [pad_left.item(), pad_right.item(), pad_top.item(), pad_bottom.item()], value=0)\n\n # resize the image back to img_size\n return F.interpolate(padded, size=[img_size, img_size], mode='bilinear', align_corners=False)" }, { "identifier": "TIM", "path": "transferattack/input_transformation/tim.py", "snippet": "class TIM(MIFGSM):\n \"\"\"\n TIM Attack\n 'Evading Defenses to Transferable Adversarial Examples by Translation-Invariant Attacks (CVPR 2019)'(https://arxiv.org/abs/1904.02884)\n\n Arguments:\n model_name (str): the name of surrogate model for attack.\n epsilon (float): the perturbation budget.\n alpha (float): the step size.\n epoch (int): the number of iterations.\n decay (float): the decay factor for momentum calculation.\n kernel_type (str): the type of kernel (gaussian/uniform/linear).\n kernel_size (int): the size of kernel.\n targeted (bool): targeted/untargeted attack.\n random_start (bool): whether using random initialization for delta.\n norm (str): the norm of perturbation, l2/linfty.\n loss (str): the loss function.\n device (torch.device): the device for data. 
If it is None, the device would be same as model\n \n Official arguments:\n epsilon=16/255, alpha=epsilon/epoch=1.6/255, epoch=10, decay=1., kernel_type='gaussian', kernel_size=15\n\n Example script:\n python main.py --attack tim --output_dir adv_data/tim/resnet18\n \"\"\"\n \n def __init__(self, model_name, epsilon=16/255, alpha=1.6/255, epoch=10, decay=1., kernel_type='gaussian', kernel_size=15, targeted=False, \n random_start=False, norm='linfty', loss='crossentropy', device=None, attack='TIM', **kwargs):\n super().__init__(model_name, epsilon, alpha, epoch, decay, targeted, random_start, norm, loss, device, attack)\n self.kernel = self.generate_kernel(kernel_type, kernel_size)\n\n def generate_kernel(self, kernel_type, kernel_size, nsig=3):\n \"\"\"\n Generate the gaussian/uniform/linear kernel\n\n Arguments:\n kernel_type (str): the method for initilizing the kernel\n kernel_size (int): the size of kernel\n \"\"\"\n if kernel_type.lower() == 'gaussian':\n x = np.linspace(-nsig, nsig, kernel_size)\n kern1d = st.norm.pdf(x)\n kernel_raw = np.outer(kern1d, kern1d)\n kernel = kernel_raw / kernel_raw.sum()\n elif kernel_type.lower() == 'uniform':\n kernel = np.ones((kernel_size, kernel_size)) / (kernel_size ** 2)\n elif kernel_type.lower() == 'linear':\n kern1d = 1 - np.abs(np.linspace((-kernel_size+1)//2, (kernel_size-1)//2, kernel_size)/(kernel_size**2))\n kernel_raw = np.outer(kern1d, kern1d)\n kernel = kernel_raw / kernel_raw.sum()\n else:\n raise Exception(\"Unspported kernel type {}\".format(kernel_type))\n \n stack_kernel = np.stack([kernel, kernel, kernel])\n stack_kernel = np.expand_dims(stack_kernel, 1)\n return torch.from_numpy(stack_kernel.astype(np.float32)).to(self.device)\n\n def get_grad(self, loss, delta, **kwargs):\n \"\"\"\n Overridden for TIM attack.\n \"\"\"\n grad = torch.autograd.grad(loss, delta, retain_graph=False, create_graph=False)[0]\n grad = F.conv2d(grad, self.kernel, stride=1, padding='same', groups=3)\n return grad" }, { "identifier": "SIM", "path": "transferattack/input_transformation/sim.py", "snippet": "class SIM(MIFGSM):\n \"\"\"\n SIM Attack\n 'Nesterov Accelerated Gradient and Scale Invariance for Adversarial Attacks (ICLR 2020)'(https://arxiv.org/abs/1908.06281)\n\n Arguments:\n model_name (str): the name of surrogate model for attack.\n epsilon (float): the perturbation budget.\n alpha (float): the step size.\n epoch (int): the number of iterations.\n decay (float): the decay factor for momentum calculation.\n num_scale (int): the number of scaled copies in each iteration.\n targeted (bool): targeted/untargeted attack.\n random_start (bool): whether using random initialization for delta.\n norm (str): the norm of perturbation, l2/linfty.\n loss (str): the loss function.\n device (torch.device): the device for data. 
If it is None, the device would be same as model\n\n Official arguments:\n epsilon=16/255, alpha=epsilon/epoch=1.6/255, epoch=10, decay=1., num_scale=5\n \"\"\"\n\n def __init__(self, model_name, epsilon=16/255, alpha=1.6/255, epoch=10, decay=1., num_scale=5, targeted=False, random_start=False, norm='linfty', loss='crossentropy', device=None, attack='SIM', **kwargs):\n super().__init__(model_name, epsilon, alpha, epoch, decay, targeted, random_start, norm, loss, device, attack)\n self.num_scale = num_scale\n\n def transform(self, x, **kwargs):\n \"\"\"\n Scale the input for SIM\n \"\"\"\n return torch.cat([x / (2**i) for i in range(self.num_scale)])\n\n def get_loss(self, logits, label):\n \"\"\"\n Calculate the loss\n \"\"\"\n return -self.loss(logits, label.repeat(self.num_scale)) if self.targeted else self.loss(logits, label.repeat(self.num_scale))" }, { "identifier": "Admix", "path": "transferattack/input_transformation/admix.py", "snippet": "class Admix(MIFGSM):\n \"\"\"\n Admix Attack\n 'Admix: Enhancing the Transferability of Adversarial Attacks (ICCV 2021)'(https://arxiv.org/abs/2102.00436)\n\n Arguments:\n model_name (str): the name of surrogate model for attack.\n epsilon (float): the perturbation budget.\n alpha (float): the step size.\n epoch (int): the number of iterations.\n decay (float): the decay factor for momentum calculation.\n num_scale (int): the number of scaled copies in each iteration.\n num_admix (int): the number of admixed images in each iteration.\n admix_strength (float): the strength of admixed images.\n targeted (bool): targeted/untargeted attack.\n random_start (bool): whether using random initialization for delta.\n norm (str): the norm of perturbation, l2/linfty.\n loss (str): the loss function.\n device (torch.device): the device for data. 
If it is None, the device would be same as model\n\n Official arguments:\n epsilon=16/255, alpha=epsilon/epoch=1.6/255, epoch=10, decay=1., num_scale=5, num_admix=3, admix_strength=0.2\n \"\"\"\n\n def __init__(self, model_name, epsilon=16/255, alpha=1.6/255, epoch=10, decay=1., num_scale=5, num_admix=3, admix_strength=0.2, targeted=False, random_start=False, norm='linfty', loss='crossentropy', device=None, attack='Admix', **kwargs):\n super().__init__(model_name, epsilon, alpha, epoch, decay, targeted, random_start, norm, loss, device, attack)\n self.num_scale = num_scale\n self.num_admix = num_admix\n self.admix_strength = admix_strength\n\n def transform(self, x, **kwargs):\n \"\"\"\n Admix the input for Admix Attack\n \"\"\"\n admix_images = torch.concat([(x + self.admix_strength * x[torch.randperm(x.size(0))].detach()) for _ in range(self.num_admix)], dim=0)\n return torch.concat([admix_images / (2 ** i) for i in range(self.num_scale)])\n\n def get_loss(self, logits, label):\n \"\"\"\n Calculate the loss\n \"\"\"\n return -self.loss(logits, label.repeat(self.num_scale*self.num_admix)) if self.targeted else self.loss(logits, label.repeat(self.num_scale*self.num_admix))" }, { "identifier": "MIFGSM", "path": "transferattack/gradient/mifgsm.py", "snippet": "class MIFGSM(Attack):\n \"\"\"\n MI-FGSM Attack\n 'Boosting Adversarial Attacks with Momentum (CVPR 2018)'(https://arxiv.org/abs/1710.06081)\n\n Arguments:\n model_name (str): the name of surrogate model for attack.\n epsilon (float): the perturbation budget.\n alpha (float): the step size.\n epoch (int): the number of iterations.\n decay (float): the decay factor for momentum calculation.\n targeted (bool): targeted/untargeted attack.\n random_start (bool): whether using random initialization for delta.\n norm (str): the norm of perturbation, l2/linfty.\n loss (str): the loss function.\n device (torch.device): the device for data. If it is None, the device would be same as model\n\n Official arguments:\n epsilon=16/255, alpha=epsilon/epoch=1.6/255, epoch=10, decay=1.\n\n Example script:\n python main.py --attack mifgsm --output_dir adv_data/mifgsm/resnet18\n \"\"\"\n\n def __init__(self, model_name, epsilon=16/255, alpha=1.6/255, epoch=10, decay=1., targeted=False, random_start=False,\n norm='linfty', loss='crossentropy', device=None, attack='MI-FGSM', **kwargs):\n super().__init__(attack, model_name, epsilon, targeted, random_start, norm, loss, device)\n self.alpha = alpha\n self.epoch = epoch\n self.decay = decay" }, { "identifier": "NIFGSM", "path": "transferattack/gradient/nifgsm.py", "snippet": "class NIFGSM(MIFGSM):\n \"\"\"\n NI-FGSM Attack\n 'Nesterov Accelerated Gradient and Scale Invariance for Adversarial Attacks (ICLR 2020)'(https://arxiv.org/abs/1908.06281)\n\n Arguments:\n model_name (str): the name of surrogate model for attack.\n epsilon (float): the perturbation budget.\n alpha (float): the step size.\n epoch (int): the number of iterations.\n decay (float): the decay factor for momentum calculation.\n targeted (bool): targeted/untargeted attack.\n random_start (bool): whether using random initialization for delta.\n norm (str): the norm of perturbation, l2/linfty.\n loss (str): the loss function.\n device (torch.device): the device for data. 
If it is None, the device would be same as model\n\n Official arguments:\n epsilon=16/255, alpha=epsilon/epoch=1.6/255, epoch=10, decay=1.\n \"\"\"\n\n def __init__(self, model_name, epsilon=16/255, alpha=1.6/255, epoch=10, decay=1., targeted=False, random_start=False,\n norm='linfty', loss='crossentropy', device=None, attack='NI-FGSM', **kwargs):\n super().__init__(model_name, epsilon, alpha, epoch, decay, targeted, random_start, norm, loss, device, attack)\n\n def transform(self, x, momentum, **kwargs):\n \"\"\"\n look ahead for NI-FGSM\n \"\"\"\n return x + self.alpha*self.decay*momentum" }, { "identifier": "DIM", "path": "transferattack/input_transformation/dim.py", "snippet": "class DIM(MIFGSM):\n \"\"\"\n DIM Attack\n 'Improving Transferability of Adversarial Examples with Input Diversity (CVPR 2019)'(https://arxiv.org/abs/1803.06978)\n\n Arguments:\n model_name (str): the name of surrogate model for attack.\n epsilon (float): the perturbation budget.\n alpha (float): the step size.\n epoch (int): the number of iterations.\n decay (float): the decay factor for momentum calculation.\n resize_rate (float): the relative size of the resized image\n diversity_prob (float): the probability for transforming the input image\n targeted (bool): targeted/untargeted attack.\n random_start (bool): whether using random initialization for delta.\n norm (str): the norm of perturbation, l2/linfty.\n loss (str): the loss function.\n device (torch.device): the device for data. If it is None, the device would be same as model\n \n Official arguments:\n epsilon=16/255, alpha=epsilon/epoch=1.6/255, epoch=10, decay=1, resize_rate=1.1, diversity_prob=0.5\n \"\"\"\n \n def __init__(self, model_name, epsilon=16/255, alpha=1.6/255, epoch=10, decay=1., resize_rate=1.1, diversity_prob=0.5, targeted=False, \n random_start=False, norm='linfty', loss='crossentropy', device=None, attack='DIM', **kwargs):\n super().__init__(model_name, epsilon, alpha, epoch, decay, targeted, random_start, norm, loss, device, attack)\n if resize_rate < 1:\n raise Exception(\"Error! 
The resize rate should be larger than 1.\")\n self.resize_rate = resize_rate\n self.diversity_prob = diversity_prob\n \n def transform(self, x, **kwargs):\n \"\"\"\n Random transform the input images\n \"\"\"\n # do not transform the input image\n if torch.rand(1) > self.diversity_prob:\n return x\n \n img_size = x.shape[-1]\n img_resize = int(img_size * self.resize_rate)\n\n # resize the input image to random size\n rnd = torch.randint(low=min(img_size, img_resize), high=max(img_size, img_resize), size=(1,), dtype=torch.int32)\n rescaled = F.interpolate(x, size=[rnd, rnd], mode='bilinear', align_corners=False)\n\n # randomly add padding\n h_rem = img_resize - rnd\n w_rem = img_resize - rnd\n pad_top = torch.randint(low=0, high=h_rem.item(), size=(1,), dtype=torch.int32)\n pad_bottom = h_rem - pad_top\n pad_left = torch.randint(low=0, high=w_rem.item(), size=(1,), dtype=torch.int32)\n pad_right = w_rem - pad_left\n\n padded = F.pad(rescaled, [pad_left.item(), pad_right.item(), pad_top.item(), pad_bottom.item()], value=0)\n\n # resize the image back to img_size\n return F.interpolate(padded, size=[img_size, img_size], mode='bilinear', align_corners=False)" }, { "identifier": "TIM", "path": "transferattack/input_transformation/tim.py", "snippet": "class TIM(MIFGSM):\n \"\"\"\n TIM Attack\n 'Evading Defenses to Transferable Adversarial Examples by Translation-Invariant Attacks (CVPR 2019)'(https://arxiv.org/abs/1904.02884)\n\n Arguments:\n model_name (str): the name of surrogate model for attack.\n epsilon (float): the perturbation budget.\n alpha (float): the step size.\n epoch (int): the number of iterations.\n decay (float): the decay factor for momentum calculation.\n kernel_type (str): the type of kernel (gaussian/uniform/linear).\n kernel_size (int): the size of kernel.\n targeted (bool): targeted/untargeted attack.\n random_start (bool): whether using random initialization for delta.\n norm (str): the norm of perturbation, l2/linfty.\n loss (str): the loss function.\n device (torch.device): the device for data. 
If it is None, the device would be same as model\n \n Official arguments:\n epsilon=16/255, alpha=epsilon/epoch=1.6/255, epoch=10, decay=1., kernel_type='gaussian', kernel_size=15\n\n Example script:\n python main.py --attack tim --output_dir adv_data/tim/resnet18\n \"\"\"\n \n def __init__(self, model_name, epsilon=16/255, alpha=1.6/255, epoch=10, decay=1., kernel_type='gaussian', kernel_size=15, targeted=False, \n random_start=False, norm='linfty', loss='crossentropy', device=None, attack='TIM', **kwargs):\n super().__init__(model_name, epsilon, alpha, epoch, decay, targeted, random_start, norm, loss, device, attack)\n self.kernel = self.generate_kernel(kernel_type, kernel_size)\n\n def generate_kernel(self, kernel_type, kernel_size, nsig=3):\n \"\"\"\n Generate the gaussian/uniform/linear kernel\n\n Arguments:\n kernel_type (str): the method for initilizing the kernel\n kernel_size (int): the size of kernel\n \"\"\"\n if kernel_type.lower() == 'gaussian':\n x = np.linspace(-nsig, nsig, kernel_size)\n kern1d = st.norm.pdf(x)\n kernel_raw = np.outer(kern1d, kern1d)\n kernel = kernel_raw / kernel_raw.sum()\n elif kernel_type.lower() == 'uniform':\n kernel = np.ones((kernel_size, kernel_size)) / (kernel_size ** 2)\n elif kernel_type.lower() == 'linear':\n kern1d = 1 - np.abs(np.linspace((-kernel_size+1)//2, (kernel_size-1)//2, kernel_size)/(kernel_size**2))\n kernel_raw = np.outer(kern1d, kern1d)\n kernel = kernel_raw / kernel_raw.sum()\n else:\n raise Exception(\"Unspported kernel type {}\".format(kernel_type))\n \n stack_kernel = np.stack([kernel, kernel, kernel])\n stack_kernel = np.expand_dims(stack_kernel, 1)\n return torch.from_numpy(stack_kernel.astype(np.float32)).to(self.device)\n\n def get_grad(self, loss, delta, **kwargs):\n \"\"\"\n Overridden for TIM attack.\n \"\"\"\n grad = torch.autograd.grad(loss, delta, retain_graph=False, create_graph=False)[0]\n grad = F.conv2d(grad, self.kernel, stride=1, padding='same', groups=3)\n return grad" }, { "identifier": "SIM", "path": "transferattack/input_transformation/sim.py", "snippet": "class SIM(MIFGSM):\n \"\"\"\n SIM Attack\n 'Nesterov Accelerated Gradient and Scale Invariance for Adversarial Attacks (ICLR 2020)'(https://arxiv.org/abs/1908.06281)\n\n Arguments:\n model_name (str): the name of surrogate model for attack.\n epsilon (float): the perturbation budget.\n alpha (float): the step size.\n epoch (int): the number of iterations.\n decay (float): the decay factor for momentum calculation.\n num_scale (int): the number of scaled copies in each iteration.\n targeted (bool): targeted/untargeted attack.\n random_start (bool): whether using random initialization for delta.\n norm (str): the norm of perturbation, l2/linfty.\n loss (str): the loss function.\n device (torch.device): the device for data. 
If it is None, the device would be same as model\n\n Official arguments:\n epsilon=16/255, alpha=epsilon/epoch=1.6/255, epoch=10, decay=1., num_scale=5\n \"\"\"\n\n def __init__(self, model_name, epsilon=16/255, alpha=1.6/255, epoch=10, decay=1., num_scale=5, targeted=False, random_start=False, norm='linfty', loss='crossentropy', device=None, attack='SIM', **kwargs):\n super().__init__(model_name, epsilon, alpha, epoch, decay, targeted, random_start, norm, loss, device, attack)\n self.num_scale = num_scale\n\n def transform(self, x, **kwargs):\n \"\"\"\n Scale the input for SIM\n \"\"\"\n return torch.cat([x / (2**i) for i in range(self.num_scale)])\n\n def get_loss(self, logits, label):\n \"\"\"\n Calculate the loss\n \"\"\"\n return -self.loss(logits, label.repeat(self.num_scale)) if self.targeted else self.loss(logits, label.repeat(self.num_scale))" }, { "identifier": "Admix", "path": "transferattack/input_transformation/admix.py", "snippet": "class Admix(MIFGSM):\n \"\"\"\n Admix Attack\n 'Admix: Enhancing the Transferability of Adversarial Attacks (ICCV 2021)'(https://arxiv.org/abs/2102.00436)\n\n Arguments:\n model_name (str): the name of surrogate model for attack.\n epsilon (float): the perturbation budget.\n alpha (float): the step size.\n epoch (int): the number of iterations.\n decay (float): the decay factor for momentum calculation.\n num_scale (int): the number of scaled copies in each iteration.\n num_admix (int): the number of admixed images in each iteration.\n admix_strength (float): the strength of admixed images.\n targeted (bool): targeted/untargeted attack.\n random_start (bool): whether using random initialization for delta.\n norm (str): the norm of perturbation, l2/linfty.\n loss (str): the loss function.\n device (torch.device): the device for data. If it is None, the device would be same as model\n\n Official arguments:\n epsilon=16/255, alpha=epsilon/epoch=1.6/255, epoch=10, decay=1., num_scale=5, num_admix=3, admix_strength=0.2\n \"\"\"\n\n def __init__(self, model_name, epsilon=16/255, alpha=1.6/255, epoch=10, decay=1., num_scale=5, num_admix=3, admix_strength=0.2, targeted=False, random_start=False, norm='linfty', loss='crossentropy', device=None, attack='Admix', **kwargs):\n super().__init__(model_name, epsilon, alpha, epoch, decay, targeted, random_start, norm, loss, device, attack)\n self.num_scale = num_scale\n self.num_admix = num_admix\n self.admix_strength = admix_strength\n\n def transform(self, x, **kwargs):\n \"\"\"\n Admix the input for Admix Attack\n \"\"\"\n admix_images = torch.concat([(x + self.admix_strength * x[torch.randperm(x.size(0))].detach()) for _ in range(self.num_admix)], dim=0)\n return torch.concat([admix_images / (2 ** i) for i in range(self.num_scale)])\n\n def get_loss(self, logits, label):\n \"\"\"\n Calculate the loss\n \"\"\"\n return -self.loss(logits, label.repeat(self.num_scale*self.num_admix)) if self.targeted else self.loss(logits, label.repeat(self.num_scale*self.num_admix))" } ]
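The MIFGSM and NIFGSM snippets above spread the momentum update across `get_momentum`, `update_delta`, and the base-class `forward` loop. As a reading aid, here is a minimal, self-contained sketch of that l-infinity update rule; the toy surrogate model, the 32x32 batch, and the assumption that images live in [0, 1] are illustrative choices, not part of the library.

import torch
import torch.nn as nn


def mifgsm_sketch(model, x, y, epsilon=16 / 255, alpha=1.6 / 255, epochs=10, decay=1.0):
    """Sketch of MI-FGSM: accumulate an L1-normalised gradient into a momentum
    buffer, take a signed step, and project back into the epsilon-ball."""
    loss_fn = nn.CrossEntropyLoss()
    delta = torch.zeros_like(x, requires_grad=True)
    momentum = torch.zeros_like(x)
    for _ in range(epochs):
        loss = loss_fn(model(x + delta), y)
        grad = torch.autograd.grad(loss, delta)[0]
        # get_momentum: decay * momentum + grad / mean(|grad|)
        momentum = decay * momentum + grad / grad.abs().mean(dim=(1, 2, 3), keepdim=True)
        # update_delta (linfty branch): signed step, then clip to the epsilon-ball
        delta = (delta + alpha * momentum.sign()).clamp(-epsilon, epsilon)
        # keep x + delta inside the assumed [0, 1] image range
        delta = ((x + delta).clamp(0, 1) - x).detach().requires_grad_(True)
    return delta.detach()


if __name__ == "__main__":
    toy_model = nn.Sequential(nn.Flatten(), nn.Linear(3 * 32 * 32, 10))  # stand-in surrogate
    images, labels = torch.rand(4, 3, 32, 32), torch.randint(0, 10, (4,))
    print(mifgsm_sketch(toy_model, images, labels).abs().max())

NI-FGSM differs only in the look-ahead transform shown in its snippet: the loss is evaluated at x + delta + alpha * decay * momentum rather than at x + delta.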
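VMI-FGSM's variance tuning is likewise split between `get_variance` and its `forward`. The sketch below shows one step of the idea under the same toy assumptions as above: average the gradients at `num_neighbor` points sampled uniformly around the current input, subtract the current gradient, and feed `grad + variance` into the momentum update. Image-range clamping is omitted for brevity; this is an illustration, not the repository's implementation.

import torch
import torch.nn as nn


def vmifgsm_step(model, x, delta, y, momentum, variance,
                 epsilon=16 / 255, alpha=1.6 / 255, decay=1.0,
                 radius=1.5 * 16 / 255, num_neighbor=20):
    """One VMI-FGSM-style step; the caller carries delta, momentum and variance."""
    loss_fn = nn.CrossEntropyLoss()
    loss = loss_fn(model(x + delta), y)
    grad = torch.autograd.grad(loss, delta)[0]

    # Momentum uses the current gradient plus the variance from the previous step.
    g = grad + variance
    momentum = decay * momentum + g / g.abs().mean(dim=(1, 2, 3), keepdim=True)

    # New variance: mean gradient over a uniform neighbourhood minus the current gradient.
    neighbor_grad = torch.zeros_like(grad)
    for _ in range(num_neighbor):
        noise = torch.zeros_like(delta).uniform_(-radius, radius)
        neighbor_loss = loss_fn(model(x + delta + noise), y)
        neighbor_grad += torch.autograd.grad(neighbor_loss, delta)[0]
    variance = neighbor_grad / num_neighbor - grad

    delta = (delta + alpha * momentum.sign()).clamp(-epsilon, epsilon)
    delta = delta.detach().requires_grad_(True)
    return delta, momentum, variance


if __name__ == "__main__":
    toy_model = nn.Sequential(nn.Flatten(), nn.Linear(3 * 32 * 32, 10))
    x, y = torch.rand(2, 3, 32, 32), torch.randint(0, 10, (2,))
    delta = torch.zeros_like(x, requires_grad=True)
    momentum, variance = torch.zeros_like(x), torch.zeros_like(x)
    for _ in range(3):  # a few illustrative iterations
        delta, momentum, variance = vmifgsm_step(toy_model, x, delta, y, momentum, variance)
    print(delta.abs().max())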
from ..utils import * from ..attack import Attack from .ghost_networks.resnet import ghost_resnet101, ghost_resnet152 from ..gradient.mifgsm import MIFGSM from ..gradient.nifgsm import NIFGSM from ..gradient.vmifgsm import VMIFGSM from ..input_transformation.dim import DIM from ..input_transformation.tim import TIM from ..input_transformation.sim import SIM from ..input_transformation.admix import Admix from torch import Tensor from ..utils import * from ..gradient.mifgsm import MIFGSM from ..gradient.nifgsm import NIFGSM from ..input_transformation.dim import DIM from ..input_transformation.tim import TIM from ..input_transformation.sim import SIM from ..input_transformation.admix import Admix
11,898
""" Ghost Network Attack: Arguments: model (str): the surrogate model for attack. ghost_keep_prob (float): the dropout rate when generating ghost networks. ghost_random_range (float): the dropout rate when generating ghost networks of residual structure. """ def __init__(self, model_name='inc_v3', ghost_keep_prob=0.994, ghost_random_range=0.16, *args, **kwargs): self.ghost_keep_prob = ghost_keep_prob # do not use self.ghost_random_range = ghost_random_range # do not use super().__init__(model_name, *args, **kwargs) def load_model(self, model_name): if model_name in support_models.keys(): # The ghost_keep_prob and ghost_random_range are correctly set as param default value, # in the __init__ function of each GhostNetwork. model = wrap_model(support_models[model_name](weights='DEFAULT').eval().cuda()) else: raise ValueError('Model {} not supported for GhostNetwork'.format(model_name)) return model class GhostNetwork_IFGSM(MIFGSM): """ Ghost Network Attack: Arguments: model (str): the surrogate model for attack. ghost_keep_prob (float): the dropout rate when generating ghost networks. ghost_random_range (float): the dropout rate when generating ghost networks of residual structure. """ def __init__(self, model_name='inc_v3', ghost_keep_prob=0.994, ghost_random_range=0.16, *args, **kwargs): self.ghost_keep_prob = ghost_keep_prob # do not use self.ghost_random_range = ghost_random_range # do not use super().__init__(model_name, *args, **kwargs) self.decay = 0. def load_model(self, model_name): if model_name in support_models.keys(): # The ghost_keep_prob and ghost_random_range are correctly set as param default value, # in the __init__ function of each GhostNetwork. model = wrap_model(support_models[model_name](weights='DEFAULT').eval().cuda()) else: raise ValueError('Model {} not supported for GhostNetwork'.format(model_name)) return model class GhostNetwork_NIFGSM(NIFGSM): """ Ghost Network Attack: Arguments: model (str): the surrogate model for attack. ghost_keep_prob (float): the dropout rate when generating ghost networks. ghost_random_range (float): the dropout rate when generating ghost networks of residual structure. """ def __init__(self, model_name='inc_v3', ghost_keep_prob=0.994, ghost_random_range=0.16, *args, **kwargs): self.ghost_keep_prob = ghost_keep_prob # do not use self.ghost_random_range = ghost_random_range # do not use super().__init__(model_name, *args, **kwargs) def load_model(self, model_name): if model_name in support_models.keys(): # The ghost_keep_prob and ghost_random_range are correctly set as param default value, # in the __init__ function of each GhostNetwork. model = wrap_model(support_models[model_name](weights='DEFAULT').eval().cuda()) else: raise ValueError('Model {} not supported for GhostNetwork'.format(model_name)) return model class GhostNetwork_VMIFGSM(VMIFGSM): """ Ghost Network Attack: Arguments: model (str): the surrogate model for attack. ghost_keep_prob (float): the dropout rate when generating ghost networks. ghost_random_range (float): the dropout rate when generating ghost networks of residual structure. 
""" def __init__(self, model='inc_v3', ghost_keep_prob=0.994, ghost_random_range=0.16, *args, **kwargs): self.ghost_keep_prob = ghost_keep_prob # do not use self.ghost_random_range = ghost_random_range # do not use super().__init__(model, *args, **kwargs) def load_model(self, model_name): if model_name in support_models.keys(): # The ghost_keep_prob and ghost_random_range are correctly set as param default value, # in the __init__ function of each GhostNetwork. model = wrap_model(support_models[model_name](weights='DEFAULT').eval().cuda()) else: raise ValueError('Model {} not supported for GhostNetwork'.format(model_name)) return model class GhostNetwork_DIM(DIM): """ Ghost Network Attack: Arguments: model (str): the surrogate model for attack. ghost_keep_prob (float): the dropout rate when generating ghost networks. ghost_random_range (float): the dropout rate when generating ghost networks of residual structure. """ def __init__(self, model='inc_v3', ghost_keep_prob=0.994, ghost_random_range=0.16, *args, **kwargs): self.ghost_keep_prob = ghost_keep_prob # do not use self.ghost_random_range = ghost_random_range # do not use super().__init__(model, *args, **kwargs) def load_model(self, model_name): if model_name in support_models.keys(): # The ghost_keep_prob and ghost_random_range are correctly set as param default value, # in the __init__ function of each GhostNetwork. model = wrap_model(support_models[model_name](weights='DEFAULT').eval().cuda()) else: raise ValueError('Model {} not supported for GhostNetwork'.format(model_name)) return model
# example bash: python main.py --attack=ghost_network
support_models = {
    "resnet101": ghost_resnet101,
    "resnet152": ghost_resnet152,
}


class GhostNetwork_MIFGSM(MIFGSM):
    """
    Ghost Network Attack:

    Arguments:
        model (str): the surrogate model for attack.
        ghost_keep_prob (float): the dropout rate when generating ghost networks.
        ghost_random_range (float): the dropout rate when generating ghost networks of residual structure.
    """

    def __init__(self, model_name='inc_v3', ghost_keep_prob=0.994, ghost_random_range=0.16, *args, **kwargs):
        self.ghost_keep_prob = ghost_keep_prob # do not use
        self.ghost_random_range = ghost_random_range # do not use
        super().__init__(model_name, *args, **kwargs)

    def load_model(self, model_name):
        if model_name in support_models.keys():
            # The ghost_keep_prob and ghost_random_range are correctly set as param default value,
            # in the __init__ function of each GhostNetwork.
            model = wrap_model(support_models[model_name](weights='DEFAULT').eval().cuda())
        else:
            raise ValueError('Model {} not supported for GhostNetwork'.format(model_name))
        return model


class GhostNetwork_IFGSM(MIFGSM):
    """
    Ghost Network Attack:

    Arguments:
        model (str): the surrogate model for attack.
        ghost_keep_prob (float): the dropout rate when generating ghost networks.
        ghost_random_range (float): the dropout rate when generating ghost networks of residual structure.
    """

    def __init__(self, model_name='inc_v3', ghost_keep_prob=0.994, ghost_random_range=0.16, *args, **kwargs):
        self.ghost_keep_prob = ghost_keep_prob # do not use
        self.ghost_random_range = ghost_random_range # do not use
        super().__init__(model_name, *args, **kwargs)
        self.decay = 0.

    def load_model(self, model_name):
        if model_name in support_models.keys():
            # The ghost_keep_prob and ghost_random_range are correctly set as param default value,
            # in the __init__ function of each GhostNetwork.
            model = wrap_model(support_models[model_name](weights='DEFAULT').eval().cuda())
        else:
            raise ValueError('Model {} not supported for GhostNetwork'.format(model_name))
        return model


class GhostNetwork_NIFGSM(NIFGSM):
    """
    Ghost Network Attack:

    Arguments:
        model (str): the surrogate model for attack.
        ghost_keep_prob (float): the dropout rate when generating ghost networks.
        ghost_random_range (float): the dropout rate when generating ghost networks of residual structure.
    """

    def __init__(self, model_name='inc_v3', ghost_keep_prob=0.994, ghost_random_range=0.16, *args, **kwargs):
        self.ghost_keep_prob = ghost_keep_prob # do not use
        self.ghost_random_range = ghost_random_range # do not use
        super().__init__(model_name, *args, **kwargs)

    def load_model(self, model_name):
        if model_name in support_models.keys():
            # The ghost_keep_prob and ghost_random_range are correctly set as param default value,
            # in the __init__ function of each GhostNetwork.
            model = wrap_model(support_models[model_name](weights='DEFAULT').eval().cuda())
        else:
            raise ValueError('Model {} not supported for GhostNetwork'.format(model_name))
        return model


class GhostNetwork_VMIFGSM(VMIFGSM):
    """
    Ghost Network Attack:

    Arguments:
        model (str): the surrogate model for attack.
        ghost_keep_prob (float): the dropout rate when generating ghost networks.
        ghost_random_range (float): the dropout rate when generating ghost networks of residual structure.
    """

    def __init__(self, model='inc_v3', ghost_keep_prob=0.994, ghost_random_range=0.16, *args, **kwargs):
        self.ghost_keep_prob = ghost_keep_prob # do not use
        self.ghost_random_range = ghost_random_range # do not use
        super().__init__(model, *args, **kwargs)

    def load_model(self, model_name):
        if model_name in support_models.keys():
            # The ghost_keep_prob and ghost_random_range are correctly set as param default value,
            # in the __init__ function of each GhostNetwork.
            model = wrap_model(support_models[model_name](weights='DEFAULT').eval().cuda())
        else:
            raise ValueError('Model {} not supported for GhostNetwork'.format(model_name))
        return model


class GhostNetwork_DIM(DIM):
    """
    Ghost Network Attack:

    Arguments:
        model (str): the surrogate model for attack.
        ghost_keep_prob (float): the dropout rate when generating ghost networks.
        ghost_random_range (float): the dropout rate when generating ghost networks of residual structure.
    """

    def __init__(self, model='inc_v3', ghost_keep_prob=0.994, ghost_random_range=0.16, *args, **kwargs):
        self.ghost_keep_prob = ghost_keep_prob # do not use
        self.ghost_random_range = ghost_random_range # do not use
        super().__init__(model, *args, **kwargs)

    def load_model(self, model_name):
        if model_name in support_models.keys():
            # The ghost_keep_prob and ghost_random_range are correctly set as param default value,
            # in the __init__ function of each GhostNetwork.
            model = wrap_model(support_models[model_name](weights='DEFAULT').eval().cuda())
        else:
            raise ValueError('Model {} not supported for GhostNetwork'.format(model_name))
        return model
class GhostNetwork_SIM(SIM):
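The row's next_line opens `class GhostNetwork_SIM(SIM):`. Purely as an illustration, a continuation that follows the same pattern as the other GhostNetwork_* classes in all_code would look like the sketch below; this is a guess at the completion, not the dataset's reference answer, and it assumes the same module context (SIM, support_models, wrap_model) as the code above.

class GhostNetwork_SIM(SIM):
    """
    Ghost Network Attack:

    Arguments:
        model (str): the surrogate model for attack.
        ghost_keep_prob (float): the dropout rate when generating ghost networks.
        ghost_random_range (float): the dropout rate when generating ghost networks of residual structure.
    """

    def __init__(self, model='inc_v3', ghost_keep_prob=0.994, ghost_random_range=0.16, *args, **kwargs):
        self.ghost_keep_prob = ghost_keep_prob # do not use
        self.ghost_random_range = ghost_random_range # do not use
        super().__init__(model, *args, **kwargs)

    def load_model(self, model_name):
        if model_name in support_models.keys():
            model = wrap_model(support_models[model_name](weights='DEFAULT').eval().cuda())
        else:
            raise ValueError('Model {} not supported for GhostNetwork'.format(model_name))
        return model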
14
2023-10-31 03:43:26+00:00
16k
chenruduan/OAReactDiff
demo.py
[ { "identifier": "LEFTNet", "path": "oa_reactdiff/model/leftnet.py", "snippet": "class LEFTNet(torch.nn.Module):\n r\"\"\"\n LEFTNet\n\n Args:\n pos_require_grad (bool, optional): If set to :obj:`True`, will require to take derivative of model output with respect to the atomic positions. (default: :obj:`False`)\n cutoff (float, optional): Cutoff distance for interatomic interactions. (default: :obj:`5.0`)\n num_layers (int, optional): Number of building blocks. (default: :obj:`4`)\n hidden_channels (int, optional): Hidden embedding size. (default: :obj:`128`)\n num_radial (int, optional): Number of radial basis functions. (default: :obj:`96`)\n y_mean (float, optional): Mean value of the labels of training data. (default: :obj:`0`)\n y_std (float, optional): Standard deviation of the labels of training data. (default: :obj:`1`)\n\n \"\"\"\n\n def __init__(\n self,\n pos_require_grad=False,\n cutoff=10.0,\n num_layers=4,\n hidden_channels=128,\n num_radial=96,\n in_hidden_channels: int = 8,\n reflect_equiv: bool = True,\n legacy: bool = True,\n update: bool = True,\n pos_grad: bool = False,\n single_layer_output: bool = True,\n for_conf: bool = False,\n ff: bool = False,\n object_aware: bool = True,\n **kwargs,\n ):\n super(LEFTNet, self).__init__()\n self.num_layers = num_layers\n self.hidden_channels = hidden_channels\n self.cutoff = cutoff\n self.pos_require_grad = pos_require_grad\n self.reflect_equiv = reflect_equiv\n self.legacy = legacy\n self.update = update\n self.pos_grad = pos_grad\n self.for_conf = for_conf\n self.ff = ff\n self.object_aware = object_aware\n\n self.embedding = nn.Linear(in_hidden_channels, hidden_channels)\n self.embedding_out = nn.Linear(hidden_channels, in_hidden_channels)\n self.radial_emb = RBFEmb(num_radial, self.cutoff)\n self.neighbor_emb = NeighborEmb(hidden_channels, in_hidden_channels)\n self.s2v = CFConvS2V(hidden_channels)\n\n self.radial_lin = nn.Sequential(\n nn.Linear(num_radial, hidden_channels),\n nn.SiLU(inplace=True),\n nn.Linear(hidden_channels, hidden_channels),\n )\n\n self.lin3 = nn.Sequential(\n nn.Linear(3, hidden_channels // 4),\n nn.SiLU(inplace=True),\n nn.Linear(hidden_channels // 4, 1),\n )\n self.pos_expansion = MLP(\n in_dim=3,\n out_dims=[hidden_channels // 2, hidden_channels],\n activation=\"swish\",\n last_layer_no_activation=True,\n bias=False,\n )\n if self.legacy:\n self.distance_embedding = MLP(\n in_dim=num_radial,\n out_dims=[hidden_channels // 2, hidden_channels],\n activation=\"swish\",\n bias=False,\n )\n if self.pos_grad:\n self.dynamic_mlp_modules = nn.Sequential(\n nn.Linear(hidden_channels, hidden_channels // 2),\n nn.SiLU(inplace=True),\n nn.Linear(hidden_channels // 2, 3),\n )\n\n self.gcl_layers = nn.ModuleList()\n self.message_layers = nn.ModuleList()\n self.update_layers = nn.ModuleList()\n\n for _ in range(num_layers):\n self.gcl_layers.append(\n GCLMessage(hidden_channels, num_radial, legacy=legacy)\n )\n self.message_layers.append(\n EquiMessage(hidden_channels, num_radial, reflect_equiv).jittable()\n )\n self.update_layers.append(EquiUpdate(hidden_channels, reflect_equiv))\n\n self.last_layer = nn.Linear(hidden_channels, 1)\n\n self.inv_sqrt_2 = 1 / math.sqrt(2.0)\n self.out_pos = EquiOutput(\n hidden_channels,\n out_channels=1,\n single_layer_output=single_layer_output,\n )\n\n # for node-wise frame\n self.vec = vector()\n\n self.reset_parameters()\n\n def reset_parameters(self):\n self.radial_emb.reset_parameters()\n\n def scalarization(self, pos, edge_index):\n i, j = edge_index\n dist = (pos[i] - 
pos[j]).pow(2).sum(dim=-1).sqrt()\n coord_diff = pos[i] - pos[j]\n radial = torch.sum((coord_diff) ** 2, 1).unsqueeze(1)\n coord_cross = torch.cross(pos[i], pos[j])\n norm = torch.sqrt(radial) + EPS\n coord_diff = coord_diff / norm\n cross_norm = (torch.sqrt(torch.sum((coord_cross) ** 2, 1).unsqueeze(1))) + EPS\n coord_cross = coord_cross / cross_norm\n coord_vertical = torch.cross(coord_diff, coord_cross)\n\n return dist, coord_diff, coord_cross, coord_vertical\n\n @staticmethod\n def assemble_nodemask(edge_index: Tensor, pos: Tensor):\n node_mask = torch.zeros(pos.size(0), device=pos.device)\n node_mask[:] = -1\n _i, _j = edge_index\n _ind = 0\n for center in range(pos.size(0)):\n if node_mask[center] > -1:\n continue\n _connected = _j[torch.where(_i == center)]\n _connected = torch.concat(\n [_connected, torch.tensor([center], device=pos.device)]\n )\n node_mask[_connected] = _ind\n _ind += 1\n return node_mask\n\n def forward(\n self,\n h: Tensor,\n pos: Tensor,\n edge_index: Tensor,\n edge_attr: Optional[Tensor] = None,\n node_mask: Optional[Tensor] = None,\n edge_mask: Optional[Tensor] = None,\n update_coords_mask: Optional[Tensor] = None,\n subgraph_mask: Optional[Tensor] = None,\n ):\n # if self.pos_require_grad:\n # pos.requires_grad_()\n\n if not self.object_aware:\n subgraph_mask = None\n\n i, j = edge_index\n\n # embed z, assuming last column is atom number\n z_emb = self.embedding(h)\n\n i, j = edge_index\n dist = (pos[i] - pos[j]).pow(2).sum(dim=-1).sqrt()\n inner_subgraph_mask = torch.zeros(edge_index.size(1), 1, device=dist.device)\n inner_subgraph_mask[torch.where(dist < self.cutoff)[0]] = 1\n\n all_edge_masks = inner_subgraph_mask\n if subgraph_mask is not None:\n all_edge_masks = all_edge_masks * subgraph_mask\n\n edge_index_w_cutoff = edge_index.T[torch.where(all_edge_masks > 0)[0]].T\n node_mask_w_cutoff = self.assemble_nodemask(\n edge_index=edge_index_w_cutoff, pos=pos\n )\n\n pos_frame = pos.clone()\n pos_frame = remove_mean_batch(pos_frame, node_mask_w_cutoff.long())\n\n # bulid edge-wise frame and scalarization vector features for edge update\n dist, coord_diff, coord_cross, coord_vertical = self.scalarization(\n pos_frame, edge_index\n )\n\n dist = dist * all_edge_masks.squeeze(-1)\n coord_diff = coord_diff * all_edge_masks\n coord_cross = coord_cross * all_edge_masks\n coord_vertical = coord_vertical * all_edge_masks\n\n frame = torch.cat(\n (\n coord_diff.unsqueeze(-1),\n coord_cross.unsqueeze(-1),\n coord_vertical.unsqueeze(-1),\n ),\n dim=-1,\n )\n radial_emb = self.radial_emb(dist)\n radial_emb = radial_emb * all_edge_masks\n\n f = self.radial_lin(radial_emb)\n rbounds = 0.5 * (torch.cos(dist * pi / self.cutoff) + 1.0)\n f = rbounds.unsqueeze(-1) * f\n\n # init node features\n s = self.neighbor_emb(h, z_emb, edge_index, f)\n\n NE1 = self.s2v(s, coord_diff.unsqueeze(-1), edge_index, f)\n scalrization1 = torch.sum(NE1[i].unsqueeze(2) * frame.unsqueeze(-1), dim=1)\n scalrization2 = torch.sum(NE1[j].unsqueeze(2) * frame.unsqueeze(-1), dim=1)\n if self.reflect_equiv:\n scalrization1[:, 1, :] = torch.abs(scalrization1[:, 1, :].clone())\n scalrization2[:, 1, :] = torch.abs(scalrization2[:, 1, :].clone())\n\n scalar3 = (\n self.lin3(torch.permute(scalrization1, (0, 2, 1)))\n + torch.permute(scalrization1, (0, 2, 1))[:, :, 0].unsqueeze(2)\n ).squeeze(-1)\n scalar4 = (\n self.lin3(torch.permute(scalrization2, (0, 2, 1)))\n + torch.permute(scalrization2, (0, 2, 1))[:, :, 0].unsqueeze(2)\n ).squeeze(-1)\n edgeweight = torch.cat((scalar3, scalar4), dim=-1) * 
rbounds.unsqueeze(-1)\n edgeweight = torch.cat((edgeweight, f), dim=-1)\n # add distance embedding\n edgeweight = torch.cat((edgeweight, radial_emb), dim=-1)\n\n # bulid node-wise frame for node-update\n a = pos_frame\n if self.legacy:\n b = self.vec(pos_frame, edge_index)\n else:\n # Added by Chenru: for new implementation of constructing node frame.\n eff_edge_ij = torch.where(all_edge_masks.squeeze(-1) == 1)[0]\n eff_edge_index = edge_index[:, eff_edge_ij]\n eff_dist = dist[eff_edge_ij]\n b = nn_vector(eff_dist, eff_edge_index, pos_frame)\n # assert_rot_equiv(nn_vector, dist_pad, edge_index, pos) # for debugging\n\n x1 = (a - b) / ((torch.sqrt(torch.sum((a - b) ** 2, 1).unsqueeze(1))) + EPS)\n y1 = torch.cross(a, b)\n normy = (torch.sqrt(torch.sum(y1**2, 1).unsqueeze(1))) + EPS\n y1 = y1 / normy\n # assert torch.trace(torch.matmul(x1, torch.transpose(y1, 0, 1))) < EPS # for debugging\n\n z1 = torch.cross(x1, y1)\n nodeframe = torch.cat(\n (x1.unsqueeze(-1), y1.unsqueeze(-1), z1.unsqueeze(-1)), dim=-1\n )\n\n pos_prjt = torch.sum(pos_frame.unsqueeze(-1) * nodeframe, dim=1)\n\n vec = torch.zeros(s.size(0), 3, s.size(1), device=s.device)\n gradient = torch.zeros(s.size(0), 3, device=s.device)\n for i in range(self.num_layers):\n # Added by Chenru: for letting multiple objects message passing.\n if self.legacy or i == 0:\n s = s + self.pos_expansion(pos_prjt)\n s, edgeweight = self.gcl_layers[i](\n s,\n edge_index,\n edgeweight,\n )\n\n dx, dvec = self.message_layers[i](\n s,\n vec,\n edge_index,\n radial_emb,\n edgeweight,\n coord_diff,\n coord_cross,\n )\n s = s + dx\n vec = vec + dvec\n s = s * self.inv_sqrt_2\n\n if self.update:\n dx, dvec = self.update_layers[i](s, vec, nodeframe)\n s = s + dx\n vec = vec + dvec\n\n if self.pos_grad:\n dynamic_coff = self.dynamic_mlp_modules(s) # (node, 3)\n basis_mix = (\n dynamic_coff[:, :1] * x1\n + dynamic_coff[:, 1:2] * y1\n + dynamic_coff[:, 2:3] * z1\n )\n gradient = gradient + basis_mix / self.num_layers\n\n if self.for_conf:\n return s\n\n _, dpos = self.out_pos(s, vec)\n\n if update_coords_mask is not None:\n dpos = update_coords_mask * dpos\n pos = pos + dpos + gradient\n\n if self.ff:\n return s, dpos\n\n h = self.embedding_out(s)\n if node_mask is not None:\n h = h * node_mask\n edge_attr = None\n return h, pos, edge_attr" }, { "identifier": "generate_full_eij", "path": "oa_reactdiff/tests/model/utils.py", "snippet": "def generate_full_eij(n_atom: int):\n r\"\"\"Get fully-connected graphs for n_atoms.\"\"\"\n edge_index = []\n for ii in range(n_atom):\n for jj in range(n_atom):\n if ii != jj:\n edge_index.append([ii, jj])\n return torch.transpose(torch.Tensor(edge_index), 1, 0).long()" }, { "identifier": "get_cut_graph_mask", "path": "oa_reactdiff/tests/model/utils.py", "snippet": "def get_cut_graph_mask(edge_index, n_cut):\n r\"\"\"Get mask for a graph cut at n_cut, with ij representing cross-subgraph edgs being 0.\"\"\"\n ind_sum = torch.where(edge_index < n_cut, 1, 0).sum(dim=0)\n subgraph_mask = torch.zeros(edge_index.size(1)).long()\n subgraph_mask[ind_sum == 2] = 1\n subgraph_mask[ind_sum == 0] = 1\n subgraph_mask = subgraph_mask[:, None]\n return subgraph_mask" }, { "identifier": "DDPMModule", "path": "oa_reactdiff/trainer/pl_trainer.py", "snippet": "class DDPMModule(LightningModule):\n def __init__(\n self,\n model_config: Dict,\n optimizer_config: Dict,\n training_config: Dict,\n node_nfs: List[int] = [9] * 3,\n edge_nf: int = 4,\n condition_nf: int = 3,\n fragment_names: List[str] = [\"inorg_node\", \"org_edge\", \"org_node\"],\n 
pos_dim: int = 3,\n update_pocket_coords: bool = True,\n condition_time: bool = True,\n edge_cutoff: Optional[float] = None,\n norm_values: Tuple = (1.0, 1.0, 1.0),\n norm_biases: Tuple = (0.0, 0.0, 0.0),\n noise_schedule: str = \"polynomial_2\",\n timesteps: int = 1000,\n precision: float = 1e-5,\n loss_type: str = \"l2\",\n pos_only: bool = False,\n process_type: Optional[str] = None,\n model: nn.Module = None,\n enforce_same_encoding: Optional[List] = None,\n scales: List[float] = [1.0, 1.0, 1.0],\n eval_epochs: int = 20,\n source: Optional[Dict] = None,\n fixed_idx: Optional[List] = None,\n ) -> None:\n super().__init__()\n egnn_dynamics = EGNNDynamics(\n model_config=model_config,\n node_nfs=node_nfs,\n edge_nf=edge_nf,\n condition_nf=condition_nf,\n fragment_names=fragment_names,\n pos_dim=pos_dim,\n update_pocket_coords=update_pocket_coords,\n condition_time=condition_time,\n edge_cutoff=edge_cutoff,\n model=model,\n enforce_same_encoding=enforce_same_encoding,\n source=source,\n )\n\n normalizer = Normalizer(\n norm_values=norm_values,\n norm_biases=norm_biases,\n pos_dim=pos_dim,\n )\n\n gamma_module = PredefinedNoiseSchedule(\n noise_schedule=noise_schedule,\n timesteps=timesteps,\n precision=precision,\n )\n schedule = DiffSchedule(gamma_module=gamma_module, norm_values=norm_values)\n\n self.ddpm = EnVariationalDiffusion(\n dynamics=egnn_dynamics,\n schdule=schedule,\n normalizer=normalizer,\n size_histogram=None,\n loss_type=loss_type,\n pos_only=pos_only,\n fixed_idx=fixed_idx,\n )\n self.model_config = model_config\n self.optimizer_config = optimizer_config\n self.training_config = training_config\n self.loss_type = loss_type\n self.n_fragments = len(fragment_names)\n self.remove_h = training_config[\"remove_h\"]\n self.pos_only = pos_only\n self.process_type = process_type or \"QM9\"\n self.scales = scales\n\n sampling_gamma_module = PredefinedNoiseSchedule(\n noise_schedule=\"polynomial_2\",\n timesteps=150,\n precision=precision,\n )\n self.sampling_schedule = DiffSchedule(\n gamma_module=sampling_gamma_module,\n norm_values=norm_values,\n )\n self.eval_epochs = eval_epochs\n\n self.clip_grad = training_config[\"clip_grad\"]\n if self.clip_grad:\n self.gradnorm_queue = utils.Queue()\n self.gradnorm_queue.add(3000)\n self.save_hyperparameters()\n\n def configure_optimizers(self):\n optimizer = torch.optim.AdamW(self.ddpm.parameters(), **self.optimizer_config)\n if not self.training_config[\"lr_schedule_type\"] is None:\n scheduler_func = LR_SCHEDULER[self.training_config[\"lr_schedule_type\"]]\n scheduler = scheduler_func(\n optimizer=optimizer, **self.training_config[\"lr_schedule_config\"]\n )\n return [optimizer], [scheduler]\n else:\n return optimizer\n\n def setup(self, stage: Optional[str] = None):\n func = PROCESS_FUNC[self.process_type]\n ft = FILE_TYPE[self.process_type]\n if stage == \"fit\":\n self.train_dataset = func(\n Path(self.training_config[\"datadir\"], f\"train_addprop{ft}\"),\n **self.training_config,\n )\n self.training_config[\"reflection\"] = False # Turn off reflection in val.\n self.val_dataset = func(\n Path(self.training_config[\"datadir\"], f\"valid_addprop{ft}\"),\n **self.training_config,\n )\n elif stage == \"test\":\n self.test_dataset = func(\n Path(self.training_config[\"datadir\"], f\"test{ft}\"),\n **self.training_config,\n )\n else:\n raise NotImplementedError\n\n def train_dataloader(self) -> DataLoader:\n return DataLoader(\n self.train_dataset,\n self.training_config[\"bz\"],\n shuffle=True,\n 
num_workers=self.training_config[\"num_workers\"],\n collate_fn=self.train_dataset.collate_fn,\n )\n\n def val_dataloader(self) -> DataLoader:\n return DataLoader(\n self.val_dataset,\n self.training_config[\"bz\"],\n shuffle=False,\n num_workers=self.training_config[\"num_workers\"],\n collate_fn=self.val_dataset.collate_fn,\n )\n\n def test_dataloader(self) -> DataLoader:\n return DataLoader(\n self.test_dataset,\n self.training_config[\"bz\"],\n shuffle=False,\n num_workers=self.training_config[\"num_workers\"],\n collate_fn=self.test_dataset.collate_fn,\n )\n\n def compute_loss(self, batch):\n representations, conditions = batch\n loss_terms = self.ddpm.forward(\n representations,\n conditions,\n )\n info = {}\n if not self.pos_only:\n denoms = [\n (self.ddpm.pos_dim + self.ddpm.node_nfs[ii])\n * representations[ii][\"size\"]\n for ii in range(self.n_fragments)\n ]\n else:\n denoms = [\n self.ddpm.pos_dim * representations[ii][\"size\"]\n for ii in range(self.n_fragments)\n ]\n error_t_normalized = [\n loss_terms[\"error_t\"][ii] / denoms[ii] * self.scales[ii]\n for ii in range(self.n_fragments)\n ]\n if self.loss_type == \"l2\" and self.training:\n # normalize loss_t\n loss_t = torch.stack(error_t_normalized, dim=0).sum(dim=0)\n\n # normalize loss_0\n loss_0_x = [\n loss_terms[\"loss_0_x\"][ii]\n * self.scales[ii]\n / (self.ddpm.pos_dim * representations[ii][\"size\"])\n for ii in range(self.n_fragments)\n ]\n loss_0_x = torch.stack(loss_0_x, dim=0).sum(dim=0)\n loss_0_cat = torch.stack(loss_terms[\"loss_0_cat\"], dim=0).sum(dim=0)\n loss_0_charge = torch.stack(loss_terms[\"loss_0_charge\"], dim=0).sum(dim=0)\n loss_0 = loss_0_x + loss_0_cat + loss_0_charge\n\n # VLB objective or evaluation step\n else:\n # Note: SNR_weight should be negative\n error_t = [\n -self.ddpm.T * 0.5 * loss_terms[\"SNR_weight\"] * _error_t\n for _error_t in loss_terms[\"error_t\"]\n ]\n loss_t = torch.stack(error_t, dim=0).sum(dim=0)\n\n loss_0_x = torch.stack(loss_terms[\"loss_0_x\"], dim=0).sum(dim=0)\n loss_0_cat = torch.stack(loss_terms[\"loss_0_cat\"], dim=0).sum(dim=0)\n loss_0_charge = torch.stack(loss_terms[\"loss_0_charge\"], dim=0).sum(dim=0)\n loss_0 = (\n loss_0_x + loss_0_cat + loss_0_charge + loss_terms[\"neg_log_constants\"]\n )\n\n nll = loss_t + loss_0 + loss_terms[\"kl_prior\"]\n # nll = loss_t\n\n for ii in range(self.n_fragments):\n info[f\"error_t_{ii}\"] = error_t_normalized[ii].mean().item() / (\n self.scales[ii] + 1e-4\n )\n info[f\"unorm_error_t_{ii}\"] = loss_terms[\"error_t\"][ii].mean().item()\n\n # Correct for normalization on x.\n if not (self.loss_type == \"l2\" and self.training):\n nll = nll - loss_terms[\"delta_log_px\"]\n\n # Transform conditional nll into joint nll\n # Note:\n # loss = -log p(x,h|N) and log p(x,h,N) = log p(x,h|N) + log p(N)\n # Therefore, log p(x,h|N) = -loss + log p(N)\n # => loss_new = -log p(x,h,N) = loss - log p(N)\n nll = nll - loss_terms[\"log_pN\"]\n\n return nll, info\n\n def eval_inplaint_batch(\n self,\n batch: List,\n resamplings: int = 5,\n jump_length: int = 5,\n frag_fixed: List = [0, 2],\n ):\n sampling_ddpm = copy.deepcopy(self.ddpm)\n sampling_ddpm.schedule = self.sampling_schedule\n sampling_ddpm.T = self.sampling_schedule.gamma_module.timesteps\n sampling_ddpm.eval()\n\n representations, conditions = batch\n xh_fixed = [\n torch.cat(\n [repre[feature_type] for feature_type in FEATURE_MAPPING],\n dim=1,\n )\n for repre in representations\n ]\n n_samples = representations[0][\"size\"].size(0)\n fragments_nodes = [repre[\"size\"] for repre 
in representations]\n with torch.no_grad():\n out_samples, _ = sampling_ddpm.inpaint(\n n_samples=n_samples,\n fragments_nodes=fragments_nodes,\n conditions=conditions,\n return_frames=1,\n resamplings=resamplings,\n jump_length=jump_length,\n timesteps=None,\n xh_fixed=xh_fixed,\n frag_fixed=frag_fixed,\n )\n rmsds = batch_rmsd(\n fragments_nodes,\n out_samples[0],\n xh_fixed,\n idx=1,\n threshold=0.5,\n )\n return np.mean(rmsds), np.median(rmsds)\n\n def training_step(self, batch, batch_idx):\n nll, info = self.compute_loss(batch)\n loss = nll.mean(0)\n\n self.log(\"train-totloss\", loss, rank_zero_only=True)\n for k, v in info.items():\n self.log(f\"train-{k}\", v, rank_zero_only=True)\n\n if (self.current_epoch + 1) % self.eval_epochs == 0 and batch_idx == 0:\n if self.trainer.is_global_zero:\n print(\n \"evaluation on samping for training batch...\",\n batch[1].shape,\n batch_idx,\n )\n rmsd_mean, rmsd_median = self.eval_inplaint_batch(batch)\n info[\"rmsd\"], info[\"rmsd-median\"] = rmsd_mean, rmsd_median\n else:\n info[\"rmsd\"], info[\"rmsd-median\"] = np.nan, np.nan\n info[\"loss\"] = loss\n return info\n\n def _shared_eval(self, batch, batch_idx, prefix, *args):\n nll, info = self.compute_loss(batch)\n loss = nll.mean(0)\n info[\"totloss\"] = loss.item()\n\n if (self.current_epoch + 1) % self.eval_epochs == 0 and batch_idx == 0:\n if self.trainer.is_global_zero:\n print(\n \"evaluation on samping for validation batch...\",\n batch[1].shape,\n batch_idx,\n )\n info[\"rmsd\"], info[\"rmsd-median\"] = self.eval_inplaint_batch(batch)\n else:\n info[\"rmsd\"], info[\"rmsd-median\"] = np.nan, np.nan\n\n info_prefix = {}\n for k, v in info.items():\n info_prefix[f\"{prefix}-{k}\"] = v\n return info_prefix\n\n def validation_step(self, batch, batch_idx, *args):\n return self._shared_eval(batch, batch_idx, \"val\", *args)\n\n def test_step(self, batch, batch_idx, *args):\n return self._shared_eval(batch, batch_idx, \"test\", *args)\n\n def validation_epoch_end(self, val_step_outputs):\n val_epoch_metrics = average_over_batch_metrics(val_step_outputs)\n if self.trainer.is_global_zero:\n pretty_print(self.current_epoch, val_epoch_metrics, prefix=\"val\")\n val_epoch_metrics.update({\"epoch\": self.current_epoch})\n for k, v in val_epoch_metrics.items():\n self.log(k, v, sync_dist=True)\n\n def training_epoch_end(self, outputs) -> None:\n epoch_metrics = average_over_batch_metrics(\n outputs, allowed=[\"rmsd\", \"rmsd-median\"]\n )\n self.log(\"train-rmsd\", epoch_metrics[\"rmsd\"], sync_dist=True)\n self.log(\"train-rmsd-median\", epoch_metrics[\"rmsd-median\"], sync_dist=True)\n\n def configure_gradient_clipping(\n self, optimizer, optimizer_idx, gradient_clip_val, gradient_clip_algorithm\n ):\n if not self.clip_grad:\n return\n\n # Allow gradient norm to be 150% + 1.5 * stdev of the recent history.\n max_grad_norm = 1.5 * self.gradnorm_queue.mean() + 3 * self.gradnorm_queue.std()\n\n # Get current grad_norm\n params = [p for g in optimizer.param_groups for p in g[\"params\"]]\n grad_norm = utils.get_grad_norm(params)\n\n # Lightning will handle the gradient clipping\n self.clip_gradients(\n optimizer, gradient_clip_val=max_grad_norm, gradient_clip_algorithm=\"norm\"\n )\n\n if float(grad_norm) > max_grad_norm:\n self.gradnorm_queue.add(float(max_grad_norm))\n else:\n self.gradnorm_queue.add(float(grad_norm))\n\n if float(grad_norm) > max_grad_norm:\n print(\n f\"Clipped gradient with value {grad_norm:.1f} \"\n f\"while allowed {max_grad_norm:.1f}\"\n )" }, { "identifier": "ProcessedTS1x", 
"path": "oa_reactdiff/dataset/transition1x.py", "snippet": "class ProcessedTS1x(BaseDataset):\n def __init__(\n self,\n npz_path,\n center=True,\n pad_fragments=0,\n device=\"cpu\",\n zero_charge=False,\n remove_h=False,\n single_frag_only=True,\n swapping_react_prod=False,\n append_frag=False,\n reflection=False,\n use_by_ind=False,\n only_ts=False,\n confidence_model=False,\n position_key=\"positions\",\n ediff=None,\n **kwargs,\n ):\n super().__init__(\n npz_path=npz_path,\n center=center,\n device=device,\n zero_charge=zero_charge,\n remove_h=remove_h,\n )\n if confidence_model:\n use_by_ind = False\n if remove_h:\n print(\"remove_h is ignored because it is not reasonble for TS.\")\n if single_frag_only:\n single_frag_inds = np.where(\n np.array(self.raw_dataset[\"single_fragment\"]) == 1\n )[0]\n else:\n single_frag_inds = np.array(range(len(self.raw_dataset[\"single_fragment\"])))\n if use_by_ind:\n use_inds = self.raw_dataset[\"use_ind\"]\n else:\n use_inds = range(len(self.raw_dataset[\"single_fragment\"]))\n single_frag_inds = list(set(single_frag_inds).intersection(set(use_inds)))\n\n data_duplicated = copy.deepcopy(self.raw_dataset)\n for k, mapped_k in FRAG_MAPPING.items():\n for v, val in data_duplicated[k].items():\n self.raw_dataset[k][v] = [val[ii] for ii in single_frag_inds]\n if swapping_react_prod:\n mapped_val = data_duplicated[mapped_k][v]\n self.raw_dataset[k][v] += [\n mapped_val[ii] for ii in single_frag_inds\n ]\n if reflection:\n for k, mapped_k in FRAG_MAPPING.items():\n for v, val in self.raw_dataset[k].items():\n if v in [\"wB97x_6-31G(d).forces\", position_key]:\n self.raw_dataset[k][v] += [reflect_z(_val) for _val in val]\n else:\n self.raw_dataset[k][v] += val\n\n self.reactant = self.raw_dataset[\"reactant\"]\n self.transition_state = self.raw_dataset[\"transition_state\"]\n self.product = self.raw_dataset[\"product\"]\n\n self.n_fragments = pad_fragments + 3\n self.device = torch.device(device)\n n_samples = len(self.reactant[\"charges\"])\n self.n_samples = len(self.reactant[\"charges\"])\n\n self.data = {}\n repeat = 2 if swapping_react_prod else 1\n if confidence_model:\n self.data[\"target\"] = torch.tensor(\n self.raw_dataset[\"target\"] * repeat\n ).unsqueeze(1)\n self.data[\"rmsd\"] = torch.tensor(\n self.raw_dataset[\"rmsd\"] * repeat\n ).unsqueeze(1)\n if ediff is not None:\n self.data[\"ediff\"] = torch.tensor(\n self.raw_dataset[ediff][\"ediff\"] * repeat\n ).unsqueeze(1)\n if not only_ts:\n if not append_frag:\n self.process_molecules(\n \"reactant\", n_samples, idx=0, position_key=position_key\n )\n self.process_molecules(\"transition_state\", n_samples, idx=1)\n self.process_molecules(\n \"product\", n_samples, idx=2, position_key=position_key\n )\n else:\n self.process_molecules(\n \"reactant\",\n n_samples,\n idx=0,\n append_charge=0,\n position_key=position_key,\n )\n self.process_molecules(\n \"transition_state\", n_samples, idx=1, append_charge=1\n )\n self.process_molecules(\n \"product\",\n n_samples,\n idx=2,\n append_charge=0,\n position_key=position_key,\n )\n\n for idx in range(pad_fragments):\n self.patch_dummy_molecules(idx + 3)\n else:\n if not append_frag:\n self.process_molecules(\"transition_state\", n_samples, idx=0)\n else:\n self.process_molecules(\n \"transition_state\", n_samples, idx=0, append_charge=1\n )\n # for idx in range(2):\n # self.patch_dummy_molecules(idx + 1)\n\n self.data[\"condition\"] = [\n torch.zeros(\n size=(1, 1),\n dtype=torch.int64,\n device=self.device,\n )\n for _ in range(self.n_samples)\n ]" }, { 
"identifier": "DiffSchedule", "path": "oa_reactdiff/diffusion/_schedule.py", "snippet": "class DiffSchedule(nn.Module):\n def __init__(self, gamma_module: nn.Module, norm_values: Tuple[float]) -> None:\n super().__init__()\n self.gamma_module = gamma_module\n self.norm_values = norm_values\n self.check_issues_norm_values()\n\n @staticmethod\n def inflate_batch_array(array, target):\n r\"\"\"\n Inflates the batch array (array) with only a single axis\n (i.e. shape = (batch_size,), or possibly more empty axes\n (i.e. shape (batch_size, 1, ..., 1)) to match the target shape.\n \"\"\"\n target_shape = (array.size(0),) + (1,) * (len(target.size()) - 1)\n return array.view(target_shape)\n\n def sigma(self, gamma, target_tensor):\n r\"\"\"Computes sigma given gamma.\"\"\"\n return self.inflate_batch_array(torch.sqrt(torch.sigmoid(gamma)), target_tensor)\n\n def alpha(self, gamma, target_tensor):\n r\"\"\"Computes alpha given gamma.\"\"\"\n return self.inflate_batch_array(\n torch.sqrt(torch.sigmoid(-gamma)), target_tensor\n )\n\n @staticmethod\n def SNR(gamma):\n r\"\"\"Computes signal to noise ratio (alpha^2/sigma^2) given gamma.\"\"\"\n return torch.exp(-gamma)\n\n def sigma_and_alpha_t_given_s(\n self, gamma_t: Tensor, gamma_s: Tensor, target_tensor: Tensor\n ) -> tuple[Tensor, Tensor, Tensor]:\n r\"\"\"\n Computes sigma t given s, using gamma_t and gamma_s. Used during sampling.\n These are defined as:\n alpha t given s = alpha t / alpha s,\n sigma t given s = sqrt(1 - (alpha t given s) ^2 ).\n \"\"\"\n sigma2_t_given_s = self.inflate_batch_array(\n -torch.expm1(F.softplus(gamma_s) - F.softplus(gamma_t)), target_tensor\n )\n\n # alpha_t_given_s = alpha_t / alpha_s\n log_alpha2_t = F.logsigmoid(-gamma_t)\n log_alpha2_s = F.logsigmoid(-gamma_s)\n log_alpha2_t_given_s = log_alpha2_t - log_alpha2_s\n\n alpha_t_given_s = torch.exp(0.5 * log_alpha2_t_given_s)\n alpha_t_given_s = self.inflate_batch_array(alpha_t_given_s, target_tensor)\n\n sigma_t_given_s = torch.sqrt(sigma2_t_given_s)\n\n return sigma2_t_given_s, sigma_t_given_s, alpha_t_given_s\n\n def check_issues_norm_values(self, num_stdevs=8):\n zeros = torch.zeros((1, 1))\n gamma_0 = self.gamma_module(zeros)\n sigma_0 = self.sigma(gamma_0, target_tensor=zeros).item()\n\n # Checked if 1 / norm_value is still larger than 10 * standard\n # deviation.\n norm_value = self.norm_values[1]\n\n if sigma_0 * num_stdevs > 1.0 / norm_value:\n raise ValueError(\n f\"Value for normalization value {norm_value} probably too \"\n f\"large with sigma_0 {sigma_0:.5f} and \"\n f\"1 / norm_value = {1. / norm_value}\"\n )" }, { "identifier": "PredefinedNoiseSchedule", "path": "oa_reactdiff/diffusion/_schedule.py", "snippet": "class PredefinedNoiseSchedule(nn.Module):\n r\"\"\"\n Predefined noise schedule. 
Essentially creates a lookup array for predefined\n (non-learned) noise schedules.\n \"\"\"\n\n def __init__(\n self,\n noise_schedule: str,\n timesteps: int,\n precision: float,\n ):\n super().__init__()\n self.timesteps = timesteps\n\n if \"cosine\" in noise_schedule:\n splits = noise_schedule.split(\"_\")\n assert len(splits) <= 2\n power = 1 if len(splits) == 1 else float(splits[1])\n alphas2 = cosine_beta_schedule(timesteps, raise_to_power=power)\n elif \"polynomial\" in noise_schedule:\n splits = noise_schedule.split(\"_\")\n assert len(splits) == 2\n power = float(splits[1])\n alphas2 = polynomial_schedule(timesteps, s=precision, power=power)\n elif \"csin\" in noise_schedule:\n splits = noise_schedule.split(\"_\")\n assert len(splits) == 4\n start, end, tau = float(splits[1]), float(splits[2]), float(splits[3])\n alphas2 = ccosine_schedule(timesteps, start=start, end=end, tau=tau)\n elif \"linear\" in noise_schedule:\n alphas2 = linear_schedule(timesteps)\n else:\n raise ValueError(noise_schedule)\n\n # print(\"alphas2\", alphas2)\n\n sigmas2 = 1 - alphas2\n\n log_alphas2 = np.log(alphas2)\n log_sigmas2 = np.log(sigmas2)\n\n log_alphas2_to_sigmas2 = log_alphas2 - log_sigmas2\n\n # print(\"gamma\", -log_alphas2_to_sigmas2)\n\n self.gamma = torch.nn.Parameter(\n torch.from_numpy(-log_alphas2_to_sigmas2).float(), requires_grad=False\n )\n\n def forward(self, t):\n t_int = torch.round(t * self.timesteps).long()\n return self.gamma[t_int]" }, { "identifier": "FEATURE_MAPPING", "path": "oa_reactdiff/diffusion/_normalizer.py", "snippet": "FEATURE_MAPPING = [\"pos\", \"one_hot\", \"charge\"]" }, { "identifier": "batch_rmsd", "path": "oa_reactdiff/analyze/rmsd.py", "snippet": "def batch_rmsd(\n fragments_nodes: List[Tensor],\n out_samples: List[Tensor],\n xh: List[Tensor],\n idx: int = 1,\n threshold=0.5,\n):\n rmsds = []\n out_samples_use = out_samples[idx]\n xh_use = xh[idx]\n nodes = fragments_nodes[idx].long().cpu().numpy()\n start_ind, end_ind = 0, 0\n for jj, natoms in enumerate(nodes):\n end_ind += natoms\n mol1 = xh2pmg(out_samples_use[start_ind:end_ind])\n mol2 = xh2pmg(xh_use[start_ind:end_ind])\n try:\n rmsd = pymatgen_rmsd(mol1, mol2, ignore_chirality=True, threshold=threshold)\n except:\n rmsd = 1.0\n rmsds.append(min(rmsd, 1.0))\n start_ind = end_ind\n return rmsds" }, { "identifier": "assemble_sample_inputs", "path": "oa_reactdiff/utils/sampling_tools.py", "snippet": "def assemble_sample_inputs(\n atoms: List,\n device: torch.device = torch.device(\"cuda\"),\n n_samples: int = 1,\n frag_type: bool = False,\n):\n empty_site = torch.tensor([[1, 0, 0, 0, 0, 1]], device=device)\n if not frag_type:\n decoders = [\n {\n \"H\": [1, 0, 0, 0, 0, 1],\n \"C\": [0, 1, 0, 0, 0, 6],\n \"N\": [0, 0, 1, 0, 0, 7],\n \"O\": [0, 0, 0, 1, 0, 8],\n \"F\": [0, 0, 0, 0, 1, 9],\n }\n ] * 2\n else:\n decoders = [\n {\n \"H\": [1, 0, 0, 0, 0, 1, 0],\n \"C\": [0, 1, 0, 0, 0, 6, 0],\n \"N\": [0, 0, 1, 0, 0, 7, 0],\n \"O\": [0, 0, 0, 1, 0, 8, 0],\n \"F\": [0, 0, 0, 0, 1, 9, 0],\n },\n {\n \"H\": [1, 0, 0, 0, 0, 1, 1],\n \"C\": [0, 1, 0, 0, 0, 6, 1],\n \"N\": [0, 0, 1, 0, 0, 7, 1],\n \"O\": [0, 0, 0, 1, 0, 8, 1],\n \"F\": [0, 0, 0, 0, 1, 9, 1],\n },\n ]\n\n h0 = [\n torch.cat(\n [\n torch.tensor([decoders[ii % 2][atom] for atom in atoms], device=device)\n for _ in range(n_samples)\n ]\n )\n for ii in range(3)\n ]\n return h0" }, { "identifier": "write_tmp_xyz", "path": "oa_reactdiff/utils/sampling_tools.py", "snippet": "def write_tmp_xyz(\n fragments_nodes, out_samples, idx=[0], prefix=\"gen\", 
localpath=\"tmp\", ex_ind=0\n):\n TYPEMAP = {\n 0: \"react\",\n 1: \"ts\",\n 2: \"prod\",\n }\n for ii in idx:\n st = TYPEMAP[ii]\n start_ind, end_ind = 0, 0\n for jj, natoms in enumerate(fragments_nodes[0]):\n _jj = jj + ex_ind\n xyzfile = f\"{localpath}/{prefix}_{_jj}_{st}.xyz\"\n end_ind += natoms.item()\n write_single_xyz(\n xyzfile,\n natoms.item(),\n out=out_samples[ii][start_ind:end_ind],\n )\n start_ind = end_ind" }, { "identifier": "xyz2pmg", "path": "oa_reactdiff/analyze/rmsd.py", "snippet": "def xyz2pmg(xyzfile):\n xyz_converter = XYZ(mol=None)\n mol = xyz_converter.from_file(xyzfile).molecule\n return mol" }, { "identifier": "pymatgen_rmsd", "path": "oa_reactdiff/analyze/rmsd.py", "snippet": "def pymatgen_rmsd(\n mol1,\n mol2,\n ignore_chirality=False,\n threshold=0.5,\n same_order=False,\n):\n if isinstance(mol1, str):\n mol1 = xyz2pmg(mol1)\n if isinstance(mol2, str):\n mol2 = xyz2pmg(mol2)\n rmsd = rmsd_core(mol1, mol2, threshold)\n if ignore_chirality:\n coords = mol2.cart_coords\n coords[:, -1] = -coords[:, -1]\n mol2_reflect = Molecule(\n species=mol2.species,\n coords=coords,\n )\n rmsd_reflect = rmsd_core(mol1, mol2_reflect, threshold)\n rmsd = min(rmsd, rmsd_reflect)\n return rmsd" }, { "identifier": "pymatgen_rmsd", "path": "oa_reactdiff/analyze/rmsd.py", "snippet": "def pymatgen_rmsd(\n mol1,\n mol2,\n ignore_chirality=False,\n threshold=0.5,\n same_order=False,\n):\n if isinstance(mol1, str):\n mol1 = xyz2pmg(mol1)\n if isinstance(mol2, str):\n mol2 = xyz2pmg(mol2)\n rmsd = rmsd_core(mol1, mol2, threshold)\n if ignore_chirality:\n coords = mol2.cart_coords\n coords[:, -1] = -coords[:, -1]\n mol2_reflect = Molecule(\n species=mol2.species,\n coords=coords,\n )\n rmsd_reflect = rmsd_core(mol1, mol2_reflect, threshold)\n rmsd = min(rmsd, rmsd_reflect)\n return rmsd" } ]
import torch import py3Dmol import numpy as np import plotly.express as px import json from typing import Optional from torch import tensor from e3nn import o3 from torch_scatter import scatter_mean from oa_reactdiff.model import LEFTNet from oa_reactdiff.tests.model.utils import ( generate_full_eij, get_cut_graph_mask, ) from torch.utils.data import DataLoader from oa_reactdiff.trainer.pl_trainer import DDPMModule from oa_reactdiff.dataset import ProcessedTS1x from oa_reactdiff.diffusion._schedule import DiffSchedule, PredefinedNoiseSchedule from oa_reactdiff.diffusion._normalizer import FEATURE_MAPPING from oa_reactdiff.analyze.rmsd import batch_rmsd from oa_reactdiff.utils.sampling_tools import ( assemble_sample_inputs, write_tmp_xyz, ) from glob import glob from oa_reactdiff.analyze.rmsd import xyz2pmg, pymatgen_rmsd from pymatgen.core import Molecule from collections import OrderedDict from sklearn.cluster import KMeans from glob import glob from pymatgen.io.xyz import XYZ from openbabel import pybel from oa_reactdiff.analyze.rmsd import pymatgen_rmsd
12,926
edge_index = generate_full_eij(ntot)
edge_index
_h, _pos, __ = model.forward(
    h=h,
    pos=remove_mean_batch(pos, mask),
    edge_index=edge_index,
)
rot = o3.rand_matrix()
pos_react_rot = torch.matmul(pos_react, rot).double()
pos_rot = torch.cat(
    [pos_react_rot, pos_prod],
    dim=0,
)  # concatenate the rotated H2O with the un-rotated H2 and O radical
_h_rot, _pos_rot, __ = model.forward(
    h=h,
    pos=remove_mean_batch(pos_rot, mask),
    edge_index=edge_index,
)
torch.max(
    torch.abs(
        _h - _h_rot
    )
)  # h should stay unchanged after the rotation
_pos_rot_prime = torch.cat(
    [
        torch.matmul(_pos[:3], rot),
        _pos[3:]
    ]
)
torch.max(
    torch.abs(
        _pos_rot_prime - _pos_rot
    )
)  # pos should rotate along with the input
print("At Cell 16, Done.")
model_oa = LEFTNet(
    num_layers=num_layers,
    hidden_channels=hidden_channels,
    in_hidden_channels=in_hidden_channels,
    num_radial=num_radial,
    object_aware=True,  # use the object-aware model
)
subgraph_mask = get_cut_graph_mask(edge_index, 3)  # atoms 0-2 belong to the reactant
edge_index.T[torch.where(subgraph_mask.squeeze()>0)[0]]
_h, _pos, __ = model_oa.forward(
    h=h,
    pos=remove_mean_batch(pos, mask),
    edge_index=edge_index,
    subgraph_mask=subgraph_mask,
)
rot = o3.rand_matrix()
pos_react_rot = torch.matmul(pos_react, rot).double()
pos_rot = torch.cat(
    [pos_react_rot, pos_prod],
    dim=0,
)
_h_rot, _pos_rot, __ = model_oa.forward(
    h=h,
    pos=remove_mean_batch(pos_rot, mask),
    edge_index=edge_index,
    subgraph_mask=subgraph_mask,
)
torch.max(
    torch.abs(
        _h - _h_rot
    )
)  # h should stay unchanged after the rotation
_pos_rot_prime = torch.cat(
    [
        torch.matmul(_pos[:3], rot),
        _pos[3:]
    ]
)
torch.max(
    torch.abs(
        _pos_rot_prime - _pos_rot
    )
)  # pos should rotate along with the input
print("Cell 22, done")
device = torch.device("cpu") if not torch.cuda.is_available() else torch.device("cuda")
ddpm_trainer = DDPMModule.load_from_checkpoint(
    checkpoint_path="./pretrained-ts1x-diff.ckpt",
    map_location=device,
)
ddpm_trainer = ddpm_trainer.to(device)
noise_schedule: str = "polynomial_2"
timesteps: int = 150
precision: float = 1e-5
gamma_module = PredefinedNoiseSchedule(
    noise_schedule=noise_schedule,
    timesteps=timesteps,
    precision=precision,
)
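The rotation tests in the cropped code above only display torch.max(torch.abs(...)) residuals. A hedged sketch that turns the same checks into assertions; the tolerance is an illustrative choice, not a value from the repository:

# Reuses _h, _h_rot, _pos_rot_prime, _pos_rot from the object-aware test above.
atol = 1e-8  # illustrative tolerance; the notebook sets float64 as the default dtype
assert torch.max(torch.abs(_h - _h_rot)) < atol, "node features should be invariant under rotation"
assert torch.max(torch.abs(_pos_rot_prime - _pos_rot)) < atol, "positions should co-rotate with the reactant fragment"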
# --- imports and helper function definitions ----
default_float = torch.float64
torch.set_default_dtype(default_float)  # use double precision so the tests are more accurate


def remove_mean_batch(
    x: tensor,
    indices: Optional[tensor] = None
) -> tensor:
    """Remove the per-batch mean from x.

    Args:
        x (tensor): input tensor.
        indices (Optional[tensor], optional): batch indices. Defaults to None.

    Returns:
        tensor: output tensor with batch mean as 0.
    """
    if indices == None:
        return x - torch.mean(x, dim=0)
    mean = scatter_mean(x, indices, dim=0)
    x = x - mean[indices]
    return x


def draw_in_3dmol(mol: str, fmt: str = "xyz") -> py3Dmol.view:
    """Draw a molecule.

    Args:
        mol (str): str content of molecule.
        fmt (str, optional): format. Defaults to "xyz".

    Returns:
        py3Dmol.view: output viewer
    """
    viewer = py3Dmol.view(1024, 576)
    viewer.addModel(mol, fmt)
    viewer.setStyle({'stick': {}, "sphere": {"radius": 0.36}})
    viewer.zoomTo()
    return viewer


def assemble_xyz(z: list, pos: tensor) -> str:
    """Assemble atomic symbols and positions into xyz format.

    Args:
        z (list): chemical elements
        pos (tensor): 3D coordinates

    Returns:
        str: xyz string
    """
    natoms = len(z)
    xyz = f"{natoms}\n\n"
    for _z, _pos in zip(z, pos.numpy()):
        xyz += f"{_z}\t" + "\t".join([str(x) for x in _pos]) + "\n"
    return xyz


num_layers = 2
hidden_channels = 8
in_hidden_channels = 4
num_radial = 4
model = LEFTNet(
    num_layers=num_layers,
    hidden_channels=hidden_channels,
    in_hidden_channels=in_hidden_channels,
    num_radial=num_radial,
    object_aware=False,
)
sum(p.numel() for p in model.parameters() if p.requires_grad)
h = torch.rand(3, in_hidden_channels)
z = ["O", "H", "H"]
pos = tensor([
    [0, 0, 0],
    [1, 0, 0],
    [0, 1, 0],
]).double()  # for convenience, the H-O-H angle is set to 90 degrees here
edge_index = tensor([
    [0, 0, 1, 1, 2, 2],
    [1, 2, 0, 2, 0, 1]
]).long()  # fully connected graph; the edges here are undirected
_h, _pos, __ = model.forward(
    h=h,
    pos=remove_mean_batch(pos),
    edge_index=edge_index,
)
rot = o3.rand_matrix()
pos_rot = torch.matmul(pos, rot).double()
_h_rot, _pos_rot, __ = model.forward(
    h=h,
    pos=remove_mean_batch(pos_rot),
    edge_index=edge_index,
)
torch.max(
    torch.abs(
        _h - _h_rot
    )
)  # h should stay unchanged after the rotation
torch.max(
    torch.abs(
        torch.matmul(_pos, rot).double() - _pos_rot
    )
)  # pos should rotate along with the input
print("At Cell 9, Done.")

# --- Cell 9 ---
ns = [3, ] + [2, 1]  # reactant: 3 atoms (H2O); products: 2 atoms (H2) and 1 atom (O radical)
ntot = np.sum(ns)
mask = tensor([0, 0, 0, 1, 1, 1])  # used to distinguish reactant atoms from product atoms
z = ["O", "H", "H"] + ["H", "H", "O"]
pos_react = tensor([
    [0, 0, 0],
    [1, 0, 0],
    [0, 1, 0],
]).double()  # for convenience, the H-O-H angle is set to 90 degrees here
pos_prod = tensor([
    [0, 3, -0.4],
    [0, 3, 0.4],
    [0, -3, 0],
])  # keep H2 and the O radical apart
pos = torch.cat(
    [pos_react, pos_prod],
    dim=0,
)  # concatenate
h = torch.rand(ntot, in_hidden_channels)
edge_index = generate_full_eij(ntot)
edge_index
_h, _pos, __ = model.forward(
    h=h,
    pos=remove_mean_batch(pos, mask),
    edge_index=edge_index,
)
rot = o3.rand_matrix()
pos_react_rot = torch.matmul(pos_react, rot).double()
pos_rot = torch.cat(
    [pos_react_rot, pos_prod],
    dim=0,
)  # concatenate the rotated H2O with the un-rotated H2 and O radical
_h_rot, _pos_rot, __ = model.forward(
    h=h,
    pos=remove_mean_batch(pos_rot, mask),
    edge_index=edge_index,
)
torch.max(
    torch.abs(
        _h - _h_rot
    )
)  # h should stay unchanged after the rotation
_pos_rot_prime = torch.cat(
    [
        torch.matmul(_pos[:3], rot),
        _pos[3:]
    ]
)
torch.max(
    torch.abs(
        _pos_rot_prime - _pos_rot
    )
)  # pos should rotate along with the input
print("At Cell 16, Done.")
model_oa = LEFTNet(
    num_layers=num_layers,
    hidden_channels=hidden_channels,
    in_hidden_channels=in_hidden_channels,
    num_radial=num_radial,
    object_aware=True,  # use the object-aware model
)
subgraph_mask = get_cut_graph_mask(edge_index, 3)  # atoms 0-2 belong to the reactant
edge_index.T[torch.where(subgraph_mask.squeeze()>0)[0]]
_h, _pos, __ = model_oa.forward(
    h=h,
    pos=remove_mean_batch(pos, mask),
    edge_index=edge_index,
    subgraph_mask=subgraph_mask,
)
rot = o3.rand_matrix()
pos_react_rot = torch.matmul(pos_react, rot).double()
pos_rot = torch.cat(
    [pos_react_rot, pos_prod],
    dim=0,
)
_h_rot, _pos_rot, __ = model_oa.forward(
    h=h,
    pos=remove_mean_batch(pos_rot, mask),
    edge_index=edge_index,
    subgraph_mask=subgraph_mask,
)
torch.max(
    torch.abs(
        _h - _h_rot
    )
)  # h should stay unchanged after the rotation
_pos_rot_prime = torch.cat(
    [
        torch.matmul(_pos[:3], rot),
        _pos[3:]
    ]
)
torch.max(
    torch.abs(
        _pos_rot_prime - _pos_rot
    )
)  # pos should rotate along with the input
print("Cell 22, done")
device = torch.device("cpu") if not torch.cuda.is_available() else torch.device("cuda")
ddpm_trainer = DDPMModule.load_from_checkpoint(
    checkpoint_path="./pretrained-ts1x-diff.ckpt",
    map_location=device,
)
ddpm_trainer = ddpm_trainer.to(device)
noise_schedule: str = "polynomial_2"
timesteps: int = 150
precision: float = 1e-5
gamma_module = PredefinedNoiseSchedule(
    noise_schedule=noise_schedule,
    timesteps=timesteps,
    precision=precision,
)
schedule = DiffSchedule(
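The next_line recorded above begins constructing a DiffSchedule from the gamma_module built in the code block. A minimal sketch of how that call might be completed, using only the DiffSchedule interface quoted in this record's context; the norm_values tuple is an assumed placeholder, not necessarily the repository's setting:

schedule = DiffSchedule(
    gamma_module=gamma_module,
    norm_values=(1.0, 4.0, 10.0),  # assumed normalization constants, for illustration only
)

# Query the schedule at a normalized time t in [0, 1].
t = torch.full((1, 1), 0.5)
gamma_t = schedule.gamma_module(t)                  # lookup of gamma at t
alpha_t = schedule.alpha(gamma_t, target_tensor=t)  # sqrt(sigmoid(-gamma))
sigma_t = schedule.sigma(gamma_t, target_tensor=t)  # sqrt(sigmoid(gamma))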
5
2023-10-30 02:53:38+00:00
16k
Weitheskmt/WeiDMD
weidmd/bopdmd.py
[ { "identifier": "DMDBase", "path": "weidmd/dmdbase.py", "snippet": "class DMDBase:\n \"\"\"\n Dynamic Mode Decomposition base class.\n\n :param svd_rank: the rank for the truncation; If 0, the method computes the\n optimal rank and uses it for truncation; if positive interger, the\n method uses the argument for the truncation; if float between 0 and 1,\n the rank is the number of the biggest singular values that are needed\n to reach the 'energy' specified by `svd_rank`; if -1, the method does\n not compute truncation.\n :type svd_rank: int or float\n :param int tlsq_rank: rank truncation computing Total Least Square. Default\n is 0, that means no truncation.\n :param bool exact: flag to compute either exact DMD or projected DMD.\n Default is False.\n :param opt: If True, amplitudes are computed like in optimized DMD (see\n :func:`~dmdbase.DMDBase._compute_amplitudes` for reference). If\n False, amplitudes are computed following the standard algorithm. If\n `opt` is an integer, it is used as the (temporal) index of the snapshot\n used to compute DMD modes amplitudes (following the standard\n algorithm).\n The reconstruction will generally be better in time instants near the\n chosen snapshot; however increasing `opt` may lead to wrong results\n when the system presents small eigenvalues. For this reason a manual\n selection of the number of eigenvalues considered for the analyisis may\n be needed (check `svd_rank`). Also setting `svd_rank` to a value\n between 0 and 1 may give better results. Default is False.\n :type opt: bool or int\n :param rescale_mode: Scale Atilde as shown in\n 10.1016/j.jneumeth.2015.10.010 (section 2.4) before computing its\n eigendecomposition. None means no rescaling, 'auto' means automatic\n rescaling using singular values, otherwise the scaling factors.\n :type rescale_mode: {'auto'} or None or numpy.ndarray\n :param bool forward_backward: If True, the low-rank operator is computed\n like in fbDMD (reference: https://arxiv.org/abs/1507.02264). Default is\n False.\n :param sorted_eigs: Sort eigenvalues (and modes/dynamics accordingly) by\n magnitude if `sorted_eigs='abs'`, by real part (and then by imaginary\n part to break ties) if `sorted_eigs='real'`. 
Default: False.\n :type sorted_eigs: {'real', 'abs'} or False\n :param tikhonov_regularization: Tikhonov parameter for the regularization.\n If `None`, no regularization is applied, if `float`, it is used as the\n :math:`\\\\lambda` tikhonov parameter.\n :type tikhonov_regularization: int or float\n\n :cvar dict original_time: dictionary that contains information about the\n time window where the system is sampled:\n\n - `t0` is the time of the first input snapshot;\n - `tend` is the time of the last input snapshot;\n - `dt` is the delta time between the snapshots.\n\n :cvar dict dmd_time: dictionary that contains information about the time\n window where the system is reconstructed:\n\n - `t0` is the time of the first approximated solution;\n - `tend` is the time of the last approximated solution;\n - `dt` is the delta time between the approximated solutions.\n\n \"\"\"\n\n def __init__(\n self,\n svd_rank=0,\n tlsq_rank=0,\n exact=False,\n opt=False,\n rescale_mode=None,\n forward_backward=False,\n sorted_eigs=False,\n tikhonov_regularization=None,\n ):\n self._Atilde = DMDOperator(\n svd_rank=svd_rank,\n exact=exact,\n rescale_mode=rescale_mode,\n forward_backward=forward_backward,\n sorted_eigs=sorted_eigs,\n tikhonov_regularization=tikhonov_regularization,\n )\n\n self._tlsq_rank = tlsq_rank\n self._original_time = None\n self._dmd_time = None\n self._opt = opt\n self._exact = exact\n\n self._b = None # amplitudes\n self._snapshots_holder = None\n\n self._modes_activation_bitmask_proxy = None\n\n @property\n def dmd_timesteps(self):\n \"\"\"\n Get the timesteps of the reconstructed states.\n\n :return: the time intervals of the original snapshots.\n :rtype: numpy.ndarray\n \"\"\"\n return np.arange(\n self.dmd_time[\"t0\"],\n self.dmd_time[\"tend\"] + self.dmd_time[\"dt\"],\n self.dmd_time[\"dt\"],\n )\n\n @property\n def original_timesteps(self):\n \"\"\"\n Get the timesteps of the original snapshot.\n\n :return: the time intervals of the original snapshots.\n :rtype: numpy.ndarray\n \"\"\"\n return np.arange(\n self.original_time[\"t0\"],\n self.original_time[\"tend\"] + self.original_time[\"dt\"],\n self.original_time[\"dt\"],\n )\n\n @property\n def modes(self):\n \"\"\"\n Get the matrix containing the DMD modes, stored by column.\n\n :return: the matrix containing the DMD modes.\n :rtype: numpy.ndarray\n \"\"\"\n if self.fitted:\n if not self._modes_activation_bitmask_proxy:\n self._allocate_modes_bitmask_proxy()\n # if the value is still None, it means that we cannot create\n # the proxy at the moment\n if not self._modes_activation_bitmask_proxy:\n return self.operator.modes\n return self._modes_activation_bitmask_proxy.modes\n\n @property\n def operator(self):\n \"\"\"\n Get the instance of DMDOperator.\n\n :return: the instance of DMDOperator\n :rtype: DMDOperator\n \"\"\"\n return self._Atilde\n\n @property\n def eigs(self):\n \"\"\"\n Get the eigenvalues of A tilde.\n\n :return: the eigenvalues from the eigendecomposition of `atilde`.\n :rtype: numpy.ndarray\n \"\"\"\n if self.fitted:\n if not self._modes_activation_bitmask_proxy:\n self._allocate_modes_bitmask_proxy()\n # if the value is still None, it means that we cannot create\n # the proxy at the moment\n if not self._modes_activation_bitmask_proxy:\n return self.operator.eigenvalues\n return self._modes_activation_bitmask_proxy.eigs\n\n @property\n def dynamics(self):\n \"\"\"\n Get the time evolution of each mode.\n\n .. 
math::\n\n \\\\mathbf{x}(t) \\\\approx\n \\\\sum_{k=1}^{r} \\\\boldsymbol{\\\\phi}_{k} \\\\exp \\\\left( \\\\omega_{k} t\n \\\\right) b_{k} = \\\\sum_{k=1}^{r} \\\\boldsymbol{\\\\phi}_{k} \\\\left(\n \\\\lambda_{k} \\\\right)^{\\\\left( t / \\\\Delta t \\\\right)} b_{k}\n\n :return: the matrix that contains all the time evolution, stored by\n row.\n :rtype: numpy.ndarray\n \"\"\"\n temp = np.repeat(\n self.eigs[:, None], self.dmd_timesteps.shape[0], axis=1\n )\n tpow = (\n self.dmd_timesteps - self.original_time[\"t0\"]\n ) // self.original_time[\"dt\"]\n\n # The new formula is x_(k+j) = \\Phi \\Lambda^k \\Phi^(-1) x_j.\n # Since j is fixed, for a given snapshot \"u\" we have the following\n # formula:\n # x_u = \\Phi \\Lambda^{u-j} \\Phi^(-1) x_j\n # Therefore tpow must be scaled appropriately.\n tpow = self._translate_eigs_exponent(tpow)\n\n return np.power(temp, tpow) * self.amplitudes[:, None]\n\n def _translate_eigs_exponent(self, tpow):\n \"\"\"\n Transforms the exponent of the eigenvalues in the dynamics formula\n according to the selected value of `self._opt` (check the documentation\n for `opt` in :func:`__init__ <dmdbase.DMDBase.__init__>`).\n\n :param tpow: the exponent(s) of Sigma in the original DMD formula.\n :type tpow: int or np.ndarray\n :return: the exponent(s) adjusted according to `self._opt`\n :rtype: int or np.ndarray\n \"\"\"\n\n if isinstance(self._opt, bool):\n amplitudes_snapshot_index = 0\n else:\n amplitudes_snapshot_index = self._opt\n\n if amplitudes_snapshot_index < 0:\n # we take care of negative indexes: -n becomes T - n\n return tpow - (self.snapshots.shape[1] + amplitudes_snapshot_index)\n else:\n return tpow - amplitudes_snapshot_index\n\n @property\n def reconstructed_data(self):\n \"\"\"\n Get the reconstructed data.\n\n :return: the matrix that contains the reconstructed snapshots.\n :rtype: numpy.ndarray\n \"\"\"\n return self.modes.dot(self.dynamics)\n\n @property\n def snapshots(self):\n \"\"\"\n Get the input data (space flattened).\n\n :return: the matrix that contains the flattened snapshots.\n :rtype: numpy.ndarray\n \"\"\"\n if self._snapshots_holder:\n return self._snapshots_holder.snapshots\n return None\n\n @property\n def snapshots_shape(self):\n \"\"\"\n Get the original input snapshot shape.\n\n :return: input snapshots shape.\n :rtype: tuple\n \"\"\"\n if self._snapshots_holder:\n return self._snapshots_holder.snapshots_shape\n return None\n\n @property\n def frequency(self):\n \"\"\"\n Get the amplitude spectrum.\n\n :return: the array that contains the frequencies of the eigenvalues.\n :rtype: numpy.ndarray\n \"\"\"\n return np.log(self.eigs).imag / (2 * np.pi * self.original_time[\"dt\"])\n\n @property\n def growth_rate(self): # To check\n \"\"\"\n Get the growth rate values relative to the modes.\n\n :return: the Floquet values\n :rtype: numpy.ndarray\n \"\"\"\n return self.eigs.real / self.original_time[\"dt\"]\n\n @property\n def amplitudes(self):\n \"\"\"\n Get the coefficients that minimize the error between the original\n system and the reconstructed one. 
For futher information, see\n `dmdbase._compute_amplitudes`.\n\n :return: the array that contains the amplitudes coefficient.\n :rtype: numpy.ndarray\n \"\"\"\n if self.fitted:\n if not self._modes_activation_bitmask_proxy:\n self._allocate_modes_bitmask_proxy()\n return self._modes_activation_bitmask_proxy.amplitudes\n\n @property\n def fitted(self):\n \"\"\"Check whether this DMD instance has been fitted.\n\n :return: `True` is the instance has been fitted, `False` otherwise.\n :rtype: bool\n \"\"\"\n try:\n return self.operator.modes is not None\n except (ValueError, AttributeError):\n return False\n\n @property\n def modes_activation_bitmask(self):\n \"\"\"\n Get the bitmask which controls which DMD modes are enabled at the\n moment in this DMD instance.\n\n The DMD instance must be fitted before this property becomes valid.\n After :func:`fit` is called, the defalt value of\n `modes_activation_bitmask` is an array of `True` values of the same\n shape of :func:`amplitudes`.\n\n The array returned is read-only (this allow us to react appropriately\n to changes in the bitmask). In order to modify the bitmask you need to\n set the field to a brand-new value (see example below).\n\n Example:\n\n .. code-block:: python\n\n >>> # this is an error\n >>> dmd.modes_activation_bitmask[[1,2]] = False\n ValueError: assignment destination is read-only\n >>> tmp = np.array(dmd.modes_activation_bitmask)\n >>> tmp[[1,2]] = False\n >>> dmd.modes_activation_bitmask = tmp\n\n :return: The DMD modes activation bitmask.\n :rtype: numpy.ndarray\n \"\"\"\n # check that the DMD was fitted\n if not self.fitted:\n raise RuntimeError(\"This DMD instance has not been fitted yet.\")\n\n if not self._modes_activation_bitmask_proxy:\n self._allocate_modes_bitmask_proxy()\n\n bitmask = self._modes_activation_bitmask_proxy.old_bitmask\n # make sure that the array is immutable\n bitmask.flags.writeable = False\n return bitmask\n\n @modes_activation_bitmask.setter\n def modes_activation_bitmask(self, value):\n # check that the DMD was fitted\n if not self.fitted:\n raise RuntimeError(\"This DMD instance has not been fitted yet.\")\n\n value = np.array(value)\n if value.dtype != bool:\n raise RuntimeError(\n \"Unxpected dtype, expected bool, got {}.\".format(value.dtype)\n )\n\n # check that the shape is correct\n if value.shape != self.modes_activation_bitmask.shape:\n raise ValueError(\n \"Expected shape {}, got {}\".format(\n self.modes_activation_bitmask.shape, value.shape\n )\n )\n\n self._modes_activation_bitmask_proxy.change_bitmask(value)\n\n def _allocate_modes_bitmask_proxy(self):\n \"\"\"\n Utility method which allocates the activation bitmask proxy using the\n quantities that are currently available in this DMD instance. Fails\n quietly if the amplitudes are not set.\n \"\"\"\n if hasattr(self, \"_b\") and self._b is not None:\n self._modes_activation_bitmask_proxy = ActivationBitmaskProxy(\n self.operator, self._b\n )\n\n def __getitem__(self, key):\n \"\"\"\n Restrict the DMD modes used by this instance to a subset of indexes\n specified by keys. The value returned is a shallow copy of this DMD\n instance, with a different value in :func:`modes_activation_bitmask`.\n Therefore assignments to attributes are not reflected into the original\n instance.\n\n However the DMD instance returned should not be used for low-level\n manipulations on DMD modes, since the underlying DMD operator is shared\n with the original instance. 
For this reasons modifications to NumPy\n arrays may result in unwanted and unspecified situations which should\n be avoided in principle.\n\n :param key: An index (integer), slice or list of indexes.\n :type key: int or slice or list or np.ndarray\n :return: A shallow copy of this DMD instance having only a subset of\n DMD modes which are those indexed by `key`.\n :rtype: DMDBase\n \"\"\"\n\n if isinstance(key, (slice, int, list, np.ndarray)):\n filter_function = lambda x: isinstance(x, int)\n\n if isinstance(key, (list, np.ndarray)):\n if not all(map(filter_function, key)):\n raise ValueError(\n \"Invalid argument type, expected a slice, an int, or \"\n \"a list of indexes.\"\n )\n # no repeated elements\n if len(key) != len(set(key)):\n raise ValueError(\"Repeated indexes are not supported.\")\n else:\n raise ValueError(\n \"Invalid argument type, expected a slice, an int, or a list \"\n \"of indexes, got {}\".format(type(key))\n )\n\n mask = np.full(self.modes_activation_bitmask.shape, False)\n mask[key] = True\n\n shallow_copy = copy(self)\n shallow_copy._allocate_modes_bitmask_proxy()\n shallow_copy.modes_activation_bitmask = mask\n\n return shallow_copy\n\n @property\n def original_time(self):\n \"\"\"\n A dictionary which contains information about the time window used to\n fit this DMD instance.\n\n Inside the dictionary:\n\n ====== ====================================================================================\n Key Value\n ====== ====================================================================================\n `t0` Time of the first input snapshot (0 by default).\n `tend` Time of the last input snapshot (usually corresponds to the number of snapshots).\n `dt` Timestep between two snapshots (1 by default).\n ====== ====================================================================================\n\n :return: A dict which contains info about the input time frame.\n :rtype: dict\n \"\"\"\n if self._original_time is None:\n raise RuntimeError(\n \"\"\"\n_set_initial_time_dictionary() has not been called, did you call fit()?\"\"\"\n )\n return self._original_time\n\n @property\n def dmd_time(self):\n \"\"\"\n A dictionary which contains information about the time window used to\n reconstruct/predict using this DMD instance. By default this is equal\n to :func:`original_time`.\n\n Inside the dictionary:\n\n ====== ====================================================================================\n Key Value\n ====== ====================================================================================\n `t0` Time of the first output snapshot.\n `tend` Time of the last output snapshot.\n `dt` Timestep between two snapshots.\n ====== ====================================================================================\n\n :return: A dict which contains info about the input time frame.\n :rtype: dict\n \"\"\"\n if self._dmd_time is None:\n raise RuntimeError(\n \"\"\"\n_set_initial_time_dictionary() has not been called, did you call fit()?\"\"\"\n )\n return self._dmd_time\n\n @dmd_time.setter\n def dmd_time(self, value):\n self._dmd_time = deepcopy(value)\n\n def _set_initial_time_dictionary(self, time_dict):\n \"\"\"\n Set the initial values for the class fields `time_dict` and\n `original_time`. 
This is usually called in `fit()` and never again.\n\n :param time_dict: Initial time dictionary for this DMD instance.\n :type time_dict: dict\n \"\"\"\n if not (\n \"t0\" in time_dict and \"tend\" in time_dict and \"dt\" in time_dict\n ):\n raise ValueError(\n 'time_dict must contain the keys \"t0\", \"tend\" and \"dt\".'\n )\n if len(time_dict) > 3:\n raise ValueError(\n 'time_dict must contain only the keys \"t0\", \"tend\" and \"dt\".'\n )\n\n self._original_time = DMDTimeDict(dict(time_dict))\n self._dmd_time = DMDTimeDict(dict(time_dict))\n\n def fit(self, X):\n \"\"\"\n Abstract method to fit the snapshots matrices.\n\n Not implemented, it has to be implemented in subclasses.\n \"\"\"\n name = self.__class__.__name__\n msg = f\"Subclass must implement abstract method {name}.fit\"\n raise NotImplementedError(msg)\n\n def _reset(self):\n \"\"\"\n Reset this instance. Should be called in :func:`fit`.\n \"\"\"\n self._modes_activation_bitmask_proxy = None\n self._b = None\n self._snapshots_holder = None\n\n def save(self, fname):\n \"\"\"\n Save the object to `fname` using the pickle module.\n\n :param str fname: the name of file where the reduced order model will\n be saved.\n\n Example:\n\n >>> from pydmd import DMD\n >>> dmd = DMD(...) # Construct here the rom\n >>> dmd.fit(...)\n >>> dmd.save('pydmd.dmd')\n \"\"\"\n with open(fname, \"wb\") as output:\n pickle.dump(self, output, pickle.HIGHEST_PROTOCOL)\n\n @staticmethod\n def load(fname):\n \"\"\"\n Load the object from `fname` using the pickle module.\n\n :return: The `ReducedOrderModel` loaded\n\n Example:\n\n >>> from pydmd import DMD\n >>> dmd = DMD.load('pydmd.dmd')\n >>> print(dmd.reconstructed_data)\n \"\"\"\n with open(fname, \"rb\") as output:\n return pickle.load(output)\n\n def _optimal_dmd_matrices(self):\n # compute the vandermonde matrix\n vander = np.vander(self.eigs, len(self.dmd_timesteps), True)\n\n P = np.multiply(\n np.dot(self.modes.conj().T, self.modes),\n np.conj(np.dot(vander, vander.conj().T)),\n )\n\n if self._exact:\n q = np.conj(\n np.diag(\n np.linalg.multi_dot(\n [vander, self.snapshots.conj().T, self.modes]\n )\n )\n )\n else:\n _, s, V = compute_svd(self.snapshots[:, :-1], self.modes.shape[-1])\n\n q = np.conj(\n np.diag(\n np.linalg.multi_dot(\n [\n vander[:, :-1],\n V,\n np.diag(s).conj(),\n self.operator.eigenvectors,\n ]\n )\n )\n )\n\n return P, q\n\n def _compute_amplitudes(self):\n \"\"\"\n Compute the amplitude coefficients. If `self._opt` is False the\n amplitudes are computed by minimizing the error between the modes and\n the first snapshot; if `self._opt` is True the amplitudes are computed\n by minimizing the error between the modes and all the snapshots, at the\n expense of bigger computational cost.\n\n This method uses the class variables self.snapshots (for the\n snapshots), self.modes and self.eigs.\n\n :return: the amplitudes array\n :rtype: numpy.ndarray\n\n References for optimal amplitudes:\n Jovanovic et al. 
2014, Sparsity-promoting dynamic mode decomposition,\n https://hal-polytechnique.archives-ouvertes.fr/hal-00995141/document\n \"\"\"\n if isinstance(self._opt, bool) and self._opt:\n # b optimal\n a = np.linalg.solve(*self._optimal_dmd_matrices())\n else:\n if isinstance(self._opt, bool):\n amplitudes_snapshot_index = 0\n else:\n amplitudes_snapshot_index = self._opt\n\n a = np.linalg.lstsq(\n self.modes,\n self.snapshots.T[amplitudes_snapshot_index],\n rcond=None,\n )[0]\n\n return a" }, { "identifier": "DMDOperator", "path": "weidmd/dmdoperator.py", "snippet": "class DMDOperator:\n \"\"\"\n Dynamic Mode Decomposition standard operator class. Non-standard ways of\n computing the low-rank Atilde operator should be coded into subclasses of\n this class.\n\n :param svd_rank: the rank for the truncation; If 0, the method computes the\n optimal rank and uses it for truncation; if positive interger, the\n method uses the argument for the truncation; if float between 0 and 1,\n the rank is the number of the biggest singular values that are needed\n to reach the 'energy' specified by `svd_rank`; if -1, the method does\n not compute truncation.\n :type svd_rank: int or float\n :param bool exact: flag to compute either exact DMD or projected DMD.\n Default is False.\n :param rescale_mode: Scale Atilde as shown in\n 10.1016/j.jneumeth.2015.10.010 (section 2.4) before computing its\n eigendecomposition. None means no rescaling, 'auto' means automatic\n rescaling using singular values, otherwise the scaling factors.\n :type rescale_mode: {'auto'} or None or numpy.ndarray\n :param bool forward_backward: If True, the low-rank operator is computed\n like in fbDMD (reference: https://arxiv.org/abs/1507.02264). Default is\n False.\n :param sorted_eigs: Sort eigenvalues (and modes/dynamics accordingly) by\n magnitude if `sorted_eigs='abs'`, by real part (and then by imaginary\n part to break ties) if `sorted_eigs='real'`. 
Default: False.\n :type sorted_eigs: {'real', 'abs'} or False\n :param tikhonov_regularization: Tikhonov parameter for the regularization.\n If `None`, no regularization is applied, if `float`, it is used as the\n :math:`\\lambda` tikhonov parameter.\n :type tikhonov_regularization: int or float\n \"\"\"\n\n def __init__(\n self,\n svd_rank,\n exact,\n forward_backward,\n rescale_mode,\n sorted_eigs,\n tikhonov_regularization,\n ):\n self._exact = exact\n self._rescale_mode = rescale_mode\n self._svd_rank = svd_rank\n self._forward_backward = forward_backward\n self._sorted_eigs = sorted_eigs\n self._tikhonov_regularization = tikhonov_regularization\n self._norm_X = None\n\n def compute_operator(self, X, Y):\n \"\"\"\n Compute the low-rank operator.\n\n :param numpy.ndarray X: matrix containing the snapshots x0,..x{n-1} by\n column.\n :param numpy.ndarray Y: matrix containing the snapshots x1,..x{n} by\n column.\n :return: the (truncated) left-singular vectors matrix, the (truncated)\n singular values array, the (truncated) right-singular vectors\n matrix of X.\n :rtype: numpy.ndarray, numpy.ndarray, numpy.ndarray\n \"\"\"\n\n U, s, V = compute_svd(X, self._svd_rank)\n\n if self._tikhonov_regularization is not None:\n self._norm_X = np.linalg.norm(X)\n atilde = self._least_square_operator(U, s, V, Y)\n\n if self._forward_backward:\n # b stands for \"backward\"\n bU, bs, bV = compute_svd(Y, svd_rank=len(s))\n atilde_back = self._least_square_operator(bU, bs, bV, X)\n atilde = sqrtm(atilde.dot(np.linalg.inv(atilde_back)))\n if hasattr(np, \"complex256\") and atilde.dtype == np.complex256:\n atilde = atilde.astype(np.complex128)\n msg = \"Casting atilde from np.complex256 to np.complex128\"\n logging.info(msg)\n\n if self._rescale_mode == \"auto\":\n self._rescale_mode = s\n\n self._Atilde = atilde\n self._compute_eigenquantities()\n self._compute_modes(Y, U, s, V)\n\n return U, s, V\n\n @property\n def shape(self):\n \"\"\"Shape of the operator\"\"\"\n return self.as_numpy_array.shape\n\n def __call__(self, snapshot_lowrank_modal_coefficients):\n \"\"\"\n Apply the low-rank operator to a vector of the modal coefficients of a\n snapshot(s).\n\n :param numpy.ndarray snapshot_lowrank_modal_coefficients: low-rank\n representation (in modal coefficients) of a snapshot x{n}.\n :return: low-rank representation (in modal coefficients) of x{n+1}.\n :rtype: numpy.ndarray\n \"\"\"\n\n return self._Atilde.dot(snapshot_lowrank_modal_coefficients)\n\n @property\n def eigenvalues(self):\n if not hasattr(self, \"_eigenvalues\"):\n raise ValueError(\"You need to call fit before\")\n return self._eigenvalues\n\n @property\n def eigenvectors(self):\n if not hasattr(self, \"_eigenvectors\"):\n raise ValueError(\"You need to call fit before\")\n return self._eigenvectors\n\n @property\n def modes(self):\n if not hasattr(self, \"_modes\"):\n raise ValueError(\"You need to call fit before\")\n return self._modes\n\n @property\n def Lambda(self):\n if not hasattr(self, \"_Lambda\"):\n raise ValueError(\"You need to call fit before\")\n return self._Lambda\n\n @property\n def as_numpy_array(self):\n if not hasattr(self, \"_Atilde\") or self._Atilde is None:\n raise ValueError(\"You need to call fit before\")\n else:\n return self._Atilde\n\n def _least_square_operator(self, U, s, V, Y):\n \"\"\"\n Private method that computes the lowrank operator from the singular\n value decomposition of matrix X and the matrix Y.\n\n .. 
math::\n\n \\\\mathbf{\\\\tilde{A}} =\n \\\\mathbf{U}^* \\\\mathbf{Y} \\\\mathbf{X}^\\\\dagger \\\\mathbf{U} =\n \\\\mathbf{U}^* \\\\mathbf{Y} \\\\mathbf{V} \\\\mathbf{S}^{-1}\n\n :param numpy.ndarray U: 2D matrix that contains the left-singular\n vectors of X, stored by column.\n :param numpy.ndarray s: 1D array that contains the singular values of\n X.\n :param numpy.ndarray V: 2D matrix that contains the right-singular\n vectors of X, stored by row.\n :param numpy.ndarray Y: input matrix Y.\n :return: the lowrank operator\n :rtype: numpy.ndarray\n \"\"\"\n if self._tikhonov_regularization is not None:\n s = (\n s**2 + self._tikhonov_regularization * self._norm_X\n ) * np.reciprocal(s)\n return np.linalg.multi_dot([U.T.conj(), Y, V]) * np.reciprocal(s)\n\n def _compute_eigenquantities(self):\n \"\"\"\n Private method that computes eigenvalues and eigenvectors of the\n low-dimensional operator, scaled according to self._rescale_mode.\n \"\"\"\n\n if self._rescale_mode is None:\n # scaling isn't required\n Ahat = self._Atilde\n elif isinstance(self._rescale_mode, np.ndarray):\n if len(self._rescale_mode) != self.as_numpy_array.shape[0]:\n raise ValueError(\n \"\"\"Scaling by an invalid number of\n coefficients\"\"\"\n )\n scaling_factors_array = self._rescale_mode\n\n factors_inv_sqrt = np.diag(np.power(scaling_factors_array, -0.5))\n factors_sqrt = np.diag(np.power(scaling_factors_array, 0.5))\n\n # if an index is 0, we get inf when taking the reciprocal\n for idx, item in enumerate(scaling_factors_array):\n if item == 0:\n factors_inv_sqrt[idx] = 0\n\n Ahat = np.linalg.multi_dot(\n [factors_inv_sqrt, self.as_numpy_array, factors_sqrt]\n )\n else:\n raise ValueError(\n \"Invalid value for rescale_mode: {} of type {}\".format(\n self._rescale_mode, type(self._rescale_mode)\n )\n )\n\n self._eigenvalues, self._eigenvectors = np.linalg.eig(Ahat)\n\n if self._sorted_eigs is not False and self._sorted_eigs is not None:\n if self._sorted_eigs == \"abs\":\n\n def k(tp):\n return abs(tp[0])\n\n elif self._sorted_eigs == \"real\":\n\n def k(tp):\n eig = tp[0]\n if isinstance(eig, complex):\n return (eig.real, eig.imag)\n return (eig.real, 0)\n\n else:\n raise ValueError(\n \"Invalid value for sorted_eigs: {}\".format(\n self._sorted_eigs\n )\n )\n\n # each column is an eigenvector, therefore we take the\n # transpose to associate each row (former column) to an\n # eigenvalue before sorting\n a, b = zip(\n *sorted(zip(self._eigenvalues, self._eigenvectors.T), key=k)\n )\n self._eigenvalues = np.array([eig for eig in a])\n # we restore the original condition (eigenvectors in columns)\n self._eigenvectors = np.array([vec for vec in b]).T\n\n def _compute_modes(self, Y, U, Sigma, V):\n \"\"\"\n Private method that computes eigenvalues and eigenvectors of the\n high-dimensional operator (stored in self.modes and self.Lambda).\n\n :param numpy.ndarray Y: matrix containing the snapshots x1,..x{n} by\n column.\n :param numpy.ndarray U: (truncated) left singular vectors of X\n :param numpy.ndarray Sigma: (truncated) singular values of X\n :param numpy.ndarray V: (truncated) right singular vectors of X\n \"\"\"\n\n if self._rescale_mode is None:\n W = self.eigenvectors\n else:\n # compute W as shown in arXiv:1409.5496 (section 2.4)\n factors_sqrt = np.diag(np.power(self._rescale_mode, 0.5))\n W = factors_sqrt.dot(self.eigenvectors)\n\n # compute the eigenvectors of the high-dimensional operator\n if self._exact:\n if self._tikhonov_regularization is not None:\n Sigma = (\n Sigma**2 + 
self._tikhonov_regularization * self._norm_X\n ) * np.reciprocal(Sigma)\n high_dimensional_eigenvectors = (\n Y.dot(V) * np.reciprocal(Sigma)\n ).dot(W)\n else:\n high_dimensional_eigenvectors = U.dot(W)\n\n # eigenvalues are the same of lowrank\n high_dimensional_eigenvalues = self.eigenvalues\n\n self._modes = high_dimensional_eigenvectors\n self._Lambda = high_dimensional_eigenvalues" }, { "identifier": "compute_svd", "path": "weidmd/utils.py", "snippet": "def compute_svd(X, svd_rank=0):\n \"\"\"\n Truncated Singular Value Decomposition.\n\n :param numpy.ndarray X: the matrix to decompose.\n :param svd_rank: the rank for the truncation; If 0, the method computes\n the optimal rank and uses it for truncation; if positive interger,\n the method uses the argument for the truncation; if float between 0\n and 1, the rank is the number of the biggest singular values that\n are needed to reach the 'energy' specified by `svd_rank`; if -1,\n the method does not compute truncation. Default is 0.\n :type svd_rank: int or float\n :return: the truncated left-singular vectors matrix, the truncated\n singular values array, the truncated right-singular vectors matrix.\n :rtype: numpy.ndarray, numpy.ndarray, numpy.ndarray\n\n References:\n Gavish, Matan, and David L. Donoho, The optimal hard threshold for\n singular values is, IEEE Transactions on Information Theory 60.8\n (2014): 5040-5053.\n \"\"\"\n U, s, V = np.linalg.svd(X, full_matrices=False)\n V = V.conj().T\n\n def omega(x):\n return 0.56 * x**3 - 0.95 * x**2 + 1.82 * x + 1.43\n\n if svd_rank == 0:\n beta = np.divide(*sorted(X.shape))\n tau = np.median(s) * omega(beta)\n rank = np.sum(s > tau)\n if rank == 0:\n warnings.warn(\n \"SVD optimal rank is 0. The largest singular values are \"\n \"indistinguishable from noise. Setting rank truncation to 1.\",\n RuntimeWarning,\n )\n rank = 1\n elif 0 < svd_rank < 1:\n cumulative_energy = np.cumsum(s**2 / (s**2).sum())\n rank = np.searchsorted(cumulative_energy, svd_rank) + 1\n elif svd_rank >= 1 and isinstance(svd_rank, int):\n rank = min(svd_rank, U.shape[1])\n else:\n rank = X.shape[1]\n\n U = U[:, :rank]\n V = V[:, :rank]\n s = s[:rank]\n\n return U, s, V" }, { "identifier": "compute_rank", "path": "weidmd/rdmd.py", "snippet": "def compute_rank(X, svd_rank=0):\n \"\"\"\n Rank computation for the truncated Singular Value Decomposition.\n :param numpy.ndarray X: the matrix to decompose.\n :param svd_rank: the rank for the truncation; If 0, the method computes\n the optimal rank and uses it for truncation; if positive interger,\n the method uses the argument for the truncation; if float between 0\n and 1, the rank is the number of the biggest singular values that\n are needed to reach the 'energy' specified by `svd_rank`; if -1,\n the method does not compute truncation. Default is 0.\n :type svd_rank: int or float\n :return: the computed rank truncation.\n :rtype: int\n References:\n Gavish, Matan, and David L. 
Donoho, The optimal hard threshold for\n singular values is, IEEE Transactions on Information Theory 60.8\n (2014): 5040-5053.\n \"\"\"\n U, s, _ = np.linalg.svd(X, full_matrices=False)\n\n def omega(x):\n return 0.56 * x**3 - 0.95 * x**2 + 1.82 * x + 1.43\n\n if svd_rank == 0:\n beta = np.divide(*sorted(X.shape))\n tau = np.median(s) * omega(beta)\n rank = np.sum(s > tau)\n elif 0 < svd_rank < 1:\n cumulative_energy = np.cumsum(s**2 / (s**2).sum())\n rank = np.searchsorted(cumulative_energy, svd_rank) + 1\n elif svd_rank >= 1 and isinstance(svd_rank, int):\n rank = min(svd_rank, U.shape[1])\n else:\n rank = min(X.shape)\n\n return rank" }, { "identifier": "Snapshots", "path": "weidmd/snapshots.py", "snippet": "class Snapshots:\n \"\"\"\n Utility class to preprocess snapshots shape for DMD.\n\n This class expects the time to be the last dimensions of the array.\n If a Python list is passed to the constructor, each element in the\n list is assumed to be a snapshot in time.\n\n Space dimensions are flattened (C-order) such that the\n matrix becomes 2D (time changes along the last axis).\n\n :param numpy.array | list(numpy.array) X: Training snapshots.\n \"\"\"\n\n def __init__(self, X):\n (\n self._snapshots,\n self._snapshots_shape,\n ) = Snapshots._unroll_space_dimensions(X)\n\n if self._snapshots.shape[-1] == 1:\n raise ValueError(\"Received only one time snapshot.\")\n\n Snapshots._check_condition_number(self._snapshots)\n\n logging.info(\n \"Snapshots: %s, snapshot shape: %s\",\n self._snapshots.shape,\n self._snapshots_shape,\n )\n\n @staticmethod\n def _unroll_space_dimensions(X):\n if hasattr(X, \"ndim\"):\n if X.ndim == 1:\n raise ValueError(\n \"Expected at least a 2D matrix (space x time).\"\n )\n snapshots = X.reshape((-1, X.shape[-1]))\n shapes = set((X.shape[:-1],))\n else:\n shapes, arrays = zip(\n *[(xarr.shape, xarr.flatten()) for xarr in map(np.asarray, X)]\n )\n\n shapes = set(shapes)\n if len(shapes) != 1:\n raise ValueError(\n f\"Snapshots must have the same size, found {len(shapes)}.\"\n )\n if len(next(iter(shapes))) == 0:\n raise ValueError(\"Expected at least a 2D matrix\")\n\n # move the time to the last axis\n snapshots = np.moveaxis(np.stack(arrays), 0, -1)\n\n return snapshots, shapes.pop()\n\n @staticmethod\n def _check_condition_number(X):\n cond_number = np.linalg.cond(X)\n if cond_number > 10e4:\n warnings.warn(\n f\"Input data condition number {cond_number}. \"\n \"\"\"Consider preprocessing data, passing in augmented data\nmatrix, or regularization methods.\"\"\"\n )\n\n @property\n def snapshots(self):\n \"\"\"\n Snapshots of the system (space flattened).\n \"\"\"\n return self._snapshots\n\n @property\n def snapshots_shape(self):\n \"\"\"\n Original (i.e. non-flattened) snapshot shape (time is ignored).\n \"\"\"\n return self._snapshots_shape" } ]
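Both compute_svd and compute_rank quoted above pick the truncation rank with the Gavish-Donoho optimal hard threshold, tau = median(sigma) * omega(beta), where omega(beta) = 0.56 beta^3 - 0.95 beta^2 + 1.82 beta + 1.43 and beta is the matrix aspect ratio. A small self-contained illustration on synthetic low-rank-plus-noise data; the shapes and noise level are arbitrary choices:

import numpy as np

rng = np.random.default_rng(0)
n, m, planted_rank = 200, 100, 5
X = rng.standard_normal((n, planted_rank)) @ rng.standard_normal((planted_rank, m))
X += 0.01 * rng.standard_normal((n, m))        # small additive noise

s = np.linalg.svd(X, compute_uv=False)
beta = min(X.shape) / max(X.shape)             # aspect ratio, as in compute_svd
omega = 0.56 * beta**3 - 0.95 * beta**2 + 1.82 * beta + 1.43
tau = np.median(s) * omega
print(int(np.sum(s > tau)))                    # should recover roughly the planted rank (5)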
import warnings import numpy as np from collections import OrderedDict from scipy.sparse import csr_matrix from scipy.linalg import qr from .dmdbase import DMDBase from .dmdoperator import DMDOperator from .utils import compute_svd from .rdmd import compute_rank from .snapshots import Snapshots
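Among these imports, Snapshots (quoted in the context above) flattens the space dimensions and keeps time on the last axis. A minimal illustration with synthetic frames; the shapes are arbitrary:

import numpy as np
from weidmd.snapshots import Snapshots

frames = [np.random.rand(8, 8) for _ in range(10)]   # ten 8x8 fields sampled over time
snaps = Snapshots(frames)
print(snaps.snapshots.shape)        # (64, 10): space flattened, time on the last axis
print(snaps.snapshots_shape)        # (8, 8): original spatial shape of each snapshot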
12,650
:return: the projection basis used, with modes stored by column. :rtype: numpy.ndarray """ if self._proj_basis is None: msg = ( "fit() hasn't been called " "and no projection basis has been given." ) raise RuntimeError(msg) return self._proj_basis @property def num_trials(self): """ :return: the number of BOP-DMD trials to perform. :rtype: int """ return self._num_trials @property def trial_size(self): """ :return: size of the data subsets used during each BOP-DMD trial. :rtype: int or float """ return self._trial_size @property def time(self): """ Get the vector that contains the time points of the fitted snapshots. :return: the vector that contains the original time points. :rtype: numpy.ndarray """ if self._time is None: raise RuntimeError("fit() hasn't been called.") return self._time @property def atilde(self): """ Get the reduced Koopman operator A, called Atilde. :return: the reduced Koopman operator A. :rtype: numpy.ndarray """ return self.operator.as_numpy_array @property def A(self): """ Get the full Koopman operator A. :return: the full Koopman operator A. :rtype: numpy.ndarray """ return self.operator.A @property def dynamics(self): """ Get the time evolution of each mode. :return: matrix that contains all the time evolution, stored by row. :rtype: numpy.ndarray """ t_omega = np.exp(np.outer(self.eigs, self._time)) return np.diag(self.amplitudes).dot(t_omega) def print_varpro_opts(self): """ Prints a formatted information string that displays all chosen variable projection parameter values. """ if self._Atilde is None: raise ValueError("You need to call fit before") opt_names = [ "init_lambda", "maxlam", "lamup", "use_levmarq", "maxiter", "tol", "eps_stall", "use_fulljac", "verbose", ] print("VARIABLE PROJECTION OPTIONS:") print("============================") for name, value in zip(opt_names, self.operator.varpro_opts): if len(name) < 7: print(name + ":\t\t" + str(value)) else: print(name + ":\t" + str(value)) def _initialize_alpha(self): """ Uses projected trapezoidal rule to approximate the eigenvalues of A in z' = Az. The computed eigenvalues will serve as our initial guess for alpha. :return: Approximated eigenvalues of the matrix A. :rtype: numpy.ndarray """ # Project the snapshot data onto the projection basis. ux = self._proj_basis.conj().T.dot(self.snapshots) ux1 = ux[:, :-1] ux2 = ux[:, 1:] # Define the diagonal matrix T as the following. t1 = self._time[:-1] t2 = self._time[1:] T = np.diag(t2 - t1) # Define the matrices Y and Z as the following and compute the # rank-truncated SVD of Y. Y = (ux1 + ux2) / 2 Z = (ux2 - ux1).dot(np.linalg.inv(T))
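The cropped code above stops inside _initialize_alpha right after forming Y (the trapezoidal average of consecutive projected snapshots) and Z (their scaled difference), so that Z is approximately A @ Y for the continuous-time operator A. The repository's actual continuation is not shown here; the following self-contained sketch only illustrates, on synthetic data, how eigenvalues of A can be estimated from a thin SVD of Y in that setting:

import numpy as np

rng = np.random.default_rng(0)
r, m = 4, 50
A_true = rng.standard_normal((r, r))
Y = rng.standard_normal((r, m))
Z = A_true @ Y                                  # stand-in for the Z ~ A @ Y relation above

U, s, Vh = np.linalg.svd(Y, full_matrices=False)
Atilde = U.conj().T @ Z @ Vh.conj().T @ np.diag(1.0 / s)   # projected operator U* A U
init_alpha = np.linalg.eigvals(Atilde)          # candidate initial guess for the eigenvalues
print(np.sort_complex(init_alpha))
print(np.sort_complex(np.linalg.eigvals(A_true)))   # matches up to ordering / round-off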
class BOPDMDOperator(DMDOperator): """ BOP-DMD operator. :param compute_A: Flag that determines whether or not to compute the full Koopman operator A. :type compute_A: bool :param use_proj: Flag that determines the type of computation to perform. If True, fit input data projected onto the first svd_rank POD modes or columns of proj_basis if provided. If False, fit the full input data. :type use_proj: bool :param init_alpha: Initial guess for the continuous-time DMD eigenvalues. :type init_alpha: numpy.ndarray :param proj_basis: Orthogonal basis for projection, where each column of proj_basis contains a basis mode. :type proj_basis: numpy.ndarray :param num_trials: Number of BOP-DMD trials to perform. If num_trials is a positive integer, num_trials BOP-DMD trials are performed. Otherwise, standard optimized dmd is performed. :type num_trials: int :param trial_size: Size of the randomly selected subset of observations to use for each trial of bagged optimized dmd (BOP-DMD). If trial_size is a positive integer, trial_size many observations will be used per trial. If trial_size is a float between 0 and 1, int(trial_size * m) many observations will be used per trial, where m denotes the total number of data points observed. Note that any other type of input for trial_size will yield an error. :type trial_size: int or float :param eig_sort: Method used to sort eigenvalues (and modes accordingly) when performing BOP-DMD. Eigenvalues will be sorted by real part and then by imaginary part to break ties if `eig_sort="real"`, by imaginary part and then by real part to break ties if `eig_sort="imag"`, or by magnitude if `eig_sort="abs"`. If `eig_sort="auto"`, one of the previously-mentioned sorting methods is chosen depending on eigenvalue variance. :type eig_sort: {"real", "imag", "abs", "auto"} :param init_lambda: Initial value used for the regularization parameter in the Levenberg method. Default is 1.0. Note: Larger lambda values make the method more like gradient descent. :type init_lambda: float :param maxlam: Maximum number of of steps used in the inner Levenberg loop, i.e. the number of times you increase lambda before quitting. Default is 52. :type maxlam: int :param lamup: The factor by which you increase lambda when searching for an appropriate step. Default is 2.0. :type lamup: float :param use_levmarq: Flag that determines whether you use the Levenberg algorithm or the Levenberg-Marquardt algorithm. Default is True, use Levenberg-Marquardt. :type use_levmarq: bool :param maxiter: The maximum number of outer loop iterations to use before quitting. Default is 30. :type maxiter: int :param tol: The tolerance for the relative error in the residual. i.e. the program will terminate if norm(y-Phi(alpha)*b,'fro')/norm(y,'fro') < tol is achieved. Default is 1e-6. :type tol: float :param eps_stall: The tolerance for detecting a stall. i.e. if error(iter-1)-error(iter) < eps_stall*err(iter-1) the program halts. Default is 1e-12. :type eps_stall: float :param use_fulljac: Flag that determines whether or not to use the full expression for the Jacobian or Kaufman's approximation. Default is True, use full expression. :type use_fulljac: bool :param verbose: Flag that determines whether or not to print warning messages that arise during the variable projection routine, and whether or not to print information regarding the method's iterative progress. Default is False, don't print information. 
:type verbose: bool """ def __init__( self, compute_A, use_proj, init_alpha, proj_basis, num_trials, trial_size, eig_sort, init_lambda=1.0, maxlam=52, lamup=2.0, use_levmarq=True, maxiter=30, tol=1e-6, eps_stall=1e-12, use_fulljac=True, verbose=False, ): self._compute_A = compute_A self._use_proj = use_proj self._init_alpha = init_alpha self._proj_basis = proj_basis self._num_trials = num_trials self._trial_size = trial_size self._eig_sort = eig_sort self._varpro_opts = ( init_lambda, maxlam, lamup, use_levmarq, maxiter, tol, eps_stall, use_fulljac, verbose, ) self._varpro_opts_warn() self._modes = None self._eigenvalues = None self._eigenvalues_std = None self._amplitudes_std = None self._Atilde = None self._A = None @property def varpro_opts(self): """ Get the variable projection options. :return: the variable projection options. :rtype: tuple """ return self._varpro_opts @property def A(self): """ Get the full Koopman operator A. :return: the full Koopman operator A. :rtype: numpy.ndarray """ if not self._compute_A: msg = ( "A not computed during fit. " "Set parameter compute_A = True to compute A." ) raise ValueError(msg) if self._A is None: raise ValueError("You need to call fit before") return self._A @property def amplitudes_std(self): """ Get the amplitudes standard deviation. :return: amplitudes standard deviation. :rtype: numpy.ndarray """ return self._amplitudes_std @property def eigenvalues_std(self): """ Get the eigenvalues standard deviation. :return: eigenvalues standard deviation. :rtype: numpy.ndarray """ return self._eigenvalues_std def _varpro_opts_warn(self): """ Checks the validity of the parameter values in _varpro_opts. Throws an error if any parameter value has an invalid type and generates a warning if any value lies outside of the recommended range. """ # Generate dictionary of recommended value range for each parameter. rec_ranges = OrderedDict() rec_ranges["init_lambda"] = [0.0, 1e16] rec_ranges["maxlam"] = [0, 200] rec_ranges["lamup"] = [1.0, 1e16] rec_ranges["use_levmarq"] = [-np.inf, np.inf] rec_ranges["maxiter"] = [0, 1e12] rec_ranges["tol"] = [0.0, 1e16] rec_ranges["eps_stall"] = [-np.inf, 1.0] rec_ranges["use_fulljac"] = [-np.inf, np.inf] rec_ranges["verbose"] = [-np.inf, np.inf] for opt_value, (opt_name, (opt_min, opt_max)) in zip( self._varpro_opts, rec_ranges.items() ): if not isinstance(opt_value, (int, float, bool)): raise ValueError("Invalid variable projection option given.") if opt_value < opt_min: msg = ( "Option {} with value {} is less than {}, " "which is not recommended." ) warnings.warn(msg.format(opt_name, opt_value, opt_min)) elif opt_value > opt_max: msg = ( "Option {} with value {} is greater than {}, " "which is not recommended." ) warnings.warn(msg.format(opt_name, opt_value, opt_max)) def _exp_function(self, alpha, t): """ Matrix of exponentials. :param alpha: Vector of time scalings in the exponent. :type alpha: numpy.ndarray :param t: Vector of time values. :type t: numpy.ndarray :return: Matrix A such that A[i, j] = exp(t_i * alpha_j). :rtype: numpy.ndarray """ return np.exp(np.outer(t, alpha)) def _exp_function_deriv(self, alpha, t, i): """ Derivatives of the matrix of exponentials. :param alpha: Vector of time scalings in the exponent. :type alpha: numpy.ndarray :param t: Vector of time values. :type t: numpy.ndarray :param i: Index in alpha of the derivative variable. :type i: int :return: Derivatives of Phi(alpha, t) with respect to alpha[i]. 
:rtype: scipy.sparse.csr_matrix """ m = len(t) n = len(alpha) if i < 0 or i > n - 1: raise ValueError("Invalid index i given to exp_function_deriv.") A = np.multiply(t, np.exp(alpha[i] * t)) return csr_matrix( (A, (np.arange(m), np.full(m, fill_value=i))), shape=(m, n) ) def _compute_irank_svd(self, X, tolrank): """ Helper function that computes and returns the SVD of X with a rank truncation of irank, which denotes the number of singular values of X greater than tolrank * s1, where s1 is the largest singular value of the matrix X. :param X: Matrix to decompose. :type X: numpy.ndarray :param tolrank: Determines the rank of the returned SVD. :type tolrank: float :return: irank truncated SVD of X. :rtype: numpy.ndarray, numpy.ndarray, numpy.ndarray """ U, s, Vh = np.linalg.svd(X, full_matrices=False) irank = np.sum(s > tolrank * s[0]) U = U[:, :irank] S = np.diag(s[:irank]) Vh = Vh[:irank] return U, S, Vh def _bag(self, H, trial_size): """ Given a 2D array of data X, where each row contains a data snapshot, randomly sub-selects and returns data snapshots while preserving the original snapshot order. Note that if trial_size is a positive integer, trial_size many observations will be used per trial. If trial_size is a float between 0 and 1, int(trial_size * m) many observations will be used per trial, where m denotes the total number of snapshots in X. The indices of the sub-selected snapshots are also returned. :param H: Full data matrix to be sub-selected from. :type H: numpy.ndarray :param trial_size: Size of the sub-selection from H. :type trial_size: int or float :return: Matrix of sub-selected data snapshots, stored in each row, and a vector of each snapshots's row index location in H. :rtype: numpy.ndarray, numpy.ndarray """ # Ensure that H is a 2D numpy.ndarray. if not isinstance(H, np.ndarray) or H.ndim != 2: msg = "H must be a 2D np.ndarray." raise ValueError(msg) if 0 < trial_size < 1: batch_size = int(trial_size * H.shape[0]) elif trial_size >= 1 and isinstance(trial_size, int): batch_size = trial_size else: msg = ( "Invalid trial_size parameter. trial_size must be either " "a positive integer or a float between 0 and 1." ) raise ValueError(msg) # Throw an error if the batch size is too large or too small. if batch_size > H.shape[0]: msg = ( "Error bagging the input data. Please ensure that the " "trial_size parameter is small enough for bagging." ) raise ValueError(msg) if batch_size == 0: msg = ( "Error bagging the input data. Please ensure that the " "trial_size parameter is large enough for bagging." ) raise ValueError(msg) # Obtain and return subset of the data. all_inds = np.arange(H.shape[0]) subset_inds = np.sort( np.random.choice(all_inds, size=batch_size, replace=False) ) return H[subset_inds], subset_inds def _variable_projection(self, H, t, init_alpha, Phi, dPhi): """ Variable projection routine for multivariate data. Attempts to fit the columns of H as linear combinations of the columns of Phi(alpha,t) such that H = Phi(alpha,t)B. Note that M denotes the number of data samples, N denotes the number of columns of Phi, IS denotes the number of functions to fit, and IA denotes the length of the alpha vector. :param H: (M, IS) matrix of data. :type H: numpy.ndarray :param t: (M,) vector of sample times. :type t: numpy.ndarray :param init_alpha: initial guess for alpha. :type init_alpha: numpy.ndarray :param Phi: (M, N) matrix-valued function Phi(alpha,t). 
:type Phi: function :param dPhi: (M, N) matrix-valued function dPhi(alpha,t,i) that contains the derivatives of Phi wrt the ith component of alpha. :type dPhi: function :return: Tuple of two numpy arrays representing... 1. (N, IS) best-fit matrix B. 2. (N,) best-fit vector alpha. :rtype: Tuple[numpy.ndarray, numpy.ndarray] References: - Extensions and Uses of the Variable Projection Algorith for Solving Nonlinear Least Squares Problems by G. H. Golub and R. J. LeVeque ARO Report 79-3, Proceedings of the 1979 Army Numerical Analsysis and Computers Conference. - Variable projection for nonlinear least squares problems. Computational Optimization and Applications 54.3 (2013): 579-593 by Dianne P. O'Leary and Bert W. Rust. """ def compute_residual(alpha): """ Helper function that, given alpha, and using H, t, Phi as they are passed to the _variable_projection function, computes and returns the matrix Phi(alpha,t), B from the expression H = Phi(alpha,t)B, the residual H - Phi(alpha,t)B, and 0.5*norm(residual,'fro')^2, which will be used to denote the error. """ Phi_matrix = Phi(alpha, t) B = np.linalg.lstsq(Phi_matrix, H, rcond=None)[0] residual = H - Phi_matrix.dot(B) error = 0.5 * np.linalg.norm(residual, "fro") ** 2 return B, residual, error # Define M, IS, and IA. M, IS = H.shape IA = len(init_alpha) # Unpack all variable projection parameters stored in varpro_opts. ( init_lambda, maxlam, lamup, use_levmarq, maxiter, tol, eps_stall, use_fulljac, verbose, ) = self._varpro_opts # Initialize values. tolrank = M * np.finfo(float).eps _lambda = init_lambda alpha = np.copy(init_alpha) B, residual, error = compute_residual(alpha) U, S, Vh = self._compute_irank_svd(Phi(alpha, t), tolrank) # Initialize storage. all_error = np.zeros(maxiter) djac_matrix = np.zeros((M * IS, IA), dtype="complex") rjac = np.zeros((2 * IA, IA), dtype="complex") scales = np.zeros(IA) for itr in range(maxiter): # Build Jacobian matrix, looping over alpha indices. for i in range(IA): # Build the approximate expression for the Jacobian. dphi_temp = dPhi(alpha, t, i) ut_dphi = csr_matrix(U.conj().T @ dphi_temp) uut_dphi = csr_matrix(U @ ut_dphi) djac_a = (dphi_temp - uut_dphi) @ B djac_matrix[:, i] = djac_a.ravel(order="F") # Compute the full expression for the Jacobian. if use_fulljac: transform = np.linalg.multi_dot([U, np.linalg.inv(S), Vh]) dphit_res = csr_matrix(dphi_temp.conj().T @ residual) djac_b = transform @ dphit_res djac_matrix[:, i] += djac_b.ravel(order="F") # Scale for the Levenberg algorithm. scales[i] = 1 # Scale for the Levenberg-Marquardt algorithm. if use_levmarq: scales[i] = min(np.linalg.norm(djac_matrix[:, i]), 1) scales[i] = max(scales[i], 1e-6) # Loop to determine lambda (the step-size parameter). rhs_temp = np.copy(residual.ravel(order="F"))[:, None] q_out, djac_out, j_pvt = qr( djac_matrix, mode="economic", pivoting=True ) ij_pvt = np.arange(IA) ij_pvt = ij_pvt[j_pvt] rjac[:IA] = np.triu(djac_out[:IA]) rhs_top = q_out.conj().T.dot(rhs_temp) scales_pvt = scales[j_pvt[:IA]] rhs = np.concatenate( (rhs_top[:IA], np.zeros(IA, dtype="complex")), axis=None ) def step(_lambda, rhs, scales_pvt, ij_pvt): """ Helper function that, given a step size _lambda and the current right-hand side and pivots, computes and returns delta, the amount in which we update alpha, and the updated alpha vector. Note that this function uses rjac and alpha as they are defined outside of this function. """ # Compute the step delta. 
rjac[IA:] = _lambda * np.diag(scales_pvt) delta = np.linalg.lstsq(rjac, rhs, rcond=None)[0] delta = delta[ij_pvt] # Compute the updated alpha vector. alpha_updated = alpha.ravel() + delta.ravel() return delta, alpha_updated # Take a step using our initial step size init_lambda. delta_0, alpha_0 = step(_lambda, rhs, scales_pvt, ij_pvt) B_0, residual_0, error_0 = compute_residual(alpha_0) # Check actual improvement vs predicted improvement. actual_improvement = error - error_0 pred_improvement = ( 0.5 * np.linalg.multi_dot( [delta_0.conj().T, djac_matrix.conj().T, rhs_temp] ).real ) improvement_ratio = actual_improvement / pred_improvement if error_0 < error: # Rescale lambda based on the improvement ratio. _lambda *= max(1 / 3, 1 - (2 * improvement_ratio - 1) ** 3) alpha, B, residual, error = alpha_0, B_0, residual_0, error_0 else: # Increase lambda until something works. for _ in range(maxlam): _lambda *= lamup delta_0, alpha_0 = step(_lambda, rhs, scales_pvt, ij_pvt) B_0, residual_0, error_0 = compute_residual(alpha_0) if error_0 < error: alpha, B = alpha_0, B_0 residual, error = residual_0, error_0 break # Terminate if no appropriate step length was found. if error_0 >= error: if verbose: msg = ( "Failed to find appropriate step length at " "iteration {}. Current error {}." ) warnings.warn(msg.format(itr, error)) return B, alpha # Record the current error. all_error[itr] = error # Print iterative progress if the verbose flag is turned on. if verbose: update_msg = "Step {} Error {} Lambda {}" print(update_msg.format(itr, error, _lambda)) # Terminate if the tolerance is met. if error < tol: return B, alpha # Terminate if a stall is detected. if ( itr > 0 and all_error[itr - 1] - all_error[itr] < eps_stall * all_error[itr - 1] ): if verbose: msg = ( "Stall detected: error reduced by less than {} " "times the error at the previous step. " "Iteration {}. Current error {}." ) warnings.warn(msg.format(eps_stall, itr, error)) return B, alpha U, S, Vh = self._compute_irank_svd(Phi(alpha, t), tolrank) # Failed to meet tolerance in maxiter steps. if verbose: msg = ( "Failed to reach tolerance after maxiter = {} iterations. " "Current error {}." ) warnings.warn(msg.format(maxiter, error)) return B, alpha def _single_trial_compute_operator(self, H, t, init_alpha): """ Helper function that computes the standard optimized dmd operator. Returns the resulting DMD modes, eigenvalues, amplitudes, reduced system matrix, and full system matrix respectively. """ B, alpha = self._variable_projection( H, t, init_alpha, self._exp_function, self._exp_function_deriv ) # Save the modes, eigenvalues, and amplitudes respectively. w = B.T e = alpha b = np.sqrt(np.sum(np.abs(w) ** 2, axis=0)) # Normalize the modes and the amplitudes. inds_small = np.abs(b) < (10 * np.finfo(float).eps * np.max(b)) b[inds_small] = 1.0 w = w.dot(np.diag(1 / b)) w[:, inds_small] = 0.0 b[inds_small] = 0.0 # Compute the projected propagator Atilde. if self._use_proj: Atilde = np.linalg.multi_dot([w, np.diag(e), np.linalg.pinv(w)]) # Unproject the dmd modes. w = self._proj_basis.dot(w) else: w_proj = self._proj_basis.conj().T.dot(w) Atilde = np.linalg.multi_dot( [w_proj, np.diag(e), np.linalg.pinv(w_proj)] ) # Compute the full system matrix A. if self._compute_A: A = np.linalg.multi_dot([w, np.diag(e), np.linalg.pinv(w)]) else: A = None return w, e, b, Atilde, A def compute_operator(self, H, t): """ Compute the low-rank and the full BOP-DMD operators. :param H: Matrix of data to fit. :type H: numpy.ndarray :param t: Vector of sample times. 
:type t: numpy.ndarray :return: The BOP-DMD amplitudes. :rtype: numpy.ndarray """ # Perform an initial optimized dmd solve using init_alpha. w_0, e_0, b_0, Atilde_0, A_0 = self._single_trial_compute_operator( H, t, self._init_alpha ) # If num_trials isn't a positive int, perform standard optimized dmd. if self._num_trials <= 0 or not isinstance(self._num_trials, int): self._modes = w_0 self._eigenvalues = e_0 self._Atilde = Atilde_0 self._A = A_0 return b_0 # Perform BOP-DMD. # Initialize bagging result storage. all_w = np.empty((self._num_trials, *w_0.shape), dtype="complex") all_e = np.empty((self._num_trials, *e_0.shape), dtype="complex") all_b = np.empty((self._num_trials, *b_0.shape), dtype="complex") # Perform num_trials many trials of optimized dmd. for i in range(self._num_trials): H_i, subset_inds = self._bag(H, self._trial_size) w_i, e_i, b_i, _, _ = self._single_trial_compute_operator( H_i, t[subset_inds], e_0 ) # Set the sorting style if _eig_sort is "auto". if self._eig_sort == "auto": real_var = np.var(e_i.real) imag_var = np.var(e_i.imag) abs_var = np.var(np.abs(e_i)) all_var = [real_var, imag_var, abs_var] if np.argmax(all_var) == 0: self._eig_sort = "real" elif np.argmax(all_var) == 1: self._eig_sort = "imag" else: self._eig_sort = "abs" # Sort the results according to eigenvalue. if self._eig_sort == "real": sorted_inds = np.argsort(e_i) elif self._eig_sort == "imag": e_i_real_imag_swapped = e_i.imag + (1j * e_i.real) sorted_inds = np.argsort(e_i_real_imag_swapped) elif self._eig_sort == "abs": sorted_inds = np.argsort(np.abs(e_i)) else: raise ValueError("Provided eig_sort method is not supported.") all_w[i] = w_i[:, sorted_inds] all_e[i] = e_i[sorted_inds] all_b[i] = b_i[sorted_inds] # Compute and use the average optimized dmd results. self._modes = np.mean(all_w, axis=0) self._eigenvalues = np.mean(all_e, axis=0) # Compute Atilde using the average optimized dmd results. w_proj = self._proj_basis.conj().T.dot(self._modes) self._Atilde = np.linalg.multi_dot( [w_proj, np.diag(self._eigenvalues), np.linalg.pinv(w_proj)] ) # Compute A if requested. if self._compute_A: self._A = np.linalg.multi_dot( [ self._modes, np.diag(self._eigenvalues), np.linalg.pinv(self._modes), ] ) # Compute and save the standard deviation of the optimized dmd results. self._eigenvalues_std = np.std(all_e, axis=0) self._amplitudes_std = np.std(all_b, axis=0) return np.mean(all_b, axis=0) class BOPDMD(DMDBase): """ Bagging, Optimized Dynamic Mode Decomposition. :param svd_rank: The rank for the truncation; If 0, the method computes the optimal rank and uses it for truncation; if positive integer, the method uses the argument for the truncation; if float between 0 and 1, the rank is the number of the biggest singular values that are needed to reach the 'energy' specified by `svd_rank`; if -1, the method does not compute truncation. :type svd_rank: int or float :param compute_A: Flag that determines whether or not to compute the full Koopman operator A. Default is False, do not compute the full operator. Note that the full operator is potentially prohibitively expensive to compute. :type compute_A: bool :param use_proj: Flag that determines the type of computation to perform. If True, fit input data projected onto the first svd_rank POD modes or columns of proj_basis if provided. If False, fit the full input data. Default is True, fit projected data. :type use_proj: bool :param init_alpha: Initial guess for the continuous-time DMD eigenvalues. 
If not provided, one is computed via a trapezoidal rule approximation. Default is None (alpha not provided). :type init_alpha: numpy.ndarray :param proj_basis: Orthogonal basis for projection, where each column of proj_basis contains a basis mode. If not provided, POD modes are used. Default is None (basis not provided). :type proj_basis: numpy.ndarray :param num_trials: Number of BOP-DMD trials to perform. If num_trials is a positive integer, num_trials BOP-DMD trials are performed. Otherwise, standard optimized dmd is performed. Default is 0. :type num_trials: int :param trial_size: Size of the randomly selected subset of observations to use for each trial of bagged optimized dmd (BOP-DMD). If trial_size is a positive integer, trial_size many observations will be used per trial. If trial_size is a float between 0 and 1, int(trial_size * m) many observations will be used per trial, where m denotes the total number of data points observed. Note that any other type of input for trial_size will yield an error. Default is 0.2. :type trial_size: int or float :param eig_sort: Method used to sort eigenvalues (and modes accordingly) when performing BOP-DMD. Eigenvalues will be sorted by real part and then by imaginary part to break ties if `eig_sort="real"`, by imaginary part and then by real part to break ties if `eig_sort="imag"`, or by magnitude if `eig_sort="abs"`. If `eig_sort="auto"`, one of the previously-mentioned sorting methods is chosen depending on eigenvalue variance. Default is "auto". :type eig_sort: {"real", "imag", "abs", "auto"} :param varpro_opts_dict: Dictionary containing the desired parameter values for variable projection. The following parameters may be specified: `init_lambda`, `maxlam`, `lamup`, `use_levmarq`, `maxiter`, `tol`, `eps_stall`, `use_fulljac`, `verbose`. Default values will be used for any parameters not specified in `varpro_opts_dict`. See `BOPDMDOperator` documentation for default values and descriptions for each parameter. :type varpro_opts_dict: dict """ def __init__( self, svd_rank=0, compute_A=False, use_proj=True, init_alpha=None, proj_basis=None, num_trials=0, trial_size=0.2, eig_sort="auto", varpro_opts_dict=None, ): self._svd_rank = svd_rank self._compute_A = compute_A self._use_proj = use_proj self._init_alpha = init_alpha self._proj_basis = proj_basis self._num_trials = num_trials self._trial_size = trial_size self._eig_sort = eig_sort if varpro_opts_dict is None: self._varpro_opts_dict = {} elif not isinstance(varpro_opts_dict, dict): raise ValueError("varpro_opts_dict must be a dict.") else: self._varpro_opts_dict = varpro_opts_dict self._snapshots_holder = None self._time = None self._Atilde = None self._modes_activation_bitmask_proxy = None @property def svd_rank(self): """ :return: the rank used for the svd truncation. :rtype: int or float """ return self._svd_rank @property def compute_A(self): """ :return: flag that determines whether to compute the full operator A. :rtype: bool """ return self._compute_A @property def use_proj(self): """ :return: flag that determines whether to fit projected or full data. :rtype: bool """ return self._use_proj @property def init_alpha(self): """ :return: initial guess used for the continuous-time DMD eigenvalues. :rtype: numpy.ndarray """ if self._init_alpha is None: msg = ( "fit() hasn't been called " "and no initial value for alpha has been given." ) raise RuntimeError(msg) return self._init_alpha @property def proj_basis(self): """ :return: the projection basis used, with modes stored by column. 
:rtype: numpy.ndarray """ if self._proj_basis is None: msg = ( "fit() hasn't been called " "and no projection basis has been given." ) raise RuntimeError(msg) return self._proj_basis @property def num_trials(self): """ :return: the number of BOP-DMD trials to perform. :rtype: int """ return self._num_trials @property def trial_size(self): """ :return: size of the data subsets used during each BOP-DMD trial. :rtype: int or float """ return self._trial_size @property def time(self): """ Get the vector that contains the time points of the fitted snapshots. :return: the vector that contains the original time points. :rtype: numpy.ndarray """ if self._time is None: raise RuntimeError("fit() hasn't been called.") return self._time @property def atilde(self): """ Get the reduced Koopman operator A, called Atilde. :return: the reduced Koopman operator A. :rtype: numpy.ndarray """ return self.operator.as_numpy_array @property def A(self): """ Get the full Koopman operator A. :return: the full Koopman operator A. :rtype: numpy.ndarray """ return self.operator.A @property def dynamics(self): """ Get the time evolution of each mode. :return: matrix that contains all the time evolution, stored by row. :rtype: numpy.ndarray """ t_omega = np.exp(np.outer(self.eigs, self._time)) return np.diag(self.amplitudes).dot(t_omega) def print_varpro_opts(self): """ Prints a formatted information string that displays all chosen variable projection parameter values. """ if self._Atilde is None: raise ValueError("You need to call fit before") opt_names = [ "init_lambda", "maxlam", "lamup", "use_levmarq", "maxiter", "tol", "eps_stall", "use_fulljac", "verbose", ] print("VARIABLE PROJECTION OPTIONS:") print("============================") for name, value in zip(opt_names, self.operator.varpro_opts): if len(name) < 7: print(name + ":\t\t" + str(value)) else: print(name + ":\t" + str(value)) def _initialize_alpha(self): """ Uses projected trapezoidal rule to approximate the eigenvalues of A in z' = Az. The computed eigenvalues will serve as our initial guess for alpha. :return: Approximated eigenvalues of the matrix A. :rtype: numpy.ndarray """ # Project the snapshot data onto the projection basis. ux = self._proj_basis.conj().T.dot(self.snapshots) ux1 = ux[:, :-1] ux2 = ux[:, 1:] # Define the diagonal matrix T as the following. t1 = self._time[:-1] t2 = self._time[1:] T = np.diag(t2 - t1) # Define the matrices Y and Z as the following and compute the # rank-truncated SVD of Y. Y = (ux1 + ux2) / 2 Z = (ux2 - ux1).dot(np.linalg.inv(T))
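The bagging half of compute_operator above is essentially an ensemble average with per-element spreads: each trial re-fits optimized DMD on a random row subset (seeded with the eigenvalues of the initial full fit), the per-trial eigenvalues are argsorted so that index k refers to the same mode in every trial, and the mean/std across trials become the estimate and its uncertainty. The aggregation step in isolation (a sketch; the per-trial fits are stubbed out with toy data):

import numpy as np

def bop_aggregate(trial_eigs, trial_amps):
    # trial_eigs / trial_amps: one 1-D array per bagging trial.
    # np.argsort on complex data sorts by real part, then imaginary part --
    # the same behaviour the eig_sort="real" branch above relies on.
    order = [np.argsort(e) for e in trial_eigs]
    eigs = np.stack([e[o] for e, o in zip(trial_eigs, order)])
    amps = np.stack([b[o] for b, o in zip(trial_amps, order)])
    return {
        "eigenvalues": eigs.mean(axis=0),
        "eigenvalues_std": eigs.std(axis=0),   # ensemble spread, as in _eigenvalues_std
        "amplitudes": amps.mean(axis=0),
        "amplitudes_std": amps.std(axis=0),
    }

# Toy stand-in for three trials that perturb the same two continuous-time eigenvalues.
rng = np.random.default_rng(1)
base = np.array([-0.1 + 2.0j, -0.1 - 2.0j])
trials_e = [base + 0.01 * rng.standard_normal(2) for _ in range(3)]
trials_b = [np.ones(2) + 0.01 * rng.standard_normal(2) for _ in range(3)]
print(bop_aggregate(trials_e, trials_b)["eigenvalues_std"])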
U, s, V = compute_svd(Y, self._svd_rank)
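compute_svd itself is imported from .utils and is not included in this record; presumably it returns the leading svd_rank singular triplets of Y, with V holding the right singular vectors, possibly resolving the rank argument via compute_rank from the context above. A NumPy stand-in under that assumption:

import numpy as np

def truncated_svd(X, rank):
    # Assumed behaviour of compute_svd: keep the first `rank` singular triplets of X.
    U, s, Vh = np.linalg.svd(X, full_matrices=False)
    return U[:, :rank], s[:rank], Vh[:rank].conj().T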
2
2023-10-30 12:37:40+00:00
16k
lewandofskee/DiAD
ldm/models/diffusion/.ipynb_checkpoints/ddpm-checkpoint.py
[ { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('font/DejaVuSans.ttf', size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "exists", "path": "ldm/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "default", "path": "ldm/util.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" }, { "identifier": "ismap", "path": "ldm/util.py", "snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)" }, { "identifier": "isimage", "path": "ldm/util.py", "snippet": "def isimage(x):\n if not isinstance(x,torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)" }, { "identifier": "mean_flat", "path": "ldm/util.py", "snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))" }, { "identifier": "count_params", "path": "ldm/util.py", "snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.\")\n return total_params" }, { "identifier": "instantiate_from_config", "path": "ldm/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "LitEma", "path": "ldm/modules/ema.py", "snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n\n self.m_name2s_name = {}\n self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))\n self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int) if use_num_upates\n else torch.tensor(-1, dtype=torch.int))\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n # remove as '.'-character is not allowed in buffers\n s_name = name.replace('.', '')\n self.m_name2s_name.update({name: s_name})\n self.register_buffer(s_name, p.clone().detach().data)\n\n self.collected_params = []\n\n def reset_num_updates(self):\n del self.num_updates\n self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int))\n\n def forward(self, model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 
1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)" }, { "identifier": "normal_kl", "path": "ldm/modules/distributions/distributions.py", "snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. 
Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )" }, { "identifier": "DiagonalGaussianDistribution", "path": "ldm/modules/distributions/distributions.py", "snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.])\n else:\n if other is None:\n return 0.5 * torch.sum(torch.pow(self.mean, 2)\n + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3])\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var - 1.0 - self.logvar + other.logvar,\n dim=[1, 2, 3])\n\n def nll(self, sample, dims=[1,2,3]):\n if self.deterministic:\n return torch.Tensor([0.])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims)\n\n def mode(self):\n return self.mean" }, { "identifier": "IdentityFirstStage", "path": "ldm/models/autoencoder.py", "snippet": "class IdentityFirstStage(torch.nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x" }, { "identifier": "AutoencoderKL", "path": "ldm/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ema_decay=None,\n learn_logvar=False\n ):\n super().__init__()\n self.learn_logvar = learn_logvar\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2*ddconfig[\"z_channels\"], 2*embed_dim, 1)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig[\"z_channels\"], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels)==int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n\n self.use_ema = ema_decay is not None\n if self.use_ema:\n self.ema_decay = ema_decay\n assert 0. 
< ema_decay < 1.\n self.model_ema = LitEma(self, decay=ema_decay)\n print(f\"Keeping EMAs of {len(list(self.model_ema.buffers()))}.\")\n\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n\n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n @contextmanager\n def ema_scope(self, context=None):\n if self.use_ema:\n self.model_ema.store(self.parameters())\n self.model_ema.copy_to(self)\n if context is not None:\n print(f\"{context}: Switched to EMA weights\")\n try:\n yield None\n finally:\n if self.use_ema:\n self.model_ema.restore(self.parameters())\n if context is not None:\n print(f\"{context}: Restored training weights\")\n\n def on_train_batch_end(self, *args, **kwargs):\n if self.use_ema:\n self.model_ema(self)\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n # x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n self.log(\"aeloss\", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n\n self.log(\"discloss\", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return discloss\n\n def validation_step(self, batch, batch_idx):\n log_dict = self._validation_step(batch, batch_idx)\n with self.ema_scope():\n log_dict_ema = self._validation_step(batch, batch_idx, postfix=\"_ema\")\n return log_dict\n\n def _validation_step(self, batch, batch_idx, postfix=\"\"):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\"+postfix)\n\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\"+postfix)\n\n self.log(f\"val{postfix}/rec_loss\", log_dict_ae[f\"val{postfix}/rec_loss\"])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n def 
configure_optimizers(self):\n lr = self.learning_rate\n ae_params_list = list(self.encoder.parameters()) + list(self.decoder.parameters()) + list(\n self.quant_conv.parameters()) + list(self.post_quant_conv.parameters())\n if self.learn_logvar:\n print(f\"{self.__class__.__name__}: Learning logvar\")\n ae_params_list.append(self.loss.logvar)\n opt_ae = torch.optim.Adam(ae_params_list,\n lr=lr, betas=(0.5, 0.9))\n opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),\n lr=lr, betas=(0.5, 0.9))\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, log_ema=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n if log_ema or self.use_ema:\n with self.ema_scope():\n xrec_ema, posterior_ema = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec_ema.shape[1] > 3\n xrec_ema = self.to_rgb(xrec_ema)\n log[\"samples_ema\"] = self.decode(torch.randn_like(posterior_ema.sample()))\n log[\"reconstructions_ema\"] = xrec_ema\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.*(x-x.min())/(x.max()-x.min()) - 1.\n return x" }, { "identifier": "make_beta_schedule", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):\n if schedule == \"linear\":\n betas = (\n torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n elif schedule == \"sqrt\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()" }, { "identifier": "extract_into_tensor", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" }, { "identifier": "noise_like", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "DDIMSampler", "path": "ldm/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n 
self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True,timesteps=1000):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n x_T=None,\n timesteps=1000,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n dynamic_threshold=None,\n ucg_schedule=None,\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n ctmp = conditioning[list(conditioning.keys())[0]]\n while isinstance(ctmp, list): ctmp = ctmp[0]\n cbs = ctmp.shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n elif isinstance(conditioning, list):\n for ctmp in conditioning:\n if ctmp.shape[0] != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose,timesteps=timesteps)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n # print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n timesteps=timesteps,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n ucg_schedule=ucg_schedule,\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None,\n ucg_schedule=None):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0])\n # subset_end = int(timesteps+1 * self.ddim_timesteps.shape[0] / self.ddpm_num_timesteps)\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n # print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. 
- mask) * img\n\n if ucg_schedule is not None:\n assert len(ucg_schedule) == len(time_range)\n unconditional_guidance_scale = ucg_schedule[i]\n\n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % 500 == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,\n dynamic_threshold=None):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n model_output = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n if isinstance(c, dict):\n assert isinstance(unconditional_conditioning, dict)\n c_in = dict()\n for k in c:\n if isinstance(c[k], list):\n c_in[k] = [torch.cat([\n unconditional_conditioning[k][i],\n c[k][i]]) for i in range(len(c[k]))]\n else:\n c_in[k] = torch.cat([\n unconditional_conditioning[k],\n c[k]])\n elif isinstance(c, list):\n c_in = list()\n assert isinstance(unconditional_conditioning, list)\n for i in range(len(c)):\n c_in.append(torch.cat([unconditional_conditioning[i], c[i]]))\n else:\n c_in = torch.cat([unconditional_conditioning, c])\n model_uncond, model_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond)\n\n if self.model.parameterization == \"v\":\n e_t = self.model.predict_eps_from_z_and_v(x, t, model_output)\n else:\n e_t = model_output\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\", 'not implemented'\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n if self.model.parameterization != \"v\":\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n else:\n pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n raise 
NotImplementedError()\n\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None,\n unconditional_guidance_scale=1.0, unconditional_conditioning=None, callback=None):\n num_reference_steps = self.ddpm_num_timesteps if use_original_steps else self.ddim_timesteps.shape[0]\n\n assert t_enc <= num_reference_steps\n num_steps = t_enc\n\n if use_original_steps:\n alphas_next = self.alphas_cumprod[:num_steps]\n alphas = self.alphas_cumprod_prev[:num_steps]\n else:\n alphas_next = self.ddim_alphas[:num_steps]\n alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])\n\n x_next = x0\n intermediates = []\n inter_steps = []\n for i in tqdm(range(num_steps), desc='Encoding Image'):\n t = torch.full((x0.shape[0],), i, device=self.model.device, dtype=torch.long)\n if unconditional_guidance_scale == 1.:\n noise_pred = self.model.apply_model(x_next, t, c)\n else:\n assert unconditional_conditioning is not None\n e_t_uncond, noise_pred = torch.chunk(\n self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)),\n torch.cat((unconditional_conditioning, c))), 2)\n noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond)\n\n xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next\n weighted_noise_pred = alphas_next[i].sqrt() * (\n (1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred\n x_next = xt_weighted + weighted_noise_pred\n if return_intermediates and i % (\n num_steps // return_intermediates) == 0 and i < num_steps - 1:\n intermediates.append(x_next)\n inter_steps.append(i)\n elif return_intermediates and i >= num_steps - 2:\n intermediates.append(x_next)\n inter_steps.append(i)\n if callback: callback(i)\n\n out = {'x_encoded': x_next, 'intermediate_steps': inter_steps}\n if return_intermediates:\n out.update({'intermediates': intermediates})\n return x_next, out\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +\n extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)\n\n @torch.no_grad()\n def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,\n use_original_steps=False, callback=None):\n\n timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n # print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='Decoding image', total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((x_latent.shape[0],), step, 
device=x_latent.device, dtype=torch.long)\n x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n if callback: callback(i)\n return x_dec" }, { "identifier": "cal_anomaly_map", "path": "utils/util.py", "snippet": "def cal_anomaly_map(fs_list, ft_list, out_size=224, amap_mode='mul'):\n if amap_mode == 'mul':\n anomaly_map = np.ones([out_size, out_size])\n else:\n anomaly_map = np.zeros([out_size, out_size])\n a_map_list = []\n for i in range(len(ft_list)):\n fs = fs_list[i]\n ft = ft_list[i]\n #fs_norm = F.normalize(fs, p=2)\n #ft_norm = F.normalize(ft, p=2)\n a_map = 1 - F.cosine_similarity(fs, ft)\n a_map = torch.unsqueeze(a_map, dim=1)\n a_map = F.interpolate(a_map, size=out_size, mode='bilinear', align_corners=True)\n a_map = a_map[0, 0, :, :].to('cpu').detach().numpy()\n a_map_list.append(a_map)\n if amap_mode == 'mul':\n anomaly_map *= a_map\n else:\n anomaly_map += a_map\n return anomaly_map, a_map_list" }, { "identifier": "log_local", "path": "utils/util.py", "snippet": "def log_local(images, filenames):\n pixel_mean = [0.485, 0.456, 0.406]\n pixel_std = [0.229, 0.224, 0.225]\n pixel_mean = torch.tensor(pixel_mean).cuda().unsqueeze(1).unsqueeze(1) # 3 x 1 x 1\n pixel_std = torch.tensor(pixel_std).cuda().unsqueeze(1).unsqueeze(1)\n root = os.path.join('log_image/')\n name = filenames[-7:-4]\n for k in images:\n image = (images[k].squeeze() * pixel_std + pixel_mean) * 255\n image = image.permute(1, 2, 0).to('cpu').numpy()\n filename = \"{}-{}.jpg\".format(name, k)\n path = os.path.join(root, filenames[:-7],filename)\n os.makedirs(os.path.split(path)[0], exist_ok=True)\n # Image.fromarray(image).save(path)\n image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n cv2.imwrite(path, image)" }, { "identifier": "create_logger", "path": "utils/util.py", "snippet": "def create_logger(name, log_file, level=logging.INFO):\n log = logging.getLogger(name)\n formatter = logging.Formatter(\n \"[%(asctime)s][%(filename)15s][line:%(lineno)4d][%(levelname)8s] %(message)s\"\n )\n fh = logging.FileHandler(log_file)\n fh.setFormatter(formatter)\n sh = logging.StreamHandler()\n sh.setFormatter(formatter)\n log.setLevel(level)\n log.addHandler(fh)\n log.addHandler(sh)\n return log" }, { "identifier": "dump", "path": "utils/eval_helper.py", "snippet": "def dump(save_dir, outputs):\n filenames = outputs[\"filename\"]\n batch_size = len(filenames)\n preds = outputs[\"pred\"].cpu().numpy() # B x 1 x H x W\n masks = outputs[\"mask\"].cpu().numpy() # B x 1 x H x W\n # heights = outputs[\"height\"].cpu().numpy()\n # widths = outputs[\"width\"].cpu().numpy()\n clsnames = outputs[\"clsname\"]\n for i in range(batch_size):\n file_dir, filename = os.path.split(filenames[i])\n _, subname = os.path.split(file_dir)\n filename = \"{}_{}_{}\".format(clsnames[i], subname, filename)\n filename, _ = os.path.splitext(filename)\n save_file = os.path.join(save_dir, filename + \".npz\")\n np.savez(\n save_file,\n filename=filenames[i],\n pred=preds[i],\n mask=masks[i],\n # height=heights[i],\n # width=widths[i],\n clsname=clsnames[i],\n )" }, { "identifier": "log_metrics", "path": "utils/eval_helper.py", "snippet": "def log_metrics(ret_metrics, config):\n logger = logging.getLogger(\"global_logger\")\n clsnames = set([k.rsplit(\"_\", 2)[0] for k in ret_metrics.keys()])\n clsnames = list(clsnames - set([\"mean\"])) + [\"mean\"]\n\n # auc\n if config.get(\"auc\", None):\n 
auc_keys = [k for k in ret_metrics.keys() if \"auc\" in k]\n evalnames = list(set([k.rsplit(\"_\", 2)[1] for k in auc_keys]))\n record = Report([\"clsname\"] + evalnames)\n\n for clsname in clsnames:\n clsvalues = [\n ret_metrics[\"{}_{}_auc\".format(clsname, evalname)]\n for evalname in evalnames\n ]\n record.add_one_record([clsname] + clsvalues)\n\n logger.info(f\"\\n{record}\")" }, { "identifier": "merge_together", "path": "utils/eval_helper.py", "snippet": "def merge_together(save_dir):\n npz_file_list = glob.glob(os.path.join(save_dir, \"*.npz\"))\n fileinfos = []\n preds = []\n masks = []\n for npz_file in npz_file_list:\n npz = np.load(npz_file)\n fileinfos.append(\n {\n \"filename\": str(npz[\"filename\"]),\n # \"height\": npz[\"height\"],\n # \"width\": npz[\"width\"],\n \"clsname\": str(npz[\"clsname\"]),\n }\n )\n preds.append(npz[\"pred\"])\n masks.append(npz[\"mask\"])\n preds = np.concatenate(np.asarray(preds), axis=0) # N x H x W\n masks = np.concatenate(np.asarray(masks), axis=0) # N x H x W\n return fileinfos, preds, masks" }, { "identifier": "performances", "path": "utils/eval_helper.py", "snippet": "def performances(fileinfos, preds, masks, config):\n ret_metrics = {}\n clsnames = set([fileinfo[\"clsname\"] for fileinfo in fileinfos])\n for clsname in clsnames:\n preds_cls = []\n masks_cls = []\n file_cls = []\n for fileinfo, pred, mask in zip(fileinfos, preds, masks):\n if fileinfo[\"clsname\"] == clsname:\n preds_cls.append(pred[None, ...])\n masks_cls.append(mask[None, ...])\n file_cls.append(fileinfo['filename'])\n preds_cls = np.concatenate(np.asarray(preds_cls), axis=0) # N x H x W\n masks_cls = np.concatenate(np.asarray(masks_cls), axis=0) # N x H x W\n data_meta = EvalDataMeta(preds_cls, masks_cls, file_cls)\n\n # auc\n if config.get(\"auc\", None):\n for metric in config[\"auc\"]:\n evalname = metric[\"name\"]\n kwargs = metric.get(\"kwargs\", {})\n eval_method = eval_lookup_table[evalname](data_meta, **kwargs)\n auc = eval_method.eval_auc()\n ret_metrics[\"{}_{}_auc\".format(clsname, evalname)] = auc\n\n if config.get(\"auc\", None):\n for metric in config[\"auc\"]:\n evalname = metric[\"name\"]\n evalvalues = [\n ret_metrics[\"{}_{}_auc\".format(clsname, evalname)]\n for clsname in clsnames\n ]\n mean_auc = np.mean(np.array(evalvalues))\n ret_metrics[\"{}_{}_auc\".format(\"mean\", evalname)] = mean_auc\n\n return ret_metrics" } ]
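The `cal_anomaly_map` helper in the snippet list above turns per-layer feature disagreement between an input image and its diffusion reconstruction into a pixel-wise anomaly map. Below is a minimal sketch of that idea, assuming matching lists of (1, C, H, W) feature maps from the same pretrained backbone; the function and variable names here are illustrative, not the repository's API.

```python
import torch
import torch.nn.functional as F

def anomaly_map_from_features(fs_list, ft_list, out_size=256, mode="add"):
    """fs_list, ft_list: matching lists of (1, C, H, W) feature maps from the same backbone."""
    amap = torch.zeros(out_size, out_size) if mode == "add" else torch.ones(out_size, out_size)
    for fs, ft in zip(fs_list, ft_list):
        # cosine distance per spatial location: high where the reconstruction diverges from the input
        d = 1.0 - F.cosine_similarity(fs, ft, dim=1)                  # (1, H, W)
        d = F.interpolate(d.unsqueeze(1), size=out_size,
                          mode="bilinear", align_corners=True)[0, 0]  # (out_size, out_size)
        amap = amap + d if mode == "add" else amap * d
    return amap  # typically Gaussian-smoothed afterwards before scoring

# usage with dummy features (shapes are illustrative only):
# fs = [torch.randn(1, 64, 64, 64), torch.randn(1, 128, 32, 32)]
# ft = [f + 0.1 * torch.randn_like(f) for f in fs]
# amap = anomaly_map_from_features(fs, ft, out_size=256)
```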
import torch
import os
import logging
import timm
import torch.nn as nn
import numpy as np
import pytorch_lightning as pl
import itertools
from torch.optim.lr_scheduler import LambdaLR
from einops import rearrange, repeat
from contextlib import contextmanager, nullcontext
from functools import partial
from tqdm import tqdm
from torchvision.utils import make_grid
from pytorch_lightning.utilities.distributed import rank_zero_only
from omegaconf import ListConfig
from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
from ldm.modules.ema import LitEma
from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
from ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL
from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
from ldm.models.diffusion.ddim import DDIMSampler
from scipy.ndimage import gaussian_filter
from utils.util import cal_anomaly_map, log_local, create_logger
from utils.eval_helper import dump, log_metrics, merge_together, performances
14,064
return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) def get_v(self, x, noise, t): return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x.shape) * noise - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x.shape) * x ) def get_loss(self, pred, target, mean=True): if self.loss_type == 'l1': loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == 'l2': if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction='none') else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = self.model(x_noisy, t) loss_dict = {} if self.parameterization == "eps": target = noise elif self.parameterization == "x0": target = x_start elif self.parameterization == "v": target = self.get_v(x_start, noise, t) else: raise NotImplementedError(f"Parameterization {self.parameterization} not yet supported") loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = 'train' if self.training else 'val' loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f'{log_prefix}/loss': loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: x = x[..., None] # x = rearrange(x, 'b h w c -> b c h w') x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x) return loss, loss_dict def training_step(self, batch, batch_idx): for k in self.ucg_training: p = self.ucg_training[k]["p"] val = self.ucg_training[k]["val"] if val is None: val = "" for i in range(len(batch[k])): if self.ucg_prng.choice(2, p=[1 - p, p]): batch[k][i] = val loss, loss_dict = self.shared_step(batch) self.log_dict(loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True) self.log("global_step", self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False) if self.use_scheduler: lr = self.optimizers().param_groups[0]['lr'] self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False) return loss @torch.no_grad() def validation_step(self, batch, batch_idx): input_img = batch['jpg'] input_features = self.pretrained_model(input_img) output = self.log_images_test(batch) log_local(output, batch["filename"][0]) output_img = output['samples'] output_features = self.pretrained_model(output_img) input_features = input_features[1:4] output_features = output_features[1:4] anomaly_map, _ = cal_anomaly_map(input_features, output_features, input_img.shape[-1], amap_mode='a') anomaly_map = gaussian_filter(anomaly_map, sigma=5) anomaly_map = torch.from_numpy(anomaly_map) anomaly_map_prediction = 
anomaly_map.unsqueeze(dim=0).unsqueeze(dim=1) batch['pred'] = anomaly_map_prediction batch["output"] = output_img.cpu() batch["input"] = input_img.cpu() dump(self.evl_dir, batch) @torch.no_grad() def on_validation_epoch_start(self): self.evl_dir = "npz_result"
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., make_it_fit=False, ucg_training=None, reset_ema=False, reset_num_ema_updates=False, ): super().__init__() assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if reset_ema: assert exists(ckpt_path) if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) if reset_ema: assert self.use_ema print(f"Resetting ema to pure model weights. 
This is useful when restoring from an ema-only checkpoint.") self.model_ema = LitEma(self.model) if reset_num_ema_updates: print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ") assert self.use_ema self.model_ema.reset_num_updates() self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) else: self.register_buffer('logvar', logvar) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer('betas', to_torch(betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer('posterior_mean_coef1', to_torch( betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) self.register_buffer('posterior_mean_coef2', to_torch( (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) if self.parameterization == "eps": lvlb_weights = self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == "x0": lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. 
* 1 - torch.Tensor(alphas_cumprod)) elif self.parameterization == "v": lvlb_weights = torch.ones_like(self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))) else: raise NotImplementedError("mu not supported") lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] if self.make_it_fit: n_params = len([name for name, _ in itertools.chain(self.named_parameters(), self.named_buffers())]) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys:\n {missing}") if len(unexpected) > 0: print(f"\nUnexpected Keys:\n {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. 
""" mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def predict_start_from_z_and_v(self, x_t, t, v): # self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) # self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * v ) def predict_eps_from_z_and_v(self, x_t, t, v): return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * v + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * x_t ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1., 1.) 
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop((batch_size, channels, image_size, image_size), return_intermediates=return_intermediates) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) def get_v(self, x, noise, t): return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x.shape) * noise - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x.shape) * x ) def get_loss(self, pred, target, mean=True): if self.loss_type == 'l1': loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == 'l2': if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction='none') else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = self.model(x_noisy, t) loss_dict = {} if self.parameterization == "eps": target = noise elif self.parameterization == "x0": target = x_start elif self.parameterization == "v": target = self.get_v(x_start, noise, t) else: raise NotImplementedError(f"Parameterization {self.parameterization} not yet supported") loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = 'train' if self.training else 'val' loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f'{log_prefix}/loss': loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: 
x = x[..., None] # x = rearrange(x, 'b h w c -> b c h w') x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x) return loss, loss_dict def training_step(self, batch, batch_idx): for k in self.ucg_training: p = self.ucg_training[k]["p"] val = self.ucg_training[k]["val"] if val is None: val = "" for i in range(len(batch[k])): if self.ucg_prng.choice(2, p=[1 - p, p]): batch[k][i] = val loss, loss_dict = self.shared_step(batch) self.log_dict(loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True) self.log("global_step", self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False) if self.use_scheduler: lr = self.optimizers().param_groups[0]['lr'] self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False) return loss @torch.no_grad() def validation_step(self, batch, batch_idx): input_img = batch['jpg'] input_features = self.pretrained_model(input_img) output = self.log_images_test(batch) log_local(output, batch["filename"][0]) output_img = output['samples'] output_features = self.pretrained_model(output_img) input_features = input_features[1:4] output_features = output_features[1:4] anomaly_map, _ = cal_anomaly_map(input_features, output_features, input_img.shape[-1], amap_mode='a') anomaly_map = gaussian_filter(anomaly_map, sigma=5) anomaly_map = torch.from_numpy(anomaly_map) anomaly_map_prediction = anomaly_map.unsqueeze(dim=0).unsqueeze(dim=1) batch['pred'] = anomaly_map_prediction batch["output"] = output_img.cpu() batch["input"] = input_img.cpu() dump(self.evl_dir, batch) @torch.no_grad() def on_validation_epoch_start(self): self.evl_dir = "npz_result"
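Both `q_sample` and the DDIM `stochastic_encode` in the code above rely on the closed-form forward process x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps, which jumps from clean data to any noise level in a single step. A standalone sketch under a linear beta schedule follows; the names and shapes are illustrative assumptions, not the module's exact buffers.

```python
import torch

def q_sample(x0, t, alphas_cumprod, noise=None):
    """Sample x_t ~ q(x_t | x_0) in one step; t is a (B,) tensor of integer timesteps."""
    noise = torch.randn_like(x0) if noise is None else noise
    a_bar = alphas_cumprod[t].view(-1, *([1] * (x0.ndim - 1)))  # broadcast over non-batch dims
    return a_bar.sqrt() * x0 + (1.0 - a_bar).sqrt() * noise

# usage:
# betas = torch.linspace(1e-4, 2e-2, 1000)            # linear schedule, as in the class above
# alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
# x0 = torch.randn(8, 3, 32, 32)
# t = torch.randint(0, 1000, (8,))
# xt = q_sample(x0, t, alphas_cumprod)
```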
self.logger_val = create_logger("global_logger", "log/")
19
2023-10-30 14:21:09+00:00
16k
nv-tlabs/trace
tbsim/algos/algos.py
[ { "identifier": "batch_utils", "path": "tbsim/utils/batch_utils.py", "snippet": "def batch_utils():\n return trajdataBatchUtils()" }, { "identifier": "Action", "path": "tbsim/policies/common.py", "snippet": "class Action(Trajectory):\n pass" }, { "identifier": "DiffuserModel", "path": "tbsim/models/trace.py", "snippet": "class DiffuserModel(nn.Module):\n '''\n TRACE model.\n '''\n def __init__(\n self,\n map_encoder_model_arch: str,\n input_image_shape,\n map_feature_dim: int,\n map_grid_feature_dim: int,\n diffuser_model_arch: str,\n horizon: int,\n observation_dim: int, \n action_dim: int,\n output_dim: int,\n cond_feature_dim = 256,\n rasterized_map = True,\n use_map_feat_global = False,\n use_map_feat_grid = True,\n hist_num_frames = 31,\n hist_feature_dim = 128,\n n_timesteps=1000,\n loss_type='l2', \n action_weight=1.0, \n loss_discount=1.0, \n dim_mults=(1, 2, 4, 8),\n dynamics_type=None,\n dynamics_kwargs={},\n base_dim=32,\n diffuser_input_mode='state_and_action',\n use_conditioning=True,\n cond_fill_value=-1.0,\n # norm info is ([add_coeffs, div_coeffs])\n diffuser_norm_info=([-17.5, 0, 0, 0, 0, 0],[22.5, 10, 40, 3.14, 500, 31.4]),\n # if using non-rasterized histories, need these\n agent_hist_norm_info=([0.0, 0.0, 0.0, 0.0, 0.0], [1.0, 1.0, 1.0, 1.0, 1.0]),\n neighbor_hist_norm_info=([0.0, 0.0, 0.0, 0.0, 0.0], [1.0, 1.0, 1.0, 1.0, 1.0]),\n dt=0.1,\n ) -> None:\n\n super().__init__()\n\n # this applies to map and past NEIGHBOR conditioning only\n # curr state or past ego trajecotry are always given\n self.use_conditioning = use_conditioning\n # for test-time classifier-free guidance, if desired\n self.cond_fill_value = cond_fill_value \n\n self.rasterized_map = rasterized_map\n\n cond_in_feat_size = 0\n cond_out_feat_size = cond_feature_dim\n\n # history encoding\n self.agent_hist_encoder = self.neighbor_hist_encoder = None\n # ego history is ALWAYS used as conditioning\n self.agent_hist_encoder = AgentHistoryEncoder(hist_num_frames,\n out_dim=hist_feature_dim,\n use_norm=True,\n norm_info=agent_hist_norm_info)\n cond_in_feat_size += hist_feature_dim\n\n if self.use_conditioning:\n self.neighbor_hist_encoder = NeighborHistoryEncoder(hist_num_frames,\n out_dim=hist_feature_dim,\n use_norm=True,\n norm_info=neighbor_hist_norm_info)\n cond_in_feat_size += hist_feature_dim\n\n # map encoding\n self.map_encoder = None\n self.use_map_feat_global = use_map_feat_global\n self.use_map_feat_grid = use_map_feat_grid\n self.input_image_shape = input_image_shape\n if self.use_conditioning and self.rasterized_map:\n self.map_encoder = MapEncoder(\n model_arch=map_encoder_model_arch,\n input_image_shape=input_image_shape,\n global_feature_dim=map_feature_dim if self.use_map_feat_global else None,\n grid_feature_dim=map_grid_feature_dim if self.use_map_feat_grid else None,\n )\n\n if self.use_map_feat_global:\n cond_in_feat_size += map_feature_dim\n\n # MLP to combine conditioning from all sources\n combine_layer_dims = (cond_in_feat_size, cond_in_feat_size, cond_out_feat_size, cond_out_feat_size)\n self.process_cond_mlp = base_models.MLP(cond_in_feat_size,\n cond_out_feat_size,\n combine_layer_dims,\n normalization=True)\n\n self._dynamics_type = dynamics_type\n self._dynamics_kwargs = dynamics_kwargs\n self._create_dynamics()\n \n # ----- diffuser -----\n self.dt = dt\n # x, y, vel, yaw, acc, yawvel\n assert len(diffuser_norm_info) == 2\n norm_add_coeffs = diffuser_norm_info[0]\n norm_div_coeffs = diffuser_norm_info[1]\n assert len(norm_add_coeffs) == 6\n assert len(norm_div_coeffs) == 6\n 
self.add_coeffs = np.array(norm_add_coeffs).astype('float32')\n self.div_coeffs = np.array(norm_div_coeffs).astype('float32')\n \n self.diffuser_input_mode = diffuser_input_mode\n\n if diffuser_input_mode == 'state_and_action':\n self.default_chosen_inds = [0, 1, 2, 3, 4, 5]\n else:\n raise\n \n self.horizon = horizon\n \n self.observation_dim = observation_dim\n self.action_dim = action_dim\n self.transition_dim = observation_dim + action_dim\n self.output_dim = output_dim\n \n if diffuser_model_arch == \"TemporalMapUnet\":\n transition_in_dim = self.transition_dim\n if self.use_map_feat_grid and self.map_encoder is not None:\n # will be appending map features to each step of trajectory\n transition_in_dim += map_grid_feature_dim\n self.model = TemporalMapUnet(horizon=horizon,\n transition_dim=transition_in_dim,\n cond_dim=cond_out_feat_size,\n output_dim=self.output_dim,\n dim=base_dim,\n dim_mults=dim_mults,\n )\n else:\n print('unknown diffuser_model_arch:', diffuser_model_arch)\n raise\n\n betas = cosine_beta_schedule(n_timesteps)\n alphas = 1. - betas\n alphas_cumprod = torch.cumprod(alphas, axis=0)\n alphas_cumprod_prev = torch.cat([torch.ones(1), alphas_cumprod[:-1]])\n\n self.n_timesteps = int(n_timesteps)\n\n self.register_buffer('betas', betas)\n self.register_buffer('alphas_cumprod', alphas_cumprod)\n self.register_buffer('alphas_cumprod_prev', alphas_cumprod_prev)\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', torch.sqrt(alphas_cumprod))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', torch.sqrt(1. - alphas_cumprod))\n self.register_buffer('log_one_minus_alphas_cumprod', torch.log(1. - alphas_cumprod))\n self.register_buffer('sqrt_recip_alphas_cumprod', torch.sqrt(1. / alphas_cumprod))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', torch.sqrt(1. / alphas_cumprod - 1))\n\n # calculations for posterior q(x_{t-1} | x_t, x_0)\n posterior_variance = betas * (1. - alphas_cumprod_prev) / (1. - alphas_cumprod)\n self.register_buffer('posterior_variance', posterior_variance)\n\n # calculations for class-free guidance\n self.sqrt_alphas_over_one_minus_alphas_cumprod = torch.sqrt(alphas_cumprod / (1.0 - alphas_cumprod))\n self.sqrt_recip_one_minus_alphas_cumprod = 1.0 / torch.sqrt(1. - alphas_cumprod)\n\n ## log calculation clipped because the posterior variance\n ## is 0 at the beginning of the diffusion chain\n self.register_buffer('posterior_log_variance_clipped',\n torch.log(torch.clamp(posterior_variance, min=1e-20)))\n self.register_buffer('posterior_mean_coef1',\n betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))\n self.register_buffer('posterior_mean_coef2',\n (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. 
- alphas_cumprod))\n\n ## get loss coefficients and initialize objective\n loss_weights = self.get_loss_weights(action_weight, loss_discount)\n self.loss_fn = Losses[loss_type](loss_weights, self.action_dim)\n\n # for guided sampling\n self.current_guidance = None\n\n #------------------------------------------ guidance utils ------------------------------------------#\n\n def set_guidance(self, guidance_config_list, example_batch=None):\n '''\n Instantiates test-time guidance functions using the list of configs (dicts) passed in.\n '''\n if guidance_config_list is not None:\n if len(guidance_config_list) > 0 and verify_guidance_config_list(guidance_config_list):\n print('Instantiating test-time guidance with configs:')\n print(guidance_config_list)\n self.current_guidance = DiffuserGuidance(guidance_config_list, example_batch)\n\n def update_guidance(self, **kwargs):\n if self.current_guidance is not None:\n self.current_guidance.update(**kwargs)\n\n def clear_guidance(self):\n self.current_guidance = None\n\n #------------------------------------------ utility ------------------------------------------#\n def _create_dynamics(self):\n if self._dynamics_type in [\"Unicycle\", dynamics.DynType.UNICYCLE]:\n self.dyn = dynamics.Unicycle(\n \"dynamics\",\n max_steer=self._dynamics_kwargs[\"max_steer\"],\n max_yawvel=self._dynamics_kwargs[\"max_yawvel\"],\n acce_bound=self._dynamics_kwargs[\"acce_bound\"]\n )\n else:\n self.dyn = None\n\n def get_aux_info(self, data_batch, include_class_free_cond=False):\n N = data_batch[\"history_positions\"].size(0)\n device = data_batch[\"history_positions\"].device\n\n cond_feat_in = torch.empty((N,0)).to(device)\n non_cond_feat_in = torch.empty((N,0)).to(device)\n\n #\n # current ego state\n #\n # always need this for rolling out actions\n if self._dynamics_type is not None:\n curr_states = batch_utils().get_current_states(data_batch, dyn_type=self.dyn.type())\n else:\n curr_states = None\n\n #\n # rasterized map\n #\n map_grid_feat = map_grid_feat_non_cond = raster_from_agent = None\n if self.map_encoder is not None:\n image_batch = data_batch[\"image\"]\n map_global_feat, map_grid_feat = self.map_encoder(image_batch)\n if self.use_map_feat_global:\n cond_feat_in = torch.cat([cond_feat_in, map_global_feat], dim=-1)\n if self.use_map_feat_grid and self.map_encoder is not None:\n raster_from_agent = data_batch[\"raster_from_agent\"]\n\n if include_class_free_cond:\n image_non_cond = torch.ones_like(image_batch) * self.cond_fill_value\n map_global_feat_non_cond, map_grid_feat_non_cond = self.map_encoder(image_non_cond)\n if self.use_map_feat_global:\n non_cond_feat_in = torch.cat([non_cond_feat_in, map_global_feat_non_cond], dim=-1)\n\n #\n # ego history\n #\n if self.agent_hist_encoder is not None:\n agent_hist_feat = self.agent_hist_encoder(data_batch[\"history_positions\"],\n data_batch[\"history_yaws\"],\n data_batch[\"history_speeds\"],\n data_batch[\"extent\"],\n data_batch[\"history_availabilities\"])\n cond_feat_in = torch.cat([cond_feat_in, agent_hist_feat], dim=-1)\n if include_class_free_cond:\n # make all agents zero availability\n non_cond_avail = torch.zeros_like(data_batch[\"history_speeds\"]).to(torch.bool) # BxT\n agent_hist_feat_non_cond = self.agent_hist_encoder(data_batch[\"history_positions\"],\n data_batch[\"history_yaws\"],\n data_batch[\"history_speeds\"],\n data_batch[\"extent\"],\n non_cond_avail)\n non_cond_feat_in = torch.cat([non_cond_feat_in, agent_hist_feat_non_cond], dim=-1)\n\n #\n # neighbor history\n #\n\n # neighbor 
trajectory encoding\n if self.neighbor_hist_encoder is not None:\n neighbor_hist_feat = self.neighbor_hist_encoder(data_batch[\"all_other_agents_history_positions\"],\n data_batch[\"all_other_agents_history_yaws\"],\n data_batch[\"all_other_agents_history_speeds\"],\n data_batch[\"all_other_agents_extents\"],\n data_batch[\"all_other_agents_history_availabilities\"])\n cond_feat_in = torch.cat([cond_feat_in, neighbor_hist_feat], dim=-1) \n if include_class_free_cond:\n # make all agents zero availability\n non_cond_neighbor_avail = torch.zeros_like(data_batch[\"all_other_agents_history_speeds\"]).to(torch.bool) # BxNxT\n neighbor_hist_feat_non_cond = self.neighbor_hist_encoder(data_batch[\"all_other_agents_history_positions\"],\n data_batch[\"all_other_agents_history_yaws\"],\n data_batch[\"all_other_agents_history_speeds\"],\n data_batch[\"all_other_agents_extents\"],\n non_cond_neighbor_avail)\n non_cond_feat_in = torch.cat([non_cond_feat_in, neighbor_hist_feat_non_cond], dim=-1)\n\n #\n # Process all features together\n #\n cond_feat = self.process_cond_mlp(cond_feat_in)\n non_cond_feat = None\n if include_class_free_cond:\n non_cond_feat = self.process_cond_mlp(non_cond_feat_in)\n\n aux_info = {\n 'cond_feat': cond_feat, \n 'curr_states': curr_states,\n }\n if include_class_free_cond:\n aux_info['non_cond_feat'] = non_cond_feat\n if self.use_map_feat_grid and self.map_encoder is not None:\n aux_info['map_grid_feat'] = map_grid_feat\n if include_class_free_cond:\n aux_info['map_grid_feat_non_cond'] = map_grid_feat_non_cond\n aux_info['raster_from_agent'] = raster_from_agent\n\n return aux_info\n\n def query_map_feats(self, x, map_grid_feat, raster_from_agent):\n '''\n - x : (B, T, D)\n - map_grid_feat : (B, C, H, W)\n - raster_from_agent: (B, 3, 3)\n '''\n B, T, _ = x.size()\n _, C, Hfeat, Wfeat = map_grid_feat.size()\n\n # unscale to agent coords\n pos_traj = self.descale_traj(x.detach())[:,:,:2]\n # convert to raster frame\n raster_pos_traj = transform_points_tensor(pos_traj, raster_from_agent)\n\n # scale to the feature map size\n _, H, W = self.input_image_shape\n xscale = Wfeat / W\n yscale = Hfeat / H\n raster_pos_traj[:,:,0] = raster_pos_traj[:,:,0] * xscale\n raster_pos_traj[:,:,1] = raster_pos_traj[:,:,1] * yscale\n\n # interpolate into feature grid\n feats_out = query_feature_grid(\n raster_pos_traj,\n map_grid_feat\n )\n feats_out = feats_out.reshape((B, T, -1))\n return feats_out\n\n def get_state_and_action_from_data_batch(self, data_batch, chosen_inds=[]):\n '''\n Extract state and(or) action from the data_batch from data_batch\n\n Input:\n data_batch: dict\n Output:\n x: (batch_size, num_steps, len(chosen_inds)).\n '''\n if len(chosen_inds) == 0:\n chosen_inds = self.default_chosen_inds\n\n # NOTE: for predicted agent, history and future with always be fully available\n traj_state = torch.cat(\n (data_batch[\"target_positions\"], data_batch[\"target_yaws\"]), dim=2)\n\n traj_state_and_action = convert_state_to_state_and_action(traj_state, data_batch[\"curr_speed\"], self.dt)\n\n return traj_state_and_action[..., chosen_inds]\n \n def convert_action_to_state_and_action(self, x_out, aux_info, scaled_input=True, descaled_output=False):\n '''\n Apply dynamics on input action trajectory to get state+action trajectory\n Input:\n x_out: (batch_size, num_steps, 2). scaled action trajectory\n Output:\n x_out: (batch_size, num_steps, 6). 
scaled state+action trajectory\n '''\n if scaled_input:\n x_out = self.descale_traj(x_out, [4, 5])\n \n x_out_state = unicyle_forward_dynamics(\n dyn_model=self.dyn,\n initial_states=aux_info['curr_states'],\n actions=x_out,\n step_time=self.dt,\n mode='parallel'\n )\n\n x_out_all = torch.cat([x_out_state, x_out], dim=-1)\n if scaled_input and not descaled_output:\n x_out_all = self.scale_traj(x_out_all, [0, 1, 2, 3, 4, 5])\n\n return x_out_all\n\n def scale_traj(self, target_traj_orig, chosen_inds=[]):\n '''\n - traj: B x T x D\n '''\n if len(chosen_inds) == 0:\n chosen_inds = self.default_chosen_inds\n add_coeffs = self.add_coeffs[chosen_inds][None,None] \n div_coeffs = self.div_coeffs[chosen_inds][None,None] \n\n device = target_traj_orig.get_device()\n dx_add = torch.tensor(add_coeffs, device=device)\n dx_div = torch.tensor(div_coeffs, device=device)\n\n target_traj = (target_traj_orig + dx_add) / dx_div\n\n return target_traj\n\n def descale_traj(self, target_traj_orig, chosen_inds=[]):\n '''\n - traj: B x T x D\n '''\n if len(chosen_inds) == 0:\n chosen_inds = self.default_chosen_inds\n add_coeffs = self.add_coeffs[chosen_inds][None,None] \n div_coeffs = self.div_coeffs[chosen_inds][None,None] \n\n device = target_traj_orig.get_device()\n dx_add = torch.tensor(add_coeffs, device=device)\n dx_div = torch.tensor(div_coeffs, device=device) \n\n target_traj = target_traj_orig * dx_div - dx_add\n\n return target_traj\n\n \n def forward(self, data_batch: Dict[str, torch.Tensor], num_samp=1,\n return_diffusion=False,\n return_guidance_losses=False,\n class_free_guide_w=0.0,\n apply_guidance=True,\n guide_clean=False) -> Dict[str, torch.Tensor]:\n use_class_free_guide = class_free_guide_w != 0.0\n aux_info = self.get_aux_info(data_batch, use_class_free_guide)\n \n cond_samp_out = self.conditional_sample(data_batch, \n horizon=None,\n aux_info=aux_info,\n return_diffusion=return_diffusion,\n return_guidance_losses=return_guidance_losses,\n num_samp=num_samp,\n class_free_guide_w=class_free_guide_w,\n apply_guidance=apply_guidance,\n guide_clean=guide_clean)\n traj_init = cond_samp_out['pred_traj']\n diff_init = guide_losses = None\n if return_diffusion:\n diff_init = cond_samp_out['diffusion']\n if return_guidance_losses:\n guide_losses = cond_samp_out['guide_losses']\n\n traj = self.descale_traj(traj_init)\n if diff_init is not None:\n diff_steps = self.descale_traj(diff_init)\n else:\n diff_steps = None\n\n if self.diffuser_input_mode in ['state_and_action']:\n traj = traj[..., [0, 1, 3]]\n else:\n raise\n\n pred_positions = traj[..., :2]\n pred_yaws = traj[..., 2:3]\n\n out_dict = {\n \"trajectories\": traj,\n \"predictions\": {\"positions\": pred_positions, \"yaws\": pred_yaws},\n }\n if diff_steps is not None:\n out_dict[\"predictions\"][\"diffusion_steps\"] = diff_steps\n if guide_losses is not None:\n out_dict[\"predictions\"][\"guide_losses\"] = guide_losses\n if self.dyn is not None:\n out_dict[\"curr_states\"] = aux_info['curr_states']\n\n return out_dict\n\n def compute_losses(self, data_batch):\n aux_info = self.get_aux_info(data_batch)\n target_traj = self.get_state_and_action_from_data_batch(data_batch) \n\n x = self.scale_traj(target_traj)\n \n diffusion_loss, _ = self.loss(x, aux_info=aux_info)\n losses = OrderedDict(\n diffusion_loss=diffusion_loss,\n )\n return losses\n\n def get_loss_weights(self, action_weight, discount):\n '''\n sets loss coefficients for trajectory\n\n action_weight : float\n coefficient on first action loss\n discount : float\n multiplies t^th timestep 
of trajectory loss by discount**t\n '''\n self.action_weight = action_weight\n\n dim_weights = torch.ones(self.transition_dim, dtype=torch.float32)\n\n ## decay loss with trajectory timestep: discount**t\n discounts = discount ** torch.arange(self.horizon, dtype=torch.float)\n discounts = discounts / discounts.mean()\n loss_weights = torch.einsum('h,t->ht', discounts, dim_weights)\n ## manually set a0 weight\n loss_weights[0, -self.action_dim:] = action_weight\n\n return loss_weights\n\n #------------------------------------------ sampling ------------------------------------------#\n def predict_start_from_noise(self, x_t, t, noise, force_noise=False):\n if force_noise:\n return (\n extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t -\n extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise\n )\n else:\n return noise\n\n def predict_noise_from_start(self, x_t, t, x_start):\n return (\n extract(self.sqrt_recip_one_minus_alphas_cumprod.to(x_t.device), t, x_t.shape) * x_t -\n extract(self.sqrt_alphas_over_one_minus_alphas_cumprod.to(x_t.device), t, x_t.shape) * x_start\n )\n\n def q_posterior(self, x_start, x_t, t):\n posterior_mean = (\n extract(self.posterior_mean_coef1, t, x_t.shape) * x_start +\n extract(self.posterior_mean_coef2, t, x_t.shape) * x_t\n )\n posterior_variance = extract(self.posterior_variance, t, x_t.shape)\n posterior_log_variance_clipped = extract(self.posterior_log_variance_clipped, t, x_t.shape)\n return posterior_mean, posterior_variance, posterior_log_variance_clipped\n\n def p_mean_variance(self, x, t, aux_info={}, class_free_guide_w=0.0):\n t_inp = t\n\n x_model_in = x\n if self.use_map_feat_grid and self.map_encoder is not None:\n # get features from map and append to the trajectory\n map_feat_traj = self.query_map_feats(x_model_in.detach(),\n aux_info['map_grid_feat'],\n aux_info['raster_from_agent'])\n x_model_in = torch.cat([x_model_in, map_feat_traj], dim=-1)\n\n model_prediction = self.model(x_model_in, aux_info['cond_feat'], t_inp)\n\n if self.diffuser_input_mode == 'state_and_action':\n x_tmp = x[..., 4:].detach()\n else:\n raise\n\n if class_free_guide_w != 0.0:\n # now run non-cond once\n x_model_non_cond_in = x\n if self.use_map_feat_grid and self.map_encoder is not None:\n # get features from map and append to the trajectory\n map_feat_traj = self.query_map_feats(x_model_non_cond_in.detach(),\n aux_info['map_grid_feat_non_cond'],\n aux_info['raster_from_agent'])\n x_model_non_cond_in = torch.cat([x_model_non_cond_in, map_feat_traj], dim=-1)\n model_non_cond_prediction = self.model(x_model_non_cond_in, aux_info['non_cond_feat'], t_inp)\n\n # and combine to get actual model prediction (in noise space as in original paper)\n model_pred_noise = self.predict_noise_from_start(x_tmp, t=t, x_start=model_prediction)\n model_non_cond_pred_noise = self.predict_noise_from_start(x_tmp, t=t, x_start=model_non_cond_prediction)\n\n class_free_guide_noise = (1 + class_free_guide_w)*model_pred_noise - class_free_guide_w*model_non_cond_pred_noise\n\n model_prediction = self.predict_start_from_noise(x_tmp, t=t, noise=class_free_guide_noise, force_noise=True)\n\n x_recon = self.predict_start_from_noise(x_tmp, t=t, noise=model_prediction)\n \n model_mean, posterior_variance, posterior_log_variance = self.q_posterior(\n x_start=x_recon, x_t=x_tmp, t=t)\n return model_mean, posterior_variance, posterior_log_variance, (x_recon, x_tmp, t)\n\n def guidance(self, x, data_batch, aux_info, num_samp=1,\n return_grad_of=None):\n '''\n estimate the gradient of rule 
reward w.r.t. the input trajectory\n Input:\n x: [batch_size*num_samp, time_steps, feature_dim]. scaled input trajectory.\n data_batch: additional info.\n aux_info: additional info.\n return_grad_of: which variable to take gradient of guidance loss wrt, if not given,\n takes wrt the input x.\n '''\n assert self.current_guidance is not None, 'Must instantiate guidance object before calling'\n bsize = int(x.size(0) / num_samp)\n num_t = x.size(1)\n with torch.enable_grad():\n # losses are applied on unscaled trajectories containing both states and actions\n if self.diffuser_input_mode in ['state_and_action']:\n # forward dynamics to get actions\n x_all = self.convert_action_to_state_and_action(x, aux_info, scaled_input=True, descaled_output=True)\n else:\n raise\n\n # compute losses and gradient\n x_loss = x_all.reshape((bsize, num_samp, num_t, 6))\n tot_loss, per_losses = self.current_guidance.compute_guidance_loss(x_loss, data_batch)\n # print(tot_loss)\n tot_loss.backward()\n guide_grad = x.grad if return_grad_of is None else return_grad_of.grad\n\n return guide_grad, per_losses\n\n @torch.no_grad()\n def p_sample(self, x, t, data_batch, aux_info={}, num_samp=1, class_free_guide_w=0.0, \n apply_guidance=True, guide_clean=False, eval_final_guide_loss=False):\n b, *_, device = *x.shape, x.device\n with_func = torch.no_grad\n if self.current_guidance is not None and apply_guidance and guide_clean:\n # will need to take grad wrt noisy input\n x = x.detach()\n x.requires_grad_()\n with_func = torch.enable_grad\n\n with with_func():\n # get prior mean and variance for next step\n model_mean, _, model_log_variance, q_posterior_in = self.p_mean_variance(x=x, t=t, aux_info=aux_info,\n class_free_guide_w=class_free_guide_w)\n\n # no noise or guidance when t == 0\n # i.e. use the mean of the distribution predicted at the final step rather than sampling.\n nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))\n\n noise = torch.randn_like(model_mean)\n sigma = (0.5 * model_log_variance).exp()\n\n # compute guidance\n guide_losses = None\n guide_grad = torch.zeros_like(model_mean)\n if self.current_guidance is not None and apply_guidance:\n if guide_clean:\n # want to guide the predicted clean traj from model, not the noisy one\n model_clean_pred = q_posterior_in[0]\n x_guidance = model_clean_pred\n return_grad_of = x\n else:\n x_guidance = model_mean.clone().detach()\n return_grad_of = x_guidance\n x_guidance.requires_grad_()\n\n guide_grad, guide_losses = self.guidance(x_guidance, data_batch, aux_info, num_samp=num_samp, return_grad_of=return_grad_of)\n\n if guide_clean and self.diffuser_input_mode == 'state_and_action':\n # only need the grad w.r.t noisy action\n guide_grad = guide_grad[..., [4,5]]\n\n # NOTE: empirally, scaling by the variance (sigma) seems to degrade results\n guide_grad = nonzero_mask * guide_grad #* sigma\n\n noise = nonzero_mask * sigma * noise\n\n if self.current_guidance is not None and guide_clean:\n # perturb clean trajectory\n guided_clean = q_posterior_in[0] - guide_grad\n # use the same noisy input again\n guided_x_t = q_posterior_in[1]\n # re-compute next step distribution with guided clean & noisy trajectories\n model_mean, _, _ = self.q_posterior(x_start=guided_clean,\n x_t=guided_x_t,\n t=q_posterior_in[2])\n # NOTE: variance is not dependent on x_start, so it won't change. 
Therefore, fine to use same noise.\n x_out = model_mean + noise\n else:\n x_out = model_mean - guide_grad + noise\n\n if self.current_guidance is not None and eval_final_guide_loss:\n # eval guidance loss one last time for filtering if desired\n # (even if not applied during sampling)\n _, guide_losses = self.guidance(x_out.clone().detach().requires_grad_(), data_batch, aux_info, num_samp=num_samp)\n \n # convert action to state+action\n if self.diffuser_input_mode == 'state_and_action':\n x_out = self.convert_action_to_state_and_action(x_out, aux_info)\n \n return x_out, guide_losses\n\n \n @torch.no_grad()\n def p_sample_loop(self, shape, data_batch, num_samp,\n aux_info={},\n return_diffusion=False,\n return_guidance_losses=False,\n class_free_guide_w=0.0,\n apply_guidance=True,\n guide_clean=False):\n device = self.betas.device\n\n batch_size = shape[0]\n # sample from base distribution\n x = torch.randn(shape, device=device) # (B, N, T, D)\n\n x = TensorUtils.join_dimensions(x, begin_axis=0, end_axis=2) # B*N, T, D\n aux_info = TensorUtils.repeat_by_expand_at(aux_info, repeats=num_samp, dim=0)\n\n if self.current_guidance is not None and not apply_guidance:\n print('DIFFUSER: Note, not using guidance during sampling, only evaluating guidance loss at very end...')\n\n # convert action to state+action\n if self.diffuser_input_mode == 'state_and_action':\n x = self.convert_action_to_state_and_action(x[..., [4, 5]], aux_info)\n\n if return_diffusion: diffusion = [x]\n\n stride = 1 # NOTE: different from training time if > 1\n steps = [i for i in reversed(range(0, self.n_timesteps, stride))]\n for i in steps:\n timesteps = torch.full((batch_size*num_samp,), i, device=device, dtype=torch.long)\n \n x, guide_losses = self.p_sample(x, timesteps, data_batch,\n aux_info=aux_info,\n num_samp=num_samp,\n class_free_guide_w=class_free_guide_w,\n apply_guidance=apply_guidance,\n guide_clean=guide_clean,\n eval_final_guide_loss=(i == steps[-1]))\n \n\n if return_diffusion: diffusion.append(x)\n\n if guide_losses is not None:\n print('===== GUIDANCE LOSSES ======')\n for k,v in guide_losses.items():\n print('%s: %.012f' % (k, np.nanmean(v.cpu())))\n\n x = TensorUtils.reshape_dimensions(x, begin_axis=0, end_axis=1, target_dims=(batch_size, num_samp))\n\n out_dict = {'pred_traj' : x}\n if return_guidance_losses:\n out_dict['guide_losses'] = guide_losses\n if return_diffusion:\n diffusion = [TensorUtils.reshape_dimensions(cur_diff, begin_axis=0, end_axis=1, target_dims=(batch_size, num_samp))\n for cur_diff in diffusion]\n out_dict['diffusion'] = torch.stack(diffusion, dim=3)\n\n return out_dict\n\n @torch.no_grad()\n def conditional_sample(self, data_batch, horizon=None, num_samp=1, class_free_guide_w=0.0, **kwargs):\n batch_size = data_batch['history_positions'].size()[0]\n horizon = horizon or self.horizon\n shape = (batch_size, num_samp, horizon, self.transition_dim)\n\n return self.p_sample_loop(shape, data_batch, num_samp, class_free_guide_w=class_free_guide_w, **kwargs)\n\n #------------------------------------------ training ------------------------------------------#\n\n def q_sample(self, x_start, t, noise): \n sample = (\n extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start +\n extract(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise\n )\n return sample\n\n def p_losses(self, x_start_init, t, aux_info={}):\n noise_init = torch.randn_like(x_start_init)\n\n x_start = x_start_init\n noise = noise_init\n x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)\n t_inp = 
t\n\n if self.diffuser_input_mode == 'state_and_action':\n x_action_noisy = x_noisy[..., [4, 5]]\n x_noisy = self.convert_action_to_state_and_action(x_action_noisy, aux_info)\n\n if self.use_map_feat_grid and self.map_encoder is not None:\n # get features from map and append to the trajectory\n map_feat_traj = self.query_map_feats(x_noisy.detach(),\n aux_info['map_grid_feat'],\n aux_info['raster_from_agent'])\n x_noisy = torch.cat([x_noisy, map_feat_traj], dim=-1)\n\n noise = self.model(x_noisy, aux_info['cond_feat'], t_inp)\n\n if self.diffuser_input_mode == 'state_and_action':\n x_recon_action = self.predict_start_from_noise(x_action_noisy, t=t, noise=noise)\n x_recon = self.convert_action_to_state_and_action(x_recon_action, aux_info)\n else:\n x_recon = self.predict_start_from_noise(x_noisy, t=t, noise=noise)\n\n # Note: we convert noise into x_start for loss estimation since we need to apply forward dynamics\n loss, info = self.loss_fn(x_recon, x_start)\n\n return loss, info\n\n def loss(self, x, aux_info={}):\n batch_size = len(x)\n t = torch.randint(0, self.n_timesteps, (batch_size,), device=x.device).long()\n \n return self.p_losses(x, t, aux_info=aux_info)" }, { "identifier": "EMA", "path": "tbsim/models/trace_helpers.py", "snippet": "class EMA():\n '''\n empirical moving average\n '''\n def __init__(self, beta):\n super().__init__()\n self.beta = beta\n\n def update_model_average(self, ma_model, current_model):\n with torch.no_grad():\n ema_state_dict = ma_model.state_dict()\n for key, value in current_model.state_dict().items():\n ema_value = ema_state_dict[key]\n ema_value.copy_(self.beta * ema_value + (1. - self.beta) * value)" }, { "identifier": "choose_action_from_guidance", "path": "tbsim/utils/guidance_loss.py", "snippet": "def choose_action_from_guidance(preds, obs_dict, guide_configs, guide_losses):\n B, N, T, _ = preds[\"positions\"].size()\n # arbitrarily use the first sample as the action if no guidance given\n act_idx = torch.zeros((B), dtype=torch.long, device=preds[\"positions\"].device)\n # choose sample closest to desired guidance\n accum_guide_loss = torch.stack([v for k,v in guide_losses.items()], dim=2)\n # each scene separately since may contain different guidance\n scount = 0\n for sidx in range(len(guide_configs)):\n scene_guide_cfg = guide_configs[sidx]\n ends = scount + len(scene_guide_cfg)\n scene_guide_loss = accum_guide_loss[..., scount:ends]\n scount = ends\n scene_mask = ~torch.isnan(torch.sum(scene_guide_loss, dim=[1,2]))\n scene_guide_loss = scene_guide_loss[scene_mask].cpu()\n scene_guide_loss = torch.nansum(scene_guide_loss, dim=-1)\n is_scene_level = np.array([guide_cfg.name in ['agent_collision', 'social_group'] for guide_cfg in scene_guide_cfg])\n if np.sum(is_scene_level) > 0: \n # choose which sample minimizes at the scene level (where each sample is a \"scene\")\n scene_act_idx = torch.argmin(torch.sum(scene_guide_loss, dim=0))\n else:\n # each agent can choose the sample that minimizes guidance loss independently\n scene_act_idx = torch.argmin(scene_guide_loss, dim=-1)\n\n act_idx[scene_mask] = scene_act_idx.to(act_idx.device)\n\n return act_idx" }, { "identifier": "choose_action_from_gt", "path": "tbsim/utils/guidance_loss.py", "snippet": "def choose_action_from_gt(preds, obs_dict):\n B, N, T, _ = preds[\"positions\"].size()\n # arbitrarily use the first sample as the action if no gt given\n act_idx = torch.zeros((B), dtype=torch.long, device=preds[\"positions\"].device)\n if \"target_positions\" in obs_dict:\n print(\"DIFFUSER: WARNING 
using sample closest to GT from diffusion model!\")\n # use the sample closest to GT\n # pred and gt may not be the same if gt is missing data at the end\n endT = min(T, obs_dict[\"target_positions\"].size(1))\n pred_pos = preds[\"positions\"][:,:,:endT]\n gt_pos = obs_dict[\"target_positions\"][:,:endT].unsqueeze(1)\n gt_valid = obs_dict[\"target_availabilities\"][:,:endT].unsqueeze(1).expand((B, N, endT))\n err = torch.norm(pred_pos - gt_pos, dim=-1)\n err[~gt_valid] = torch.nan # so doesn't affect\n ade = torch.nanmean(err, dim=-1) # B x N\n res_valid = torch.sum(torch.isnan(ade), dim=-1) == 0\n if torch.sum(res_valid) > 0:\n min_ade_idx = torch.argmin(ade, dim=-1)\n act_idx[res_valid] = min_ade_idx[res_valid]\n else:\n print('Could not choose sample based on GT, as no GT in data')\n\n return act_idx" } ]
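In the `DiffuserModel` snippet above, `p_mean_variance` blends the conditional and unconditional predictions in noise space before converting back to a clean-trajectory estimate. The combination itself is a one-liner; the sketch below restates it with assumed tensor names rather than the model's internal variables.

```python
import torch

def classifier_free_guidance(eps_cond: torch.Tensor, eps_uncond: torch.Tensor, w: float) -> torch.Tensor:
    """w = 0 recovers the conditional prediction; w > 0 extrapolates away from the
    unconditional one, strengthening the effect of the conditioning features."""
    return (1.0 + w) * eps_cond - w * eps_uncond

# e.g. eps_cond, eps_uncond come from two forward passes, one with real conditioning
# features and one with dropped-out (filled) conditioning:
# eps = classifier_free_guidance(eps_cond, eps_uncond, w=1.5)
```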
import numpy as np
import copy
import torch
import torch.nn as nn
import torch.optim as optim
import pytorch_lightning as pl
import torch.nn.functional as F
import tbsim.utils.tensor_utils as TensorUtils
import tbsim.utils.metrics as Metrics
from tbsim.utils.batch_utils import batch_utils
from tbsim.policies.common import Action
from tbsim.models.trace import DiffuserModel
from tbsim.models.trace_helpers import EMA
from tbsim.utils.guidance_loss import choose_action_from_guidance, choose_action_from_gt
12,314
batch = batch_utils().parse_batch(batch) # drop out conditioning if desired if self.use_cond: if self.use_rasterized_map: num_sem_layers = batch['maps'].size(1) if self.cond_drop_map_p > 0: drop_mask = torch.rand((batch["image"].size(0))) < self.cond_drop_map_p # only fill the last num_sem_layers as these correspond to semantic map batch["image"][drop_mask, -num_sem_layers:] = self.cond_fill_val if self.cond_drop_neighbor_p > 0: # drop actual neighbor trajectories instead # set availability to False, will be zeroed out in model B = batch["all_other_agents_history_availabilities"].size(0) drop_mask = torch.rand((B)) < self.cond_drop_neighbor_p batch["all_other_agents_history_availabilities"][drop_mask] = 0 # diffuser only take the data to estimate loss losses = self.nets["policy"].compute_losses(batch) total_loss = 0.0 for lk, l in losses.items(): losses[lk] = l * self.algo_config.loss_weights[lk] total_loss += losses[lk] for lk, l in losses.items(): self.log("train/losses_" + lk, l) return { "loss": total_loss, "all_losses": losses, } def validation_step(self, batch, batch_idx): cur_policy = self.nets["policy"] batch = batch_utils().parse_batch(batch) losses = TensorUtils.detach(cur_policy.compute_losses(batch)) pout = cur_policy(batch, num_samp=self.algo_config.diffuser.num_eval_samples, return_diffusion=False, return_guidance_losses=False) metrics = self._compute_metrics(pout, batch) return_dict = {"losses": losses, "metrics": metrics} # use EMA for val if self.use_ema: cur_policy = self.ema_policy ema_losses = TensorUtils.detach(cur_policy.compute_losses(batch)) pout = cur_policy(batch, num_samp=self.algo_config.diffuser.num_eval_samples, return_diffusion=False, return_guidance_losses=False) ema_metrics = self._compute_metrics(pout, batch) return_dict["ema_losses"] = ema_losses return_dict["ema_metrics"] = ema_metrics return return_dict def validation_epoch_end(self, outputs) -> None: for k in outputs[0]["losses"]: m = torch.stack([o["losses"][k] for o in outputs]).mean() self.log("val/losses_" + k, m) for k in outputs[0]["metrics"]: m = np.stack([o["metrics"][k] for o in outputs]).mean() self.log("val/metrics_" + k, m) if self.use_ema: for k in outputs[0]["ema_losses"]: m = torch.stack([o["ema_losses"][k] for o in outputs]).mean() self.log("val/ema_losses_" + k, m) for k in outputs[0]["ema_metrics"]: m = np.stack([o["ema_metrics"][k] for o in outputs]).mean() self.log("val/ema_metrics_" + k, m) def configure_optimizers(self): optim_params = self.algo_config.optim_params["policy"] return optim.Adam( params=self.nets["policy"].parameters(), lr=optim_params["learning_rate"]["initial"], ) def get_action(self, obs_dict, num_action_samples=1, class_free_guide_w=0.0, guide_as_filter_only=False, guide_with_gt=False, guide_clean=False, **kwargs): cur_policy = self.nets["policy"] # use EMA for val if self.use_ema: cur_policy = self.ema_policy cur_policy.eval() # update with current "global" timestep cur_policy.update_guidance(global_t=kwargs['step_index']) preds = self(obs_dict, num_samp=num_action_samples, class_free_guide_w=class_free_guide_w, guide_as_filter_only=guide_as_filter_only, guide_clean=guide_clean) # [B, N, T, 2] B, N, T, _ = preds["positions"].size() # arbitrarily use the first sample as the action by default act_idx = torch.zeros((B), dtype=torch.long, device=preds["positions"].device) if guide_with_gt and "target_positions" in obs_dict: act_idx = choose_action_from_gt(preds, obs_dict) elif cur_policy.current_guidance is not None: guide_losses = preds.pop("guide_losses", None) 
act_idx = choose_action_from_guidance(preds, obs_dict, cur_policy.current_guidance.guide_configs, guide_losses) action_preds = TensorUtils.map_tensor(preds, lambda x: x[torch.arange(B), act_idx]) info = dict(
class DiffuserTrafficModel(pl.LightningModule): def __init__(self, algo_config, modality_shapes, guidance_config=None): """ Creates networks and places them into @self.nets. """ super(DiffuserTrafficModel, self).__init__() self.algo_config = algo_config self.nets = nn.ModuleDict() if algo_config.diffuser_input_mode == 'state_and_action': # "Observations" are inputs to diffuser that are not outputs observation_dim = 4 # x, y, vel, yaw # "Actions" are inputs and outputs action_dim = 2 # acc, yawvel # "output" is final output of the entired denoising process output_dim = 2 # acc, yawvel else: raise self.cond_drop_map_p = algo_config.conditioning_drop_map_p self.cond_drop_neighbor_p = algo_config.conditioning_drop_neighbor_p min_cond_drop_p = min([self.cond_drop_map_p, self.cond_drop_neighbor_p]) max_cond_drop_p = max([self.cond_drop_map_p, self.cond_drop_neighbor_p]) assert min_cond_drop_p >= 0.0 and max_cond_drop_p <= 1.0 self.use_cond = self.cond_drop_map_p < 1.0 and self.cond_drop_neighbor_p < 1.0 # no need for conditioning arch if always dropping self.cond_fill_val = algo_config.conditioning_drop_fill self.use_rasterized_map = algo_config.rasterized_map if self.use_cond: if self.cond_drop_map_p > 0: print('DIFFUSER: Dropping map input conditioning with p = %f during training...' % (self.cond_drop_map_p)) if self.cond_drop_neighbor_p > 0: print('DIFFUSER: Dropping neighbor traj input conditioning with p = %f during training...' % (self.cond_drop_neighbor_p)) self.nets["policy"] = DiffuserModel( rasterized_map=algo_config.rasterized_map, use_map_feat_global=algo_config.use_map_feat_global, use_map_feat_grid=algo_config.use_map_feat_grid, map_encoder_model_arch=algo_config.map_encoder_model_arch, input_image_shape=modality_shapes["image"], # [C, H, W] map_feature_dim=algo_config.map_feature_dim, map_grid_feature_dim=algo_config.map_grid_feature_dim, hist_num_frames=algo_config.history_num_frames+1, # the current step is concat to the history hist_feature_dim=algo_config.history_feature_dim, cond_feature_dim=algo_config.cond_feat_dim, diffuser_model_arch=algo_config.diffuser_model_arch, horizon=algo_config.horizon, observation_dim=observation_dim, action_dim=action_dim, output_dim=output_dim, n_timesteps=algo_config.n_diffusion_steps, loss_type=algo_config.loss_type, action_weight=algo_config.action_weight, loss_discount=algo_config.loss_discount, dim_mults=algo_config.dim_mults, dynamics_type=algo_config.dynamics.type, dynamics_kwargs=algo_config.dynamics, base_dim=algo_config.base_dim, diffuser_input_mode=algo_config.diffuser_input_mode, use_conditioning=self.use_cond, cond_fill_value=self.cond_fill_val, diffuser_norm_info=algo_config.diffuser_norm_info, agent_hist_norm_info=algo_config.agent_hist_norm_info, neighbor_hist_norm_info=algo_config.neighbor_hist_norm_info, dt=algo_config.step_time, ) # set up initial guidance if guidance_config is not None: self.set_guidance(guidance_config) # set up EMA self.use_ema = algo_config.use_ema if self.use_ema: print('DIFFUSER: using EMA... 
val and get_action will use ema model') self.ema = EMA(algo_config.ema_decay) self.ema_policy = copy.deepcopy(self.nets["policy"]) self.ema_policy.requires_grad_(False) self.ema_update_every = algo_config.ema_step self.ema_start_step = algo_config.ema_start_step self.reset_parameters() self.cur_train_step = 0 @property def checkpoint_monitor_keys(self): if self.use_ema: return {"valLoss": "val/ema_losses_diffusion_loss"} else: return {"valLoss": "val/losses_diffusion_loss"} def forward(self, obs_dict, num_samp=1, class_free_guide_w=0.0, guide_as_filter_only=False, guide_clean=False): cur_policy = self.nets["policy"] # this function is only called at validation time, so use ema if self.use_ema: cur_policy = self.ema_policy return cur_policy(obs_dict, num_samp, return_diffusion=True, return_guidance_losses=True, class_free_guide_w=class_free_guide_w, apply_guidance=(not guide_as_filter_only), guide_clean=guide_clean)["predictions"] def _compute_metrics(self, pred_batch, data_batch): metrics = {} predictions = pred_batch["predictions"] preds = TensorUtils.to_numpy(predictions["positions"]) gt = TensorUtils.to_numpy(data_batch["target_positions"]) avail = TensorUtils.to_numpy(data_batch["target_availabilities"]) # compute ADE & FDE based on trajectory samples sample_preds = preds conf = np.ones(sample_preds.shape[0:2]) / float(sample_preds.shape[1]) metrics["ego_avg_ADE"] = Metrics.batch_average_displacement_error(gt, sample_preds, conf, avail, "mean").mean() metrics["ego_min_ADE"] = Metrics.batch_average_displacement_error(gt, sample_preds, conf, avail, "oracle").mean() metrics["ego_avg_FDE"] = Metrics.batch_final_displacement_error(gt, sample_preds, conf, avail, "mean").mean() metrics["ego_min_FDE"] = Metrics.batch_final_displacement_error(gt, sample_preds, conf, avail, "oracle").mean() # compute diversity scores based on trajectory samples metrics["ego_avg_APD"] = Metrics.batch_average_diversity(gt, sample_preds, conf, avail, "mean").mean() metrics["ego_max_APD"] = Metrics.batch_average_diversity(gt, sample_preds, conf, avail, "max").mean() metrics["ego_avg_FPD"] = Metrics.batch_final_diversity(gt, sample_preds, conf, avail, "mean").mean() metrics["ego_max_FPD"] = Metrics.batch_final_diversity(gt, sample_preds, conf, avail, "max").mean() return metrics def reset_parameters(self): self.ema_policy.load_state_dict(self.nets["policy"].state_dict()) def step_ema(self, step): if step < self.ema_start_step: self.reset_parameters() return self.ema.update_model_average(self.ema_policy, self.nets["policy"]) def training_step_end(self, batch_parts): self.cur_train_step += 1 def training_step(self, batch, batch_idx): """ Training on a single batch of data. 
Args: batch (dict): dictionary with torch.Tensors sampled from a data loader and filtered by @process_batch_for_training batch_idx (int): training step number (relative to the CURRENT epoch) - required by some Algos that need to perform staged training and early stopping Returns: info (dict): dictionary of relevant inputs, outputs, and losses that might be relevant for logging """ if self.use_ema and self.cur_train_step % self.ema_update_every == 0: self.step_ema(self.cur_train_step) batch = batch_utils().parse_batch(batch) # drop out conditioning if desired if self.use_cond: if self.use_rasterized_map: num_sem_layers = batch['maps'].size(1) if self.cond_drop_map_p > 0: drop_mask = torch.rand((batch["image"].size(0))) < self.cond_drop_map_p # only fill the last num_sem_layers as these correspond to semantic map batch["image"][drop_mask, -num_sem_layers:] = self.cond_fill_val if self.cond_drop_neighbor_p > 0: # drop actual neighbor trajectories instead # set availability to False, will be zeroed out in model B = batch["all_other_agents_history_availabilities"].size(0) drop_mask = torch.rand((B)) < self.cond_drop_neighbor_p batch["all_other_agents_history_availabilities"][drop_mask] = 0 # diffuser only take the data to estimate loss losses = self.nets["policy"].compute_losses(batch) total_loss = 0.0 for lk, l in losses.items(): losses[lk] = l * self.algo_config.loss_weights[lk] total_loss += losses[lk] for lk, l in losses.items(): self.log("train/losses_" + lk, l) return { "loss": total_loss, "all_losses": losses, } def validation_step(self, batch, batch_idx): cur_policy = self.nets["policy"] batch = batch_utils().parse_batch(batch) losses = TensorUtils.detach(cur_policy.compute_losses(batch)) pout = cur_policy(batch, num_samp=self.algo_config.diffuser.num_eval_samples, return_diffusion=False, return_guidance_losses=False) metrics = self._compute_metrics(pout, batch) return_dict = {"losses": losses, "metrics": metrics} # use EMA for val if self.use_ema: cur_policy = self.ema_policy ema_losses = TensorUtils.detach(cur_policy.compute_losses(batch)) pout = cur_policy(batch, num_samp=self.algo_config.diffuser.num_eval_samples, return_diffusion=False, return_guidance_losses=False) ema_metrics = self._compute_metrics(pout, batch) return_dict["ema_losses"] = ema_losses return_dict["ema_metrics"] = ema_metrics return return_dict def validation_epoch_end(self, outputs) -> None: for k in outputs[0]["losses"]: m = torch.stack([o["losses"][k] for o in outputs]).mean() self.log("val/losses_" + k, m) for k in outputs[0]["metrics"]: m = np.stack([o["metrics"][k] for o in outputs]).mean() self.log("val/metrics_" + k, m) if self.use_ema: for k in outputs[0]["ema_losses"]: m = torch.stack([o["ema_losses"][k] for o in outputs]).mean() self.log("val/ema_losses_" + k, m) for k in outputs[0]["ema_metrics"]: m = np.stack([o["ema_metrics"][k] for o in outputs]).mean() self.log("val/ema_metrics_" + k, m) def configure_optimizers(self): optim_params = self.algo_config.optim_params["policy"] return optim.Adam( params=self.nets["policy"].parameters(), lr=optim_params["learning_rate"]["initial"], ) def get_action(self, obs_dict, num_action_samples=1, class_free_guide_w=0.0, guide_as_filter_only=False, guide_with_gt=False, guide_clean=False, **kwargs): cur_policy = self.nets["policy"] # use EMA for val if self.use_ema: cur_policy = self.ema_policy cur_policy.eval() # update with current "global" timestep cur_policy.update_guidance(global_t=kwargs['step_index']) preds = self(obs_dict, num_samp=num_action_samples, 
class_free_guide_w=class_free_guide_w, guide_as_filter_only=guide_as_filter_only, guide_clean=guide_clean) # [B, N, T, 2] B, N, T, _ = preds["positions"].size() # arbitrarily use the first sample as the action by default act_idx = torch.zeros((B), dtype=torch.long, device=preds["positions"].device) if guide_with_gt and "target_positions" in obs_dict: act_idx = choose_action_from_gt(preds, obs_dict) elif cur_policy.current_guidance is not None: guide_losses = preds.pop("guide_losses", None) act_idx = choose_action_from_guidance(preds, obs_dict, cur_policy.current_guidance.guide_configs, guide_losses) action_preds = TensorUtils.map_tensor(preds, lambda x: x[torch.arange(B), act_idx]) info = dict(
action_samples=Action(
1
2023-10-31 18:43:07+00:00
16k
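The DiffuserTrafficModel code above randomly drops the rasterized-map and neighbor-trajectory conditioning during training so the diffuser can later be sampled with classifier-free guidance. A minimal sketch of that batch-level conditioning dropout, assuming a generic [B, ...] conditioning tensor and a hypothetical helper name rather than the record's batch dict:

import torch

def drop_conditioning(cond: torch.Tensor, drop_p: float, fill_value: float = 0.0) -> torch.Tensor:
    # Randomly overwrite the conditioning for a fraction of the batch so the model
    # also sees "unconditional" inputs (classifier-free-guidance-style dropout).
    if drop_p <= 0.0:
        return cond
    drop_mask = torch.rand(cond.size(0), device=cond.device) < drop_p  # [B] boolean mask
    cond = cond.clone()
    cond[drop_mask] = fill_value  # hypothetical scalar fill; the record fills map channels or zeroes availabilities
    return cond

# e.g. batch["image"] = drop_conditioning(batch["image"], drop_p=0.1)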
nv-tlabs/pacer
uhc/smpllib/smpl_local_robot.py
[ { "identifier": "Skeleton", "path": "uhc/khrylib/mocap/skeleton_local.py", "snippet": "class Skeleton:\n def __init__(self):\n self.bones = []\n self.name2bone = {}\n self.mass_scale = 1.0\n self.len_scale = 1.0\n self.dof_name = [\"x\", \"y\", \"z\"]\n self.root = None\n\n def forward_bvh(self, bone):\n bone.pos = bone.offset\n for bone_c in bone.child:\n self.forward_bvh(bone_c)\n\n def load_from_offsets(\n self,\n offsets,\n parents,\n scale,\n jrange,\n hull_dict,\n exclude_bones=None,\n channels=None,\n spec_channels=None,\n upright_start=False,\n remove_toe=False,\n real_weight_porpotion=False,\n real_weight=False,\n big_ankle = False,\n ):\n if channels is None:\n channels = [\"x\", \"y\", \"z\"]\n if exclude_bones is None:\n exclude_bones = {}\n if spec_channels is None:\n spec_channels = dict()\n self.hull_dict = hull_dict\n self.upright_start = upright_start\n self.remove_toe = remove_toe\n self.real_weight_porpotion = real_weight_porpotion\n self.real_weight = real_weight\n self.big_ankle = big_ankle\n joint_names = list(\n filter(lambda x: all([t not in x for t in exclude_bones]),\n offsets.keys()))\n dof_ind = {\"x\": 0, \"y\": 1, \"z\": 2}\n self.len_scale = scale\n self.root = Bone()\n self.root.id = 0\n self.root.name = joint_names[0]\n self.root.channels = channels\n self.name2bone[self.root.name] = self.root\n self.root.offset = offsets[self.root.name]\n self.bones.append(self.root)\n for i, joint in enumerate(joint_names[1:]):\n bone = Bone()\n bone.id = i + 1\n bone.name = joint\n\n bone.channels = (spec_channels[joint]\n if joint in spec_channels.keys() else channels)\n bone.dof_index = [dof_ind[x] for x in bone.channels]\n bone.offset = np.array(offsets[joint]) * self.len_scale\n bone.lb = np.rad2deg(jrange[joint][:, 0])\n bone.ub = np.rad2deg(jrange[joint][:, 1])\n\n self.bones.append(bone)\n self.name2bone[joint] = bone\n for bone in self.bones[1:]:\n parent_name = parents[bone.name]\n # print(parent_name)\n if parent_name in self.name2bone.keys():\n bone_p = self.name2bone[parent_name]\n bone_p.child.append(bone)\n bone.parent = bone_p\n self.forward_bvh(self.root)\n for bone in self.bones:\n if len(bone.child) == 0:\n bone.end = bone.pos.copy() + 0.002\n for c_bone, p_bone in parents.items():\n if p_bone == bone.name:\n bone.end += np.array(offsets[c_bone]) * self.len_scale\n break\n else:\n bone.end = sum([bone_c.pos for bone_c in bone.child]) / len(bone.child)\n\n def write_xml(\n self,\n fname,\n template_fname=TEMPLATE_FILE,\n offset=np.array([0, 0, 0]),\n ref_angles=None,\n bump_buffer=False,\n ):\n if ref_angles is None:\n ref_angles = {}\n parser = XMLParser(remove_blank_text=True)\n tree = parse(template_fname, parser=parser)\n worldbody = tree.getroot().find(\"worldbody\")\n self.size_buffer = {}\n self.write_xml_bodynode(self.root, worldbody, offset, ref_angles)\n\n # create actuators\n actuators = tree.getroot().find(\"actuator\")\n joints = worldbody.findall(\".//joint\")\n\n for joint in joints[1:]:\n name = joint.attrib[\"name\"]\n attr = dict()\n attr[\"name\"] = name\n attr[\"joint\"] = name\n attr[\"gear\"] = \"1\"\n SubElement(actuators, \"motor\", attr)\n if bump_buffer:\n SubElement(tree.getroot(), \"size\", {\n \"njmax\": \"700\",\n \"nconmax\": \"700\"\n })\n tree.write(fname, pretty_print=True)\n\n def write_str(\n self,\n template_fname=TEMPLATE_FILE,\n offset=np.array([0, 0, 0]),\n ref_angles=None,\n bump_buffer=False,\n ):\n if ref_angles is None:\n ref_angles = {}\n parser = XMLParser(remove_blank_text=True)\n tree = 
parse(template_fname, parser=parser)\n worldbody = tree.getroot().find(\"worldbody\")\n self.size_buffer = {}\n self.write_xml_bodynode(self.root, worldbody, offset, ref_angles)\n\n # create actuators\n actuators = tree.getroot().find(\"actuator\")\n joints = worldbody.findall(\".//joint\")\n for joint in joints:\n name = joint.attrib[\"name\"]\n attr = dict()\n attr[\"name\"] = name\n attr[\"joint\"] = name\n attr[\"gear\"] = \"500\"\n SubElement(actuators, \"motor\", attr)\n if bump_buffer:\n SubElement(tree.getroot(), \"size\", {\n \"njmax\": \"700\",\n \"nconmax\": \"700\"\n })\n\n return etree.tostring(tree, pretty_print=False)\n\n def write_xml_bodynode(self, bone, parent_node, offset, ref_angles):\n attr = dict()\n attr[\"name\"] = bone.name\n attr[\"pos\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(*(bone.pos + offset))\n node = SubElement(parent_node, \"body\", attr)\n\n # write joints\n if bone.parent is None:\n j_attr = dict()\n j_attr[\"name\"] = bone.name\n SubElement(node, \"freejoint\", j_attr)\n else:\n for i in range(len(bone.dof_index)):\n ind = bone.dof_index[i]\n axis = bone.orient[:, ind]\n j_attr = dict()\n j_attr[\"name\"] = bone.name + \"_\" + self.dof_name[ind]\n j_attr[\"type\"] = \"hinge\"\n j_attr[\"pos\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(*(bone.pos +\n offset))\n j_attr[\"axis\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(*axis)\n j_attr[\"stiffness\"] = str(GAINS[bone.name][0] )\n j_attr[\"damping\"] = str(GAINS[bone.name][1])\n j_attr[\"armature\"] = \"0.02\"\n\n if i < len(bone.lb):\n j_attr[\"range\"] = \"{0:.4f} {1:.4f}\".format(\n bone.lb[i], bone.ub[i])\n else:\n j_attr[\"range\"] = \"-180.0 180.0\"\n if j_attr[\"name\"] in ref_angles.keys():\n j_attr[\"ref\"] = f\"{ref_angles[j_attr['name']]:.1f}\"\n\n SubElement(node, \"joint\", j_attr)\n\n # write geometry\n g_attr = dict()\n g_attr[\"type\"] = GEOM_TYPES[bone.name]\n g_attr[\"contype\"] = \"1\"\n g_attr[\"conaffinity\"] = \"1\"\n if self.real_weight:\n base_density = 1000\n else:\n base_density = 500\n g_attr[\"density\"] = str(base_density)\n e1 = np.zeros(3)\n e2 = bone.end.copy() + offset\n if bone.name in [\"Torso\", \"Chest\", \"Spine\"]:\n seperation = 0.45\n else:\n seperation = 0.2\n\n # if bone.name in [\"L_Hip\"]:\n # seperation = 0.3\n\n\n e1 += e2 * seperation\n e2 -= e2 * seperation\n hull_params = self.hull_dict[bone.name]\n\n if g_attr[\"type\"] == \"capsule\":\n g_attr[\n \"fromto\"] = \"{0:.4f} {1:.4f} {2:.4f} {3:.4f} {4:.4f} {5:.4f}\".format(\n *np.concatenate([e1, e2]))\n\n side_len = np.linalg.norm(e2 - e1)\n # radius = 0.067\n # V = np.pi * radius ** 2 * ((4/3) * radius + side_len)\n\n roots = np.polynomial.polynomial.Polynomial(\n [-hull_params['volume'], 0, side_len * np.pi,\n 4 / 3 * np.pi]).roots()\n real_valued = roots.real[abs(roots.imag) < 1e-5]\n real_valued = real_valued[real_valued > 0]\n if bone.name in [\"Torso\", \"Spine\", \"L_Hip\", \"R_Hip\"]:\n real_valued *= 0.7 # ZL Hack: shrinkage\n if self.real_weight_porpotion: # If shift is enabled, shift the weight based on teh shrinkage factor\n g_attr[\"density\"] = str((1 / 0.7**2) * base_density)\n\n if bone.name in [\"Chest\"]:\n real_valued *= 0.7 # ZL Hack: shrinkage\n if self.real_weight_porpotion:\n g_attr[\"density\"] = str((1 / 0.7**2) * base_density)\n\n if bone.name in [\"L_Knee\", 'R_Knee']:\n real_valued *= 0.9 # ZL Hack: shrinkage\n if self.real_weight_porpotion:\n g_attr[\"density\"] = str((1 / 0.9**2) * base_density)\n\n # if bone.name in [\"Spine\"]:\n # real_valued *= 0.01 # ZL Hack: shrinkage\n\n # 
g_attr[\"size\"] = \"{0:.4f}\".format(*template_attributes[\"size\"])\n g_attr[\"size\"] = \"{0:.4f}\".format(*real_valued)\n\n elif g_attr[\"type\"] == \"box\":\n pos = (e1 + e2) / 2\n min_verts = hull_params['norm_verts'].min(axis=0).values\n size = (hull_params['norm_verts'].max(axis=0).values - min_verts).numpy()\n if self.upright_start:\n if bone.name == \"L_Toe\" or bone.name == \"R_Toe\":\n size[0] = hull_params['volume'] / (size[2] * size[0])\n else:\n size[2] = hull_params['volume'] / (size[1] * size[0])\n else:\n size[1] = hull_params['volume'] / (size[2] * size[0])\n size /= 2\n\n if bone.name == \"L_Toe\" or bone.name == \"R_Toe\":\n if self.upright_start:\n pos[2] = -bone.pos[2] / 2 - self.size_buffer[bone.parent.name][2] + size[2] # To get toe to be at the same height as the parent\n pos[1] = -bone.pos[1] / 2 # To get toe to be at the same x as the parent\n else:\n pos[1] = -bone.pos[1] / 2 - self.size_buffer[bone.parent.name][1] + size[1] # To get toe to be at the same height as the parent\n pos[0] = -bone.pos[0] / 2 # To get toe to be at the same x as the parent\n\n if self.remove_toe:\n size /= 20 # Smaller toes...\n pos[1] = 0\n pos[0] = 0\n bone_dir = bone.end / np.linalg.norm(bone.end)\n if not self.remove_toe:\n rot = np.array([1, 0, 0, 0])\n else:\n rot = sRot.from_euler(\"xyz\",[0, 0, np.arctan(bone_dir[1] / bone_dir[0])]).as_quat()[[3, 0, 1, 2]]\n\n if self.big_ankle:\n # Big ankle override\n g_attr = {}\n hull_params = self.hull_dict[bone.name]\n min_verts, max_verts = hull_params['norm_verts'].min(axis=0).values, hull_params['norm_verts'].max(axis=0).values\n size = max_verts - min_verts\n\n bone_end = bone.end\n pos = (max_verts + min_verts)/2\n size /= 2\n\n if bone.name == \"L_Toe\" or bone.name == \"R_Toe\":\n parent_min, parent_max = self.hull_dict[bone.parent.name]['norm_verts'].min(axis=0).values, self.hull_dict[bone.parent.name]['norm_verts'].max(axis=0).values\n parent_pos = (parent_max + parent_min)/2\n\n pos[2] = parent_min[2] - bone.pos[2] + size[2] # To get toe to be at the same height as the parent\n pos[1] = parent_pos[1] - bone.pos[1] # To get toe to be at the y as the parent\n\n rot = np.array([1, 0, 0, 0])\n g_attr[\"type\"] = \"box\"\n g_attr[\"pos\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(*pos)\n g_attr[\"size\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(*size)\n g_attr[\"quat\"] = \"{0:.4f} {1:.4f} {2:.4f} {3:.4f}\".format(*rot)\n\n g_attr[\"pos\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(*pos)\n g_attr[\"size\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(*size)\n g_attr[\"quat\"] = \"{0:.4f} {1:.4f} {2:.4f} {3:.4f}\".format(*rot)\n self.size_buffer[bone.name] = size\n\n elif g_attr[\"type\"] == \"sphere\":\n pos = np.zeros(3)\n radius = np.cbrt(hull_params['volume'] * 3 / (4 * np.pi))\n if bone.name in [\"Pelvis\"]:\n radius *= 0.6 # ZL Hack: shrinkage\n if self.real_weight_porpotion:\n g_attr[\"density\"] = str((1 / 0.6**3) * base_density)\n\n g_attr[\"size\"] = \"{0:.4f}\".format(radius)\n # g_attr[\"size\"] = \"{0:.4f}\".format(*template_attributes[\"size\"])\n g_attr[\"pos\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(*pos)\n\n SubElement(node, \"geom\", g_attr)\n\n # write child bones\n for bone_c in bone.child:\n self.write_xml_bodynode(bone_c, node, offset, ref_angles)" }, { "identifier": "Skeleton", "path": "uhc/khrylib/mocap/skeleton_mesh_local.py", "snippet": "class Skeleton:\n def __init__(self, model_dir):\n self.model_dir = model_dir\n self.bones = []\n self.name2bone = {}\n self.mass_scale = 1.0\n self.len_scale = 1.0\n self.root = None\n 
self.equalities = None\n self.exclude_contacts = None\n self.collision_groups = None\n self.simple_geom = False\n self.buffer_dict = {\"njmax\": \"2500\", \"nconmax\": \"500\"}\n\n def forward_bones(self, bone):\n if bone.parent:\n # bone.pos = bone.parent.pos + bone.offset\n bone.pos = bone.offset\n for bone_c in bone.child:\n self.forward_bones(bone_c)\n\n def load_from_offsets(\n self,\n offsets,\n parents,\n axes,\n channels,\n jrange,\n sites,\n scale,\n equalities,\n hull_dict, \n exclude_contacts=None,\n collision_groups=None,\n conaffinity=None,\n simple_geom=False,\n color_dict=None,\n real_weight = False, \n replace_feet = True, \n ):\n if exclude_contacts is None:\n exclude_contacts = []\n if collision_groups is None:\n collision_groups = {}\n self.exclude_contacts = exclude_contacts\n self.collision_groups = {}\n self.conaffinity = {}\n self.color_dict = color_dict # giving color to the meshes\n self.real_weight = real_weight\n self.real_weight_porpotion = True\n self.replace_feet = replace_feet\n self.hull_dict = hull_dict\n\n for group, bones in collision_groups.items():\n for bone in bones:\n self.collision_groups[bone] = group\n\n for group, bones in conaffinity.items():\n for bone in bones:\n self.conaffinity[bone] = group\n\n self.simple_geom = simple_geom\n\n joint_names = list(offsets.keys())\n dof_ind = {\"x\": 0, \"y\": 1, \"z\": 2}\n self.equalities = equalities\n self.len_scale = scale\n self.root = Bone()\n self.root.id = 0\n self.root.name = joint_names[0]\n self.root.orient = axes[joint_names[0]]\n self.root.pos = offsets[joint_names[0]]\n self.root.sites = sites.get(joint_names[0], [])\n self.name2bone[self.root.name] = self.root\n self.bones.append(self.root)\n\n for i, joint in enumerate(joint_names[1:]):\n bone = Bone()\n bone.id = i + 1\n bone.name = joint\n bone.channels = channels[joint]\n bone.dof_index = [dof_ind[x[0]] for x in bone.channels]\n bone.offset = offsets[joint] * self.len_scale\n bone.orient = axes[joint]\n bone.lb = np.rad2deg(jrange[joint][:, 0])\n bone.ub = np.rad2deg(jrange[joint][:, 1])\n bone.sites = sites.get(joint, [])\n self.bones.append(bone)\n self.name2bone[joint] = bone\n\n for bone in self.bones[1:]:\n parent_name = parents[bone.name]\n if parent_name in self.name2bone.keys():\n bone_p = self.name2bone[parent_name]\n bone_p.child.append(bone)\n bone.parent = bone_p\n\n self.forward_bones(self.root)\n for bone in self.bones:\n if len(bone.child) == 0:\n bone.ends.append(bone.pos.copy())\n bone.end = bone.pos.copy() + 0.002\n for c_bone, p_bone in parents.items():\n if p_bone == bone.name:\n bone.end += np.array(offsets[c_bone]) * self.len_scale\n break\n else:\n bone.end = sum([bone_c.pos for bone_c in bone.child]) / len(bone.child)\n for bone_c in bone.child:\n bone.ends.append(bone_c.pos.copy())\n\n def write_str(\n self,\n template_fname=TEMPLATE_FILE,\n offset=np.array([0, 0, 0]),\n ref_angles=None,\n bump_buffer=False,\n ):\n tree = self.construct_tree(ref_angles=ref_angles,\n offset=offset,\n template_fname=template_fname)\n if bump_buffer:\n SubElement(tree.getroot(), \"size\", self.buffer_dict)\n return etree.tostring(tree, pretty_print=True)\n\n def write_xml(\n self,\n fname,\n template_fname=TEMPLATE_FILE,\n offset=np.array([0, 0, 0]),\n ref_angles=None,\n bump_buffer=False,\n ):\n tree = self.construct_tree(ref_angles=ref_angles,\n offset=offset,\n template_fname=template_fname)\n if bump_buffer:\n SubElement(tree.getroot(), \"size\", self.buffer_dict)\n # create sensors\n # sensor = tree.getroot().find(\"sensor\")\n 
# for bone in self.bones:\n # SubElement(sensor, 'framelinvel', {'objtype': 'body', 'objname': bone.name})\n # for bone in self.bones:\n # SubElement(sensor, 'frameangvel', {'objtype': 'body', 'objname': bone.name})\n # for bone in self.bones:\n # SubElement(sensor, 'framelinvel', {'objtype': 'xbody', 'objname': bone.name})\n\n tree.write(fname, pretty_print=True)\n\n def construct_tree(\n self,\n template_fname=TEMPLATE_FILE,\n offset=np.array([0, 0, 0]),\n ref_angles=None,\n ):\n if ref_angles is None:\n ref_angles = {}\n parser = XMLParser(remove_blank_text=True)\n tree = parse(template_fname, parser=parser)\n worldbody = tree.getroot().find(\"worldbody\")\n\n self.write_xml_bodynode(self.root, worldbody, offset, ref_angles)\n\n # create meshes\n asset = tree.getroot().find(\"asset\")\n for bone in self.bones:\n if os.path.exists(f\"{self.model_dir}/geom/{bone.name}.stl\"):\n attr = {\n \"file\":\n f\"{self.model_dir.split('/')[-1]}/geom/{bone.name}.stl\",\n \"name\": f\"{bone.name}_mesh\"\n }\n # geom_relative_path = f'../mesh/smpl/{self.model_dir.split(\"/\")[-1]}'\n # attr = {\"file\": f\"{geom_relative_path}/geom/{bone.name}.stl\", \"name\": f\"{bone.name}_mesh\"}\n SubElement(asset, \"mesh\", attr)\n\n # create actuators\n actuators = tree.getroot().find(\"actuator\")\n\n joints = worldbody.findall(\".//joint\")\n for joint in joints:\n name = joint.attrib[\"name\"]\n attr = dict()\n attr[\"name\"] = name\n attr[\"joint\"] = name\n attr[\"gear\"] = \"1\"\n SubElement(actuators, \"motor\", attr)\n\n # create exclude contacts\n c_node = tree.getroot().find(\"contact\")\n for bname1, bname2 in self.exclude_contacts:\n attr = {\"body1\": bname1, \"body2\": bname2}\n SubElement(c_node, \"exclude\", attr)\n # create equalities\n eq_node = tree.getroot().find(\"equality\")\n for eq_joints in self.equalities.values():\n for j1 in range(len(eq_joints) - 1):\n for j2 in range(j1 + 1, len(eq_joints)):\n jname1, jcoeff1 = eq_joints[j1]\n jname2, jcoeff2 = eq_joints[j2]\n coeff = jcoeff1 / jcoeff2\n attr = {\n \"joint1\": jname1,\n \"joint2\": jname2,\n \"polycoef\": f\"0 {coeff:.6f} 0 0 0\",\n }\n SubElement(eq_node, \"joint\", attr)\n return tree\n\n def write_xml_bodynode(self, bone, parent_node, offset, ref_angles):\n if self.real_weight:\n base_density = 1000\n else:\n base_density = 500\n \n attr = dict()\n attr[\"name\"] = bone.name\n attr[\"pos\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(*(bone.pos + offset))\n quat = quaternion_from_matrix(bone.orient)\n attr[\"quat\"] = \"{0:.4f} {1:.4f} {2:.4f} {3:.4f}\".format(*quat)\n node = SubElement(parent_node, \"body\", attr)\n\n # write joints\n if bone.parent is None:\n j_attr = dict()\n j_attr[\"name\"] = bone.name\n # j_attr[\"limited\"] = \"false\"\n # j_attr[\"type\"] = \"free\"\n # j_attr[\"armature\"] = \"0.02\"\n # j_attr[\"damping\"] = \"50\"\n # j_attr[\"stiffness\"] = \"500\"\n # j_attr[\"frictionloss\"] = \"0\"\n\n SubElement(node, \"freejoint\", j_attr)\n else:\n\n for i in range(len(bone.channels)):\n ind = bone.dof_index[i]\n axis = bone.orient[:, ind]\n j_attr = dict()\n\n\n j_attr[\"name\"] = bone.name + \"_\" + bone.channels[i]\n j_attr[\"type\"] = \"hinge\"\n j_attr[\"pos\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(*(bone.pos +\n offset))\n j_attr[\"axis\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(*axis)\n\n\n j_attr[\"stiffness\"] = str(GAINS[bone.name][0])\n j_attr[\"damping\"] = str(GAINS[bone.name][1])\n if bone.name in [\"L_Ankle\", \"R_Ankle\"]:\n j_attr[\"armature\"] = \"0.01\"\n else:\n j_attr[\"armature\"] = \"0.02\"\n\n if i 
< len(bone.lb):\n j_attr[\"range\"] = \"{0:.4f} {1:.4f}\".format(\n bone.lb[i], bone.ub[i])\n else:\n j_attr[\"range\"] = \"-180.0 180.0\"\n if j_attr[\"name\"] in ref_angles.keys():\n j_attr[\"ref\"] = f\"{ref_angles[j_attr['name']]:.1f}\"\n SubElement(node, \"joint\", j_attr)\n\n # write sites\n for s_name, s_pos, s_quat in bone.sites:\n s_attr = {\"name\": s_name}\n s_attr[\"pos\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(*(s_pos + offset))\n s_attr[\"quat\"] = \"{0:.4f} {1:.4f} {2:.4f} {3:.4f}\".format(*s_quat)\n s_attr[\"type\"] = \"sphere\"\n s_attr[\"size\"] = \"0.03\"\n SubElement(node, \"site\", s_attr)\n\n\n geom_path = f\"{self.model_dir}/geom/{bone.name}.stl\"\n if os.path.exists(geom_path):\n g_attr = {\"type\": \"mesh\", \"mesh\": f\"{bone.name}_mesh\"}\n if bone.name in self.collision_groups.keys():\n g_attr[\"density\"] = str(base_density)\n \n\n g_attr[\"contype\"] = str(self.collision_groups[bone.name])\n g_attr[\"conaffinity\"] = str(self.conaffinity[bone.name])\n\n # g_attr[\"solimp\"] = \"0.9 0.95 0.001 0.5 2\"\n # g_attr[\"solref\"] = \"0.02 1\"\n # g_attr[\"size\"] = str(10)\n # g_attr[\"friction\"] = \"0.000000000005 0.000000000005 0.1\"\n if not self.color_dict is None:\n g_attr[\"rgba\"] = self.color_dict[bone.name]\n\n \n if bone.name in [\"L_Ankle\", \"R_Ankle\", \"L_Toe\", \"R_Toe\"] and self.replace_feet:\n g_attr = {}\n hull_params = self.hull_dict[bone.name]\n min_verts, max_verts = hull_params['norm_verts'].min(axis=0), hull_params['norm_verts'].max(axis=0)\n size = max_verts - min_verts\n\n bone_end = bone.end\n pos = (max_verts + min_verts)/2\n size /= 2\n\n if bone.name == \"L_Toe\" or bone.name == \"R_Toe\":\n parent_min, parent_max = self.hull_dict[bone.parent.name]['norm_verts'].min(axis=0), self.hull_dict[bone.parent.name]['norm_verts'].max(axis=0)\n parent_pos = (parent_max + parent_min)/2\n \n pos[2] = parent_min[2] - bone.pos[2] + size[2] # To get toe to be at the same height as the parent\n pos[1] = parent_pos[1] - bone.pos[1] # To get toe to be at the y as the parent\n\n rot = np.array([1, 0, 0, 0])\n if self.real_weight_porpotion:\n g_attr[\"density\"] = str((hull_params['volume'] / (size[0] * size[1] * size[2] * 8)) * base_density)\n g_attr[\"type\"] = \"box\"\n g_attr[\"pos\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(*pos)\n g_attr[\"size\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(*size)\n g_attr[\"quat\"] = \"{0:.4f} {1:.4f} {2:.4f} {3:.4f}\".format(*rot)\n\n SubElement(node, \"geom\", g_attr)\n else:\n for end in bone.ends:\n g_attr = dict()\n e1 = bone.pos + offset\n e2 = end + offset\n v = e2 - e1\n if np.linalg.norm(v) > 1e-6:\n v /= np.linalg.norm(v)\n e1 += v * 0.02\n e2 -= v * 0.02\n g_attr[\"type\"] = \"capsule\"\n g_attr[\n \"fromto\"] = \"{0:.4f} {1:.4f} {2:.4f} {3:.4f} {4:.4f} {5:.4f}\".format(\n *np.concatenate([e1, e2]))\n else:\n g_attr[\"type\"] = \"sphere\"\n g_attr[\"pos\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(*bone.pos)\n g_attr[\"size\"] = \"0.0300\" if self.simple_geom else \"0.0100\"\n if not self.simple_geom:\n g_attr[\"contype\"] = \"0\"\n g_attr[\"conaffinity\"] = \"0\"\n elif bone.name in self.collision_groups.keys():\n group = str(self.collision_groups[bone.name])\n g_attr[\"contype\"] = group\n g_attr[\"conaffinity\"] = group\n SubElement(node, \"geom\", g_attr)\n\n # write child bones\n for bone_c in bone.child:\n self.write_xml_bodynode(bone_c, node, offset, ref_angles)" }, { "identifier": "SMPL_Parser", "path": "uhc/smpllib/smpl_parser.py", "snippet": "class SMPL_Parser(_SMPL):\n def __init__(self, create_transl=False, 
*args, **kwargs):\n \"\"\"SMPL model constructor\n Parameters\n ----------\n model_path: str\n The path to the folder or to the file where the model\n parameters are stored\n data_struct: Strct\n A struct object. If given, then the parameters of the model are\n read from the object. Otherwise, the model tries to read the\n parameters from the given `model_path`. (default = None)\n create_global_orient: bool, optional\n Flag for creating a member variable for the global orientation\n of the body. (default = True)\n global_orient: torch.tensor, optional, Bx3\n The default value for the global orientation variable.\n (default = None)\n create_body_pose: bool, optional\n Flag for creating a member variable for the pose of the body.\n (default = True)\n body_pose: torch.tensor, optional, Bx(Body Joints * 3)\n The default value for the body pose variable.\n (default = None)\n create_betas: bool, optional\n Flag for creating a member variable for the shape space\n (default = True).\n betas: torch.tensor, optional, Bx10\n The default value for the shape member variable.\n (default = None)\n create_transl: bool, optional\n Flag for creating a member variable for the translation\n of the body. (default = True)\n transl: torch.tensor, optional, Bx3\n The default value for the transl variable.\n (default = None)\n dtype: torch.dtype, optional\n The data type for the created variables\n batch_size: int, optional\n The batch size used for creating the member variables\n joint_mapper: object, optional\n An object that re-maps the joints. Useful if one wants to\n re-order the SMPL joints to some other convention (e.g. MSCOCO)\n (default = None)\n gender: str, optional\n Which gender to load\n vertex_ids: dict, optional\n A dictionary containing the indices of the extra vertices that\n will be selected\n \"\"\"\n super(SMPL_Parser, self).__init__(*args, **kwargs)\n self.device = next(self.parameters()).device\n self.joint_names = SMPL_BONE_ORDER_NAMES\n\n self.joint_axes = {x: np.identity(3) for x in self.joint_names}\n self.joint_dofs = {x: [\"x\", \"y\", \"z\"] for x in self.joint_names}\n self.joint_range = {\n x: np.hstack([np.ones([3, 1]) * -np.pi,\n np.ones([3, 1]) * np.pi])\n for x in self.joint_names\n }\n self.joint_range[\"L_Elbow\"] *= 4\n self.joint_range[\"R_Elbow\"] *= 4\n self.joint_range[\"L_Shoulder\"] *= 4\n self.joint_range[\"R_Shoulder\"] *= 4\n\n self.contype = {1: self.joint_names}\n self.conaffinity = {1: self.joint_names}\n\n # self.contype = {\n # 3: ['Pelvis', 'L_Hip', 'L_Knee', 'L_Ankle', 'L_Toe', 'R_Hip', 'R_Knee','R_Ankle', 'R_Toe', 'Torso', 'Spine', 'Neck', 'Head','L_Thorax', 'L_Elbow', 'L_Wrist', 'L_Hand', 'R_Thorax', 'R_Elbow', 'R_Wrist', 'R_Hand'],\n # 1: ['Chest', \"L_Shoulder\", \"R_Shoulder\"]\n # }\n\n # self.conaffinity = {\n # 1: ['Pelvis', 'L_Hip', 'L_Knee', 'L_Ankle', 'L_Toe', 'R_Hip', 'R_Knee','R_Ankle', 'R_Toe', 'Torso', 'Spine', 'Neck', 'Head','L_Thorax', 'L_Elbow', 'L_Wrist', 'L_Hand', 'R_Thorax', 'R_Elbow', 'R_Wrist', 'R_Hand'],\n # 3: ['Chest', \"L_Shoulder\", \"R_Shoulder\"]\n # }\n\n self.zero_pose = torch.zeros(1, 72).float()\n\n def forward(self, *args, **kwargs):\n smpl_output = super(SMPL_Parser, self).forward(*args, **kwargs)\n return smpl_output\n\n def get_joints_verts(self, pose, th_betas=None, th_trans=None):\n \"\"\"\n Pose should be batch_size x 72\n \"\"\"\n if pose.shape[1] != 72:\n pose = pose.reshape(-1, 72)\n\n pose = pose.float()\n if th_betas is not None:\n th_betas = th_betas.float()\n\n if th_betas.shape[-1] == 16:\n th_betas = 
th_betas[:, :10]\n\n batch_size = pose.shape[0]\n\n smpl_output = self.forward(\n betas=th_betas,\n transl=th_trans,\n body_pose=pose[:, 3:],\n global_orient=pose[:, :3],\n )\n vertices = smpl_output.vertices\n joints = smpl_output.joints[:, :24]\n # joints = smpl_output.joints[:,JOINST_TO_USE]\n return vertices, joints\n\n def get_offsets(self, zero_pose=None, betas=torch.zeros(1, 10).float()):\n with torch.no_grad():\n if zero_pose is None:\n verts, Jtr = self.get_joints_verts(self.zero_pose,\n th_betas=betas)\n else:\n verts, Jtr = self.get_joints_verts(zero_pose, th_betas=betas)\n verts_np = verts.detach().cpu().numpy()\n jts_np = Jtr.detach().cpu().numpy()\n parents = self.parents.cpu().numpy()\n offsets_smpl = [np.array([0, 0, 0])]\n for i in range(1, len(parents)):\n p_id = parents[i]\n p3d = jts_np[0, p_id]\n curr_3d = jts_np[0, i]\n offset_curr = curr_3d - p3d\n offsets_smpl.append(offset_curr)\n offsets_smpl = np.array(offsets_smpl)\n joint_names = self.joint_names\n joint_pos = Jtr[0].numpy()\n smpl_joint_parents = self.parents.cpu().numpy()\n joint_offsets = {\n joint_names[c]:\n (joint_pos[c] - joint_pos[p]) if c > 0 else joint_pos[c]\n for c, p in enumerate(smpl_joint_parents)\n }\n parents_dict = {\n joint_names[i]: joint_names[parents[i]]\n for i in range(len(joint_names))\n }\n channels = [\"z\", \"y\", \"x\"]\n skin_weights = self.lbs_weights.numpy()\n return (verts[0], jts_np[0], skin_weights, self.joint_names,\n joint_offsets, parents_dict, channels, self.joint_range)\n\n def get_mesh_offsets(self,\n zero_pose=None,\n betas=torch.zeros(1, 10),\n flatfoot=False):\n with torch.no_grad():\n joint_names = self.joint_names\n if zero_pose is None:\n verts, Jtr = self.get_joints_verts(self.zero_pose,\n th_betas=betas)\n else:\n verts, Jtr = self.get_joints_verts(zero_pose, th_betas=betas)\n\n verts_np = verts.detach().cpu().numpy()\n verts = verts_np[0]\n\n if flatfoot:\n feet_subset = verts[:, 1] < np.min(verts[:, 1]) + 0.01\n verts[feet_subset, 1] = np.mean(verts[feet_subset][:, 1])\n\n smpl_joint_parents = self.parents.cpu().numpy()\n\n joint_pos = Jtr[0].numpy()\n joint_offsets = {\n joint_names[c]:\n (joint_pos[c] - joint_pos[p]) if c > 0 else joint_pos[c]\n for c, p in enumerate(smpl_joint_parents)\n }\n joint_parents = {\n x: joint_names[i] if i >= 0 else None\n for x, i in zip(joint_names, smpl_joint_parents)\n }\n\n # skin_weights = smpl_layer.th_weights.numpy()\n skin_weights = self.lbs_weights.numpy()\n return (\n verts,\n joint_pos,\n skin_weights,\n joint_names,\n joint_offsets,\n joint_parents,\n self.joint_axes,\n self.joint_dofs,\n self.joint_range,\n self.contype,\n self.conaffinity,\n )\n\n def get_mesh_offsets_batch(self, betas=torch.zeros(1, 10), flatfoot=False):\n with torch.no_grad():\n joint_names = self.joint_names\n verts, Jtr = self.get_joints_verts(self.zero_pose.repeat(\n betas.shape[0], 1),\n th_betas=betas)\n verts_np = verts.detach().cpu().numpy()\n verts = verts_np[0]\n\n if flatfoot:\n feet_subset = verts[:, 1] < np.min(verts[:, 1]) + 0.01\n verts[feet_subset, 1] = np.mean(verts[feet_subset][:, 1])\n\n smpl_joint_parents = self.parents.cpu().numpy()\n\n joint_pos = Jtr\n joint_offsets = {\n joint_names[c]:\n (joint_pos[:, c] - joint_pos[:, p]) if c > 0 else joint_pos[:,\n c]\n for c, p in enumerate(smpl_joint_parents)\n }\n joint_parents = {\n x: joint_names[i] if i >= 0 else None\n for x, i in zip(joint_names, smpl_joint_parents)\n }\n\n skin_weights = self.lbs_weights\n return (\n verts,\n joint_pos,\n skin_weights,\n joint_names,\n 
joint_offsets,\n joint_parents,\n self.joint_axes,\n self.joint_dofs,\n self.joint_range,\n self.contype,\n self.conaffinity,\n )" }, { "identifier": "SMPLH_Parser", "path": "uhc/smpllib/smpl_parser.py", "snippet": "class SMPLH_Parser(_SMPLH):\n def __init__(self, *args, **kwargs):\n super(SMPLH_Parser, self).__init__(*args, **kwargs)\n self.device = next(self.parameters()).device\n self.joint_names = SMPLH_BONE_ORDER_NAMES\n self.joint_axes = {x: np.identity(3) for x in self.joint_names}\n self.joint_dofs = {x: [\"z\", \"y\", \"x\"] for x in self.joint_names}\n self.joint_range = {\n x: np.hstack([np.ones([3, 1]) * -np.pi,\n np.ones([3, 1]) * np.pi])\n for x in self.joint_names\n }\n self.joint_range[\"L_Elbow\"] *= 4\n self.joint_range[\"R_Elbow\"] *= 4\n # import ipdb\n # ipdb.set_trace()\n\n self.contype = {1: self.joint_names}\n self.conaffinity = {1: self.joint_names}\n self.zero_pose = torch.zeros(1, 156).float()\n\n def forward(self, *args, **kwargs):\n smpl_output = super(SMPLH_Parser, self).forward(*args, **kwargs)\n return smpl_output\n\n def get_joints_verts(self, pose, th_betas=None, th_trans=None):\n \"\"\"\n Pose should be batch_size x 156\n \"\"\"\n\n if pose.shape[1] != 156:\n pose = pose.reshape(-1, 156)\n pose = pose.float()\n if th_betas is not None:\n th_betas = th_betas.float()\n\n batch_size = pose.shape[0]\n smpl_output = self.forward(\n body_pose=pose[:, 3:66],\n global_orient=pose[:, :3],\n L_hand_pose=pose[:, 66:111],\n R_hand_pose=pose[:, 111:156],\n betas=th_betas,\n transl=th_trans,\n )\n vertices = smpl_output.vertices\n joints = smpl_output.joints\n # joints = smpl_output.joints[:,JOINST_TO_USE]\n return vertices, joints\n\n def get_offsets(self, betas=torch.zeros(1, 16).float()):\n with torch.no_grad():\n verts, jts = self.get_joints_verts(self.zero_pose, th_betas=betas)\n verts_np = verts.detach().cpu().numpy()\n jts_np = jts.detach().cpu().numpy()\n\n parents = self.parents.cpu().numpy()\n offsets_smpl = [np.array([0, 0, 0])]\n for i in range(1, len(parents)):\n p_id = parents[i]\n p3d = jts_np[0, p_id]\n curr_3d = jts_np[0, i]\n offset_curr = curr_3d - p3d\n offsets_smpl.append(offset_curr)\n offsets_smpl = np.array(offsets_smpl)\n names_smpl = self.joint_names\n offset_smpl_dict = {\n names_smpl[i]: offsets_smpl[i]\n for i in range(len(names_smpl))\n }\n parents_dict = {\n names_smpl[i]: names_smpl[parents[i]]\n for i in range(len(names_smpl))\n }\n parents_dict[\"Hips\"] = \"None\"\n channels = [\"z\", \"y\", \"x\"]\n\n return offset_smpl_dict, parents_dict, channels\n\n def get_mesh_offsets(self, betas=torch.zeros(1, 16), flatfoot=False):\n with torch.no_grad():\n joint_names = self.joint_names\n verts, Jtr = self.get_joints_verts(self.zero_pose, th_betas=betas)\n\n verts_np = verts.detach().cpu().numpy()\n verts = verts_np[0]\n\n if flatfoot:\n feet_subset = verts[:, 1] < np.min(verts[:, 1]) + 0.01\n verts[feet_subset, 1] = np.mean(verts[feet_subset][:, 1])\n\n smpl_joint_parents = self.parents.cpu().numpy()\n joint_pos = Jtr[0].numpy()\n joint_offsets = {\n joint_names[c]:\n (joint_pos[c] - joint_pos[p]) if c > 0 else joint_pos[c]\n for c, p in enumerate(smpl_joint_parents)\n }\n joint_parents = {\n x: joint_names[i] if i >= 0 else None\n for x, i in zip(joint_names, smpl_joint_parents)\n }\n\n # skin_weights = smpl_layer.th_weights.numpy()\n skin_weights = self.lbs_weights.numpy()\n return (\n verts,\n joint_pos,\n skin_weights,\n joint_names,\n joint_offsets,\n joint_parents,\n self.joint_axes,\n self.joint_dofs,\n self.joint_range,\n 
self.contype,\n self.conaffinity,\n )" }, { "identifier": "SMPLX_Parser", "path": "uhc/smpllib/smpl_parser.py", "snippet": "class SMPLX_Parser(_SMPLX):\n def __init__(self, *args, **kwargs):\n super(SMPLX_Parser, self).__init__(*args, **kwargs)\n self.device = next(self.parameters()).device\n self.joint_names = SMPLH_BONE_ORDER_NAMES\n self.joint_axes = {x: np.identity(3) for x in self.joint_names}\n self.joint_dofs = {x: [\"z\", \"y\", \"x\"] for x in self.joint_names}\n self.joint_range = {\n x: np.hstack([np.ones([3, 1]) * -np.pi,\n np.ones([3, 1]) * np.pi])\n for x in self.joint_names\n }\n self.joint_range[\"L_Elbow\"] *= 4\n self.joint_range[\"R_Elbow\"] *= 4\n # import ipdb\n # ipdb.set_trace()\n\n self.contype = {1: self.joint_names}\n self.conaffinity = {1: self.joint_names}\n self.zero_pose = torch.zeros(1, 156).float()\n self.joint_to_use = [\n SMPLX_BONE_ORDER_NAMES.index(i) for i in SMPLH_BONE_ORDER_NAMES\n ]\n self.parents_to_use = np.concatenate(\n [np.arange(0, 22), np.arange(25, 55)])\n\n def forward(self, *args, **kwargs):\n smpl_output = super(SMPLX_Parser, self).forward(*args, **kwargs)\n return smpl_output\n\n def get_joints_verts(self, pose, th_betas=None, th_trans=None):\n \"\"\"\n Pose should be batch_size x 156\n \"\"\"\n\n if pose.shape[1] != 156:\n pose = pose.reshape(-1, 156)\n pose = pose.float()\n if th_betas is not None:\n th_betas = th_betas.float()\n\n batch_size = pose.shape[0]\n smpl_output = self.forward(\n body_pose=pose[:, 3:66],\n global_orient=pose[:, :3],\n left_hand_pose=pose[:, 66:111],\n right_hand_pose=pose[:, 111:156],\n betas=th_betas,\n transl=th_trans,\n )\n vertices = smpl_output.vertices\n joints = smpl_output.joints\n # return vertices, joints\n return vertices, joints\n\n def get_offsets(self, v_template=None):\n if not v_template is None:\n self.v_template = v_template\n with torch.no_grad():\n verts, jts = self.get_joints_verts(self.zero_pose)\n verts_np = verts.detach().cpu().numpy()\n jts_np = jts.detach().cpu().numpy()\n\n parents = self.parents.cpu().numpy()\n offsets_smpl = [np.array([0, 0, 0])]\n for i in range(1, len(parents)):\n p_id = parents[i]\n p3d = jts_np[0, p_id]\n curr_3d = jts_np[0, i]\n offset_curr = curr_3d - p3d\n offsets_smpl.append(offset_curr)\n offsets_smpl = np.array(offsets_smpl)\n names_smpl = self.joint_names\n offset_smpl_dict = {\n names_smpl[i]: offsets_smpl[i]\n for i in range(len(names_smpl))\n }\n parents_dict = {\n names_smpl[i]: names_smpl[parents[i]]\n for i in range(len(names_smpl))\n }\n parents_dict[\"Hips\"] = \"None\"\n channels = [\"z\", \"y\", \"x\"]\n return offset_smpl_dict, parents_dict, channels\n\n def get_mesh_offsets(self, v_template=None):\n if not v_template is None:\n self.v_template = v_template\n with torch.no_grad():\n # joint_names = self.joint_names\n joint_names = SMPLX_BONE_ORDER_NAMES\n verts, Jtr = self.get_joints_verts(self.zero_pose)\n\n smpl_joint_parents = self.parents.cpu().numpy()\n joint_pos = Jtr[0].numpy()\n # print(\n # joint_pos.shape,\n # smpl_joint_parents.shape,\n # len(self.parents_to_use),\n # self.parents.cpu().numpy().shape,\n # )\n joint_offsets = {\n joint_names[c]:\n (joint_pos[c] - joint_pos[p]) if c > 0 else joint_pos[c]\n for c, p in enumerate(smpl_joint_parents)\n if joint_names[c] in self.joint_names\n }\n joint_parents = {\n x: joint_names[i] if i >= 0 else None\n for x, i in zip(joint_names, smpl_joint_parents)\n if joint_names[i] in self.joint_names\n }\n\n verts = verts[0].numpy()\n # skin_weights = smpl_layer.th_weights.numpy()\n skin_weights 
= self.lbs_weights.numpy()[:, self.parents_to_use]\n return (\n verts,\n joint_pos,\n skin_weights,\n self.joint_names,\n joint_offsets,\n joint_parents,\n self.joint_axes,\n self.joint_dofs,\n self.joint_range,\n self.contype,\n self.conaffinity,\n )" }, { "identifier": "quadric_mesh_decimation", "path": "uhc/utils/geom.py", "snippet": "def quadric_mesh_decimation(fname, reduction_rate, verbose=False):\n reader = vtkSTLReader()\n reader.SetFileName(fname)\n reader.Update()\n inputPoly = reader.GetOutput()\n\n decimate = vtkQuadricDecimation()\n decimate.SetInputData(inputPoly)\n decimate.SetTargetReduction(reduction_rate)\n decimate.Update()\n decimatedPoly = vtkPolyData()\n decimatedPoly.ShallowCopy(decimate.GetOutput())\n\n if verbose:\n print(\n f\"Mesh Decimation: (points, faces) goes from ({inputPoly.GetNumberOfPoints(), inputPoly.GetNumberOfPolys()}) \"\n f\"to ({decimatedPoly.GetNumberOfPoints(), decimatedPoly.GetNumberOfPolys()})\"\n )\n\n stlWriter = vtkSTLWriter()\n stlWriter.SetFileName(fname)\n stlWriter.SetFileTypeToBinary()\n stlWriter.SetInputData(decimatedPoly)\n stlWriter.Write()" }, { "identifier": "center_scale_mesh", "path": "uhc/utils/geom.py", "snippet": "def center_scale_mesh(fname, scale):\n reader = vtkSTLReader()\n reader.SetFileName(fname)\n reader.Update()\n inputPoly = reader.GetOutputPort()\n\n centerOfMassFilter = vtkCenterOfMass()\n centerOfMassFilter.SetInputConnection(inputPoly)\n centerOfMassFilter.SetUseScalarsAsWeights(False)\n centerOfMassFilter.Update()\n center = centerOfMassFilter.GetCenter()\n\n transform = vtkTransform()\n transform.PostMultiply()\n transform.Translate(-center[0], -center[1], -center[2])\n transform.Scale(scale, scale, scale)\n transform.Translate(center[0], center[1], center[2])\n transform.Update()\n\n transformFilter = vtkTransformPolyDataFilter()\n transformFilter.SetInputConnection(inputPoly)\n transformFilter.SetTransform(transform)\n transformFilter.Update()\n\n stlWriter = vtkSTLWriter()\n stlWriter.SetFileName(fname)\n stlWriter.SetFileTypeToBinary()\n stlWriter.SetInputConnection(transformFilter.GetOutputPort())\n stlWriter.Write()" }, { "identifier": "flags", "path": "uhc/utils/flags.py", "snippet": "class Flags(object):\n def __init__(self, *items):" } ]
import os
import sys
import time
import argparse
import torch
import pdb
import os.path as osp
import numpy as np
import math
import uuid
import atexit
import shutil
import joblib
import cv2
import mujoco
import mujoco.viewer
from copy import deepcopy
from collections import defaultdict
from lxml.etree import XMLParser, parse, ElementTree, Element, SubElement
from lxml import etree
from io import BytesIO
from uhc.khrylib.mocap.skeleton_local import Skeleton
from uhc.khrylib.mocap.skeleton_mesh_local import Skeleton as SkeletonMesh
from uhc.smpllib.smpl_parser import (
    SMPL_Parser,
    SMPLH_Parser,
    SMPLX_Parser,
)
from collections import defaultdict
from scipy.spatial import ConvexHull
from stl import mesh
from uhc.utils.geom import quadric_mesh_decimation, center_scale_mesh
from uhc.utils.flags import flags
14,302
sys.path.append(os.getcwd()) # from scipy.spatial.qhull import _Qhull def parse_vec(string): return np.fromstring(string, sep=" ") def parse_fromto(string): fromto = np.fromstring(string, sep=" ") return fromto[:3], fromto[3:] def normalize_range(value, lb, ub): return (value - lb) / (ub - lb) * 2 - 1 def denormalize_range(value, lb, ub): return (value + 1) * 0.5 * (ub - lb) + lb def vec_to_polar(v): phi = math.atan2(v[1], v[0]) theta = math.acos(v[2]) return np.array([theta, phi]) def polar_to_vec(p): v = np.zeros(3) v[0] = math.sin(p[0]) * math.cos(p[1]) v[1] = math.sin(p[0]) * math.sin(p[1]) v[2] = math.cos(p[0]) return v def in_hull(hull, queries): tolerance = 1e-3 if len(queries.shape) == 1: queries = queries[None, ] return np.all( np.add(np.dot(queries, hull.equations[:, :-1].T), hull.equations[:, -1]) <= tolerance, axis=1, ) def get_joint_geometries( smpl_verts, smpl_jts, skin_weights, joint_names, geom_dir, scale_dict={}, suffix=None, verbose=False, min_num_vert=50, ): vert_to_joint = skin_weights.argmax(axis=1) hull_dict = {} # create joint geometries os.makedirs(geom_dir, exist_ok=True) for jind, jname in enumerate(joint_names): vind = np.where(vert_to_joint == jind)[0] if len(vind) == 0: print(f"{jname} has no vertices!") continue norm_verts = (smpl_verts[vind] - smpl_jts[jind]) * scale_dict.get(jname, 1) hull = ConvexHull(smpl_verts[vind]) norm_hull = ConvexHull(norm_verts) hull_dict[jname] = { "norm_hull": norm_hull, "norm_verts": norm_verts, "verts": smpl_verts[vind], "hull": hull, "volume": hull.volume } center = norm_verts[hull.vertices].mean(axis=0) jgeom = mesh.Mesh( np.zeros(hull.simplices.shape[0], dtype=mesh.Mesh.dtype)) for i, f in enumerate(hull.simplices): for j in range(3): jgeom.vectors[i][j] = norm_verts[f[j], :] # check if the face's normal is facing outward normal = np.cross( jgeom.vectors[i][1] - jgeom.vectors[i][0], jgeom.vectors[i][2] - jgeom.vectors[i][0], ) out_vec = jgeom.vectors[i].mean(axis=0) - center if np.dot(normal, out_vec) < 0: jgeom.vectors[i] = jgeom.vectors[i][[0, 2, 1]] # flip the face if suffix is None: fname = f"{geom_dir}/{jname}.stl" else: fname = f"{geom_dir}/{jname}_{suffix}.stl" jgeom.save(fname) # mesh simplification with vtk # min_num_vert = 50 min_num_vert = 50 cur_num_vert = len(hull.vertices) reduction_rate = min(0.9, 1.0 - min_num_vert / cur_num_vert)
sys.path.append(os.getcwd()) # from scipy.spatial.qhull import _Qhull def parse_vec(string): return np.fromstring(string, sep=" ") def parse_fromto(string): fromto = np.fromstring(string, sep=" ") return fromto[:3], fromto[3:] def normalize_range(value, lb, ub): return (value - lb) / (ub - lb) * 2 - 1 def denormalize_range(value, lb, ub): return (value + 1) * 0.5 * (ub - lb) + lb def vec_to_polar(v): phi = math.atan2(v[1], v[0]) theta = math.acos(v[2]) return np.array([theta, phi]) def polar_to_vec(p): v = np.zeros(3) v[0] = math.sin(p[0]) * math.cos(p[1]) v[1] = math.sin(p[0]) * math.sin(p[1]) v[2] = math.cos(p[0]) return v def in_hull(hull, queries): tolerance = 1e-3 if len(queries.shape) == 1: queries = queries[None, ] return np.all( np.add(np.dot(queries, hull.equations[:, :-1].T), hull.equations[:, -1]) <= tolerance, axis=1, ) def get_joint_geometries( smpl_verts, smpl_jts, skin_weights, joint_names, geom_dir, scale_dict={}, suffix=None, verbose=False, min_num_vert=50, ): vert_to_joint = skin_weights.argmax(axis=1) hull_dict = {} # create joint geometries os.makedirs(geom_dir, exist_ok=True) for jind, jname in enumerate(joint_names): vind = np.where(vert_to_joint == jind)[0] if len(vind) == 0: print(f"{jname} has no vertices!") continue norm_verts = (smpl_verts[vind] - smpl_jts[jind]) * scale_dict.get(jname, 1) hull = ConvexHull(smpl_verts[vind]) norm_hull = ConvexHull(norm_verts) hull_dict[jname] = { "norm_hull": norm_hull, "norm_verts": norm_verts, "verts": smpl_verts[vind], "hull": hull, "volume": hull.volume } center = norm_verts[hull.vertices].mean(axis=0) jgeom = mesh.Mesh( np.zeros(hull.simplices.shape[0], dtype=mesh.Mesh.dtype)) for i, f in enumerate(hull.simplices): for j in range(3): jgeom.vectors[i][j] = norm_verts[f[j], :] # check if the face's normal is facing outward normal = np.cross( jgeom.vectors[i][1] - jgeom.vectors[i][0], jgeom.vectors[i][2] - jgeom.vectors[i][0], ) out_vec = jgeom.vectors[i].mean(axis=0) - center if np.dot(normal, out_vec) < 0: jgeom.vectors[i] = jgeom.vectors[i][[0, 2, 1]] # flip the face if suffix is None: fname = f"{geom_dir}/{jname}.stl" else: fname = f"{geom_dir}/{jname}_{suffix}.stl" jgeom.save(fname) # mesh simplification with vtk # min_num_vert = 50 min_num_vert = 50 cur_num_vert = len(hull.vertices) reduction_rate = min(0.9, 1.0 - min_num_vert / cur_num_vert)
quadric_mesh_decimation(fname, reduction_rate, verbose=verbose)
5
2023-10-31 20:47:12+00:00
16k
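The pacer record above builds one convex-hull geometry per SMPL joint by assigning each vertex to the joint with the largest skin weight and then using the hull volume to size the MuJoCo geoms. A toy sketch of that assignment and volume computation with SciPy, using random placeholder arrays in place of the real SMPL vertices and LBS weights:

import numpy as np
from scipy.spatial import ConvexHull

verts = np.random.rand(500, 3)           # placeholder for SMPL vertices
skin_weights = np.random.rand(500, 24)   # placeholder for LBS weights, one column per joint

vert_to_joint = skin_weights.argmax(axis=1)   # each vertex follows its dominant joint
hull_volumes = {}
for j in range(skin_weights.shape[1]):
    idx = np.where(vert_to_joint == j)[0]
    if len(idx) < 4:                          # a 3D hull needs at least 4 points
        continue
    hull = ConvexHull(verts[idx])
    hull_volumes[j] = hull.volume             # the record uses this volume to pick capsule/box/sphere sizes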
CVHub520/yolov5_obb
utils/datasets.py
[ { "identifier": "Albumentations", "path": "utils/augmentations.py", "snippet": "class Albumentations:\n # YOLOv5 Albumentations class (optional, only used if package is installed)\n def __init__(self):\n self.transform = None\n try:\n import albumentations as A\n check_version(A.__version__, '1.0.3', hard=True) # version requirement\n\n self.transform = A.Compose([\n A.Blur(p=0.01),\n A.MedianBlur(p=0.01),\n A.ToGray(p=0.01),\n A.CLAHE(p=0.01),\n A.RandomBrightnessContrast(p=0.0),\n A.RandomGamma(p=0.0),\n A.ImageCompression(quality_lower=75, p=0.0)],\n bbox_params=A.BboxParams(format='yolo', label_fields=['class_labels']))\n\n LOGGER.info(colorstr('albumentations: ') + ', '.join(f'{x}' for x in self.transform.transforms if x.p))\n except ImportError: # package not installed, skip\n pass\n except Exception as e:\n LOGGER.info(colorstr('albumentations: ') + f'{e}')\n\n def __call__(self, im, labels, p=1.0):\n if self.transform and random.random() < p:\n new = self.transform(image=im, bboxes=labels[:, 1:], class_labels=labels[:, 0]) # transformed\n im, labels = new['image'], np.array([[c, *b] for c, b in zip(new['class_labels'], new['bboxes'])])\n return im, labels" }, { "identifier": "augment_hsv", "path": "utils/augmentations.py", "snippet": "def augment_hsv(im, hgain=0.5, sgain=0.5, vgain=0.5):\n # HSV color-space augmentation\n if hgain or sgain or vgain:\n r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains\n hue, sat, val = cv2.split(cv2.cvtColor(im, cv2.COLOR_BGR2HSV))\n dtype = im.dtype # uint8\n\n x = np.arange(0, 256, dtype=r.dtype)\n lut_hue = ((x * r[0]) % 180).astype(dtype)\n lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)\n lut_val = np.clip(x * r[2], 0, 255).astype(dtype)\n\n im_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val)))\n cv2.cvtColor(im_hsv, cv2.COLOR_HSV2BGR, dst=im) # no return needed" }, { "identifier": "copy_paste", "path": "utils/augmentations.py", "snippet": "def copy_paste(im, labels, segments, p=0.5):\n # Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy)\n n = len(segments)\n if p and n:\n h, w, c = im.shape # height, width, channels\n im_new = np.zeros(im.shape, np.uint8)\n for j in random.sample(range(n), k=round(p * n)):\n l, s = labels[j], segments[j]\n box = w - l[3], l[2], w - l[1], l[4]\n ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area\n if (ioa < 0.30).all(): # allow 30% obscuration of existing labels\n labels = np.concatenate((labels, [[l[0], *box]]), 0)\n segments.append(np.concatenate((w - s[:, 0:1], s[:, 1:2]), 1))\n cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (255, 255, 255), cv2.FILLED)\n\n result = cv2.bitwise_and(src1=im, src2=im_new)\n result = cv2.flip(result, 1) # augment segments (flip left-right)\n i = result > 0 # pixels to replace\n # i[:, :] = result.max(2).reshape(h, w, 1) # act over ch\n im[i] = result[i] # cv2.imwrite('debug.jpg', im) # debug\n\n return im, labels, segments" }, { "identifier": "letterbox", "path": "utils/augmentations.py", "snippet": "def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32):\n \"\"\"\n Resize and pad image while meeting stride-multiple constraints\n Returns:\n im (array): (height, width, 3)\n ratio (array): [w_ratio, h_ratio]\n (dw, dh) (array): [w_padding h_padding]\n \"\"\"\n shape = im.shape[:2] # current shape [height, width]\n if isinstance(new_shape, int): # [h_rect, w_rect]\n new_shape = 
(new_shape, new_shape)\n\n # Scale ratio (new / old)\n r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])\n if not scaleup: # only scale down, do not scale up (for better val mAP)\n r = min(r, 1.0)\n\n # Compute padding\n ratio = r, r # wh ratios\n new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r)) # w h \n dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding\n if auto: # minimum rectangle\n dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding\n elif scaleFill: # stretch\n dw, dh = 0.0, 0.0\n new_unpad = (new_shape[1], new_shape[0]) # [w h]\n ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # [w_ratio, h_ratio]\n\n dw /= 2 # divide padding into 2 sides\n dh /= 2\n\n if shape[::-1] != new_unpad: # resize\n im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR)\n top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))\n left, right = int(round(dw - 0.1)), int(round(dw + 0.1))\n im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border\n return im, ratio, (dw, dh)" }, { "identifier": "mixup", "path": "utils/augmentations.py", "snippet": "def mixup(im, labels, im2, labels2):\n # Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf\n r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0\n im = (im * r + im2 * (1 - r)).astype(np.uint8)\n labels = np.concatenate((labels, labels2), 0)\n return im, labels" }, { "identifier": "random_perspective", "path": "utils/augmentations.py", "snippet": "def random_perspective(im, targets=(), segments=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0,\n border=(0, 0)):\n # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(0.1, 0.1), scale=(0.9, 1.1), shear=(-10, 10))\n # targets = [cls, xyxyxyxy]\n\n height = im.shape[0] + border[0] * 2 # shape(h,w,c)\n width = im.shape[1] + border[1] * 2\n\n # Center\n C = np.eye(3)\n C[0, 2] = -im.shape[1] / 2 # x translation (pixels)\n C[1, 2] = -im.shape[0] / 2 # y translation (pixels)\n\n # Perspective\n P = np.eye(3)\n P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y)\n P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x)\n\n # Rotation and Scale\n R = np.eye(3)\n a = random.uniform(-degrees, degrees)\n # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations\n s = random.uniform(1 - scale, 1 + scale)\n # s = 2 ** random.uniform(-scale, scale)\n R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)\n\n # Shear\n S = np.eye(3)\n S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)\n S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)\n\n # Translation\n T = np.eye(3)\n T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels)\n T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels)\n\n # Combined rotation matrix\n M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT\n if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed\n if perspective:\n im = cv2.warpPerspective(im, M, dsize=(width, height), borderValue=(114, 114, 114))\n else: # affine\n im = cv2.warpAffine(im, M[:2], dsize=(width, height), borderValue=(114, 114, 114))\n\n # Visualize\n # import matplotlib.pyplot as plt\n # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()\n # ax[0].imshow(im[:, :, ::-1]) # base\n # ax[1].imshow(im2[:, :, 
::-1]) # warped\n\n # Transform label coordinates\n n = len(targets)\n if n:\n use_segments = any(x.any() for x in segments)\n new = np.zeros((n, 4))\n if use_segments: # warp segments\n segments = resample_segments(segments) # upsample\n for i, segment in enumerate(segments):\n xy = np.ones((len(segment), 3))\n xy[:, :2] = segment\n xy = xy @ M.T # transform\n xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2] # perspective rescale or affine\n\n # clip\n new[i] = segment2box(xy, width, height)\n\n else: # warp boxes\n xy = np.ones((n * 4, 3))\n # xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1\n xy[:, :2] = targets[:, 1:].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1\n xy = xy @ M.T # transform\n xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]).reshape(n, 8) # perspective rescale or affine\n\n # # create new boxes\n # x = xy[:, [0, 2, 4, 6]]\n # y = xy[:, [1, 3, 5, 7]]\n # new = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T\n\n # # clip\n # new[:, [0, 2]] = new[:, [0, 2]].clip(0, width)\n # new[:, [1, 3]] = new[:, [1, 3]].clip(0, height)\n # clip boxes 不启用,保留预测完整物体的能力\n\n # filter candidates\n # i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01 if use_segments else 0.10)\n # targets = targets[i]\n # targets[:, 1:5] = new[i]\n targets_mask = poly_filter(polys=xy, h=height, w=width)\n targets[:, 1:] = xy\n targets = targets[targets_mask]\n\n return im, targets" }, { "identifier": "LOGGER", "path": "utils/general.py", "snippet": "LOGGER = set_logging(__name__) # define globally (used in train.py, val.py, detect.py, etc.)" }, { "identifier": "NUM_THREADS", "path": "utils/general.py", "snippet": "NUM_THREADS = min(8, max(1, os.cpu_count() - 1)) # number of YOLOv5 multiprocessing threads" }, { "identifier": "check_dataset", "path": "utils/general.py", "snippet": "def check_dataset(data, autodownload=True):\n # Download and/or unzip dataset if not found locally\n # Usage: https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128_with_yaml.zip\n\n # Download (optional)\n extract_dir = ''\n if isinstance(data, (str, Path)) and str(data).endswith('.zip'): # i.e. gs://bucket/dir/coco128.zip\n download(data, dir='../datasets', unzip=True, delete=False, curl=False, threads=1)\n data = next((Path('../datasets') / Path(data).stem).rglob('*.yaml'))\n extract_dir, autodownload = data.parent, False\n\n # Read yaml (optional)\n if isinstance(data, (str, Path)):\n with open(data, errors='ignore') as f:\n data = yaml.safe_load(f) # dictionary\n\n # Parse yaml\n path = extract_dir or Path(data.get('path') or '') # optional 'path' default to '.'\n for k in 'train', 'val', 'test':\n if data.get(k): # prepend path\n data[k] = str(path / data[k]) if isinstance(data[k], str) else [str(path / x) for x in data[k]]\n\n assert 'nc' in data, \"Dataset 'nc' key missing.\"\n if 'names' not in data:\n data['names'] = [f'class{i}' for i in range(data['nc'])] # assign class names if missing\n train, val, test, s = (data.get(x) for x in ('train', 'val', 'test', 'download'))\n if val:\n val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path\n if not all(x.exists() for x in val):\n print('\\nWARNING: Dataset not found, nonexistent paths: %s' % [str(x) for x in val if not x.exists()])\n if s and autodownload: # download script\n root = path.parent if 'path' in data else '..' # unzip directory i.e. 
'../'\n if s.startswith('http') and s.endswith('.zip'): # URL\n f = Path(s).name # filename\n print(f'Downloading {s} to {f}...')\n torch.hub.download_url_to_file(s, f)\n Path(root).mkdir(parents=True, exist_ok=True) # create root\n ZipFile(f).extractall(path=root) # unzip\n Path(f).unlink() # remove zip\n r = None # success\n elif s.startswith('bash '): # bash script\n print(f'Running {s} ...')\n r = os.system(s)\n else: # python script\n r = exec(s, {'yaml': data}) # return None\n print(f\"Dataset autodownload {f'success, saved to {root}' if r in (0, None) else 'failure'}\\n\")\n else:\n raise Exception('Dataset not found.')\n\n return data # dictionary" }, { "identifier": "check_requirements", "path": "utils/general.py", "snippet": "@try_except\ndef check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), install=True):\n # Check installed dependencies meet requirements (pass *.txt file or list of packages)\n prefix = colorstr('red', 'bold', 'requirements:')\n check_python() # check python version\n if isinstance(requirements, (str, Path)): # requirements.txt file\n file = Path(requirements)\n assert file.exists(), f\"{prefix} {file.resolve()} not found, check failed.\"\n with file.open() as f:\n requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(f) if x.name not in exclude]\n else: # list or tuple of packages\n requirements = [x for x in requirements if x not in exclude]\n\n n = 0 # number of packages updates\n for r in requirements:\n try:\n pkg.require(r)\n except Exception as e: # DistributionNotFound or VersionConflict if requirements not met\n s = f\"{prefix} {r} not found and is required by YOLOv5\"\n if install:\n print(f\"{s}, attempting auto-update...\")\n try:\n assert check_online(), f\"'pip install {r}' skipped (offline)\"\n print(check_output(f\"pip install '{r}'\", shell=True).decode())\n n += 1\n except Exception as e:\n print(f'{prefix} {e}')\n else:\n print(f'{s}. Please install and rerun your command.')\n\n if n: # if packages updated\n source = file.resolve() if 'file' in locals() else requirements\n s = f\"{prefix} {n} package{'s' * (n > 1)} updated per {source}\\n\" \\\n f\"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\\n\"\n print(emojis(s))" }, { "identifier": "check_yaml", "path": "utils/general.py", "snippet": "def check_yaml(file, suffix=('.yaml', '.yml')):\n # Search/download YAML file (if necessary) and return path, checking suffix\n return check_file(file, suffix)" }, { "identifier": "clean_str", "path": "utils/general.py", "snippet": "def clean_str(s):\n # Cleans a string by replacing special characters with underscore _\n return re.sub(pattern=\"[|@#!¡·$€%&()=?¿^*;:,¨´><+]\", repl=\"_\", string=s)" }, { "identifier": "segments2boxes", "path": "utils/general.py", "snippet": "def segments2boxes(segments):\n # Convert segment labels to box labels, i.e. (cls, xy1, xy2, ...) 
to (cls, xywh)\n boxes = []\n for s in segments:\n x, y = s.T # segment xy\n boxes.append([x.min(), y.min(), x.max(), y.max()]) # cls, xyxy\n return xyxy2xywh(np.array(boxes)) # cls, xywh" }, { "identifier": "xyn2xy", "path": "utils/general.py", "snippet": "def xyn2xy(x, w=640, h=640, padw=0, padh=0):\n # Convert normalized segments into pixel segments, shape (n,2)\n y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)\n y[:, 0] = w * x[:, 0] + padw # top left x\n y[:, 1] = h * x[:, 1] + padh # top left y\n return y" }, { "identifier": "xywh2xyxy", "path": "utils/general.py", "snippet": "def xywh2xyxy(x):\n # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right\n y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)\n y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x\n y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y\n y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x\n y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y\n return y" }, { "identifier": "xywhn2xyxy", "path": "utils/general.py", "snippet": "def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0):\n # Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right\n y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)\n y[:, 0] = w * (x[:, 0] - x[:, 2] / 2) + padw # top left x\n y[:, 1] = h * (x[:, 1] - x[:, 3] / 2) + padh # top left y\n y[:, 2] = w * (x[:, 0] + x[:, 2] / 2) + padw # bottom right x\n y[:, 3] = h * (x[:, 1] + x[:, 3] / 2) + padh # bottom right y\n return y" }, { "identifier": "xyxy2xywhn", "path": "utils/general.py", "snippet": "def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0):\n # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] normalized where xy1=top-left, xy2=bottom-right\n if clip:\n clip_coords(x, (h - eps, w - eps)) # warning: inplace clip\n y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)\n y[:, 0] = ((x[:, 0] + x[:, 2]) / 2) / w # x center\n y[:, 1] = ((x[:, 1] + x[:, 3]) / 2) / h # y center\n y[:, 2] = (x[:, 2] - x[:, 0]) / w # width\n y[:, 3] = (x[:, 3] - x[:, 1]) / h # height\n return y" }, { "identifier": "torch_distributed_zero_first", "path": "utils/torch_utils.py", "snippet": "@contextmanager\ndef torch_distributed_zero_first(local_rank: int):\n \"\"\"\n Decorator to make all processes in distributed training wait for each local_master to do something.\n \"\"\"\n if local_rank not in [-1, 0]:\n dist.barrier(device_ids=[local_rank])\n yield\n if local_rank == 0:\n dist.barrier(device_ids=[0])" }, { "identifier": "poly_filter", "path": "utils/rboxs_utils.py", "snippet": "def poly_filter(polys, h, w): \n \"\"\"\n Filter the poly labels which is out of the image.\n Args:\n polys (array): (num, 8)\n\n Return:\n keep_masks (array): (num)\n \"\"\"\n x = polys[:, 0::2] # (num, 4) \n y = polys[:, 1::2]\n x_max = np.amax(x, axis=1) # (num)\n x_min = np.amin(x, axis=1) \n y_max = np.amax(y, axis=1)\n y_min = np.amin(y, axis=1)\n x_ctr, y_ctr = (x_max + x_min) / 2.0, (y_max + y_min) / 2.0 # (num)\n keep_masks = (x_ctr > 0) & (x_ctr < w) & (y_ctr > 0) & (y_ctr < h) \n return keep_masks" }, { "identifier": "poly2rbox", "path": "utils/rboxs_utils.py", "snippet": "def poly2rbox(polys, num_cls_thata=180, radius=6.0, use_pi=False, use_gaussian=False):\n \"\"\"\n Trans poly format to rbox format.\n Args:\n polys (array): (num_gts, [x1 y1 x2 y2 x3 y3 x4 y4]) \n num_cls_thata (int): [1], theta class num\n radius (float32): [1], window radius for Circular Smooth Label\n use_pi (bool): True θ∈[-pi/2, pi/2) , False 
θ∈[0, 180)\n\n Returns:\n use_gaussian True:\n rboxes (array): \n csl_labels (array): (num_gts, num_cls_thata)\n elif \n rboxes (array): (num_gts, [cx cy l s θ]) \n \"\"\"\n assert polys.shape[-1] == 8\n if use_gaussian:\n csl_labels = []\n rboxes = []\n for poly in polys:\n poly = np.float32(poly.reshape(4, 2))\n (x, y), (w, h), angle = cv2.minAreaRect(poly) # θ ∈ [0, 90]\n angle = -angle # θ ∈ [-90, 0]\n theta = angle / 180 * pi # 转为pi制\n\n # trans opencv format to longedge format θ ∈ [-pi/2, pi/2]\n if w != max(w, h): \n w, h = h, w\n theta += pi/2\n theta = regular_theta(theta) # limit theta ∈ [-pi/2, pi/2)\n angle = (theta * 180 / pi) + 90 # θ ∈ [0, 180)\n\n if not use_pi: # 采用angle弧度制 θ ∈ [0, 180)\n rboxes.append([x, y, w, h, angle])\n else: # 采用pi制\n rboxes.append([x, y, w, h, theta])\n if use_gaussian:\n csl_label = gaussian_label_cpu(label=angle, num_class=num_cls_thata, u=0, sig=radius)\n csl_labels.append(csl_label)\n if use_gaussian:\n return np.array(rboxes), np.array(csl_labels)\n return np.array(rboxes)" } ]
import glob
import hashlib
import json
import os
import random
import shutil
import time

import cv2
import numpy as np
import torch
import torch.nn.functional as F
import yaml
import pafy

from itertools import repeat
from multiprocessing.pool import Pool, ThreadPool
from pathlib import Path
from threading import Thread
from zipfile import ZipFile

from PIL import ExifTags, Image, ImageOps
from torch.utils.data import DataLoader, Dataset, dataloader, distributed
from tqdm import tqdm

from utils.augmentations import Albumentations, augment_hsv, copy_paste, letterbox, mixup, random_perspective
from utils.general import (LOGGER, NUM_THREADS, check_dataset, check_requirements, check_yaml, clean_str,
                           segments2boxes, xyn2xy, xywh2xyxy, xywhn2xyxy, xyxy2xywhn)
from utils.torch_utils import torch_distributed_zero_first
from utils.rboxs_utils import poly_filter, poly2rbox
10,965
else: c_num = 187 # labels_out = torch.zeros((nl, 6)) labels_out = torch.zeros((nl, c_num)) if nl: # labels_out[:, 1:] = torch.from_numpy(labels) labels_out[:, 1:] = torch.from_numpy(labels_obb) # Convert img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB img = np.ascontiguousarray(img) return torch.from_numpy(img), labels_out, self.img_files[index], shapes @staticmethod def collate_fn(batch): img, label, path, shapes = zip(*batch) # transposed; (tupe(b*tensor)) for i, l in enumerate(label): l[:, 0] = i # add target image index for build_targets() return torch.stack(img, 0), torch.cat(label, 0), path, shapes @staticmethod def collate_fn4(batch): img, label, path, shapes = zip(*batch) # transposed n = len(shapes) // 4 img4, label4, path4, shapes4 = [], [], path[:n], shapes[:n] ho = torch.tensor([[0.0, 0, 0, 1, 0, 0]]) wo = torch.tensor([[0.0, 0, 1, 0, 0, 0]]) s = torch.tensor([[1, 1, 0.5, 0.5, 0.5, 0.5]]) # scale for i in range(n): # zidane torch.zeros(16,3,720,1280) # BCHW i *= 4 if random.random() < 0.5: im = F.interpolate(img[i].unsqueeze(0).float(), scale_factor=2.0, mode='bilinear', align_corners=False)[ 0].type(img[i].type()) l = label[i] else: im = torch.cat((torch.cat((img[i], img[i + 1]), 1), torch.cat((img[i + 2], img[i + 3]), 1)), 2) l = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s img4.append(im) label4.append(l) for i, l in enumerate(label4): l[:, 0] = i # add target image index for build_targets() return torch.stack(img4, 0), torch.cat(label4, 0), path4, shapes4 # Ancillary functions -------------------------------------------------------------------------------------------------- def load_image_label(self, i): # loads 1 image from dataset index 'i', returns im, original hw, resized hw im = self.imgs[i] label = self.labels[i].copy() # labels (array): (num_gt_perimg, [cls_id, poly]) if im is None: # not cached in ram npy = self.img_npy[i] if npy and npy.exists(): # load npy im = np.load(npy) else: # read image path = self.img_files[i] im = cv2.imread(path) # BGR assert im is not None, f'Image Not Found {path}' h0, w0 = im.shape[:2] # orig hw r = self.img_size / max(h0, w0) # ratio if r != 1: # if sizes are not equal im = cv2.resize(im, (int(w0 * r), int(h0 * r)), interpolation=cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR) label[:, 1:] *= r return im, (h0, w0), im.shape[:2], label # im, hw_original, hw_resized, resized_label else: return self.imgs[i], self.img_hw0[i], self.img_hw[i], self.labels[i] # im, hw_original, hw_resized, resized_label def load_mosaic(self, index): # YOLOv5 4-mosaic loader. 
Loads 1 image + 3 random images into a 4-image mosaic labels4, segments4 = [], [] s = self.img_size yc, xc = (int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border) # mosaic center x, y indices = [index] + random.choices(self.indices, k=3) # 3 additional image indices random.shuffle(indices) for i, index in enumerate(indices): # Load image img, _, (h, w), img_label = load_image_label(self, index) # place img in img4 if i == 0: # top left img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image) x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image) elif i == 1: # top right x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h elif i == 2: # bottom left x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h) x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h) elif i == 3: # bottom right x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h) x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h) img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax] padw = x1a - x1b padh = y1a - y1b # Labels labels, segments = img_label.copy(), self.segments[index].copy() # labels (array): (num_gt_perimg, [cls_id, poly]) if labels.size: # labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format labels[:, [1, 3, 5, 7]] = img_label[:, [1, 3, 5, 7]] + padw labels[:, [2, 4, 6, 8]] = img_label[:, [2, 4, 6, 8]] + padh segments = [xyn2xy(x, w, h, padw, padh) for x in segments] labels4.append(labels) segments4.extend(segments) # Concat/clip labels labels4 = np.concatenate(labels4, 0) # for x in (labels4[:, 1:], *segments4): for x in (segments4): np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective() h_filter = 2 * s w_filter = 2 * s
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ Dataloaders and dataset utils """ # Parameters HELP_URL = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data' IMG_FORMATS = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp', 'mpo'] # acceptable image suffixes VID_FORMATS = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv'] # acceptable video suffixes WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) # DPP # Get orientation exif tag for orientation in ExifTags.TAGS.keys(): if ExifTags.TAGS[orientation] == 'Orientation': break def get_hash(paths): # Returns a single hash value of a list of paths (files or dirs) size = sum(os.path.getsize(p) for p in paths if os.path.exists(p)) # sizes h = hashlib.md5(str(size).encode()) # hash sizes h.update(''.join(paths).encode()) # hash paths return h.hexdigest() # return hash def exif_size(img): # Returns exif-corrected PIL size s = img.size # (width, height) try: rotation = dict(img._getexif().items())[orientation] if rotation == 6: # rotation 270 s = (s[1], s[0]) elif rotation == 8: # rotation 90 s = (s[1], s[0]) except: pass return s def exif_transpose(image): """ Transpose a PIL image accordingly if it has an EXIF Orientation tag. Inplace version of https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageOps.py exif_transpose() :param image: The image to transpose. :return: An image. """ exif = image.getexif() orientation = exif.get(0x0112, 1) # default 1 if orientation > 1: method = {2: Image.FLIP_LEFT_RIGHT, 3: Image.ROTATE_180, 4: Image.FLIP_TOP_BOTTOM, 5: Image.TRANSPOSE, 6: Image.ROTATE_270, 7: Image.TRANSVERSE, 8: Image.ROTATE_90, }.get(orientation) if method is not None: image = image.transpose(method) del exif[0x0112] image.info["exif"] = exif.tobytes() return image def create_dataloader(path, imgsz, batch_size, stride, names, single_cls=False, hyp=None, augment=False, cache=False, pad=0.0, rect=False, rank=-1, workers=8, image_weights=False, quad=False, prefix='', shuffle=False): if rect and shuffle: LOGGER.warning('WARNING: --rect is incompatible with DataLoader shuffle, setting shuffle=False') shuffle = False with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP dataset = LoadImagesAndLabels(path, names, imgsz, batch_size, augment=augment, # augmentation hyp=hyp, # hyperparameters rect=rect, # rectangular batches cache_images=cache, single_cls=single_cls, stride=int(stride), pad=pad, image_weights=image_weights, prefix=prefix) batch_size = min(batch_size, len(dataset)) nw = min([os.cpu_count() // WORLD_SIZE, batch_size if batch_size > 1 else 0, workers]) # number of workers sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle) loader = DataLoader if image_weights else InfiniteDataLoader # only DataLoader allows for attribute updates return loader(dataset, batch_size=batch_size, shuffle=shuffle and sampler is None, num_workers=nw, sampler=sampler, pin_memory=True, collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn), dataset class InfiniteDataLoader(dataloader.DataLoader): """ Dataloader that reuses workers Uses same syntax as vanilla DataLoader """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler)) self.iterator = super().__iter__() def __len__(self): return len(self.batch_sampler.sampler) def __iter__(self): for i in range(len(self)): yield next(self.iterator) class _RepeatSampler: """ Sampler that repeats forever 
Args: sampler (Sampler) """ def __init__(self, sampler): self.sampler = sampler def __iter__(self): while True: yield from iter(self.sampler) class LoadImages: # YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4` def __init__(self, path, img_size=640, stride=32, auto=True): p = str(Path(path).resolve()) # os-agnostic absolute path if '*' in p: files = sorted(glob.glob(p, recursive=True)) # glob elif os.path.isdir(p): files = sorted(glob.glob(os.path.join(p, '*.*'))) # dir elif os.path.isfile(p): files = [p] # files else: raise Exception(f'ERROR: {p} does not exist') images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS] videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS] ni, nv = len(images), len(videos) self.img_size = img_size self.stride = stride self.files = images + videos self.nf = ni + nv # number of files self.video_flag = [False] * ni + [True] * nv self.mode = 'image' self.auto = auto if any(videos): self.new_video(videos[0]) # new video else: self.cap = None assert self.nf > 0, f'No images or videos found in {p}. ' \ f'Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}' def __iter__(self): self.count = 0 return self def __next__(self): if self.count == self.nf: raise StopIteration path = self.files[self.count] if self.video_flag[self.count]: # Read video self.mode = 'video' ret_val, img0 = self.cap.read() while not ret_val: self.count += 1 self.cap.release() if self.count == self.nf: # last video raise StopIteration else: path = self.files[self.count] self.new_video(path) ret_val, img0 = self.cap.read() self.frame += 1 s = f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: ' else: # Read image self.count += 1 img0 = cv2.imread(path) # BGR assert img0 is not None, f'Image Not Found {path}' s = f'image {self.count}/{self.nf} {path}: ' # Padded resize img = letterbox(img0, self.img_size, stride=self.stride, auto=self.auto)[0] # Convert img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB img = np.ascontiguousarray(img) return path, img, img0, self.cap, s def new_video(self, path): self.frame = 0 self.cap = cv2.VideoCapture(path) self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT)) def __len__(self): return self.nf # number of files class LoadWebcam: # for inference # YOLOv5 local webcam dataloader, i.e. `python detect.py --source 0` def __init__(self, pipe='0', img_size=640, stride=32): self.img_size = img_size self.stride = stride self.pipe = eval(pipe) if pipe.isnumeric() else pipe self.cap = cv2.VideoCapture(self.pipe) # video capture object self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size def __iter__(self): self.count = -1 return self def __next__(self): self.count += 1 if cv2.waitKey(1) == ord('q'): # q to quit self.cap.release() cv2.destroyAllWindows() raise StopIteration # Read frame ret_val, img0 = self.cap.read() img0 = cv2.flip(img0, 1) # flip left-right # Print assert ret_val, f'Camera Error {self.pipe}' img_path = 'webcam.jpg' s = f'webcam {self.count}: ' # Padded resize img = letterbox(img0, self.img_size, stride=self.stride)[0] # Convert img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB img = np.ascontiguousarray(img) return img_path, img, img0, None, s def __len__(self): return 0 class LoadStreams: # YOLOv5 streamloader, i.e. 
`python detect.py --source 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP streams` def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True): self.mode = 'stream' self.img_size = img_size self.stride = stride if os.path.isfile(sources): with open(sources) as f: sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())] else: sources = [sources] n = len(sources) self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n self.sources = [clean_str(x) for x in sources] # clean source names for later self.auto = auto for i, s in enumerate(sources): # index, source # Start thread to read frames from video stream st = f'{i + 1}/{n}: {s}... ' if 'youtube.com/' in s or 'youtu.be/' in s: # if source is YouTube video check_requirements(('pafy', 'youtube_dl')) s = pafy.new(s).getbest(preftype="mp4").url # YouTube URL s = eval(s) if s.isnumeric() else s # i.e. s = '0' local webcam cap = cv2.VideoCapture(s) assert cap.isOpened(), f'{st}Failed to open {s}' w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) self.fps[i] = max(cap.get(cv2.CAP_PROP_FPS) % 100, 0) or 30.0 # 30 FPS fallback self.frames[i] = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float('inf') # infinite stream fallback _, self.imgs[i] = cap.read() # guarantee first frame self.threads[i] = Thread(target=self.update, args=([i, cap, s]), daemon=True) LOGGER.info(f"{st} Success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)") self.threads[i].start() LOGGER.info('') # newline # check for common shapes s = np.stack([letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0].shape for x in self.imgs]) self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal if not self.rect: LOGGER.warning('WARNING: Stream shapes differ. 
For optimal performance supply similarly-shaped streams.') def update(self, i, cap, stream): # Read stream `i` frames in daemon thread n, f, read = 0, self.frames[i], 1 # frame number, frame array, inference every 'read' frame while cap.isOpened() and n < f: n += 1 # _, self.imgs[index] = cap.read() cap.grab() if n % read == 0: success, im = cap.retrieve() if success: self.imgs[i] = im else: LOGGER.warning('WARNING: Video stream unresponsive, please check your IP camera connection.') self.imgs[i] = np.zeros_like(self.imgs[i]) cap.open(stream) # re-open stream if signal was lost time.sleep(1 / self.fps[i]) # wait time def __iter__(self): self.count = -1 return self def __next__(self): self.count += 1 if not all(x.is_alive() for x in self.threads) or cv2.waitKey(1) == ord('q'): # q to quit cv2.destroyAllWindows() raise StopIteration # Letterbox img0 = self.imgs.copy() img = [letterbox(x, self.img_size, stride=self.stride, auto=self.rect and self.auto)[0] for x in img0] # Stack img = np.stack(img, 0) # Convert img = img[..., ::-1].transpose((0, 3, 1, 2)) # BGR to RGB, BHWC to BCHW img = np.ascontiguousarray(img) return self.sources, img, img0, None, '' def __len__(self): return len(self.sources) # 1E12 frames = 32 streams at 30 FPS for 30 years def img2label_paths(img_paths): # Define label paths as a function of image paths sa, sb = os.sep + 'images' + os.sep, os.sep + 'labelTxt' + os.sep # /images/, /labels/ substrings return [sb.join(x.rsplit(sa, 1)).rsplit('.', 1)[0] + '.txt' for x in img_paths] class LoadImagesAndLabels(Dataset): # YOLOv5 train_loader/val_loader, loads images and labels for training and validation cache_version = 0.6 # dataset labels *.cache version def __init__(self, path, cls_names, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False, cache_images=False, single_cls=False, stride=32, pad=0.0, prefix=''): """ Returns: Dataset.labels (list): n_imgs * array(num_gt_perimg, [cls_id, poly]) Dataset.shapes (array): (n_imgs, [ori_img_width, ori_img_height]) Dataset.batch_shapes (array): (n_batches, [h_rect, w_rect]) """ self.img_size = img_size self.augment = augment self.hyp = hyp self.image_weights = image_weights self.rect = False if image_weights else rect self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training) self.mosaic_border = [-img_size // 2, -img_size // 2] self.stride = stride self.path = path self.albumentations = Albumentations() if augment else None self.cls_names = cls_names try: f = [] # image files for p in path if isinstance(path, list) else [path]: p = Path(p) # os-agnostic if p.is_dir(): # dir f += glob.glob(str(p / '**' / '*.*'), recursive=True) # f = list(p.rglob('*.*')) # pathlib elif p.is_file(): # file with open(p) as t: t = t.read().strip().splitlines() parent = str(p.parent) + os.sep f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path # f += [p.parent / x.lstrip(os.sep) for x in t] # local to global path (pathlib) else: raise Exception(f'{prefix}{p} does not exist') self.img_files = sorted(x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in IMG_FORMATS) # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in IMG_FORMATS]) # pathlib assert self.img_files, f'{prefix}No images found' except Exception as e: raise Exception(f'{prefix}Error loading data from {path}: {e}\nSee {HELP_URL}') # Check cache self.label_files = img2label_paths(self.img_files) # labels cache_path = (p if p.is_file() else 
Path(self.label_files[0]).parent).with_suffix('.cache') try: cache, exists = np.load(cache_path, allow_pickle=True).item(), True # load dict assert cache['version'] == self.cache_version # same version assert cache['hash'] == get_hash(self.label_files + self.img_files) # same hash except: cache, exists = self.cache_labels(cache_path, prefix), False # cache # Display cache nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupted, total if exists: d = f"Scanning '{cache_path}' images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted" tqdm(None, desc=prefix + d, total=n, initial=n) # display cache results if cache['msgs']: LOGGER.info('\n'.join(cache['msgs'])) # display warnings assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {HELP_URL}' # Read cache [cache.pop(k) for k in ('hash', 'version', 'msgs')] # remove items labels, shapes, self.segments = zip(*cache.values()) self.labels = list(labels) # labels(list[array]): n_imgs * array(num_gt_perimg, [cls_id, poly]) self.shapes = np.array(shapes, dtype=np.float64) # img_ori shape self.img_files = list(cache.keys()) # update self.label_files = img2label_paths(cache.keys()) # update n = len(shapes) # number of images bi = np.floor(np.arange(n) / batch_size).astype(int) # batch index nb = bi[-1] + 1 # number of batches self.batch = bi # batch index of image self.n = n self.indices = range(n) # Update labels include_class = [] # filter labels to include only these classes (optional) include_class_array = np.array(include_class).reshape(1, -1) for i, (label, segment) in enumerate(zip(self.labels, self.segments)): if include_class: j = (label[:, 0:1] == include_class_array).any(1) self.labels[i] = label[j] if segment: self.segments[i] = segment[j] if single_cls: # single-class training, merge all classes into 0 self.labels[i][:, 0] = 0 if segment: self.segments[i][:, 0] = 0 # Rectangular Training if self.rect: # Sort by aspect ratio s = self.shapes # wh ar = s[:, 1] / s[:, 0] # aspect ratio irect = ar.argsort() self.img_files = [self.img_files[i] for i in irect] self.label_files = [self.label_files[i] for i in irect] self.labels = [self.labels[i] for i in irect] self.shapes = s[irect] # wh ar = ar[irect] # Set training image shapes shapes = [[1, 1]] * nb for i in range(nb): ari = ar[bi == i] mini, maxi = ari.min(), ari.max() if maxi < 1: # batch图像高宽比均小于1时, shape=[h/w, 1] = [h_ratio, w_ratio] shapes[i] = [maxi, 1] elif mini > 1: # batch图像高宽比均大于1时, shape=[1, w/h] = [h_ratio, w_ratio] shapes[i] = [1, 1 / mini] self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(int) * stride # (nb, [h_rect, w_rect]) # Cache images into memory for faster training (WARNING: large datasets may exceed system RAM) self.imgs, self.img_npy = [None] * n, [None] * n if cache_images: if cache_images == 'disk': self.im_cache_dir = Path(Path(self.img_files[0]).parent.as_posix() + '_npy') self.img_npy = [self.im_cache_dir / Path(f).with_suffix('.npy').name for f in self.img_files] self.im_cache_dir.mkdir(parents=True, exist_ok=True) gb = 0 # Gigabytes of cached images self.img_hw0, self.img_hw = [None] * n, [None] * n results = ThreadPool(NUM_THREADS).imap(lambda x: load_image_label(*x), zip(repeat(self), range(n))) pbar = tqdm(enumerate(results), total=n) for i, x in pbar: if cache_images == 'disk': if not self.img_npy[i].exists(): np.save(self.img_npy[i].as_posix(), x[0]) gb += self.img_npy[i].stat().st_size else: self.imgs[i], self.img_hw0[i], self.img_hw[i], 
self.labels[i] = x # im, hw_orig, hw_resized, label_resized = load_image_label(self, i) gb += self.imgs[i].nbytes pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB {cache_images})' pbar.close() def cache_labels(self, path=Path('./labels.cache'), prefix=''): # Cache dataset labels, check images and read shapes x = {} # dict nm, nf, ne, nc, msgs = 0, 0, 0, 0, [] # number missing, found, empty, corrupt, messages desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels..." with Pool(NUM_THREADS) as pool: pbar = tqdm(pool.imap(verify_image_label, zip(self.img_files, self.label_files, repeat(prefix), repeat(self.cls_names))), desc=desc, total=len(self.img_files)) for im_file, l, shape, segments, nm_f, nf_f, ne_f, nc_f, msg in pbar: nm += nm_f nf += nf_f ne += ne_f nc += nc_f if im_file: x[im_file] = [l, shape, segments] if msg: msgs.append(msg) pbar.desc = f"{desc}{nf} found, {nm} missing, {ne} empty, {nc} corrupted" pbar.close() if msgs: LOGGER.info('\n'.join(msgs)) if nf == 0: LOGGER.warning(f'{prefix}WARNING: No labels found in {path}. See {HELP_URL}') x['hash'] = get_hash(self.label_files + self.img_files) x['results'] = nf, nm, ne, nc, len(self.img_files) x['msgs'] = msgs # warnings x['version'] = self.cache_version # cache version try: np.save(path, x) # save cache for next time path.with_suffix('.cache.npy').rename(path) # remove .npy suffix LOGGER.info(f'{prefix}New cache created: {path}') except Exception as e: LOGGER.warning(f'{prefix}WARNING: Cache directory {path.parent} is not writeable: {e}') # not writeable return x def __len__(self): return len(self.img_files) # def __iter__(self): # self.count = -1 # print('ran dataset iter') # #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF) # return self def __getitem__(self, index): ''' Augment the [clsid poly] labels and trans label format to rbox. 
Returns: img (tensor): (3, height, width), RGB labels_out (tensor): (n, [None clsid cx cy l s theta gaussian_θ_labels]) θ∈[-pi/2, pi/2) img_file (str): img_dir shapes : None or [(h_raw, w_raw), (hw_ratios, wh_paddings)], for COCO mAP rescaling ''' index = self.indices[index] # linear, shuffled, or image_weights hyp = self.hyp mosaic = self.mosaic and random.random() < hyp['mosaic'] if mosaic: # Load mosaic img, labels = load_mosaic(self, index) shapes = None # MixUp augmentation if random.random() < hyp['mixup']: img, labels = mixup(img, labels, *load_mosaic(self, random.randint(0, self.n - 1))) else: # Load image and label img, (h0, w0), (h, w), img_label = load_image_label(self, index) # Letterbox shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape [h_rect, w_rect] img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment) # ratio[w_ratio, h_ratio], pad[w_padding, h_padding] shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling [(h_raw, w_raw), (hw_ratios, wh_paddings)] labels = img_label.copy() # labels (array): (num_gt_perimg, [cls_id, poly]) if labels.size: # labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1]) labels[:, [1, 3, 5, 7]] = img_label[:, [1, 3, 5, 7]] * ratio[0] + pad[0] labels[:, [2, 4, 6, 8]] = img_label[:, [2, 4, 6, 8]] * ratio[1] + pad[1] if self.augment: img, labels = random_perspective(img, labels, degrees=hyp['degrees'], translate=hyp['translate'], scale=hyp['scale'], shear=hyp['shear'], perspective=hyp['perspective']) nl = len(labels) # number of labels # if nl: # labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0], clip=True, eps=1E-3) if self.augment: # Albumentations # img, labels = self.albumentations(img, labels) # nl = len(labels) # update after albumentations # HSV color-space augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v']) img_h, img_w = img.shape[0], img.shape[1] # Flip up-down if random.random() < hyp['flipud']: img = np.flipud(img) if nl: # labels[:, 2] = 1 - labels[:, 2] labels[:, 2::2] = img_h - labels[:, 2::2] - 1 # Flip left-right if random.random() < hyp['fliplr']: img = np.fliplr(img) if nl: # labels[:, 1] = 1 - labels[:, 1] labels[:, 1::2] = img_w - labels[:, 1::2] - 1 # Cutouts # labels = cutout(img, labels, p=0.5) # nl = len(labels) # update after cutout if nl: # *[clsid poly] to *[clsid cx cy l s theta gaussian_θ_labels] θ∈[-pi/2, pi/2) non-normalized rboxes, csl_labels = poly2rbox(polys=labels[:, 1:], num_cls_thata=hyp['cls_theta'] if hyp else 180, radius=hyp['csl_radius'] if hyp else 6.0, use_pi=True, use_gaussian=True) labels_obb = np.concatenate((labels[:, :1], rboxes, csl_labels), axis=1) labels_mask = (rboxes[:, 0] >= 0) & (rboxes[:, 0] < img.shape[1]) \ & (rboxes[:, 1] >= 0) & (rboxes[:, 0] < img.shape[0]) \ & (rboxes[:, 2] > 5) | (rboxes[:, 3] > 5) labels_obb = labels_obb[labels_mask] nl = len(labels_obb) # update after filter if hyp: c_num = 7 + hyp['cls_theta'] # [index_of_batch clsid cx cy l s theta gaussian_θ_labels] else: c_num = 187 # labels_out = torch.zeros((nl, 6)) labels_out = torch.zeros((nl, c_num)) if nl: # labels_out[:, 1:] = torch.from_numpy(labels) labels_out[:, 1:] = torch.from_numpy(labels_obb) # Convert img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB img = np.ascontiguousarray(img) return torch.from_numpy(img), labels_out, self.img_files[index], shapes @staticmethod def collate_fn(batch): img, label, path, shapes = zip(*batch) # 
transposed; (tupe(b*tensor)) for i, l in enumerate(label): l[:, 0] = i # add target image index for build_targets() return torch.stack(img, 0), torch.cat(label, 0), path, shapes @staticmethod def collate_fn4(batch): img, label, path, shapes = zip(*batch) # transposed n = len(shapes) // 4 img4, label4, path4, shapes4 = [], [], path[:n], shapes[:n] ho = torch.tensor([[0.0, 0, 0, 1, 0, 0]]) wo = torch.tensor([[0.0, 0, 1, 0, 0, 0]]) s = torch.tensor([[1, 1, 0.5, 0.5, 0.5, 0.5]]) # scale for i in range(n): # zidane torch.zeros(16,3,720,1280) # BCHW i *= 4 if random.random() < 0.5: im = F.interpolate(img[i].unsqueeze(0).float(), scale_factor=2.0, mode='bilinear', align_corners=False)[ 0].type(img[i].type()) l = label[i] else: im = torch.cat((torch.cat((img[i], img[i + 1]), 1), torch.cat((img[i + 2], img[i + 3]), 1)), 2) l = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s img4.append(im) label4.append(l) for i, l in enumerate(label4): l[:, 0] = i # add target image index for build_targets() return torch.stack(img4, 0), torch.cat(label4, 0), path4, shapes4 # Ancillary functions -------------------------------------------------------------------------------------------------- def load_image_label(self, i): # loads 1 image from dataset index 'i', returns im, original hw, resized hw im = self.imgs[i] label = self.labels[i].copy() # labels (array): (num_gt_perimg, [cls_id, poly]) if im is None: # not cached in ram npy = self.img_npy[i] if npy and npy.exists(): # load npy im = np.load(npy) else: # read image path = self.img_files[i] im = cv2.imread(path) # BGR assert im is not None, f'Image Not Found {path}' h0, w0 = im.shape[:2] # orig hw r = self.img_size / max(h0, w0) # ratio if r != 1: # if sizes are not equal im = cv2.resize(im, (int(w0 * r), int(h0 * r)), interpolation=cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR) label[:, 1:] *= r return im, (h0, w0), im.shape[:2], label # im, hw_original, hw_resized, resized_label else: return self.imgs[i], self.img_hw0[i], self.img_hw[i], self.labels[i] # im, hw_original, hw_resized, resized_label def load_mosaic(self, index): # YOLOv5 4-mosaic loader. 
Loads 1 image + 3 random images into a 4-image mosaic labels4, segments4 = [], [] s = self.img_size yc, xc = (int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border) # mosaic center x, y indices = [index] + random.choices(self.indices, k=3) # 3 additional image indices random.shuffle(indices) for i, index in enumerate(indices): # Load image img, _, (h, w), img_label = load_image_label(self, index) # place img in img4 if i == 0: # top left img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image) x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image) elif i == 1: # top right x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h elif i == 2: # bottom left x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h) x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h) elif i == 3: # bottom right x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h) x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h) img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax] padw = x1a - x1b padh = y1a - y1b # Labels labels, segments = img_label.copy(), self.segments[index].copy() # labels (array): (num_gt_perimg, [cls_id, poly]) if labels.size: # labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format labels[:, [1, 3, 5, 7]] = img_label[:, [1, 3, 5, 7]] + padw labels[:, [2, 4, 6, 8]] = img_label[:, [2, 4, 6, 8]] + padh segments = [xyn2xy(x, w, h, padw, padh) for x in segments] labels4.append(labels) segments4.extend(segments) # Concat/clip labels labels4 = np.concatenate(labels4, 0) # for x in (labels4[:, 1:], *segments4): for x in (segments4): np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective() h_filter = 2 * s w_filter = 2 * s
labels_mask = poly_filter(polys=labels4[:, 1:].copy(), h=h_filter, w=w_filter)
18
2023-10-31 06:06:41+00:00
16k
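As a quick illustration of the rotated-box utilities listed in the context of the row above, the following is a minimal sketch that re-implements only the center-based filtering idea from the poly_filter snippet (utils/rboxs_utils.py). It assumes just NumPy is available; the sample polygon coordinates are invented for illustration and are not taken from the dataset row.

import numpy as np

def poly_filter(polys, h, w):
    # Keep polygons of shape (num, 8) whose center point lies inside an h x w image,
    # mirroring the utils/rboxs_utils.poly_filter snippet shown in the context above.
    x = polys[:, 0::2]  # (num, 4) x coordinates
    y = polys[:, 1::2]  # (num, 4) y coordinates
    x_ctr = (np.amax(x, axis=1) + np.amin(x, axis=1)) / 2.0
    y_ctr = (np.amax(y, axis=1) + np.amin(y, axis=1)) / 2.0
    return (x_ctr > 0) & (x_ctr < w) & (y_ctr > 0) & (y_ctr < h)

if __name__ == "__main__":
    # Two toy 8-point polygons: the first is centered inside a 640x640 image, the second is not.
    polys = np.array([
        [100, 100, 200, 100, 200, 200, 100, 200],
        [700, 700, 800, 700, 800, 800, 700, 800],
    ], dtype=np.float32)
    print(poly_filter(polys, h=640, w=640))  # expected output: [ True False]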
DataCanvasIO/LMS
lms/runtime/prune/llm_pruner/LLMPruner/peft/peft_model.py
[ { "identifier": "LoraModel", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/tuners/lora.py", "snippet": "class LoraModel(torch.nn.Module):\n \"\"\"\n Creates Low Rank Adapter (Lora) model from a pretrained transformers model.\n\n Args:\n model ([`~transformers.PreTrainedModel`]): The model to be adapted.\n config ([`LoraConfig`]): The configuration of the Lora model.\n\n Returns:\n `torch.nn.Module`: The Lora model.\n\n Example:\n\n ```py\n >>> from transformers import AutoModelForSeq2SeqLM, LoraConfig\n >>> from peft import LoraModel, LoraConfig\n\n >>> config = LoraConfig(\n ... peft_type=\"LORA\",\n ... task_type=\"SEQ_2_SEQ_LM\",\n ... r=8,\n ... lora_alpha=32,\n ... target_modules=[\"q\", \"v\"],\n ... lora_dropout=0.01,\n ... )\n\n >>> model = AutoModelForSeq2SeqLM.from_pretrained(\"t5-base\")\n >>> lora_model = LoraModel(config, model)\n ```\n\n **Attributes**:\n - **model** ([`~transformers.PreTrainedModel`]) -- The model to be adapted.\n - **peft_config** ([`LoraConfig`]): The configuration of the Lora model.\n \"\"\"\n\n def __init__(self, model, config, adapter_name):\n super().__init__()\n self.model = model\n self.forward = self.model.forward\n self.peft_config = config\n self.add_adapter(adapter_name, self.peft_config[adapter_name])\n\n def add_adapter(self, adapter_name, config=None):\n if config is not None:\n model_config = self.model.config.to_dict() if hasattr(self.model.config, \"to_dict\") else self.model.config\n config = self._prepare_lora_config(config, model_config)\n self.peft_config[adapter_name] = config\n self._find_and_replace(adapter_name)\n if len(self.peft_config) > 1 and self.peft_config[adapter_name].bias != \"none\":\n raise ValueError(\n \"LoraModel supports only 1 adapter with bias. When using multiple adapters, set bias to 'none' for all adapters.\"\n )\n mark_only_lora_as_trainable(self.model, self.peft_config[adapter_name].bias)\n if self.peft_config[adapter_name].inference_mode:\n _freeze_adapter(self.model, adapter_name)\n\n def _find_and_replace(self, adapter_name):\n lora_config = self.peft_config[adapter_name]\n loaded_in_8bit = getattr(self.model, \"is_loaded_in_8bit\", False)\n if loaded_in_8bit and not is_bnb_available():\n raise ImportError(\n \"To use Lora with 8-bit quantization, please install the `bitsandbytes` package. 
\"\n \"You can install it with `pip install bitsandbytes`.\"\n )\n is_target_modules_in_base_model = False\n kwargs = {\n \"r\": lora_config.r,\n \"lora_alpha\": lora_config.lora_alpha,\n \"lora_dropout\": lora_config.lora_dropout,\n \"fan_in_fan_out\": lora_config.fan_in_fan_out,\n \"init_lora_weights\": lora_config.init_lora_weights,\n }\n key_list = [key for key, _ in self.model.named_modules()]\n for key in key_list:\n if isinstance(lora_config.target_modules, str):\n target_module_found = re.fullmatch(lora_config.target_modules, key)\n else:\n target_module_found = any(key.endswith(target_key) for target_key in lora_config.target_modules)\n if target_module_found:\n if not is_target_modules_in_base_model:\n is_target_modules_in_base_model = True\n parent, target, target_name = _get_submodules(self.model, key)\n bias = target.bias is not None\n if isinstance(target, LoraLayer):\n target.update_layer(\n adapter_name,\n lora_config.r,\n lora_config.lora_alpha,\n lora_config.lora_dropout,\n lora_config.init_lora_weights,\n )\n else:\n if loaded_in_8bit and isinstance(target, bnb.nn.Linear8bitLt):\n eightbit_kwargs = kwargs.copy()\n eightbit_kwargs.update(\n {\n \"has_fp16_weights\": target.state.has_fp16_weights,\n \"memory_efficient_backward\": target.state.memory_efficient_backward,\n \"threshold\": target.state.threshold,\n \"index\": target.index,\n }\n )\n new_module = Linear8bitLt(\n adapter_name, target.in_features, target.out_features, bias=bias, **eightbit_kwargs\n )\n else:\n if isinstance(target, torch.nn.Linear):\n in_features, out_features = target.in_features, target.out_features\n if kwargs[\"fan_in_fan_out\"]:\n warnings.warn(\n \"fan_in_fan_out is set to True but the target module is `torch.nn.Linear`. \"\n \"Setting fan_in_fan_out to False.\"\n )\n kwargs[\"fan_in_fan_out\"] = lora_config.fan_in_fan_out = False\n elif isinstance(target, Conv1D):\n in_features, out_features = (\n target.weight.ds_shape if hasattr(target.weight, \"ds_shape\") else target.weight.shape\n )\n if not kwargs[\"fan_in_fan_out\"]:\n warnings.warn(\n \"fan_in_fan_out is set to False but the target module is `Conv1D`. \"\n \"Setting fan_in_fan_out to True.\"\n )\n kwargs[\"fan_in_fan_out\"] = lora_config.fan_in_fan_out = True\n else:\n raise ValueError(\n f\"Target module {target} is not supported. \"\n f\"Currently, only `torch.nn.Linear` and `Conv1D` are supported.\"\n )\n new_module = Linear(adapter_name, in_features, out_features, bias=bias, **kwargs)\n\n self._replace_module(parent, target_name, new_module, target)\n if not is_target_modules_in_base_model:\n raise ValueError(\n f\"Target modules {lora_config.target_modules} not found in the base model. 
\"\n f\"Please check the target modules and try again.\"\n )\n\n def _replace_module(self, parent_module, child_name, new_module, old_module):\n setattr(parent_module, child_name, new_module)\n new_module.weight = old_module.weight\n if old_module.bias is not None:\n new_module.bias = old_module.bias\n if getattr(old_module, \"state\", None) is not None:\n new_module.state = old_module.state\n new_module.to(old_module.weight.device)\n\n # dispatch to correct device\n for name, module in new_module.named_modules():\n if \"lora_\" in name:\n module.to(old_module.weight.device)\n\n def __getattr__(self, name: str):\n \"\"\"Forward missing attributes to the wrapped module.\"\"\"\n try:\n return super().__getattr__(name) # defer to nn.Module's logic\n except AttributeError:\n return getattr(self.model, name)\n\n def get_peft_config_as_dict(self, inference: bool = False):\n config_dict = {}\n for key, value in self.peft_config.items():\n config = {k: v.value if isinstance(v, Enum) else v for k, v in asdict(value).items()}\n if inference:\n config[\"inference_mode\"] = True\n config_dict[key] = config\n return config\n\n def _set_adapter_layers(self, enabled=True):\n for module in self.model.modules():\n if isinstance(module, LoraLayer):\n module.disable_adapters = False if enabled else True\n\n def enable_adapter_layers(self):\n self._set_adapter_layers(enabled=True)\n\n def disable_adapter_layers(self):\n self._set_adapter_layers(enabled=False)\n\n def set_adapter(self, adapter_name):\n for module in self.model.modules():\n if isinstance(module, LoraLayer):\n if module.merged:\n warnings.warn(\"Adapter cannot be set when the model is merged. Unmerging the model first.\")\n module.unmerge()\n module.active_adapter = adapter_name\n\n def merge_adapter(self):\n for module in self.model.modules():\n if isinstance(module, LoraLayer):\n module.merge()\n\n def unmerge_adapter(self):\n for module in self.model.modules():\n if isinstance(module, LoraLayer):\n module.unmerge()\n\n @staticmethod\n def _prepare_lora_config(peft_config, model_config):\n if peft_config.target_modules is None:\n if model_config[\"model_type\"] not in TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING:\n raise ValueError(\"Please specify `target_modules` in `peft_config`\")\n peft_config.target_modules = TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING[model_config[\"model_type\"]]\n if peft_config.inference_mode:\n peft_config.merge_weights = True\n return peft_config\n\n def merge_and_unload(self):\n r\"\"\"\n This method merges the LoRa layers into the base model. 
This is needed if someone wants to use the base model\n as a standalone model.\n \"\"\"\n if getattr(self.config, \"model_type\", None) == \"gpt2\":\n raise ValueError(\"GPT2 models are not supported for merging LORA layers\")\n\n if getattr(self.model, \"is_loaded_in_8bit\", False):\n raise ValueError(\"Cannot merge LORA layers when the model is loaded in 8-bit mode\")\n\n key_list = [key for key, _ in self.model.named_modules() if \"lora\" not in key]\n for key in key_list:\n try:\n parent, target, target_name = _get_submodules(self.model, key)\n except AttributeError:\n continue\n if isinstance(target, LoraLayer):\n bias = target.bias is not None\n new_module = torch.nn.Linear(target.in_features, target.out_features, bias=bias)\n target.merge()\n self._replace_module(parent, target_name, new_module, target)\n\n # save any additional trainable modules part of `modules_to_save`\n if isinstance(target, ModulesToSaveWrapper):\n setattr(parent, target_name, target.modules_to_save[target.active_adapter])\n\n return self.model\n\n def add_weighted_adapter(self, adapters, weights, adapter_name):\n if len({self.peft_config[adapter].r for adapter in adapters}) != 1:\n raise ValueError(\"All adapters must have the same r value\")\n self.peft_config[adapter_name] = self.peft_config[adapters[0]]\n self.peft_config[adapter_name].lora_alpha = self.peft_config[adapters[0]].r\n self._find_and_replace(adapter_name)\n mark_only_lora_as_trainable(self.model, self.peft_config[adapter_name].bias)\n _freeze_adapter(self.model, adapter_name)\n key_list = [key for key, _ in self.model.named_modules() if \"lora\" not in key]\n for key in key_list:\n _, target, _ = _get_submodules(self.model, key)\n if isinstance(target, LoraLayer):\n target.lora_A[adapter_name].weight.data = target.lora_A[adapter_name].weight.data * 0.0\n target.lora_B[adapter_name].weight.data = target.lora_B[adapter_name].weight.data * 0.0\n for adapter, weight in zip(adapters, weights):\n if adapter not in target.lora_A:\n continue\n target.lora_A[adapter_name].weight.data += (\n target.lora_A[adapter].weight.data * weight * target.scaling[adapter]\n )\n target.lora_B[adapter_name].weight.data += target.lora_B[adapter].weight.data * weight" }, { "identifier": "AdaLoraModel", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/tuners/adalora.py", "snippet": "class AdaLoraModel(LoraModel):\n \"\"\"\n Creates AdaLoRA (Adaptive LoRA) model from a pretrained transformers model. 
Paper:\n https://openreview.net/pdf?id=lq62uWRJjiY\n\n Args:\n model ([`transformers.PreTrainedModel`]): The model to be adapted.\n config ([`AdaLoraConfig`]): The configuration of the AdaLora model.\n\n Returns:\n `torch.nn.Module`: The AdaLora model.\n\n Example::\n\n >>> from transformers import AutoModelForSeq2SeqLM, LoraConfig >>> from peft import AdaLoraModel, AdaLoraConfig\n >>> config = AdaLoraConfig(\n peft_type=\"ADALORA\", task_type=\"SEQ_2_SEQ_LM\", r=8, lora_alpha=32, target_modules=[\"q\", \"v\"],\n lora_dropout=0.01,\n )\n >>> model = AutoModelForSeq2SeqLM.from_pretrained(\"t5-base\") >>> model = AdaLoraModel(config, model)\n\n **Attributes**:\n - **model** ([`transformers.PreTrainedModel`]) -- The model to be adapted.\n - **peft_config** ([`AdaLoraConfig`]): The configuration of the AdaLora model.\n \"\"\"\n\n def __init__(self, model, config, adapter_name):\n nn.Module.__init__(self)\n self.model = model\n self.peft_config = config\n self.add_adapter(adapter_name, self.peft_config[adapter_name])\n\n def add_adapter(self, adapter_name, config=None):\n if config is not None:\n model_config = self.model.config.to_dict() if hasattr(self.model.config, \"to_dict\") else self.model.config\n config = self._prepare_adalora_config(config, model_config)\n self.peft_config[adapter_name] = config\n self._find_and_replace(adapter_name)\n if len(self.peft_config) > 1 and self.peft_config[adapter_name].bias != \"none\":\n raise ValueError(\n \"AdaLoraModel supports only 1 adapter with bias. When using multiple adapters, set bias to 'none' for all adapters.\"\n )\n traininable_mode_counter = 0\n for config in self.peft_config.values():\n if not config.inference_mode:\n traininable_mode_counter += 1\n\n if traininable_mode_counter > 1:\n raise ValueError(\n \"AdaLoraModel supports only 1 trainable adapter. \"\n \"When using multiple adapters, set inference_mode to True for all adapters except the one you want to train.\"\n )\n\n mark_only_lora_as_trainable(self.model, self.peft_config[adapter_name].bias)\n if self.peft_config[adapter_name].inference_mode:\n _freeze_adapter(self.model, adapter_name)\n else:\n self.trainable_adapter_name = adapter_name\n self.rankallocator = RankAllocator(self.model, self.peft_config[adapter_name], self.trainable_adapter_name)\n\n def _find_and_replace(self, adapter_name):\n lora_config = self.peft_config[adapter_name]\n loaded_in_8bit = getattr(self.model, \"is_loaded_in_8bit\", False)\n if loaded_in_8bit and not is_bnb_available():\n raise ImportError(\n \"To use Lora with 8-bit quantization, please install the `bitsandbytes` package. 
\"\n \"You can install it with `pip install bitsandbytes`.\"\n )\n is_target_modules_in_base_model = False\n kwargs = {\n \"r\": lora_config.init_r,\n \"lora_alpha\": lora_config.lora_alpha,\n \"lora_dropout\": lora_config.lora_dropout,\n \"fan_in_fan_out\": lora_config.fan_in_fan_out,\n \"init_lora_weights\": lora_config.init_lora_weights,\n }\n key_list = [key for key, _ in self.model.named_modules()]\n for key in key_list:\n if isinstance(lora_config.target_modules, str):\n target_module_found = re.fullmatch(lora_config.target_modules, key)\n else:\n target_module_found = any(key.endswith(target_key) for target_key in lora_config.target_modules)\n if target_module_found:\n if not is_target_modules_in_base_model:\n is_target_modules_in_base_model = True\n parent, target, target_name = _get_submodules(self.model, key)\n bias = target.bias is not None\n if isinstance(target, LoraLayer):\n target.update_layer(\n adapter_name,\n lora_config.init_r,\n lora_config.lora_alpha,\n lora_config.lora_dropout,\n lora_config.init_lora_weights,\n )\n else:\n if loaded_in_8bit and isinstance(target, bnb.nn.Linear8bitLt):\n kwargs.update(\n {\n \"has_fp16_weights\": target.state.has_fp16_weights,\n \"memory_efficient_backward\": target.state.memory_efficient_backward,\n \"threshold\": target.state.threshold,\n \"index\": target.index,\n }\n )\n new_module = SVDLinear8bitLt(\n adapter_name, target.in_features, target.out_features, bias=bias, **kwargs\n )\n else:\n if isinstance(target, torch.nn.Linear):\n in_features, out_features = target.in_features, target.out_features\n if kwargs[\"fan_in_fan_out\"]:\n warnings.warn(\n \"fan_in_fan_out is set to True but the target module is `torch.nn.Linear`. \"\n \"Setting fan_in_fan_out to False.\"\n )\n kwargs[\"fan_in_fan_out\"] = lora_config.fan_in_fan_out = False\n elif isinstance(target, Conv1D):\n in_features, out_features = (\n target.weight.ds_shape if hasattr(target.weight, \"ds_shape\") else target.weight.shape\n )\n if not kwargs[\"fan_in_fan_out\"]:\n warnings.warn(\n \"fan_in_fan_out is set to False but the target module is `Conv1D`. \"\n \"Setting fan_in_fan_out to True.\"\n )\n kwargs[\"fan_in_fan_out\"] = lora_config.fan_in_fan_out = True\n else:\n raise ValueError(\n f\"Target module {target} is not supported. \"\n f\"Currently, only `torch.nn.Linear` and `Conv1D` are supported.\"\n )\n new_module = SVDLinear(adapter_name, in_features, out_features, bias=bias, **kwargs)\n\n self._replace_module(parent, target_name, new_module, target)\n if not is_target_modules_in_base_model:\n raise ValueError(\n f\"Target modules {lora_config.target_modules} not found in the base model. 
\"\n f\"Please check the target modules and try again.\"\n )\n\n def __getattr__(self, name: str):\n \"\"\"Forward missing attributes to the wrapped module.\"\"\"\n try:\n return super().__getattr__(name) # defer to nn.Module's logic\n except AttributeError:\n return getattr(self.model, name)\n\n def forward(self, *args, **kwargs):\n outputs = self.model.forward(*args, **kwargs)\n\n # Calculate the orthogonal regularization\n orth_reg_weight = self.peft_config[self.trainable_adapter_name].orth_reg_weight\n assert orth_reg_weight > 0\n\n if hasattr(outputs, \"loss\"):\n regu_loss = 0\n num_param = 0\n for n, p in self.model.named_parameters():\n if (\"lora_A\" in n or \"lora_B\" in n) and self.trainable_adapter_name in n:\n para_cov = p @ p.T if \"lora_A\" in n else p.T @ p\n I = torch.eye(*para_cov.size(), out=torch.empty_like(para_cov))\n I.requires_grad = False\n num_param += 1\n regu_loss += torch.norm(para_cov - I, p=\"fro\")\n regu_loss = regu_loss / num_param\n outputs.loss += orth_reg_weight * regu_loss\n return outputs\n\n def resize_modules_by_rank_pattern(self, rank_pattern, adapter_name):\n lora_config = self.peft_config[adapter_name]\n for name, rank_idx in rank_pattern.items():\n if isinstance(rank_idx, list):\n rank = sum(rank_idx)\n elif isinstance(rank_idx, torch.Tensor):\n rank_idx = rank_idx.view(-1)\n rank = rank_idx.sum().item()\n else:\n raise ValueError(\"Unexcepted type of rank_idx\")\n key = \".\".join(name.split(\".\")[0:-2]) if adapter_name in name else \".\".join(name.split(\".\")[0:-1])\n _, target, _ = _get_submodules(self.model, key)\n lora_E_weights = target.lora_E[adapter_name][rank_idx]\n lora_A_weights = target.lora_A[adapter_name][rank_idx]\n lora_B_weights = target.lora_B[adapter_name][:, rank_idx]\n ranknum = target.ranknum[adapter_name]\n target.update_layer(\n adapter_name,\n rank,\n lora_config.lora_alpha,\n lora_config.lora_dropout,\n lora_config.init_lora_weights,\n )\n with torch.no_grad():\n if rank > 0:\n target.lora_E[adapter_name].copy_(lora_E_weights)\n target.lora_A[adapter_name].copy_(lora_A_weights)\n target.lora_B[adapter_name].copy_(lora_B_weights)\n # The scaling is exactly as the previous\n target.ranknum[adapter_name].copy_(ranknum)\n\n def resize_state_dict_by_rank_pattern(self, rank_pattern, state_dict, adapter_name):\n for name, rank_idx in rank_pattern.items():\n rank = sum(rank_idx)\n prefix = \".\".join(name.split(\".\")[0:-2]) if adapter_name in name else \".\".join(name.split(\".\")[0:-1])\n for layer in [\"lora_E\", \"lora_A\", \"lora_B\"]:\n key = f\"base_model.model.{prefix}.{layer}.{adapter_name}\"\n if layer != \"lora_B\":\n state_dict[key] = (\n state_dict[key][rank_idx] if rank != state_dict[key].shape[0] else state_dict[key]\n )\n else:\n state_dict[key] = (\n state_dict[key][:, rank_idx] if rank != state_dict[key].shape[1] else state_dict[key]\n )\n return state_dict\n\n def update_and_allocate(self, global_step):\n lora_config = self.peft_config[self.trainable_adapter_name]\n # Update the importance score and allocate the budget\n if global_step < lora_config.total_step - lora_config.tfinal:\n _, rank_pattern = self.rankallocator.update_and_allocate(self.model, global_step)\n if rank_pattern:\n lora_config.rank_pattern = rank_pattern\n # Finalize the budget allocation\n elif global_step == lora_config.total_step - lora_config.tfinal:\n _, rank_pattern = self.rankallocator.update_and_allocate(self.model, global_step, force_mask=True)\n # for some reason, this freezes the trainable parameters and nothing gets updates\n 
# self.resize_modules_by_rank_pattern(rank_pattern, self.trainable_adapter_name)\n lora_config.rank_pattern = rank_pattern\n self.rankallocator.reset_ipt()\n # Currently using inefficient way to mask the unimportant weights using the rank pattern\n # due to problem mentioned above\n elif global_step > lora_config.total_step - lora_config.tfinal:\n self.rankallocator.mask_using_rank_pattern(self.model, lora_config.rank_pattern)\n # Pass the function and do forward propagation\n else:\n return None\n\n @staticmethod\n def _prepare_adalora_config(peft_config, model_config):\n if peft_config.target_modules is None:\n if model_config[\"model_type\"] not in TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING:\n raise ValueError(\"Please specify `target_modules` in `peft_config`\")\n peft_config.target_modules = TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING[\n model_config[\"model_type\"]\n ]\n if peft_config.inference_mode:\n peft_config.merge_weights = True\n return peft_config" }, { "identifier": "PromptEncoder", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/tuners/p_tuning.py", "snippet": "class PromptEncoder(torch.nn.Module):\n \"\"\"\n The prompt encoder network that is used to generate the virtual token embeddings for p-tuning.\n\n Args:\n config ([`PromptEncoderConfig`]): The configuration of the prompt encoder.\n\n Example:\n\n ```py\n >>> from peft import PromptEncoder, PromptEncoderConfig\n\n >>> config = PromptEncoderConfig(\n ... peft_type=\"P_TUNING\",\n ... task_type=\"SEQ_2_SEQ_LM\",\n ... num_virtual_tokens=20,\n ... token_dim=768,\n ... num_transformer_submodules=1,\n ... num_attention_heads=12,\n ... num_layers=12,\n ... encoder_reparameterization_type=\"MLP\",\n ... encoder_hidden_size=768,\n ... )\n\n >>> prompt_encoder = PromptEncoder(config)\n ```\n\n **Attributes**:\n - **embedding** (`torch.nn.Embedding`) -- The embedding layer of the prompt encoder.\n - **mlp_head** (`torch.nn.Sequential`) -- The MLP head of the prompt encoder if `inference_mode=False`.\n - **lstm_head** (`torch.nn.LSTM`) -- The LSTM head of the prompt encoder if `inference_mode=False` and\n `encoder_reparameterization_type=\"LSTM\"`.\n - **token_dim** (`int`) -- The hidden embedding dimension of the base transformer model.\n - **input_size** (`int`) -- The input size of the prompt encoder.\n - **output_size** (`int`) -- The output size of the prompt encoder.\n - **hidden_size** (`int`) -- The hidden size of the prompt encoder.\n - **total_virtual_tokens** (`int`): The total number of virtual tokens of the\n prompt encoder.\n - **encoder_type** (Union[[`PromptEncoderReparameterizationType`], `str`]): The encoder type of the prompt\n encoder.\n\n\n Input shape: (`batch_size`, `total_virtual_tokens`)\n\n Output shape: (`batch_size`, `total_virtual_tokens`, `token_dim`)\n \"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.token_dim = config.token_dim\n self.input_size = self.token_dim\n self.output_size = self.token_dim\n self.hidden_size = config.encoder_hidden_size\n self.total_virtual_tokens = config.num_virtual_tokens * config.num_transformer_submodules\n self.encoder_type = config.encoder_reparameterization_type\n\n # embedding\n self.embedding = torch.nn.Embedding(self.total_virtual_tokens, self.token_dim)\n if not config.inference_mode:\n if self.encoder_type == PromptEncoderReparameterizationType.LSTM:\n lstm_dropout = config.encoder_dropout\n num_layers = config.encoder_num_layers\n # LSTM\n self.lstm_head = torch.nn.LSTM(\n input_size=self.input_size,\n 
hidden_size=self.hidden_size,\n num_layers=num_layers,\n dropout=lstm_dropout,\n bidirectional=True,\n batch_first=True,\n )\n\n self.mlp_head = torch.nn.Sequential(\n torch.nn.Linear(self.hidden_size * 2, self.hidden_size * 2),\n torch.nn.ReLU(),\n torch.nn.Linear(self.hidden_size * 2, self.output_size),\n )\n\n elif self.encoder_type == PromptEncoderReparameterizationType.MLP:\n warnings.warn(\n f\"for {self.encoder_type}, the `encoder_num_layers` is ignored. Exactly 2 MLP layers are used.\"\n )\n layers = [\n torch.nn.Linear(self.input_size, self.hidden_size),\n torch.nn.ReLU(),\n torch.nn.Linear(self.hidden_size, self.hidden_size),\n torch.nn.ReLU(),\n torch.nn.Linear(self.hidden_size, self.output_size),\n ]\n self.mlp_head = torch.nn.Sequential(*layers)\n\n else:\n raise ValueError(\"Prompt encoder type not recognized. Please use one of MLP (recommended) or LSTM.\")\n\n def forward(self, indices):\n input_embeds = self.embedding(indices)\n if self.encoder_type == PromptEncoderReparameterizationType.LSTM:\n output_embeds = self.mlp_head(self.lstm_head(input_embeds)[0])\n elif self.encoder_type == PromptEncoderReparameterizationType.MLP:\n output_embeds = self.mlp_head(input_embeds)\n else:\n raise ValueError(\"Prompt encoder type not recognized. Please use one of MLP (recommended) or LSTM.\")\n\n return output_embeds" }, { "identifier": "PrefixEncoder", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/tuners/prefix_tuning.py", "snippet": "class PrefixEncoder(torch.nn.Module):\n r\"\"\"\n The `torch.nn` model to encode the prefix.\n\n Args:\n config ([`PrefixTuningConfig`]): The configuration of the prefix encoder.\n\n Example:\n\n ```py\n >>> from peft import PrefixEncoder, PrefixTuningConfig\n\n >>> config = PrefixTuningConfig(\n ... peft_type=\"PREFIX_TUNING\",\n ... task_type=\"SEQ_2_SEQ_LM\",\n ... num_virtual_tokens=20,\n ... token_dim=768,\n ... num_transformer_submodules=1,\n ... num_attention_heads=12,\n ... num_layers=12,\n ... encoder_hidden_size=768,\n ... 
)\n >>> prefix_encoder = PrefixEncoder(config)\n ```\n\n **Attributes**:\n - **embedding** (`torch.nn.Embedding`) -- The embedding layer of the prefix encoder.\n - **transform** (`torch.nn.Sequential`) -- The two-layer MLP to transform the prefix embeddings if\n `prefix_projection` is `True`.\n - **prefix_projection** (`bool`) -- Whether to project the prefix embeddings.\n\n Input shape: (`batch_size`, `num_virtual_tokens`)\n\n Output shape: (`batch_size`, `num_virtual_tokens`, `2*layers*hidden`)\n \"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.prefix_projection = config.prefix_projection\n token_dim = config.token_dim\n num_layers = config.num_layers\n encoder_hidden_size = config.encoder_hidden_size\n num_virtual_tokens = config.num_virtual_tokens\n if self.prefix_projection and not config.inference_mode:\n # Use a two-layer MLP to encode the prefix\n self.embedding = torch.nn.Embedding(num_virtual_tokens, token_dim)\n self.transform = torch.nn.Sequential(\n torch.nn.Linear(token_dim, encoder_hidden_size),\n torch.nn.Tanh(),\n torch.nn.Linear(encoder_hidden_size, num_layers * 2 * token_dim),\n )\n else:\n self.embedding = torch.nn.Embedding(num_virtual_tokens, num_layers * 2 * token_dim)\n\n def forward(self, prefix: torch.Tensor):\n if self.prefix_projection:\n prefix_tokens = self.embedding(prefix)\n past_key_values = self.transform(prefix_tokens)\n else:\n past_key_values = self.embedding(prefix)\n return past_key_values" }, { "identifier": "PromptEmbedding", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/tuners/prompt_tuning.py", "snippet": "class PromptEmbedding(torch.nn.Module):\n \"\"\"\n The model to encode virtual tokens into prompt embeddings.\n\n Args:\n config ([`PromptTuningConfig`]): The configuration of the prompt embedding.\n word_embeddings (`torch.nn.Module`): The word embeddings of the base transformer model.\n\n **Attributes**:\n - **embedding** (`torch.nn.Embedding`) -- The embedding layer of the prompt embedding.\n\n Example:\n\n ```py\n >>> from peft import PromptEmbedding, PromptTuningConfig\n\n >>> config = PromptTuningConfig(\n ... peft_type=\"PROMPT_TUNING\",\n ... task_type=\"SEQ_2_SEQ_LM\",\n ... num_virtual_tokens=20,\n ... token_dim=768,\n ... num_transformer_submodules=1,\n ... num_attention_heads=12,\n ... num_layers=12,\n ... prompt_tuning_init=\"TEXT\",\n ... prompt_tuning_init_text=\"Predict if sentiment of this review is positive, negative or neutral\",\n ... tokenizer_name_or_path=\"t5-base\",\n ... 
)\n\n >>> # t5_model.shared is the word embeddings of the base model\n >>> prompt_embedding = PromptEmbedding(config, t5_model.shared)\n ```\n\n Input Shape: (`batch_size`, `total_virtual_tokens`)\n\n Output Shape: (`batch_size`, `total_virtual_tokens`, `token_dim`)\n \"\"\"\n\n def __init__(self, config, word_embeddings):\n super().__init__()\n\n total_virtual_tokens = config.num_virtual_tokens * config.num_transformer_submodules\n self.embedding = torch.nn.Embedding(total_virtual_tokens, config.token_dim)\n if config.prompt_tuning_init == PromptTuningInit.TEXT:\n from transformers import AutoTokenizer\n\n tokenizer = AutoTokenizer.from_pretrained(config.tokenizer_name_or_path)\n init_text = config.prompt_tuning_init_text\n init_token_ids = tokenizer(init_text)[\"input_ids\"]\n # Trim or iterate until num_text_tokens matches total_virtual_tokens\n num_text_tokens = len(init_token_ids)\n if num_text_tokens > total_virtual_tokens:\n init_token_ids = init_token_ids[:total_virtual_tokens]\n elif num_text_tokens < total_virtual_tokens:\n num_reps = math.ceil(total_virtual_tokens / num_text_tokens)\n init_token_ids = init_token_ids * num_reps\n init_token_ids = init_token_ids[:total_virtual_tokens]\n\n word_embedding_weights = word_embeddings(torch.LongTensor(init_token_ids)).detach().clone()\n word_embedding_weights = word_embedding_weights.to(torch.float32)\n self.embedding.weight = torch.nn.Parameter(word_embedding_weights)\n\n def forward(self, indices):\n # Just get embeddings\n prompt_embeddings = self.embedding(indices)\n return prompt_embeddings" }, { "identifier": "PeftConfig", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/utils/config.py", "snippet": "class PeftConfig(PeftConfigMixin):\n \"\"\"\n This is the base configuration class to store the configuration of a [`PeftModel`].\n\n Args:\n peft_type (Union[[`~peft.utils.config.PeftType`], `str`]): The type of Peft method to use.\n task_type (Union[[`~peft.utils.config.TaskType`], `str`]): The type of task to perform.\n inference_mode (`bool`, defaults to `False`): Whether to use the Peft model in inference mode.\n \"\"\"\n\n base_model_name_or_path: str = field(default=None, metadata={\"help\": \"The name of the base model to use.\"})\n peft_type: Union[str, PeftType] = field(default=None, metadata={\"help\": \"Peft type\"})\n task_type: Union[str, TaskType] = field(default=None, metadata={\"help\": \"Task type\"})\n inference_mode: bool = field(default=False, metadata={\"help\": \"Whether to use inference mode\"})" }, { "identifier": "PeftType", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/utils/config.py", "snippet": "class PeftType(str, enum.Enum):\n PROMPT_TUNING = \"PROMPT_TUNING\"\n P_TUNING = \"P_TUNING\"\n PREFIX_TUNING = \"PREFIX_TUNING\"\n LORA = \"LORA\"\n ADALORA = \"ADALORA\"" }, { "identifier": "PromptLearningConfig", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/utils/config.py", "snippet": "class PromptLearningConfig(PeftConfig):\n \"\"\"\n This is the base configuration class to store the configuration of [`PrefixTuning`], [`PromptEncoder`], or\n [`PromptTuning`].\n\n Args:\n num_virtual_tokens (`int`): The number of virtual tokens to use.\n token_dim (`int`): The hidden embedding dimension of the base transformer model.\n num_transformer_submodules (`int`): The number of transformer submodules in the base transformer model.\n num_attention_heads (`int`): The number of attention heads in the base transformer model.\n num_layers (`int`): The number of layers in the base transformer model.\n 
\"\"\"\n\n num_virtual_tokens: int = field(default=None, metadata={\"help\": \"Number of virtual tokens\"})\n token_dim: int = field(\n default=None, metadata={\"help\": \"The hidden embedding dimension of the base transformer model\"}\n )\n num_transformer_submodules: Optional[int] = field(\n default=None, metadata={\"help\": \"Number of transformer submodules\"}\n )\n num_attention_heads: Optional[int] = field(default=None, metadata={\"help\": \"Number of attention heads\"})\n num_layers: Optional[int] = field(default=None, metadata={\"help\": \"Number of transformer layers\"})" }, { "identifier": "TaskType", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/utils/config.py", "snippet": "class TaskType(str, enum.Enum):\n SEQ_CLS = \"SEQ_CLS\"\n SEQ_2_SEQ_LM = \"SEQ_2_SEQ_LM\"\n CAUSAL_LM = \"CAUSAL_LM\"\n TOKEN_CLS = \"TOKEN_CLS\"" }, { "identifier": "TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/utils/other.py", "snippet": "TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING = {\n \"bloom\": bloom_model_postprocess_past_key_value,\n}" }, { "identifier": "WEIGHTS_NAME", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/utils/other.py", "snippet": "WEIGHTS_NAME = \"adapter_model.bin\"" }, { "identifier": "_set_trainable", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/utils/other.py", "snippet": "def _set_trainable(model, adapter_name):\n key_list = [key for key, _ in model.named_modules()]\n for key in key_list:\n target_module_found = any(key.endswith(target_key) for target_key in model.modules_to_save)\n if target_module_found:\n parent, target, target_name = _get_submodules(model, key)\n if isinstance(target, ModulesToSaveWrapper):\n target.update(adapter_name)\n else:\n for param in target.parameters():\n param.requires_grad = True\n setattr(parent, target_name, ModulesToSaveWrapper(target, adapter_name))" }, { "identifier": "shift_tokens_right", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/utils/other.py", "snippet": "def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):\n \"\"\"\n Shift input ids one token to the right.\n\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): input ids\n pad_token_id (`int`): The id of the `padding` token.\n decoder_start_token_id (`int`): The id of the `start` token.\n \"\"\"\n shifted_input_ids = input_ids.new_zeros(input_ids.shape)\n shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()\n shifted_input_ids[:, 0] = decoder_start_token_id\n\n if pad_token_id is None:\n raise ValueError(\"self.model.config.pad_token_id has to be defined.\")\n # replace possible -100 values in labels by `pad_token_id`\n shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)\n\n return shifted_input_ids" }, { "identifier": "_set_adapter", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/utils/other.py", "snippet": "def _set_adapter(model, adapter_name):\n for module in model.modules():\n if isinstance(module, ModulesToSaveWrapper):\n module.active_adapter = adapter_name" }, { "identifier": "get_peft_model_state_dict", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/utils/save_and_load.py", "snippet": "def get_peft_model_state_dict(model, state_dict=None, adapter_name=\"default\"):\n \"\"\"\n Get the state dict of the Peft model.\n\n Args:\n model ([`PeftModel`]): The Peft model. 
When using torch.nn.DistributedDataParallel, DeepSpeed or FSDP,\n the model should be the underlying model/unwrapped model (i.e. model.module).\n state_dict (`dict`, *optional*, defaults to `None`):\n The state dict of the model. If not provided, the state dict of the model\n will be used.\n \"\"\"\n config = model.peft_config[adapter_name]\n if state_dict is None:\n state_dict = model.state_dict()\n if config.peft_type in (PeftType.LORA, PeftType.ADALORA):\n # to_return = lora_state_dict(model, bias=model.peft_config.bias)\n # adapted from `https://github.com/microsoft/LoRA/blob/main/loralib/utils.py`\n # to be used directly with the state dict which is necessary when using DeepSpeed or FSDP\n bias = config.bias\n if bias == \"none\":\n to_return = {k: state_dict[k] for k in state_dict if \"lora_\" in k}\n elif bias == \"all\":\n to_return = {k: state_dict[k] for k in state_dict if \"lora_\" in k or \"bias\" in k}\n elif bias == \"lora_only\":\n to_return = {}\n for k in state_dict:\n if \"lora_\" in k:\n to_return[k] = state_dict[k]\n bias_name = k.split(\"lora_\")[0] + \"bias\"\n if bias_name in state_dict:\n to_return[bias_name] = state_dict[bias_name]\n else:\n raise NotImplementedError\n to_return = {k: v for k, v in to_return.items() if ((\"lora_\" in k and adapter_name in k) or (\"bias\" in k))}\n if config.peft_type == PeftType.ADALORA:\n rank_pattern = config.rank_pattern\n if rank_pattern is not None:\n rank_pattern = {k.replace(f\".{adapter_name}\", \"\"): v for k, v in rank_pattern.items()}\n config.rank_pattern = rank_pattern\n to_return = model.resize_state_dict_by_rank_pattern(rank_pattern, to_return, adapter_name)\n elif isinstance(config, PromptLearningConfig):\n to_return = {}\n if config.inference_mode:\n prompt_embeddings = model.prompt_encoder[adapter_name].embedding.weight\n else:\n prompt_embeddings = model.get_prompt_embedding_to_save(adapter_name)\n to_return[\"prompt_embeddings\"] = prompt_embeddings\n else:\n raise NotImplementedError\n if model.modules_to_save is not None:\n for key, value in state_dict.items():\n if any(f\"{module_name}.modules_to_save.{adapter_name}\" in key for module_name in model.modules_to_save):\n to_return[key.replace(\"modules_to_save.\", \"\")] = value\n\n to_return = {k.replace(f\".{adapter_name}\", \"\"): v for k, v in to_return.items()}\n return to_return" }, { "identifier": "set_peft_model_state_dict", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/utils/save_and_load.py", "snippet": "def set_peft_model_state_dict(model, peft_model_state_dict, adapter_name=\"default\"):\n \"\"\"\n Set the state dict of the Peft model.\n\n Args:\n model ([`PeftModel`]): The Peft model.\n peft_model_state_dict (`dict`): The state dict of the Peft model.\n \"\"\"\n config = model.peft_config[adapter_name]\n state_dict = {}\n if model.modules_to_save is not None:\n for key, value in peft_model_state_dict.items():\n if any(module_name in key for module_name in model.modules_to_save):\n for module_name in model.modules_to_save:\n if module_name in key:\n key = key.replace(module_name, f\"{module_name}.modules_to_save.{adapter_name}\")\n break\n state_dict[key] = value\n else:\n state_dict = peft_model_state_dict\n\n #print(\"config.peft_type: \".format(config.peft_type))\n if config.peft_type in (PeftType.LORA, PeftType.ADALORA):\n peft_model_state_dict = {}\n for k, v in state_dict.items():\n if \"lora_\" in k:\n suffix = k.split(\"lora_\")[1]\n if \".\" in suffix:\n suffix_to_replace = \".\".join(suffix.split(\".\")[1:])\n k = 
k.replace(suffix_to_replace, f\"{adapter_name}.{suffix_to_replace}\")\n else:\n k = f\"{k}.{adapter_name}\"\n peft_model_state_dict[k] = v\n else:\n peft_model_state_dict[k] = v\n if config.peft_type == PeftType.ADALORA:\n rank_pattern = config.rank_pattern\n if rank_pattern is not None:\n model.resize_modules_by_rank_pattern(rank_pattern, adapter_name)\n elif isinstance(config, PromptLearningConfig):\n peft_model_state_dict = state_dict\n else:\n raise NotImplementedError\n\n model.load_state_dict(peft_model_state_dict, strict=False)\n #exit()\n if isinstance(config, PromptLearningConfig):\n model.prompt_encoder[adapter_name].embedding.load_state_dict(\n {\"weight\": peft_model_state_dict[\"prompt_embeddings\"]}, strict=True\n )" } ]
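The LoRA snippets in the context list above fold each adapter update back into its frozen base `nn.Linear` (via `target.merge()` and the weighted sums in `add_weighted_adapter`). As a quick illustration of the arithmetic only — not the library's API — here is a minimal standalone sketch; the shapes, the random toy tensors, and the `scaling = lora_alpha / r` convention are assumptions made for the example:

import torch

# Merging a LoRA adapter into a linear layer amounts to W_merged = W_base + scaling * (B @ A).
d, r = 16, 4
base_weight = torch.randn(d, d)   # frozen base weight W (out_features x in_features)
lora_A = torch.randn(r, d)        # low-rank factor A
lora_B = torch.randn(d, r)        # low-rank factor B
scaling = 8 / r                   # assumed lora_alpha = 8

merged_weight = base_weight + scaling * (lora_B @ lora_A)

# The merged layer and the base-plus-adapter forward pass agree up to float error.
x = torch.randn(2, d)
out_merged = x @ merged_weight.T
out_adapter = x @ base_weight.T + scaling * (x @ lora_A.T) @ lora_B.T
print(torch.allclose(out_merged, out_adapter, atol=1e-4))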
import inspect
import os
import warnings

import torch
from contextlib import contextmanager

from accelerate import dispatch_model, infer_auto_device_map
from accelerate.hooks import AlignDevicesHook, add_hook_to_module, remove_hook_from_submodules
from accelerate.utils import get_balanced_memory
from huggingface_hub import hf_hub_download
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from transformers import PreTrainedModel
from transformers.modeling_outputs import SequenceClassifierOutput, TokenClassifierOutput
from transformers.utils import PushToHubMixin

from .tuners import AdaLoraModel, LoraModel, PrefixEncoder, PromptEmbedding, PromptEncoder
from .utils import (
    TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING,
    WEIGHTS_NAME,
    PeftConfig,
    PeftType,
    PromptLearningConfig,
    TaskType,
    _set_adapter,
    _set_trainable,
    get_peft_model_state_dict,
    set_peft_model_state_dict,
    shift_tokens_right,
)
from .mapping import MODEL_TYPE_TO_PEFT_MODEL_MAPPING, PEFT_TYPE_TO_CONFIG_MAPPING
from .mapping import PEFT_TYPE_TO_CONFIG_MAPPING
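One helper pulled in by the import block above, `shift_tokens_right`, builds decoder inputs for seq2seq models by shifting the labels one position to the right and replacing the `-100` ignore markers with the pad token. Below is a condensed, self-contained mirror of that helper with made-up token ids; the ids and pad/start values are illustrative only:

import torch

def shift_tokens_right(input_ids, pad_token_id, decoder_start_token_id):
    # Condensed mirror of the helper shown in the context list above.
    shifted = input_ids.new_zeros(input_ids.shape)
    shifted[:, 1:] = input_ids[:, :-1].clone()
    shifted[:, 0] = decoder_start_token_id
    shifted.masked_fill_(shifted == -100, pad_token_id)  # labels mark ignored positions with -100
    return shifted

labels = torch.tensor([[42, 7, -100, -100]])
print(shift_tokens_right(labels, pad_token_id=0, decoder_start_token_id=1))
# tensor([[ 1, 42,  7,  0]])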
11,152
# coding=utf-8
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

PEFT_TYPE_TO_MODEL_MAPPING = {
    PeftType.LORA: LoraModel,
    PeftType.PROMPT_TUNING: PromptEmbedding,
# coding=utf-8
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

PEFT_TYPE_TO_MODEL_MAPPING = {
    PeftType.LORA: LoraModel,
    PeftType.PROMPT_TUNING: PromptEmbedding,
PeftType.P_TUNING: PromptEncoder,
2
2023-10-30 10:50:32+00:00
16k
chenran-li/RQL-release
sb3_contrib/trpo/trpo.py
[ { "identifier": "kl_divergence", "path": "stable_baselines3/common/distributions.py", "snippet": "def kl_divergence(dist_true: Distribution, dist_pred: Distribution) -> th.Tensor:\n \"\"\"\n Wrapper for the PyTorch implementation of the full form KL Divergence\n\n :param dist_true: the p distribution\n :param dist_pred: the q distribution\n :return: KL(dist_true||dist_pred)\n \"\"\"\n # KL Divergence for different distribution types is out of scope\n assert dist_true.__class__ == dist_pred.__class__, \"Error: input distributions should be the same type\"\n\n # MultiCategoricalDistribution is not a PyTorch Distribution subclass\n # so we need to implement it ourselves!\n if isinstance(dist_pred, MultiCategoricalDistribution):\n assert np.allclose(dist_pred.action_dims, dist_true.action_dims), \"Error: distributions must have the same input space\"\n return th.stack(\n [th.distributions.kl_divergence(p, q) for p, q in zip(dist_true.distribution, dist_pred.distribution)],\n dim=1,\n ).sum(dim=1)\n\n # Use the PyTorch kl_divergence implementation\n else:\n return th.distributions.kl_divergence(dist_true.distribution, dist_pred.distribution)" }, { "identifier": "OnPolicyAlgorithm", "path": "stable_baselines3/common/on_policy_algorithm.py", "snippet": "class OnPolicyAlgorithm(BaseAlgorithm):\n \"\"\"\n The base for On-Policy algorithms (ex: A2C/PPO).\n\n :param policy: The policy model to use (MlpPolicy, CnnPolicy, ...)\n :param env: The environment to learn from (if registered in Gym, can be str)\n :param learning_rate: The learning rate, it can be a function\n of the current progress remaining (from 1 to 0)\n :param n_steps: The number of steps to run for each environment per update\n (i.e. batch size is n_steps * n_env where n_env is number of environment copies running in parallel)\n :param gamma: Discount factor\n :param gae_lambda: Factor for trade-off of bias vs variance for Generalized Advantage Estimator.\n Equivalent to classic advantage when set to 1.\n :param ent_coef: Entropy coefficient for the loss calculation\n :param vf_coef: Value function coefficient for the loss calculation\n :param max_grad_norm: The maximum value for the gradient clipping\n :param use_sde: Whether to use generalized State Dependent Exploration (gSDE)\n instead of action noise exploration (default: False)\n :param sde_sample_freq: Sample a new noise matrix every n steps when using gSDE\n Default: -1 (only sample at the beginning of the rollout)\n :param tensorboard_log: the log location for tensorboard (if None, no logging)\n :param monitor_wrapper: When creating an environment, whether to wrap it\n or not in a Monitor wrapper.\n :param policy_kwargs: additional arguments to be passed to the policy on creation\n :param verbose: Verbosity level: 0 for no output, 1 for info messages (such as device or wrappers used), 2 for\n debug messages\n :param seed: Seed for the pseudo random generators\n :param device: Device (cpu, cuda, ...) 
on which the code should be run.\n Setting it to auto, the code will be run on the GPU if possible.\n :param _init_setup_model: Whether or not to build the network at the creation of the instance\n :param supported_action_spaces: The action spaces supported by the algorithm.\n \"\"\"\n\n def __init__(\n self,\n policy: Union[str, Type[ActorCriticPolicy]],\n env: Union[GymEnv, str],\n learning_rate: Union[float, Schedule],\n n_steps: int,\n gamma: float,\n gae_lambda: float,\n ent_coef: float,\n vf_coef: float,\n max_grad_norm: float,\n use_sde: bool,\n sde_sample_freq: int,\n tensorboard_log: Optional[str] = None,\n monitor_wrapper: bool = True,\n policy_kwargs: Optional[Dict[str, Any]] = None,\n verbose: int = 0,\n seed: Optional[int] = None,\n device: Union[th.device, str] = \"auto\",\n _init_setup_model: bool = True,\n supported_action_spaces: Optional[Tuple[spaces.Space, ...]] = None,\n ):\n\n super().__init__(\n policy=policy,\n env=env,\n learning_rate=learning_rate,\n policy_kwargs=policy_kwargs,\n verbose=verbose,\n device=device,\n use_sde=use_sde,\n sde_sample_freq=sde_sample_freq,\n support_multi_env=True,\n seed=seed,\n tensorboard_log=tensorboard_log,\n supported_action_spaces=supported_action_spaces,\n )\n\n self.n_steps = n_steps\n self.gamma = gamma\n self.gae_lambda = gae_lambda\n self.ent_coef = ent_coef\n self.vf_coef = vf_coef\n self.max_grad_norm = max_grad_norm\n self.rollout_buffer = None\n\n if _init_setup_model:\n self._setup_model()\n\n def _setup_model(self) -> None:\n self._setup_lr_schedule()\n self.set_random_seed(self.seed)\n\n buffer_cls = DictRolloutBuffer if isinstance(self.observation_space, spaces.Dict) else RolloutBuffer\n\n self.rollout_buffer = buffer_cls(\n self.n_steps,\n self.observation_space,\n self.action_space,\n device=self.device,\n gamma=self.gamma,\n gae_lambda=self.gae_lambda,\n n_envs=self.n_envs,\n )\n self.policy = self.policy_class( # pytype:disable=not-instantiable\n self.observation_space,\n self.action_space,\n self.lr_schedule,\n use_sde=self.use_sde,\n **self.policy_kwargs # pytype:disable=not-instantiable\n )\n self.policy = self.policy.to(self.device)\n\n def collect_rollouts(\n self,\n env: VecEnv,\n callback: BaseCallback,\n rollout_buffer: RolloutBuffer,\n n_rollout_steps: int,\n ) -> bool:\n \"\"\"\n Collect experiences using the current policy and fill a ``RolloutBuffer``.\n The term rollout here refers to the model-free notion and should not\n be used with the concept of rollout used in model-based RL or planning.\n\n :param env: The training environment\n :param callback: Callback that will be called at each step\n (and at the beginning and end of the rollout)\n :param rollout_buffer: Buffer to fill with rollouts\n :param n_rollout_steps: Number of experiences to collect per environment\n :return: True if function returned with at least `n_rollout_steps`\n collected, False if callback terminated rollout prematurely.\n \"\"\"\n assert self._last_obs is not None, \"No previous observation was provided\"\n # Switch to eval mode (this affects batch norm / dropout)\n self.policy.set_training_mode(False)\n\n n_steps = 0\n rollout_buffer.reset()\n # Sample new weights for the state dependent exploration\n if self.use_sde:\n self.policy.reset_noise(env.num_envs)\n\n callback.on_rollout_start()\n\n while n_steps < n_rollout_steps:\n if self.use_sde and self.sde_sample_freq > 0 and n_steps % self.sde_sample_freq == 0:\n # Sample a new noise matrix\n self.policy.reset_noise(env.num_envs)\n\n with th.no_grad():\n # Convert to pytorch 
tensor or to TensorDict\n obs_tensor = obs_as_tensor(self._last_obs, self.device)\n actions, values, log_probs = self.policy(obs_tensor)\n actions = actions.cpu().numpy()\n\n # Rescale and perform action\n clipped_actions = actions\n # Clip the actions to avoid out of bound error\n if isinstance(self.action_space, spaces.Box):\n clipped_actions = np.clip(actions, self.action_space.low, self.action_space.high)\n\n new_obs, rewards, dones, infos = env.step(clipped_actions)\n\n self.num_timesteps += env.num_envs\n\n # Give access to local variables\n callback.update_locals(locals())\n if callback.on_step() is False:\n return False\n\n self._update_info_buffer(infos)\n n_steps += 1\n\n if isinstance(self.action_space, spaces.Discrete):\n # Reshape in case of discrete action\n actions = actions.reshape(-1, 1)\n\n # Handle timeout by bootstraping with value function\n # see GitHub issue #633\n for idx, done in enumerate(dones):\n if (\n done\n and infos[idx].get(\"terminal_observation\") is not None\n and infos[idx].get(\"TimeLimit.truncated\", False)\n ):\n terminal_obs = self.policy.obs_to_tensor(infos[idx][\"terminal_observation\"])[0]\n with th.no_grad():\n terminal_value = self.policy.predict_values(terminal_obs)[0]\n rewards[idx] += self.gamma * terminal_value\n\n rollout_buffer.add(self._last_obs, actions, rewards, self._last_episode_starts, values, log_probs)\n self._last_obs = new_obs\n self._last_episode_starts = dones\n\n with th.no_grad():\n # Compute value for the last timestep\n values = self.policy.predict_values(obs_as_tensor(new_obs, self.device))\n\n rollout_buffer.compute_returns_and_advantage(last_values=values, dones=dones)\n\n callback.on_rollout_end()\n\n return True\n\n def train(self) -> None:\n \"\"\"\n Consume current rollout data and update policy parameters.\n Implemented by individual algorithms.\n \"\"\"\n raise NotImplementedError\n\n def learn(\n self: SelfOnPolicyAlgorithm,\n total_timesteps: int,\n callback: MaybeCallback = None,\n log_interval: int = 1,\n tb_log_name: str = \"OnPolicyAlgorithm\",\n reset_num_timesteps: bool = True,\n progress_bar: bool = False,\n ) -> SelfOnPolicyAlgorithm:\n iteration = 0\n\n total_timesteps, callback = self._setup_learn(\n total_timesteps,\n callback,\n reset_num_timesteps,\n tb_log_name,\n progress_bar,\n )\n\n callback.on_training_start(locals(), globals())\n\n while self.num_timesteps < total_timesteps:\n\n continue_training = self.collect_rollouts(self.env, callback, self.rollout_buffer, n_rollout_steps=self.n_steps)\n\n if continue_training is False:\n break\n\n iteration += 1\n self._update_current_progress_remaining(self.num_timesteps, total_timesteps)\n\n # Display training infos\n if log_interval is not None and iteration % log_interval == 0:\n time_elapsed = max((time.time_ns() - self.start_time) / 1e9, sys.float_info.epsilon)\n fps = int((self.num_timesteps - self._num_timesteps_at_start) / time_elapsed)\n self.logger.record(\"time/iterations\", iteration, exclude=\"tensorboard\")\n if len(self.ep_info_buffer) > 0 and len(self.ep_info_buffer[0]) > 0:\n self.logger.record(\"rollout/ep_rew_mean\", safe_mean([ep_info[\"r\"] for ep_info in self.ep_info_buffer]))\n self.logger.record(\"rollout/ep_len_mean\", safe_mean([ep_info[\"l\"] for ep_info in self.ep_info_buffer]))\n self.logger.record(\"time/fps\", fps)\n self.logger.record(\"time/time_elapsed\", int(time_elapsed), exclude=\"tensorboard\")\n self.logger.record(\"time/total_timesteps\", self.num_timesteps, exclude=\"tensorboard\")\n 
self.logger.dump(step=self.num_timesteps)\n\n self.train()\n\n callback.on_training_end()\n\n return self\n\n def _get_torch_save_params(self) -> Tuple[List[str], List[str]]:\n state_dicts = [\"policy\", \"policy.optimizer\"]\n\n return state_dicts, []" }, { "identifier": "ActorCriticPolicy", "path": "stable_baselines3/common/policies.py", "snippet": "class ActorCriticPolicy(BasePolicy):\n \"\"\"\n Policy class for actor-critic algorithms (has both policy and value prediction).\n Used by A2C, PPO and the likes.\n\n :param observation_space: Observation space\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be constant)\n :param net_arch: The specification of the policy and value networks.\n :param activation_fn: Activation function\n :param ortho_init: Whether to use or not orthogonal initialization\n :param use_sde: Whether to use State Dependent Exploration or not\n :param log_std_init: Initial value for the log standard deviation\n :param full_std: Whether to use (n_features x n_actions) parameters\n for the std instead of only (n_features,) when using gSDE\n :param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure\n a positive standard deviation (cf paper). It allows to keep variance\n above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.\n :param squash_output: Whether to squash the output using a tanh function,\n this allows to ensure boundaries when using gSDE.\n :param features_extractor_class: Features extractor to use.\n :param features_extractor_kwargs: Keyword arguments\n to pass to the features extractor.\n :param share_features_extractor: If True, the features extractor is shared between the policy and value networks.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n \"\"\"\n\n def __init__(\n self,\n observation_space: spaces.Space,\n action_space: spaces.Space,\n lr_schedule: Schedule,\n # TODO(antonin): update type annotation when we remove shared network support\n net_arch: Union[List[int], Dict[str, List[int]], List[Dict[str, List[int]]], None] = None,\n activation_fn: Type[nn.Module] = nn.Tanh,\n ortho_init: bool = True,\n use_sde: bool = False,\n log_std_init: float = 0.0,\n full_std: bool = True,\n use_expln: bool = False,\n squash_output: bool = False,\n features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor,\n features_extractor_kwargs: Optional[Dict[str, Any]] = None,\n share_features_extractor: bool = True,\n normalize_images: bool = True,\n optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,\n optimizer_kwargs: Optional[Dict[str, Any]] = None,\n ):\n\n if optimizer_kwargs is None:\n optimizer_kwargs = {}\n # Small values to avoid NaN in Adam optimizer\n if optimizer_class == th.optim.Adam:\n optimizer_kwargs[\"eps\"] = 1e-5\n\n super().__init__(\n observation_space,\n action_space,\n features_extractor_class,\n features_extractor_kwargs,\n optimizer_class=optimizer_class,\n optimizer_kwargs=optimizer_kwargs,\n squash_output=squash_output,\n normalize_images=normalize_images,\n )\n\n # Convert [dict()] to dict() as shared network are deprecated\n if isinstance(net_arch, list) and len(net_arch) > 0:\n if isinstance(net_arch[0], dict):\n warnings.warn(\n (\n \"As shared layers in the mlp_extractor are 
deprecated and will be removed in SB3 v1.8.0, \"\n \"you should now pass directly a dictionary and not a list \"\n \"(net_arch=dict(pi=..., vf=...) instead of net_arch=[dict(pi=..., vf=...)])\"\n ),\n )\n net_arch = net_arch[0]\n else:\n # Note: deprecation warning will be emitted\n # by the MlpExtractor constructor\n pass\n\n # Default network architecture, from stable-baselines\n if net_arch is None:\n if features_extractor_class == NatureCNN:\n net_arch = []\n else:\n net_arch = dict(pi=[64, 64], vf=[64, 64])\n\n self.net_arch = net_arch\n self.activation_fn = activation_fn\n self.ortho_init = ortho_init\n\n self.share_features_extractor = share_features_extractor\n self.features_extractor = self.make_features_extractor()\n self.features_dim = self.features_extractor.features_dim\n if self.share_features_extractor:\n self.pi_features_extractor = self.features_extractor\n self.vf_features_extractor = self.features_extractor\n else:\n self.pi_features_extractor = self.features_extractor\n self.vf_features_extractor = self.make_features_extractor()\n # if the features extractor is not shared, there cannot be shared layers in the mlp_extractor\n # TODO(antonin): update the check once we change net_arch behavior\n if isinstance(net_arch, list) and len(net_arch) > 0:\n raise ValueError(\n \"Error: if the features extractor is not shared, there cannot be shared layers in the mlp_extractor\"\n )\n\n self.log_std_init = log_std_init\n dist_kwargs = None\n # Keyword arguments for gSDE distribution\n if use_sde:\n dist_kwargs = {\n \"full_std\": full_std,\n \"squash_output\": squash_output,\n \"use_expln\": use_expln,\n \"learn_features\": False,\n }\n\n self.use_sde = use_sde\n self.dist_kwargs = dist_kwargs\n\n # Action distribution\n self.action_dist = make_proba_distribution(action_space, use_sde=use_sde, dist_kwargs=dist_kwargs)\n\n self._build(lr_schedule)\n\n def _get_constructor_parameters(self) -> Dict[str, Any]:\n data = super()._get_constructor_parameters()\n\n default_none_kwargs = self.dist_kwargs or collections.defaultdict(lambda: None)\n\n data.update(\n dict(\n net_arch=self.net_arch,\n activation_fn=self.activation_fn,\n use_sde=self.use_sde,\n log_std_init=self.log_std_init,\n squash_output=default_none_kwargs[\"squash_output\"],\n full_std=default_none_kwargs[\"full_std\"],\n use_expln=default_none_kwargs[\"use_expln\"],\n lr_schedule=self._dummy_schedule, # dummy lr schedule, not needed for loading policy alone\n ortho_init=self.ortho_init,\n optimizer_class=self.optimizer_class,\n optimizer_kwargs=self.optimizer_kwargs,\n features_extractor_class=self.features_extractor_class,\n features_extractor_kwargs=self.features_extractor_kwargs,\n )\n )\n return data\n\n def reset_noise(self, n_envs: int = 1) -> None:\n \"\"\"\n Sample new weights for the exploration matrix.\n\n :param n_envs:\n \"\"\"\n assert isinstance(self.action_dist, StateDependentNoiseDistribution), \"reset_noise() is only available when using gSDE\"\n self.action_dist.sample_weights(self.log_std, batch_size=n_envs)\n\n def _build_mlp_extractor(self) -> None:\n \"\"\"\n Create the policy and value networks.\n Part of the layers can be shared.\n \"\"\"\n # Note: If net_arch is None and some features extractor is used,\n # net_arch here is an empty list and mlp_extractor does not\n # really contain any layers (acts like an identity module).\n self.mlp_extractor = MlpExtractor(\n self.features_dim,\n net_arch=self.net_arch,\n activation_fn=self.activation_fn,\n device=self.device,\n )\n\n def _build(self, 
lr_schedule: Schedule) -> None:\n \"\"\"\n Create the networks and the optimizer.\n\n :param lr_schedule: Learning rate schedule\n lr_schedule(1) is the initial learning rate\n \"\"\"\n self._build_mlp_extractor()\n\n latent_dim_pi = self.mlp_extractor.latent_dim_pi\n\n if isinstance(self.action_dist, DiagGaussianDistribution):\n self.action_net, self.log_std = self.action_dist.proba_distribution_net(\n latent_dim=latent_dim_pi, log_std_init=self.log_std_init\n )\n elif isinstance(self.action_dist, StateDependentNoiseDistribution):\n self.action_net, self.log_std = self.action_dist.proba_distribution_net(\n latent_dim=latent_dim_pi, latent_sde_dim=latent_dim_pi, log_std_init=self.log_std_init\n )\n elif isinstance(self.action_dist, (CategoricalDistribution, MultiCategoricalDistribution, BernoulliDistribution)):\n self.action_net = self.action_dist.proba_distribution_net(latent_dim=latent_dim_pi)\n else:\n raise NotImplementedError(f\"Unsupported distribution '{self.action_dist}'.\")\n\n self.value_net = nn.Linear(self.mlp_extractor.latent_dim_vf, 1)\n # Init weights: use orthogonal initialization\n # with small initial weight for the output\n if self.ortho_init:\n # TODO: check for features_extractor\n # Values from stable-baselines.\n # features_extractor/mlp values are\n # originally from openai/baselines (default gains/init_scales).\n module_gains = {\n self.features_extractor: np.sqrt(2),\n self.mlp_extractor: np.sqrt(2),\n self.action_net: 0.01,\n self.value_net: 1,\n }\n if not self.share_features_extractor:\n # Note(antonin): this is to keep SB3 results\n # consistent, see GH#1148\n del module_gains[self.features_extractor]\n module_gains[self.pi_features_extractor] = np.sqrt(2)\n module_gains[self.vf_features_extractor] = np.sqrt(2)\n\n for module, gain in module_gains.items():\n module.apply(partial(self.init_weights, gain=gain))\n\n # Setup optimizer with initial learning rate\n self.optimizer = self.optimizer_class(self.parameters(), lr=lr_schedule(1), **self.optimizer_kwargs)\n\n def forward(self, obs: th.Tensor, deterministic: bool = False) -> Tuple[th.Tensor, th.Tensor, th.Tensor]:\n \"\"\"\n Forward pass in all the networks (actor and critic)\n\n :param obs: Observation\n :param deterministic: Whether to sample or use deterministic actions\n :return: action, value and log probability of the action\n \"\"\"\n # Preprocess the observation if needed\n features = self.extract_features(obs)\n if self.share_features_extractor:\n latent_pi, latent_vf = self.mlp_extractor(features)\n else:\n pi_features, vf_features = features\n latent_pi = self.mlp_extractor.forward_actor(pi_features)\n latent_vf = self.mlp_extractor.forward_critic(vf_features)\n # Evaluate the values for the given observations\n values = self.value_net(latent_vf)\n distribution = self._get_action_dist_from_latent(latent_pi)\n actions = distribution.get_actions(deterministic=deterministic)\n log_prob = distribution.log_prob(actions)\n actions = actions.reshape((-1,) + self.action_space.shape)\n return actions, values, log_prob\n\n def extract_features(self, obs: th.Tensor) -> Union[th.Tensor, Tuple[th.Tensor, th.Tensor]]:\n \"\"\"\n Preprocess the observation if needed and extract features.\n\n :param obs: Observation\n :return: the output of the features extractor(s)\n \"\"\"\n if self.share_features_extractor:\n return super().extract_features(obs, self.features_extractor)\n else:\n pi_features = super().extract_features(obs, self.pi_features_extractor)\n vf_features = super().extract_features(obs, 
self.vf_features_extractor)\n return pi_features, vf_features\n\n def _get_action_dist_from_latent(self, latent_pi: th.Tensor) -> Distribution:\n \"\"\"\n Retrieve action distribution given the latent codes.\n\n :param latent_pi: Latent code for the actor\n :return: Action distribution\n \"\"\"\n mean_actions = self.action_net(latent_pi)\n\n if isinstance(self.action_dist, DiagGaussianDistribution):\n return self.action_dist.proba_distribution(mean_actions, self.log_std)\n elif isinstance(self.action_dist, CategoricalDistribution):\n # Here mean_actions are the logits before the softmax\n return self.action_dist.proba_distribution(action_logits=mean_actions)\n elif isinstance(self.action_dist, MultiCategoricalDistribution):\n # Here mean_actions are the flattened logits\n return self.action_dist.proba_distribution(action_logits=mean_actions)\n elif isinstance(self.action_dist, BernoulliDistribution):\n # Here mean_actions are the logits (before rounding to get the binary actions)\n return self.action_dist.proba_distribution(action_logits=mean_actions)\n elif isinstance(self.action_dist, StateDependentNoiseDistribution):\n return self.action_dist.proba_distribution(mean_actions, self.log_std, latent_pi)\n else:\n raise ValueError(\"Invalid action distribution\")\n\n def _predict(self, observation: th.Tensor, deterministic: bool = False) -> th.Tensor:\n \"\"\"\n Get the action according to the policy for a given observation.\n\n :param observation:\n :param deterministic: Whether to use stochastic or deterministic actions\n :return: Taken action according to the policy\n \"\"\"\n return self.get_distribution(observation).get_actions(deterministic=deterministic)\n\n def evaluate_actions(self, obs: th.Tensor, actions: th.Tensor) -> Tuple[th.Tensor, th.Tensor, Optional[th.Tensor]]:\n \"\"\"\n Evaluate actions according to the current policy,\n given the observations.\n\n :param obs: Observation\n :param actions: Actions\n :return: estimated value, log likelihood of taking those actions\n and entropy of the action distribution.\n \"\"\"\n # Preprocess the observation if needed\n features = self.extract_features(obs)\n if self.share_features_extractor:\n latent_pi, latent_vf = self.mlp_extractor(features)\n else:\n pi_features, vf_features = features\n latent_pi = self.mlp_extractor.forward_actor(pi_features)\n latent_vf = self.mlp_extractor.forward_critic(vf_features)\n distribution = self._get_action_dist_from_latent(latent_pi)\n log_prob = distribution.log_prob(actions)\n values = self.value_net(latent_vf)\n entropy = distribution.entropy()\n return values, log_prob, entropy\n\n def get_distribution(self, obs: th.Tensor) -> Distribution:\n \"\"\"\n Get the current policy distribution given the observations.\n\n :param obs:\n :return: the action distribution.\n \"\"\"\n features = super().extract_features(obs, self.pi_features_extractor)\n latent_pi = self.mlp_extractor.forward_actor(features)\n return self._get_action_dist_from_latent(latent_pi)\n\n def predict_values(self, obs: th.Tensor) -> th.Tensor:\n \"\"\"\n Get the estimated values according to the current policy given the observations.\n\n :param obs: Observation\n :return: the estimated values.\n \"\"\"\n features = super().extract_features(obs, self.vf_features_extractor)\n latent_vf = self.mlp_extractor.forward_critic(features)\n return self.value_net(latent_vf)" }, { "identifier": "BasePolicy", "path": "stable_baselines3/common/policies.py", "snippet": "class BasePolicy(BaseModel, ABC):\n \"\"\"The base policy object.\n\n 
Parameters are mostly the same as `BaseModel`; additions are documented below.\n\n :param args: positional arguments passed through to `BaseModel`.\n :param kwargs: keyword arguments passed through to `BaseModel`.\n :param squash_output: For continuous actions, whether the output is squashed\n or not using a ``tanh()`` function.\n \"\"\"\n\n def __init__(self, *args, squash_output: bool = False, **kwargs):\n super().__init__(*args, **kwargs)\n self._squash_output = squash_output\n\n @staticmethod\n def _dummy_schedule(progress_remaining: float) -> float:\n \"\"\"(float) Useful for pickling policy.\"\"\"\n del progress_remaining\n return 0.0\n\n @property\n def squash_output(self) -> bool:\n \"\"\"(bool) Getter for squash_output.\"\"\"\n return self._squash_output\n\n @staticmethod\n def init_weights(module: nn.Module, gain: float = 1) -> None:\n \"\"\"\n Orthogonal initialization (used in PPO and A2C)\n \"\"\"\n if isinstance(module, (nn.Linear, nn.Conv2d)):\n nn.init.orthogonal_(module.weight, gain=gain)\n if module.bias is not None:\n module.bias.data.fill_(0.0)\n\n @abstractmethod\n def _predict(self, observation: th.Tensor, deterministic: bool = False) -> th.Tensor:\n \"\"\"\n Get the action according to the policy for a given observation.\n\n By default provides a dummy implementation -- not all BasePolicy classes\n implement this, e.g. if they are a Critic in an Actor-Critic method.\n\n :param observation:\n :param deterministic: Whether to use stochastic or deterministic actions\n :return: Taken action according to the policy\n \"\"\"\n\n def predict(\n self,\n observation: Union[np.ndarray, Dict[str, np.ndarray]],\n state: Optional[Tuple[np.ndarray, ...]] = None,\n episode_start: Optional[np.ndarray] = None,\n deterministic: bool = False,\n ) -> Tuple[np.ndarray, Optional[Tuple[np.ndarray, ...]]]:\n \"\"\"\n Get the policy action from an observation (and optional hidden state).\n Includes sugar-coating to handle different observations (e.g. normalizing images).\n\n :param observation: the input observation\n :param state: The last hidden states (can be None, used in recurrent policies)\n :param episode_start: The last masks (can be None, used in recurrent policies)\n this correspond to beginning of episodes,\n where the hidden states of the RNN must be reset.\n :param deterministic: Whether or not to return deterministic actions.\n :return: the model's action and the next hidden state\n (used in recurrent policies)\n \"\"\"\n # TODO (GH/1): add support for RNN policies\n # if state is None:\n # state = self.initial_state\n # if episode_start is None:\n # episode_start = [False for _ in range(self.n_envs)]\n # Switch to eval mode (this affects batch norm / dropout)\n self.set_training_mode(False)\n\n observation, vectorized_env = self.obs_to_tensor(observation)\n\n with th.no_grad():\n actions = self._predict(observation, deterministic=deterministic)\n # Convert to numpy, and reshape to the original action shape\n actions = actions.cpu().numpy().reshape((-1,) + self.action_space.shape)\n\n if isinstance(self.action_space, spaces.Box):\n if self.squash_output:\n # Rescale to proper domain when using squashing\n actions = self.unscale_action(actions)\n else:\n # Actions could be on arbitrary scale, so clip the actions to avoid\n # out of bound error (e.g. 
if sampling from a Gaussian distribution)\n actions = np.clip(actions, self.action_space.low, self.action_space.high)\n\n # Remove batch dimension if needed\n if not vectorized_env:\n actions = actions.squeeze(axis=0)\n\n return actions, state\n\n def scale_action(self, action: np.ndarray) -> np.ndarray:\n \"\"\"\n Rescale the action from [low, high] to [-1, 1]\n (no need for symmetric action space)\n\n :param action: Action to scale\n :return: Scaled action\n \"\"\"\n low, high = self.action_space.low, self.action_space.high\n return 2.0 * ((action - low) / (high - low)) - 1.0\n\n def unscale_action(self, scaled_action: np.ndarray) -> np.ndarray:\n \"\"\"\n Rescale the action from [-1, 1] to [low, high]\n (no need for symmetric action space)\n\n :param scaled_action: Action to un-scale\n \"\"\"\n low, high = self.action_space.low, self.action_space.high\n return low + (0.5 * (scaled_action + 1.0) * (high - low))" }, { "identifier": "GymEnv", "path": "stable_baselines3/common/type_aliases.py", "snippet": "class RolloutBufferSamples(NamedTuple):\nclass DictRolloutBufferSamples(NamedTuple):\nclass ReplayBufferSamples(NamedTuple):\nclass DictReplayBufferSamples(NamedTuple):\nclass RolloutReturn(NamedTuple):\nclass TrainFrequencyUnit(Enum):\nclass TrainFreq(NamedTuple):\nclass PolicyPredictor(Protocol):\n STEP = \"step\"\n EPISODE = \"episode\"\n def predict(\n self,\n observation: Union[np.ndarray, Dict[str, np.ndarray]],\n state: Optional[Tuple[np.ndarray, ...]] = None,\n episode_start: Optional[np.ndarray] = None,\n deterministic: bool = False,\n ) -> Tuple[np.ndarray, Optional[Tuple[np.ndarray, ...]]]:" }, { "identifier": "explained_variance", "path": "stable_baselines3/common/utils.py", "snippet": "def explained_variance(y_pred: np.ndarray, y_true: np.ndarray) -> np.ndarray:\n \"\"\"\n Computes fraction of variance that ypred explains about y.\n Returns 1 - Var[y-ypred] / Var[y]\n\n interpretation:\n ev=0 => might as well have predicted zero\n ev=1 => perfect prediction\n ev<0 => worse than just predicting zero\n\n :param y_pred: the prediction\n :param y_true: the expected value\n :return: explained variance of ypred and y\n \"\"\"\n assert y_true.ndim == 1 and y_pred.ndim == 1\n var_y = np.var(y_true)\n return np.nan if var_y == 0 else 1 - np.var(y_true - y_pred) / var_y" }, { "identifier": "conjugate_gradient_solver", "path": "sb3_contrib/common/utils.py", "snippet": "def conjugate_gradient_solver(\n matrix_vector_dot_fn: Callable[[th.Tensor], th.Tensor],\n b,\n max_iter=10,\n residual_tol=1e-10,\n) -> th.Tensor:\n \"\"\"\n Finds an approximate solution to a set of linear equations Ax = b\n\n Sources:\n - https://github.com/ajlangley/trpo-pytorch/blob/master/conjugate_gradient.py\n - https://github.com/joschu/modular_rl/blob/master/modular_rl/trpo.py#L122\n\n Reference:\n - https://epubs.siam.org/doi/abs/10.1137/1.9781611971446.ch6\n\n :param matrix_vector_dot_fn:\n a function that right multiplies a matrix A by a vector v\n :param b:\n the right hand term in the set of linear equations Ax = b\n :param max_iter:\n the maximum number of iterations (default is 10)\n :param residual_tol:\n residual tolerance for early stopping of the solving (default is 1e-10)\n :return x:\n the approximate solution to the system of equations defined by `matrix_vector_dot_fn`\n and b\n \"\"\"\n\n # The vector is not initialized at 0 because of the instability issues when the gradient becomes small.\n # A small random gaussian noise is used for the initialization.\n x = 1e-4 * th.randn_like(b)\n residual = 
b - matrix_vector_dot_fn(x)\n # Equivalent to th.linalg.norm(residual) ** 2 (L2 norm squared)\n residual_squared_norm = th.matmul(residual, residual)\n\n if residual_squared_norm < residual_tol:\n # If the gradient becomes extremely small\n # The denominator in alpha will become zero\n # Leading to a division by zero\n return x\n\n p = residual.clone()\n\n for i in range(max_iter):\n # A @ p (matrix vector multiplication)\n A_dot_p = matrix_vector_dot_fn(p)\n\n alpha = residual_squared_norm / p.dot(A_dot_p)\n x += alpha * p\n\n if i == max_iter - 1:\n return x\n\n residual -= alpha * A_dot_p\n new_residual_squared_norm = th.matmul(residual, residual)\n\n if new_residual_squared_norm < residual_tol:\n return x\n\n beta = new_residual_squared_norm / residual_squared_norm\n residual_squared_norm = new_residual_squared_norm\n p = residual + beta * p\n # Note: this return statement is only used when max_iter=0\n return x" }, { "identifier": "flat_grad", "path": "sb3_contrib/common/utils.py", "snippet": "def flat_grad(\n output,\n parameters: Sequence[nn.parameter.Parameter],\n create_graph: bool = False,\n retain_graph: bool = False,\n) -> th.Tensor:\n \"\"\"\n Returns the gradients of the passed sequence of parameters into a flat gradient.\n Order of parameters is preserved.\n\n :param output: functional output to compute the gradient for\n :param parameters: sequence of ``Parameter``\n :param retain_graph: – If ``False``, the graph used to compute the grad will be freed.\n Defaults to the value of ``create_graph``.\n :param create_graph: – If ``True``, graph of the derivative will be constructed,\n allowing to compute higher order derivative products. Default: ``False``.\n :return: Tensor containing the flattened gradients\n \"\"\"\n grads = th.autograd.grad(\n output,\n parameters,\n create_graph=create_graph,\n retain_graph=retain_graph,\n allow_unused=True,\n )\n return th.cat([th.ravel(grad) for grad in grads if grad is not None])" }, { "identifier": "CnnPolicy", "path": "sb3_contrib/trpo/policies.py", "snippet": "" } ]
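The conjugate_gradient_solver snippet in the context above is the piece TRPO uses to approximately solve a linear system Ax = b without ever materializing A: it only needs a callable that multiplies A by a vector. A minimal, illustrative-only sketch of exercising it on a tiny symmetric positive-definite system (the matrix and vector values are made up for the example, not taken from the dataset):

import torch as th
from sb3_contrib.common.utils import conjugate_gradient_solver

# Toy SPD system A x = b (values are illustrative only)
A = th.tensor([[4.0, 1.0], [1.0, 3.0]])
b = th.tensor([1.0, 2.0])

def matrix_vector_dot_fn(v: th.Tensor) -> th.Tensor:
    # The solver only needs a callable that right-multiplies A by a vector
    return A @ v

x = conjugate_gradient_solver(matrix_vector_dot_fn, b, max_iter=10)
print(x)           # close to th.linalg.solve(A, b) = [0.0909, 0.6364]
print(A @ x - b)   # residual, approximately zero (solver starts from small random noise)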
import copy
import warnings
import numpy as np
import torch as th
from functools import partial
from typing import Any, Dict, List, Optional, Tuple, Type, TypeVar, Union
from gym import spaces
from stable_baselines3.common.distributions import kl_divergence
from stable_baselines3.common.on_policy_algorithm import OnPolicyAlgorithm
from stable_baselines3.common.policies import ActorCriticPolicy, BasePolicy
from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, RolloutBufferSamples, Schedule
from stable_baselines3.common.utils import explained_variance
from torch import nn
from torch.nn import functional as F
from sb3_contrib.common.utils import conjugate_gradient_solver, flat_grad
from sb3_contrib.trpo.policies import CnnPolicy, MlpPolicy, MultiInputPolicy
11,945
actor_params, policy_objective_gradients, grad_kl, grad_shape = self._compute_actor_grad(kl_div, policy_objective) # Hessian-vector dot product function used in the conjugate gradient step hessian_vector_product_fn = partial(self.hessian_vector_product, actor_params, grad_kl) # Computing search direction search_direction = conjugate_gradient_solver( hessian_vector_product_fn, policy_objective_gradients, max_iter=self.cg_max_steps, ) # Maximal step length line_search_max_step_size = 2 * self.target_kl line_search_max_step_size /= th.matmul( search_direction, hessian_vector_product_fn(search_direction, retain_graph=False) ) line_search_max_step_size = th.sqrt(line_search_max_step_size) line_search_backtrack_coeff = 1.0 original_actor_params = [param.detach().clone() for param in actor_params] is_line_search_success = False with th.no_grad(): # Line-search (backtracking) for _ in range(self.line_search_max_iter): start_idx = 0 # Applying the scaled step direction for param, original_param, shape in zip(actor_params, original_actor_params, grad_shape): n_params = param.numel() param.data = ( original_param.data + line_search_backtrack_coeff * line_search_max_step_size * search_direction[start_idx : (start_idx + n_params)].view(shape) ) start_idx += n_params # Recomputing the policy log-probabilities distribution = self.policy.get_distribution(rollout_data.observations) log_prob = distribution.log_prob(actions) # New policy objective ratio = th.exp(log_prob - rollout_data.old_log_prob) new_policy_objective = (advantages * ratio).mean() # New KL-divergence kl_div = kl_divergence(distribution, old_distribution).mean() # Constraint criteria: # we need to improve the surrogate policy objective # while being close enough (in term of kl div) to the old policy if (kl_div < self.target_kl) and (new_policy_objective > policy_objective): is_line_search_success = True break # Reducing step size if line-search wasn't successful line_search_backtrack_coeff *= self.line_search_shrinking_factor line_search_results.append(is_line_search_success) if not is_line_search_success: # If the line-search wasn't successful we revert to the original parameters for param, original_param in zip(actor_params, original_actor_params): param.data = original_param.data.clone() policy_objective_values.append(policy_objective.item()) kl_divergences.append(0) else: policy_objective_values.append(new_policy_objective.item()) kl_divergences.append(kl_div.item()) # Critic update for _ in range(self.n_critic_updates): for rollout_data in self.rollout_buffer.get(self.batch_size): values_pred = self.policy.predict_values(rollout_data.observations) value_loss = F.mse_loss(rollout_data.returns, values_pred.flatten()) value_losses.append(value_loss.item()) self.policy.optimizer.zero_grad() value_loss.backward() # Removing gradients of parameters shared with the actor # otherwise it defeats the purposes of the KL constraint for param in actor_params: param.grad = None self.policy.optimizer.step() self._n_updates += 1 explained_var = explained_variance(self.rollout_buffer.values.flatten(), self.rollout_buffer.returns.flatten()) # Logs self.logger.record("train/policy_objective", np.mean(policy_objective_values)) self.logger.record("train/value_loss", np.mean(value_losses)) self.logger.record("train/kl_divergence_loss", np.mean(kl_divergences)) self.logger.record("train/explained_variance", explained_var) self.logger.record("train/is_line_search_success", np.mean(line_search_results)) if hasattr(self.policy, "log_std"): 
self.logger.record("train/std", th.exp(self.policy.log_std).mean().item()) self.logger.record("train/n_updates", self._n_updates, exclude="tensorboard") def hessian_vector_product( self, params: List[nn.Parameter], grad_kl: th.Tensor, vector: th.Tensor, retain_graph: bool = True ) -> th.Tensor: """ Computes the matrix-vector product with the Fisher information matrix. :param params: list of parameters used to compute the Hessian :param grad_kl: flattened gradient of the KL divergence between the old and new policy :param vector: vector to compute the dot product the hessian-vector dot product with :param retain_graph: if True, the graph will be kept after computing the Hessian :return: Hessian-vector dot product (with damping) """ jacobian_vector_product = (grad_kl * vector).sum() return flat_grad(jacobian_vector_product, params, retain_graph=retain_graph) + self.cg_damping * vector def learn( self: SelfTRPO, total_timesteps: int,
SelfTRPO = TypeVar("SelfTRPO", bound="TRPO") class TRPO(OnPolicyAlgorithm): """ Trust Region Policy Optimization (TRPO) Paper: https://arxiv.org/abs/1502.05477 Code: This implementation borrows code from OpenAI Spinning Up (https://github.com/openai/spinningup/) and Stable Baselines (TRPO from https://github.com/hill-a/stable-baselines) Introduction to TRPO: https://spinningup.openai.com/en/latest/algorithms/trpo.html :param policy: The policy model to use (MlpPolicy, CnnPolicy, ...) :param env: The environment to learn from (if registered in Gym, can be str) :param learning_rate: The learning rate for the value function, it can be a function of the current progress remaining (from 1 to 0) :param n_steps: The number of steps to run for each environment per update (i.e. rollout buffer size is n_steps * n_envs where n_envs is number of environment copies running in parallel) NOTE: n_steps * n_envs must be greater than 1 (because of the advantage normalization) See https://github.com/pytorch/pytorch/issues/29372 :param batch_size: Minibatch size for the value function :param gamma: Discount factor :param cg_max_steps: maximum number of steps in the Conjugate Gradient algorithm for computing the Hessian vector product :param cg_damping: damping in the Hessian vector product computation :param line_search_shrinking_factor: step-size reduction factor for the line-search (i.e., ``theta_new = theta + alpha^i * step``) :param line_search_max_iter: maximum number of iteration for the backtracking line-search :param n_critic_updates: number of critic updates per policy update :param gae_lambda: Factor for trade-off of bias vs variance for Generalized Advantage Estimator :param use_sde: Whether to use generalized State Dependent Exploration (gSDE) instead of action noise exploration (default: False) :param sde_sample_freq: Sample a new noise matrix every n steps when using gSDE Default: -1 (only sample at the beginning of the rollout) :param normalize_advantage: Whether to normalize or not the advantage :param target_kl: Target Kullback-Leibler divergence between updates. Should be small for stability. Values like 0.01, 0.05. :param sub_sampling_factor: Sub-sample the batch to make computation faster see p40-42 of John Schulman thesis http://joschu.net/docs/thesis.pdf :param tensorboard_log: the log location for tensorboard (if None, no logging) :param policy_kwargs: additional arguments to be passed to the policy on creation :param verbose: the verbosity level: 0 no output, 1 info, 2 debug :param seed: Seed for the pseudo random generators :param device: Device (cpu, cuda, ...) on which the code should be run. Setting it to auto, the code will be run on the GPU if possible. 
:param _init_setup_model: Whether or not to build the network at the creation of the instance """ policy_aliases: Dict[str, Type[BasePolicy]] = { "MlpPolicy": MlpPolicy, "CnnPolicy": CnnPolicy, "MultiInputPolicy": MultiInputPolicy, } def __init__( self, policy: Union[str, Type[ActorCriticPolicy]], env: Union[GymEnv, str], learning_rate: Union[float, Schedule] = 1e-3, n_steps: int = 2048, batch_size: int = 128, gamma: float = 0.99, cg_max_steps: int = 15, cg_damping: float = 0.1, line_search_shrinking_factor: float = 0.8, line_search_max_iter: int = 10, n_critic_updates: int = 10, gae_lambda: float = 0.95, use_sde: bool = False, sde_sample_freq: int = -1, normalize_advantage: bool = True, target_kl: float = 0.01, sub_sampling_factor: int = 1, tensorboard_log: Optional[str] = None, policy_kwargs: Optional[Dict[str, Any]] = None, verbose: int = 0, seed: Optional[int] = None, device: Union[th.device, str] = "auto", _init_setup_model: bool = True, ): super().__init__( policy, env, learning_rate=learning_rate, n_steps=n_steps, gamma=gamma, gae_lambda=gae_lambda, ent_coef=0.0, # entropy bonus is not used by TRPO vf_coef=0.0, # value function is optimized separately max_grad_norm=0.0, use_sde=use_sde, sde_sample_freq=sde_sample_freq, tensorboard_log=tensorboard_log, policy_kwargs=policy_kwargs, verbose=verbose, device=device, seed=seed, _init_setup_model=False, supported_action_spaces=( spaces.Box, spaces.Discrete, spaces.MultiDiscrete, spaces.MultiBinary, ), ) self.normalize_advantage = normalize_advantage # Sanity check, otherwise it will lead to noisy gradient and NaN # because of the advantage normalization if self.env is not None: # Check that `n_steps * n_envs > 1` to avoid NaN # when doing advantage normalization buffer_size = self.env.num_envs * self.n_steps if normalize_advantage: assert buffer_size > 1, ( "`n_steps * n_envs` must be greater than 1. " f"Currently n_steps={self.n_steps} and n_envs={self.env.num_envs}" ) # Check that the rollout buffer size is a multiple of the mini-batch size untruncated_batches = buffer_size // batch_size if buffer_size % batch_size > 0: warnings.warn( f"You have specified a mini-batch size of {batch_size}," f" but because the `RolloutBuffer` is of size `n_steps * n_envs = {buffer_size}`," f" after every {untruncated_batches} untruncated mini-batches," f" there will be a truncated mini-batch of size {buffer_size % batch_size}\n" f"We recommend using a `batch_size` that is a factor of `n_steps * n_envs`.\n" f"Info: (n_steps={self.n_steps} and n_envs={self.env.num_envs})" ) self.batch_size = batch_size # Conjugate gradients parameters self.cg_max_steps = cg_max_steps self.cg_damping = cg_damping # Backtracking line search parameters self.line_search_shrinking_factor = line_search_shrinking_factor self.line_search_max_iter = line_search_max_iter self.target_kl = target_kl self.n_critic_updates = n_critic_updates self.sub_sampling_factor = sub_sampling_factor if _init_setup_model: self._setup_model() def _compute_actor_grad( self, kl_div: th.Tensor, policy_objective: th.Tensor ) -> Tuple[List[nn.Parameter], th.Tensor, th.Tensor, List[Tuple[int, ...]]]: """ Compute actor gradients for kl div and surrogate objectives. :param kl_div: The KL divergence objective :param policy_objective: The surrogate objective ("classic" policy gradient) :return: List of actor params, gradients and gradients shape. """ # This is necessary because not all the parameters in the policy have gradients w.r.t. 
the KL divergence # The policy objective is also called surrogate objective policy_objective_gradients = [] # Contains the gradients of the KL divergence grad_kl = [] # Contains the shape of the gradients of the KL divergence w.r.t each parameter # This way the flattened gradient can be reshaped back into the original shapes and applied to # the parameters grad_shape = [] # Contains the parameters which have non-zeros KL divergence gradients # The list is used during the line-search to apply the step to each parameters actor_params = [] for name, param in self.policy.named_parameters(): # Skip parameters related to value function based on name # this work for built-in policies only (not custom ones) if "value" in name: continue # For each parameter we compute the gradient of the KL divergence w.r.t to that parameter kl_param_grad, *_ = th.autograd.grad( kl_div, param, create_graph=True, retain_graph=True, allow_unused=True, only_inputs=True, ) # If the gradient is not zero (not None), we store the parameter in the actor_params list # and add the gradient and its shape to grad_kl and grad_shape respectively if kl_param_grad is not None: # If the parameter impacts the KL divergence (i.e. the policy) # we compute the gradient of the policy objective w.r.t to the parameter # this avoids computing the gradient if it's not going to be used in the conjugate gradient step policy_objective_grad, *_ = th.autograd.grad(policy_objective, param, retain_graph=True, only_inputs=True) grad_shape.append(kl_param_grad.shape) grad_kl.append(kl_param_grad.reshape(-1)) policy_objective_gradients.append(policy_objective_grad.reshape(-1)) actor_params.append(param) # Gradients are concatenated before the conjugate gradient step policy_objective_gradients = th.cat(policy_objective_gradients) grad_kl = th.cat(grad_kl) return actor_params, policy_objective_gradients, grad_kl, grad_shape def train(self) -> None: """ Update policy using the currently gathered rollout buffer. """ # Switch to train mode (this affects batch norm / dropout) self.policy.set_training_mode(True) # Update optimizer learning rate self._update_learning_rate(self.policy.optimizer) policy_objective_values = [] kl_divergences = [] line_search_results = [] value_losses = [] # This will only loop once (get all data in one go) for rollout_data in self.rollout_buffer.get(batch_size=None): # Optional: sub-sample data for faster computation if self.sub_sampling_factor > 1: rollout_data = RolloutBufferSamples( rollout_data.observations[:: self.sub_sampling_factor], rollout_data.actions[:: self.sub_sampling_factor], None, # old values, not used here rollout_data.old_log_prob[:: self.sub_sampling_factor], rollout_data.advantages[:: self.sub_sampling_factor], None, # returns, not used here ) actions = rollout_data.actions if isinstance(self.action_space, spaces.Discrete): # Convert discrete action from float to long actions = rollout_data.actions.long().flatten() # Re-sample the noise matrix because the log_std has changed if self.use_sde: # batch_size is only used for the value function self.policy.reset_noise(actions.shape[0]) with th.no_grad(): # Note: is copy enough, no need for deepcopy? # If using gSDE and deepcopy, we need to use `old_distribution.distribution` # directly to avoid PyTorch errors. 
old_distribution = copy.copy(self.policy.get_distribution(rollout_data.observations)) distribution = self.policy.get_distribution(rollout_data.observations) log_prob = distribution.log_prob(actions) advantages = rollout_data.advantages if self.normalize_advantage: advantages = (advantages - advantages.mean()) / (rollout_data.advantages.std() + 1e-8) # ratio between old and new policy, should be one at the first iteration ratio = th.exp(log_prob - rollout_data.old_log_prob) # surrogate policy objective policy_objective = (advantages * ratio).mean() # KL divergence kl_div = kl_divergence(distribution, old_distribution).mean() # Surrogate & KL gradient self.policy.optimizer.zero_grad() actor_params, policy_objective_gradients, grad_kl, grad_shape = self._compute_actor_grad(kl_div, policy_objective) # Hessian-vector dot product function used in the conjugate gradient step hessian_vector_product_fn = partial(self.hessian_vector_product, actor_params, grad_kl) # Computing search direction search_direction = conjugate_gradient_solver( hessian_vector_product_fn, policy_objective_gradients, max_iter=self.cg_max_steps, ) # Maximal step length line_search_max_step_size = 2 * self.target_kl line_search_max_step_size /= th.matmul( search_direction, hessian_vector_product_fn(search_direction, retain_graph=False) ) line_search_max_step_size = th.sqrt(line_search_max_step_size) line_search_backtrack_coeff = 1.0 original_actor_params = [param.detach().clone() for param in actor_params] is_line_search_success = False with th.no_grad(): # Line-search (backtracking) for _ in range(self.line_search_max_iter): start_idx = 0 # Applying the scaled step direction for param, original_param, shape in zip(actor_params, original_actor_params, grad_shape): n_params = param.numel() param.data = ( original_param.data + line_search_backtrack_coeff * line_search_max_step_size * search_direction[start_idx : (start_idx + n_params)].view(shape) ) start_idx += n_params # Recomputing the policy log-probabilities distribution = self.policy.get_distribution(rollout_data.observations) log_prob = distribution.log_prob(actions) # New policy objective ratio = th.exp(log_prob - rollout_data.old_log_prob) new_policy_objective = (advantages * ratio).mean() # New KL-divergence kl_div = kl_divergence(distribution, old_distribution).mean() # Constraint criteria: # we need to improve the surrogate policy objective # while being close enough (in term of kl div) to the old policy if (kl_div < self.target_kl) and (new_policy_objective > policy_objective): is_line_search_success = True break # Reducing step size if line-search wasn't successful line_search_backtrack_coeff *= self.line_search_shrinking_factor line_search_results.append(is_line_search_success) if not is_line_search_success: # If the line-search wasn't successful we revert to the original parameters for param, original_param in zip(actor_params, original_actor_params): param.data = original_param.data.clone() policy_objective_values.append(policy_objective.item()) kl_divergences.append(0) else: policy_objective_values.append(new_policy_objective.item()) kl_divergences.append(kl_div.item()) # Critic update for _ in range(self.n_critic_updates): for rollout_data in self.rollout_buffer.get(self.batch_size): values_pred = self.policy.predict_values(rollout_data.observations) value_loss = F.mse_loss(rollout_data.returns, values_pred.flatten()) value_losses.append(value_loss.item()) self.policy.optimizer.zero_grad() value_loss.backward() # Removing gradients of parameters shared with 
the actor # otherwise it defeats the purposes of the KL constraint for param in actor_params: param.grad = None self.policy.optimizer.step() self._n_updates += 1 explained_var = explained_variance(self.rollout_buffer.values.flatten(), self.rollout_buffer.returns.flatten()) # Logs self.logger.record("train/policy_objective", np.mean(policy_objective_values)) self.logger.record("train/value_loss", np.mean(value_losses)) self.logger.record("train/kl_divergence_loss", np.mean(kl_divergences)) self.logger.record("train/explained_variance", explained_var) self.logger.record("train/is_line_search_success", np.mean(line_search_results)) if hasattr(self.policy, "log_std"): self.logger.record("train/std", th.exp(self.policy.log_std).mean().item()) self.logger.record("train/n_updates", self._n_updates, exclude="tensorboard") def hessian_vector_product( self, params: List[nn.Parameter], grad_kl: th.Tensor, vector: th.Tensor, retain_graph: bool = True ) -> th.Tensor: """ Computes the matrix-vector product with the Fisher information matrix. :param params: list of parameters used to compute the Hessian :param grad_kl: flattened gradient of the KL divergence between the old and new policy :param vector: vector to compute the dot product the hessian-vector dot product with :param retain_graph: if True, the graph will be kept after computing the Hessian :return: Hessian-vector dot product (with damping) """ jacobian_vector_product = (grad_kl * vector).sum() return flat_grad(jacobian_vector_product, params, retain_graph=retain_graph) + self.cg_damping * vector def learn( self: SelfTRPO, total_timesteps: int,
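The backtracking line search in the all_code field above applies a single flat update vector to many differently-shaped parameters by slicing it with numel() and reshaping each slice with .view(shape); flat_grad performs the inverse flattening. A standalone sketch of that flatten/unflatten pattern with made-up shapes (not from the dataset):

import torch as th

shapes = [(2, 3), (3,)]                       # toy parameter shapes (assumed)
params = [th.zeros(s) for s in shapes]
flat_step = th.arange(9, dtype=th.float32)    # 6 + 3 = 9 entries in the flat vector

start = 0
for p, shape in zip(params, shapes):
    n = p.numel()
    # Take this parameter's slice of the flat vector and restore its shape
    p.data += flat_step[start:start + n].view(shape)
    start += n

print(params[0])   # tensor([[0., 1., 2.], [3., 4., 5.]])
print(params[1])   # tensor([6., 7., 8.])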
callback: MaybeCallback = None,
4
2023-10-28 01:09:21+00:00
16k
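The hessian_vector_product method in the TRPO record above never forms the full Fisher/Hessian matrix: it takes the flat KL gradient (built with create_graph=True in _compute_actor_grad), dots it with a vector, and differentiates that scalar a second time, then adds cg_damping * vector. A standalone, illustrative-only sketch of the same double-backprop trick on a toy scalar function, with values chosen so the result is easy to verify by hand:

import torch as th

theta = th.tensor([1.0, 2.0], requires_grad=True)
# Toy scalar standing in for the KL divergence term
f = (theta ** 2).sum() + theta[0] * theta[1]

# First backprop keeps the graph so the gradient can be differentiated again
grad_f, = th.autograd.grad(f, theta, create_graph=True)

v = th.tensor([0.5, -1.0])
# Second backprop of (grad_f . v) yields H @ v without ever building H
hvp, = th.autograd.grad((grad_f * v).sum(), theta)
print(hvp)   # H = [[2, 1], [1, 2]], so H @ v = tensor([0.0, -1.5])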
tobagin/whakarere
whakarere/windows/whakarere.py
[ { "identifier": "ConfigManager", "path": "whakarere/managers/config.py", "snippet": "class ConfigManager:\n def __init__(self, window):\n self.window = window\n self.config = {}\n self.config_file_path = os.path.expanduser(\"~/.config/whakarere/config.json\")\n atexit.register(self.save_config)\n\n def load_config(self):\n if os.path.exists(self.config_file_path):\n with open(self.config_file_path, \"r\") as f:\n self.config = json.load(f)\n\n def save_config(self):\n with open(self.config_file_path, \"w\") as f:\n json.dump(self.config, f)\n \n def set_config(self, key, value):\n self.config[key] = value\n\n def get_config(self, key):\n return self.config.get(key)" }, { "identifier": "SessionManager", "path": "whakarere/managers/session.py", "snippet": "class SessionManager:\n def __init__(self, window):\n self.window = window\n api_key = \"your_global_api_key_here\"\n self.api_url = \"http://localhost:3000\"\n self.headers = { 'x-api-key': api_key }\n self.current_session_id = None\n self.session_ids = []\n\n def add_session(self, session_id):\n if session_id not in self.session_ids:\n if self.check_session_id(session_id):\n self.session_ids.append(session_id)\n self.save_session_ids()\n else:\n self.terminate_session(session_id)\n session_id = self.add_session(self.generate_session_id())\n self.check_session_id(session_id)\n self.session_ids.append(session_id)\n self.save_session_ids()\n\n def remove_session(self, session_id):\n if session_id in self.session_ids:\n self.session_ids.remove(session_id)\n self.save_session_ids()\n if not self.check_session_status(session_id):\n self.terminate_session(session_id)\n\n def get_session_ids_size(self):\n return len(self.session_ids)\n\n def generate_session_id(self):\n return str(uuid.uuid4())\n\n def get_session(self, session_id):\n return self.session_ids.get(session_id)\n \n def set_current_session(self, session_id):\n self.current_session_id = session_id\n \n def get_current_session(self):\n return self.current_session_id\n\n def clear_current_session(self):\n self.current_session_id = None\n\n def get_session_ids(self):\n return self.session_ids\n \n def load_sessions(self):\n self.session_ids = self.window.config_manager.get_config(\"session_ids\")\n if self.session_ids is None:\n self.session_ids = []\n \n def save_session_ids(self):\n self.window.config_manager.set_config(\"session_ids\", self.session_ids)\n self.window.config_manager.save_config()\n \n def get_current_session_user_id(self):\n return self.window.whatsapp_manager.get_user_id(self.current_session_id)\n \n def check_session_status(self, session_id):\n url = self.api_url + f'/session/status/{session_id}'\n result = requests.get(url, headers=self.headers).json()[\"success\"]\n\n if(self.window.is_debug()):\n print(\"check_session_status: \" + str(result))\n \n return result \n\n def check_session_id(self, session_id):\n url = self.api_url + f'/session/start/{session_id}'\n result = requests.get(url, headers=self.headers).json()[\"success\"]\n\n if(self.window.is_debug()):\n print(\"check_session_id: \" + str(result))\n \n return result \n\n def terminate_session(self, session_id):\n url = self.api_url + f'/session/terminate/{session_id}'\n result = requests.get(url, headers=self.headers).json()[\"success\"]\n\n if(self.window.is_debug()):\n print(\"terminate_session: \" + str(result))\n \n return result \n \n def terminate_inactive_sessions(self):\n url = self.api_url + f'/session/terminateInactive'\n result = requests.get(url, headers=self.headers).json()[\"success\"]\n\n 
if(self.window.is_debug()):\n print(\"terminate_inactive_sessions: \" + str(result))\n \n return result \n\n def terminate_all_sessions(self, test=False):\n url = self.api_url + f'/session/terminateAll'\n result = requests.get(url, headers=self.headers).json()[\"success\"]\n\n if(self.window.is_debug()):\n print(\"terminate_inactive_sessions: \" + str(result))\n \n return result " }, { "identifier": "WhatsAppSessionManager", "path": "whakarere/managers/whatsapp.py", "snippet": "class WhatsAppSessionManager:\n def __init__(self, window):\n self.window = window\n api_key = \"your_global_api_key_here\"\n self.api_url = \"http://localhost:3000\"\n self.headers = { 'x-api-key': api_key }\n self.whatsapp_messenger_pages = {}\n self.chats = {} # Changed to a dictionary to map session IDs to chats\n self.chats_avatar = {} # Presumably for future functionality\n self.databases = {} # Changed to a dictionary to map session IDs to databases\n self.chat_messages = {} # Presumably for future functionality\n self.number = 0\n\n def load_or_create_databases(self):\n db_directory = os.path.expanduser(\"~/.config/whakarere/dbs\")\n\n # Ensure the database directory exists\n if not os.path.exists(db_directory):\n os.makedirs(db_directory)\n\n for session_id in self.window.session_manager.session_ids:\n db_file = f\"{session_id}.db\"\n db_path = os.path.join(db_directory, db_file)\n\n # Connect to the SQLite database\n conn = sqlite3.connect(db_path)\n cursor = conn.cursor()\n\n # Store the connection in the dictionary\n self.databases[session_id] = conn\n\n # Close the cursor\n cursor.close()\n\n def initialize(self):\n sessions_thread = threading.Thread(target=self.initialize_sessions)\n sessions_thread.start()\n\n def initialize_sessions(self):\n for session in self.window.session_manager.session_ids:\n if self.window.session_manager.check_session_status(session):\n result = self.get_chats(session) # Fixed assignment\n self.chats[session] = result # Store chats indexed by session ID\n for chat in result:\n chat_id = chat[\"id\"][\"_serialized\"]\n if chat[\"isGroup\"]:\n print(chat_id)\n try:\n self.chat_messages[chat_id] = self.chat_fetch_messages(chat_id, session)\n except: \n trimmed_chat_id = chat_id[-15:]\n print(trimmed_chat_id)\n self.chats[trimmed_chat_id] = self.chat_fetch_messages(trimmed_chat_id, session)\n else:\n self.chat_messages[chat_id] = self.chat_fetch_messages(chat_id, session)\n self.chats_avatar[chat_id] = self.get_user_profile_picture(chat_id, session)\n self.window.whatsapp_manager.add_whatsapp_messenger_page(session)\n\n def initialize_session_by_id(self, session_id):\n if self.window.session_manager.check_session_status(session_id):\n result = self.get_chats(session_id) # Fixed assignment\n self.chats[session_id] = result # Store chats indexed by session ID\n for chat in result:\n chat_id = chat[\"id\"][\"_serialized\"]\n if chat[\"isGroup\"]:\n print(chat_id)\n try:\n self.chat_messages[chat_id] = self.chat_fetch_messages(chat_id, session_id)\n except: \n trimmed_chat_id = chat_id[-15:]\n print(trimmed_chat_id)\n self.chats[trimmed_chat_id] = self.chat_fetch_messages(trimmed_chat_id, session_id)\n else:\n self.chat_messages[chat_id] = self.chat_fetch_messages(chat_id, session_id)\n self.chats_avatar[chat_id] = self.get_user_profile_picture(chat_id, session_id)\n if session_id not in self.whatsapp_sessions_pages:\n self.whatsapp_sessions_pages[session_id] = WhatsappMessengerPage(self, session_id)\n\n def navigate_to_whatsapp_messenger_page(self, session_id):\n # make it so it 
checks for for already open session on whatsapp_sessions_pages\n # if it has one and if doesn´t it creates a new one and pushes into the whatsapp_sessions_pages\n if session_id in self.whatsapp_sessions_pages:\n self.main_window.navigation_view.push(self.whatsapp_sessions_pages[session_id])\n else:\n self.add_whatsapp_messenger_page(session_id)\n self.main_window.navigation_view.push(self.whatsapp_sessions_pages[session_id])\n\n ############################\n # Chat methods\n ############################\n\n def get_chats(self, session_id):\n url = self.api_url + f'/client/getChats/{session_id}'\n result = requests.get(url, headers=self.headers).json()[\"chats\"]\n\n if(self.window.is_debug()):\n print(\"get_chats: \" + str(result))\n \n return result \n \n def chat_fetch_messages(self, chat_id, session_id):\n url = self.api_url + f'/chat/fetchMessages/{session_id}'\n result = requests.post(url, headers=self.headers, json={'chatId': chat_id})\n if(self.number == 3):\n print(result)\n\n json = result.json()\n\n if(self.window.is_debug()):\n print(\"get_chat_messages: \" + str(result))\n\n if(self.number == 3):\n print(json) \n self.number += 1\n \n return result \n\n def get_chats_by_id(self, session_id):\n return self.chats.get(session_id, [])\n\n def get_chat_avatar(self, chat_id):\n url = self.chats_avatar.get(chat_id, None)\n if url is not None:\n response = requests.get(url)\n loader = GdkPixbuf.PixbufLoader()\n loader.write(response.content)\n loader.close()\n return Gdk.Texture.new_for_pixbuf(loader.get_pixbuf())\n else:\n binary_data = base64.b64decode(UnknownContact.base64image)\n gbytes = GLib.Bytes.new(binary_data)\n input_stream = Gio.MemoryInputStream.new_from_bytes(gbytes)\n pixbuf = GdkPixbuf.Pixbuf.new_from_stream(input_stream, None)\n return Gdk.Texture.new_for_pixbuf(pixbuf)\n \n def get_user_profile_picture(self, userid, session_id):\n url = self.api_url + f'/client/getProfilePicUrl/{session_id}'\n try:\n result = requests.post(url, headers=self.headers, json={'contactId': userid}).json()[\"result\"]\n except:\n result = None\n\n if(self.window.is_debug()):\n print(\"get_user_profile_picture: \" + str(result))\n \n return result \n\n def get_user_id(self, session_id):\n url = self.api_url + f'/client/getClassInfo/{session_id}'\n result = requests.get(url, headers=self.headers).json()[\"sessionInfo\"][\"wid\"][\"_serialized\"] # Extract userid\n\n if(self.window.is_debug()):\n print(\"get_user_id: \" + str(result))\n \n return result \n\n def get_user_name(self, session_id):\n url = self.api_url + f'/client/getClassInfo/{session_id}'\n result = requests.get(url, headers=self.headers).json()[\"sessionInfo\"][\"pushname\"] # Return pushname\n\n if(self.window.is_debug()):\n print(\"get_user_name: \" + str(result))\n \n return result \n\n ############################\n # Contact methods\n ############################\n\n def get_contact_info(self, contact_id, session_id):\n url = self.api_url + f'/contact/getClassInfo/{session_id}'\n result = requests.post(url, headers=self.headers, json={'contactId': contact_id}).json()\n print(result)\n if(self.window.is_debug()):\n print(\"get_contact_info: \" + str(result))\n \n return result" }, { "identifier": "WindowTitlebarWidget", "path": "whakarere/widgets/titlebar.py", "snippet": "class WindowTitlebarWidget(Gtk.Box):\n def __init__(self):\n super().__init__(orientation=Gtk.Orientation.VERTICAL, spacing=2)\n self.label_title = Gtk.Label(label=\"Whakarere\")\n self.label_title.add_css_class(\"title\")\n self.label_subtitle = 
Gtk.Label(label=\"Available Sessions\")\n self.label_subtitle.add_css_class(\"subtitle\")\n self.append(self.label_title)\n self.append(self.label_subtitle)\n\n def set_title(self, title):\n self.label_title.set_label(title)\n\n def set_subtitle(self, subtitle):\n self.label_subtitle.set_label(subtitle)" }, { "identifier": "MainMenuButtonWidget", "path": "whakarere/widgets/main_menu.py", "snippet": "class MainMenuButtonWidget(Gtk.MenuButton):\n def __init__(self):\n super().__init__()\n # Create MainMenu Button Widget\n self.set_icon_name(\"open-menu-symbolic\")\n self.set_tooltip_text(\"Main Menu\")\n self.set_has_frame(False)\n self.set_direction(Gtk.ArrowType.DOWN)\n self.set_popover(Gtk.Popover())\n self.get_popover().set_position(Gtk.PositionType.BOTTOM)\n self.get_popover().set_has_arrow(True)\n self.get_popover().set_size_request(200, 200)\n self.get_popover().set_child(Gtk.Label(label=\"Main Menu\"))\n \n # About Button\n about_button = Gtk.Button()\n about_button.set_label(\"About Whakarere\")\n about_button.set_has_frame(False)\n about_button.connect(\"clicked\", self.on_about_clicked)\n \n # Keyboard Shortcuts Button\n shortcut_button = Gtk.Button()\n shortcut_button.set_label(\"Keyboard Shortcuts\")\n shortcut_button.set_has_frame(False)\n shortcut_button.connect(\"clicked\", self.on_shortcuts_clicked)\n \n # Preferences Button\n preferences_button = Gtk.Button()\n preferences_button.set_label(\"Preferences\")\n preferences_button.set_has_frame(False)\n preferences_button.connect(\"clicked\", self.on_preferences_clicked)\n\n settings_menu = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)\n separetor = Gtk.Separator(orientation=Gtk.Orientation.HORIZONTAL)\n settings_menu.append(separetor)\n settings_menu.append(preferences_button)\n settings_menu.append(shortcut_button)\n settings_menu.append(about_button)\n\n self.get_popover().set_child(settings_menu)\n\n def on_about_clicked(self, button):\n about_window = Adw.AboutWindow(modal=True, transient_for=self)\n about_window.set_application_icon(\"com.mudeprolinux.whakarere\")\n about_window.set_application_name(\"Whakarere\")\n about_window.set_version(\"0.1.0\")\n #about_window.set_comments(\"A Gtk4 Whatsapp Client.\")\n about_window.set_website(\"https://mudeprolinux.com\")\n about_window.set_developer_name(\"Mude Pro Linux\")\n about_window.set_developers([\"Thiago Fernandes <[email protected]>\"])\n about_window.set_designers([\"Thiago Fernandes <[email protected]>\"])\n about_window.set_license_type(Gtk.License.MIT_X11)\n about_window.set_copyright(\"2023 © Mude Pro Linux\")\n about_window.set_issue_url(\"https://github.com/tobagin/whakarere/issues\")\n\n # Show the About window\n about_window.present()\n \n def on_shortcuts_clicked(self, button):\n shortcuts_window = Gtk.ShortcutsWindow(modal=True, transient_for=self)\n shortcuts_section = Gtk.ShortcutsSection()\n shortcuts_group = Gtk.ShortcutsGroup()\n shortcuts_section.add_group(shortcuts_group)\n shortcuts_window.add_session(shortcuts_section)\n copy_shortcut = Gtk.Shortcut.new_from_string(\"<Ctrl>C\", Gtk.Label.new(\"Copy Selected Text\"))\n shortcuts_group.add(copy_shortcut)\n shortcuts_window.show()\n\n def on_preferences_clicked(self, button):\n pass" }, { "identifier": "SessionManagerPage", "path": "whakarere/pages/session.py", "snippet": "class SessionManagerPage(Adw.NavigationPage):\n def __init__(self, app_manager):\n super().__init__()\n self.set_title(\"Whakarere\")\n self.app_manager = app_manager\n self.set_can_pop(True)\n\n # Create TitleBar Widget\n 
self.window_titlebar_widget = WindowTitlebarWidget()\n\n # Create MainMenu Button Widget\n self.button_settings_menu = MainMenuButtonWidget()\n\n # Create HeaderBar\n self.page_headerbar = Adw.HeaderBar()\n self.page_headerbar.set_title_widget(self.window_titlebar_widget)\n self.page_headerbar.pack_end(self.button_settings_menu)\n\n if self.app_manager.is_dev():\n self.terminate_all_sessions = Gtk.Button()\n self.terminate_all_sessions.set_label(\"T.A.S.\") # Terminate All Sessions\n self.terminate_all_sessions.set_tooltip_text(\"Terminate All Sessions\")\n self.terminate_all_sessions.connect(\"clicked\", self.app_manager.whatsapp_manager.terminate_all_sessions)\n self.page_headerbar.pack_start(self.terminate_all_sessions)\n\n # Create Account List\n self.account_list = Gio.ListStore(item_type=AccountItem)\n for session_id in self.app_manager.session_manager.get_session_ids():\n account = AccountItem(session_id)\n self.account_list.append(account)\n\n # Factory function for creating list items\n factory = Gtk.SignalListItemFactory.new()\n factory.connect('bind', self.bind_function)\n\n # Create SingleSelection\n self.selected_item = None\n self.selected_item_position = None\n self.selection_model = Gtk.SingleSelection.new(self.account_list)\n self.selection_model.connect(\"selection-changed\", self.on_selection_changed)\n\n self.account_list.connect(\"items-changed\", self.on_items_changed)\n\n # Create ListView\n self.list_view = Gtk.ListView.new(self.selection_model, factory)\n\n # Create ScrolledWindow\n scrolled_window = Gtk.ScrolledWindow()\n scrolled_window.set_min_content_width(300)\n scrolled_window.set_min_content_height(300)\n scrolled_window.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)\n scrolled_window.set_child(self.list_view) # Set ListView as child of ScrolledWindow\n\n # Add session button\n self.button_add_session = Gtk.Button()\n self.button_add_session.icon_name = Gio.ThemedIcon(name=\"com.mudeprolinux.whakarere-add-session-symbolic.svg\")\n button_add_session_content = Adw.ButtonContent()\n button_add_session_content.set_icon_name(\"com.mudeprolinux.whakarere-add-session-symbolic\")\n button_add_session_content.add_css_class(\"svg-icon\")\n button_add_session_content.set_label(\"Add Session\")\n self.button_add_session.set_child(button_add_session_content)\n self.button_add_session.connect(\"clicked\", self.add_new_session)\n\n # Remove session button\n self.button_remove_session = Gtk.Button()\n button_remove_session_content = Adw.ButtonContent()\n button_remove_session_content.set_icon_name(\"com.mudeprolinux.whakarere-remove-session-symbolic\")\n button_remove_session_content.add_css_class(\"svg-icon\")\n button_remove_session_content.set_label(\"Remove Session\")\n self.button_remove_session.set_child(button_remove_session_content)\n self.button_remove_session.connect(\"clicked\", self.remove_selected_session)\n\n # Launch session button\n self.button_launch_session = Gtk.Button()\n self.button_launch_session.set_hexpand(True)\n self.button_launch_session.set_halign(Gtk.Align.CENTER)\n button_launch_session_content = Adw.ButtonContent()\n button_launch_session_content.set_icon_name(\"com.mudeprolinux.whakarere-launch-session-symbolic\")\n button_launch_session_content.add_css_class(\"svg-icon\")\n button_launch_session_content.set_label(\"Launch Session\")\n self.button_launch_session.set_child(button_launch_session_content)\n self.button_launch_session.connect(\"clicked\", self.launch_selected_session)\n\n # Activate session button\n 
self.button_activate_session = Gtk.Button()\n self.button_activate_session.set_hexpand(True)\n self.button_activate_session.set_halign(Gtk.Align.CENTER)\n button_activate_session_content = Adw.ButtonContent()\n button_activate_session_content.set_icon_name(\"com.mudeprolinux.whakarere-qr-code-symbolic\")\n button_activate_session_content.add_css_class(\"svg-icon\")\n button_activate_session_content.set_label(\"Scan QR\")\n self.button_activate_session.set_child(button_activate_session_content)\n self.button_activate_session.connect(\"clicked\", self.activate_selected_session)\n\n page_label = Gtk.Label(label=\"<b>Create a New Session.</b>\")\n page_label.set_use_markup(True)\n page_label.set_halign(Gtk.Align.CENTER)\n page_label.set_valign(Gtk.Align.CENTER)\n\n # Create content box for list view\n content_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)\n content_box.set_valign(Gtk.Align.CENTER) # Vertical alignment to center\n content_box.set_halign(Gtk.Align.CENTER) # Horizontal alignment to center\n content_box.set_margin_top(10)\n content_box.set_margin_bottom(10)\n content_box.set_margin_start(10)\n content_box.set_margin_end(10)\n content_box.set_hexpand(True)\n content_box.set_vexpand(True)\n content_box.append(scrolled_window)\n #content_box.append(self.action_bar)\n\n # a button bar for the bottom of the page\n button_bar = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=10)\n button_bar.set_halign(Gtk.Align.CENTER)\n button_bar.set_hexpand(True)\n button_bar.append(self.button_add_session)\n button_bar.append(self.button_launch_session)\n button_bar.append(self.button_activate_session)\n button_bar.append(self.button_remove_session)\n if self.app_manager.session_manager.get_session_ids_size() > 0:\n self.on_selection_changed(self.selection_model, None, None)\n if self.app_manager.whatsapp_manager.check_session_status(self.selected_item.session_id):\n self.button_launch_session.set_visible(True)\n self.button_activate_session.set_visible(False)\n else:\n self.button_launch_session.set_visible(False)\n self.button_activate_session.set_visible(True)\n else:\n self.button_launch_session.set_visible(False)\n self.button_activate_session.set_visible(False)\n\n bottom_bar = Adw.HeaderBar()\n bottom_bar.set_title_widget(button_bar)\n bottom_bar.set_show_back_button(False)\n bottom_bar.set_show_end_title_buttons(False)\n bottom_bar.set_show_start_title_buttons(False)\n\n # Create page content\n self.page_content = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)\n self.page_content.append(self.page_headerbar)\n self.page_content.append(content_box)\n self.page_content.append(bottom_bar)\n\n # Set page content\n self.set_child(self.page_content)\n \n def refresh_listview(self):\n # Update or refresh the data in the list store (modify as needed)\n self.account_list.remove_all()\n for session_id in self.app_manager.session_manager.get_session_ids():\n account = AccountItem(session_id)\n self.account_list.append(account)\n\n self.selection_model = Gtk.SingleSelection.new(self.account_list)\n # Notify the list view to refresh\n self.list_view.set_model(self.selection_model)\n\n def on_items_changed(self, list_store, position, removed, added):\n if not removed and self.app_manager.session_manager.get_session_ids_size() > 0:\n self.on_selection_changed(self.selection_model, None, None)\n if self.app_manager.whatsapp_manager.check_session_status(self.selected_item.session_id):\n self.button_launch_session.set_visible(True)\n self.button_activate_session.set_visible(False)\n else:\n 
self.button_launch_session.set_visible(False)\n self.button_activate_session.set_visible(True)\n else:\n self.button_launch_session.set_visible(False)\n self.button_activate_session.set_visible(False)\n\n def on_selection_changed(self, selection_model, positon, n_items):\n self.selected_item_position = selection_model.get_selected()\n self.selected_item = selection_model.get_selected_item()\n if self.selected_item is not None:\n if self.app_manager.whatsapp_manager.check_session_status(self.selected_item.session_id):\n self.button_launch_session.set_visible(True)\n self.button_activate_session.set_visible(False)\n else:\n self.button_launch_session.set_visible(False)\n self.button_activate_session.set_visible(True)\n\n def add_new_session(self, button):\n session_id = self.app_manager.session_manager.generate_session_id()\n self.app_manager.session_manager.add_session(session_id)\n self.account_list.append(AccountItem(session_id))\n\n def remove_selected_session(self, button):\n # Create a new message dialog\n dialog = Adw.MessageDialog(modal=True, transient_for=self.app_manager.main_window)\n dialog.set_heading(\"Delete Session\")\n dialog.set_body(\"Are you sure you want to delete the session?\")\n\n dialog.add_response(\"cancel\", \"_Cancel\")\n dialog.add_response(\"delete\", \"_Delete\")\n\n dialog.set_response_appearance(\"delete\", Adw.ResponseAppearance.DESTRUCTIVE)\n \n dialog.set_default_response(\"cancel\")\n dialog.set_close_response(\"cancel\")\n\n dialog.connect(\"response\", self.on_response)\n\n #self.add_overlay(dialog)\n dialog.set_visible(True)\n \n def on_response(self, dialog, response):\n if response == \"delete\":\n self.account_list.remove(self.selected_item_position)\n self.app_manager.session_manager.remove_session(self.selected_item.session_id)\n self.app_manager.whatsapp_manager.terminate_session(self.selected_item.session_id)\n self.on_selection_changed(self.selection_model, None, None)\n elif response == \"cancel\":\n pass\n dialog.destroy()\n\n def launch_selected_session(self, button):\n if self.selected_item is not None:\n self.app_manager.session_manager.set_current_session(self.selected_item.session_id)\n self.app_manager.navigate_to_whatsapp_messenger_page(self.selected_item.session_id)\n \n def activate_selected_session(self, button):\n if self.selected_item is not None:\n self.app_manager.session_manager.set_current_session(self.selected_item.session_id)\n self.app_manager.navigate_to_qr_manager_page(self.selected_item.session_id)\n\n def bind_function(self, factory, list_item):\n model = list_item.get_item()\n result = self.account_list.find(model)\n position = result.position\n if model is not None:\n is_session_active = self.app_manager.whatsapp_manager.check_session_status(model.session_id)\n print(is_session_active)\n if is_session_active:\n hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=10)\n hbox.set_halign(Gtk.Align.CENTER)\n avatar = Adw.Avatar()\n avatar.set_size(40)\n avatar.set_margin_top(5)\n avatar.set_margin_bottom(5)\n avatar.set_margin_start(5)\n avatar.set_halign(Gtk.Align.START)\n userid = self.app_manager.whatsapp_manager.get_user_id(model.session_id)\n response = requests.get(self.app_manager.whatsapp_manager.get_user_profile_picture(userid, model.session_id))\n response.raise_for_status()\n loader = GdkPixbuf.PixbufLoader()\n loader.write(response.content)\n loader.close()\n avatar_image = Gdk.Texture.new_for_pixbuf(loader.get_pixbuf())\n avatar.set_custom_image(avatar_image)\n hbox.append(avatar)\n label = 
Gtk.Label(label=f\"<b>{self.app_manager.whatsapp_manager.get_user_name(model.session_id)}</b>\")\n label.set_use_markup(True)\n hbox.append(label)\n list_item.set_child(hbox)\n else:\n hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=10)\n hbox.set_halign(Gtk.Align.CENTER)\n avatar = Adw.Avatar()\n avatar.set_size(40)\n avatar.set_margin_top(5)\n avatar.set_margin_bottom(5)\n avatar.set_margin_start(5)\n image_data = base64.b64decode(WhatsappLogoAlt.base64image)\n gbytes = GLib.Bytes.new_take(image_data)\n input_stream = Gio.MemoryInputStream.new_from_bytes(gbytes)\n pixbuf = GdkPixbuf.Pixbuf.new_from_stream(input_stream, None)\n texture = Gdk.Texture.new_for_pixbuf(pixbuf)\n avatar.set_custom_image(texture)\n hbox.append(avatar)\n label = Gtk.Label(label=\"<b>No account linked.</b>\")\n label.set_use_markup(True)\n hbox.append(label)\n list_item.set_child(hbox)" }, { "identifier": "SessionManagerPage2", "path": "whakarere/pages/session2.py", "snippet": "class SessionManagerPage2(Adw.NavigationPage):\n def __init__(self, window):\n super().__init__()\n self.set_title(\"Whakarere\")\n self.window = window\n self.set_can_pop(True)\n self.session_overlay = Gtk.Overlay()\n\n # Create TitleBar Widget\n self.window_titlebar_widget = WindowTitlebarWidget()\n self.window_titlebar_widget.set_title(\"Whakarere\")\n self.window_titlebar_widget.set_subtitle(\"A Gtk4 Whatsapp Client.\")\n # Create MainMenu Button Widget\n self.button_settings_menu = MainMenuButtonWidget()\n\n # Create HeaderBar\n self.page_headerbar = Adw.HeaderBar()\n self.page_headerbar.set_title_widget(self.window_titlebar_widget)\n self.page_headerbar.pack_end(self.button_settings_menu)\n self.add_session_button = Gtk.Button()\n self.add_session_button.set_icon_name(\"window-new-symbolic\")\n self.add_session_button.set_tooltip_text(\"Create a New Session\")\n self.add_session_button.connect(\"clicked\", self.add_new_session)\n self.page_headerbar.pack_end(self.add_session_button)\n\n if self.window.is_dev():\n self.terminate_all_sessions = Gtk.Button()\n self.terminate_all_sessions.set_label(\"T.A.S.\") # Terminate All Sessions\n self.terminate_all_sessions.set_tooltip_text(\"Terminate All Sessions\")\n self.terminate_all_sessions.connect(\"clicked\", self.window.whatsapp_manager.terminate_all_sessions)\n self.page_headerbar.pack_start(self.terminate_all_sessions)\n\n # Create Account List\n self.account_list = Gio.ListStore(item_type=AccountItem)\n for session_id in self.window.session_manager.get_session_ids():\n account = AccountItem(session_id)\n self.account_list.append(account)\n\n # Factory function for creating list items\n factory = Gtk.SignalListItemFactory.new()\n factory.connect('bind', self.bind_function)\n\n # Create SingleSelection\n self.selected_item = None\n self.selected_item_position = None\n self.selection_model = Gtk.SingleSelection.new(self.account_list)\n self.selection_model.connect(\"selection-changed\", self.on_selection_changed)\n\n self.account_list.connect(\"items-changed\", self.on_items_changed)\n\n # Create ListView\n self.list_view = Gtk.ListView.new(self.selection_model, factory)\n\n # Create ScrolledWindow\n scrolled_window = Gtk.ScrolledWindow()\n scrolled_window.set_min_content_width(300)\n scrolled_window.set_min_content_height(300)\n scrolled_window.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)\n scrolled_window.set_child(self.list_view) # Set ListView as child of ScrolledWindow\n\n # Add session button\n self.button_add_session = Gtk.Button()\n 
self.button_add_session.icon_name = Gio.ThemedIcon(name=\"com.mudeprolinux.whakarere-add-session-symbolic.svg\")\n button_add_session_content = Adw.ButtonContent()\n button_add_session_content.set_icon_name(\"com.mudeprolinux.whakarere-add-session-symbolic\")\n button_add_session_content.add_css_class(\"svg-icon\")\n button_add_session_content.set_label(\"Add Session\")\n self.button_add_session.set_child(button_add_session_content)\n self.button_add_session.connect(\"clicked\", self.add_new_session)\n\n # Remove session button\n self.button_remove_session = Gtk.Button()\n button_remove_session_content = Adw.ButtonContent()\n button_remove_session_content.set_icon_name(\"com.mudeprolinux.whakarere-remove-session-symbolic\")\n button_remove_session_content.add_css_class(\"svg-icon\")\n button_remove_session_content.set_label(\"Remove Session\")\n self.button_remove_session.set_child(button_remove_session_content)\n self.button_remove_session.connect(\"clicked\", self.remove_selected_session)\n\n # Launch session button\n self.button_launch_session = Gtk.Button()\n self.button_launch_session.set_hexpand(True)\n self.button_launch_session.set_halign(Gtk.Align.CENTER)\n button_launch_session_content = Adw.ButtonContent()\n button_launch_session_content.set_icon_name(\"com.mudeprolinux.whakarere-launch-session-symbolic\")\n button_launch_session_content.add_css_class(\"svg-icon\")\n button_launch_session_content.set_label(\"Launch Session\")\n self.button_launch_session.set_child(button_launch_session_content)\n self.button_launch_session.connect(\"clicked\", self.launch_selected_session)\n\n # Activate session button\n self.button_activate_session = Gtk.Button()\n self.button_activate_session.set_hexpand(True)\n self.button_activate_session.set_halign(Gtk.Align.CENTER)\n button_activate_session_content = Adw.ButtonContent()\n button_activate_session_content.set_icon_name(\"com.mudeprolinux.whakarere-qr-code-symbolic\")\n button_activate_session_content.add_css_class(\"svg-icon\")\n button_activate_session_content.set_label(\"Scan QR\")\n self.button_activate_session.set_child(button_activate_session_content)\n self.button_activate_session.connect(\"clicked\", self.activate_selected_session)\n\n page_label = Gtk.Label(label=\"<b>Create a New Session.</b>\")\n page_label.set_use_markup(True)\n page_label.set_halign(Gtk.Align.CENTER)\n page_label.set_valign(Gtk.Align.CENTER)\n\n # Create content box for list view\n content_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)\n content_box.set_valign(Gtk.Align.CENTER) # Vertical alignment to center\n content_box.set_halign(Gtk.Align.CENTER) # Horizontal alignment to center\n content_box.set_margin_top(10)\n content_box.set_margin_bottom(10)\n content_box.set_margin_start(10)\n content_box.set_margin_end(10)\n content_box.set_hexpand(True)\n content_box.set_vexpand(True)\n content_box.append(scrolled_window)\n #content_box.append(self.action_bar)\n\n # a button bar for the bottom of the page\n button_bar = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=10)\n button_bar.set_halign(Gtk.Align.CENTER)\n button_bar.set_hexpand(True)\n button_bar.append(self.button_add_session)\n button_bar.append(self.button_launch_session)\n button_bar.append(self.button_activate_session)\n button_bar.append(self.button_remove_session)\n if self.window.session_manager.get_session_ids_size() > 0:\n self.on_selection_changed(self.selection_model, None, None)\n if self.window.whatsapp_manager.check_session_status(self.selected_item.session_id):\n 
self.button_launch_session.set_visible(True)\n self.button_activate_session.set_visible(False)\n else:\n self.button_launch_session.set_visible(False)\n self.button_activate_session.set_visible(True)\n else:\n self.button_launch_session.set_visible(False)\n self.button_activate_session.set_visible(False)\n\n bottom_bar = Adw.HeaderBar()\n bottom_bar.set_title_widget(button_bar)\n bottom_bar.set_show_back_button(False)\n bottom_bar.set_show_end_title_buttons(False)\n bottom_bar.set_show_start_title_buttons(False)\n\n # Create page content\n self.page_content = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)\n self.page_content.append(self.page_headerbar)\n #self.page_content.append(content_box)\n #self.page_content.append(bottom_bar)\n self.session_overlay.set_child(self.page_content)\n # Set page content\n self.set_child(self.session_overlay)\n \n def refresh_listview(self):\n # Update or refresh the data in the list store (modify as needed)\n self.account_list.remove_all()\n for session_id in self.window.session_manager.get_session_ids():\n account = AccountItem(session_id)\n self.account_list.append(account)\n\n self.selection_model = Gtk.SingleSelection.new(self.account_list)\n # Notify the list view to refresh\n self.list_view.set_model(self.selection_model)\n\n def on_items_changed(self, list_store, position, removed, added):\n if not removed and self.window.session_manager.get_session_ids_size() > 0:\n self.on_selection_changed(self.selection_model, None, None)\n if self.window.whatsapp_manager.check_session_status(self.selected_item.session_id):\n self.button_launch_session.set_visible(True)\n self.button_activate_session.set_visible(False)\n else:\n self.button_launch_session.set_visible(False)\n self.button_activate_session.set_visible(True)\n else:\n self.button_launch_session.set_visible(False)\n self.button_activate_session.set_visible(False)\n\n def on_selection_changed(self, selection_model, positon, n_items):\n self.selected_item_position = selection_model.get_selected()\n self.selected_item = selection_model.get_selected_item()\n if self.selected_item is not None:\n if self.window.whatsapp_manager.check_session_status(self.selected_item.session_id):\n self.button_launch_session.set_visible(True)\n self.button_activate_session.set_visible(False)\n else:\n self.button_launch_session.set_visible(False)\n self.button_activate_session.set_visible(True)\n\n def add_new_session(self, button):\n #self.window.main_window.set_sensitive(False) # Disable main window \n new_account_wizard = AccountWizardWindow(self.app_manager)\n new_account_wizard.set_visible(True)\n\n def remove_selected_session(self, button):\n # Create a new message dialog\n dialog = Adw.MessageDialog(modal=True, transient_for=self.window.main_window)\n dialog.set_heading(\"Delete Session\")\n dialog.set_body(\"Are you sure you want to delete the session?\")\n\n dialog.add_response(\"cancel\", \"_Cancel\")\n dialog.add_response(\"delete\", \"_Delete\")\n\n dialog.set_response_appearance(\"delete\", Adw.ResponseAppearance.DESTRUCTIVE)\n \n dialog.set_default_response(\"cancel\")\n dialog.set_close_response(\"cancel\")\n\n dialog.connect(\"response\", self.on_response)\n\n #self.add_overlay(dialog)\n dialog.set_visible(True)\n \n def on_response(self, dialog, response):\n if response == \"delete\":\n self.account_list.remove(self.selected_item_position)\n self.window.session_manager.remove_session(self.selected_item.session_id)\n self.window.whatsapp_manager.terminate_session(self.selected_item.session_id)\n 
self.on_selection_changed(self.selection_model, None, None)\n elif response == \"cancel\":\n pass\n dialog.destroy()\n\n def launch_selected_session(self, button):\n if self.selected_item is not None:\n self.window.session_manager.set_current_session(self.selected_item.session_id)\n self.window.navigate_to_whatsapp_messenger_page(self.selected_item.session_id)\n \n def activate_selected_session(self, button):\n if self.selected_item is not None:\n self.window.session_manager.set_current_session(self.selected_item.session_id)\n self.window.navigate_to_qr_manager_page(self.selected_item.session_id)\n\n def bind_function(self, factory, list_item):\n model = list_item.get_item()\n result = self.account_list.find(model)\n position = result.position\n if model is not None:\n is_session_active = self.window.whatsapp_manager.check_session_status(model.session_id)\n print(is_session_active)\n if is_session_active:\n hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=10)\n hbox.set_halign(Gtk.Align.CENTER)\n avatar = Adw.Avatar()\n avatar.set_size(40)\n avatar.set_margin_top(5)\n avatar.set_margin_bottom(5)\n avatar.set_margin_start(5)\n avatar.set_halign(Gtk.Align.START)\n userid = self.window.whatsapp_manager.get_user_id(model.session_id)\n response = requests.get(self.window.whatsapp_manager.get_user_profile_picture(userid, model.session_id))\n response.raise_for_status()\n loader = GdkPixbuf.PixbufLoader()\n loader.write(response.content)\n loader.close()\n avatar_image = Gdk.Texture.new_for_pixbuf(loader.get_pixbuf())\n avatar.set_custom_image(avatar_image)\n hbox.append(avatar)\n label = Gtk.Label(label=f\"<b>{self.window.whatsapp_manager.get_user_name(model.session_id)}</b>\")\n label.set_use_markup(True)\n hbox.append(label)\n list_item.set_child(hbox)\n else:\n hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=10)\n hbox.set_halign(Gtk.Align.CENTER)\n avatar = Adw.Avatar()\n avatar.set_size(40)\n avatar.set_margin_top(5)\n avatar.set_margin_bottom(5)\n avatar.set_margin_start(5)\n image_data = base64.b64decode(WhatsappLogoAlt.base64image)\n gbytes = GLib.Bytes.new_take(image_data)\n input_stream = Gio.MemoryInputStream.new_from_bytes(gbytes)\n pixbuf = GdkPixbuf.Pixbuf.new_from_stream(input_stream, None)\n texture = Gdk.Texture.new_for_pixbuf(pixbuf)\n avatar.set_custom_image(texture)\n hbox.append(avatar)\n label = Gtk.Label(label=\"<b>No account linked.</b>\")\n label.set_use_markup(True)\n hbox.append(label)\n list_item.set_child(hbox)" }, { "identifier": "AccountWizardWindow", "path": "whakarere/windows/account_wizard.py", "snippet": "class AccountWizardWindow(Adw.Window):\n def __init__(self, window):\n super().__init__()\n self.window = window\n self.set_transient_for(window)\n self.set_modal(True)\n self.set_default_size(300, 300)\n self.connect(\"close-request\", self.on_modal_close_request)\n self.set_decorated(False)\n self.session_id = None\n\n api_key = \"your_global_api_key_here\"\n self.api_url = \"http://localhost:3000\"\n self.headers = { 'x-api-key': api_key }\n\n self.header_bar = Adw.HeaderBar()\n self.titlebar_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)\n self.title = Gtk.Label(label=\"Creating a Session\")\n self.subtitle = Gtk.Label(label=\"Please wait...\")\n self.titlebar_box.append(self.title)\n self.titlebar_box.append(self.subtitle)\n self.header_bar.set_title_widget(self.titlebar_box)\n\n self.window_content = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=20)\n self.window_content.set_size_request(200, 300)\n\n image = 
Gtk.Image.new_from_icon_name(\"com.mudeprolinux.whakarere\")\n image.set_pixel_size(120)\n label_title = Gtk.Label(label=\"Welcome to Whakarere\")\n label_title.set_halign(Gtk.Align.CENTER)\n label_message = Gtk.Label(label=\"Let me create a new session and I'll help you link it to your WhatsApp account.\")\n label_message.set_halign(Gtk.Align.CENTER)\n\n self.progress_bar_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=20)\n self.progress_bar = Gtk.ProgressBar()\n self.progress_bar.set_fraction(0.0)\n self.progress_bar.set_show_text(False)\n self.progress_bar.set_pulse_step(1)\n\n self.label_progress = Gtk.Label(label=\"Creating session...\")\n self.label_progress.set_halign(Gtk.Align.CENTER)\n self.label_progress.set_margin_top(0)\n self.progress_bar_box.append(self.progress_bar)\n self.progress_bar_box.append(self.label_progress)\n self.progress_bar_box.set_margin_top(40)\n self.progress_bar_box.set_margin_bottom(40)\n self.progress_bar_box.set_margin_start(20)\n self.progress_bar_box.set_margin_end(20)\n\n self.session_id = self.window.session_manager.generate_session_id()\n self.window.session_manager.add_session(self.session_id)\n thread = threading.Thread(target=self.update_progress_bar)\n thread.start()\n\n self.top_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=20)\n self.top_box.set_margin_top(20)\n self.top_box.set_halign(Gtk.Align.CENTER)\n self.top_box.set_valign(Gtk.Align.CENTER)\n self.top_box.append(image)\n self.top_box.append(label_title)\n self.top_box.append(label_message)\n self.top_box.set_margin_start(20)\n self.top_box.set_margin_end(20)\n \n self.window_content = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=20)\n self.window_content.set_size_request(200, 300)\n self.window_content.append(self.header_bar)\n self.window_content.append(self.top_box)\n self.window_content.append(self.progress_bar_box)\n self.set_content(self.window_content)\n self.present()\n\n def on_modal_close_request(self, widget):\n self.window.session_manager.remove_session(self.session_id)\n self.destroy()\n\n def update_progress_bar(self):\n self.label_progress.set_text(\"Creating session...\")\n for i in range(1, 11):\n self.progress_bar.set_fraction(i / 100)\n time.sleep(0.2)\n self.label_progress.set_text(\"Launching session...\")\n for i in range(11, 21):\n self.progress_bar.set_fraction(i / 100)\n time.sleep(0.2)\n self.label_progress.set_text(\"Waiting for session activation...\")\n for i in range(21, 31):\n self.progress_bar.set_fraction(i / 100)\n time.sleep(0.2)\n self.label_progress.set_text(\"Capturing QR code...\")\n for i in range(31, 41):\n self.progress_bar.set_fraction(i / 100)\n time.sleep(0.2)\n self.label_progress.set_text(\"Generating QR code...\")\n for i in range(41, 51):\n self.progress_bar.set_fraction(i / 100)\n time.sleep(0.2)\n \n self.progress_bar.pulse()\n self.label_progress.set_text(\"Please scan QR code to continue...\")\n self.progress_bar.pulse()\n self.qr_code_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=20)\n self.progress_bar.pulse()\n qr_code_data = self.get_qr_code_image(self.session_id)\n self.progress_bar.pulse()\n glib_bytes = GLib.Bytes.new(qr_code_data)\n self.progress_bar.pulse()\n input_stream = Gio.MemoryInputStream.new_from_bytes(glib_bytes)\n self.progress_bar.pulse()\n pixbuf = GdkPixbuf.Pixbuf.new_from_stream(input_stream, None)\n self.progress_bar.pulse()\n self.qr_code_image = Gtk.Image.new_from_pixbuf(pixbuf)\n self.progress_bar.pulse()\n self.qr_code_image.set_pixel_size(240)\n 
self.progress_bar.pulse()\n self.qr_code_box.append(self.qr_code_image)\n self.progress_bar.pulse()\n self.window_content.remove(self.top_box)\n self.progress_bar.pulse()\n self.window_content.insert_child_after(self.qr_code_box, self.header_bar)\n self.progress_bar.pulse()\n\n while not self.check_session_status(self.qr_code_image):\n self.progress_bar.pulse()\n time.sleep(1)\n\n self.progress_bar.set_fraction(0.50)\n\n self.label_progress.set_text(\"Syncing your chats...\")\n self.window.whatsapp_manager.initialize_session_by_id(self.session_id)\n for i in range(51, 71):\n self.progress_bar.set_fraction(i / 100)\n time.sleep(0.4)\n\n self.label_progress.set_text(\"Done!\")\n\n def generate_qr_code(self, qr_code_data):\n qr = qrcode.QRCode(version=1, error_correction=qrcode.constants.ERROR_CORRECT_L, box_size=10, border=4)\n qr.add_data(qr_code_data)\n qr.make(fit=True)\n return qr.make_image(fill_color=\"black\", back_color=\"white\")\n\n def get_qr_code_texture(self, qr_code_data):\n qr_image = self.generate_qr_code(qr_code_data)\n pixbuf = self.pil_image_to_pixbuf(qr_image)\n return Gdk.Texture.new_for_pixbuf(pixbuf)\n\n def pil_image_to_pixbuf(self, pil_image):\n \"\"\"Convert a PIL image to a GdkPixbuf.\"\"\"\n buffer = BytesIO()\n pil_image.save(buffer)\n glib_bytes = GLib.Bytes.new(buffer.getvalue())\n loader = GdkPixbuf.PixbufLoader.new_with_type(\"png\")\n loader.write_bytes(glib_bytes)\n pixbuf = loader.get_pixbuf()\n loader.close()\n return pixbuf\n\n def get_qr_code_image(self, session_id):\n url = self.api_url + f'/session/qr/{session_id}/image'\n result = requests.get(url, headers=self.headers).content\n\n if(self.window.is_debug()):\n print(\"get_qr_code_image: \" + str(result))\n \n return result\n\n def get_qr_code_data(self, session_id):\n url = self.api_url + f'/session/qr/{session_id}'\n result = ((requests.get(url, headers=self.headers)).json())[\"qr\"]\n\n if(self.window.is_debug()):\n print(\"get_qr_code_data: \" + str(result))\n \n return result\n \n def check_session_status(self, session_id):\n url = self.api_url + f'/session/status/{session_id}'\n result = requests.get(url, headers=self.headers).json()[\"success\"]\n\n if(self.window.is_debug()):\n print(\"check_session_status: \" + str(result))\n \n return result" } ]
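The AccountWizardWindow snippet above converts a PIL-generated QR image into a GdkPixbuf through a PixbufLoader. Below is a minimal, self-contained sketch of that conversion, not the project's exact helper: it assumes PyGObject with GdkPixbuf 2.0 and Pillow installed, passes an explicit format= to Image.save() (required when writing to a BytesIO buffer rather than a filename), and closes the loader before reading the pixbuf so decoding has finished.

import gi
gi.require_version("GdkPixbuf", "2.0")
from io import BytesIO
from gi.repository import GdkPixbuf, GLib
from PIL import Image

def pil_image_to_pixbuf(pil_image: Image.Image) -> GdkPixbuf.Pixbuf:
    # Serialize the PIL image to PNG bytes; format= is needed for file-like targets.
    buffer = BytesIO()
    pil_image.save(buffer, format="PNG")
    # Feed the bytes to a PNG-typed loader and close it before reading the decoded pixbuf.
    loader = GdkPixbuf.PixbufLoader.new_with_type("png")
    loader.write_bytes(GLib.Bytes.new(buffer.getvalue()))
    loader.close()
    return loader.get_pixbuf()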
import gi
from whakarere.managers.config import ConfigManager
from whakarere.managers.session import SessionManager
from whakarere.managers.whatsapp import WhatsAppSessionManager
from whakarere.widgets.titlebar import WindowTitlebarWidget
from whakarere.widgets.main_menu import MainMenuButtonWidget
from whakarere.pages.session import SessionManagerPage
from whakarere.pages.session2 import SessionManagerPage2
from whakarere.windows.account_wizard import AccountWizardWindow
from gi.repository import Adw, Gtk, Gdk
11,718
gi.require_version('Gtk', '4.0')
gi.require_version('Adw', '1')
gi.require_version("Gdk", "4.0")

class WhakarereMainWindow(Adw.ApplicationWindow):
    def __init__(self, app, debug=False, dev=False):
        super().__init__(application=app)
        self.app = app
        self.debug = debug
        self.dev = dev
        self.settings = Gtk.Settings.get_default()
        self.settings.connect("notify::gtk-theme-name", self.on_theme_changed)

        # Initial CSS application
        self.update_css_for_theme()

        # Set the window size and default close behavior
        self.set_default_size(800, 600)
        self.set_hide_on_close(True)

        # Create the config manager and load the config file
        self.config_manager = ConfigManager(self)
        self.config_manager.load_config()

        # Create the session manager and load the sessions
gi.require_version('Gtk', '4.0')
gi.require_version('Adw', '1')
gi.require_version("Gdk", "4.0")

class WhakarereMainWindow(Adw.ApplicationWindow):
    def __init__(self, app, debug=False, dev=False):
        super().__init__(application=app)
        self.app = app
        self.debug = debug
        self.dev = dev
        self.settings = Gtk.Settings.get_default()
        self.settings.connect("notify::gtk-theme-name", self.on_theme_changed)

        # Initial CSS application
        self.update_css_for_theme()

        # Set the window size and default close behavior
        self.set_default_size(800, 600)
        self.set_hide_on_close(True)

        # Create the config manager and load the config file
        self.config_manager = ConfigManager(self)
        self.config_manager.load_config()

        # Create the session manager and load the sessions
self.session_manager = SessionManager(self)
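The WhakarereMainWindow code above reacts to GTK theme changes by listening on the default Gtk.Settings object. A minimal sketch of that hook is shown below; it is meant to run inside an already-initialized GTK 4 application, and the print call is only a stand-in for the window's update_css_for_theme() method, which is not shown here.

import gi
gi.require_version("Gtk", "4.0")
from gi.repository import Gtk

def on_theme_changed(settings, pspec):
    # Stand-in for update_css_for_theme(): react to the newly selected theme name.
    print("Theme changed to:", settings.get_property("gtk-theme-name"))

settings = Gtk.Settings.get_default()  # non-None once GTK has been initialized
settings.connect("notify::gtk-theme-name", on_theme_changed)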
1
2023-10-29 15:46:50+00:00
16k
TheCompAce/ShellSpeak
main.py
[ { "identifier": "VectorDatabase", "path": "modules/vectorDatabase.py", "snippet": "class VectorDatabase:\n def __init__(self, path, name):\n self.path = path\n self.name = name\n self.db_path = os.path.join(path, f'{name}.db')\n self.model_path = os.path.join(path, f'{name}.bin')\n \n # Ensure the path exists\n if not os.path.exists(path):\n os.makedirs(path)\n \n # Set up database and model connections\n self.conn = self.initialize_db()\n self.model = self.initialize_model()\n \n def initialize_db(self):\n try:\n conn = sqlite3.connect(self.db_path)\n c = conn.cursor()\n \n c.execute('CREATE TABLE IF NOT EXISTS responses (id INTEGER PRIMARY KEY, response TEXT, response_raw TEXT, trained BOOLEAN DEFAULT 0)')\n c.execute('CREATE INDEX IF NOT EXISTS idx_responses_trained ON responses (trained)') # Index on trained field\n c.execute('CREATE TABLE IF NOT EXISTS vector_data (id INTEGER PRIMARY KEY, vector BLOB, response_id INTEGER, FOREIGN KEY(response_id) REFERENCES responses(id))')\n c.execute('CREATE INDEX IF NOT EXISTS idx_vector_data_response_id ON vector_data (response_id)') # Index on response_id field\n\n # Check if timestamp column exists\n c.execute(\"PRAGMA table_info(responses)\")\n columns = [column[1] for column in c.fetchall()]\n if 'timestamp' not in columns:\n c.execute('ALTER TABLE responses ADD COLUMN timestamp DATETIME DEFAULT CURRENT_TIMESTAMP')\n \n conn.commit()\n\n return conn # Return the connection\n except Exception as e:\n logging.exception(f\"An error occurred in initialize_db: {e}\")\n\n \n def initialize_model(self):\n try:\n # Create a new Word2Vec model if it doesn't exist\n if not os.path.exists(self.model_path):\n # Assuming sentences is your data\n # Replace the following line with your data and model parameters\n sentences = [[\"hello\", \"world\"], [\"how\", \"are\", \"you\"], [\"goodbye\", \"world\"]]\n\n model = Word2Vec(sentences, min_count=1)\n model.save(self.model_path)\n else:\n model = Word2Vec.load(self.model_path)\n\n return model # Return the model\n except Exception as e:\n logging.exception(f\"An error occurred in initialize_model: {e}\")\n\n def store_short_term_memory(self, task_id, data):\n # Convert data to a string or JSON format\n data_str = json.dumps(data)\n # Store the data as a response in the VectorDatabase\n self.vector_db.create_response(data_str)\n\n def store_long_term_memory(self, task_data):\n # Convert task_data to a string or JSON format\n task_data_str = json.dumps(task_data)\n # Store the task_data as a response in the VectorDatabase\n self.create_response(task_data_str)\n\n def ensure_connection(self):\n if self.conn is None:\n self.conn = self.initialize_db()\n if self.model is None:\n self.model = self.initialize_model()\n \n def create_response(self, response_text):\n try:\n c = self.conn.cursor()\n \n preprocess_text = self.preprocess_text(response_text)\n now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n c.execute('INSERT INTO responses (response, response_raw, timestamp) VALUES (?, ?, ?)', (preprocess_text, response_text, now))\n response_id = c.lastrowid\n\n # Updated word check in vocabulary\n words = preprocess_text.split()\n vectors = [self.model.wv[word] for word in words if word in dict(self.model.wv.key_to_index)]\n if vectors:\n vector = np.mean(vectors, axis=0) # Averaging vectors of the words\n vector_bytes = vector.tobytes()\n c.execute('INSERT INTO vector_data (vector, response_id) VALUES (?, ?)', (vector_bytes, response_id))\n else:\n logging.info(\"No valid words found in the response for 
vectorization.\")\n\n self.conn.commit()\n c.close()\n except Exception as e:\n logging.exception(f\"An error occurred in create_response: {e}\")\n\n \n def search_response(self, search_text):\n c = self.conn.cursor()\n \n # Use the LIKE operator to search for the search_text in the response field\n c.execute(\"SELECT id, response FROM responses WHERE response LIKE ?\", ('%' + search_text + '%',))\n search_results = c.fetchall()\n c.close()\n return search_results\n \n def normalize_text(self, text):\n # Convert to lowercase\n text = text.lower()\n # Replace newline characters with spaces\n text = text.replace('\\\\n', ' ')\n # Remove special characters and digits using regex\n text = re.sub(r'[^a-zA-Z\\s]', ' ', text)\n # Remove extra whitespaces\n text = re.sub(r'\\s+', ' ', text).strip()\n # Tokenize the text\n tokens = text.split()\n # Remove stopwords\n tokens = [word for word in tokens if word not in stopwords.words('english')]\n # Perform stemming\n stemmer = PorterStemmer()\n tokens = [stemmer.stem(word) for word in tokens]\n # Join tokens back into a single string\n text = ' '.join(tokens)\n return text\n\n def close_connection(self):\n \"\"\"Close the database connection gracefully.\"\"\"\n try:\n if self.conn:\n self.conn.close()\n\n self.conn = None\n except Exception as e:\n logging.exception(f\"An error occurred while closing the connection: {e}\")\n\n\n def preprocess_text(self, text):\n \"\"\"Example preprocessing function (can be expanded).\"\"\"\n # Placeholder for any preprocessing steps you want to implement\n return self.normalize_text(text)\n\n def get_vector(self, response_id):\n \"\"\"Retrieve vector data for a given response_id.\"\"\"\n c = self.conn.cursor()\n \n c.execute('SELECT vector FROM vector_data WHERE response_id = ?', (response_id,))\n vector_data = c.fetchone()\n c.close()\n \n if vector_data is None:\n error_message = f\"No vector data found for response_id {response_id}\"\n logging.error(error_message)\n raise ValueError(error_message)\n \n vector = np.frombuffer(vector_data[0], dtype=np.float32) # Assuming the vector data is stored as float32\n \n return vector\n\n \n def read_response(self, response_id):\n c = self.conn.cursor()\n \n c.execute('SELECT response FROM responses WHERE id = ?', (response_id,))\n response = c.fetchone()\n\n c.close()\n \n if response is None:\n error_message = f\"No response found for response_id {response_id}\"\n logging.error(error_message)\n raise ValueError(error_message)\n \n return response[0]\n \n def update_response(self, response_id, new_response_text):\n try:\n c = self.conn.cursor()\n \n normalized_text = self.preprocess(new_response_text)\n c.execute('UPDATE responses SET response = ? WHERE id = ?', (normalized_text, response_id))\n \n # Check if each word is in the model's vocabulary\n words = normalized_text.split()\n vectors = [self.model.wv[word] for word in words if word in dict(self.model.wv.key_to_index)]\n if vectors:\n vector = np.mean(vectors, axis=0) # Averaging vectors of the words\n vector_bytes = vector.tobytes()\n c.execute('UPDATE vector_data SET vector = ? 
WHERE response_id = ?', (vector_bytes, response_id))\n \n self.conn.commit()\n c.close()\n except Exception as e:\n logging.exception(f\"An error occurred in update_response: {e}\")\n \n def delete_response(self, response_id):\n try:\n c = self.conn.cursor()\n \n c.execute('DELETE FROM vector_data WHERE response_id = ?', (response_id,))\n c.execute('DELETE FROM responses WHERE id = ?', (response_id,))\n \n self.conn.commit()\n\n c.close()\n except Exception as e:\n logging.exception(f\"An error occurred in delete_response: {e}\")\n\n def train_untrained_responses(self):\n try:\n c = self.conn.cursor()\n\n c.execute(\"SELECT response FROM responses WHERE trained = 0\")\n untrained_responses = c.fetchall()\n if untrained_responses:\n sentences = [response[0].split() for response in untrained_responses]\n\n self.model.build_vocab(sentences, update=True)\n self.model.train(sentences, total_examples=len(sentences), epochs=self.model.epochs)\n\n self.model.save(self.model_path)\n\n c.execute(\"UPDATE responses SET trained = 1 WHERE trained = 0\")\n\n self.conn.commit()\n else:\n logging.info(\"No untrained responses found.\")\n\n c.close()\n except Exception as e:\n logging.exception(f\"An error occurred in train_untrained_responses: {e}\")\n\n def needs_index_update(self):\n \"\"\"\n Check if there are any untrained responses in the database.\n If there are, it means the index needs to be updated.\n Returns True if update is needed, False otherwise.\n \"\"\"\n try:\n c = self.conn.cursor()\n c.execute(\"SELECT COUNT(*) FROM responses WHERE trained = 0\")\n count = c.fetchone()[0]\n c.close() # Manually close the cursor\n return count > 0\n except Exception as e:\n logging.exception(f\"An error occurred in needs_index_update: {e}\")\n return False # In case of an error, you might want to handle it differently\n\n \n def reset_training_status(self):\n \"\"\"Reset the trained status of all responses to untrained.\"\"\"\n try:\n c = self.conn.cursor()\n \n c.execute(\"UPDATE responses SET trained = 0\")\n \n self.conn.commit()\n\n c.close()\n except Exception as e:\n logging.exception(f\"An error occurred in reset_training_status: {e}\")\n\n\n def search_word_vector(self, word):\n try:\n if word in self.model.wv.key_to_index:\n similar_words = self.model.wv.similar_by_word(word)\n return similar_words\n else:\n logging.error(f\"The word {word} is not in the model's vocabulary.\")\n return []\n except Exception as e:\n logging.exception(f\"An error occurred in search_word_vector: {e}\")\n return []\n\n def get_vector_average(self, text):\n words = text.split()\n vectors = [self.model.wv[word] for word in words if word in dict(self.model.wv.key_to_index)]\n if vectors:\n vector_avg = np.mean(vectors, axis=0)\n return vector_avg\n else:\n return np.zeros(self.model.vector_size)\n\n def search_similar_conversations(self, text, top_n=1):\n processed_text = self.preprocess_text(text)\n print(f\"processed_text = {processed_text}\")\n\n query_vector = self.get_vector_average(processed_text)\n with self.conn:\n c = self.conn.cursor()\n c.execute('SELECT id, vector FROM vector_data')\n vector_data = c.fetchall()\n\n if not vector_data:\n return []\n\n ids, vectors = zip(*vector_data)\n vectors = np.array([np.frombuffer(vector, dtype=np.float32) for vector in vectors])\n similarities = cosine_similarity([query_vector], vectors)[0]\n sorted_indices = np.argsort(similarities)[::-1]\n top_indices = sorted_indices[:top_n]\n top_ids = [ids[i] for i in top_indices]\n top_similarities = [similarities[i] for i in 
top_indices]\n\n result = []\n for response_id, similarity in zip(top_ids, top_similarities):\n # Fetch the corresponding response text for each response_id\n c.execute('SELECT response_raw FROM response_raw WHERE id = ?', (response_id,))\n response_text = c.fetchone()\n if response_text is not None:\n response_text = response_text[0] # Extracting text from the tuple\n # result.append((response_id, response_text, similarity))\n result.append(response_text)\n\n return result" }, { "identifier": "save_settings", "path": "modules/menus/setup_menu.py", "snippet": "def setup_menu():" }, { "identifier": "ShellSpeak", "path": "modules/shellSpeak.py", "snippet": "class ShellSpeak:\n def __init__(self, settings, base_path, vectorDb):\n self.llm_len = int(settings.get(\"llm_size\", 14000))\n self.llm_history_len = int(settings.get(\"llm_history_size\", 4000))\n self.llm_file_len = int(settings.get(\"llm_file_size\", 4000))\n self.llm_folder_len = int(settings.get(\"llm_folder_size\", 4000))\n self.llm_slide_len = int(settings.get(\"llm_slide_len\", 120))\n\n self.temp_file = settings.get(\"temp_file\", \"temp\")\n\n self.llm_output_size = int(settings.get(\"llm_output_size\", 4097))\n self.use_cache = settings.get(\"use_cache\", False)\n self.cache_file = settings.get(\"cache_file\", None)\n\n self.vector_for_commands = settings.get(\"vector_for_commands\", False)\n self.vector_for_history = settings.get(\"vector_for_history\", True)\n self.vector_for_folders = settings.get(\"vector_for_folders\", True)\n\n self.data_file = 'path_to_your_data_file.json'\n self.use_indexing = settings.get('use_indexing', False)\n\n self.vector_db = vectorDb\n\n self.settings = settings\n self.command_history = \"\"\n self.settingsRoot = base_path\n\n self.files = []\n\n self.llm = LLM(model_type=ModelTypes(self.settings.get('model', \"OpenAI\")), use_cache=self.use_cache, cache_file=self.cache_file) #Zephyr7bBeta\n\n self.command_runner = CommandRunner(self)\n\n logging.info(f\"Shell Speak Loaded\")\n\n def capture_input(self):\n # Get current working directory\n current_directory = os.getcwd()\n \n # Get environment (if available)\n environment = os.environ.get('VIRTUAL_ENV', None)\n if environment:\n environment = os.path.basename(environment) # Extracting last part of the path as environment name\n \n # Formatted prompt\n prompt = f\"[green]({environment})[cyan] {current_directory}[white]>\" if environment else f\"{current_directory}{self.settings['command_prompt']}\"\n \n set_input = capture_styled_input(prompt)\n logging.info(f\"Using input : {set_input}\")\n return set_input\n \n def show_file(self, caption, body):\n print_colored_text(f\"[yellow]==== {caption} ====\")\n num_width = len(str(len(body)))\n for line_number, line in enumerate(body, 1): # Start counting from 1\n print_colored_text(f'[yellow]{line_number:{num_width}}:[cyan] {line}') # Adjust the format as needed\n print_colored_text(\"[yellow]====================\")\n\n\n def detect_language(self, code):\n try:\n lexer = lexers.guess_lexer(code)\n return lexer.name\n except lexers.ClassNotFound:\n return None\n \n async def execute_python_script(self, python_section, filename):\n lines = python_section.split('\\n')\n if len(lines) == 1:\n # Single-line script, execute directly\n script = lines[0]\n # script = f\"{self.settings['python_command_prompt']}\\n{script}\"\n output = await self.run_python_script(script)\n return output\n else:\n # Multi-line script, create a python file\n python_filename = f'{self.temp_file}.py'\n if filename:\n # Use 
commented out filename\n check_filename = filename\n \n if (is_valid_filename(check_filename)):\n python_filename = filename\n\n script = '\\n'.join(lines)\n script = f\"{self.settings['python_command_prompt']}\\n{script}\"\n\n with open(python_filename, 'w') as python_file:\n python_file.write(script)\n\n self.show_file(\"Python File\", script.split('\\n'))\n user_confirmation = capture_styled_input(\"[yellow]Are you sure you want to run this Python script? (yes/no): \")\n if user_confirmation.lower() != 'yes':\n if python_filename == f'{self.temp_file}.py':\n os.remove(python_filename) # Remove temporary python file\n return CommandResult(\"\", \"Run python file Canceled.\")\n \n output = await self.run_python_script(python_filename)\n if python_filename == f'{self.temp_file}.py':\n os.remove(python_filename) # Remove temporary python file\n return output\n \n async def run_python_script(self, script):\n # If the script is a file, use 'python filename.py' to execute\n if script.endswith('.py'):\n command = f'python -u {script}'\n else:\n command = f'python -u -c \"{script}\"'\n result = await self.run_command(command)\n return CommandResult(result.out, result.err)\n \n def extract_script_command(self, script_type, text):\n match = re.search(rf'```{script_type}(.*?)```', text, re.DOTALL)\n if match:\n shell_section = match.group(1).strip()\n else:\n logging.error(f\"No {script_type} section found\")\n shell_section = None\n\n return shell_section\n\n \n \n\n async def execute_shell_section(self, shell_section, filename):\n\n logging.info(f\"Executing Shell Section : {shell_section}\")\n\n shell_section.strip()\n\n lines = shell_section.split('\\n')\n ret_value = CommandResult(\"\", \"\")\n \n if len(lines) == 1:\n # Single-line command, execute directly\n command = lines[0]\n\n ret_value = await self.run_command(command)\n logging.error(f\"Execute Shell Directory Line Strip: {ret_value}\")\n\n else:\n # Multi-line command, create a batch file\n batch_filename = f'{self.temp_file}.bat'\n if lines[0].startswith('REM '):\n # Use commented out filename\n batch_filename = lines[0][4:].strip()\n # lines = lines[1:] # Remove the filename line\n\n logging.info(f\"batch_filename : {batch_filename}\")\n with open(batch_filename, 'w') as batch_file:\n batch_file.write('\\n'.join(lines))\n self.show_file(\"Batch File\", lines)\n user_confirmation = capture_styled_input(\"[yellow]Are you sure you want to run this batch file? 
(yes/no): \")\n logging.info(f\"user_confirmation : {user_confirmation}\")\n if user_confirmation.lower() != 'yes':\n return CommandResult(\"\", \"Run batch file Canceled.\")\n ret_value = await self.run_command(batch_filename)\n \n logging.info(f\"command output : out: {ret_value.out}, err: {ret_value.err}\")\n if batch_filename == f'{self.temp_file}.bat':\n os.remove(batch_filename) # Remove temporary batch file\n logging.info(f\"removing : {batch_filename}\")\n\n return ret_value\n \n def create_process_group(self):\n # Create a new process group\n process_group_id = os.set_handle_inheritance(0, 1)\n return process_group_id\n\n async def run_command(self, command):\n command += \" && cd\"\n logging.info(f\"run command : {command}\")\n\n stdout, stderr = await self.command_runner.run(command)\n\n \n\n if stderr == \"\":\n lines = stdout.strip().split(\"\\n\")\n if lines:\n new_dir = lines[-1] # Assuming the last line of output contains the new working directory\n if os.path.isdir(new_dir):\n os.chdir(new_dir) # Change to the new working directory in your parent process\n # Remove the last line containing the new directory from the output\n lines = lines[:-1]\n stdout = '\\n'.join(lines)\n else:\n logging.error(f\"Invalid directory: {new_dir}\")\n else:\n logging.error(\"No output to determine the new working directory\")\n\n if stdout.find(\"Traceback (most recent call last):\") > -1:\n stderr = stdout\n stdout = command\n else:\n stderr = f\"Command : {command}, Error: {stderr}\"\n\n logging.info(f\"run return : out: {stdout}, err: {stderr}\")\n\n ret_val = CommandResult(stdout, stderr)\n return ret_val\n \n \n def format_for_display(self, input, output):\n timestamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n self.command_history += f\"History: [Time: {timestamp}\\nInput: {input}\\nOutput: {output}]\\n\"\n self.display_output(output)\n\n\n def shrink_file_data(self, file_data, target_tokens):\n # Get the current token count of file_data\n current_tokens = get_token_count(file_data)\n\n if current_tokens > target_tokens:\n # Estimate the number of characters to keep based on the average token length\n average_token_length = len(file_data) / current_tokens\n chars_to_keep = int(target_tokens * average_token_length)\n \n # Only keep the last part of file_data\n truncated_data = file_data[-chars_to_keep:]\n return truncated_data\n\n # If the file_data is already within the limit, return it as is\n return file_data\n\n\n def find_relevant_data(file_data, target_tokens):\n # Your logic here to find relevant information within the token count\n return file_data[:target_tokens]\n\n def expand_directories(self, file_paths, exclusions):\n new_file_list = []\n for file_path in file_paths:\n if os.path.isdir(file_path):\n # If the path is a directory, ask the user whether to include its files\n user_decision = input(f\"The path '{file_path}' is a directory. Do you want to add all files in this directory? 
(y/n): \")\n if user_decision.lower() == 'y':\n # If yes, walk through the directory and add all files\n for root, dirs, files in os.walk(file_path):\n # Remove excluded directories so os.walk doesn't traverse them\n dirs[:] = [d for d in dirs if d not in exclusions]\n for name in files:\n if name not in exclusions:\n new_file_list.append(os.path.join(root, name))\n else:\n # If no, inform the user that the directory is being skipped\n print_colored_text(f\"[blue]Skipping directory '{file_path}'.\")\n else:\n # If the path is a file, just add it to the list\n if os.path.basename(file_path) not in exclusions:\n new_file_list.append(file_path)\n return new_file_list\n\n\n def string_sizer(self, data, context, length=1024, use_vector=True):\n set_data = data.strip()\n token_count = get_token_count(set_data)\n print(f\"token_count = {token_count}\")\n if token_count > length:\n if use_vector:\n relevant_segments = self.vector_db.search_similar_conversations(context, top_n=length)\n # relevant_segments = find_relevant_file_segments(\n # history_text= context,\n # file_data=set_data,\n # window_size=length, # or any other size you deem appropriate (8124)\n # overlap=self.llm_slide_len, # or any other overlap size you deem appropriate\n # top_k=1 # or any other number of segments you deem appropriate\n # )\n # set_data = '\\n'.join([f\"[{item[0]}, {item[1]}, {item[2]}]\" for item in relevant_segments])\n\n set_data = '/n.../n'.join(relevant_segments)\n else:\n set_data = trim_to_right_token_count(set_data, len)\n \n data_tokens = get_token_count(set_data)\n logging.info(f\"Translate to Command History Token Count : {data_tokens}\")\n return data_tokens, set_data\n\n async def translate_to_command(self, user_input):\n user_command_prompt = self.settings['user_command_prompt']\n send_prompt = self.settings['command_prompt']\n max_llm = (self.llm_len - 80) #80 is used to pad json formatting of System Messages and over all prompt size.\n \n max_llm -= get_token_count(send_prompt)\n max_llm -= get_token_count(user_input)\n \n history_tokens, command_history = self.string_sizer(self.command_history, user_input, self.llm_history_len, self.vector_for_history)\n command_history = json.dumps(command_history)\n max_llm -= history_tokens\n\n # Add get folders/Files\n current_directory = os.getcwd()\n folder_list = list_files_and_folders_with_sizes(current_directory)\n folder_list = {\n \"path\": current_directory,\n \"folder_list\": folder_list\n }\n folder_list = json.dumps(folder_list)\n folder_list_tokens, folder_list = self.string_sizer(folder_list, command_history + \"/n\" + user_input, self.llm_folder_len, self.vector_for_commands)\n folder_list = json.dumps(folder_list)\n max_llm -= folder_list_tokens\n\n set_command_files_data = []\n total_tokens = 0\n\n # Extract file paths and exclusion list from user_input\n file_paths = re.findall(r'file:\\s*(\".*?\"|\\S+)', user_input)\n \n # Remove quotes from file paths, if present\n self.files = [fp.strip('\"') for fp in file_paths]\n for f, file in enumerate(self.files):\n exclusions = file.split(',')\n file_path = exclusions[0]\n\n exclusions.pop(0)\n self.files[f] = file_path\n self.exclusions = exclusions\n self.files = self.expand_directories(self.files, self.exclusions)\n\n # Use the new function to expand directories into file lists\n self.files = self.expand_directories(self.files, self.exclusions)\n\n if len(self.files) > 0:\n total_size = 0\n total_data = \"\"\n files_data = []\n \n for file in self.files:\n file_data_content = read_file(file) # 
Note: Changed to 'file_data_content'\n if len(file_data_content) > 50000: #Cap for NLP = 1000000\n # Prompt the user for a decision\n include_file = input(f\"The file {file} is very large. Do you want to include it? (yes/no): \")\n if include_file.lower() != 'yes' or include_file.lower() != 'y':\n print_colored_text(f\"[yellow]Skipping file: {file}\")\n continue # Skip the rest of the loop and therefore the file\n\n\n file_data = {\n \"file\": file,\n \"file_data\": file_data_content,\n \"file_size\": int(get_file_size(file)),\n \"file_tokens\": get_token_count(file_data_content) # Note: Changed to 'file_data_content'\n }\n \n total_size += file_data[\"file_size\"]\n total_data += file_data[\"file_data\"]\n\n files_data.append(file_data)\n\n # Sort files_data by file_tokens in descending order\n files_data = sorted(files_data, key=lambda x: x['file_tokens'], reverse=True)\n\n remaining_tokens = self.llm_file_len\n remaining_tokens_split = int(remaining_tokens / len(files_data)) + 1\n new_files_data = []\n for f, file in enumerate(files_data):\n if file[\"file_tokens\"] > remaining_tokens_split:\n file[\"fileIndex\"] = f\n file[\"file_tokens\"] = remaining_tokens_split\n new_files_data.append(file)\n else:\n remaining_tokens -= file[\"file_tokens\"]\n div_val = (len(files_data) - (len(files_data) - len(new_files_data)))\n if div_val == 0:\n div_val = 1\n\n remaining_tokens_split = int(remaining_tokens / div_val)\n \n if len(new_files_data) > 0:\n for new_file in new_files_data:\n print_colored_text(f\"[cyan]File {new_file['file']} Trimming\")\n relevant_segments = self.vector_db.search_similar_conversations(new_file['file_data'])\n # relevant_segments = find_relevant_file_segments(\n # history_text=folder_list + \"\\n\" + command_history + \"\\n\"+ user_input,\n # file_data=new_file['file_data'],\n # window_size=new_file['file_tokens'], # or any other size you deem appropriate (8124)\n # overlap=self.llm_slide_len, # or any other overlap size you deem appropriate\n # top_k=1 # or any other number of segments you deem appropriate\n # )\n new_file['file_data'] = '/n.../n'.join(relevant_segments)\n file_data_content = new_file['file_data']\n \n new_file['file_tokens'] = get_token_count(file_data_content)\n\n files_data[new_file[\"fileIndex\"]] = new_file\n\n total_tokens = 0\n for file_data in files_data:\n total_tokens += file_data[\"file_tokens\"]\n\n # Check if the file_data is binary and encode it with base64 if so\n try:\n # This will work if 'file_data' is text\n encoded_data = json.dumps(file_data['file_data'])\n except TypeError:\n # If 'file_data' is binary, encode it with base64\n encoded_data = base64.b64encode(file_data['file_data']).decode('utf-8')\n\n add_command_files_data = {\n \"file:\": file_data[\"file\"],\n \"data:\": encoded_data\n }\n\n set_command_files_data.append(add_command_files_data)\n \n\n command_files_data = json.dumps(set_command_files_data)\n logging.info(f\"Translate to Command File Token Count : {total_tokens}\")\n\n max_llm -= total_tokens\n\n commands = map_possible_commands()\n command_tokens, commands = self.string_sizer(commands, command_files_data + \"\\n\" + folder_list + \"\\n\" + command_history + \"\\n\"+ user_input, max_llm, self.vector_for_commands)\n \n command_tokens = get_token_count(commands)\n logging.info(f\"Translate to Command Commands Token Count : {command_tokens}\")\n \n logging.info(f\"Translate to Command : {user_input}\")\n\n kwargs = {\n 'user_prompt': user_input,\n 'get_os_name': get_os_name(),\n 'commands': commands,\n 
'command_history': command_history,\n 'command_files_data': command_files_data,\n 'current_folders_data': folder_list\n }\n user_command_prompt = replace_placeholders(user_command_prompt, **kwargs)\n system_command_prompt = replace_placeholders(send_prompt, **kwargs)\n\n user_tokens = get_token_count(user_command_prompt)\n system_tokens = get_token_count(system_command_prompt)\n logging.info(f\"Translate to Command User Token Count : {user_tokens}\")\n logging.info(f\"Translate to Command System Token Count : {system_tokens}\")\n\n logging.info(f\"Translate to Command use System Prompt : {system_command_prompt}\")\n logging.info(f\"Translate to Command use User Prompt : {user_command_prompt}\")\n # command_output = self.llm.ask(system_command_prompt, user_command_prompt, model_type=ModelTypes(self.settings.get('model', \"OpenAI\")), return_type=\"json_object\")\n # loop = asyncio.get_event_loop()\n # command_output = await loop.run_in_executor(None, lambda: self.llm.ask(system_command_prompt, user_command_prompt, model_type=ModelTypes(self.settings.get('model', \"OpenAI\"))))\n command_output = await self.llm.async_ask(system_command_prompt, user_command_prompt, model_type=ModelTypes(self.settings.get('model', \"OpenAI\")), return_type=\"json_object\")\n # save_history_data(user_command_prompt, f\"User : {system_command_prompt}\", self.settings)\n self.vector_db.store_long_term_memory(f\"System : {system_command_prompt}\\n User : {user_command_prompt}\")\n logging.info(f\"Translate to Command return Response : {command_output}\")\n\n display_content = \"\"\n display_error = None\n try:\n if not isinstance(command_output, str):\n # Convert non-string command_output to a JSON-formatted string\n command_output_obj = {\n \"type\": \"Unknown\",\n \"Content\": f\"{command_output}\"\n }\n try:\n command_output_obj = json.loads(command_output)\n except json.JSONDecodeError as e:\n # Handle JSON decoding error if it occurs\n # You might want to log this error or handle it as per your application's needs\n command_output_obj = {\"type\": \"Error\", \"content\": str(e)}\n\n\n logging.info(f\"Translate return Response : {command_output}\")\n type = command_output_obj[\"type\"]\n content = command_output_obj.get(\"content\", None)\n err = content.get(\"error\", None)\n\n if not err:\n if type == \"command_execution\":\n command = content[\"command\"]\n if len(command) > 6 and command[:6] == \"python\":\n while True:\n run_as_mod = capture_styled_input(\"[yellow]Do you want to add our compatibility code? 
(yes/no/exit) :\")\n run_as_code = False\n cancel_run = False\n if run_as_mod == \"yes\" or run_as_mod == \"y\":\n run_as_code = True\n break\n elif run_as_mod == \"no\" or run_as_mod == \"n\":\n run_as_code = False\n break\n elif run_as_mod == \"exit\":\n cancel_run = True\n break\n else:\n print_colored_text(\"[red]Invalid Input!\")\n\n if not cancel_run:\n if run_as_code:\n # Extract the Python script or module name from the command\n command_parts = command_output.split()\n script_name = None\n for i, part in enumerate(command_parts):\n if part.endswith(\".py\"):\n script_name = part\n break\n elif part == \"-m\" and i < len(command_parts) - 1:\n script_name = command_parts[i + 1] + \".py\" # Assuming the module name is a Python file name\n break\n\n # Open and read the script if the name is found\n if script_name:\n try:\n with open(script_name, 'r') as file:\n python_code = file.read()\n\n\n # Now, python_code contains the content of the Python file\n # You can now pass this code to execute_python_script function\n display_content = await self.execute_python_script(python_code)\n\n except FileNotFoundError:\n print_colored_text(f\"[red]Error: The file {script_name} was not found.\")\n logging.info(f\"Translate Command Error: The file {script_name} was not found.\")\n except Exception as e:\n print_colored_text(f\"[red]Error: An error occurred while reading the file {script_name}: {e}\")\n logging.info(f\"Translate Command Error: An error occurred while reading the file {script_name}: {e}\")\n else:\n print_colored_text(\"[red]Error: No Python script name could be extracted from the command.\")\n logging.info(f\"Translate Command Error: No Python script name could be extracted from the command.\")\n else:\n success, command_output = await self.execute_command(command_output)\n if not success:\n print_colored_text(f\"[red]Exe Error: {command_output.err}\")\n display_content = command_output.err\n else:\n display_content = command_output.out\n logging.info(f\"Translate Command Execute : {command_output}\")\n else:\n logging.info(f\"Translate Command Canceled : {command_output}\")\n else:\n success, command_output = await self.execute_command(command)\n if not success and command_output.err.strip() != \"\":\n print_colored_text(f\"[red]Exe Error: {command_output.err}\")\n display_content = command_output.err\n else:\n display_content = command_output.out\n logging.info(f\"Translate Command Execute : {display_content}\")\n pass\n elif type == \"script_creation\":\n script_text = content['script']\n script_type = content['script_type']\n script_filename = content.get('script_filename', None)\n\n if script_type == \"shell\" or script_type == \"batch\" or script_type == \"bash\":\n display_content = await self.execute_shell_section(script_text, script_filename)\n elif script_type == \"python\":\n display_content = await self.execute_python_script(script_text, script_filename)\n else:\n display_content = CommandResult(script_text, f\"Invalid Script Type : {script_type}\")\n\n if command_output.err != \"\":\n print_colored_text(f\"[red]Shell Error: {command_output.err} with {command_output.out}\")\n display_content = command_output.err\n else: \n display_content = command_output.out\n\n logging.info(f\"Translate Shell Execute : {command_output}\")\n elif type == \"response_formatting\":\n display_content = content[\"text\"]\n elif type == \"error_handling\":\n display_content = content[\"type\"]\n display_error = err\n else:\n display_content = command_output\n display_error = f\"Invalid 
command type '{type}'.\"\n else:\n display_content = command_output\n display_error = err\n logging.info(f\"Translate to Command Object Error : {err}, command_output= {command_output}\")\n\n\n except Exception as e:\n display_content = command_output\n display_error = e\n logging.info(f\"Translate to Command Object Error : {e}, command_output= {command_output}\")\n\n logging.info(f\"Translate to Command Display Content : {display_content}\")\n\n if display_error:\n return display_error\n \n return display_content\n \n def check_script(self, code_type, text):\n command_output = text\n if f'```{code_type}' in text:\n command_output = self.extract_script_command(code_type, text)\n logging.info(f\"Translate '{code_type}' Code : {text}\")\n\n return command_output\n\n async def execute_command(self, command):\n try:\n logging.info(f\"Execute Command : {command}\")\n result = await self.run_command(command)\n if result.err:\n logging.info(f\"Execute Error : {result.err}\")\n return False, result\n \n logging.info(f\"Execute Output : {result.out}\")\n\n return True, result\n except Exception as e:\n return False, CommandResult(\"\", str(e))\n\n def translate_output(self, output, is_internal=False):\n logging.info(f\"Translate Output : {output}\")\n send_prompt = self.settings['display_prompt']\n\n total_tokens = self.llm_output_size - (get_token_count(send_prompt) + get_token_count(output) + 80)\n\n set_command_history = self.command_history\n token_count = get_token_count(set_command_history)\n\n if token_count > total_tokens:\n set_command_history = trim_to_right_token_count(set_command_history, total_tokens)\n\n max_llm = (self.llm_len - 80) #80 is used to padd json formatting of System Messages and over all prompt size.\n \n max_llm -= get_token_count(send_prompt)\n max_llm -= get_token_count(output)\n \n history_tokens, command_history = self.string_sizer(self.command_history, output, self.llm_history_len)\n command_history = json.dumps(command_history)\n max_llm -= history_tokens\n\n # Add get folders/Files\n current_directory = os.getcwd()\n folder_list = list_files_and_folders_with_sizes(current_directory)\n folder_list = {\n \"path\": current_directory,\n \"folder_list\": folder_list\n }\n folder_list = json.dumps(folder_list)\n folder_list_tokens, folder_list = self.string_sizer(folder_list, self.command_history + \"/n\" + output, self.llm_folder_len)\n folder_list = json.dumps(folder_list)\n max_llm -= folder_list_tokens\n\n kwargs = {\n 'get_os_name': get_os_name(),\n 'command_history': set_command_history,\n 'internal_script': str(is_internal)\n }\n send_prompt = replace_placeholders(send_prompt, **kwargs)\n\n logging.info(f\"Translate Output Display System Prompt : {send_prompt}\")\n logging.info(f\"Translate Output Display User Prompt : {output}\")\n display_output = self.llm.ask(send_prompt, output, model_type=ModelTypes(self.settings.get('model', \"OpenAI\")), return_type=\"text\")\n # save_history_data(output, f\"Assistant : {send_prompt}\", self.settings)\n self.vector_db.store_long_term_memory(f\"System : {send_prompt}\\n User : {output}\")\n\n logging.info(f\"Translate Output Display Response : {display_output}\")\n return display_output\n\n def display_output(self, output):\n logging.info(f\"Display Output : {output}\")\n print_colored_text(output)\n\n def display_about(self):\n print_colored_text(\"[bold][yellow]======================================================\\nShellSpeak\\n======================================================\\n[white]AI powered Console 
Input\\nVisit: https://github.com/TheCompAce/ShellSpeak\\nDonate: @BradfordBrooks79 on Venmo\\n\\n[grey]Type 'help' for Help.\\n[yellow]======================================================\\n\")\n\n def display_help(self):\n print_colored_text(\"[bold][yellow]======================================================\\nShellSpeak Help\\n======================================================\\n[white]Type:\\n'exit': to close ShellSpeak\\n'user: /command/': pass a raw command to execute then reply threw the AI\\n'file: /filepath/': adds file data to the command prompt. (use can send a folder path, using ',' to exclude folders and files.)\\n'clm': Clear command Memory\\n'rset': Reloads the settings file (this happens on every loading of the prompt.)\\n'about': Shows the About Information\\n'help': Shows this Help information.\\n[yellow]======================================================\\n\")\n\n async def run(self):\n self.display_about()\n while True:\n self.settings = load_settings(self.settingsRoot)\n self.files = []\n\n user_input = self.capture_input()\n if user_input.lower() == 'exit':\n break\n elif user_input.lower() == 'about':\n self.display_about()\n elif user_input.lower() == 'help':\n self.display_help()\n elif user_input.lower() == 'rset':\n self.display_output(f\"Settings Updated.\")\n elif user_input.lower() == 'rset':\n self.display_output(f\"Settings Updated.\")\n elif user_input.lower() == 'clm':\n self.command_history = \"\"\n # self.command_history += f\"Command Input: {user_input}\\nCommand Output: Command History cleared.\\n\"\n self.display_output(f\"Command Memory (History) Cleared.\")\n else:\n timestamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n if user_input.lower().startswith('user: '):\n # Bypass AI translation and send raw command to the OS\n raw_command = user_input[6:] # Extract the command part from user_input\n try:\n result = await self.run_command(raw_command)\n except Exception as e:\n translated_command = e\n translated_output = self.translate_output(result.out)\n self.command_history += f\"History: [Time: {timestamp}\\nInput: {user_input}\\nOutput: {result.out} Error: {result.err}]\\n\"\n # self.display_output(f\"Output:\\n{result.out}\\nError:\\n{result.err}\")\n self.display_output(translated_output)\n else:\n # Continue with AI translation for the command\n try:\n translated_command = await self.translate_to_command(user_input)\n except Exception as e:\n translated_command = {\n \"err\" : \"Invalid user_input!\",\n \"out\": e\n }\n # if translated_command.err == \"\":\n # translated_output = self.translate_output(translated_command)\n # self.command_history += f\"Command Input: {user_input}\\nCommand Output: {translated_output}\\n\"\n # self.display_output(translated_output)\n #else:\n user_input = redact_json_values(user_input, [\"run_command_list\", \"command_files\"])\n\n self.command_history += f\"History: [Time: {timestamp}\\nInput: {user_input}\\nOutput: {translated_command}]\\n\"\n if not isinstance(translated_command, str):\n translated_command = str(translated_command) # Convert non-string output to string\n translated_output = self.translate_output(translated_command)\n self.display_output(translated_output)" }, { "identifier": "load_settings", "path": "modules/utils.py", "snippet": "def load_settings(filepath):\n try:\n with open(os.path.join(filepath, \"settings.json\"), 'r') as f:\n settings = json.load(f)\n chk_file = os.path.join(filepath, settings['command_prompt'])\n if os.path.isfile(chk_file):\n with open(chk_file, 
'r') as f:\n settings['command_prompt'] = f.read()\n \n chk_file = os.path.join(filepath, settings['display_prompt'])\n if os.path.isfile(chk_file):\n with open(chk_file, 'r') as f:\n settings['display_prompt'] = f.read()\n\n chk_file = os.path.join(filepath, settings['user_command_prompt'])\n if os.path.isfile(chk_file):\n with open(chk_file, 'r') as f:\n settings['user_command_prompt'] = f.read()\n\n chk_file = os.path.join(filepath, settings['python_command_prompt'])\n if os.path.isfile(chk_file):\n with open(chk_file, 'r') as f:\n settings['python_command_prompt'] = f.read()\n\n return settings\n except FileNotFoundError:\n return {}" } ]
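The VectorDatabase snippet above embeds each stored response as the mean of its Word2Vec word vectors and ranks matches by cosine similarity. The sketch below reproduces that retrieval idea in isolation; the toy corpus and query are assumptions standing in for the rows of the responses and vector_data tables, not the project's actual data.

import numpy as np
from gensim.models import Word2Vec
from sklearn.metrics.pairwise import cosine_similarity

# Toy corpus standing in for preprocessed responses stored in the database.
corpus = [["hello", "world"], ["how", "are", "you"], ["goodbye", "world"]]
model = Word2Vec(corpus, min_count=1)

def embed(tokens):
    # Average the vectors of in-vocabulary tokens, as create_response() does.
    vecs = [model.wv[w] for w in tokens if w in model.wv.key_to_index]
    return np.mean(vecs, axis=0) if vecs else np.zeros(model.vector_size, dtype=np.float32)

stored = np.stack([embed(doc) for doc in corpus])   # analogue of the vector_data rows
query = embed(["hello", "you"])                     # analogue of a preprocessed search text
ranking = cosine_similarity([query], stored)[0].argsort()[::-1]
print("most similar response index:", ranking[0])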
import json
import os
import sys
import asyncio
from modules.vectorDatabase import VectorDatabase
from datetime import datetime
from modules.menus.setup_menu import save_settings, setup_menu
from modules.shellSpeak import ShellSpeak
from modules.utils import load_settings
11,030
# from modules.vectors import load_faiss_index, build_and_save_faiss_index, load_index_data, needs_index_update

def run_async_function(func, *args):
    asyncio.run(func(*args))

async def start_shell_speak(settings, base_path, vector_db):
    await main_start(settings, base_path, vector_db)

async def main_start(settings, base_path, vector_db):
    # Initialize VectorDatabase here if needed globally
# from modules.vectors import load_faiss_index, build_and_save_faiss_index, load_index_data, needs_index_update

def run_async_function(func, *args):
    asyncio.run(func(*args))

async def start_shell_speak(settings, base_path, vector_db):
    await main_start(settings, base_path, vector_db)

async def main_start(settings, base_path, vector_db):
    # Initialize VectorDatabase here if needed globally
shellSpeak = ShellSpeak(settings, base_path, vector_db)
2
2023-10-31 23:35:19+00:00
16k
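In the ShellSpeak record above, translate_to_command extracts file: directives from the user input with a regular expression and treats comma-separated trailing names as exclusions. The stand-alone sketch below illustrates only that parsing step; the sample input string is an assumption, and the exclusion handling is simplified relative to the class's own bookkeeping.

import re

# Hypothetical user input with one quoted path and one path carrying exclusions.
user_input = 'review file: "my docs/report.txt" file: src,venv,.git'

specs = [s.strip('"') for s in re.findall(r'file:\s*(".*?"|\S+)', user_input)]
files, exclusions = [], []
for spec in specs:
    parts = spec.split(',')          # first entry is the path, the rest are exclusion names
    files.append(parts[0])
    exclusions.extend(parts[1:])

print(files)       # ['my docs/report.txt', 'src']
print(exclusions)  # ['venv', '.git']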
qym7/SparseDiff
sparse_diffusion/diffusion_model_sparse.py
[ { "identifier": "utils", "path": "sparse_diffusion/utils.py", "snippet": "def setup_wandb(cfg):\ndef create_folders(args):\ndef to_dense(x, edge_index, edge_attr, batch, charge):\ndef to_dense_node(x, batch):\ndef to_dense_edge(edge_index, edge_attr, batch, max_num_nodes):\ndef encode_no_edge(E):\ndef to_sparse(X, E, y, node_mask, charge=None):\n def __init__(self, X, E, y, charge=None, t_int=None, t=None, node_mask=None):\n def device_as(self, x: torch.Tensor):\n def type_as(self, x: torch.Tensor):\n def mask(self, node_mask=None, collapse=False):\n def collapse(self, collapse_charge=None):\n def __repr__(self):\n def copy(self):\n def __init__(\n self, node, edge_index, edge_attr, y, ptr=None, batch=None, charge=None\n ):\n def type_as(self, x: torch.Tensor):\n def to_device(self, device: str):\n def coalesce(self):\n def symmetry(self):\n def collapse(self, collapse_charge=None):\n def __init__(self, keep_chain):\n def append(self, data):\ndef delete_repeated_twice_edges(edge_index, edge_attr): \ndef to_undirected(edge_index, edge_attr=None):\ndef undirected_to_directed(edge_index, edge_attr=None):\ndef ptr_to_node_mask(ptr, batch, n_node):\ndef concat_sparse_graphs(graphs):\ndef split_samples(samples, start_idx, end_idx):\ndef densify_noisy_data(sparse_noisy_data):\n E = to_dense_edge(edge_index, edge_attr, batch, max_num_nodes)\n E = to_dense_adj(\n edge_index=edge_index,\n batch=batch,\n edge_attr=edge_attr,\n max_num_nodes=max_num_nodes,\n )\n E = encode_no_edge(E)\n E[:, :, :, 0] = first_elt\nclass PlaceHolder:\nclass SparsePlaceHolder:\nclass SparseChainPlaceHolder:" }, { "identifier": "diffusion_utils", "path": "sparse_diffusion/diffusion/diffusion_utils.py", "snippet": "def sum_except_batch(x):\ndef assert_correctly_masked(variable, node_mask):\ndef sample_gaussian(size):\ndef sample_gaussian_with_mask(size, node_mask):\ndef clip_noise_schedule(alphas2, clip_value=0.001):\ndef cosine_beta_schedule(timesteps, s=0.008, raise_to_power: float = 1):\ndef cosine_beta_schedule_discrete(timesteps, s=0.008):\ndef custom_beta_schedule_discrete(timesteps, average_num_nodes=50, s=0.008):\ndef gaussian_KL(q_mu, q_sigma):\ndef cdf_std_gaussian(x):\ndef SNR(gamma):\ndef inflate_batch_array(array, target_shape):\ndef sigma(gamma, target_shape):\ndef alpha(gamma, target_shape):\ndef check_mask_correct(variables, node_mask):\ndef check_tensor_same_size(*args):\ndef sigma_and_alpha_t_given_s(\n gamma_t: torch.Tensor, gamma_s: torch.Tensor, target_size: torch.Size\n):\ndef reverse_tensor(x):\ndef sample_discrete_features(probX, probE, node_mask, prob_charge=None):\ndef sample_discrete_edge_features(probE, node_mask):\ndef sample_discrete_node_features(probX, node_mask):\ndef compute_posterior_distribution(M, M_t, Qt_M, Qsb_M, Qtb_M):\ndef compute_sparse_posterior_distribution(M, M_t, Qt_M, Qsb_M, Qtb_M):\ndef compute_batched_over0_posterior_distribution(X_t, Qt, Qsb, Qtb):\ndef mask_distributions(\n true_X, true_E, pred_X, pred_E, node_mask, true_charge=None, pred_charge=None\n):\ndef posterior_distributions(X, E, X_t, E_t, y_t, Qt, Qsb, Qtb, charge, charge_t):\ndef sample_discrete_feature_noise(limit_dist, node_mask):\ndef sample_sparse_discrete_feature_noise(limit_dist, node_mask):\ndef compute_sparse_batched_over0_posterior_distribution(\n input_data, batch, Qt, Qsb, Qtb\n):\n M = M.flatten(start_dim=1, end_dim=-2).to(\n torch.float32\n ) # (bs, N, d) with N = n or n * n\n U_X = x_limit.flatten(end_dim=-2).multinomial(1).reshape(bs, n_max)\n U_E = 
e_limit.flatten(end_dim=-2).multinomial(1).reshape(bs, n_max, n_max)\n U_X = U_X.type_as(long_mask)\n U_E = U_E.type_as(long_mask)\n U_X = F.one_hot(U_X, num_classes=x_limit.shape[-1]).float()\n U_E = F.one_hot(U_E, num_classes=e_limit.shape[-1]).float()\n U_E = U_E * upper_triangular_mask\n U_E = U_E + torch.transpose(U_E, 1, 2)" }, { "identifier": "get_computational_graph", "path": "sparse_diffusion/diffusion/sample_edges_utils.py", "snippet": "def get_computational_graph(\n triu_query_edge_index,\n clean_edge_index,\n clean_edge_attr,\n triu=True,\n):\n \"\"\"\n concat and remove repeated edges of query_edge_index and clean_edge_index\n mask the position of query_edge_index\n in case where query_edge_attr is None, return query_edge_attr as 0\n else, return query_edge_attr for all query_edge_index\n (used in apply noise, when we need to sample the query edge attr)\n \"\"\"\n # get dimension information\n de = clean_edge_attr.shape[-1]\n device = triu_query_edge_index.device\n\n # create default query edge attr\n default_query_edge_attr = torch.zeros((triu_query_edge_index.shape[1], de)).to(\n device\n )\n default_query_edge_attr[:, 0] = 1\n\n # if query_edge_attr is None, use default query edge attr\n if triu:\n # make random edges symmetrical\n query_edge_index, default_query_edge_attr = utils.to_undirected(\n triu_query_edge_index, default_query_edge_attr\n )\n _, default_query_edge_attr = utils.to_undirected(\n triu_query_edge_index, default_query_edge_attr\n )\n else:\n query_edge_index, default_query_edge_attr = triu_query_edge_index, default_query_edge_attr\n\n # get the computational graph: positive edges + random edges\n comp_edge_index = torch.hstack([clean_edge_index, query_edge_index])\n default_comp_edge_attr = torch.argmax(\n torch.vstack([clean_edge_attr, default_query_edge_attr]), -1\n )\n\n # reduce repeated edges and get the mask\n assert comp_edge_index.dtype == torch.long\n _, min_default_edge_attr = coalesce(\n comp_edge_index, default_comp_edge_attr, reduce=\"min\"\n )\n\n max_comp_edge_index, max_default_edge_attr = coalesce(\n comp_edge_index, default_comp_edge_attr, reduce=\"max\"\n )\n query_mask = min_default_edge_attr == 0\n comp_edge_attr = F.one_hot(max_default_edge_attr.long(), num_classes=de).float()\n\n return query_mask, max_comp_edge_index, comp_edge_attr" }, { "identifier": "mask_query_graph_from_comp_graph", "path": "sparse_diffusion/diffusion/sample_edges_utils.py", "snippet": "def mask_query_graph_from_comp_graph(\n triu_query_edge_index, edge_index, edge_attr, num_classes\n):\n query_edge_index = utils.to_undirected(triu_query_edge_index)\n # import pdb; pdb.set_trace()\n\n all_edge_index = torch.hstack([edge_index, query_edge_index])\n all_edge_attr = torch.hstack(\n [\n torch.argmax(edge_attr, -1),\n torch.zeros(query_edge_index.shape[1]).to(edge_index.device),\n ]\n )\n\n assert all_edge_index.dtype == torch.long\n _, min_edge_attr = coalesce(all_edge_index, all_edge_attr, reduce=\"min\")\n\n max_edge_index, max_edge_attr = coalesce(\n all_edge_index, all_edge_attr, reduce=\"max\"\n )\n\n return (\n min_edge_attr == 0,\n F.one_hot(max_edge_attr.long(), num_classes=num_classes),\n max_edge_index,\n )" }, { "identifier": "sample_non_existing_edge_attr", "path": "sparse_diffusion/diffusion/sample_edges_utils.py", "snippet": "def sample_non_existing_edge_attr(query_edges_dist_batch, num_edges_to_sample):\n device = query_edges_dist_batch.device\n max_edges_to_sample = int(num_edges_to_sample.max())\n\n if max_edges_to_sample == 0:\n return 
torch.tensor([]).to(device)\n\n query_mask = (\n torch.ones((len(num_edges_to_sample), max_edges_to_sample))\n .cumsum(-1)\n .to(device)\n )\n query_mask[\n query_mask > num_edges_to_sample.unsqueeze(-1).repeat(1, max_edges_to_sample)\n ] = 0\n query_mask[query_mask > 0] = 1\n query_edge_attr = (\n torch.multinomial(query_edges_dist_batch, max_edges_to_sample, replacement=True)\n + 1\n )\n query_edge_attr = query_edge_attr.flatten()[query_mask.flatten().bool()]\n\n return query_edge_attr" }, { "identifier": "condensed_to_matrix_index_batch", "path": "sparse_diffusion/diffusion/sample_edges_utils.py", "snippet": "def condensed_to_matrix_index_batch(condensed_index, num_nodes, edge_batch, ptr):\n \"\"\"From https://stackoverflow.com/questions/5323818/condensed-matrix-function-to-find-pairs.\n condensed_index: (E) example: [0, 1, 0, 2] where [0, 1] are edges for graph0 and [0,2] edges for graph 1\n num_nodes: (bs)\n edge_batch: (E): tells to which graph each edge belongs\n ptr: (bs+1): contains the offset for the number of nodes in each graph.\n \"\"\"\n bb = -2 * num_nodes[edge_batch] + 1\n\n # Edge ptr adds an offset of n (n-1) / 2 to each edge index\n ptr_condensed_index = condensed_index\n ii = torch.div(\n (-bb - torch.sqrt(bb**2 - 8 * ptr_condensed_index)), 2, rounding_mode=\"floor\"\n )\n jj = (\n ptr_condensed_index\n + torch.div(ii * (bb + ii + 2), 2, rounding_mode=\"floor\")\n + 1\n )\n return torch.vstack((ii.long(), jj.long())) + ptr[edge_batch]" }, { "identifier": "sample_query_edges", "path": "sparse_diffusion/diffusion/sample_edges.py", "snippet": "def sample_query_edges(\n num_nodes_per_graph: Tensor, edge_proportion=None, num_edges_to_sample=None\n):\n \"\"\"Sample edge_proportion % of edges in each graph\n num_nodes_per_graph: (bs): tensor of int.\n Return: edge_index, batch\n \"\"\"\n assert num_nodes_per_graph.dtype == torch.long\n # num_nodes could be 1 in QM9\n assert torch.all(num_nodes_per_graph >= 1), num_nodes_per_graph\n\n batch_size = len(num_nodes_per_graph)\n device = num_nodes_per_graph.device\n\n n = num_nodes_per_graph\n max_condensed_value = (n * (n - 1) / 2).long()\n if num_edges_to_sample is None and edge_proportion is not None:\n assert 0 < edge_proportion <= 1, edge_proportion\n num_edges_to_sample = torch.ceil(edge_proportion * max_condensed_value).long()\n elif num_edges_to_sample is not None:\n assert num_edges_to_sample.dtype == torch.long\n else:\n raise ValueError(\n \"Either edge_proportion or num_edges_to_sample should be provided\"\n )\n\n condensed_index, edge_batch = sampled_condensed_indices_uniformly(\n max_condensed_value, num_edges_to_sample\n )\n\n if batch_size == 1:\n edge_index = condensed_to_matrix_index(condensed_index, num_nodes=n[0])\n return edge_index, torch.zeros(n, dtype=torch.long, device=device)\n\n if len(torch.unique(num_nodes_per_graph)) == 1:\n # Case of several graphs of the same size\n # Add the offset to the edge_index\n offset = torch.cumsum(num_nodes_per_graph, dim=0)[:-1] # (bs - 1)\n offset = torch.cat(\n (torch.zeros(1, device=device, dtype=torch.long), offset)\n ) # (bs)\n\n edge_index = condensed_to_matrix_index_batch(\n condensed_index,\n num_nodes=num_nodes_per_graph,\n edge_batch=edge_batch,\n ptr=offset,\n )\n return edge_index, torch.arange(batch_size, device=device).repeat_interleave(n)\n\n # Most general case: graphs of varying sizes\n # condensed_index = randperm_expanded[complete_mask] # (sum(num_edges_per_graph))\n offset = torch.cumsum(num_nodes_per_graph, dim=0)[:-1] # (bs - 1)\n offset = torch.cat(\n 
(torch.zeros(1, device=device, dtype=torch.long), offset)\n ) # (bs)\n edge_index = condensed_to_matrix_index_batch(\n condensed_index,\n num_nodes=num_nodes_per_graph,\n edge_batch=edge_batch,\n ptr=offset,\n )\n # Get the batch information\n batch = torch.arange(batch_size, device=device).repeat_interleave(\n num_nodes_per_graph\n )\n return edge_index, batch" }, { "identifier": "sample_non_existing_edges_batched", "path": "sparse_diffusion/diffusion/sample_edges.py", "snippet": "def sample_non_existing_edges_batched(\n num_edges_to_sample, existing_edge_index, num_nodes, batch\n):\n \"\"\"Sample non-existing edges from a complete graph.\n num_edges_to_sample: (bs) long\n existing_edge_index: (2, E)\n num_nodes: (bs) long\n batch: (N) long\n existing_edge_index only contains edges that exist in the top part of triangle matrix\n \"\"\"\n device = existing_edge_index.device\n unit_graph_mask = num_nodes == 1\n unit_graph_mask_offset = torch.cat(\n (torch.zeros(1, device=device, dtype=torch.bool), unit_graph_mask[:-1])\n )\n\n # Compute the number of existing and non-existing edges.\n num_edges_total = (num_nodes * (num_nodes - 1) / 2).long()\n # Count existing edges using global pooling. In case a graph has no edge, global_add_pool\n # May return something of the wrong length. To avoid this, add a 0 for each graph\n # TODO: check if it can be simplified using the size argument of global add pool\n # full_edge_count = torch.hstack((torch.ones(existing_edge_index.shape[1], device=device),\n # torch.zeros(batch.max()+1, device=device))) # (ne+bs)\n # full_edge_batch = torch.hstack((batch[existing_edge_index[0]],\n # torch.arange(batch.max()+1, device=device))) # (ne+bs)\n # num_edges_existing = pool.global_add_pool(x=full_edge_count, batch=full_edge_batch).long()\n num_edges_existing = pool.global_add_pool(\n x=torch.ones(existing_edge_index.shape[1], device=device),\n batch=batch[existing_edge_index[0]],\n size=len(num_edges_to_sample),\n ).long()\n num_non_existing_edges = num_edges_total - num_edges_existing\n assert (num_edges_to_sample <= num_non_existing_edges).all(), (\n num_edges_to_sample,\n num_non_existing_edges,\n )\n\n # Sample non-existing edge indices without considering existing edges.\n # print(\"Num edges non existing\", num_non_existing_edges)\n # multinomial and not randint because we want to sample without replacement\n sampled_indices, sampled_edge_batch = sampled_condensed_indices_uniformly(\n max_condensed_value=num_non_existing_edges,\n num_edges_to_sample=num_edges_to_sample,\n )\n\n # Compute the offset (bs, ) for each graph, where offset -> nbr of nodes, sq_offset -> nbr of edges\n # Go from a matrix problem to a 1d problem, it is easier\n existing_edge_batch = batch[existing_edge_index[0]]\n num_edges_total = (num_nodes * (num_nodes - 1) / 2).long()\n sq_offset = torch.cumsum(num_edges_total, dim=0)[:-1] # (bs - 1)\n # Prepend a 0\n sq_offset = torch.cat(\n (torch.zeros(1, device=device, dtype=torch.long), sq_offset)\n ) # (bs)\n\n offset = torch.cumsum(num_nodes, dim=0)[\n :-1\n ] # (bs - 1) # (bs - 1)\n offset = torch.cat(\n (torch.zeros(1, device=device, dtype=torch.long), offset)\n ) # (bs)\n # existing_indices (E, ) is of form [0 1 2 3 4 0 2 3 4]\n rescaled_edge_index = (\n existing_edge_index - offset[existing_edge_batch]\n ) # of form [0 1 2 3 4 0 2 3 4]\n existing_indices = matrix_to_condensed_index_batch(\n rescaled_edge_index, num_nodes=num_nodes, edge_batch=existing_edge_batch\n )\n\n # Add offset to the sampled indices\n # Example of sampled condensed: 
[0 3 1 0 2]\n epsilon = 0.1\n sampled_indices_offset = sq_offset[sampled_edge_batch] # (E_sample, )\n # print(\"sampled indices\", sampled_indices)\n # print(\"sampled edge batch\", sampled_edge_batch)\n samp_ind_w_offset = sampled_indices + sampled_indices_offset\n samp_ind_w_offset = torch.sort(samp_ind_w_offset)[\n 0\n ] # E.g. [0 1 3 6 8], where [0 1 3] belong to a graph of 4 nodes, [6 8] to a graph of 3 nodes\n # print(\"Sampled indices with offset\", samp_ind_w_offset)\n # add small value to create an order later in the sort\n samp_ind_w_offset = samp_ind_w_offset + epsilon\n\n # Add virtual edges to the existing edges to mark the beginning of each graph, for batch processing\n # After adding epsilon, sqrt_ptr is smaller than all edges of the next graph, and bigger than all edges of the current graph\n # * when there exists graphs with size 1, there might be identical values in sq_offset, also in virtual nodes\n existing_ind_w_offset = existing_indices + sq_offset[existing_edge_batch]\n virtual_nodes = (\n sq_offset - epsilon\n ) # Introduce virtual nodes that will be used later to split graphs\n # add different offset for graphs of size 1 to separate them and their following graphs\n virtual_nodes[unit_graph_mask] = virtual_nodes[unit_graph_mask] - 0.1\n existing_ind_w_offset = torch.cat((existing_ind_w_offset, virtual_nodes))\n existing_ind_w_offset, existing_condensed_offset_argsort = torch.sort(\n existing_ind_w_offset\n )\n # print(\"Existing condensed indices with offset\", existing_ind_w_offset)\n virtual_existing_mask = torch.cat(\n (\n torch.zeros(len(existing_indices), dtype=torch.long, device=device),\n torch.ones(len(sq_offset), dtype=torch.long, device=device),\n )\n )\n virtual_existing_mask = virtual_existing_mask[\n existing_condensed_offset_argsort\n ] # [1 0 0 0 1 0 0]\n # print('Virtual nodes mask', virtual_existing_mask)\n\n # Compute the mask of free edges\n # When there exists graphs with size 1, free spots might be negative, which means that\n # existing condensed indices have same neighbor value\n free_spots = (\n torch.diff(existing_ind_w_offset, prepend=torch.tensor([-1]).to(device)) - 1\n ) # [-0.1, 0, 2, 9, 9.9, 18, 25]\n free_spots = torch.ceil(free_spots).long() # [0, 0, 1, 6, 0, 8, 6]\n # print(\"Free spots\", free_spots)\n # Map these values to index\n cumsum = torch.cumsum(free_spots, dim=0).long() # [1 2 3 4 5 6 7]\n cumsum_batch = (\n torch.cumsum(virtual_existing_mask, dim=0).long() - 1\n ) # [1 1 1 1 2 2 2] - 1\n # delete the offset of free spots to cumsum\n cumsum_offset = cumsum[virtual_existing_mask.bool()][cumsum_batch]\n # print(\"Cumsum offset\", cumsum_offset)\n # print(\"Cumsum before removing offset\", cumsum)\n cumsum = cumsum - cumsum_offset # [0 2 5 0 2 5]\n # add the offset of edge number to cumsum\n cumsum = cumsum + sq_offset[cumsum_batch] # [0 2 5 6 8 11]\n # print(\"Cumsum\", cumsum)\n # Cumsum now contains the number of free spots at the left -- it is computed separetely for each graph\n # An offset is added on the result\n\n # Add virtual edges to the sampled edges to mark the end of each graph\n num_sampled_edges = len(sampled_indices)\n num_virtual_nodes = len(sq_offset)\n num_free_spots_indices = len(cumsum)\n\n # Group the different vectors together: the existing edges, the virtual nodes and the free spots\n grouped = torch.cat((samp_ind_w_offset, virtual_nodes, cumsum))\n # print(\"grouped\", grouped)\n sorted, argsort = torch.sort(grouped)\n # print(\"sorted\", sorted)\n # Create the masks corresponding to these 3 types of 
objects\n num_total = num_sampled_edges + num_virtual_nodes + num_free_spots_indices\n # mask is created for virtual nodes, in order to reduce the offset for cumsum\n virtual_sampled_mask = torch.zeros(num_total, dtype=torch.bool, device=device)\n virtual_sampled_mask[\n num_sampled_edges : num_sampled_edges + num_virtual_nodes\n ] = True\n virtual_sampled_mask = virtual_sampled_mask[argsort]\n\n free_spots_ind_mask = torch.zeros(num_total, dtype=torch.bool, device=device)\n free_spots_ind_mask[-num_free_spots_indices:] = True\n free_spots_ind_mask = free_spots_ind_mask[argsort]\n\n sampled_ind_mask = torch.zeros(num_total, dtype=torch.bool, device=device)\n sampled_ind_mask[:num_sampled_edges] = True\n sampled_ind_mask = sampled_ind_mask[argsort]\n\n # to_shift tells by how much to shift sampled and virtual edges\n to_shift = torch.cumsum(free_spots_ind_mask, dim=0) # - sampled_edge_batch\n # print(\"to_shift\", to_shift)\n new_indices = sorted + to_shift\n # remove epsilon added to sampled edges\n new_indices = new_indices[sampled_ind_mask] - epsilon\n # remove cumsum_offset to unify the indices of different graphs from cumsum_mask\n # 1 is added to compensate the fact that cumsum is computed with virtual nodes\n cumsum_offset = to_shift[virtual_sampled_mask.bool()][sampled_edge_batch] + 1\n cumsum_offset[unit_graph_mask_offset[sampled_edge_batch]] = (\n cumsum_offset[unit_graph_mask_offset[sampled_edge_batch]] + 1\n )\n # print(\"Cumsum offset\", cumsum_offset)\n # remove sq_offset contained by sorted\n new_indices = new_indices - cumsum_offset - sq_offset[sampled_edge_batch]\n # print(\"New indices long\", new_indices)\n new_indices = new_indices.round()\n # print('Existing edge indices', existing_indices)\n # Convert to matrix index.\n new_edge_index = condensed_to_matrix_index_batch(\n condensed_index=new_indices,\n num_nodes=num_nodes,\n edge_batch=sampled_edge_batch,\n ptr=offset,\n )\n\n # # debugging\n # # check if there are repeated edges\n # print('smallest graph size is {}'.format(num_nodes.min()))\n # existing_ind_w_offset = existing_indices + sq_offset[existing_edge_batch]\n # samp_ind_w_offset = new_indices + sq_offset[sampled_edge_batch]\n # repeated = existing_ind_w_offset.round().unsqueeze(1) == samp_ind_w_offset.round().unsqueeze(0)\n # repeated_ind = torch.where(repeated)\n # if repeated.sum()>0:\n # print('repeated edges')\n # import pdb; pdb.set_trace()\n # cur_shift = to_shift[sampled_ind_mask][1188] - cumsum_offset[1188]\n\n return new_edge_index" }, { "identifier": "sampled_condensed_indices_uniformly", "path": "sparse_diffusion/diffusion/sample_edges.py", "snippet": "def sampled_condensed_indices_uniformly(\n max_condensed_value, num_edges_to_sample, return_mask=False\n):\n \"\"\"Max_condensed value: (bs) long tensor\n num_edges_to_sample: (bs) long tensor\n Return: condensed_index e.g. 
[0 1 3 0 2]\n \"\"\"\n assert (0 <= num_edges_to_sample).all(), (\n num_edges_to_sample <= max_condensed_value\n ).all()\n batch_size = max_condensed_value.shape[0]\n device = max_condensed_value.device\n\n if (\n len(torch.unique(max_condensed_value)) == 1\n and len(torch.unique(num_edges_to_sample)) == 1\n ):\n max_val = max_condensed_value[0]\n to_sample = num_edges_to_sample[0]\n sampled_condensed = torch.multinomial(\n torch.ones(max_val, device=device), num_samples=to_sample, replacement=False\n )\n edge_batch = torch.zeros(\n num_edges_to_sample[0], device=device, dtype=torch.long\n )\n if batch_size == 1:\n if return_mask:\n condensed_mask = torch.arange(num_edges_to_sample[0], device=device)\n return sampled_condensed, edge_batch, condensed_mask\n\n return sampled_condensed, edge_batch\n\n # Case of several graphs of the same size\n # Repeat the edge_index for each graph and aggregate them\n sampled_condensed_repeated = (\n sampled_condensed.unsqueeze(0).expand(batch_size, -1).flatten()\n )\n edge_batch = torch.arange(batch_size, device=device).repeat_interleave(\n to_sample\n )\n\n if return_mask:\n condensed_mask = torch.arange(num_edges_to_sample[0], device=device)\n condensed_mask = (\n condensed_mask.unsqueeze(0).expand(batch_size, -1).flatten()\n )\n return sampled_condensed_repeated, edge_batch, condensed_mask\n\n return sampled_condensed_repeated, edge_batch\n\n # Most general case: graphs of varying sizes\n max_size = torch.max(max_condensed_value)\n # import pdb; pdb.set_trace()\n if max_size > 10**7:\n print(\"[Warning]: sampling random edges might bew slow\")\n\n randperm_full = torch.randperm(max_size, device=device) # (max_condensed)\n randperm_expanded = randperm_full.unsqueeze(0).expand(\n batch_size, -1\n ) # (bs, max_condensed)\n\n # General goal: keep the indices on the left that are not too big for each graph\n # Mask1 is used to mask the indices that are too large for current graph\n mask1 = randperm_expanded < max_condensed_value.unsqueeze(1) # (bs, max_condensed)\n\n # Cumsum(mask1) is the number of valid indices on the left of each index\n # Mask2 will select the right number of indices on the left\n mask2 = torch.cumsum(mask1, dim=1) <= num_edges_to_sample.unsqueeze(\n 1\n ) # (bs, max_condensed)\n complete_mask = mask1 * mask2\n condensed_index = randperm_expanded[complete_mask] # (sum(num_edges_per_graph))\n edge_batch = (\n torch.arange(batch_size, device=device)\n .unsqueeze(1)\n .expand(-1, max_size)[complete_mask]\n )\n\n if return_mask:\n complete_mask = complete_mask.cumsum(-1)[complete_mask] - 1\n return condensed_index, edge_batch, complete_mask\n\n return condensed_index, edge_batch" }, { "identifier": "SignNetNodeEncoder", "path": "sparse_diffusion/models/sign_pos_encoder.py", "snippet": "class SignNetNodeEncoder(torch.nn.Module):\n \"\"\"SignNet Positional Embedding node encoder.\n https://arxiv.org/abs/2202.13013\n https://github.com/cptq/SignNet-BasisNet\n Uses precomputated Laplacian eigen-decomposition, but instead\n of eigen-vector sign flipping + DeepSet/Transformer, computes the PE as:\n SignNetPE(v_1, ... 
, v_k) = \\rho ( [\\phi(v_i) + \\rhi(−v_i)]^k_i=1 )\n where \\phi is GIN network applied to k first non-trivial eigenvectors, and\n \\rho is an MLP if k is a constant, but if all eigenvectors are used then\n \\rho is DeepSet with sum-pooling.\n SignNetPE of size dim_pe will get appended to each node feature vector.\n If `expand_x` set True, original node features will be first linearly\n projected to (dim_emb - dim_pe) size and the concatenated with SignNetPE.\n Args:\n dim_emb: Size of final node embedding\n expand_x: Expand node features `x` from dim_in to (dim_emb - dim_pe)\n \"\"\"\n\n def __init__(self, dataset_infos, sn_hidden_dim, k_node, expand_x=True):\n \"\"\"\n Initialize the model with the default parameters.\n \"\"\"\n super().__init__()\n self.dataset_infos = dataset_infos\n self.k_node = k_node\n dim_in = (\n dataset_infos.input_dims.X + dataset_infos.input_dims.charge - self.k_node\n ) # Expected original input node features dim\n dim_emb = sn_hidden_dim\n\n dim_pe = 16 # Size of PE embedding\n model_type = \"DeepSet\" # Encoder NN model type for SignNet\n\n if model_type not in [\"MLP\", \"DeepSet\"]:\n raise ValueError(f\"Unexpected SignNet model {model_type}\")\n self.model_type = model_type\n sign_inv_layers = 3 # Num. layers in \\phi GNN part\n rho_layers = 1 # Num. layers in \\rho MLP/DeepSet\n\n if rho_layers < 1:\n raise ValueError(f\"Num layers in rho model has to be positive.\")\n\n max_freqs = 10 # Num. eigenvectors (frequencies)\n self.pass_as_var = False # Pass PE also as a separate variable\n\n if dim_emb - dim_pe < 1:\n raise ValueError(\n f\"SignNet PE size {dim_pe} is too large for \"\n f\"desired embedding size of {dim_emb}.\"\n )\n\n if expand_x:\n self.linear_x = nn.Linear(dim_in, dim_emb - dim_pe)\n self.expand_x = expand_x\n\n # Sign invariant neural network.\n if self.model_type == \"MLP\":\n self.sign_inv_net = GINDeepSigns(\n in_channels=1,\n hidden_channels=64,\n out_channels=4,\n num_layers=sign_inv_layers,\n k=max_freqs,\n dim_pe=dim_pe,\n rho_num_layers=rho_layers,\n use_bn=True,\n dropout=0.0,\n activation=\"relu\",\n )\n elif self.model_type == \"DeepSet\":\n self.sign_inv_net = MaskedGINDeepSigns(\n in_channels=1,\n hidden_channels=64,\n out_channels=4,\n num_layers=sign_inv_layers,\n dim_pe=dim_pe,\n rho_num_layers=rho_layers,\n use_bn=True,\n dropout=0.0,\n activation=\"relu\",\n )\n else:\n raise ValueError(f\"Unexpected model {self.model_type}\")\n\n def forward(self, x, edge_index, batch):\n eigvecs = x[:, -self.k_node:]\n x = x[:, : -self.k_node]\n\n pos_enc = eigvecs.unsqueeze(-1) # (Num nodes) x (Num Eigenvectors) x 1\n\n empty_mask = torch.isnan(pos_enc)\n pos_enc[empty_mask] = 0 # (Num nodes) x (Num Eigenvectors) x 1\n\n # SignNet\n pos_enc = self.sign_inv_net(\n pos_enc, edge_index, batch\n ) # (Num nodes) x (pos_enc_dim)\n\n # Expand node features if needed\n if self.expand_x:\n h = self.linear_x(x)\n else:\n h = x\n\n # Concatenate final PEs to input embedding\n x = torch.cat((h, pos_enc), 1)\n # Keep PE also separate in a variable (e.g. for skip connections to input)\n\n return x" } ]
import time
import os
import math
import pickle
import json
import torch
import wandb
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import pytorch_lightning as pl
from tqdm import tqdm
from models.conv_transformer_model import GraphTransformerConv
from diffusion.noise_schedule import (
    PredefinedNoiseScheduleDiscrete,
    MarginalUniformTransition,
)
from metrics.train_metrics import TrainLossDiscrete
from metrics.abstract_metrics import SumExceptBatchMetric, SumExceptBatchKL, NLL
from analysis.visualization import Visualizer
from sparse_diffusion import utils
from sparse_diffusion.diffusion import diffusion_utils
from sparse_diffusion.diffusion.sample_edges_utils import (
    get_computational_graph,
    mask_query_graph_from_comp_graph,
    sample_non_existing_edge_attr,
    condensed_to_matrix_index_batch,
)
from sparse_diffusion.diffusion.sample_edges import (
    sample_query_edges,
    sample_non_existing_edges_batched,
    sampled_condensed_indices_uniformly,
)
from sparse_diffusion.models.sign_pos_encoder import SignNetNodeEncoder
11,026
E_t_index = torch.hstack([dir_edge_index, neg_edge_index]) else: E_t_attr = dir_edge_attr E_t_index = dir_edge_index # mask non-existing edges mask = E_t_attr != 0 E_t_attr = E_t_attr[mask] E_t_index = E_t_index[:, mask] E_t_index, E_t_attr = utils.to_undirected(E_t_index, E_t_attr) E_t_attr = F.one_hot(E_t_attr, num_classes=self.out_dims.E) node_t = F.one_hot(node_t, num_classes=self.out_dims.X) sparse_noisy_data = { "t_int": t_int, "t_float": t_float, "beta_t": beta_t, "alpha_s_bar": alpha_s_bar, "alpha_t_bar": alpha_t_bar, "node_t": node_t, "edge_index_t": E_t_index, "edge_attr_t": E_t_attr, "comp_edge_index_t": None, "comp_edge_attr_t": None, # computational graph "y_t": data.y, "batch": data.batch, "ptr": data.ptr, "charge_t": charge_t, } return sparse_noisy_data def compute_val_loss(self, pred, noisy_data, X, E, y, node_mask, charge, test): """Computes an estimator for the variational lower bound. pred: (batch_size, n, total_features) noisy_data: dict X, E, y : (bs, n, dx), (bs, n, n, de), (bs, dy) node_mask : (bs, n) Output: nll (size 1) """ t = noisy_data["t_float"] # 1. N = node_mask.sum(1).long() log_pN = self.node_dist.log_prob(N) # 2. The KL between q(z_T | x) and p(z_T) = Uniform(1/num_classes). Should be close to zero. kl_prior = self.kl_prior(X, E, node_mask, charge=charge) # 3. Diffusion loss loss_all_t = self.compute_Lt( X, E, y, charge, pred, noisy_data, node_mask, test=test ) # Combine terms nlls = - log_pN + kl_prior + loss_all_t assert (~nlls.isnan()).all(), f"NLLs contain NaNs: {nlls}" assert len(nlls.shape) == 1, f"{nlls.shape} has more than only batch dim." # Update NLL metric object and return batch nll nll = (self.test_nll if test else self.val_nll)(nlls) # Average over the batch if wandb.run: wandb.log( { "kl prior": kl_prior.mean(), "Estimator loss terms": loss_all_t.mean(), "log_pn": log_pN.mean(), "val_nll": nll, "epoch": self.current_epoch }, commit=False, ) return nll def kl_prior(self, X, E, node_mask, charge): """Computes the KL between q(z1 | x) and the prior p(z1) = Normal(0, 1). This is essentially a lot of work for something that is in practice negligible in the loss. However, you compute it so that you see it when you've made a mistake in your noise schedule. """ # Compute the last alpha value, alpha_T. ones = torch.ones((X.size(0), 1), device=X.device) Ts = self.T * ones alpha_t_bar = self.noise_schedule.get_alpha_bar(t_int=Ts) # (bs, 1) Qtb = self.transition_model.get_Qt_bar(alpha_t_bar, self.device) # Compute transition probabilities probX = X @ Qtb.X # (bs, n, dx_out) probE = E @ Qtb.E.unsqueeze(1) # (bs, n, n, de_out) assert probX.shape == X.shape bs, n, _ = probX.shape limit_X = self.limit_dist.X[None, None, :].expand(bs, n, -1).type_as(probX) limit_E = ( self.limit_dist.E[None, None, None, :].expand(bs, n, n, -1).type_as(probE) ) if self.use_charge: prob_charge = charge @ Qtb.charge # (bs, n, de_out) limit_charge = ( self.limit_dist.charge[None, None, :] .expand(bs, n, -1) .type_as(prob_charge) ) limit_charge = limit_charge.clone() else: prob_charge = limit_charge = None # Make sure that masked rows do not contribute to the loss ( limit_dist_X, limit_dist_E, probX, probE, limit_dist_charge, prob_charge,
class DiscreteDenoisingDiffusion(pl.LightningModule): model_dtype = torch.float32 best_val_nll = 1e8 val_counter = 0 start_epoch_time = None val_iterations = None def __init__( self, cfg, dataset_infos, train_metrics, extra_features, domain_features, val_sampling_metrics, test_sampling_metrics, ): super().__init__() self.in_dims = dataset_infos.input_dims self.out_dims = dataset_infos.output_dims self.use_charge = cfg.model.use_charge and self.out_dims.charge > 1 self.node_dist = dataset_infos.nodes_dist self.extra_features = extra_features self.domain_features = domain_features self.sign_net = cfg.model.sign_net if not self.sign_net: cfg.model.sn_hidden_dim = 0 # sparse settings self.edge_fraction = cfg.model.edge_fraction self.autoregressive = cfg.model.autoregressive self.cfg = cfg self.test_variance = cfg.general.test_variance self.dataset_info = dataset_infos self.visualization_tools = Visualizer(dataset_infos) self.name = cfg.general.name self.T = cfg.model.diffusion_steps self.train_loss = TrainLossDiscrete(cfg.model.lambda_train, self.edge_fraction) self.train_metrics = train_metrics self.val_sampling_metrics = val_sampling_metrics self.test_sampling_metrics = test_sampling_metrics # TODO: transform to torchmetrics.MetricCollection self.val_nll = NLL() # self.val_metrics = torchmetrics.MetricCollection([]) self.val_X_kl = SumExceptBatchKL() self.val_E_kl = SumExceptBatchKL() self.val_X_logp = SumExceptBatchMetric() self.val_E_logp = SumExceptBatchMetric() self.best_nll = 1e8 self.best_epoch = 0 # TODO: transform to torchmetrics.MetricCollection self.test_nll = NLL() self.test_X_kl = SumExceptBatchKL() self.test_E_kl = SumExceptBatchKL() self.test_X_logp = SumExceptBatchMetric() self.test_E_logp = SumExceptBatchMetric() if self.use_charge: self.val_charge_kl = SumExceptBatchKL() self.val_charge_logp = SumExceptBatchMetric() self.test_charge_kl = SumExceptBatchKL() self.test_charge_logp = SumExceptBatchMetric() self.model = GraphTransformerConv( n_layers=cfg.model.n_layers, input_dims=self.in_dims, hidden_dims=cfg.model.hidden_dims, output_dims=self.out_dims, sn_hidden_dim=cfg.model.sn_hidden_dim, output_y=cfg.model.output_y, dropout=cfg.model.dropout ) # whether to use sign net if self.sign_net and cfg.model.extra_features == "all": self.sign_net = SignNetNodeEncoder( dataset_infos, cfg.model.sn_hidden_dim, cfg.model.num_eigenvectors ) # whether to use scale layers self.scaling_layer = cfg.model.scaling_layer ( self.node_scaling_layer, self.edge_scaling_layer, self.graph_scaling_layer, ) = self.get_scaling_layers() self.noise_schedule = PredefinedNoiseScheduleDiscrete( cfg.model.diffusion_noise_schedule, timesteps=cfg.model.diffusion_steps ) # Marginal transition node_types = self.dataset_info.node_types.float() x_marginals = node_types / torch.sum(node_types) edge_types = self.dataset_info.edge_types.float() e_marginals = edge_types / torch.sum(edge_types) if not self.use_charge: charge_marginals = node_types.new_zeros(0) else: charge_marginals = ( self.dataset_info.charge_types * node_types[:, None] ).sum(dim=0) print( f"Marginal distribution of the classes: {x_marginals} for nodes, {e_marginals} for edges" ) self.transition_model = MarginalUniformTransition( x_marginals=x_marginals, e_marginals=e_marginals, y_classes=self.out_dims.y, charge_marginals=charge_marginals, ) self.limit_dist = utils.PlaceHolder( X=x_marginals, E=e_marginals, y=torch.ones(self.out_dims.y) / self.out_dims.y, charge=charge_marginals, ) self.save_hyperparameters(ignore=["train_metrics", 
"sampling_metrics"]) self.log_every_steps = cfg.general.log_every_steps self.number_chain_steps = cfg.general.number_chain_steps def training_step(self, data, i): # The above code is using the Python debugger module `pdb` to set a breakpoint at a specific # line of code. When the code is executed, it will pause at that line and allow you to # interactively debug the program. if data.edge_index.numel() == 0: print("Found a batch with no edges. Skipping.") return # Map discrete classes to one hot encoding data = self.dataset_info.to_one_hot(data) start_time = time.time() sparse_noisy_data = self.apply_sparse_noise(data) if hasattr(self, "apply_noise_time"): self.apply_noise_time.append(round(time.time() - start_time, 2)) # Sample the query edges and build the computational graph = union(noisy graph, query edges) start_time = time.time() # print(data.ptr.diff()) triu_query_edge_index, _ = sample_query_edges( num_nodes_per_graph=data.ptr.diff(), edge_proportion=self.edge_fraction ) query_mask, comp_edge_index, comp_edge_attr = get_computational_graph( triu_query_edge_index=triu_query_edge_index, clean_edge_index=sparse_noisy_data["edge_index_t"], clean_edge_attr=sparse_noisy_data["edge_attr_t"], ) # pass sparse comp_graph to dense comp_graph for ease calculation sparse_noisy_data["comp_edge_index_t"] = comp_edge_index sparse_noisy_data["comp_edge_attr_t"] = comp_edge_attr self.sample_query_time.append(round(time.time() - start_time, 2)) sparse_pred = self.forward(sparse_noisy_data) # Compute the loss on the query edges only sparse_pred.edge_attr = sparse_pred.edge_attr[query_mask] sparse_pred.edge_index = comp_edge_index[:, query_mask] # mask true label for query edges # We have the true edge index at time 0, and the query edge index at time t. This function # merge the query edges and edge index at time 0, delete repeated one, and retune the mask # for the true attr of query edges start_time = time.time() ( query_mask2, true_comp_edge_attr, true_comp_edge_index, ) = mask_query_graph_from_comp_graph( triu_query_edge_index=triu_query_edge_index, edge_index=data.edge_index, edge_attr=data.edge_attr, num_classes=self.out_dims.E, ) query_true_edge_attr = true_comp_edge_attr[query_mask2] assert ( true_comp_edge_index[:, query_mask2] - sparse_pred.edge_index == 0 ).all() self.query_count.append(len(query_true_edge_attr)) true_data = utils.SparsePlaceHolder( node=data.x, charge=data.charge, edge_attr=query_true_edge_attr, edge_index=sparse_pred.edge_index, y=data.y, batch=data.batch, ) true_data.collapse() # Map one-hot to discrete class self.coalesce_time.append(round(time.time() - start_time, 2)) # Loss calculation start_time = time.time() loss = self.train_loss.forward( pred=sparse_pred, true_data=true_data, log=i % self.log_every_steps == 0 ) self.train_metrics( pred=sparse_pred, true_data=true_data, log=i % self.log_every_steps == 0 ) self.loss_time.append(round(time.time() - start_time, 2)) return {"loss": loss} def on_fit_start(self) -> None: print( f"Size of the input features:" f" X {self.in_dims.X}, E {self.in_dims.E}, charge {self.in_dims.charge}, y {self.in_dims.y}" ) if self.local_rank == 0: utils.setup_wandb( self.cfg ) # Initialize wandb only on one process to log metrics only once def on_train_epoch_start(self) -> None: self.print("Starting train epoch...") self.start_epoch_time = time.time() self.train_loss.reset() self.train_metrics.reset() self.query_count = [] self.apply_noise_time = [] self.extra_data_time = [] self.forward_time = [] self.sample_query_time = [] self.coalesce_time 
= [] self.loss_time = [] self.cycle_time = [] self.eigen_time = [] def on_train_epoch_end(self) -> None: epoch_loss = self.train_loss.log_epoch_metrics() self.print( f"Epoch {self.current_epoch} finished: X: {epoch_loss['train_epoch/x_CE'] :.2f} -- " f"E: {epoch_loss['train_epoch/E_CE'] :.2f} --" f"charge: {epoch_loss['train_epoch/charge_CE'] :.2f} --" f"y: {epoch_loss['train_epoch/y_CE'] :.2f}" ) self.train_metrics.log_epoch_metrics() if wandb.run: wandb.log({"epoch": self.current_epoch}, commit=False) def on_validation_epoch_start(self) -> None: val_metrics = [self.val_nll, self.val_X_kl, self.val_E_kl, self.val_X_logp, self.val_E_logp, self.val_sampling_metrics] if self.use_charge: val_metrics.extend([self.val_charge_kl, self.val_charge_logp]) for metric in val_metrics: metric.reset() def validation_step(self, data, i): data = self.dataset_info.to_one_hot(data) sparse_noisy_data = self.apply_sparse_noise(data) # Sample the query edges and build the computational graph = union(noisy graph, query edges) triu_query_edge_index, _ = sample_query_edges( num_nodes_per_graph=data.ptr.diff(), edge_proportion=self.edge_fraction ) _, comp_edge_index, comp_edge_attr = get_computational_graph( triu_query_edge_index=triu_query_edge_index, clean_edge_index=sparse_noisy_data["edge_index_t"], clean_edge_attr=sparse_noisy_data["edge_attr_t"] ) # pass sparse comp_graph to dense comp_graph for ease calculation sparse_noisy_data["comp_edge_index_t"] = comp_edge_index sparse_noisy_data["comp_edge_attr_t"] = comp_edge_attr sparse_pred = self.forward(sparse_noisy_data) # to dense dense_pred, node_mask = utils.to_dense( x=sparse_pred.node, edge_index=sparse_pred.edge_index, edge_attr=sparse_pred.edge_attr, batch=sparse_pred.batch, charge=sparse_pred.charge, ) dense_original, _ = utils.to_dense( x=data.x, edge_index=data.edge_index, edge_attr=data.edge_attr, batch=data.batch, charge=data.charge, ) noisy_data = utils.densify_noisy_data(sparse_noisy_data) nll = self.compute_val_loss( dense_pred, noisy_data, dense_original.X, dense_original.E, dense_original.y, node_mask, charge=dense_original.charge, test=False, ) return {"loss": nll} def on_validation_epoch_end(self) -> None: metrics = [ self.val_nll.compute(), self.val_X_kl.compute() * self.T, self.val_E_kl.compute() * self.T, self.val_X_logp.compute(), self.val_E_logp.compute(), ] if self.use_charge: metrics += [ self.val_charge_kl.compute() * self.T, self.val_charge_logp.compute(), ] else: metrics += [-1, -1] if self.val_nll.compute() < self.best_nll: self.best_epoch = self.current_epoch self.best_nll = self.val_nll.compute() metrics += [self.best_epoch, self.best_nll] if wandb.run: wandb.log( { "val/epoch_NLL": metrics[0], "val/X_kl": metrics[1], "val/E_kl": metrics[2], "val/X_logp": metrics[3], "val/E_logp": metrics[4], "val/charge_kl": metrics[5], "val/charge_logp": metrics[6], "val/best_nll_epoch": metrics[7], "val/best_nll": metrics[8], }, commit=False, ) self.print( f"Epoch {self.current_epoch}: Val NLL {metrics[0] :.2f} -- Val Atom type KL {metrics[1] :.2f} -- ", f"Val Edge type KL: {metrics[2] :.2f}", ) # Log val nll with default Lightning logger, so it can be monitored by checkpoint callback val_nll = metrics[0] self.log("val/epoch_NLL", val_nll, sync_dist=True) if val_nll < self.best_val_nll: self.best_val_nll = val_nll self.print( "Val loss: %.4f \t Best val loss: %.4f\n" % (val_nll, self.best_val_nll) ) self.val_counter += 1 print("Starting to sample") if self.val_counter % self.cfg.general.sample_every_val == 0: start = time.time() 
samples_left_to_generate = self.cfg.general.samples_to_generate samples_left_to_save = self.cfg.general.samples_to_save chains_left_to_save = self.cfg.general.chains_to_save # multi gpu operation samples_left_to_generate = math.ceil(samples_left_to_generate / max(self._trainer.num_devices, 1)) self.print( f"Samples to generate: {samples_left_to_generate} for each of the {max(self._trainer.num_devices, 1)} devices" ) print(f"Sampling start on GR{self.global_rank}") print('multi-gpu metrics for uniqueness is not accurate in the validation step.') generated_graphs = [] ident = 0 while samples_left_to_generate > 0: bs = self.cfg.train.batch_size * 2 to_generate = min(samples_left_to_generate, bs) to_save = min(samples_left_to_save, bs) chains_save = min(chains_left_to_save, bs) sampled_batch = self.sample_batch( batch_id=ident, batch_size=to_generate, save_final=to_save, keep_chain=chains_save, number_chain_steps=self.number_chain_steps, ) generated_graphs.append(sampled_batch) ident += to_generate samples_left_to_save -= to_save samples_left_to_generate -= to_generate chains_left_to_save -= chains_save generated_graphs = utils.concat_sparse_graphs(generated_graphs) print( f"Sampled {generated_graphs.batch.max().item()+1} batches on local rank {self.local_rank}. ", "Sampling took {time.time() - start:.2f} seconds\n" ) print("Computing sampling metrics...") self.val_sampling_metrics.compute_all_metrics( generated_graphs, self.current_epoch, local_rank=self.local_rank ) def on_test_epoch_start(self) -> None: print("Starting test...") if self.local_rank == 0: utils.setup_wandb( self.cfg ) # Initialize wandb only on one process to log metrics only once test_metrics = [self.test_nll, self.test_X_kl, self.test_E_kl, self.test_X_logp, self.test_E_logp, self.test_sampling_metrics] if self.use_charge: test_metrics.extend([self.test_charge_kl, self.test_charge_logp]) for metric in test_metrics: metric.reset() def test_step(self, data, i): pass def on_test_epoch_end(self) -> None: """Measure likelihood on a test set and compute stability metrics.""" if self.cfg.general.generated_path: self.print("Loading generated samples...") samples = np.load(self.cfg.general.generated_path) with open(self.cfg.general.generated_path, "rb") as f: samples = pickle.load(f) else: samples_left_to_generate = self.cfg.general.final_model_samples_to_generate samples_left_to_save = self.cfg.general.final_model_samples_to_save chains_left_to_save = self.cfg.general.final_model_chains_to_save # multi gpu operation samples_left_to_generate = math.ceil(samples_left_to_generate / max(self._trainer.num_devices, 1)) self.print( f"Samples to generate: {samples_left_to_generate} for each of the {max(self._trainer.num_devices, 1)} devices" ) print(f"Sampling start on GR{self.global_rank}") samples = [] id = 0 while samples_left_to_generate > 0: print( f"Samples left to generate: {samples_left_to_generate}/" f"{self.cfg.general.final_model_samples_to_generate}", end="", flush=True, ) bs = self.cfg.train.batch_size * 2 to_generate = min(samples_left_to_generate, bs) to_save = min(samples_left_to_save, bs) chains_save = min(chains_left_to_save, bs) sampled_batch = self.sample_batch( batch_id=id, batch_size=to_generate, num_nodes=None, save_final=to_save, keep_chain=chains_save, number_chain_steps=self.number_chain_steps, ) samples.append(sampled_batch) id += to_generate samples_left_to_save -= to_save samples_left_to_generate -= to_generate chains_left_to_save -= chains_save print("Saving the generated graphs") samples = 
utils.concat_sparse_graphs(samples) filename = f"generated_samples1.txt" # Save the samples list as pickle to a file that depends on the local rank # This is needed to avoid overwriting the same file on different GPUs with open(f"generated_samples_rank{self.local_rank}.pkl", "wb") as f: pickle.dump(samples, f) # This line is used to sync between gpus self._trainer.strategy.barrier() for i in range(2, 10): if os.path.exists(filename): filename = f"generated_samples{i}.txt" else: break with open(filename, "w") as f: for i in range(samples.batch.max().item() + 1): atoms = samples.node[samples.batch == i] f.write(f"N={atoms.shape[0]}\n") atoms = atoms.tolist() f.write("X: \n") for at in atoms: f.write(f"{at} ") f.write("\n") f.write("E: \n") bonds = samples.edge_attr[samples.batch[samples.edge_index[0]] == i] for bond in bonds: f.write(f"{bond} ") f.write("\n") print("Saved.") print("Computing sampling metrics...") # Load the pickles of the other GPUs samples = [] for i in range(self._trainer.num_devices): with open(f"generated_samples_rank{i}.pkl", "rb") as f: samples.append(pickle.load(f)) samples = utils.concat_sparse_graphs(samples) print('saving all samples') with open(f"generated_samples.pkl", "wb") as f: pickle.dump(samples, f) if self.test_variance == 1: to_log, _ = self.test_sampling_metrics.compute_all_metrics( samples, self.current_epoch, self.local_rank ) # save results for testing print('saving results for testing') current_path = os.getcwd() res_path = os.path.join( current_path, f"test_epoch{self.current_epoch}.json", ) with open(res_path, 'w') as file: # Convert the dictionary to a JSON string and write it to the file json.dump(to_log, file) else: to_log = {} for i in range(self.test_variance): start_idx = int(self.cfg.general.final_model_samples_to_generate / self.test_variance * i) end_idx = int(self.cfg.general.final_model_samples_to_generate / self.test_variance * (i + 1)) cur_samples = utils.split_samples(samples, start_idx, end_idx) cur_to_log, _ = self.test_sampling_metrics.compute_all_metrics(cur_samples, self.current_epoch, self.local_rank) if i == 0: to_log = {i: [cur_to_log[i]] for i in cur_to_log} else: to_log = {i: to_log[i].append(cur_to_log[i]) for i in cur_to_log} # get the variance and mean value of the metrics final_to_log = {i: [np.mean(i), np.var(i)] for i in to_log} to_log.update(final_to_log) # save results for testing print('saving results for testing') current_path = os.getcwd() res_path = os.path.join( current_path, f"test_epoch{self.current_epoch}_fold{self.test_variance}.json", ) with open(res_path, 'w') as file: # Convert the dictionary to a JSON string and write it to the file json.dump(to_log, file) print("Test sampling metrics computed.") def apply_sparse_noise(self, data): """Sample noise and apply it to the data.""" bs = int(data.batch.max() + 1) t_int = torch.randint( 1, self.T + 1, size=(bs, 1), device=self.device ).float() # (bs, 1) s_int = t_int - 1 t_float = t_int / self.T s_float = s_int / self.T # beta_t and alpha_s_bar are used for denoising/loss computation beta_t = self.noise_schedule(t_normalized=t_float) # (bs, 1) alpha_s_bar = self.noise_schedule.get_alpha_bar(t_normalized=s_float) # (bs, 1) alpha_t_bar = self.noise_schedule.get_alpha_bar(t_normalized=t_float) # (bs, 1) Qtb = self.transition_model.get_Qt_bar( alpha_t_bar, device=self.device ) # (bs, dx_in, dx_out), (bs, de_in, de_out) assert (abs(Qtb.X.sum(dim=2) - 1.0) < 1e-4).all(), Qtb.X.sum(dim=2) - 1 assert (abs(Qtb.E.sum(dim=2) - 1.0) < 1e-4).all() # Compute transition 
probabilities # get charge distribution if self.use_charge: prob_charge = data.charge.unsqueeze(1) @ Qtb.charge[data.batch] charge_t = prob_charge.squeeze(1).multinomial(1).flatten() # (N, ) charge_t = F.one_hot(charge_t, num_classes=self.out_dims.charge) else: charge_t = data.charge # Diffuse sparse nodes and sample sparse node labels probN = data.x.unsqueeze(1) @ Qtb.X[data.batch] # (N, dx) node_t = probN.squeeze(1).multinomial(1).flatten() # (N, ) # count node numbers and edge numbers for existing edges for each graph num_nodes = data.ptr.diff().long() batch_edge = data.batch[data.edge_index[0]] num_edges = torch.zeros(num_nodes.shape).to(self.device) unique, counts = torch.unique(batch_edge, sorted=True, return_counts=True) num_edges[unique] = counts.float() # count number of non-existing edges for each graph num_neg_edge = ((num_nodes - 1) * num_nodes - num_edges) / 2 # (bs, ) # Step1: diffuse on existing edges # get edges defined in the top triangle of the adjacency matrix dir_edge_index, dir_edge_attr = utils.undirected_to_directed( data.edge_index, data.edge_attr ) batch_edge = data.batch[dir_edge_index[0]] batch_Qtb = Qtb.E[batch_edge] probE = dir_edge_attr.unsqueeze(1) @ batch_Qtb dir_edge_attr = probE.squeeze(1).multinomial(1).flatten() # Step2: diffuse on non-existing edges # get number of new edges according to Qtb emerge_prob = Qtb.E[:, 0, 1:].sum(-1) # (bs, ) num_emerge_edges = ( torch.distributions.binomial.Binomial(num_neg_edge, emerge_prob) .sample() .int() ) # combine existing and non-existing edges (both are directed, i.e. triu) if num_emerge_edges.max() > 0: # sample non-existing edges neg_edge_index = sample_non_existing_edges_batched( num_edges_to_sample=num_emerge_edges, existing_edge_index=dir_edge_index, num_nodes=num_nodes, batch=data.batch, ) neg_edge_attr = sample_non_existing_edge_attr( query_edges_dist_batch=Qtb.E[:, 0, 1:], num_edges_to_sample=num_emerge_edges, ) E_t_attr = torch.hstack([dir_edge_attr, neg_edge_attr]) E_t_index = torch.hstack([dir_edge_index, neg_edge_index]) else: E_t_attr = dir_edge_attr E_t_index = dir_edge_index # mask non-existing edges mask = E_t_attr != 0 E_t_attr = E_t_attr[mask] E_t_index = E_t_index[:, mask] E_t_index, E_t_attr = utils.to_undirected(E_t_index, E_t_attr) E_t_attr = F.one_hot(E_t_attr, num_classes=self.out_dims.E) node_t = F.one_hot(node_t, num_classes=self.out_dims.X) sparse_noisy_data = { "t_int": t_int, "t_float": t_float, "beta_t": beta_t, "alpha_s_bar": alpha_s_bar, "alpha_t_bar": alpha_t_bar, "node_t": node_t, "edge_index_t": E_t_index, "edge_attr_t": E_t_attr, "comp_edge_index_t": None, "comp_edge_attr_t": None, # computational graph "y_t": data.y, "batch": data.batch, "ptr": data.ptr, "charge_t": charge_t, } return sparse_noisy_data def compute_val_loss(self, pred, noisy_data, X, E, y, node_mask, charge, test): """Computes an estimator for the variational lower bound. pred: (batch_size, n, total_features) noisy_data: dict X, E, y : (bs, n, dx), (bs, n, n, de), (bs, dy) node_mask : (bs, n) Output: nll (size 1) """ t = noisy_data["t_float"] # 1. N = node_mask.sum(1).long() log_pN = self.node_dist.log_prob(N) # 2. The KL between q(z_T | x) and p(z_T) = Uniform(1/num_classes). Should be close to zero. kl_prior = self.kl_prior(X, E, node_mask, charge=charge) # 3. 
Diffusion loss loss_all_t = self.compute_Lt( X, E, y, charge, pred, noisy_data, node_mask, test=test ) # Combine terms nlls = - log_pN + kl_prior + loss_all_t assert (~nlls.isnan()).all(), f"NLLs contain NaNs: {nlls}" assert len(nlls.shape) == 1, f"{nlls.shape} has more than only batch dim." # Update NLL metric object and return batch nll nll = (self.test_nll if test else self.val_nll)(nlls) # Average over the batch if wandb.run: wandb.log( { "kl prior": kl_prior.mean(), "Estimator loss terms": loss_all_t.mean(), "log_pn": log_pN.mean(), "val_nll": nll, "epoch": self.current_epoch }, commit=False, ) return nll def kl_prior(self, X, E, node_mask, charge): """Computes the KL between q(z1 | x) and the prior p(z1) = Normal(0, 1). This is essentially a lot of work for something that is in practice negligible in the loss. However, you compute it so that you see it when you've made a mistake in your noise schedule. """ # Compute the last alpha value, alpha_T. ones = torch.ones((X.size(0), 1), device=X.device) Ts = self.T * ones alpha_t_bar = self.noise_schedule.get_alpha_bar(t_int=Ts) # (bs, 1) Qtb = self.transition_model.get_Qt_bar(alpha_t_bar, self.device) # Compute transition probabilities probX = X @ Qtb.X # (bs, n, dx_out) probE = E @ Qtb.E.unsqueeze(1) # (bs, n, n, de_out) assert probX.shape == X.shape bs, n, _ = probX.shape limit_X = self.limit_dist.X[None, None, :].expand(bs, n, -1).type_as(probX) limit_E = ( self.limit_dist.E[None, None, None, :].expand(bs, n, n, -1).type_as(probE) ) if self.use_charge: prob_charge = charge @ Qtb.charge # (bs, n, de_out) limit_charge = ( self.limit_dist.charge[None, None, :] .expand(bs, n, -1) .type_as(prob_charge) ) limit_charge = limit_charge.clone() else: prob_charge = limit_charge = None # Make sure that masked rows do not contribute to the loss ( limit_dist_X, limit_dist_E, probX, probE, limit_dist_charge, prob_charge,
) = diffusion_utils.mask_distributions(
1
2023-10-30 12:12:16+00:00
16k
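The context of the sample above centers on the condensed (upper-triangular) edge-index convention used by condensed_to_matrix_index_batch and the edge-sampling utilities. Below is a minimal, non-batched sketch of that conversion for a single graph, checked against the itertools.combinations ordering; the standalone function name and the self-check are illustrative assumptions, and the batched helper in the snippet additionally adds per-graph node offsets via ptr.

import itertools
import torch

def condensed_to_matrix_index(condensed_index: torch.Tensor, num_nodes: int) -> torch.Tensor:
    """Map condensed indices k in [0, n*(n-1)/2) to (i, j) node pairs with i < j."""
    bb = -2 * num_nodes + 1
    disc = (bb * bb - 8 * condensed_index).float()  # discriminant of the quadratic row equation
    ii = torch.div(-bb - disc.sqrt(), 2, rounding_mode="floor")
    jj = condensed_index + torch.div(ii * (bb + ii + 2), 2, rounding_mode="floor") + 1
    return torch.vstack((ii.long(), jj.long()))

n = 5
k = torch.arange(n * (n - 1) // 2)            # every upper-triangular slot of a 5-node graph
edge_index = condensed_to_matrix_index(k, n)  # shape (2, n*(n-1)/2)
expected = torch.tensor(list(itertools.combinations(range(n), 2))).T
assert torch.equal(edge_index, expected)      # same ordering as itertools.combinations(range(n), 2)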
akekic/causal-component-analysis
experiments/cauca/main.py
[ { "identifier": "DGP", "path": "config.py", "snippet": "DGP = {\n \"graph-4-0\": {\n \"num_causal_variables\": 4, # N\n \"adj_matrix\": np.array(\n [[0, 1, 1, 1], [0, 0, 1, 1], [0, 0, 0, 1], [0, 0, 0, 0]]\n ),\n \"int_targets\": torch.tensor(\n [[0, 0, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]\n ),\n \"num_samples_per_env\": 200_000,\n \"observation_dim\": 128, # D\n },\n \"graph-4-1\": {\n \"num_causal_variables\": 4, # N\n \"adj_matrix\": np.array(\n [[0, 1, 0, 1], [0, 0, 1, 0], [0, 0, 0, 1], [0, 0, 0, 0]]\n ),\n \"int_targets\": torch.tensor(\n [[0, 0, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]\n ),\n \"num_samples_per_env\": 200_000,\n \"observation_dim\": 128, # D\n },\n \"graph-4-2\": {\n \"num_causal_variables\": 4, # N\n \"adj_matrix\": np.array(\n [[0, 1, 1, 1], [0, 0, 1, 1], [0, 0, 0, 1], [0, 0, 0, 0]]\n ),\n \"int_targets\": torch.tensor(\n [\n [0, 0, 0, 0],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n ]\n ),\n \"num_samples_per_env\": 75_000,\n \"observation_dim\": 128, # D\n },\n \"graph-4-3\": {\n \"num_causal_variables\": 4, # N\n \"adj_matrix\": np.array(\n [[0, 1, 0, 1], [0, 0, 1, 0], [0, 0, 0, 1], [0, 0, 0, 0]]\n ),\n \"int_targets\": torch.tensor(\n [\n [0, 0, 0, 0],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n ]\n ),\n \"num_samples_per_env\": 75_000,\n \"observation_dim\": 128, # D\n },\n \"graph-4-4\": {\n \"num_causal_variables\": 4, # N\n \"adj_matrix\": np.array(\n [[0, 1, 1, 1], [0, 0, 1, 1], [0, 0, 0, 1], [0, 0, 0, 0]]\n ),\n \"int_targets\": torch.tensor(\n [\n [0, 0, 0, 0],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n ]\n ),\n \"num_samples_per_env\": 12_500,\n \"observation_dim\": 128, # D\n },\n \"graph-4-5\": {\n \"num_causal_variables\": 4, # N\n \"adj_matrix\": np.array(\n [[0, 0, 0, 0], [1, 0, 0, 0], [0, 0, 0, 0], [1, 0, 1, 0]]\n ),\n \"int_targets\": torch.tensor(\n [\n [0, 0, 0, 0],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n ]\n ),\n \"num_samples_per_env\": 75_000,\n \"observation_dim\": 128, # D\n },\n \"graph-4-6\": {\n \"num_causal_variables\": 10, # N\n \"adj_matrix\": np.array(\n [\n [0, 1, 1, 1, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 1, 1, 0, 0],\n [0, 0, 0, 0, 1, 0, 0, 0, 1, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 1, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 
0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 1, 0, 0, 0, 1, 1, 0],\n ]\n ),\n \"int_targets\": torch.tensor(\n [\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [1, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 1, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 1, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 1, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 1, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 1],\n ]\n ),\n \"num_samples_per_env\": 75_000,\n \"observation_dim\": 128, # D\n },\n \"graph-4-7\": {\n \"num_causal_variables\": 4, # N\n \"adj_matrix\": np.array(\n [[0, 1, 1, 1], [0, 0, 1, 1], [0, 0, 0, 1], [0, 0, 0, 0]]\n ),\n \"int_targets\": torch.tensor(\n [\n [0, 0, 0, 0],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n ]\n ),\n \"num_samples_per_env\": 100_000,\n \"observation_dim\": 4, # D\n },\n \"graph-4-8\": {\n \"num_causal_variables\": 4, # N\n \"adj_matrix\": np.array(\n [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]\n ),\n \"int_targets\": torch.tensor(\n [\n [0, 0, 0, 0],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n ]\n ),\n \"num_samples_per_env\": 200_000,\n \"observation_dim\": 4, # D\n },\n \"graph-4-9\": {\n \"num_causal_variables\": 4, # N\n \"adj_matrix\": np.array(\n [[0, 1, 1, 1], [0, 0, 1, 1], [0, 0, 0, 1], [0, 0, 0, 0]]\n ),\n \"int_targets\": torch.tensor(\n [[0, 0, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]\n ),\n \"num_samples_per_env\": 200_000,\n \"observation_dim\": 4, # D\n },\n \"graph-4-10\": {\n \"num_causal_variables\": 4, # N\n \"adj_matrix\": np.array(\n [[0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1], [0, 0, 0, 0]]\n ),\n \"int_targets\": torch.tensor(\n [[0, 0, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]\n ),\n \"num_samples_per_env\": 200_000,\n \"observation_dim\": 4, # D\n },\n \"graph-4-9-local\": {\n \"num_causal_variables\": 4, # N\n \"adj_matrix\": np.array(\n [[0, 1, 1, 1], [0, 0, 1, 1], [0, 0, 0, 1], [0, 0, 0, 0]]\n ),\n \"int_targets\": torch.tensor(\n [[0, 0, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]\n ),\n \"num_samples_per_env\": 2_000,\n \"observation_dim\": 4, # D\n },\n \"graph-4-random-1\": {\n \"num_causal_variables\": 4, # N\n \"adj_matrix\": None,\n \"edge_prob\": 0.5,\n \"int_targets\": torch.tensor(\n [[0, 0, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]\n ),\n \"num_samples_per_env\": 200_000,\n \"observation_dim\": 4, # D\n },\n \"graph-4-random-p000\": {\n \"num_causal_variables\": 4, # N\n \"adj_matrix\": np.array(\n [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]\n ),\n \"int_targets\": torch.tensor(\n [[0, 0, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]\n ),\n \"num_samples_per_env\": 200_000,\n \"observation_dim\": 4, # D\n },\n \"graph-4-random-p025\": {\n \"num_causal_variables\": 4, # N\n \"adj_matrix\": None,\n \"edge_prob\": 0.25,\n \"int_targets\": torch.tensor(\n [[0, 0, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]\n ),\n \"num_samples_per_env\": 200_000,\n \"observation_dim\": 4, # D\n },\n \"graph-4-random-p050\": {\n \"num_causal_variables\": 4, # N\n \"adj_matrix\": None,\n \"edge_prob\": 0.5,\n \"int_targets\": torch.tensor(\n [[0, 0, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]\n ),\n \"num_samples_per_env\": 200_000,\n \"observation_dim\": 4, # D\n },\n \"graph-4-random-p075\": {\n 
\"num_causal_variables\": 4, # N\n \"adj_matrix\": None,\n \"edge_prob\": 0.75,\n \"int_targets\": torch.tensor(\n [[0, 0, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]\n ),\n \"num_samples_per_env\": 200_000,\n \"observation_dim\": 4, # D\n },\n \"graph-4-random-p100\": {\n \"num_causal_variables\": 4, # N\n \"adj_matrix\": None,\n \"edge_prob\": 1.0,\n \"int_targets\": torch.tensor(\n [[0, 0, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]\n ),\n \"num_samples_per_env\": 200_000,\n \"observation_dim\": 4, # D\n },\n \"graph-4-random-1-local\": {\n \"num_causal_variables\": 4, # N\n \"adj_matrix\": None,\n \"edge_prob\": 0.5,\n \"int_targets\": torch.tensor(\n [[0, 0, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]\n ),\n \"num_samples_per_env\": 2_000,\n \"observation_dim\": 4, # D\n },\n \"graph-7-random-1\": {\n \"num_causal_variables\": 7, # N\n \"adj_matrix\": None,\n \"edge_prob\": 0.5,\n \"int_targets\": torch.tensor(\n [\n [0, 0, 0, 0, 0, 0, 0],\n [1, 0, 0, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 1, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 0],\n [0, 0, 0, 0, 0, 1, 0],\n [0, 0, 0, 0, 0, 0, 1],\n ]\n ),\n \"num_samples_per_env\": 200_000,\n \"observation_dim\": 7, # D\n },\n \"graph-7-random-1-local\": {\n \"num_causal_variables\": 7, # N\n \"adj_matrix\": None,\n \"edge_prob\": 0.5,\n \"int_targets\": torch.tensor(\n [\n [0, 0, 0, 0, 0, 0, 0],\n [1, 0, 0, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 1, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 0],\n [0, 0, 0, 0, 0, 1, 0],\n [0, 0, 0, 0, 0, 0, 1],\n ]\n ),\n \"num_samples_per_env\": 2_000,\n \"observation_dim\": 7, # D\n },\n \"graph-2-1\": {\n \"num_causal_variables\": 2, # N\n \"adj_matrix\": np.array([[0, 1], [0, 0]]),\n \"int_targets\": torch.tensor(\n [\n [0, 0],\n [1, 0],\n [0, 1],\n ]\n ),\n \"num_samples_per_env\": 200_000,\n \"observation_dim\": 2, # D\n },\n \"graph-2-2\": {\n \"num_causal_variables\": 2, # N\n \"adj_matrix\": np.array([[0, 0], [0, 0]]),\n \"int_targets\": torch.tensor(\n [\n [0, 0],\n [1, 0],\n [0, 1],\n ]\n ),\n \"num_samples_per_env\": 200_000,\n \"observation_dim\": 2, # D\n },\n \"graph-3-1\": {\n \"num_causal_variables\": 3, # N\n \"adj_matrix\": np.array([[0, 1, 1], [0, 0, 0], [0, 0, 0]]),\n \"int_targets\": torch.tensor(\n [\n [0, 0, 0],\n [1, 0, 0],\n [0, 1, 0],\n [0, 0, 1],\n ]\n ),\n \"num_samples_per_env\": 200_000,\n \"observation_dim\": 3, # D\n },\n \"graph-3-random-1\": {\n \"num_causal_variables\": 3, # N\n \"adj_matrix\": None,\n \"edge_prob\": 0.5,\n \"int_targets\": torch.tensor(\n [\n [0, 0, 0],\n [1, 0, 0],\n [0, 1, 0],\n [0, 0, 1],\n ]\n ),\n \"num_samples_per_env\": 200_000,\n \"observation_dim\": 3, # D\n },\n \"graph-5-random-1\": {\n \"num_causal_variables\": 5, # N\n \"adj_matrix\": None,\n \"edge_prob\": 0.5,\n \"int_targets\": torch.tensor(\n [\n [0, 0, 0, 0, 0],\n [1, 0, 0, 0, 0],\n [0, 1, 0, 0, 0],\n [0, 0, 1, 0, 0],\n [0, 0, 0, 1, 0],\n [0, 0, 0, 0, 1],\n ]\n ),\n \"num_samples_per_env\": 200_000,\n \"observation_dim\": 5, # D\n },\n}" }, { "identifier": "MultiEnvDataModule", "path": "data_generator/data_module.py", "snippet": "class MultiEnvDataModule(LightningDataModule):\n \"\"\"\n Data module for multi-environment data.\n\n Attributes\n ----------\n medgp: MultiEnvDGP\n Multi-environment data generating process.\n num_samples_per_env: int\n Number of samples per environment.\n batch_size: int\n Batch size.\n num_workers: int\n Number of workers for the data loaders.\n 
intervention_targets_per_env: Tensor, shape (num_envs, num_causal_variables)\n Intervention targets per environment, with 1 indicating that the variable is intervened on.\n log_dir: Optional[Path]\n Directory to save summary statistics and plots to. Default: None.\n intervention_target_misspec: bool\n Whether to misspecify the intervention targets. If true, the intervention targets are permuted.\n I.e. the model received the wrong intervention targets. Default: False.\n intervention_target_perm: Optional[list[int]]\n Permutation of the intervention targets. If None, a random permutation is used. Only used if\n intervention_target_misspec is True. Default: None.\n\n Methods\n -------\n setup(stage=None) -> None\n Setup the data module. This is where the data is sampled.\n train_dataloader() -> DataLoader\n Return the training data loader.\n val_dataloader() -> DataLoader\n Return the validation data loader.\n test_dataloader() -> DataLoader\n Return the test data loader.\n \"\"\"\n\n def __init__(\n self,\n multi_env_dgp: MultiEnvDGP,\n num_samples_per_env: int,\n batch_size: int,\n num_workers: int,\n intervention_targets_per_env: Tensor,\n log_dir: Optional[Path] = None,\n intervention_target_misspec: bool = False,\n intervention_target_perm: Optional[list[int]] = None,\n ) -> None:\n super().__init__()\n self.medgp = multi_env_dgp\n self.num_samples_per_env = num_samples_per_env\n self.batch_size = batch_size\n self.num_workers = num_workers\n self.intervention_targets_per_env = intervention_targets_per_env\n self.log_dir = log_dir\n\n self.intervention_target_misspec = intervention_target_misspec\n latent_dim = self.medgp.latent_scm.latent_dim\n assert (\n intervention_target_perm is None\n or len(intervention_target_perm) == latent_dim\n )\n self.intervention_target_perm = intervention_target_perm\n\n def setup(self, stage: Optional[str] = None) -> None:\n latent_dim = self.medgp.latent_scm.latent_dim\n num_envs = self.intervention_targets_per_env.shape[0]\n\n x, v, u, e, intervention_targets, log_prob = self.medgp.sample(\n self.num_samples_per_env,\n intervention_targets_per_env=self.intervention_targets_per_env,\n )\n if self.intervention_target_misspec:\n assert (\n num_envs == latent_dim + 1\n ), \"only works if num_envs == num_causal_variables + 1\"\n if self.intervention_target_perm is None:\n perm = random_perm(latent_dim)\n self.intervention_target_perm = perm\n else:\n perm = self.intervention_target_perm\n\n # remember where old targets were\n idx_mask_list = []\n for i in range(latent_dim):\n idx_mask = intervention_targets[:, i] == 1\n idx_mask_list.append(idx_mask)\n intervention_targets[idx_mask, i] = 0\n\n # permute targets\n for i in range(latent_dim):\n intervention_targets[idx_mask_list[i], perm[i]] = 1\n\n dataset = TensorDataset(x, v, u, e, intervention_targets, log_prob)\n train_size = int(0.8 * len(dataset))\n val_size = int(0.5 * (len(dataset) - train_size))\n test_size = len(dataset) - train_size - val_size\n (\n self.train_dataset,\n self.val_dataset,\n self.test_dataset,\n ) = torch.utils.data.random_split(dataset, [train_size, val_size, test_size])\n\n if self.log_dir is not None:\n self.log_dir.mkdir(parents=True, exist_ok=True)\n summary_stats = summary_statistics(x, v, e, intervention_targets)\n for key, value in summary_stats.items():\n value.to_csv(self.log_dir / f\"{key}_summary_stats.csv\")\n plot_dag(self.medgp.adjacency_matrix, self.log_dir)\n try:\n with open(self.log_dir / \"base_coeff_values.txt\", \"w\") as f:\n 
f.write(str(self.medgp.latent_scm.base_coeff_values))\n except AttributeError:\n pass\n # save mixing function coefficients\n self.medgp.mixing_function.save_coeffs(self.log_dir)\n\n def train_dataloader(self) -> DataLoader:\n return DataLoader(\n self.train_dataset,\n batch_size=self.batch_size,\n shuffle=True,\n num_workers=self.num_workers,\n )\n\n def val_dataloader(self) -> DataLoader:\n val_loader = DataLoader(\n self.val_dataset,\n batch_size=self.batch_size,\n shuffle=True,\n num_workers=self.num_workers,\n )\n return val_loader\n\n def test_dataloader(self) -> DataLoader:\n test_loader = DataLoader(\n self.test_dataset,\n batch_size=self.batch_size,\n shuffle=False,\n num_workers=self.num_workers,\n )\n return test_loader" }, { "identifier": "make_multi_env_dgp", "path": "data_generator/multi_env_gdp.py", "snippet": "def make_multi_env_dgp(\n latent_dim: int,\n observation_dim: int,\n adjacency_matrix: np.ndarray,\n intervention_targets_per_env: Tensor,\n shift_noise: bool = True,\n noise_shift_type: str = \"mean\",\n mixing: str = \"nonlinear\",\n scm: str = \"linear\",\n n_nonlinearities: int = 1,\n scm_coeffs_low: float = -1,\n scm_coeffs_high: float = 1,\n coeffs_min_abs_value: float = None,\n edge_prob: float = None,\n snr: float = 1.0,\n) -> MultiEnvDGP:\n \"\"\"\n Create a multi-environment data generating process (DGP).\n\n Parameters\n ----------\n latent_dim: int\n Dimension of the latent variables.\n observation_dim: int\n Dimension of the observed variables.\n adjacency_matrix: np.ndarray, shape (latent_dim, latent_dim)\n Adjacency matrix of the latent SCM.\n intervention_targets_per_env: Tensor, shape (num_envs, latent_dim)\n Intervention targets per environment, with 1 indicating that the variable is intervened on\n and 0 indicating that the variable is not intervened on. This variable also implicitly defines\n the number of environments.\n shift_noise: bool\n Whether to shift the noise distribution for variables that are intervened on. Default: False.\n noise_shift_type: str\n Whether to shift the mean or standard deviation of the noise distribution for variables that are intervened on.\n Options: \"mean\" or \"std\". Default: \"mean\".\n mixing: str\n Mixing function. Options: \"linear\" or \"nonlinear\". Default: \"nonlinear\".\n scm: str\n Latent SCM. Options: \"linear\" or \"location-scale\". Default: \"linear\".\n n_nonlinearities: int\n Number of nonlinearities in the nonlinear mixing function. Default: 1.\n scm_coeffs_low: float\n Lower bound of the SCM coefficients in linear SCMs. Default: -1.\n scm_coeffs_high: float\n Upper bound of the SCM coefficients in linear SCMs. Default: 1.\n coeffs_min_abs_value: float\n Minimum absolute value of the SCM coefficients in linear SCMs. If None, no minimum absolute value is enforced.\n Default: None.\n edge_prob: float\n Probability of an edge in the adjacency matrix if no adjacency matrix is given. Default: None.\n snr: float\n Signal-to-noise ratio of the location-scale SCM. 
Default: 1.0.\n\n Returns\n -------\n medgp: MultiEnvDGP\n Multi-environment data generating process.\n \"\"\"\n if mixing == \"linear\":\n mixing_function = LinearMixing(\n latent_dim=latent_dim, observation_dim=observation_dim\n )\n elif mixing == \"nonlinear\":\n mixing_function = NonlinearMixing(\n latent_dim=latent_dim,\n observation_dim=observation_dim,\n n_nonlinearities=n_nonlinearities,\n )\n else:\n raise ValueError(f\"Unknown mixing function {mixing}\")\n\n # if adjacency_matrix is not given as numpy array, sample a random one\n if not isinstance(adjacency_matrix, np.ndarray):\n assert (\n edge_prob is not None\n ), \"edge_prob must be given if no adjacency_matrix is given\"\n adjacency_matrix = sample_random_dag(latent_dim, edge_prob)\n adjacency_matrix = adjacency_matrix\n\n if scm == \"linear\":\n latent_scm = LinearSCM(\n adjacency_matrix=adjacency_matrix,\n latent_dim=latent_dim,\n intervention_targets_per_env=intervention_targets_per_env,\n coeffs_low=scm_coeffs_low,\n coeffs_high=scm_coeffs_high,\n coeffs_min_abs_value=coeffs_min_abs_value,\n )\n elif scm == \"location-scale\":\n latent_scm = LocationScaleSCM(\n adjacency_matrix=adjacency_matrix,\n latent_dim=latent_dim,\n intervention_targets_per_env=intervention_targets_per_env,\n snr=snr,\n )\n else:\n raise ValueError(f\"Unknown SCM {scm}\")\n\n noise_generator = GaussianNoise(\n latent_dim=latent_dim,\n intervention_targets_per_env=intervention_targets_per_env,\n shift=shift_noise,\n shift_type=noise_shift_type,\n )\n medgp = MultiEnvDGP(\n latent_scm=latent_scm,\n noise_generator=noise_generator,\n mixing_function=mixing_function,\n )\n return medgp" }, { "identifier": "LinearCauCAModel", "path": "model/cauca_model.py", "snippet": "class LinearCauCAModel(CauCAModel):\n \"\"\"\n CauCA model with linear unmixing function.\n \"\"\"\n\n def __init__(\n self,\n latent_dim: int,\n adjacency_matrix: np.ndarray,\n intervention_targets_per_env: Tensor,\n lr: float = 1e-2,\n weight_decay: float = 0,\n lr_scheduler: Optional[str] = None,\n lr_min: float = 0.0,\n adjacency_misspecified: bool = False,\n fix_mechanisms: bool = True,\n nonparametric_base_distr: bool = False,\n ) -> None:\n super().__init__(\n latent_dim=latent_dim,\n adjacency_matrix=adjacency_matrix,\n lr=lr,\n weight_decay=weight_decay,\n lr_scheduler=lr_scheduler,\n lr_min=lr_min,\n adjacency_misspecified=adjacency_misspecified,\n )\n self.encoder = LinearCauCAEncoder(\n latent_dim,\n self.adjacency_matrix, # this is the misspecified adjacency matrix if adjacency_misspecified=True\n intervention_targets_per_env=intervention_targets_per_env,\n fix_mechanisms=fix_mechanisms,\n nonparametric_base_distr=nonparametric_base_distr,\n )\n self.save_hyperparameters()" }, { "identifier": "NaiveNonlinearModel", "path": "model/cauca_model.py", "snippet": "class NaiveNonlinearModel(CauCAModel):\n \"\"\"\n Naive CauCA model with nonlinear unmixing function. 
It assumes no causal dependencies.\n \"\"\"\n\n def __init__(\n self,\n latent_dim: int,\n adjacency_matrix: np.ndarray,\n lr: float = 1e-2,\n weight_decay: float = 0,\n lr_scheduler: Optional[str] = None,\n lr_min: float = 0.0,\n adjacency_misspecified: bool = False,\n k_flows: int = 1,\n intervention_targets_per_env: Optional[torch.Tensor] = None,\n net_hidden_dim: int = 128,\n net_hidden_layers: int = 3,\n ) -> None:\n super().__init__(\n latent_dim=latent_dim,\n adjacency_matrix=adjacency_matrix,\n lr=lr,\n weight_decay=weight_decay,\n lr_scheduler=lr_scheduler,\n lr_min=lr_min,\n adjacency_misspecified=adjacency_misspecified,\n )\n self.encoder = NaiveEncoder(\n latent_dim,\n self.adjacency_matrix, # this is the misspecified adjacency matrix if adjacency_misspecified=True\n K=k_flows,\n intervention_targets_per_env=intervention_targets_per_env,\n net_hidden_dim=net_hidden_dim,\n net_hidden_layers=net_hidden_layers,\n )\n self.save_hyperparameters()" }, { "identifier": "NonlinearCauCAModel", "path": "model/cauca_model.py", "snippet": "class NonlinearCauCAModel(CauCAModel):\n \"\"\"\n CauCA model with nonlinear unmixing function.\n\n Additional attributes\n ---------------------\n k_flows : int\n Number of flows to use in the nonlinear unmixing function. Default: 1.\n net_hidden_dim : int\n Hidden dimension of the neural network used in the nonlinear unmixing function. Default: 128.\n net_hidden_layers : int\n Number of hidden layers of the neural network used in the nonlinear unmixing function. Default: 3.\n fix_mechanisms : bool\n Some mechanisms can be fixed to a simple gaussian distribution without loss of generality.\n This has only an effect for the parametric base distribution. If True, these mechanisms are fixed.\n Default: True.\n fix_all_intervention_targets : bool\n When fixable mechanisms are fixed, this parameter determines whether all intervention targets\n are fixed (option 1) or all intervention targets which are non-root nodes together with all\n non-intervened root nodes (option 2). See documentation of ParamMultiEnvCausalDistribution\n for more details. Default: False.\n nonparametric_base_distr : bool\n Whether to use a nonparametric base distribution for the flows. If false, a parametric linear\n gaussian causal base distribution is used. Default: False.\n K_cbn : int\n Number of flows to use in the nonlinear nonparametric base distribution. Default: 3.\n net_hidden_dim_cbn : int\n Hidden dimension of the neural network used in the nonlinear nonparametric base distribution. Default: 128.\n net_hidden_layers_cbn : int\n Number of hidden layers of the neural network used in the nonlinear nonparametric base distribution. 
Default: 3.\n \"\"\"\n\n def __init__(\n self,\n latent_dim: int,\n adjacency_matrix: np.ndarray,\n intervention_targets_per_env: Tensor,\n lr: float = 1e-2,\n weight_decay: float = 0,\n lr_scheduler: Optional[str] = None,\n lr_min: float = 0.0,\n adjacency_misspecified: bool = False,\n k_flows: int = 1,\n net_hidden_dim: int = 128,\n net_hidden_layers: int = 3,\n fix_mechanisms: bool = True,\n fix_all_intervention_targets: bool = False,\n nonparametric_base_distr: bool = False,\n K_cbn: int = 3,\n net_hidden_dim_cbn: int = 128,\n net_hidden_layers_cbn: int = 3,\n ) -> None:\n super().__init__(\n latent_dim=latent_dim,\n adjacency_matrix=adjacency_matrix,\n lr=lr,\n weight_decay=weight_decay,\n lr_scheduler=lr_scheduler,\n lr_min=lr_min,\n adjacency_misspecified=adjacency_misspecified,\n )\n self.encoder = NonlinearCauCAEncoder(\n latent_dim,\n self.adjacency_matrix, # this is the misspecified adjacency matrix if adjacency_misspecified=True\n K=k_flows,\n intervention_targets_per_env=intervention_targets_per_env,\n net_hidden_dim=net_hidden_dim,\n net_hidden_layers=net_hidden_layers,\n fix_mechanisms=fix_mechanisms,\n fix_all_intervention_targets=fix_all_intervention_targets,\n nonparametric_base_distr=nonparametric_base_distr,\n K_cbn=K_cbn,\n net_hidden_dim_cbn=net_hidden_dim_cbn,\n net_hidden_layers_cbn=net_hidden_layers_cbn,\n )\n self.save_hyperparameters()" } ]
import argparse
import os
import pytorch_lightning as pl
from pathlib import Path
from pytorch_lightning.loggers import WandbLogger
from config import DGP
from data_generator import MultiEnvDataModule, make_multi_env_dgp
from model.cauca_model import LinearCauCAModel, NaiveNonlinearModel, NonlinearCauCAModel
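The DGP entries above that set "adj_matrix" to None rely on the edge_prob fallback documented in make_multi_env_dgp. A minimal illustrative sketch of that path, assuming the config key "graph-4-random-p050" and the default mixing/SCM settings from the snippets above (this is not the training script itself):

# Illustrative sketch, not the training script: build and sample a DGP whose
# adjacency matrix is drawn at random because "adj_matrix" is None in the config.
cfg = DGP["graph-4-random-p050"]
dgp = make_multi_env_dgp(
    latent_dim=cfg["num_causal_variables"],
    observation_dim=cfg["observation_dim"],
    adjacency_matrix=cfg["adj_matrix"],              # None -> random DAG with edge_prob
    intervention_targets_per_env=cfg["int_targets"],
    edge_prob=cfg.get("edge_prob", None),
)
x, v, u, e, targets, log_prob = dgp.sample(
    cfg["num_samples_per_env"],
    intervention_targets_per_env=cfg["int_targets"],
)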
12,011
) parser.add_argument( "--function-misspec", type=bool, default=False, action=argparse.BooleanOptionalAction, help="Misspecify function class - assume linear.", ) parser.add_argument( "--net-hidden-layers", type=int, default=3, help="Number of hidden layers in nonlinear encoder.", ) parser.add_argument( "--net-hidden-layers-cbn", type=int, default=3, help="Number of hidden layers in latent CBN model.", ) parser.add_argument( "--net-hidden-dim", type=int, default=128, help="Number of hidden dimensions in nonlinear encoder.", ) parser.add_argument( "--net-hidden-dim-cbn", type=int, default=128, help="Number of hidden dimensions in latent CBN model.", ) parser.add_argument( "--fix-mechanisms", type=bool, default=True, action=argparse.BooleanOptionalAction, help="Fix fixable mechanisms in latents.", ) parser.add_argument( "--fix-all-intervention-targets", type=bool, default=False, action=argparse.BooleanOptionalAction, help="Fix all intervention targets.", ) parser.add_argument( "--nonparametric-base-distr", type=bool, default=False, action=argparse.BooleanOptionalAction, help="Use nonparametric base distribution for flows.", ) parser.add_argument( "--wandb", type=bool, default=True, action=argparse.BooleanOptionalAction, help="Whether to log to weights and biases.", ) parser.add_argument( "--wandb-project", type=str, default="cauca", help="Weights & Biases project name.", ) args = parser.parse_args() if args.function_misspec: assert ( args.mixing == "nonlinear" and args.model == "linear" ), "Function not misspecified." if args.wandb: wandb_logger = WandbLogger(project=args.wandb_project) wandb_logger.experiment.config.update(args, allow_val_change=True) checkpoint_dir = ( Path(args.checkpoint_root_dir) / f"{wandb_logger.experiment.id}" ) logger = [wandb_logger] else: checkpoint_dir = Path(args.checkpoint_root_dir) / "default" logger = None checkpoint_callback = pl.callbacks.ModelCheckpoint( dirpath=checkpoint_dir, save_last=True, every_n_epochs=args.check_val_every_n_epoch, ) multi_env_dgp = make_multi_env_dgp( latent_dim=DGP[args.dgp]["num_causal_variables"], observation_dim=DGP[args.dgp]["observation_dim"], adjacency_matrix=DGP[args.dgp]["adj_matrix"], intervention_targets_per_env=DGP[args.dgp]["int_targets"], noise_shift_type=args.noise_shift_type, mixing=args.mixing, scm=args.scm, n_nonlinearities=args.n_nonlinearities, scm_coeffs_low=args.scm_coeffs_low, scm_coeffs_high=args.scm_coeffs_high, coeffs_min_abs_value=args.scm_coeffs_min_abs_value, edge_prob=DGP[args.dgp].get("edge_prob", None), snr=args.snr, ) data_module = MultiEnvDataModule( multi_env_dgp=multi_env_dgp, num_samples_per_env=DGP[args.dgp]["num_samples_per_env"], batch_size=args.batch_size, num_workers=os.cpu_count(), intervention_targets_per_env=DGP[args.dgp]["int_targets"], log_dir=checkpoint_dir / "data_stats", ) data_module.setup() pl.seed_everything(args.training_seed, workers=True) intervention_targets_per_env = DGP[args.dgp]["int_targets"] # Model Initialization if args.model == "nonlinear":
if __name__ == "__main__": parser = argparse.ArgumentParser( description="Run experiment for Causal Component Analysis (CauCA)." ) parser.add_argument( "--max-epochs", type=int, default=10, help="Number of epochs to train for.", ) parser.add_argument( "--accelerator", type=str, default="gpu", help="Accelerator to use for training.", ) parser.add_argument( "--batch-size", type=int, default=1024, help="Number of samples per batch.", ) parser.add_argument( "--lr", type=float, default=1e-4, help="Learning rate for Adam optimizer.", ) parser.add_argument( "--checkpoint-root-dir", type=str, default="checkpoints", help="Checkpoint root directory.", ) parser.add_argument( "--noise-shift-type", type=str, default="mean", choices=["mean", "std"], help="Property of noise distribution that is shifted between environments.", ) parser.add_argument( "--check-val-every-n-epoch", type=int, default=1, help="Check validation loss every n epochs.", ) parser.add_argument( "--dgp", type=str, default="graph-4-0", help="Data generation process to use.", ) parser.add_argument( "--k-flows", type=int, default=1, help="Number of flows to use in nonlinear ICA model.", ) parser.add_argument( "--k-flows-cbn", type=int, default=3, help="Number of flows to use in nonlinear latent CBN model.", ) parser.add_argument( "--model", type=str, default="nonlinear", help="Type of encoder to use.", choices=["linear", "nonlinear", "naive"], ) parser.add_argument( "--seed", type=int, default=42, ) parser.add_argument( "--training-seed", type=int, default=42, ) parser.add_argument( "--mixing", type=str, default="nonlinear", help="Type of mixing function to use.", choices=["linear", "nonlinear"], ) parser.add_argument( "--scm", type=str, default="linear", help="Type of SCM to use.", choices=["linear", "location-scale"], ) parser.add_argument( "--n-nonlinearities", type=int, default=1, help="Number of nonlinearities to use in nonlinear mixing function.", ) parser.add_argument( "--learn-scm-params", type=bool, default=True, action=argparse.BooleanOptionalAction, help="Whether to learn SCM parameters.", ) parser.add_argument( "--lr-scheduler", type=str, default=None, help="Learning rate scheduler.", choices=[None, "cosine"], ) parser.add_argument( "--lr-min", type=float, default=0.0, help="Minimum learning rate for cosine learning rate scheduler.", ) parser.add_argument( "--scm-coeffs-low", type=float, default=-1, help="Lower bound for SCM coefficients.", ) parser.add_argument( "--scm-coeffs-high", type=float, default=1, help="Upper bound for SCM coefficients.", ) parser.add_argument( "--scm-coeffs-min-abs-value", type=float, default=None, help="Minimum absolute value for SCM coefficients.", ) parser.add_argument( "--snr", type=float, default=1.0, help="Signal-to-noise ratio in latent SCM.", ) parser.add_argument( "--adjacency-misspec", type=bool, default=False, action=argparse.BooleanOptionalAction, help="Misspecify adjacency matrix - assume ICA.", ) parser.add_argument( "--function-misspec", type=bool, default=False, action=argparse.BooleanOptionalAction, help="Misspecify function class - assume linear.", ) parser.add_argument( "--net-hidden-layers", type=int, default=3, help="Number of hidden layers in nonlinear encoder.", ) parser.add_argument( "--net-hidden-layers-cbn", type=int, default=3, help="Number of hidden layers in latent CBN model.", ) parser.add_argument( "--net-hidden-dim", type=int, default=128, help="Number of hidden dimensions in nonlinear encoder.", ) parser.add_argument( "--net-hidden-dim-cbn", type=int, default=128, 
help="Number of hidden dimensions in latent CBN model.", ) parser.add_argument( "--fix-mechanisms", type=bool, default=True, action=argparse.BooleanOptionalAction, help="Fix fixable mechanisms in latents.", ) parser.add_argument( "--fix-all-intervention-targets", type=bool, default=False, action=argparse.BooleanOptionalAction, help="Fix all intervention targets.", ) parser.add_argument( "--nonparametric-base-distr", type=bool, default=False, action=argparse.BooleanOptionalAction, help="Use nonparametric base distribution for flows.", ) parser.add_argument( "--wandb", type=bool, default=True, action=argparse.BooleanOptionalAction, help="Whether to log to weights and biases.", ) parser.add_argument( "--wandb-project", type=str, default="cauca", help="Weights & Biases project name.", ) args = parser.parse_args() if args.function_misspec: assert ( args.mixing == "nonlinear" and args.model == "linear" ), "Function not misspecified." if args.wandb: wandb_logger = WandbLogger(project=args.wandb_project) wandb_logger.experiment.config.update(args, allow_val_change=True) checkpoint_dir = ( Path(args.checkpoint_root_dir) / f"{wandb_logger.experiment.id}" ) logger = [wandb_logger] else: checkpoint_dir = Path(args.checkpoint_root_dir) / "default" logger = None checkpoint_callback = pl.callbacks.ModelCheckpoint( dirpath=checkpoint_dir, save_last=True, every_n_epochs=args.check_val_every_n_epoch, ) multi_env_dgp = make_multi_env_dgp( latent_dim=DGP[args.dgp]["num_causal_variables"], observation_dim=DGP[args.dgp]["observation_dim"], adjacency_matrix=DGP[args.dgp]["adj_matrix"], intervention_targets_per_env=DGP[args.dgp]["int_targets"], noise_shift_type=args.noise_shift_type, mixing=args.mixing, scm=args.scm, n_nonlinearities=args.n_nonlinearities, scm_coeffs_low=args.scm_coeffs_low, scm_coeffs_high=args.scm_coeffs_high, coeffs_min_abs_value=args.scm_coeffs_min_abs_value, edge_prob=DGP[args.dgp].get("edge_prob", None), snr=args.snr, ) data_module = MultiEnvDataModule( multi_env_dgp=multi_env_dgp, num_samples_per_env=DGP[args.dgp]["num_samples_per_env"], batch_size=args.batch_size, num_workers=os.cpu_count(), intervention_targets_per_env=DGP[args.dgp]["int_targets"], log_dir=checkpoint_dir / "data_stats", ) data_module.setup() pl.seed_everything(args.training_seed, workers=True) intervention_targets_per_env = DGP[args.dgp]["int_targets"] # Model Initialization if args.model == "nonlinear":
model = NonlinearCauCAModel(
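The script above is cut exactly at this call. A hypothetical continuation of the "nonlinear" branch, assembled only from the NonlinearCauCAModel constructor signature and the argparse flags shown earlier; it may differ from the repository's actual code:

# Hypothetical continuation sketch; argument wiring inferred from the constructor
# signature and CLI flags above, not copied from the repository.
model = NonlinearCauCAModel(
    latent_dim=DGP[args.dgp]["num_causal_variables"],
    adjacency_matrix=multi_env_dgp.adjacency_matrix,  # assumption: DAG taken from the sampled DGP
    intervention_targets_per_env=intervention_targets_per_env,
    lr=args.lr,
    lr_scheduler=args.lr_scheduler,
    lr_min=args.lr_min,
    adjacency_misspecified=args.adjacency_misspec,
    k_flows=args.k_flows,
    net_hidden_dim=args.net_hidden_dim,
    net_hidden_layers=args.net_hidden_layers,
    fix_mechanisms=args.fix_mechanisms,
    fix_all_intervention_targets=args.fix_all_intervention_targets,
    nonparametric_base_distr=args.nonparametric_base_distr,
    K_cbn=args.k_flows_cbn,
    net_hidden_dim_cbn=args.net_hidden_dim_cbn,
    net_hidden_layers_cbn=args.net_hidden_layers_cbn,
)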
5
2023-10-25 09:25:26+00:00
16k
endo-yuki-t/MAG
ldm/models/diffusion/ddpm.py
[ { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('data/DejaVuSans.ttf', size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "exists", "path": "ldm/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "default", "path": "ldm/util.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" }, { "identifier": "ismap", "path": "ldm/util.py", "snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)" }, { "identifier": "isimage", "path": "ldm/util.py", "snippet": "def isimage(x):\n if not isinstance(x,torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)" }, { "identifier": "mean_flat", "path": "ldm/util.py", "snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))" }, { "identifier": "count_params", "path": "ldm/util.py", "snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.\")\n return total_params" }, { "identifier": "instantiate_from_config", "path": "ldm/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "LitEma", "path": "ldm/modules/ema.py", "snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n\n self.m_name2s_name = {}\n self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))\n self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int) if use_num_upates\n else torch.tensor(-1, dtype=torch.int))\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n # remove as '.'-character is not allowed in buffers\n s_name = name.replace('.', '')\n self.m_name2s_name.update({name: s_name})\n self.register_buffer(s_name, p.clone().detach().data)\n\n self.collected_params = []\n\n def reset_num_updates(self):\n del self.num_updates\n self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int))\n\n def forward(self, model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 
1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)" }, { "identifier": "normal_kl", "path": "ldm/modules/distributions/distributions.py", "snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. 
Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )" }, { "identifier": "DiagonalGaussianDistribution", "path": "ldm/modules/distributions/distributions.py", "snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.])\n else:\n if other is None:\n return 0.5 * torch.sum(torch.pow(self.mean, 2)\n + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3])\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var - 1.0 - self.logvar + other.logvar,\n dim=[1, 2, 3])\n\n def nll(self, sample, dims=[1,2,3]):\n if self.deterministic:\n return torch.Tensor([0.])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims)\n\n def mode(self):\n return self.mean" }, { "identifier": "IdentityFirstStage", "path": "ldm/models/autoencoder.py", "snippet": "class IdentityFirstStage(torch.nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x" }, { "identifier": "AutoencoderKL", "path": "ldm/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ema_decay=None,\n learn_logvar=False\n ):\n super().__init__()\n self.learn_logvar = learn_logvar\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2*ddconfig[\"z_channels\"], 2*embed_dim, 1)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig[\"z_channels\"], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels)==int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n\n self.use_ema = ema_decay is not None\n if self.use_ema:\n self.ema_decay = ema_decay\n assert 0. 
< ema_decay < 1.\n self.model_ema = LitEma(self, decay=ema_decay)\n print(f\"Keeping EMAs of {len(list(self.model_ema.buffers()))}.\")\n\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n\n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n @contextmanager\n def ema_scope(self, context=None):\n if self.use_ema:\n self.model_ema.store(self.parameters())\n self.model_ema.copy_to(self)\n if context is not None:\n print(f\"{context}: Switched to EMA weights\")\n try:\n yield None\n finally:\n if self.use_ema:\n self.model_ema.restore(self.parameters())\n if context is not None:\n print(f\"{context}: Restored training weights\")\n\n def on_train_batch_end(self, *args, **kwargs):\n if self.use_ema:\n self.model_ema(self)\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n self.log(\"aeloss\", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n\n self.log(\"discloss\", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return discloss\n\n def validation_step(self, batch, batch_idx):\n log_dict = self._validation_step(batch, batch_idx)\n with self.ema_scope():\n log_dict_ema = self._validation_step(batch, batch_idx, postfix=\"_ema\")\n return log_dict\n\n def _validation_step(self, batch, batch_idx, postfix=\"\"):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\"+postfix)\n\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\"+postfix)\n\n self.log(f\"val{postfix}/rec_loss\", log_dict_ae[f\"val{postfix}/rec_loss\"])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n def 
configure_optimizers(self):\n lr = self.learning_rate\n ae_params_list = list(self.encoder.parameters()) + list(self.decoder.parameters()) + list(\n self.quant_conv.parameters()) + list(self.post_quant_conv.parameters())\n if self.learn_logvar:\n print(f\"{self.__class__.__name__}: Learning logvar\")\n ae_params_list.append(self.loss.logvar)\n opt_ae = torch.optim.Adam(ae_params_list,\n lr=lr, betas=(0.5, 0.9))\n opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),\n lr=lr, betas=(0.5, 0.9))\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, log_ema=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n if log_ema or self.use_ema:\n with self.ema_scope():\n xrec_ema, posterior_ema = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec_ema.shape[1] > 3\n xrec_ema = self.to_rgb(xrec_ema)\n log[\"samples_ema\"] = self.decode(torch.randn_like(posterior_ema.sample()))\n log[\"reconstructions_ema\"] = xrec_ema\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.*(x-x.min())/(x.max()-x.min()) - 1.\n return x" }, { "identifier": "make_beta_schedule", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):\n if schedule == \"linear\":\n betas = (\n torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n elif schedule == \"sqrt\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()" }, { "identifier": "extract_into_tensor", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" }, { "identifier": "noise_like", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "DDIMSampler", "path": "ldm/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n 
self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n dynamic_threshold=None,\n ucg_schedule=None,\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n ctmp = conditioning[list(conditioning.keys())[0]]\n while isinstance(ctmp, list): ctmp = ctmp[0]\n cbs = ctmp.shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n elif isinstance(conditioning, list):\n for ctmp in conditioning:\n if ctmp.shape[0] != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n ucg_schedule=ucg_schedule\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None,\n ucg_schedule=None):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. 
- mask) * img\n\n if ucg_schedule is not None:\n assert len(ucg_schedule) == len(time_range)\n unconditional_guidance_scale = ucg_schedule[i]\n\n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,\n dynamic_threshold=None):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n model_output = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n if isinstance(c, dict):\n assert isinstance(unconditional_conditioning, dict)\n c_in = dict()\n for k in c:\n if isinstance(c[k], list):\n c_in[k] = [torch.cat([\n unconditional_conditioning[k][i],\n c[k][i]]) for i in range(len(c[k]))]\n else:\n c_in[k] = torch.cat([\n unconditional_conditioning[k],\n c[k]])\n elif isinstance(c, list):\n c_in = list()\n assert isinstance(unconditional_conditioning, list)\n for i in range(len(c)):\n c_in.append(torch.cat([unconditional_conditioning[i], c[i]]))\n else:\n c_in = torch.cat([unconditional_conditioning, c])\n model_uncond, model_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond)\n\n if self.model.parameterization == \"v\":\n e_t = self.model.predict_eps_from_z_and_v(x, t, model_output)\n else:\n e_t = model_output\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\", 'not implemented'\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n if self.model.parameterization != \"v\":\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n else:\n pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n raise 
NotImplementedError()\n\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None,\n unconditional_guidance_scale=1.0, unconditional_conditioning=None, callback=None):\n num_reference_steps = self.ddpm_num_timesteps if use_original_steps else self.ddim_timesteps.shape[0]\n\n assert t_enc <= num_reference_steps\n num_steps = t_enc\n\n if use_original_steps:\n alphas_next = self.alphas_cumprod[:num_steps]\n alphas = self.alphas_cumprod_prev[:num_steps]\n else:\n alphas_next = self.ddim_alphas[:num_steps]\n alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])\n\n x_next = x0\n intermediates = []\n inter_steps = []\n for i in tqdm(range(num_steps), desc='Encoding Image'):\n t = torch.full((x0.shape[0],), i, device=self.model.device, dtype=torch.long)\n if unconditional_guidance_scale == 1.:\n noise_pred = self.model.apply_model(x_next, t, c)\n else:\n assert unconditional_conditioning is not None\n e_t_uncond, noise_pred = torch.chunk(\n self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)),\n torch.cat((unconditional_conditioning, c))), 2)\n noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond)\n\n xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next\n weighted_noise_pred = alphas_next[i].sqrt() * (\n (1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred\n x_next = xt_weighted + weighted_noise_pred\n if return_intermediates and i % (\n num_steps // return_intermediates) == 0 and i < num_steps - 1:\n intermediates.append(x_next)\n inter_steps.append(i)\n elif return_intermediates and i >= num_steps - 2:\n intermediates.append(x_next)\n inter_steps.append(i)\n if callback: callback(i)\n\n out = {'x_encoded': x_next, 'intermediate_steps': inter_steps}\n if return_intermediates:\n out.update({'intermediates': intermediates})\n return x_next, out\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas)\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +\n extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)\n\n @torch.no_grad()\n def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,\n use_original_steps=False, callback=None):\n\n timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='Decoding image', total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((x_latent.shape[0],), step, 
device=x_latent.device, dtype=torch.long)\n x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n if callback: callback(i)\n return x_dec\n \n @torch.no_grad()\n def forward_ddim(self, x_latent, cond, total_step=10, t0=1000, return_noised_maps=False):\n seq_inv = np.linspace(0, 1, total_step) * (t0-1)\n seq_inv = [int(s) for s in list(seq_inv)]\n seq_inv_next = [-1] + list(seq_inv[:-1])\n x_enc = x_latent.clone()\n x_encs = []\n with tqdm(total=len(seq_inv), desc=f\"Inversion process \", ascii=True) as progress_bar:\n for it, (i, j) in enumerate(zip((seq_inv_next[1:]), (seq_inv[1:]))):\n if return_noised_maps:\n x_encs.append(x_enc)\n t = torch.full((x_latent.shape[0],), i, device=x_latent.device, dtype=torch.long)\n t_prev = torch.full((x_latent.shape[0],), j, device=x_latent.device, dtype=torch.long)\n x_enc, _ = denoising_step(x_enc, c=cond, t=t, t_next=t_prev, model=self.model, b=self.model.betas, eta=0)\n progress_bar.update(1)\n \n if return_noised_maps:\n return x_enc, x_encs\n \n return x_enc\n \n @torch.no_grad()\n def reverse_ddim(self, x_latent, cond, total_step=10, t0=1000, eta=0, unconditional_guidance_scale=1.0, unconditional_conditioning=None, noised_maps=False, mask=False, merge_stop_th=10):\n seq_test = np.linspace(0, 1, total_step) * (t0-1)\n seq_test = [int(s) for s in list(seq_test)]\n seq_test_next = [-1] + list(seq_test[:-1])\n x_dec = x_latent.clone()\n step=len(seq_test)-1\n with tqdm(total=len(seq_test), desc=\"Generative process\", ascii=True) as progress_bar:\n for i, j in zip(reversed(seq_test[1:]), reversed(seq_test_next[1:])):\n t = torch.full((x_latent.shape[0],), i, device=x_latent.device, dtype=torch.long)\n t_next = torch.full((x_latent.shape[0],), j, device=x_latent.device, dtype=torch.long)\n x_dec, x_0 = denoising_step(x_dec, c=cond, t=t, t_next=t_next, model=self.model, b=self.model.betas, \n eta=eta, unconditional_guidance_scale=unconditional_guidance_scale, unconditional_conditioning=unconditional_conditioning) \n if noised_maps is not False:\n step -= 1\n if step>merge_stop_th:\n x_dec = mask*x_dec+(1.-mask)*noised_maps[step]\n progress_bar.update(1)\n return x_dec\n \n def attention_guided_reverse_ddim(self, x_latent, cond, total_step=50, t0=1000, eta=0, unconditional_guidance_scale=1.0, unconditional_conditioning=None, att_masks=None, word_ids_for_mask=None, alpha=0.08, lmbd=0.5, swapping_step_th=float('inf'), guidance_step_th=float('inf')):\n seq_test = np.linspace(0, 1, total_step) * (t0-1)\n seq_test = [int(s) for s in list(seq_test)]\n seq_test_next = [-1] + list(seq_test[:-1])\n step=len(seq_test)-1\n optimized_latent = x_latent.clone().detach()\n \n with tqdm(total=len(seq_test), desc=\"Generative process\", ascii=True) as progress_bar:\n for i, j in zip(reversed(seq_test[1:]), reversed(seq_test_next[1:])):\n t = torch.full((x_latent.shape[0],), i, device=x_latent.device, dtype=torch.long)\n t_next = torch.full((x_latent.shape[0],), j, device=x_latent.device, dtype=torch.long)\n \n if t[0]>guidance_step_th:\n optimized_latent.requires_grad = True\n opt = torch.optim.SGD([optimized_latent], lr=alpha)\n _ = self.model.apply_model(optimized_latent, t.detach(), cond.detach())\n \n loss = 0.\n for name, module in self.model.named_modules():\n module_name = type(module).__name__\n if module_name == \"MemoryEfficientCrossAttention\" and 'attn2' in name:\n att = module.stored_attention\n 
w = int(math.sqrt(att.shape[1]))\n for amid, att_mask in enumerate(att_masks):\n if amid >= len(word_ids_for_mask):\n continue\n att_mask = att_mask.detach()\n att_mask_resized = torch.nn.functional.interpolate(att_mask, size=(w,w)).to(torch.bool)\n att_mask_ = rearrange(att_mask_resized, 'b ... -> b (...)')\n att_mask_ = repeat(att_mask_, 'b j -> (b h) j', h=module.heads)\n \n word_ids = word_ids_for_mask[amid]\n loss += -att[:,:,word_ids][att_mask_==1].sum()\n loss += lmbd*att[:,:,word_ids][att_mask_==0].sum()\n \n #print(\"Masked attention loss:\", loss)\n loss.backward(retain_graph=False)\n opt.step()\n \n with torch.no_grad():\n if t[0]>swapping_step_th:\n att_masks_att_ids = [att_masks,word_ids_for_mask]\n else:\n att_masks_att_ids = None\n x_dec, x_0 = denoising_step(optimized_latent, c=cond, t=t, t_next=t_next, model=self.model, b=self.model.betas, \n eta=eta, unconditional_guidance_scale=unconditional_guidance_scale, unconditional_conditioning=unconditional_conditioning,\n att_mask=att_masks_att_ids)\n \n optimized_latent = x_dec.detach().clone() \n step -= 1\n progress_bar.update(1)\n \n return optimized_latent" } ]
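The diffusion utilities listed in this context (make_beta_schedule, extract_into_tensor, noise_like) typically combine as follows for the forward-noising step q(x_t | x_0). This is an illustrative sketch consistent with the alpha-cumprod buffers seen in the DDIMSampler snippet, not the ddpm.py source:

# Illustrative sketch (not the ddpm.py source): forward noising q(x_t | x_0)
# built from the utilities listed above.
import torch
from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like

betas = torch.tensor(make_beta_schedule("linear", n_timestep=1000))   # (T,) float64
alphas_cumprod = torch.cumprod(1. - betas, dim=0)                     # cumulative alpha_bar_t
sqrt_ac = torch.sqrt(alphas_cumprod).float()
sqrt_one_minus_ac = torch.sqrt(1. - alphas_cumprod).float()

def q_sample(x_start, t, noise=None):
    # x_start: (B, C, H, W); t: (B,) long tensor of timesteps in [0, T)
    noise = noise if noise is not None else noise_like(x_start.shape, x_start.device)
    return (extract_into_tensor(sqrt_ac.to(x_start.device), t, x_start.shape) * x_start
            + extract_into_tensor(sqrt_one_minus_ac.to(x_start.device), t, x_start.shape) * noise)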
import torch
import torch.nn as nn
import numpy as np
import pytorch_lightning as pl
import itertools
from torch.optim.lr_scheduler import LambdaLR
from einops import rearrange, repeat
from contextlib import contextmanager, nullcontext
from functools import partial
from tqdm import tqdm
from torchvision.utils import make_grid
from pytorch_lightning.utilities.distributed import rank_zero_only
from omegaconf import ListConfig
from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
from ldm.modules.ema import LitEma
from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
from ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL
from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
from ldm.models.diffusion.ddim import DDIMSampler
13686
if mask is not None: assert x0 is not None assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match for i in iterator: ts = torch.full((b,), i, device=device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised) if mask is not None: img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) if callback: callback(i) if img_callback: img_callback(img, i) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, verbose=True, timesteps=None, quantize_denoised=False, mask=None, x0=None, shape=None, **kwargs): if shape is None: shape = (batch_size, self.channels, self.image_size, self.image_size) if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] return self.p_sample_loop(cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0) @torch.no_grad() def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): if ddim: ddim_sampler = DDIMSampler(self) shape = (self.channels, self.image_size, self.image_size) samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size, shape, cond, verbose=False, **kwargs) else: samples, intermediates = self.sample(cond=cond, batch_size=batch_size, return_intermediates=True, **kwargs) return samples, intermediates @torch.no_grad() def get_unconditional_conditioning(self, batch_size, null_label=None): if null_label is not None: xc = null_label if isinstance(xc, ListConfig): xc = list(xc) if isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: if hasattr(xc, "to"): xc = xc.to(self.device) c = self.get_learned_conditioning(xc) else: if self.cond_stage_key in ["class_label", "cls"]: xc = self.cond_stage_model.get_unconditional_conditioning(batch_size, device=self.device) return self.get_learned_conditioning(xc) else: raise NotImplementedError("todo") if isinstance(c, list): # in case the encoder gives us a list for i in range(len(c)): c[i] = repeat(c[i], '1 ... -> b ...', b=batch_size).to(self.device) else: c = repeat(c, '1 ... 
-> b ...', b=batch_size).to(self.device) return c @torch.no_grad() def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=50, ddim_eta=0., return_keys=None, quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None, use_ema_scope=True, **kwargs): ema_scope = self.ema_scope if use_ema_scope else nullcontext use_ddim = ddim_steps is not None log = dict() z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=N) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) log["inputs"] = x log["reconstruction"] = xrec if self.model.conditioning_key is not None: if hasattr(self.cond_stage_model, "decode"): xc = self.cond_stage_model.decode(c) log["conditioning"] = xc elif self.cond_stage_key in ["caption", "txt"]: xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25) log["conditioning"] = xc elif self.cond_stage_key in ['class_label', "cls"]: try: xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25) log['conditioning'] = xc except KeyError: # probably no "human_label" in batch pass elif isimage(xc): log["conditioning"] = xc
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., make_it_fit=False, ucg_training=None, reset_ema=False, reset_num_ema_updates=False, ): super().__init__() assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if reset_ema: assert exists(ckpt_path) if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) if reset_ema: assert self.use_ema print(f"Resetting ema to pure model weights. 
This is useful when restoring from an ema-only checkpoint.") self.model_ema = LitEma(self.model) if reset_num_ema_updates: print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ") assert self.use_ema self.model_ema.reset_num_updates() self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer('betas', to_torch(betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer('posterior_mean_coef1', to_torch( betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) self.register_buffer('posterior_mean_coef2', to_torch( (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) if self.parameterization == "eps": lvlb_weights = self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == "x0": lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. 
* 1 - torch.Tensor(alphas_cumprod)) elif self.parameterization == "v": lvlb_weights = torch.ones_like(self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))) else: raise NotImplementedError("mu not supported") lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] if self.make_it_fit: n_params = len([name for name, _ in itertools.chain(self.named_parameters(), self.named_buffers())]) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys:\n {missing}") if len(unexpected) > 0: print(f"\nUnexpected Keys:\n {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. 
""" mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def predict_start_from_z_and_v(self, x_t, t, v): # self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) # self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * v ) def predict_eps_from_z_and_v(self, x_t, t, v): return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * v + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * x_t ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1., 1.) 
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop((batch_size, channels, image_size, image_size), return_intermediates=return_intermediates) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) def get_v(self, x, noise, t): return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x.shape) * noise - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x.shape) * x ) def get_loss(self, pred, target, mean=True): if self.loss_type == 'l1': loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == 'l2': if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction='none') else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = self.model(x_noisy, t) loss_dict = {} if self.parameterization == "eps": target = noise elif self.parameterization == "x0": target = x_start elif self.parameterization == "v": target = self.get_v(x_start, noise, t) else: raise NotImplementedError(f"Paramterization {self.parameterization} not yet supported") loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = 'train' if self.training else 'val' loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f'{log_prefix}/loss': loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: x 
= x[..., None] x = rearrange(x, 'b h w c -> b c h w') x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x) return loss, loss_dict def training_step(self, batch, batch_idx): for k in self.ucg_training: p = self.ucg_training[k]["p"] val = self.ucg_training[k]["val"] if val is None: val = "" for i in range(len(batch[k])): if self.ucg_prng.choice(2, p=[1 - p, p]): batch[k][i] = val loss, loss_dict = self.shared_step(batch) self.log_dict(loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True) self.log("global_step", self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False) if self.use_scheduler: lr = self.optimizers().param_groups[0]['lr'] self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False) return loss @torch.no_grad() def validation_step(self, batch, batch_idx): _, loss_dict_no_ema = self.shared_step(batch) with self.ema_scope(): _, loss_dict_ema = self.shared_step(batch) loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema} self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log["inputs"] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) diffusion_row.append(x_noisy) log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope("Plotting"): samples, denoise_row = self.sample(batch_size=N, return_intermediates=True) log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__(self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, force_null_conditioning=False, *args, **kwargs): self.force_null_conditioning = force_null_conditioning self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs['timesteps'] # for backwards compatibility after implementation of DiffusionWrapper if 
conditioning_key is None: conditioning_key = 'concat' if concat_mode else 'crossattn' if cond_stage_config == '__is_unconditional__' and not self.force_null_conditioning: conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) reset_ema = kwargs.pop("reset_ema", False) reset_num_ema_updates = kwargs.pop("reset_num_ema_updates", False) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer('scale_factor', torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True if reset_ema: assert self.use_ema print( f"Resetting ema to pure model weights. This is useful when restoring from an ema-only checkpoint.") self.model_ema = LitEma(self.model) if reset_num_ema_updates: print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ") assert self.use_ema self.model_ema.reset_num_updates() def make_cond_schedule(self, ): self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long) ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long() self.cond_ids[:self.num_timesteps_cond] = ids @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx, dataloader_idx): # only for very first batch if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt: assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously' # set rescale weight to 1./std of encodings print("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() del self.scale_factor self.register_buffer('scale_factor', 1. 
/ z.flatten().std()) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING STD-RESCALING ###") def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config): model = instantiate_from_config(config) self.first_stage_model = model.eval() self.first_stage_model.train = disabled_train for param in self.first_stage_model.parameters(): param.requires_grad = False def instantiate_cond_stage(self, config): if not self.cond_stage_trainable: if config == "__is_first_stage__": print("Using first stage also as cond stage.") self.cond_stage_model = self.first_stage_model elif config == "__is_unconditional__": print(f"Training {self.__class__.__name__} as an unconditional model.") self.cond_stage_model = None # self.be_unconditional = True else: model = instantiate_from_config(config) self.cond_stage_model = model.eval() self.cond_stage_model.train = disabled_train for param in self.cond_stage_model.parameters(): param.requires_grad = False else: assert config != '__is_first_stage__' assert config != '__is_unconditional__' model = instantiate_from_config(config) self.cond_stage_model = model def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False): denoise_row = [] for zd in tqdm(samples, desc=desc): denoise_row.append(self.decode_first_stage(zd.to(self.device), force_not_quantize=force_no_decoder_quantization)) n_imgs_per_row = len(denoise_row) denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid def get_first_stage_encoding(self, encoder_posterior): if isinstance(encoder_posterior, DiagonalGaussianDistribution): z = encoder_posterior.sample() elif isinstance(encoder_posterior, torch.Tensor): z = encoder_posterior else: raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented") return self.scale_factor * z def get_learned_conditioning(self, c): if self.cond_stage_forward is None: if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode): c = self.cond_stage_model.encode(c) if isinstance(c, DiagonalGaussianDistribution): c = c.mode() else: c = self.cond_stage_model(c) else: assert hasattr(self.cond_stage_model, self.cond_stage_forward) c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) return c def meshgrid(self, h, w): y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) arr = torch.cat([y, x], dim=-1) return arr def delta_border(self, h, w): """ :param h: height :param w: width :return: normalized distance to image border, wtith min distance = 0 at border and max dist = 0.5 at image center """ lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) arr = self.meshgrid(h, w) / lower_right_corner dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0] edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0] return edge_dist def get_weighting(self, h, w, Ly, Lx, 
device): weighting = self.delta_border(h, w) weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"], self.split_input_params["clip_max_weight"], ) weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device) if self.split_input_params["tie_braker"]: L_weighting = self.delta_border(Ly, Lx) L_weighting = torch.clip(L_weighting, self.split_input_params["clip_min_tie_weight"], self.split_input_params["clip_max_tie_weight"]) L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) weighting = weighting * L_weighting return weighting def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code """ :param x: img of size (bs, c, h, w) :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) """ bs, nc, h, w = x.shape # number of crops in image Ly = (h - kernel_size[0]) // stride[0] + 1 Lx = (w - kernel_size[1]) // stride[1] + 1 if uf == 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) elif uf > 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), dilation=1, padding=0, stride=(stride[0] * uf, stride[1] * uf)) fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2) weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx)) elif df > 1 and uf == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df), dilation=1, padding=0, stride=(stride[0] // df, stride[1] // df)) fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2) weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx)) else: raise NotImplementedError return fold, unfold, normalization, weighting @torch.no_grad() def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None, return_x=False): x = super().get_input(batch, k) if bs is not None: x = x[:bs] x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() if self.model.conditioning_key is not None and not self.force_null_conditioning: if cond_key is None: cond_key = self.cond_stage_key if cond_key != self.first_stage_key: if cond_key in ['caption', 'coordinates_bbox', "txt"]: xc = batch[cond_key] elif cond_key in ['class_label', 'cls']: xc = batch else: xc = super().get_input(batch, cond_key).to(self.device) else: xc = x if not self.cond_stage_trainable or force_c_encode: if 
isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: c = self.get_learned_conditioning(xc.to(self.device)) else: c = xc if bs is not None: c = c[:bs] if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) ckey = __conditioning_keys__[self.model.conditioning_key] c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y} else: c = None xc = None if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) c = {'pos_x': pos_x, 'pos_y': pos_y} out = [z, c] if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_x: out.extend([x]) if return_original_cond: out.append(xc) return out @torch.no_grad() def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z return self.first_stage_model.decode(z) @torch.no_grad() def encode_first_stage(self, x): return self.first_stage_model.encode(x) def shared_step(self, batch, **kwargs): x, c = self.get_input(batch, self.first_stage_key) loss = self(x, c) return loss def forward(self, x, c, *args, **kwargs): t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() if self.model.conditioning_key is not None: assert c is not None if self.cond_stage_trainable: c = self.get_learned_conditioning(c) if self.shorten_cond_schedule: # TODO: drop this option tc = self.cond_ids[t].to(self.device) c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) return self.p_losses(x, c, t, *args, **kwargs) def apply_model(self, x_noisy, t, cond, return_ids=False, att_mask=None): if isinstance(cond, dict): # hybrid case, cond is expected to be a dict pass else: if not isinstance(cond, list): cond = [cond] key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn' cond = {key: cond} x_recon = self.model(x_noisy, t, **cond, att_mask=att_mask) if isinstance(x_recon, tuple) and not return_ids: return x_recon[0] else: return x_recon def _predict_eps_from_xstart(self, x_t, t, pred_xstart): return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \ extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) def _prior_bpd(self, x_start): """ Get the prior KL term for the variational lower-bound, measured in bits-per-dim. This term can't be optimized, as it only depends on the encoder. :param x_start: the [N x C x ...] tensor of inputs. :return: a batch of [N] KL values (in bits), one per batch element. 
""" batch_size = x_start.shape[0] t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0) return mean_flat(kl_prior) / np.log(2.0) def p_losses(self, x_start, cond, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_output = self.apply_model(x_noisy, t, cond) loss_dict = {} prefix = 'train' if self.training else 'val' if self.parameterization == "x0": target = x_start elif self.parameterization == "eps": target = noise elif self.parameterization == "v": target = self.get_v(x_start, noise, t) else: raise NotImplementedError() loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3]) loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()}) logvar_t = self.logvar[t].to(self.device) loss = loss_simple / torch.exp(logvar_t) + logvar_t # loss = loss_simple / torch.exp(self.logvar) + self.logvar if self.learn_logvar: loss_dict.update({f'{prefix}/loss_gamma': loss.mean()}) loss_dict.update({'logvar': self.logvar.data.mean()}) loss = self.l_simple_weight * loss.mean() loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3)) loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean() loss_dict.update({f'{prefix}/loss_vlb': loss_vlb}) loss += (self.original_elbo_weight * loss_vlb) loss_dict.update({f'{prefix}/loss': loss}) return loss, loss_dict def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False, return_x0=False, score_corrector=None, corrector_kwargs=None): t_in = t model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids) if score_corrector is not None: assert self.parameterization == "eps" model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs) if return_codebook_ids: model_out, logits = model_out if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out else: raise NotImplementedError() if clip_denoised: x_recon.clamp_(-1., 1.) 
if quantize_denoised: x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon) model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) if return_codebook_ids: return model_mean, posterior_variance, posterior_log_variance, logits elif return_x0: return model_mean, posterior_variance, posterior_log_variance, x_recon else: return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False, return_codebook_ids=False, quantize_denoised=False, return_x0=False, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None): b, *_, device = *x.shape, x.device outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised, return_codebook_ids=return_codebook_ids, quantize_denoised=quantize_denoised, return_x0=return_x0, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if return_codebook_ids: raise DeprecationWarning("Support dropped.") model_mean, _, model_log_variance, logits = outputs elif return_x0: model_mean, _, model_log_variance, x0 = outputs else: model_mean, _, model_log_variance = outputs noise = noise_like(x.shape, device, repeat_noise) * temperature if noise_dropout > 0.: noise = torch.nn.functional.dropout(noise, p=noise_dropout) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) if return_codebook_ids: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1) if return_x0: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0 else: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False, img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t timesteps = self.num_timesteps if batch_size is not None: b = batch_size if batch_size is not None else shape[0] shape = [batch_size] + list(shape) else: b = batch_size = shape[0] if x_T is None: img = torch.randn(shape, device=self.device) else: img = x_T intermediates = [] if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation', total=timesteps) if verbose else reversed( range(0, timesteps)) if type(temperature) == float: temperature = [temperature] * timesteps for i in iterator: ts = torch.full((b,), i, device=self.device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img, x0_partial = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised, return_x0=True, temperature=temperature[i], noise_dropout=noise_dropout, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if mask is not None: assert x0 is not None img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. 
- mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(x0_partial) if callback: callback(i) if img_callback: img_callback(img, i) return img, intermediates @torch.no_grad() def p_sample_loop(self, cond, shape, return_intermediates=False, x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t device = self.betas.device b = shape[0] if x_T is None: img = torch.randn(shape, device=device) else: img = x_T intermediates = [img] if timesteps is None: timesteps = self.num_timesteps if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed( range(0, timesteps)) if mask is not None: assert x0 is not None assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match for i in iterator: ts = torch.full((b,), i, device=device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised) if mask is not None: img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) if callback: callback(i) if img_callback: img_callback(img, i) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, verbose=True, timesteps=None, quantize_denoised=False, mask=None, x0=None, shape=None, **kwargs): if shape is None: shape = (batch_size, self.channels, self.image_size, self.image_size) if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] return self.p_sample_loop(cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0) @torch.no_grad() def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): if ddim: ddim_sampler = DDIMSampler(self) shape = (self.channels, self.image_size, self.image_size) samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size, shape, cond, verbose=False, **kwargs) else: samples, intermediates = self.sample(cond=cond, batch_size=batch_size, return_intermediates=True, **kwargs) return samples, intermediates @torch.no_grad() def get_unconditional_conditioning(self, batch_size, null_label=None): if null_label is not None: xc = null_label if isinstance(xc, ListConfig): xc = list(xc) if isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: if hasattr(xc, "to"): xc = xc.to(self.device) c = self.get_learned_conditioning(xc) else: if self.cond_stage_key in ["class_label", "cls"]: xc = self.cond_stage_model.get_unconditional_conditioning(batch_size, device=self.device) return self.get_learned_conditioning(xc) else: raise NotImplementedError("todo") if isinstance(c, list): # in case the encoder gives us a list for i in range(len(c)): c[i] = repeat(c[i], '1 ... 
-> b ...', b=batch_size).to(self.device) else: c = repeat(c, '1 ... -> b ...', b=batch_size).to(self.device) return c @torch.no_grad() def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=50, ddim_eta=0., return_keys=None, quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None, use_ema_scope=True, **kwargs): ema_scope = self.ema_scope if use_ema_scope else nullcontext use_ddim = ddim_steps is not None log = dict() z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=N) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) log["inputs"] = x log["reconstruction"] = xrec if self.model.conditioning_key is not None: if hasattr(self.cond_stage_model, "decode"): xc = self.cond_stage_model.decode(c) log["conditioning"] = xc elif self.cond_stage_key in ["caption", "txt"]: xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25) log["conditioning"] = xc elif self.cond_stage_key in ['class_label', "cls"]: try: xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25) log['conditioning'] = xc except KeyError: # probably no "human_label" in batch pass elif isimage(xc): log["conditioning"] = xc
if ismap(xc):
3
2023-10-27 06:56:37+00:00
16k
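The short values above (the reference continuation `if ismap(xc):`, the snippet index, the timestamp, and the `16k` length bucket) close out one complete record of this dump, and a new record for Gene-Weaver/VoucherVision begins below. Because the raw field values appear without labels, here is a minimal sketch of how one such record could be loaded and turned into a next-line completion prompt. It is illustrative only: the JSON Lines storage format, the file name, and the key names are assumptions, not something stated in the dump itself.

import json

# Minimal sketch: assumes the records of this dump are stored one JSON object
# per line (JSON Lines), with keys such as import_statement, cropped_code,
# next_line, repo_name, and file_path. The storage format and the key names
# are assumptions made for illustration.

def build_prompt(record: dict) -> str:
    # Concatenate the import block and the truncated file body; the expected
    # model output is the single reference line stored alongside them.
    return record["import_statement"].rstrip() + "\n" + record["cropped_code"]

def exact_match(prediction: str, record: dict) -> bool:
    # Whitespace-insensitive comparison against the reference next line.
    return prediction.strip() == record["next_line"].strip()

if __name__ == "__main__":
    # Hypothetical path, used only for illustration.
    with open("repo_completion_records.jsonl", encoding="utf-8") as f:
        for raw in f:
            record = json.loads(raw)
            prompt = build_prompt(record)
            # A real evaluation would query a code model here; this stub only
            # shows the input/output shape implied by one record.
            print(record["repo_name"], record["file_path"], len(prompt))

In a fuller pipeline, the snippet index and token count stored with each record would presumably control which retrieved context snippet is prepended and how aggressively the prompt is truncated; that logic is omitted from the sketch.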
Gene-Weaver/VoucherVision
vouchervision/VoucherVision_GUI.py
[ { "identifier": "write_config_file", "path": "vouchervision/LeafMachine2_Config_Builder.py", "snippet": "def write_config_file(config_data, dir_home, filename=\"LeafMachine2.yaml\"):\n file_path = os.path.join(dir_home, filename)\n\n # Write the data to a YAML file\n with open(file_path, \"w\") as outfile:\n yaml.dump(config_data, outfile, default_flow_style=False)" }, { "identifier": "build_VV_config", "path": "vouchervision/VoucherVision_Config_Builder.py", "snippet": "def build_VV_config():\n #############################################\n ############ Set common defaults ############\n #############################################\n # Changing the values below will set new \n # default values each time you open the \n # VoucherVision user interface\n #############################################\n #############################################\n #############################################\n\n dir_home = os.path.dirname(os.path.dirname(__file__))\n run_name = 'test'\n # dir_images_local = 'D:/Dropbox/LM2_Env/Image_Datasets/GBIF_BroadSample_3SppPerFamily1'\n dir_images_local = os.path.join(dir_home,'demo','demo_images')\n \n # The default output location is the computer's \"Downloads\" folder\n # You can set dir_output directly by typing the folder path,\n # OR you can uncomment the line \"dir_output = default_output_folder\" \n # to have VoucherVision save to the Downloads folder by default\n default_output_folder = get_default_download_folder()\n dir_output = default_output_folder\n # dir_output = 'D:/D_Desktop/LM2'\n\n prefix_removal = '' #'MICH-V-'\n suffix_removal = ''\n catalog_numerical_only = False\n\n LLM_version_user = 'Azure GPT 4'\n prompt_version = 'Version 2' # from [\"Version 1\", \"Version 1 No Domain Knowledge\", \"Version 2\"]\n use_LeafMachine2_collage_images = False # Use LeafMachine2 collage images\n do_create_OCR_helper_image = False\n\n batch_size = 500\n\n path_domain_knowledge = os.path.join(dir_home,'domain_knowledge','SLTP_UM_AllAsiaMinimalInRegion.xlsx')\n embeddings_database_name = os.path.splitext(os.path.basename(path_domain_knowledge))[0]\n\n #############################################\n #############################################\n ########## DO NOT EDIT BELOW HERE ###########\n #############################################\n #############################################\n return assemble_config(dir_home, run_name, dir_images_local,dir_output,\n prefix_removal,suffix_removal,catalog_numerical_only,LLM_version_user,batch_size,\n path_domain_knowledge,embeddings_database_name,use_LeafMachine2_collage_images,\n prompt_version, do_create_OCR_helper_image, use_domain_knowledge=False)" }, { "identifier": "run_demo_tests_GPT", "path": "vouchervision/VoucherVision_Config_Builder.py", "snippet": "def run_demo_tests_GPT(progress_report):\n dir_home, path_to_configs, test_results = build_demo_tests('gpt')\n progress_report.set_n_overall(len(test_results.items()))\n\n JSON_results = {}\n\n for ind, (cfg, result) in enumerate(test_results.items()):\n OPT1, OPT2, OPT3 = TestOptionsGPT.get_options()\n \n test_ind, ind_opt1, ind_opt2, ind_opt3 = cfg.split('__')\n opt1_readable = OPT1[int(ind_opt1.split('-')[1])]\n\n if opt1_readable in [\"Azure GPT 4\", \"Azure GPT 3.5\"]:\n api_version = 'gpt-azure'\n elif opt1_readable in [\"GPT 4\", \"GPT 3.5\"]:\n api_version = 'gpt'\n else:\n raise\n\n opt2_readable = \"Use LeafMachine2 for Collage Images\" if OPT2[int(ind_opt2.split('-')[1])] else \"Don't use LeafMachine2 for Collage Images\"\n opt3_readable = f\"Prompt 
{OPT3[int(ind_opt3.split('-')[1])]}\"\n # Construct the human-readable test name\n human_readable_name = f\"{opt1_readable}, {opt2_readable}, {opt3_readable}\"\n get_n_overall = progress_report.get_n_overall()\n progress_report.update_overall(f\"Test {int(test_ind)+1} of {get_n_overall} --- Validating {human_readable_name}\")\n print_main_fail(f\"Starting validation test: {human_readable_name}\")\n cfg_file_path = os.path.join(path_to_configs,'.'.join([cfg,'yaml']))\n \n if check_API_key(dir_home, api_version) and check_API_key(dir_home, 'google-vision-ocr'):\n try:\n last_JSON_response, total_cost = voucher_vision(cfg_file_path, dir_home, cfg_test=None, progress_report=progress_report, test_ind=int(test_ind))\n test_results[cfg] = True\n JSON_results[ind] = last_JSON_response\n except Exception as e:\n JSON_results[ind] = None\n test_results[cfg] = False\n print(f\"An exception occurred: {e}\")\n traceback.print_exc() # This will print the full traceback\n else:\n fail_response = ''\n if not check_API_key(dir_home, 'google-vision-ocr'):\n fail_response += \"No API key found for Google Vision OCR\"\n if not check_API_key(dir_home, api_version):\n fail_response += f\" + No API key found for {api_version}\"\n test_results[cfg] = False\n JSON_results[ind] = fail_response\n print(f\"No API key found for {fail_response}\")\n \n return test_results, JSON_results" }, { "identifier": "run_demo_tests_Palm", "path": "vouchervision/VoucherVision_Config_Builder.py", "snippet": "def run_demo_tests_Palm(progress_report):\n api_version = 'palm'\n\n dir_home, path_to_configs, test_results = build_demo_tests('palm')\n progress_report.set_n_overall(len(test_results.items()))\n\n JSON_results = {}\n\n for ind, (cfg, result) in enumerate(test_results.items()):\n OPT1, OPT2, OPT3 = TestOptionsPalm.get_options()\n test_ind, ind_opt1, ind_opt2, ind_opt3 = cfg.split('__')\n opt1_readable = OPT1[int(ind_opt1.split('-')[1])]\n opt2_readable = \"Use LeafMachine2 for Collage Images\" if OPT2[int(ind_opt2.split('-')[1])] else \"Don't use LeafMachine2 for Collage Images\"\n opt3_readable = f\"Prompt {OPT3[int(ind_opt3.split('-')[1])]}\"\n # opt3_readable = \"Use Domain Knowledge\" if OPT3[int(ind_opt3.split('-')[1])] else \"Don't use Domain Knowledge\"\n # Construct the human-readable test name\n human_readable_name = f\"{opt1_readable}, {opt2_readable}, {opt3_readable}\"\n get_n_overall = progress_report.get_n_overall()\n progress_report.update_overall(f\"Test {int(test_ind)+1} of {get_n_overall} --- Validating {human_readable_name}\")\n print_main_fail(f\"Starting validation test: {human_readable_name}\")\n cfg_file_path = os.path.join(path_to_configs,'.'.join([cfg,'yaml']))\n \n if check_API_key(dir_home, api_version) and check_API_key(dir_home, 'google-vision-ocr') :\n try:\n last_JSON_response, total_cost = voucher_vision(cfg_file_path, dir_home, cfg_test=None, path_custom_prompts=None, progress_report=progress_report, test_ind=int(test_ind))\n test_results[cfg] = True\n JSON_results[ind] = last_JSON_response\n except Exception as e:\n test_results[cfg] = False\n JSON_results[ind] = None\n print(f\"An exception occurred: {e}\")\n traceback.print_exc() # This will print the full traceback\n else:\n fail_response = ''\n if not check_API_key(dir_home, 'google-vision-ocr'):\n fail_response += \"No API key found for Google Vision OCR\"\n if not check_API_key(dir_home, api_version):\n fail_response += f\" + No API key found for {api_version}\"\n test_results[cfg] = False\n JSON_results[ind] = fail_response\n print(f\"No 
API key found for {fail_response}\")\n\n return test_results, JSON_results" }, { "identifier": "TestOptionsGPT", "path": "vouchervision/VoucherVision_Config_Builder.py", "snippet": "class TestOptionsGPT:\n OPT1 = [\"gpt-4-1106-preview\",\"GPT 4\", \"GPT 3.5\", \"Azure GPT 4\", \"Azure GPT 3.5\"]\n OPT2 = [False, True]\n OPT3 = [\"Version 1\", \"Version 1 No Domain Knowledge\", \"Version 2\"]\n\n @classmethod\n def get_options(cls):\n return cls.OPT1, cls.OPT2, cls.OPT3\n @classmethod\n def get_length(cls):\n return 24" }, { "identifier": "TestOptionsPalm", "path": "vouchervision/VoucherVision_Config_Builder.py", "snippet": "class TestOptionsPalm:\n OPT1 = [\"PaLM 2\"]\n OPT2 = [False, True]\n OPT3 = [\"Version 1 PaLM 2\", \"Version 1 PaLM 2 No Domain Knowledge\", \"Version 2 PaLM 2\"]\n\n @classmethod\n def get_options(cls):\n return cls.OPT1, cls.OPT2, cls.OPT3\n @classmethod\n def get_length(cls):\n return 6" }, { "identifier": "check_if_usable", "path": "vouchervision/VoucherVision_Config_Builder.py", "snippet": "def check_if_usable():\n dir_home = os.path.dirname(os.path.dirname(__file__))\n path_cfg_private = os.path.join(dir_home, 'PRIVATE_DATA.yaml')\n cfg_private = get_cfg_from_full_path(path_cfg_private)\n\n has_key_openai = has_API_key(cfg_private['openai']['OPENAI_API_KEY'])\n\n has_key_azure_openai = has_API_key(cfg_private['openai_azure']['api_version']) \n\n has_key_palm2 = has_API_key(cfg_private['google_palm']['google_palm_api'])\n \n has_key_google_OCR = has_API_key(cfg_private['google_cloud']['path_json_file'])\n\n if has_key_google_OCR and (has_key_azure_openai or has_key_openai or has_key_palm2):\n return True\n else:\n return False" }, { "identifier": "run_api_tests", "path": "vouchervision/VoucherVision_Config_Builder.py", "snippet": "def run_api_tests(api):\n try:\n dir_home, path_to_configs, test_results = build_api_tests(api)\n\n JSON_results = {}\n\n for ind, (cfg, result) in enumerate(test_results.items()):\n if api == 'openai':\n OPT1, OPT2, OPT3 = TestOptionsAPI_openai.get_options()\n elif 'azure_openai':\n OPT1, OPT2, OPT3 = TestOptionsAPI_azure_openai.get_options()\n elif 'palm':\n OPT1, OPT2, OPT3 = TestOptionsAPI_palm.get_options()\n test_ind, ind_opt1, ind_opt2, ind_opt3 = cfg.split('__')\n opt1_readable = OPT1[int(ind_opt1.split('-')[1])]\n opt2_readable = \"Use LeafMachine2 for Collage Images\" if OPT2[int(ind_opt2.split('-')[1])] else \"Don't use LeafMachine2 for Collage Images\"\n opt3_readable = f\"Prompt {OPT3[int(ind_opt3.split('-')[1])]}\"\n # opt3_readable = \"Use Domain Knowledge\" if OPT3[int(ind_opt3.split('-')[1])] else \"Don't use Domain Knowledge\"\n # Construct the human-readable test name\n human_readable_name = f\"{opt1_readable}, {opt2_readable}, {opt3_readable}\"\n print_main_fail(f\"Starting validation test: {human_readable_name}\")\n cfg_file_path = os.path.join(path_to_configs,'.'.join([cfg,'yaml']))\n \n if check_API_key(dir_home, api) and check_API_key(dir_home, 'google-vision-ocr') :\n try:\n last_JSON_response, total_cost = voucher_vision(cfg_file_path, dir_home, None,path_custom_prompts=None , cfg_test=None, progress_report=None, test_ind=int(test_ind))\n test_results[cfg] = True\n JSON_results[ind] = last_JSON_response\n return True\n\n except Exception as e:\n print(e)\n return False\n else:\n return False\n except Exception as e:\n print(e)\n return False" }, { "identifier": "voucher_vision", "path": "vouchervision/vouchervision_main.py", "snippet": "def voucher_vision(cfg_file_path, dir_home, path_custom_prompts, cfg_test, 
progress_report, path_api_cost=None, test_ind = None, is_real_run=False):\n # get_n_overall = progress_report.get_n_overall()\n # progress_report.update_overall(f\"Working on {test_ind+1} of {get_n_overall}\")\n\n t_overall = perf_counter()\n\n # Load config file\n report_config(dir_home, cfg_file_path, system='VoucherVision')\n\n if cfg_test is None:\n cfg = load_config_file(dir_home, cfg_file_path, system='VoucherVision') # For VoucherVision\n else:\n cfg = cfg_test \n # user_cfg = load_config_file(dir_home, cfg_file_path)\n # cfg = Config(user_cfg)\n\n # Check to see if there are subdirs\n # Yes --> use the names of the subsirs as run_name\n run_name, dirs_list, has_subdirs = check_for_subdirs_VV(cfg)\n print(f\"run_name {run_name} dirs_list{dirs_list} has_subdirs{has_subdirs}\")\n\n # for dir_ind, dir_in in enumerate(dirs_list):\n # if has_subdirs:\n # cfg['leafmachine']['project']['dir_images_local'] = dir_in\n # cfg['leafmachine']['project']['run_name'] = run_name[dir_ind]\n\n # Dir structure\n if is_real_run:\n progress_report.update_overall(f\"Creating Output Directory Structure\")\n print_main_start(\"Creating Directory Structure\")\n Dirs = Dir_Structure(cfg)\n\n # logging.info(\"Hi\")\n logger = start_logging(Dirs, cfg)\n\n # Check to see if required ML files are ready to use\n if is_real_run:\n progress_report.update_overall(f\"Fetching LeafMachine2 Files\")\n ready_to_use = fetch_data(logger, dir_home, cfg_file_path)\n assert ready_to_use, \"Required ML files are not ready to use!\\nThe download may have failed,\\nor\\nthe directory structure of LM2 has been altered\"\n\n # Wrangle images and preprocess\n print_main_start(\"Gathering Images and Image Metadata\")\n Project = Project_Info(cfg, logger, dir_home, Dirs) # Where file names are modified\n\n # Save config file\n save_config_file(cfg, logger, Dirs)\n\n # Detect Archival Components\n print_main_start(\"Locating Archival Components\")\n Project = detect_archival_components(cfg, logger, dir_home, Project, Dirs, is_real_run, progress_report)\n\n # Save cropped detections\n crop_detections_from_images_VV(cfg, logger, dir_home, Project, Dirs)\n\n # Process labels\n Voucher_Vision = VoucherVision(cfg, logger, dir_home, path_custom_prompts, Project, Dirs)\n n_images = len(Voucher_Vision.img_paths)\n last_JSON_response, total_tokens_in, total_tokens_out = Voucher_Vision.process_specimen_batch(progress_report, is_real_run)\n \n if path_api_cost:\n cost_summary, data, total_cost = save_token_info_as_csv(Dirs, cfg['leafmachine']['LLM_version'], path_api_cost, total_tokens_in, total_tokens_out, n_images)\n add_to_expense_report(dir_home, data)\n logger.info(cost_summary)\n else:\n total_cost = None #TODO add config tests to expense_report\n\n t_overall_s = perf_counter()\n logger.name = 'Run Complete! :)'\n logger.info(f\"[Total elapsed time] {round((t_overall_s - t_overall)/60)} minutes\")\n space_saver(cfg, Dirs, logger)\n\n if is_real_run:\n progress_report.update_overall(f\"Run Complete! 
:sunglasses:\")\n\n for handler in logger.handlers[:]:\n handler.close()\n logger.removeHandler(handler)\n\n return last_JSON_response, total_cost" }, { "identifier": "voucher_vision_OCR_test", "path": "vouchervision/vouchervision_main.py", "snippet": "def voucher_vision_OCR_test(cfg_file_path, dir_home, cfg_test, path_to_crop):\n # get_n_overall = progress_report.get_n_overall()\n # progress_report.update_overall(f\"Working on {test_ind+1} of {get_n_overall}\")\n\n # Load config file\n report_config(dir_home, cfg_file_path, system='VoucherVision')\n\n if cfg_test is None:\n cfg = load_config_file(dir_home, cfg_file_path, system='VoucherVision') # For VoucherVision\n else:\n cfg = cfg_test \n # user_cfg = load_config_file(dir_home, cfg_file_path)\n # cfg = Config(user_cfg)\n\n # Check to see if there are subdirs\n # Yes --> use the names of the subsirs as run_name\n run_name, dirs_list, has_subdirs = check_for_subdirs_VV(cfg)\n print(f\"run_name {run_name} dirs_list{dirs_list} has_subdirs{has_subdirs}\")\n\n # for dir_ind, dir_in in enumerate(dirs_list):\n # if has_subdirs:\n # cfg['leafmachine']['project']['dir_images_local'] = dir_in\n # cfg['leafmachine']['project']['run_name'] = run_name[dir_ind]\n\n # Dir structure\n print_main_start(\"Creating Directory Structure\")\n Dirs = Dir_Structure(cfg)\n\n # logging.info(\"Hi\")\n logger = start_logging(Dirs, cfg)\n\n # Check to see if required ML files are ready to use\n ready_to_use = fetch_data(logger, dir_home, cfg_file_path)\n assert ready_to_use, \"Required ML files are not ready to use!\\nThe download may have failed,\\nor\\nthe directory structure of LM2 has been altered\"\n\n # Wrangle images and preprocess\n print_main_start(\"Gathering Images and Image Metadata\")\n Project = Project_Info(cfg, logger, dir_home, Dirs) # Where file names are modified\n\n # Save config file\n save_config_file(cfg, logger, Dirs)\n\n # Detect Archival Components\n print_main_start(\"Locating Archival Components\")\n Project = detect_archival_components(cfg, logger, dir_home, Project, Dirs)\n\n # Save cropped detections\n crop_detections_from_images_VV(cfg, logger, dir_home, Project, Dirs)\n\n # Process labels\n Voucher_Vision = VoucherVision(cfg, logger, dir_home, None, Project, Dirs)\n last_JSON_response = Voucher_Vision.process_specimen_batch_OCR_test(path_to_crop)" }, { "identifier": "test_GPU", "path": "vouchervision/general_utils.py", "snippet": "def test_GPU():\n info = []\n success = False\n\n if torch.cuda.is_available():\n num_gpus = torch.cuda.device_count()\n info.append(f\"Number of GPUs: {num_gpus}\")\n\n for i in range(num_gpus):\n gpu = torch.cuda.get_device_properties(i)\n info.append(f\"GPU {i}: {gpu.name}\")\n\n success = True\n else:\n info.append(\"No GPU found!\")\n info.append(\"LeafMachine2 image cropping and embedding search will be slow or not possible.\")\n\n return success, info" }, { "identifier": "get_cfg_from_full_path", "path": "vouchervision/general_utils.py", "snippet": "def get_cfg_from_full_path(path_cfg):\n with open(path_cfg, \"r\") as ymlfile:\n cfg = yaml.full_load(ymlfile)\n return cfg" }, { "identifier": "summarize_expense_report", "path": "vouchervision/general_utils.py", "snippet": "def summarize_expense_report(path_expense_report):\n # Initialize counters and sums\n run_count = 0\n total_cost_sum = 0\n tokens_in_sum = 0\n tokens_out_sum = 0\n rate_in_sum = 0\n rate_out_sum = 0\n cost_in_sum = 0\n cost_out_sum = 0\n n_images_sum = 0\n api_version_counts = Counter()\n\n # Try to read the CSV file into a 
DataFrame\n try:\n df = pd.read_csv(path_expense_report)\n\n # Process each row in the DataFrame\n for index, row in df.iterrows():\n run_count += 1\n total_cost_sum += row['total_cost']\n tokens_in_sum += row['tokens_in']\n tokens_out_sum += row['tokens_out']\n rate_in_sum += row['rate_in']\n rate_out_sum += row['rate_out']\n cost_in_sum += row['cost_in']\n cost_out_sum += row['cost_out']\n n_images_sum += row['n_images']\n api_version_counts[row['api_version']] += 1\n\n except FileNotFoundError:\n print(f\"The file {path_expense_report} does not exist.\")\n return None\n\n # Calculate API version percentages\n api_version_percentages = {version: (count / run_count) * 100 for version, count in api_version_counts.items()}\n\n # Calculate cost per image for each API version\n cost_per_image_dict = {}\n for version, count in api_version_counts.items():\n total_cost = df[df['api_version'] == version]['total_cost'].sum()\n n_images = df[df['api_version'] == version]['n_images'].sum()\n cost_per_image = total_cost / n_images if n_images > 0 else 0\n cost_per_image_dict[version] = cost_per_image\n\n # Return the DataFrame and all summaries\n return {\n 'run_count': run_count,\n 'total_cost_sum': total_cost_sum,\n 'tokens_in_sum': tokens_in_sum,\n 'tokens_out_sum': tokens_out_sum,\n 'rate_in_sum': rate_in_sum,\n 'rate_out_sum': rate_out_sum,\n 'cost_in_sum': cost_in_sum,\n 'cost_out_sum': cost_out_sum,\n 'n_images_sum':n_images_sum,\n 'api_version_percentages': api_version_percentages,\n 'cost_per_image': cost_per_image_dict\n }, df" }, { "identifier": "create_google_ocr_yaml_config", "path": "vouchervision/general_utils.py", "snippet": "def create_google_ocr_yaml_config(output_file, dir_images_local, dir_output):\n # Define the configuration dictionary\n config = {\n 'leafmachine': {\n 'LLM_version': 'PaLM 2',\n 'archival_component_detector': {\n 'detector_iteration': 'PREP_final',\n 'detector_type': 'Archival_Detector',\n 'detector_version': 'PREP_final',\n 'detector_weights': 'best.pt',\n 'do_save_prediction_overlay_images': True,\n 'ignore_objects_for_overlay': [],\n 'minimum_confidence_threshold': 0.5\n },\n 'cropped_components': {\n 'binarize_labels': False,\n 'binarize_labels_skeletonize': False,\n 'do_save_cropped_annotations': True,\n 'save_cropped_annotations': ['label', 'barcode'],\n 'save_per_annotation_class': True,\n 'save_per_image': False\n },\n 'data': {\n 'do_apply_conversion_factor': False,\n 'include_darwin_core_data_from_combined_file': False,\n 'save_individual_csv_files_landmarks': False,\n 'save_individual_csv_files_measurements': False,\n 'save_individual_csv_files_rulers': False,\n 'save_individual_efd_files': False,\n 'save_json_measurements': False,\n 'save_json_rulers': False\n },\n 'do': {\n 'check_for_corrupt_images_make_vertical': True,\n 'check_for_illegal_filenames': False\n },\n 'logging': {\n 'log_level': None\n },\n 'modules': {\n 'specimen_crop': True\n },\n 'overlay': {\n 'alpha_transparency_archival': 0.3,\n 'alpha_transparency_plant': 0,\n 'alpha_transparency_seg_partial_leaf': 0.3,\n 'alpha_transparency_seg_whole_leaf': 0.4,\n 'ignore_archival_detections_classes': [],\n 'ignore_landmark_classes': [],\n 'ignore_plant_detections_classes': ['leaf_whole', 'specimen'],\n 'line_width_archival': 12,\n 'line_width_efd': 12,\n 'line_width_plant': 12,\n 'line_width_seg': 12,\n 'overlay_background_color': 'black',\n 'overlay_dpi': 300,\n 'save_overlay_to_jpgs': True,\n 'save_overlay_to_pdf': False,\n 'show_archival_detections': True,\n 'show_landmarks': True,\n 
'show_plant_detections': True,\n 'show_segmentations': True\n },\n 'print': {\n 'optional_warnings': True,\n 'verbose': True\n },\n 'project': {\n 'batch_size': 500,\n 'build_new_embeddings_database': False,\n 'catalog_numerical_only': False,\n 'continue_run_from_partial_xlsx': '',\n 'delete_all_temps': False,\n 'delete_temps_keep_VVE': False,\n 'dir_images_local': dir_images_local,\n 'dir_output': dir_output,\n 'embeddings_database_name': 'SLTP_UM_AllAsiaMinimalInRegion',\n 'image_location': 'local',\n 'num_workers': 1,\n 'path_to_domain_knowledge_xlsx': '',\n 'prefix_removal': '',\n 'prompt_version': 'Version 2 PaLM 2',\n 'run_name': 'google_vision_ocr_test',\n 'suffix_removal': '',\n 'use_domain_knowledge': False\n },\n 'use_RGB_label_images': False\n }\n }\n # Generate the YAML string from the data structure\n validate_dir(os.path.dirname(output_file))\n yaml_str = yaml.dump(config, sort_keys=False)\n\n # Write the YAML string to a file\n with open(output_file, 'w') as file:\n file.write(yaml_str)" }, { "identifier": "validate_dir", "path": "vouchervision/general_utils.py", "snippet": "def validate_dir(dir):\n if not os.path.exists(dir):\n os.makedirs(dir, exist_ok=True)" } ]
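# --- Editor's illustration (not part of the original record; a minimal sketch) ---
# check_if_usable() in the context snippet above and create_private_file() in the
# cropped code below both read/write a PRIVATE_DATA.yaml with the key layout shown
# here. Only the key names are taken from the record; the placeholder values and the
# write_example_private_file() helper are hypothetical.
import os
import yaml

example_private_cfg = {
    'openai': {'OPENAI_API_KEY': ''},
    'openai_azure': {
        'openai_api_key': '',
        'api_version': '',
        'openai_api_base': '',
        'openai_organization': '',
        'openai_api_type': '',
    },
    'google_cloud': {'path_json_file': ''},   # path to the Google Vision JSON key file
    'google_palm': {'google_palm_api': ''},
}

def write_example_private_file(dir_home: str) -> str:
    """Write the placeholder PRIVATE_DATA.yaml at the project root (hypothetical helper)."""
    path = os.path.join(dir_home, 'PRIVATE_DATA.yaml')
    with open(path, 'w') as f:
        yaml.dump(example_private_cfg, f, sort_keys=False)
    return path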
import streamlit as st
import yaml, os, json, random, time, re
import matplotlib.pyplot as plt
import plotly.graph_objs as go
import numpy as np
import pandas as pd
from itertools import chain
from PIL import Image
from typing import Union
from streamlit_extras.let_it_rain import rain
from vouchervision.LeafMachine2_Config_Builder import write_config_file
from vouchervision.VoucherVision_Config_Builder import build_VV_config, run_demo_tests_GPT, run_demo_tests_Palm, TestOptionsGPT, TestOptionsPalm, check_if_usable, run_api_tests
from vouchervision.vouchervision_main import voucher_vision, voucher_vision_OCR_test
from vouchervision.general_utils import test_GPU, get_cfg_from_full_path, summarize_expense_report, create_google_ocr_yaml_config, validate_dir
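# --- Editor's illustration (not part of the original record; a minimal sketch) ---
# Headless call to voucher_vision(), following the signature in the context snippet
# above and the arguments the Streamlit app itself passes: cfg_file_path=None and
# cfg_test=None after VoucherVision.yaml has been written (the saved config is assumed
# to be reloaded), and progress_report=None as in the repo's own API tests.
# dir_home and the default prompt_version below are placeholders.
import os
from vouchervision.vouchervision_main import voucher_vision

def run_headless(dir_home: str, prompt_version: str = 'Version 2'):
    path_custom_prompts = os.path.join(dir_home, 'custom_prompts', prompt_version)
    last_JSON_response, total_cost = voucher_vision(
        None,                      # cfg_file_path: fall back to the saved VoucherVision.yaml (assumed)
        dir_home,
        path_custom_prompts,
        None,                      # cfg_test
        None,                      # progress_report (no Streamlit widgets in a headless run)
        path_api_cost=os.path.join(dir_home, 'api_cost', 'api_cost.yaml'),
        is_real_run=False,
    )
    return last_JSON_response, total_cost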
10,807
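# --- Editor's illustration (not part of the original record; a minimal sketch) ---
# Context for the cropped code that follows: it turns the output of
# summarize_expense_report() (a summary dict plus a pandas DataFrame of
# expense_report.csv, see the context snippet above) into Plotly pie charts.
# A stripped-down version of that hand-off; the CSV path mirrors sidebar_content().
import os
import plotly.graph_objs as go
from vouchervision.general_utils import summarize_expense_report

def build_runs_by_api_version_chart(dir_home: str):
    report_path = os.path.join(dir_home, 'expense_report', 'expense_report.csv')
    result = summarize_expense_report(report_path)
    if result is None:   # summarize_expense_report() returns None when the CSV is missing
        return None
    expense_summary, expense_report = result
    labels = list(expense_summary['api_version_percentages'].keys())
    values = [expense_summary['api_version_percentages'][v] for v in labels]
    return go.Figure(data=[go.Pie(labels=labels, values=values, hole=.3)])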
api_versions = list(cost_per_image_dict.keys()) colors = [COLORS_EXPENSE_REPORT[version] if version in COLORS_EXPENSE_REPORT else '#DDDDDD' for version in api_versions] # Calculate the cost per image for each API version for version, cost_data in cost_per_image_dict.items(): total_cost = cost_data['total_cost'] n_images = cost_data['n_images'] # Calculate the cost per image for this version cost_per_image = total_cost / n_images if n_images > 0 else 0 cost_labels.append(version) cost_values.append(cost_per_image) # Generate the pie chart cost_pie_chart = go.Figure(data=[go.Pie(labels=cost_labels, values=cost_values, hole=.3)]) # Update traces for custom text in hoverinfo, displaying cost with a dollar sign and two decimal places cost_pie_chart.update_traces( marker=dict(colors=colors), text=[f"${value:.2f}" for value in cost_values], # Formats the cost as a string with a dollar sign and two decimals textinfo='percent+label', hoverinfo='label+percent+text' # Adds custom text (formatted cost) to the hover information ) st.plotly_chart(cost_pie_chart, use_container_width=True) st.subheader('Proportion of Total Cost by API Version') cost_labels = [] cost_proportions = [] total_cost_by_version = {} # Sum the total cost for each API version for index, row in expense_report.iterrows(): api_version = row['api_version'] total_cost = row['total_cost'] if api_version not in total_cost_by_version: total_cost_by_version[api_version] = 0 total_cost_by_version[api_version] += total_cost # Calculate the combined total cost for all versions combined_total_cost = sum(total_cost_by_version.values()) # Calculate the proportion of total cost for each API version for version, total_cost in total_cost_by_version.items(): proportion = (total_cost / combined_total_cost) * 100 if combined_total_cost > 0 else 0 cost_labels.append(version) cost_proportions.append(proportion) # Generate the pie chart cost_pie_chart = go.Figure(data=[go.Pie(labels=cost_labels, values=cost_proportions, hole=.3)]) # Update traces for custom text in hoverinfo cost_pie_chart.update_traces( marker=dict(colors=colors), text=[f"${cost:.2f}" for cost in total_cost_by_version.values()], # This will format the cost to 2 decimal places textinfo='percent+label', hoverinfo='label+percent+text' # This tells Plotly to show the label, percent, and custom text (cost) on hover ) st.plotly_chart(cost_pie_chart, use_container_width=True) # API version usage percentages pie chart st.subheader('Runs by API Version') api_versions = list(expense_summary['api_version_percentages'].keys()) percentages = [expense_summary['api_version_percentages'][version] for version in api_versions] pie_chart = go.Figure(data=[go.Pie(labels=api_versions, values=percentages, hole=.3)]) pie_chart.update_layout(margin=dict(t=0, b=0, l=0, r=0)) pie_chart.update_traces(marker=dict(colors=colors),) st.plotly_chart(pie_chart, use_container_width=True) else: st.error('No expense report data available.') def sidebar_content(): if not os.path.exists(os.path.join(st.session_state.dir_home,'expense_report')): validate_dir(os.path.join(st.session_state.dir_home,'expense_report')) expense_report_path = os.path.join(st.session_state.dir_home, 'expense_report', 'expense_report.csv') if os.path.exists(expense_report_path): # File exists, proceed with summarization st.session_state.expense_summary, st.session_state.expense_report = summarize_expense_report(expense_report_path) render_expense_report_summary() else: # File does not exist, handle this case appropriately # For example, you could 
set the session state variables to None or an empty value st.session_state.expense_summary, st.session_state.expense_report = None, None st.header('Expense Report Summary') st.write('Available after first run...') def main(): with st.sidebar: sidebar_content() # Main App content_header() tab_settings, tab_prompt, tab_domain, tab_component, tab_processing, tab_private, tab_delete = st.tabs(["Project Settings", "Prompt Builder", "Domain Knowledge","Component Detector", "Processing Options", "API Keys", "Space-Saver"]) with tab_settings: content_tab_settings() with tab_prompt: if st.button("Build Custom LLM Prompt"): st.session_state.proceed_to_build_llm_prompt = True st.rerun() with tab_component: content_tab_component() with tab_domain: content_tab_domain() with tab_processing: content_tab_processing() with tab_private: if st.button("Edit API Keys"): st.session_state.proceed_to_private = True st.rerun() with tab_delete: create_space_saver() st.set_page_config(layout="wide", page_icon='img/icon.ico', page_title='VoucherVision') # Default YAML file path if 'config' not in st.session_state:
PROMPTS_THAT_NEED_DOMAIN_KNOWLEDGE = ["Version 1","Version 1 PaLM 2"] COLORS_EXPENSE_REPORT = { 'GPT_4': '#8fff66', # Bright Green 'GPT_3_5': '#006400', # Dark Green 'PALM2': '#66a8ff' # blue } class ProgressReport: def __init__(self, overall_bar, batch_bar, text_overall, text_batch): self.overall_bar = overall_bar self.batch_bar = batch_bar self.text_overall = text_overall self.text_batch = text_batch self.current_overall_step = 0 self.total_overall_steps = 20 # number of major steps in machine function self.current_batch = 0 self.total_batches = 20 def update_overall(self, step_name=""): self.current_overall_step += 1 self.overall_bar.progress(self.current_overall_step / self.total_overall_steps) self.text_overall.text(step_name) def update_batch(self, step_name=""): self.current_batch += 1 self.batch_bar.progress(self.current_batch / self.total_batches) self.text_batch.text(step_name) def set_n_batches(self, n_batches): self.total_batches = n_batches def set_n_overall(self, total_overall_steps): self.current_overall_step = 0 self.overall_bar.progress(0) self.total_overall_steps = total_overall_steps def reset_batch(self, step_name): self.current_batch = 0 self.batch_bar.progress(0) self.text_batch.text(step_name) def reset_overall(self, step_name): self.current_overall_step = 0 self.overall_bar.progress(0) self.text_overall.text(step_name) def get_n_images(self): return self.n_images def get_n_overall(self): return self.total_overall_steps def does_private_file_exist(): dir_home = os.path.dirname(os.path.dirname(__file__)) path_cfg_private = os.path.join(dir_home, 'PRIVATE_DATA.yaml') return os.path.exists(path_cfg_private) def setup_streamlit_config(dir_home): # Define the directory path and filename dir_path = os.path.join(dir_home, ".streamlit") file_path = os.path.join(dir_path, "config.toml") # Check if directory exists, if not create it if not os.path.exists(dir_path): os.makedirs(dir_path) # Create or modify the file with the provided content config_content = f""" [theme] base = "dark" primaryColor = "#00ff00" [server] enableStaticServing = false runOnSave = true port = 8524 """ with open(file_path, "w") as f: f.write(config_content.strip()) def display_scrollable_results(JSON_results, test_results, OPT2, OPT3): """ Display the results from JSON_results in a scrollable container. 
""" # Initialize the container con_results = st.empty() with con_results.container(): # Start the custom container for all the results results_html = """<div class='scrollable-results-container'>""" for idx, (test_name, _) in enumerate(sorted(test_results.items())): _, ind_opt1, ind_opt2, ind_opt3 = test_name.split('__') opt2_readable = "Use LeafMachine2" if OPT2[int(ind_opt2.split('-')[1])] else "Don't use LeafMachine2" opt3_readable = f"{OPT3[int(ind_opt3.split('-')[1])]}" if JSON_results[idx] is None: results_html += f"<p>None</p>" else: formatted_json = json.dumps(JSON_results[idx], indent=4, sort_keys=False) results_html += f"<pre>[{opt2_readable}] + [{opt3_readable}]<br/>{formatted_json}</pre>" # End the custom container results_html += """</div>""" # The CSS to make this container scrollable css = """ <style> .scrollable-results-container { overflow-y: auto; height: 600px; width: 100%; white-space: pre-wrap; # To wrap the content font-family: monospace; # To give the JSON a code-like appearance } </style> """ # Apply the CSS and then the results st.markdown(css, unsafe_allow_html=True) st.markdown(results_html, unsafe_allow_html=True) def refresh(): st.write('') def display_test_results(test_results, JSON_results, llm_version): if llm_version == 'gpt': OPT1, OPT2, OPT3 = TestOptionsGPT.get_options() elif llm_version == 'palm': OPT1, OPT2, OPT3 = TestOptionsPalm.get_options() else: raise widths = [1] * (len(OPT1) + 2) + [2] columns = st.columns(widths) with columns[0]: st.write("LeafMachine2") with columns[1]: st.write("Prompt") with columns[len(OPT1) + 2]: st.write("Scroll to See Last Transcription in Each Test") already_written = set() for test_name, result in sorted(test_results.items()): _, ind_opt1, _, _ = test_name.split('__') option_value = OPT1[int(ind_opt1.split('-')[1])] if option_value not in already_written: with columns[int(ind_opt1.split('-')[1]) + 2]: st.write(option_value) already_written.add(option_value) printed_options = set() with columns[-1]: display_scrollable_results(JSON_results, test_results, OPT2, OPT3) # Close the custom container st.write('</div>', unsafe_allow_html=True) for idx, (test_name, result) in enumerate(sorted(test_results.items())): _, ind_opt1, ind_opt2, ind_opt3 = test_name.split('__') opt2_readable = "Use LeafMachine2" if OPT2[int(ind_opt2.split('-')[1])] else "Don't use LeafMachine2" opt3_readable = f"{OPT3[int(ind_opt3.split('-')[1])]}" if (opt2_readable, opt3_readable) not in printed_options: with columns[0]: st.info(f"{opt2_readable}") st.write('---') with columns[1]: st.info(f"{opt3_readable}") st.write('---') printed_options.add((opt2_readable, opt3_readable)) with columns[int(ind_opt1.split('-')[1]) + 2]: if result: st.success(f"Test Passed") else: st.error(f"Test Failed") st.write('---') # success_count = sum(1 for result in test_results.values() if result) # failure_count = len(test_results) - success_count # proportional_rain("🥇", success_count, "💔", failure_count, font_size=72, falling_speed=5, animation_length="infinite") rain_emojis(test_results) def add_emoji_delay(): time.sleep(0.3) def rain_emojis(test_results): # test_results = { # 'test1': True, # Test passed # 'test2': True, # Test passed # 'test3': True, # Test passed # 'test4': False, # Test failed # 'test5': False, # Test failed # 'test6': False, # Test failed # 'test7': False, # Test failed # 'test8': False, # Test failed # 'test9': False, # Test failed # 'test10': False, # Test failed # } success_emojis = ["🥇", "🏆", "🍾", "🙌"] failure_emojis = ["💔", "😭"] success_count = 
sum(1 for result in test_results.values() if result) failure_count = len(test_results) - success_count chosen_emoji = random.choice(success_emojis) for _ in range(success_count): rain( emoji=chosen_emoji, font_size=72, falling_speed=4, animation_length=2, ) add_emoji_delay() chosen_emoji = random.choice(failure_emojis) for _ in range(failure_count): rain( emoji=chosen_emoji, font_size=72, falling_speed=5, animation_length=1, ) add_emoji_delay() def get_prompt_versions(LLM_version): yaml_files = [f for f in os.listdir(os.path.join(st.session_state.dir_home, 'custom_prompts')) if f.endswith('.yaml')] if LLM_version in ["gpt-4-1106-preview", "GPT 4", "GPT 3.5", "Azure GPT 4", "Azure GPT 3.5"]: versions = ["Version 1", "Version 1 No Domain Knowledge", "Version 2"] return (versions + yaml_files, "Version 2") elif LLM_version in ["PaLM 2",]: versions = ["Version 1 PaLM 2", "Version 1 PaLM 2 No Domain Knowledge", "Version 2 PaLM 2"] return (versions + yaml_files, "Version 2 PaLM 2") else: # Handle other cases or raise an error return (yaml_files, None) def get_private_file(): dir_home = os.path.dirname(os.path.dirname(__file__)) path_cfg_private = os.path.join(dir_home, 'PRIVATE_DATA.yaml') return get_cfg_from_full_path(path_cfg_private) def create_space_saver(): st.subheader("Space Saving Options") col_ss_1, col_ss_2 = st.columns([2,2]) with col_ss_1: st.write("Several folders are created and populated with data during the VoucherVision transcription process.") st.write("Below are several options that will allow you to automatically delete temporary files that you may not need for everyday operations.") st.write("VoucherVision creates the following folders. Folders marked with a :star: are required if you want to use VoucherVisionEditor for quality control.") st.write("`../[Run Name]/Archival_Components`") st.write("`../[Run Name]/Config_File`") st.write("`../[Run Name]/Cropped_Images` :star:") st.write("`../[Run Name]/Logs`") st.write("`../[Run Name]/Original_Images` :star:") st.write("`../[Run Name]/Transcription` :star:") with col_ss_2: st.session_state.config['leafmachine']['project']['delete_temps_keep_VVE'] = st.checkbox("Delete Temporary Files (KEEP files required for VoucherVisionEditor)", st.session_state.config['leafmachine']['project'].get('delete_temps_keep_VVE', False)) st.session_state.config['leafmachine']['project']['delete_all_temps'] = st.checkbox("Keep only the final transcription file", st.session_state.config['leafmachine']['project'].get('delete_all_temps', False),help="*WARNING:* This limits your ability to do quality assurance. 
This will delete all folders created by VoucherVision, leaving only the `transcription.xlsx` file.") # def create_private_file(): # st.session_state.proceed_to_main = False # if st.session_state.private_file: # cfg_private = get_private_file() # create_private_file_0(cfg_private) # else: # st.title("VoucherVision") # create_private_file_0() def create_private_file(): st.session_state.proceed_to_main = False st.title("VoucherVision") col_private,_= st.columns([12,2]) if st.session_state.private_file: cfg_private = get_private_file() else: cfg_private = {} cfg_private['openai'] = {} cfg_private['openai']['OPENAI_API_KEY'] ='' cfg_private['openai_azure'] = {} cfg_private['openai_azure']['openai_api_key'] = '' cfg_private['openai_azure']['api_version'] = '' cfg_private['openai_azure']['openai_api_base'] ='' cfg_private['openai_azure']['openai_organization'] ='' cfg_private['openai_azure']['openai_api_type'] ='' cfg_private['google_cloud'] = {} cfg_private['google_cloud']['path_json_file'] ='' cfg_private['google_palm'] = {} cfg_private['google_palm']['google_palm_api'] ='' with col_private: st.header("Set API keys") st.info("***Note:*** There is a known bug with tabs in Streamlit. If you update an input field it may take you back to the 'Project Settings' tab. Changes that you made are saved, it's just an annoying glitch. We are aware of this issue and will fix it as soon as we can.") st.warning("To commit changes to API keys you must press the 'Set API Keys' button at the bottom of the page.") st.write("Before using VoucherVision you must set your API keys. All keys are stored locally on your computer and are never made public.") st.write("API keys are stored in `../VoucherVision/PRIVATE_DATA.yaml`.") st.write("Deleting this file will allow you to reset API keys. Alternatively, you can edit the keys in the user interface.") st.write("Leave keys blank if you do not intend to use that service.") st.write("---") st.subheader("Google Vision (*Required*)") st.markdown("VoucherVision currently uses [Google Vision API](https://cloud.google.com/vision/docs/ocr) for OCR. Generating an API key for this is more involved than the others. [Please carefully follow the instructions outlined here to create and setup your account.](https://cloud.google.com/vision/docs/setup) ") st.markdown(""" Once your account is created, [visit this page](https://console.cloud.google.com) and create a project. Then follow these instructions: - **Select your Project**: If you have multiple projects, ensure you select the one where you've enabled the Vision API. - **Open the Navigation Menu**: Click on the hamburger menu (three horizontal lines) in the top left corner. - **Go to IAM & Admin**: In the navigation pane, hover over "IAM & Admin" and then click on "Service accounts." - **Locate Your Service Account**: Find the service account for which you wish to download the JSON key. If you haven't created a service account yet, you'll need to do so by clicking the "CREATE SERVICE ACCOUNT" button at the top. - **Download the JSON Key**: - Click on the three dots (actions menu) on the right side of your service account name. - Select "Manage keys." - In the pop-up window, click on the "ADD KEY" button and select "JSON." - The JSON key file will automatically be downloaded to your computer. - **Store Safely**: This file contains sensitive data that can be used to authenticate and bill your Google Cloud account. Never commit it to public repositories or expose it in any way. Always keep it safe and secure. 
""") with st.container(): c_in_ocr, c_button_ocr = st.columns([10,2]) with c_in_ocr: google_vision = st.text_input(label = 'Full path to Google Cloud JSON API key file', value = cfg_private['google_cloud'].get('path_json_file', ''), placeholder = 'e.g. C:/Documents/Secret_Files/google_API/application_default_credentials.json', help ="This API Key is in the form of a JSON file. Please save the JSON file in a safe directory. DO NOT store the JSON key inside of the VoucherVision directory.", type='password',key='924857298734590283750932809238') with c_button_ocr: st.empty() st.write("---") st.subheader("OpenAI") st.markdown("API key for first-party OpenAI API. Create an account with OpenAI [here](https://platform.openai.com/signup), then create an API key [here](https://platform.openai.com/account/api-keys).") with st.container(): c_in_openai, c_button_openai = st.columns([10,2]) with c_in_openai: openai_api_key = st.text_input("openai_api_key", cfg_private['openai'].get('OPENAI_API_KEY', ''), help='The actual API key. Likely to be a string of 2 character, a dash, and then a 48-character string: sk-XXXXXXXX...', placeholder = 'e.g. sk-XXXXXXXX...', type='password') with c_button_openai: st.empty() st.write("---") st.subheader("OpenAI - Azure") st.markdown("This version OpenAI relies on Azure servers directly as is intended for private enterprise instances of OpenAI's services, such as [UM-GPT](https://its.umich.edu/computing/ai). Administrators will provide you with the following information.") azure_openai_api_version = st.text_input("azure_openai_api_version", cfg_private['openai_azure'].get('api_version', ''), help='API Version e.g. "2023-05-15"', placeholder = 'e.g. 2023-05-15', type='password') azure_openai_api_key = st.text_input("azure_openai_api_key", cfg_private['openai_azure'].get('openai_api_key', ''), help='The actual API key. Likely to be a 32-character string', placeholder = 'e.g. 12333333333333333333333333333332', type='password') azure_openai_api_base = st.text_input("azure_openai_api_base", cfg_private['openai_azure'].get('openai_api_base', ''), help='The base url for the API e.g. "https://api.umgpt.umich.edu/azure-openai-api"', placeholder = 'e.g. https://api.umgpt.umich.edu/azure-openai-api', type='password') azure_openai_organization = st.text_input("azure_openai_organization", cfg_private['openai_azure'].get('openai_organization', ''), help='Your organization code. Likely a short string', placeholder = 'e.g. 123456', type='password') azure_openai_api_type = st.text_input("azure_openai_api_type", cfg_private['openai_azure'].get('openai_api_type', ''), help='The API type. Typically "azure"', placeholder = 'e.g. azure', type='password') with st.container(): c_in_azure, c_button_azure = st.columns([10,2]) with c_button_azure: st.empty() st.write("---") st.subheader("Google PaLM 2") st.markdown('Follow these [instructions](https://developers.generativeai.google/tutorials/setup) to generate an API key for PaLM 2. You may need to also activate an account with [MakerSuite](https://makersuite.google.com/app/apikey) and enable "early access."') with st.container(): c_in_palm, c_button_palm = st.columns([10,2]) with c_in_palm: google_palm = st.text_input("Google PaLM 2 API Key", cfg_private['google_palm'].get('google_palm_api', ''), help='The MakerSuite API key e.g. a 32-character string', placeholder='e.g. 
SATgthsykuE64FgrrrrEervr3S4455t_geyDeGq', type='password') with st.container(): with c_button_ocr: st.write("##") st.button("Test OCR", on_click=test_API, args=['google_vision',c_in_ocr, cfg_private,openai_api_key,azure_openai_api_version,azure_openai_api_key, azure_openai_api_base,azure_openai_organization,azure_openai_api_type,google_vision,google_palm]) with st.container(): with c_button_openai: st.write("##") st.button("Test OpenAI", on_click=test_API, args=['openai',c_in_openai, cfg_private,openai_api_key,azure_openai_api_version,azure_openai_api_key, azure_openai_api_base,azure_openai_organization,azure_openai_api_type,google_vision,google_palm]) with st.container(): with c_button_azure: st.write("##") st.button("Test Azure OpenAI", on_click=test_API, args=['azure_openai',c_in_azure, cfg_private,openai_api_key,azure_openai_api_version,azure_openai_api_key, azure_openai_api_base,azure_openai_organization,azure_openai_api_type,google_vision,google_palm]) with st.container(): with c_button_palm: st.write("##") st.button("Test PaLM 2", on_click=test_API, args=['palm',c_in_palm, cfg_private,openai_api_key,azure_openai_api_version,azure_openai_api_key, azure_openai_api_base,azure_openai_organization,azure_openai_api_type,google_vision,google_palm]) st.button("Set API Keys",type='primary', on_click=save_changes_to_API_keys, args=[cfg_private,openai_api_key,azure_openai_api_version,azure_openai_api_key, azure_openai_api_base,azure_openai_organization,azure_openai_api_type,google_vision,google_palm]) if st.button('Proceed to VoucherVision'): st.session_state.proceed_to_private = False st.session_state.proceed_to_main = True def test_API(api, message_loc, cfg_private,openai_api_key,azure_openai_api_version,azure_openai_api_key, azure_openai_api_base,azure_openai_organization,azure_openai_api_type,google_vision,google_palm): # Save the API keys save_changes_to_API_keys(cfg_private,openai_api_key,azure_openai_api_version,azure_openai_api_key,azure_openai_api_base,azure_openai_organization,azure_openai_api_type,google_vision,google_palm) with st.spinner('Performing validation checks...'): if api == 'google_vision': print("*** Google Vision OCR API Key ***") try: demo_config_path = os.path.join(st.session_state.dir_home,'demo','validation_configs','google_vision_ocr_test.yaml') demo_images_path = os.path.join(st.session_state.dir_home, 'demo', 'demo_images') demo_out_path = os.path.join(st.session_state.dir_home, 'demo', 'demo_output','run_name') create_google_ocr_yaml_config(demo_config_path, demo_images_path, demo_out_path) voucher_vision_OCR_test(demo_config_path, st.session_state.dir_home, None, demo_images_path) with message_loc: st.success("Google Vision OCR API Key Valid :white_check_mark:") return True except Exception as e: with message_loc: st.error(f"Google Vision OCR API Key Failed! 
{e}") return False elif api == 'openai': print("*** OpenAI API Key ***") try: if run_api_tests('openai'): with message_loc: st.success("OpenAI API Key Valid :white_check_mark:") else: with message_loc: st.error("OpenAI API Key Failed:exclamation:") return False except Exception as e: with message_loc: st.error(f"OpenAI API Key Failed:exclamation: {e}") elif api == 'azure_openai': print("*** Azure OpenAI API Key ***") try: if run_api_tests('azure_openai'): with message_loc: st.success("Azure OpenAI API Key Valid :white_check_mark:") else: with message_loc: st.error(f"Azure OpenAI API Key Failed:exclamation:") return False except Exception as e: with message_loc: st.error(f"Azure OpenAI API Key Failed:exclamation: {e}") elif api == 'palm': print("*** Google PaLM 2 API Key ***") try: if run_api_tests('palm'): with message_loc: st.success("Google PaLM 2 API Key Valid :white_check_mark:") else: with message_loc: st.error("Google PaLM 2 API Key Failed:exclamation:") return False except Exception as e: with message_loc: st.error(f"Google PaLM 2 API Key Failed:exclamation: {e}") def save_changes_to_API_keys(cfg_private,openai_api_key,azure_openai_api_version,azure_openai_api_key, azure_openai_api_base,azure_openai_organization,azure_openai_api_type,google_vision,google_palm): # Update the configuration dictionary with the new values cfg_private['openai']['OPENAI_API_KEY'] = openai_api_key cfg_private['openai_azure']['api_version'] = azure_openai_api_version cfg_private['openai_azure']['openai_api_key'] = azure_openai_api_key cfg_private['openai_azure']['openai_api_base'] = azure_openai_api_base cfg_private['openai_azure']['openai_organization'] = azure_openai_organization cfg_private['openai_azure']['openai_api_type'] = azure_openai_api_type cfg_private['google_cloud']['path_json_file'] = google_vision cfg_private['google_palm']['google_palm_api'] = google_palm # Call the function to write the updated configuration to the YAML file write_config_file(cfg_private, st.session_state.dir_home, filename="PRIVATE_DATA.yaml") st.session_state.private_file = does_private_file_exist() # Function to load a YAML file and update session_state def load_prompt_yaml(filename): with open(filename, 'r') as file: st.session_state['prompt_info'] = yaml.safe_load(file) st.session_state['prompt_author'] = st.session_state['prompt_info'].get('prompt_author', st.session_state['default_prompt_author']) st.session_state['prompt_author_institution'] = st.session_state['prompt_info'].get('prompt_author_institution', st.session_state['default_prompt_author_institution']) st.session_state['prompt_description'] = st.session_state['prompt_info'].get('prompt_description', st.session_state['default_prompt_description']) st.session_state['instructions'] = st.session_state['prompt_info'].get('instructions', st.session_state['default_instructions']) st.session_state['json_formatting_instructions'] = st.session_state['prompt_info'].get('json_formatting_instructions', st.session_state['default_json_formatting_instructions'] ) st.session_state['rules'] = st.session_state['prompt_info'].get('rules', {}) st.session_state['mapping'] = st.session_state['prompt_info'].get('mapping', {}) st.session_state['LLM'] = st.session_state['prompt_info'].get('LLM', 'gpt') # Placeholder: st.session_state['assigned_columns'] = list(chain.from_iterable(st.session_state['mapping'].values())) def save_prompt_yaml(filename): yaml_content = { 'prompt_author': st.session_state['prompt_author'], 'prompt_author_institution': 
st.session_state['prompt_author_institution'], 'prompt_description': st.session_state['prompt_description'], 'LLM': st.session_state['LLM'], 'instructions': st.session_state['instructions'], 'json_formatting_instructions': st.session_state['json_formatting_instructions'], 'rules': st.session_state['rules'], 'mapping': st.session_state['mapping'], } dir_prompt = os.path.join(st.session_state.dir_home, 'custom_prompts') filepath = os.path.join(dir_prompt, f"{filename}.yaml") with open(filepath, 'w') as file: yaml.safe_dump(dict(yaml_content), file, sort_keys=False) st.success(f"Prompt saved as '{filename}.yaml'.") def check_unique_mapping_assignments(): if len(st.session_state['assigned_columns']) != len(set(st.session_state['assigned_columns'])): st.error("Each column name must be assigned to only one category.") return False else: st.success("Mapping confirmed.") return True def check_prompt_yaml_filename(fname): # Check if the filename only contains letters, numbers, underscores, and dashes pattern = r'^[\w-]+$' # The \w matches any alphanumeric character and is equivalent to the character class [a-zA-Z0-9_]. # The hyphen - is literally matched. if re.match(pattern, fname): return True else: return False def btn_load_prompt(selected_yaml_file, dir_prompt): if selected_yaml_file: yaml_file_path = os.path.join(dir_prompt, selected_yaml_file) load_prompt_yaml(yaml_file_path) elif not selected_yaml_file: # Directly assigning default values since no file is selected st.session_state['prompt_info'] = {} st.session_state['prompt_author'] = st.session_state['default_prompt_author'] st.session_state['prompt_author_institution'] = st.session_state['default_prompt_author_institution'] st.session_state['prompt_description'] = st.session_state['default_prompt_description'] st.session_state['instructions'] = st.session_state['default_instructions'] st.session_state['json_formatting_instructions'] = st.session_state['default_json_formatting_instructions'] st.session_state['rules'] = {} st.session_state['LLM'] = 'gpt' st.session_state['assigned_columns'] = [] st.session_state['prompt_info'] = { 'prompt_author': st.session_state['prompt_author'], 'prompt_author_institution': st.session_state['prompt_author_institution'], 'prompt_description': st.session_state['prompt_description'], 'instructions': st.session_state['instructions'], 'json_formatting_instructions': st.session_state['json_formatting_instructions'], 'rules': st.session_state['rules'], 'mapping': st.session_state['mapping'], 'LLM': st.session_state['LLM'] } def build_LLM_prompt_config(): st.session_state['assigned_columns'] = [] st.session_state['default_prompt_author'] = 'unknown' st.session_state['default_prompt_author_institution'] = 'unknown' st.session_state['default_prompt_description'] = 'unknown' st.session_state['default_instructions'] = """1. Refactor the unstructured OCR text into a dictionary based on the JSON structure outlined below. 2. You should map the unstructured OCR text to the appropriate JSON key and then populate the field based on its rules. 3. Some JSON key fields are permitted to remain empty if the corresponding information is not found in the unstructured OCR text. 4. Ignore any information in the OCR text that doesn't fit into the defined JSON structure. 5. Duplicate dictionary fields are not allowed. 6. Ensure that all JSON keys are in lowercase. 7. Ensure that new JSON field values follow sentence case capitalization. 8. 
Ensure all key-value pairs in the JSON dictionary strictly adhere to the format and data types specified in the template. 9. Ensure the output JSON string is valid JSON format. It should not have trailing commas or unquoted keys. 10. Only return a JSON dictionary represented as a string. You should not explain your answer.""" st.session_state['default_json_formatting_instructions'] = """The next section of instructions outlines how to format the JSON dictionary. The keys are the same as those of the final formatted JSON object. For each key there is a format requirement that specifies how to transcribe the information for that key. The possible formatting options are: 1. "verbatim transcription" - field is populated with verbatim text from the unformatted OCR. 2. "spell check transcription" - field is populated with spelling corrected text from the unformatted OCR. 3. "boolean yes no" - field is populated with only yes or no. 4. "boolean 1 0" - field is populated with only 1 or 0. 5. "integer" - field is populated with only an integer. 6. "[list]" - field is populated from one of the values in the list. 7. "yyyy-mm-dd" - field is populated with a date in the format year-month-day. The desired null value is also given. Populate the field with the null value of the information for that key is not present in the unformatted OCR text.""" # Start building the Streamlit app col_prompt_main_left, ___, col_prompt_main_right = st.columns([6,1,3]) with col_prompt_main_left: st.title("Custom LLM Prompt Builder") st.subheader('About') st.write("This form allows you to craft a prompt for your specific task.") st.subheader('How it works') st.write("1. Edit this page until you are happy with your instructions. We recommend looking at the basic structure, writing down your prompt inforamtion in a Word document so that it does not randomly disappear, and then copying and pasting that info into this form once your whole prompt structure is defined.") st.write("2. After you enter all of your prompt instructions, click 'Save' and give your file a name.") st.write("3. This file will be saved as a yaml configuration file in the `..VoucherVision/custom_prompts` folder.") st.write("4. When you go back the main VoucherVision page you will now see your custom prompt available in the 'Prompt Version' dropdown menu.") st.write("5. Select your custom prompt. Note, your prompt will only be available for the LLM that you set when filling out the form below.") dir_prompt = os.path.join(st.session_state.dir_home, 'custom_prompts') yaml_files = [f for f in os.listdir(dir_prompt) if f.endswith('.yaml')] col_load_text, col_load_btn = st.columns([8,2]) with col_load_text: # Dropdown for selecting a YAML file selected_yaml_file = st.selectbox('Select a prompt YAML file to load:', [''] + yaml_files) with col_load_btn: st.write('##') # Button to load the selected prompt st.button('Load Prompt', on_click=btn_load_prompt, args=[selected_yaml_file, dir_prompt]) # Prompt Author Information st.header("Prompt Author Information") st.write("We value community contributions! Please provide your name(s) (or pseudonym if you prefer) for credit. If you leave this field blank, it will say 'unknown'.") st.session_state['prompt_author'] = st.text_input("Enter names of prompt author(s)", value=st.session_state['default_prompt_author']) st.write("Please provide your institution name. 
If you leave this field blank, it will say 'unknown'.") st.session_state['prompt_author_institution'] = st.text_input("Enter name of institution", value=st.session_state['default_prompt_author_institution']) st.write("Please provide a description of your prompt and its intended task. Is it designed for a specific collection? Taxa? Database structure?") st.session_state['prompt_description'] = st.text_input("Enter description of prompt", value=st.session_state['default_prompt_description']) st.write('---') st.header("Set LLM Model Type") # Define the options for the dropdown llm_options = ['gpt', 'palm'] # Create the dropdown and set the value to session_state['LLM'] st.write("Which LLM is this prompt designed for? This will not restrict its use to a specific LLM, but some prompts will behave in different ways across models.") st.write("For example, VoucherVision will automatically add multiple JSON formatting blocks to all PaLM 2 prompts to coax PaLM 2 to return a valid JSON object.") st.session_state['LLM'] = st.selectbox('Set LLM', llm_options, index=llm_options.index(st.session_state.get('LLM', 'gpt'))) st.write('---') # Instructions Section st.header("Instructions") st.write("These are the general instructions that guide the LLM through the transcription task. We recommend using the default instructions unless you have a specific reason to change them.") st.session_state['instructions'] = st.text_area("Enter instructions", value=st.session_state['default_instructions'].strip(), height=350, disabled=True) st.write('---') # Column Instructions Section st.header("JSON Formatting Instructions") st.write("The following section tells the LLM how we want to structure the JSON dictionary. We do not recommend changing this section because it would likely result in unstable and inconsistent behavior.") st.session_state['json_formatting_instructions'] = st.text_area("Enter column instructions", value=st.session_state['default_json_formatting_instructions'], height=350, disabled=True) st.write('---') col_left, col_right = st.columns([6,4]) with col_left: st.subheader('Add/Edit Columns') # Initialize rules in session state if not already present if 'rules' not in st.session_state or not st.session_state['rules']: st.session_state['rules']['Dictionary'] = { "catalog_number": { "format": "verbatim transcription", "null_value": "", "description": "The barcode identifier, typically a number with at least 6 digits, but fewer than 30 digits." } } st.session_state['rules']['SpeciesName'] = { "taxonomy": ["Genus_species"] } # Layout for adding a new column name # col_text, col_textbtn = st.columns([8, 2]) # with col_text: new_column_name = st.text_input("Enter a new column name:") # with col_textbtn: # st.write('##') if st.button("Add New Column") and new_column_name: if new_column_name not in st.session_state['rules']['Dictionary']: st.session_state['rules']['Dictionary'][new_column_name] = {"format": "", "null_value": "", "description": ""} st.success(f"New column '{new_column_name}' added. Now you can edit its properties.") else: st.error("Column name already exists. 
Please enter a unique column name.") # Get columns excluding the protected "catalog_number" st.write('#') editable_columns = [col for col in st.session_state['rules']['Dictionary'] if col != "catalog_number"] column_name = st.selectbox("Select a column to edit:", [""] + editable_columns) # Handle rules editing current_rule = st.session_state['rules']['Dictionary'].get(column_name, { "format": "", "null_value": "", "description": "" }) if 'selected_column' not in st.session_state: st.session_state['selected_column'] = column_name # Form for input fields with st.form(key='rule_form'): format_options = ["verbatim transcription", "spell check transcription", "boolean yes no", "boolean 1 0", "integer", "[list]", "yyyy-mm-dd"] current_rule["format"] = st.selectbox("Format:", format_options, index=format_options.index(current_rule["format"]) if current_rule["format"] else 0) current_rule["null_value"] = st.text_input("Null value:", value=current_rule["null_value"]) current_rule["description"] = st.text_area("Description:", value=current_rule["description"]) commit_button = st.form_submit_button("Commit Column") default_rule = { "format": format_options[0], # default format "null_value": "", # default null value "description": "", # default description } if st.session_state['selected_column'] != column_name: # Column has changed. Update the session_state selected column. st.session_state['selected_column'] = column_name # Reset the current rule to the default for this new column, or a blank rule if not set. current_rule = st.session_state['rules']['Dictionary'].get(column_name, default_rule.copy()) # Handle commit action if commit_button and column_name: # Commit the rules to the session state. st.session_state['rules']['Dictionary'][column_name] = current_rule.copy() st.success(f"Column '{column_name}' added/updated in rules.") # Force the form to reset by clearing the fields from the session state st.session_state.pop('selected_column', None) # Clear the selected column to force reset # st.session_state['rules'][column_name] = current_rule # st.success(f"Column '{column_name}' added/updated in rules.") # # Reset current_rule to default values for the next input # current_rule["format"] = default_rule["format"] # current_rule["null_value"] = default_rule["null_value"] # current_rule["description"] = default_rule["description"] # # To ensure that the form fields are reset, we can clear them from the session state # for key in current_rule.keys(): # st.session_state[key] = default_rule[key] # Layout for removing an existing column # del_col, del_colbtn = st.columns([8, 2]) # with del_col: delete_column_name = st.selectbox("Select a column to delete:", [""] + editable_columns, key='delete_column') # with del_colbtn: # st.write('##') if st.button("Delete Column") and delete_column_name: del st.session_state['rules'][delete_column_name] st.success(f"Column '{delete_column_name}' removed from rules.") with col_right: # Display the current state of the JSON rules st.subheader('Formatted Columns') st.json(st.session_state['rules']['Dictionary']) # st.subheader('All Prompt Info') # st.json(st.session_state['prompt_info']) st.write('---') col_left_mapping, col_right_mapping = st.columns([6,4]) with col_left_mapping: st.header("Mapping") st.write("Assign each column name to a single category.") st.session_state['refresh_mapping'] = False # Dynamically create a list of all column names that can be assigned # This assumes that the column names are the keys in the dictionary under 'rules' all_column_names = 
list(st.session_state['rules']['Dictionary'].keys()) categories = ['TAXONOMY', 'GEOGRAPHY', 'LOCALITY', 'COLLECTING', 'MISCELLANEOUS'] if ('mapping' not in st.session_state) or (st.session_state['mapping'] == {}): st.session_state['mapping'] = {category: [] for category in categories} for category in categories: # Filter out the already assigned columns available_columns = [col for col in all_column_names if col not in st.session_state['assigned_columns'] or col in st.session_state['mapping'].get(category, [])] # Ensure the current mapping is a subset of the available options current_mapping = [col for col in st.session_state['mapping'].get(category, []) if col in available_columns] # Provide a safe default if the current mapping is empty or contains invalid options safe_default = current_mapping if all(col in available_columns for col in current_mapping) else [] # Create a multi-select widget for the category with a safe default selected_columns = st.multiselect( f"Select columns for {category}:", available_columns, default=safe_default, key=f"mapping_{category}" ) # Update the assigned_columns based on the selections for col in current_mapping: if col not in selected_columns and col in st.session_state['assigned_columns']: st.session_state['assigned_columns'].remove(col) st.session_state['refresh_mapping'] = True for col in selected_columns: if col not in st.session_state['assigned_columns']: st.session_state['assigned_columns'].append(col) st.session_state['refresh_mapping'] = True # Update the mapping in session state when there's a change st.session_state['mapping'][category] = selected_columns if st.session_state['refresh_mapping']: st.session_state['refresh_mapping'] = False # Button to confirm and save the mapping configuration if st.button('Confirm Mapping'): if check_unique_mapping_assignments(): # Proceed with further actions since the mapping is confirmed and unique pass with col_right_mapping: # Display the current state of the JSON rules st.subheader('Formatted Column Maps') st.json(st.session_state['mapping']) col_left_save, col_right_save = st.columns([6,4]) with col_left_save: # Input for new file name new_filename = st.text_input("Enter filename to save your prompt as a configuration YAML:",placeholder='my_prompt_name') # Button to save the new YAML file if st.button('Save YAML', type='primary'): if new_filename: if check_unique_mapping_assignments(): if check_prompt_yaml_filename(new_filename): save_prompt_yaml(new_filename) else: st.error("File name can only contain letters, numbers, underscores, and dashes. Cannot contain spaces.") else: st.error("Mapping contains an error. 
Make sure that each column is assigned to only ***one*** category.") else: st.error("Please enter a filename.") if st.button('Exit'): st.session_state.proceed_to_build_llm_prompt = False st.session_state.proceed_to_main = True st.rerun() with col_prompt_main_right: st.subheader('All Prompt Components') st.session_state['prompt_info'] = { 'prompt_author': st.session_state['prompt_author'], 'prompt_author_institution': st.session_state['prompt_author_institution'], 'prompt_description': st.session_state['prompt_description'], 'LLM': st.session_state['LLM'], 'instructions': st.session_state['instructions'], 'json_formatting_instructions': st.session_state['json_formatting_instructions'], 'rules': st.session_state['rules'], 'mapping': st.session_state['mapping'], } st.json(st.session_state['prompt_info']) def show_header_welcome(): st.session_state.logo_path = os.path.join(st.session_state.dir_home, 'img','logo.png') st.session_state.logo = Image.open(st.session_state.logo_path) st.image(st.session_state.logo, width=250) def determine_n_images(): try: # Check if 'dir_uploaded_images' key exists and it is not empty if 'dir_uploaded_images' in st and st['dir_uploaded_images']: dir_path = st['dir_uploaded_images'] # This would be the path to the directory return len([f for f in os.listdir(dir_path) if os.path.isfile(os.path.join(dir_path, f))]) else: return None except: return None def content_header(): col_run_1, col_run_2, col_run_3 = st.columns([4,4,2]) col_test = st.container() st.write("") st.write("") st.write("") st.write("") st.subheader("Overall Progress") col_run_info_1 = st.columns([1])[0] st.write("") st.write("") st.write("") st.write("") st.header("Configuration Settings") with col_run_info_1: # Progress # Progress # st.subheader('Project') # bar = st.progress(0) # new_text = st.empty() # Placeholder for current step name # progress_report = ProgressReportVV(bar, new_text, n_images=10) # Progress overall_progress_bar = st.progress(0) text_overall = st.empty() # Placeholder for current step name st.subheader('Transcription Progress') batch_progress_bar = st.progress(0) text_batch = st.empty() # Placeholder for current step name progress_report = ProgressReport(overall_progress_bar, batch_progress_bar, text_overall, text_batch) st.info("***Note:*** There is a known bug with tabs in Streamlit. If you update an input field it may take you back to the 'Project Settings' tab. Changes that you made are saved, it's just an annoying glitch. We are aware of this issue and will fix it as soon as we can.") st.write("If you use VoucherVision frequently, you can change the default values that are auto-populated in the form below. In a text editor or IDE, edit the first few rows in the file `../VoucherVision/vouchervision/VoucherVision_Config_Builder.py`") with col_run_1: show_header_welcome() st.subheader('Run VoucherVision') N_STEPS = 6 if determine_n_images(): st.session_state['processing_add_on'] = f" {determine_n_images()} Images" else: st.session_state['processing_add_on'] = '' if check_if_usable(): if st.button(f"Start Processing{st.session_state['processing_add_on']}", type='primary'): # Define number of overall steps progress_report.set_n_overall(N_STEPS) progress_report.update_overall(f"Starting VoucherVision...") # First, write the config file. 
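# (Editor's descriptive comments, not in the original source.)
# write_config_file() below saves st.session_state.config as VoucherVision.yaml in dir_home;
# voucher_vision() is then called with cfg_file_path=None and cfg_test=None, so it presumably
# reloads that freshly written file. It returns (last_JSON_response, total_cost), which the
# code that follows formats for display (cost banner, last JSON object, st.balloons()).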
write_config_file(st.session_state.config, st.session_state.dir_home, filename="VoucherVision.yaml") path_custom_prompts = os.path.join(st.session_state.dir_home,'custom_prompts',st.session_state.config['leafmachine']['project']['prompt_version']) # Call the machine function. last_JSON_response, total_cost = voucher_vision(None, st.session_state.dir_home, path_custom_prompts, None, progress_report,path_api_cost=os.path.join(st.session_state.dir_home,'api_cost','api_cost.yaml'), is_real_run=True) if total_cost: st.success(f":money_with_wings: This run cost :heavy_dollar_sign:{total_cost:.4f}") # Format the JSON string for display. if last_JSON_response is None: st.markdown(f"Last JSON object in the batch: NONE") else: try: formatted_json = json.dumps(json.loads(last_JSON_response), indent=4, sort_keys=False) except: formatted_json = json.dumps(last_JSON_response, indent=4, sort_keys=False) st.markdown(f"Last JSON object in the batch:\n```\n{formatted_json}\n```") st.balloons() else: st.button("Start Processing", type='primary', disabled=True) st.error(":heavy_exclamation_mark: Required API keys not set. Please visit the 'API Keys' tab and set the Google Vision OCR API key and at least one LLM key.") st.button("Refresh", on_click=refresh) with col_run_2: if st.button("Test GPT"): progress_report.set_n_overall(TestOptionsGPT.get_length()) test_results, JSON_results = run_demo_tests_GPT(progress_report) with col_test: display_test_results(test_results, JSON_results, 'gpt') st.balloons() if st.button("Test PaLM2"): progress_report.set_n_overall(TestOptionsPalm.get_length()) test_results, JSON_results = run_demo_tests_Palm(progress_report) with col_test: display_test_results(test_results, JSON_results, 'palm') st.balloons() with col_run_3: st.subheader('Check GPU') if st.button("GPU"): success, info = test_GPU() if success: st.balloons() for message in info: st.success(message) else: for message in info: st.error(message) def content_tab_settings(): st.header('Project') col_project_1, col_project_2 = st.columns([4,2]) st.write("---") st.header('Input Images') col_local_1, col_local_2 = st.columns([4,2]) st.write("---") st.header('LeafMachine2 Label Collage') col_cropped_1, col_cropped_2 = st.columns([4,4]) st.write("---") st.header('OCR Overlay Image') col_ocr_1, col_ocr_2 = st.columns([4,4]) os.path.join(st.session_state.dir_home, ) ### Project with col_project_1: st.session_state.config['leafmachine']['project']['run_name'] = st.text_input("Run name", st.session_state.config['leafmachine']['project'].get('run_name', '')) st.session_state.config['leafmachine']['project']['dir_output'] = st.text_input("Output directory", st.session_state.config['leafmachine']['project'].get('dir_output', '')) ### Input Images Local with col_local_1: st.session_state.config['leafmachine']['project']['dir_images_local'] = st.text_input("Input images directory", st.session_state.config['leafmachine']['project'].get('dir_images_local', '')) st.session_state.config['leafmachine']['project']['continue_run_from_partial_xlsx'] = st.text_input("Continue run from partially completed project XLSX", st.session_state.config['leafmachine']['project'].get('continue_run_from_partial_xlsx', ''), disabled=True) st.write("---") st.subheader('LLM Version') st.markdown( """ ***Note:*** GPT-4 is 20x more expensive than GPT-3.5 """ ) st.session_state.config['leafmachine']['LLM_version'] = st.selectbox("LLM version", ["gpt-4-1106-preview", "GPT 4", "GPT 3.5", "Azure GPT 4", "Azure GPT 3.5", "PaLM 2"], index=["gpt-4-1106-preview", "GPT 
4", "GPT 3.5", "Azure GPT 4", "Azure GPT 3.5", "PaLM 2"].index(st.session_state.config['leafmachine'].get('LLM_version', 'Azure GPT 4'))) st.write("---") st.subheader('Prompt Version') versions, default_version = get_prompt_versions(st.session_state.config['leafmachine']['LLM_version']) if versions: selected_version = st.session_state.config['leafmachine']['project'].get('prompt_version', default_version) if selected_version not in versions: selected_version = default_version st.session_state.config['leafmachine']['project']['prompt_version'] = st.selectbox("Prompt Version", versions, index=versions.index(selected_version)) with col_cropped_1: default_crops = st.session_state.config['leafmachine']['cropped_components'].get('save_cropped_annotations', ['leaf_whole']) st.write("Prior to transcription, use LeafMachine2 to crop all labels from input images to create label collages for each specimen image. (Requires GPU)") st.session_state.config['leafmachine']['use_RGB_label_images'] = st.checkbox("Use LeafMachine2 label collage for transcriptions", st.session_state.config['leafmachine'].get('use_RGB_label_images', False)) st.session_state.config['leafmachine']['cropped_components']['save_cropped_annotations'] = st.multiselect("Components to crop", ['ruler', 'barcode','label', 'colorcard','map','envelope','photo','attached_item','weights', 'leaf_whole', 'leaf_partial', 'leaflet', 'seed_fruit_one', 'seed_fruit_many', 'flower_one', 'flower_many', 'bud','specimen','roots','wood'],default=default_crops) with col_cropped_2: ba = os.path.join(st.session_state.dir_home,'demo', 'ba','ba2.png') image = Image.open(ba) st.image(image, caption='LeafMachine2 Collage', output_format = "PNG") with col_ocr_1: st.write('This will plot bounding boxes around all text that Google Vision was able to detect. If there are no boxes around text, then the OCR failed, so that missing text will not be seen by the LLM when it is creating the JSON object. 
The created image will be viewable in the VoucherVisionEditor.') st.session_state.config['leafmachine']['do_create_OCR_helper_image'] = st.checkbox("Create image showing an overlay of the OCR detections", st.session_state.config['leafmachine'].get('do_create_OCR_helper_image', False)) with col_ocr_2: ocr = os.path.join(st.session_state.dir_home,'demo', 'ba','ocr.png') image_ocr = Image.open(ocr) st.image(image_ocr, caption='OCR Overlay Images', output_format = "PNG") def content_tab_component(): st.header('Archival Components') ACD_version = st.selectbox("Archival Component Detector (ACD) Version", ["Version 2.1", "Version 2.2"]) ACD_confidence_default = int(st.session_state.config['leafmachine']['archival_component_detector']['minimum_confidence_threshold'] * 100) ACD_confidence = st.number_input("ACD Confidence Threshold (%)", min_value=0, max_value=100,value=ACD_confidence_default) st.session_state.config['leafmachine']['archival_component_detector']['minimum_confidence_threshold'] = float(ACD_confidence/100) st.session_state.config['leafmachine']['archival_component_detector']['do_save_prediction_overlay_images'] = st.checkbox("Save Archival Prediction Overlay Images", st.session_state.config['leafmachine']['archival_component_detector'].get('do_save_prediction_overlay_images', True)) st.session_state.config['leafmachine']['archival_component_detector']['ignore_objects_for_overlay'] = st.multiselect("Hide Archival Components in Prediction Overlay Images", ['ruler', 'barcode','label', 'colorcard','map','envelope','photo','attached_item','weights',], default=[]) # Depending on the selected version, set the configuration if ACD_version == "Version 2.1": st.session_state.config['leafmachine']['archival_component_detector']['detector_type'] = 'Archival_Detector' st.session_state.config['leafmachine']['archival_component_detector']['detector_version'] = 'PREP_final' st.session_state.config['leafmachine']['archival_component_detector']['detector_iteration'] = 'PREP_final' st.session_state.config['leafmachine']['archival_component_detector']['detector_weights'] = 'best.pt' elif ACD_version == "Version 2.2": #TODO update this to version 2.2 st.session_state.config['leafmachine']['archival_component_detector']['detector_type'] = 'Archival_Detector' st.session_state.config['leafmachine']['archival_component_detector']['detector_version'] = 'PREP_final' st.session_state.config['leafmachine']['archival_component_detector']['detector_iteration'] = 'PREP_final' st.session_state.config['leafmachine']['archival_component_detector']['detector_weights'] = 'best.pt' def content_tab_processing(): st.header('Processing Options') col_processing_1, col_processing_2 = st.columns([2,2,]) with col_processing_1: st.subheader('Compute Options') st.session_state.config['leafmachine']['project']['num_workers'] = st.number_input("Number of CPU workers", value=st.session_state.config['leafmachine']['project'].get('num_workers', 1), disabled=True) st.session_state.config['leafmachine']['project']['batch_size'] = st.number_input("Batch size", value=st.session_state.config['leafmachine']['project'].get('batch_size', 500), help='Sets the batch size for the LeafMachine2 cropping. 
If computer RAM is filled, lower this value to ~100.') with col_processing_2: st.subheader('Misc') st.session_state.config['leafmachine']['project']['prefix_removal'] = st.text_input("Remove prefix from catalog number", st.session_state.config['leafmachine']['project'].get('prefix_removal', '')) st.session_state.config['leafmachine']['project']['suffix_removal'] = st.text_input("Remove suffix from catalog number", st.session_state.config['leafmachine']['project'].get('suffix_removal', '')) st.session_state.config['leafmachine']['project']['catalog_numerical_only'] = st.checkbox("Require 'Catalog Number' to be numerical only", st.session_state.config['leafmachine']['project'].get('catalog_numerical_only', True)) ### Logging and Image Validation - col_v1 st.header('Logging and Image Validation') col_v1, col_v2 = st.columns(2) with col_v1: st.session_state.config['leafmachine']['do']['check_for_illegal_filenames'] = st.checkbox("Check for illegal filenames", st.session_state.config['leafmachine']['do'].get('check_for_illegal_filenames', True)) st.session_state.config['leafmachine']['do']['check_for_corrupt_images_make_vertical'] = st.checkbox("Check for corrupt images", st.session_state.config['leafmachine']['do'].get('check_for_corrupt_images_make_vertical', True)) st.session_state.config['leafmachine']['print']['verbose'] = st.checkbox("Print verbose", st.session_state.config['leafmachine']['print'].get('verbose', True)) st.session_state.config['leafmachine']['print']['optional_warnings'] = st.checkbox("Show optional warnings", st.session_state.config['leafmachine']['print'].get('optional_warnings', True)) with col_v2: log_level = st.session_state.config['leafmachine']['logging'].get('log_level', None) log_level_display = log_level if log_level is not None else 'default' selected_log_level = st.selectbox("Logging Level", ['default', 'DEBUG', 'INFO', 'WARNING', 'ERROR'], index=['default', 'DEBUG', 'INFO', 'WARNING', 'ERROR'].index(log_level_display)) if selected_log_level == 'default': st.session_state.config['leafmachine']['logging']['log_level'] = None else: st.session_state.config['leafmachine']['logging']['log_level'] = selected_log_level def content_tab_domain(): st.header('Embeddings Database') col_emb_1, col_emb_2 = st.columns([4,2]) with col_emb_1: st.markdown( """ VoucherVision includes the option of using domain knowledge inside of the dynamically generated prompts. The OCR text is queried against a database of existing label transcriptions. The most similar existing transcriptions act as an example of what the LLM should emulate and are shown to the LLM as JSON objects. VoucherVision uses cosine similarity search to return the most similar existing transcription. - Note: Using domain knowledge may increase the chance that foreign text is included in the final transcription - Disabling this feature will show the LLM multiple examples of an empty JSON skeleton structure instead - Enabling this option requires a GPU with at least 8GB of VRAM - The domain knowledge files can be located in the directory "../VoucherVision/domain_knowledge". On first run the embeddings database must be created, which takes time. If the database creation runs each time you use VoucherVision, then something is wrong. 
""" ) st.write(f"Domain Knowledge is only available for the following prompts:") for available_prompts in PROMPTS_THAT_NEED_DOMAIN_KNOWLEDGE: st.markdown(f"- {available_prompts}") if st.session_state.config['leafmachine']['project']['prompt_version'] in PROMPTS_THAT_NEED_DOMAIN_KNOWLEDGE: st.session_state.config['leafmachine']['project']['use_domain_knowledge'] = st.checkbox("Use domain knowledge", True, disabled=True) else: st.session_state.config['leafmachine']['project']['use_domain_knowledge'] = st.checkbox("Use domain knowledge", False, disabled=True) st.write("") if st.session_state.config['leafmachine']['project']['use_domain_knowledge']: st.session_state.config['leafmachine']['project']['embeddings_database_name'] = st.text_input("Embeddings database name (only use underscores)", st.session_state.config['leafmachine']['project'].get('embeddings_database_name', '')) st.session_state.config['leafmachine']['project']['build_new_embeddings_database'] = st.checkbox("Build *new* embeddings database", st.session_state.config['leafmachine']['project'].get('build_new_embeddings_database', False)) st.session_state.config['leafmachine']['project']['path_to_domain_knowledge_xlsx'] = st.text_input("Path to domain knowledge CSV file (will be used to create new embeddings database)", st.session_state.config['leafmachine']['project'].get('path_to_domain_knowledge_xlsx', '')) else: st.session_state.config['leafmachine']['project']['embeddings_database_name'] = st.text_input("Embeddings database name (only use underscores)", st.session_state.config['leafmachine']['project'].get('embeddings_database_name', ''), disabled=True) st.session_state.config['leafmachine']['project']['build_new_embeddings_database'] = st.checkbox("Build *new* embeddings database", st.session_state.config['leafmachine']['project'].get('build_new_embeddings_database', False), disabled=True) st.session_state.config['leafmachine']['project']['path_to_domain_knowledge_xlsx'] = st.text_input("Path to domain knowledge CSV file (will be used to create new embeddings database)", st.session_state.config['leafmachine']['project'].get('path_to_domain_knowledge_xlsx', ''), disabled=True) def render_expense_report_summary(): expense_summary = st.session_state.expense_summary expense_report = st.session_state.expense_report st.header('Expense Report Summary') if expense_summary: st.metric(label="Total Cost", value=f"${round(expense_summary['total_cost_sum'], 4):,}") col1, col2 = st.columns(2) # Run count and total costs with col1: st.metric(label="Run Count", value=expense_summary['run_count']) st.metric(label="Tokens In", value=f"{expense_summary['tokens_in_sum']:,}") # Token information with col2: st.metric(label="Total Images", value=expense_summary['n_images_sum']) st.metric(label="Tokens Out", value=f"{expense_summary['tokens_out_sum']:,}") # Calculate cost proportion per image for each API version st.subheader('Average Cost per Image by API Version') cost_labels = [] cost_values = [] total_images = 0 cost_per_image_dict = {} # Iterate through the expense report to accumulate costs and image counts for index, row in expense_report.iterrows(): api_version = row['api_version'] total_cost = row['total_cost'] n_images = row['n_images'] total_images += n_images # Keep track of total images processed if api_version not in cost_per_image_dict: cost_per_image_dict[api_version] = {'total_cost': 0, 'n_images': 0} cost_per_image_dict[api_version]['total_cost'] += total_cost cost_per_image_dict[api_version]['n_images'] += n_images api_versions = 
list(cost_per_image_dict.keys()) colors = [COLORS_EXPENSE_REPORT[version] if version in COLORS_EXPENSE_REPORT else '#DDDDDD' for version in api_versions] # Calculate the cost per image for each API version for version, cost_data in cost_per_image_dict.items(): total_cost = cost_data['total_cost'] n_images = cost_data['n_images'] # Calculate the cost per image for this version cost_per_image = total_cost / n_images if n_images > 0 else 0 cost_labels.append(version) cost_values.append(cost_per_image) # Generate the pie chart cost_pie_chart = go.Figure(data=[go.Pie(labels=cost_labels, values=cost_values, hole=.3)]) # Update traces for custom text in hoverinfo, displaying cost with a dollar sign and two decimal places cost_pie_chart.update_traces( marker=dict(colors=colors), text=[f"${value:.2f}" for value in cost_values], # Formats the cost as a string with a dollar sign and two decimals textinfo='percent+label', hoverinfo='label+percent+text' # Adds custom text (formatted cost) to the hover information ) st.plotly_chart(cost_pie_chart, use_container_width=True) st.subheader('Proportion of Total Cost by API Version') cost_labels = [] cost_proportions = [] total_cost_by_version = {} # Sum the total cost for each API version for index, row in expense_report.iterrows(): api_version = row['api_version'] total_cost = row['total_cost'] if api_version not in total_cost_by_version: total_cost_by_version[api_version] = 0 total_cost_by_version[api_version] += total_cost # Calculate the combined total cost for all versions combined_total_cost = sum(total_cost_by_version.values()) # Calculate the proportion of total cost for each API version for version, total_cost in total_cost_by_version.items(): proportion = (total_cost / combined_total_cost) * 100 if combined_total_cost > 0 else 0 cost_labels.append(version) cost_proportions.append(proportion) # Generate the pie chart cost_pie_chart = go.Figure(data=[go.Pie(labels=cost_labels, values=cost_proportions, hole=.3)]) # Update traces for custom text in hoverinfo cost_pie_chart.update_traces( marker=dict(colors=colors), text=[f"${cost:.2f}" for cost in total_cost_by_version.values()], # This will format the cost to 2 decimal places textinfo='percent+label', hoverinfo='label+percent+text' # This tells Plotly to show the label, percent, and custom text (cost) on hover ) st.plotly_chart(cost_pie_chart, use_container_width=True) # API version usage percentages pie chart st.subheader('Runs by API Version') api_versions = list(expense_summary['api_version_percentages'].keys()) percentages = [expense_summary['api_version_percentages'][version] for version in api_versions] pie_chart = go.Figure(data=[go.Pie(labels=api_versions, values=percentages, hole=.3)]) pie_chart.update_layout(margin=dict(t=0, b=0, l=0, r=0)) pie_chart.update_traces(marker=dict(colors=colors),) st.plotly_chart(pie_chart, use_container_width=True) else: st.error('No expense report data available.') def sidebar_content(): if not os.path.exists(os.path.join(st.session_state.dir_home,'expense_report')): validate_dir(os.path.join(st.session_state.dir_home,'expense_report')) expense_report_path = os.path.join(st.session_state.dir_home, 'expense_report', 'expense_report.csv') if os.path.exists(expense_report_path): # File exists, proceed with summarization st.session_state.expense_summary, st.session_state.expense_report = summarize_expense_report(expense_report_path) render_expense_report_summary() else: # File does not exist, handle this case appropriately # For example, you could set the session 
state variables to None or an empty value st.session_state.expense_summary, st.session_state.expense_report = None, None st.header('Expense Report Summary') st.write('Available after first run...') def main(): with st.sidebar: sidebar_content() # Main App content_header() tab_settings, tab_prompt, tab_domain, tab_component, tab_processing, tab_private, tab_delete = st.tabs(["Project Settings", "Prompt Builder", "Domain Knowledge","Component Detector", "Processing Options", "API Keys", "Space-Saver"]) with tab_settings: content_tab_settings() with tab_prompt: if st.button("Build Custom LLM Prompt"): st.session_state.proceed_to_build_llm_prompt = True st.rerun() with tab_component: content_tab_component() with tab_domain: content_tab_domain() with tab_processing: content_tab_processing() with tab_private: if st.button("Edit API Keys"): st.session_state.proceed_to_private = True st.rerun() with tab_delete: create_space_saver() st.set_page_config(layout="wide", page_icon='img/icon.ico', page_title='VoucherVision') # Default YAML file path if 'config' not in st.session_state:
st.session_state.config, st.session_state.dir_home = build_VV_config()
1
2023-10-30 23:25:20+00:00
16k
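The VoucherVision record above describes retrieving "domain knowledge" by cosine-similarity search over embeddings of existing label transcriptions, returning the closest matches as JSON examples for the LLM prompt. The following is a minimal illustrative sketch of that kind of lookup only, not the project's actual implementation: the embedding dimensions, the in-memory database, and the function names are assumptions introduced here for illustration.

# Hypothetical sketch of a cosine-similarity lookup over stored transcription
# embeddings, as described in the domain-knowledge help text above.
# All names and the random stand-in data are illustrative assumptions.
import numpy as np

def cosine_similarity(a: np.ndarray, b: np.ndarray) -> float:
    # Standard cosine similarity between two embedding vectors.
    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b) + 1e-12))

def most_similar_transcriptions(ocr_embedding: np.ndarray,
                                db_embeddings: np.ndarray,
                                db_transcriptions: list,
                                k: int = 3) -> list:
    # Score the OCR-text embedding against every stored transcription
    # embedding and return the k closest transcriptions as prompt examples.
    scores = [cosine_similarity(ocr_embedding, e) for e in db_embeddings]
    top_idx = np.argsort(scores)[::-1][:k]
    return [db_transcriptions[i] for i in top_idx]

# Usage with random vectors standing in for real embeddings:
rng = np.random.default_rng(0)
db = rng.normal(size=(100, 384))            # 100 stored transcription embeddings
texts = [f"transcription_{i}" for i in range(100)]
query = rng.normal(size=384)                # embedding of the new OCR text
print(most_similar_transcriptions(query, db, texts, k=3))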
medsagou/massar-direction-sagoubot
main.py
[ { "identifier": "C_File", "path": "utilities/Class_Files.py", "snippet": "class C_File():\n #____________________________________________________________________________________________________________________________________________________________\n # Le constructeur d'une instance d'un fichier\n # Ce constructeur permet d'attribuer à une instance de fichier son nom (vide par défaut) \n # Ce constructeur permet de spécifier le séparateur des éléments s'il existe (également vide par défauté)su\n # Un séparateur peut être un \";\", une \",\" un \"#', etc. \n def __init__(self,file_name=\"\",sep=\";\", sep2=\"+\"):\n self.nomFichier=file_name\n self.separateur=sep\n self.separateur2=sep2\n \n #____________________________________________________________________________________________________________________________________________________________\n # Vérifie si un fichier exite ou non.\n def existe_fichier(self):\n if os.path.isfile(self.nomFichier):\n return True\n else:\n return False\n #____________________________________________________________________________________________________________________________________________________________\n # Vérifie si un fichier exite ou non.\n def specifier_Nom_fichier(self):\n while True:\n print(\"\\n\")\n print(\"Instanciation et saisie d'un nouveau fichier de travail :\\n\")\n self.nomFichier=input(\"Entrez le chemin de votre fichier : \"+\"\\n\")\n if self.existe_fichier():\n print(\"le fichier spécifié existe déjà dans le répertoire courant, veuillez recommencer\")\n else:\n break \n #____________________________________________________________________________________________________________________________________________________________\n # Créer un fichier vide sans supprimer le fichier de même nom s'il existe\n def create_file(self):\n f = open(self.nomFichier,\"x\") #Création d'un fichier vide. Ici, le fichier n'est pas écrasé contrairement au mode 'w' \n f.close()\n \n #____________________________________________________________________________________________________________________________________________________________\n # Créer un fichier vide avec suppression du fichier de même nom s'il existe\n def create_file_2(self):\n f = open(self.nomFichier,\"w\") #Création d'un fichier vide. Ici, le fichier existant qui porte le même nom est écrasé contrairement mode 'x' \n f.close()\n \n #____________________________________________________________________________________________________________________________________________________________\n # Créer un fichier vide avec possibilité de dialogue avant de supprimer un fichier de même nom s'il existe dans le même répertoire (dossier)\n def creer_fichier_3(self):\n if os.path.exists(self.nomFichier): # Condition pour vérifier si jamais le fichier à créer existe déjà dans le répertoire courant\n print(\"Il existe un fichier qui porte le même nom\"+\"\\n\")\n print(\"Voulez-vous l'écraser ?\")\n while True: # Itération (boucle infinie) pour prévenir les événetuelles erreurs de frappe (autre chose que '1' et '2') (Attention, il faut absolument provoquer quelque part dans la boucle une rupture avec \"break\" )\n # Menu local pour exposer les dexu cas de figures (on peut également créer une instance de la classe Menu ici)\n print(\"Veuillez choisir ce qu'il faut faire, selon les options suivantes : \"+\"\\n\")\n print(\"1. Ecraser le fichier existant\")\n print(\"2. 
Garder le fichier\")\n rep=input(\"Veuillez taper 1 ou 2 \")\n if rep=='1': # Cas où l'utilisateur choisit d'écraser le fichier existant \n self.creer_fichier_2() # Appel à laméthode creer_fichier_2()\n break # rupture de la boucle d'itération => on sort de la boucle infinie while\n elif rep=='2': # Cas où l'utilisateur choisit de ne pas écraser le fichier existant (pas besoin dans ce cas de faire appel à la méthode creer_fichier_1()) \n break # rupture de la boucle d'itération => on sort de la boucle infinie while\n else: # cas où l'utilisateur n'a tapé ni \"1\", ni\"2\"\n print(\"Erreur de frappe\"+\"\\n\")\n else: # cas où le fichier à créer n'existe pas dans le répertoire courant\n self.creer_fichier_1() # Appel à laméthode creer_fichier_1()\n \n #____________________________________________________________________________________________________________________________________________________________\n def ActiverFichier(self,Message):\n print(Message)\n self.specifier_Nom_fichier()\n self.creer_fichier_3() \n \n #____________________________________________________________________________________________________________________________________________________________\n # Supprimer un fichier\n def supprimer_fichier(self):\n if os.path.exists(self.nomFichier): # Condition pour vérifier si jamais le fichier à créer existe déjà dans le répertoire courant\n os.remove(self.nomFichier)\n print(\"Le fichier a été supprimé\")\n else:\n print(\"Le fichier spécifié n'existe pas dans le répertoire courant\")\n\n #____________________________________________________________________________________________________________________________________________________________\n # Ajouter un élément\n def enregistrer_Element(self,Element):\n with open(self.nomFichier,'a') as F: # Ouverture du fichier en mode lecture.\n F.write(Element)\n\n #____________________________________________________________________________________________________________________________________________________________\n # Ajouter un ensemble d'éléments sous forme de liste\n def Liste_to_Fichier(self,Liste): # 'creer_Fichier_Avec_Liste_Elements(self,ListeElements)' Créer d'un fichier à partir d'une liste : chaque élément de la liste représente une ligne du fichier\n with open(self.nomFichier,'w') as F: # Ouverture du fichier en mode écriture : à ce niveau si le fichier existe il va être écrasé\n F.writelines(Liste)\n def Liste_stript(self, L):\n for i in range(len(L)):\n L[i] = L[i].strip()\n return L\n\n def str_to_fichier(self,string):\n with open(self.nomFichier,'a') as F: # Ouverture du fichier en mode écriture : à ce niveau si le fichier existe il va être écrasé\n F.write(string)\n F.write(\"\\n\")\n return\n def str_to_fichier2(self,string):\n with open(self.nomFichier,'w') as F:\n F.write(string)\n F.write(\"\\n\")\n return\n\n def dict_to_file(self, D):\n if type(D) == dict and D != {}:\n with open(self.nomFichier, 'w') as F:\n for c, v in D.items():\n F.write(str(c) + \";\" + str(v))\n F.write(\"\\n\")\n return True\n else:\n print_error(\"WE HAD A PROBLEM WHILE SAVING YOUR DICT\", console=self.console)\n \n def Liste_to_str_to_Fichier(self,Liste_1): \n Liste = self.Liste_to_Str1(Liste_1)\n with open(self.nomFichier,'a') as F: # Ouverture du fichier en mode écriture : à ce niveau si le fichier existe il va être écrasé\n \n F.writelines(Liste) \n F.writelines('\\n')\n 
#____________________________________________________________________________________________________________________________________________________________\n # Lire le contenu d'un fichier et le retourne en le plaçant dans une liste\n def fichier_to_Liste(self): # extration d'une liste depuis un fichier : chaque ligne du fichier représente un élément de cette liste\n with open(self.nomFichier, 'r') as f: # Ouverture du fichier en mode lecture.\n return f.readlines()\n def Fichier_to_str(self):\n with open (self.nomFichier,'r') as f:\n return f.read()\n\n def supprimer_element(self,element):\n ch = self.Fichier_to_str()\n print(ch)\n chh = ch.replace(element,'')\n print(chh)\n self.str_to_fichier(ch)\n \n #____________________________________________________________________________________________________________________________________________________________\n # Afficher un fichier ligne par ligne\n def afficher_lignes_fichier(self):\n print(\"\\n Affichage des lignes du fichier \\n\")\n with open(self.nomFichier, 'r') as F:\n for ligne in F:\n print (ligne) \n print(\"\\n Fin affichage des lignes du fichier\")\n\n #____________________________________________________________________________________________________________________________________________________________\n # Afficher un fichier ligne par ligne et pour chaque ligne mot par mot\n def afficher_mots_fichier(self):\n i=0 # uttiliser comme un simple compteur pour afficher dans un message afin de le rendre plus explicite\n with open(self.nomFichier, 'r') as F:\n for ligne in F:\n i+=1\n print(\"Affichage des éléments du contenu la ligne : \",i,\"\\n\") # message explicite\n L=C_Liste(ligne.split(self.separateur)) # Création d'une instance de la classe 'C_Liste'\n L.afficher_Liste() # ici on fait appel à la méthode 'afficher_Liste()' de la classe 'C_Liste'\n\n\n def existe_element_fichier(self,Element):\n Liste_Lignes_du_Fichier=self.Fichier_to_Liste() # extraire_liste(nomFichier)\n if Liste_Lignes_du_Fichier!=[]:\n for i in range(len(Liste_Lignes_du_Fichier)):\n if Element in Liste_Lignes_du_Fichier[i]:\n return(True)\n return(False)\n \n \n def existe_element_fichier2(self,element):\n Liste_Lignes_du_Fichier=self.Fichier_to_Liste() # extraire_liste(nomFichier)\n if Liste_Lignes_du_Fichier!=[]:\n for i in range(len(Liste_Lignes_du_Fichier)):\n L=Liste_Lignes_du_Fichier[i].split(self.separateur)\n if element in L:\n return(True)\n return(False)\n \n \n def existe_element_fichier3(self,element):\n Liste_Lignes_du_Fichier=self.Fichier_to_Liste() # extraire_liste(nomFichier)\n if Liste_Lignes_du_Fichier!=[]:\n for i in range(len(Liste_Lignes_du_Fichier)):\n L=Liste_Lignes_du_Fichier[i].split(self.separateur)\n if element in L:\n return(True, Liste_Lignes_du_Fichier[i])\n return(False,False)\n\n \n \n def modifier_element_fichier(self,Element):\n Nouvelle_Liste=[] # on commence par créer une nouvelle liste, inialisée à vide. 
Cette liste va nous servir à sauvegarder un \n Liste_Lignes_du_Fichier=self.Fichier_to_Liste() # extraire_liste(nomFichier)\n if Liste_Lignes_du_Fichier!=[]:\n for i in range(len(Liste_Lignes_du_Fichier)):\n Ligne_Courante=Liste_Lignes_du_Fichier[i] # La variable 'Ligne_Courante' est utilisée pour donner plus de clarté sur le plan pédagogique, on peut à la place utiliser directement directement 'Liste_Lignes_du_Fichier[i]'\n Liste_Elements_Ligne_Courante=self.Str_to_List(Ligne_Courante) # Ici on transforme la chaîne de caractère 'Ligne_Courante' en une liste 'Liste_Elements_Ligne_Courante' \n if Element not in Liste_Elements_Ligne_Courante:\n Nouvelle_Liste=Nouvelle_Liste+[Ligne_Courante+'\\n']\n else:\n Nouvelle_Liste=C_Liste(Liste_Elements_Ligne_Courante) # Nouvelle_Liste est une instance de la classe C_Liste\n Nouvelle_Liste_Elements=Nouvelle_Liste.changer_element(Element)\n Nouvelle_Ligne_Modifiee=self.Liste_to_Str(Nouvelle_Liste_Elements)\n Nouvelle_Liste=Nouvelle_Liste+[Nouvelle_Ligne_Modifiee+'\\n'] \n self.Liste_to_Fichier(Nouvelle_Liste) # creer_Fichier_depuis_Liste(nomFichier,Nouvelle_Liste)\n \n def ajouter_a_la_fin_de_la_ligne(self,ID,Element,sep):\n Nouvelle_Liste=[] # on commence par créer une nouvelle liste, inialisée à vide. Cette liste va nous servir à sauvegarder un \n Liste_Lignes_du_Fichier=self.Fichier_to_Liste() # extraire_liste(nomFichier)\n if Liste_Lignes_du_Fichier!=[]:\n for i in range(len(Liste_Lignes_du_Fichier)):\n Ligne_Courante=Liste_Lignes_du_Fichier[i] # La variable 'Ligne_Courante' est utilisée pour donner plus de clarté sur le plan pédagogique, on peut à la place utiliser directement directement 'Liste_Lignes_du_Fichier[i]'\n Liste_Elements_Ligne_Courante=self.str_to_liste(Ligne_Courante) # Ici on transforme la chaîne de caractère 'Ligne_Courante' en une liste 'Liste_Elements_Ligne_Courante' \n if ID not in Liste_Elements_Ligne_Courante:\n Nouvelle_Liste=Nouvelle_Liste+[Ligne_Courante+'\\n']\n else:\n Liste_Elements_Ligne_Courante[-1] = Liste_Elements_Ligne_Courante[-1].replace('\\n','') +sep+ str(Element)\n \n Nouvelle_Liste_Elements=Liste_Elements_Ligne_Courante\n Nouvelle_Ligne_Modifiee=self.Liste_to_Str1(Nouvelle_Liste_Elements)\n Nouvelle_Liste=Nouvelle_Liste+[Nouvelle_Ligne_Modifiee+'\\n'] \n self.Liste_to_Fichier(Nouvelle_Liste) # creer_Fichier_depuis_Liste(nomFichier,Nouvelle_Liste)\n \n \n def Liste_to_Str1(self,Liste_Elements):\n return self.separateur.join(map(str, Liste_Elements))\n \n def Liste_to_Str2(self,Liste_Elements):\n return self.separateur2.join(Liste_Elements)\n \n def supprimer_element_fichier(self,Element):\n Nouvelle_Liste=[] # on commence par créer une nouvelle liste, inialisée à vide. Cette liste va nous servir à sauvegarder un \n# erreur d'écriture Liste_Lignes_du_Fichier=Fichier_to_Liste(self) # extraire_liste(nomFichier)\n Liste_Lignes_du_Fichier=self.Fichier_to_Liste() # extraire_liste(nomFichier)\n if Liste_Lignes_du_Fichier!=[]:\n for i in range(len(Liste_Lignes_du_Fichier)):\n if Element not in Liste_Lignes_du_Fichier[i]:\n Nouvelle_Liste=Nouvelle_Liste+[Liste_Lignes_du_Fichier[i]+'\\n']\n# écriture erronée Liste_to_Fichier(self.nomFichier,Nouvelle_Liste) # creer_Fichier_depuis_Liste(nomFichier,Nouvelle_Liste)\n self.Liste_to_Fichier(Nouvelle_Liste) # creer_Fichier_depuis_Liste(nomFichier,Nouvelle_Liste)\n \n def supprimer_ligne_fichier(self,Element_ligne):\n Nouvelle_Liste=[] # on commence par créer une nouvelle liste, inialisée à vide. 
Cette liste va nous servir à sauvegarder un \n# erreur d'écriture Liste_Lignes_du_Fichier=Fichier_to_Liste(self) # extraire_liste(nomFichier)\n Liste_Lignes_du_Fichier=self.Fichier_to_Liste() # extraire_liste(nomFichier)\n if Liste_Lignes_du_Fichier!=[]:\n for i in range(len(Liste_Lignes_du_Fichier)):\n if Element_ligne not in Liste_Lignes_du_Fichier[i]:\n Nouvelle_Liste=Nouvelle_Liste+[Liste_Lignes_du_Fichier[i]]\n else:\n continue\n# écriture erronée Liste_to_Fichier(self.nomFichier,Nouvelle_Liste) # creer_Fichier_depuis_Liste(nomFichier,Nouvelle_Liste)\n self.Liste_to_Fichier(Nouvelle_Liste) # creer_Fichier_depuis_Liste(nomFichier,Nouvelle_Liste)\n \n def supprimer_ligne_fichier2(self,Element_ligne):\n Nouvelle_Liste=[] # on commence par créer une nouvelle liste, inialisée à vide. Cette liste va nous servir à sauvegarder un \n# erreur d'écriture Liste_Lignes_du_Fichier=Fichier_to_Liste(self) # extraire_liste(nomFichier)\n Liste_Lignes_du_Fichier=self.Fichier_to_Liste() # extraire_liste(nomFichier)\n if Liste_Lignes_du_Fichier!=[]:\n for i in range(len(Liste_Lignes_du_Fichier)):\n if Element_ligne+\"\\n\" not in Liste_Lignes_du_Fichier[i].split(self.separateur)[-1].split(self.separateur2) and Element_ligne not in Liste_Lignes_du_Fichier[i].split(self.separateur)[-1].split(self.separateur2):\n Nouvelle_Liste=Nouvelle_Liste+[Liste_Lignes_du_Fichier[i]]\n else:\n continue\n# écriture erronée Liste_to_Fichier(self.nomFichier,Nouvelle_Liste) # creer_Fichier_depuis_Liste(nomFichier,Nouvelle_Liste)\n self.Liste_to_Fichier(Nouvelle_Liste) #\n \n def modiffier_ligne(self,Element_ligne,nv_ligne):\n Nouvelle_Liste=[] \n Liste_Lignes_du_Fichier=self.Fichier_to_Liste() \n if Liste_Lignes_du_Fichier!=[]:\n for i in range(len(Liste_Lignes_du_Fichier)):\n if Element_ligne not in Liste_Lignes_du_Fichier[i]:\n Nouvelle_Liste=Nouvelle_Liste+[Liste_Lignes_du_Fichier[i]]\n else:\n Nouvelle_Liste = Nouvelle_Liste+[nv_ligne + '\\n']\n self.Liste_to_Fichier(Nouvelle_Liste) #\n return\n \n \n\n def str_to_liste(self, string):\n return string.split(self.separateur)\n \n \n def nbre_ligne(self):\n return len(self.Fichier_to_Liste())\n \n\n def str_to_liste2(self, string):\n return string.split(self.separateur2)" }, { "identifier": "C_Dossier", "path": "utilities/Class_Files.py", "snippet": "class C_Dossier():\n\n \n def __init__(self,sep=\"\"):\n self.separateur=sep\n \n def dossier_courant(self):\n return os.getcwd()\n\n def existe_dossier(self,Chemin):\n if os.path.exists(Chemin) :\n return True\n else:\n return False \n \n def changer_dossier(self,Chemin):\n if C_Dossier.existe_dossier(Chemin):\n return(chdir(Chemin))\n \n \n def creer_dossier(self,Chemin):\n if not C_Dossier.existe_dossier(Chemin):\n return(mkdir(Chemin))" }, { "identifier": "Read_Db", "path": "absence_app/Read_XLSB_File.py", "snippet": "class Read_Db:\n def __init__(self, input_file = r\"data_to_manage/file_data.xls\", template_file = \"data_to_manage/template.xlsx\", output_file = \"data_to_manage/absence.xlsx\", df = \"\", required_classes=[], progress_bar=\"\", console=\"\"):\n self.index = {0: \"CLASS_StudentIndex\",\n 1: \"Niveau\",\n 2: \"class_name\",\n 3: \"student_index\",\n \"Unnamed: 23\": \"CNE\",\n \"Unnamed: 12\": \"nom\",\n \"Unnamed: 16\": \"prenom\"}\n self.input_file = input_file\n self.output_file = output_file\n self.template_file = template_file\n self.df = df\n self.init_cell = [\"A\"]\n self.start_col = 'A'\n self.end_col = 'C'\n # self.workbook_output = self.get_workbook(output_file)\n self.workbook_output = \"\"\n 
self.required_classes = required_classes\n self.progress_bar = progress_bar\n self.console = console\n\n def get_key(self, val):\n for key, value in self.index.items():\n if val == value:\n return key\n return \"key doesn't exist\"\n\n def get_data_from_xlsb(self):\n xlsb_file = pd.ExcelFile(self.input_file)\n df = xlsb_file.parse('Feuil3', header=None) #\n self.df = df\n return df\n def get_df_from_xls(self):\n xls = pd.ExcelFile(self.input_file)\n workbook = self.get_data_from_xls()\n sheet_names = xls.sheet_names\n data = {}\n for sheet_name in sheet_names:\n sheet = workbook[sheet_name]\n df = pd.read_excel(self.input_file, sheet_name=sheet_name)\n class_name = sheet.cell_value(10, 8)\n data[class_name] = df\n self.df = data\n return data\n\n def get_data_from_xls(self): # new data function\n return xlrd.open_workbook(self.input_file)\n def get_classes_name_from_xls(self):\n workbook = self.get_data_from_xls()\n classes = []\n sheet_names = workbook.sheet_names()\n for sheet_name in sheet_names:\n sheet = workbook[sheet_name]\n class_name = sheet.cell_value(10, 8)\n # print(class_name)\n classes.append(class_name)\n return classes\n\n def get_workbook(self, file_name):\n workbook = openpyxl.load_workbook(file_name)\n return workbook\n\n\n def get_workbook_sheet(self, workbook ,sheet):\n return workbook[sheet]\n\n def add_value_to_sheet(self, worksheet, cell, value):\n cell_to_update = worksheet[cell]\n cell_to_update.value = value\n return\n\n\n def create_copy_sheet(self, class_name = \"\", workbook = \"\", source_sheet = \"\"):\n new_sheet = workbook.copy_worksheet(source_sheet)\n new_sheet.title = class_name\n new_sheet.sheet_view.rightToLeft = True\n return\n\n\n def get_column_list_from_df(self, column_key):\n if self.df == \"\":\n self.get_df_from_xls()\n\n L = list(set(self.df.values[:, column_key].tolist()))\n try:\n L.remove(\"0\")\n except ValueError:\n pass\n try:\n L.remove(0)\n except ValueError:\n pass\n return L\n def restart_workbook_output(self):\n self.workbook_output.close()\n self.workbook_output = self.get_workbook(self.output_file)\n return\n def get_sheet_names_workbout_output(self):\n self.workbook_output = self.get_workbook(self.output_file)\n return self.workbook_output.sheetnames\n\n\n\n\n def create_all_class_sheet(self):\n if check_exist_file(self.output_file):\n # class_in_sheet = self.get_sheet_names_workbout_output()\n # with open(self.output_file, 'w') as f:\n # f.close()\n os.remove(self.output_file)\n print_info(\"WE REMOVED THE OUTPUT FILE TO CREATE NEW ONE\", console=self.console)\n # else:\n # class_in_sheet = []\n # classes_list = self.get_column_list_from_df(column_key=self.get_key(\"class_name\"))\n\n workbook = openpyxl.load_workbook(self.template_file)\n source_sheet = workbook[\"BaseSheet\"]\n classes_list = self.get_classes_name_from_xls()\n # print(classes_list)\n for classe in classes_list:\n # if classe in class_in_sheet:\n # print_error(f\"SHEET FOR {classe} ALREADY EXIST\")\n # continue\n # if not in college just skipit\n if classe.split(\"-\")[0][1:] not in self.required_classes:\n continue\n print_info(f\"CREATE A SHEET FOR {classe} CLASS\", console=self.console)\n if classe != \"\":\n self.create_copy_sheet(class_name=classe, workbook=workbook, source_sheet = source_sheet)\n\n workbook.save(str(self.output_file))\n workbook.close()\n return\n\n def fill_all_class_sheets(self):\n self.create_all_class_sheet()\n # already check above\n if str(self.df) == \"\":\n print_info(\"GETTING THE DATA...\", console=self.console)\n 
self.get_data_from_xls()\n # print_info(\"RESTARTING WORKSHEET\")\n # self.restart_workbook_output()\n self.workbook_output = self.get_workbook(self.output_file)\n class_in_sheet = list(self.get_sheet_names_workbout_output())\n # print(class_in_sheet)\n for k in range(len(class_in_sheet)):\n # print(f\"{k+1}/{len(class_in_sheet)}\")\n self.progress_bar.set((k+1)/len(class_in_sheet))\n worksheet = self.get_workbook_sheet(workbook = self.workbook_output, sheet=class_in_sheet[k])\n i = 0\n print_info(f\"WORKING ON {class_in_sheet[k]} CLASS DATA TO SHEET\", console=self.console)\n # column = db.df[\"3ASCG-5\"].columns.tolist()\n #\n # for index, row in db.df[\"3ASCG-5\"].iterrows():\n # if pd.isna(row[column[23]]):\n # continue\n # print(row[column[23]], row[column[16]], row[column[12]])\n index_student = 0\n self.get_df_from_xls()\n if class_in_sheet[k] == 'BaseSheet':\n continue\n for index, row in self.df[class_in_sheet[k]].iterrows():\n if pd.isna(row[self.get_key(\"CNE\")]):\n continue\n if index_student == 0:\n index_student += 1\n continue\n i += 1\n # print(row)\n for col in range(ord(self.start_col), ord(self.end_col) + 1):\n if chr(col) == \"A\":\n self.add_value_to_sheet(worksheet=worksheet, cell=chr(col) + str(9 + i), value=index_student)\n elif chr(col) == \"B\":\n self.add_value_to_sheet(worksheet=worksheet, cell=chr(col) + str(9 + i), value=row[self.get_key(\"CNE\")])\n elif chr(col) == \"C\":\n self.add_value_to_sheet(worksheet=worksheet, cell=chr(col) + str(9 + i),\n value=str(row[self.get_key(\"prenom\")] + \" \" + str(row[self.get_key(\"nom\")])))\n self.add_value_to_sheet(worksheet=worksheet, cell=\"BA\" + str(9 + i), value=str(row[self.get_key(\"prenom\")] + \" \" + str(row[self.get_key(\"nom\")])))\n if i > 49:\n return\n\n index_student += 1\n\n\n # add number of students\n self.add_value_to_sheet(worksheet=worksheet, cell=\"AO6\", value=str(i))\n # add class name\n self.add_value_to_sheet(worksheet=worksheet, cell=\"D6\", value=class_in_sheet[k])\n self.workbook_output.save(self.output_file)\n # self.workbook_output.close()\n print_success(\"Your lists is generated successfully\", console=self.console)\n print_success(f\"Your file path: {self.output_file}\", console=self.console)\n return" }, { "identifier": "Absence", "path": "absence_app/Absences.py", "snippet": "class Absence:\n def __init__(self, driver=\"\", console=\"\"):\n self.driver = driver\n self.console = console\n self.data_table_Xpath = \"/html/body/div/div[1]/div[2]/div[2]/section[2]/div[2]/div[1]/div/div/div[2]/div/form/div/div/div/div/div/div/div/div[2]/div/table\"\n self.data_table_reduced_Xpath = '//*[@id=\"DataTables-Table-0\"]/tbody'\n self.row_Xpath = '//*[@id=\"DataTables-Table-0\"]/tbody/tr['\n self.nome_Xpath = ']/td[3]'\n self.CNE_Xpath = ']/td[2]'\n self.select_Xpath = ']/td[4]/select'\n self.h_Xpath = ']/td['\n self.dates = \"\"\n self.searchBtn = self.driver.find_element(By.CSS_SELECTOR, \"#search > div > div > div > div.box-body > div.blocBtn > button\")\n self.saveBtnCssSelector = \"#gridFrom > button\"\n\n def get_list_page(self):\n try:\n self.driver.get(\"https://massar.men.gov.ma/Evaluation/Absence/AbsenceJournaliereParClasse\")\n except Exception as e:\n print_error(e, console=self.console)\n print_error(\"We Can't find the list page! 
Close the program and try again.\", console=self.console)\n else:\n print_info(\"GETTING TO THE LIST PAGE\", console=self.console)\n\n def get_classes_from_classes_page(self):\n return\n\n def main_absence_loop(self):\n TypeEnseignement = self.driver.find_element(By.ID, \"TypeEnseignement\")\n TypeEnseignement_all_options = TypeEnseignement.find_elements(By.TAG_NAME, \"option\")\n TypeEnseignement_Select = Select(TypeEnseignement)\n\n for TypeEnseignement_option in TypeEnseignement_all_options:\n try:\n WebDriverWait(self.driver, 5).until(\n EC.invisibility_of_element_located(\n (\n By.ID, \"loadingDiv\",\n )\n )\n )\n except Exception as e:\n print_error(e, console=self.console)\n print_error(\"CHECK YOUR INTERNET CONNECTION THEN TRY AGAIN\", console=self.console)\n TypeEnseignement_Select.select_by_value(TypeEnseignement_option.get_attribute(\"value\"))\n\n Cycle = self.driver.find_element(By.ID, \"Cycle\")\n Cycle_all_options = Cycle.find_elements(By.TAG_NAME, \"option\")\n\n Cycle_Select = Select(Cycle)\n\n for Cycle_option in Cycle_all_options:\n if Cycle_option.text != \"\":\n Cycle_Select.select_by_value(Cycle_option.get_attribute(\"value\"))\n Niveau = self.driver.find_element(By.ID, \"Niveau\")\n Niveau_all_options = Niveau.find_elements(By.TAG_NAME, \"option\")\n Niveau_Select = Select(Niveau)\n\n for Niveau_option in Niveau_all_options:\n if Niveau_option.text != \"\":\n Niveau_Select.select_by_value(Niveau_option.get_attribute(\"value\"))\n\n Classe = self.driver.find_element(By.ID, \"Classe\")\n Classe_all_options = Classe.find_elements(By.TAG_NAME, \"option\")\n Classe_Select = Select(Classe)\n\n for Classe_option in Classe_all_options:\n\n if Classe_option.text != \"\":\n classe_absence = Scan_Absences(classe=Classe_option.text)\n classe_list_absence, start_date, end_date = classe_absence.get_absence_day_per_student2()\n\n if classe_list_absence == False:\n print_info(f\"THE CLASS {Classe_option.text} NOT IN THE EXCEL FILE\", console=self.console)\n continue\n self.dates = get_date_list(start_date_str=start_date, end_date_str=end_date)\n Classe_Select.select_by_value(Classe_option.get_attribute(\"value\"))\n for l in range(len(self.dates)):\n print_success(f\"WORKING ON CLASS {Classe_option.text}, DATE {self.dates[l]}...\", console=self.console)\n date = self.driver.find_element(By.ID, \"Jour\")\n date.send_keys(Keys.CONTROL + \"a\")\n date.send_keys(Keys.DELETE)\n date.send_keys(self.dates[l])\n try:\n WebDriverWait(self.driver, 15).until(\n EC.element_to_be_clickable((By.CSS_SELECTOR, '#search > div > div > div > div.box-body > div.blocBtn > button'))\n )\n except Exception as e:\n print_error(e, console=self.console)\n pass\n else:\n self.searchBtn = self.driver.find_element(By.CSS_SELECTOR, '#search > div > div > div > div.box-body > div.blocBtn > button')\n self.searchBtn.click()\n try:\n WebDriverWait(self.driver, 3).until(\n EC.invisibility_of_element_located(\n (\n By.ID, \"loadingDiv\",\n )\n )\n )\n except Exception as e:\n print_error(e, console=self.console)\n continue\n else:\n print_info(\"FILLING THE ABSENCE...\", console=self.console)\n self.fill_absence(classe_list_absence=classe_list_absence,class_name=Classe_option.text, day_index = l)\n try:\n WebDriverWait(self.driver, 30).until(\n EC.presence_of_element_located((By.CSS_SELECTOR,\"#gridFrom > button\"))\n )\n except Exception as e:\n print_error(e, console=self.console)\n print_error('WE COULD NOT FIND THE SAVE BUTTON ', console=self.console)\n self.driver.quit()\n # sys.exit()\n else:\n try:\n 
WebDriverWait(self.driver, 15).until(EC.element_to_be_clickable((By.CSS_SELECTOR, \"#gridFrom > button\")))\n except Exception as e:\n print_error(e, console=self.console)\n print_error('WE COULD NOT FIND THE SAVE BUTTON', console=self.console)\n else:\n saveBtn = self.driver.find_element(By.CSS_SELECTOR, \"#gridFrom > button\")\n # saveBtn.click()\n self.driver.execute_script(\"arguments[0].click();\", saveBtn)\n\n print_info('SAVE BUTTON IS CLICKED', console=self.console)\n try:\n WebDriverWait(self.driver, 3).until(\n EC.invisibility_of_element_located(\n (\n By.ID, \"loadingDiv\",\n )\n )\n )\n except Exception as e:\n print_error(e, console=self.console)\n pass\n try:\n WebDriverWait(self.driver, 10).until(\n EC.presence_of_element_located(\n (\n By.ID, \"Model_msg_Btn\",\n )\n )\n )\n except Exception as e:\n print_error(e, console=self.console)\n print_error('WE COULD NOT FIND THE CLOSE BUTTON', console=self.console)\n else:\n print_info('CLOSE BUTTON IS CLOSED', console=self.console)\n close_btn = self.driver.find_element(By.ID, \"Model_msg_Btn\")\n close_btn.click()\n try:\n WebDriverWait(self.driver, 3).until(\n EC.invisibility_of_element_located(\n (\n By.ID, \"loadingDiv\",\n )\n )\n )\n except Exception as e:\n print_error(e, console=self.console)\n pass\n\n print_success(f\"CLASS {Classe_option.text} PASSED, DATE {self.dates[l]}\", console=self.console)\n\n return\n\n def fill_absence(self, classe_list_absence, class_name, day_index):\n mytable = self.driver.find_element(By.XPATH, self.data_table_reduced_Xpath)\n i = 0\n for row in mytable.find_elements(By.CSS_SELECTOR, 'tr'):\n i += 1\n cne = self.driver.find_element(By.XPATH, str(self.row_Xpath) + str(i) + str(self.CNE_Xpath))\n name = self.driver.find_element(By.XPATH, str(self.row_Xpath) + str(i) + str(self.nome_Xpath))\n\n try:\n week_absence_student = classe_list_absence[cne.text]\n week_days_per_student = self.list_week_to_days(week_absence_student)\n except KeyError as e:\n print_error(e, self.console)\n print_error(f'THIS CNE {cne.text} DOES NOT EXIST, THE NAME IS: {name.text}, CLASS: {class_name}', console=self.console)\n else:\n self.fill_absence_per_day(i,week_days_per_student[day_index])\n\n # if classe_name == \"1APIC-1\":\n # time.sleep(400)\n return\n\n def fill_absence_per_day(self,row_i, day):\n j = 0\n if str(day[0]) == \"0\":\n select_cause = Select(self.driver.find_element(By.XPATH, str(self.row_Xpath) + str(row_i) + str(self.select_Xpath)))\n select_cause.select_by_value(\"2\")\n checkbox = self.driver.find_element(By.XPATH, str(self.row_Xpath) + str(row_i) + str(self.h_Xpath) + str(5) + \"]/input[1]\")\n checkbox.click()\n return\n elif \"x\" in day:\n try:\n WebDriverWait(self.driver, 10).until(\n EC.presence_of_element_located(\n (\n By.XPATH, str(self.row_Xpath) + str(row_i) + str(self.select_Xpath)\n )\n )\n )\n except Exception as e:\n print_error(e, self.console)\n print_error(\"AN ERROR IN HTML SELECTION PLEASE TRY AGAIN.\", console=self.console)\n self.exit_program()\n select_cause = Select(self.driver.find_element(By.XPATH, str(self.row_Xpath) + str(row_i) + str(self.select_Xpath)))\n select_cause.select_by_value(\"2\")\n for i in range(len(day)):\n if day[i] == None:\n continue\n if str(day[i]) == \"x\":\n # print(day[i])\n if i < 4:\n checkbox = self.driver.find_element(By.XPATH, str(self.row_Xpath) + str(row_i) + str(self.h_Xpath) + str(6 + i) + \"]/input[1]\")\n else:\n checkbox = self.driver.find_element(By.XPATH, str(self.row_Xpath) + str(row_i) + str(\n self.h_Xpath) + str(8 + i) + 
\"]/input[1]\")\n checkbox.click()\n else:\n print_error('WE CANNOT REGONIZE THE FILL OF THE CELL', console=self.console)\n\n # j += 1\n # date = self.driver.find_element(By.ID, \"Jour\")\n # date.send_keys(Keys.CONTROL + \"a\")\n # date.send_keys(Keys.DELETE)\n # date.send_keys(self.dates[j])\n # self.searchBtn.click()\n\n\n def list_week_to_days(self, list_week):\n index = 0\n week = []\n day = []\n for i in range(2,len(list_week)):\n if index == 8:\n week.append(day)\n day = []\n index = 0\n day.append(list_week[i])\n index += 1\n week.append(day)\n return week\n\n\n def main_list_reader(self):\n self.get_list_page()\n self.list_of_each_class()\n return" } ]
import tkinter as tk
import customtkinter
import time
import os
import threading
import logging
import sys
from tkinter import filedialog
from PIL import Image
from validate_email import validate_email
from utilities import C_File, C_Dossier
from dotenv import set_key, load_dotenv
from absence_app import Read_Db
from absence_app import Absence
from Interaction_browser import Massar_Direction_Sagou
12,474
self.college_options.get() or self.high_school_options.get()): if self.high_school_options.get(): for option in optionsHighSchool: if option.get(): selected_classes.append((option.cget("text"))) if self.college_options.get(): for option in optionsCollege: if option.get(): selected_classes.append((option.cget("text"))) if len(selected_classes) == 0: self.college_label_error() self.high_school_label_eroor() else: self.selected_classes = selected_classes self.tabview_generate_lists.set("Output Location") L = paths.fichier_to_Liste() L[0] = "DATA" + "=" + self.entry_path.get() + "\n" L[1] = "TEMPLATE" + "=" + self.entry_path2.get() + "\n" paths.Liste_to_Fichier(L) else: if not self.validate_path(self.entry_path): self.label_data_file_error() if not self.validate_path(self.entry_path2): self.label_template_file_error() if self.high_school_options.get(): for option in optionsHighSchool: if option.get(): selected_classes.append((option.cget("text"))) if self.college_options.get(): for option in optionsCollege: if option.get(): selected_classes.append((option.cget("text"))) if len(selected_classes) == 0: self.college_label_error() self.high_school_label_eroor() if tab == "Output Location": if self.validate_dir(self.output_path): self.tabview_generate_lists.set("Review & Submit") L = paths.fichier_to_Liste() L[-1] = "DIR" + "=" + self.output_path.get() paths.Liste_to_Fichier(L) self.label_all_review1 = customtkinter.CTkTextbox(self.tabview_generate_lists.tab("Review & Submit")) self.label_all_review1.grid(row=0, column=0, columnspan=6, sticky="nsew") # self.label_all_review2.insert("1.0", text) text = f"Data file path:" text += " " * (30 - len("Data file path:")) text += str(self.entry_path.get()) + "\n\n" self.label_all_review1.insert("end", text) text = "Template file path:" text += " " * (30 - len("Template file path:")) text += str(self.entry_path2.get()) + "\n\n" self.label_all_review1.insert("end", text) text = "Classes:" text += " " * (30 - len("Classes:")) for c in self.selected_classes: text = text + c + ",\t" self.label_all_review1.insert("end", text + "\n\n") text = "Output directory:" text += " " * (30 - len("Output directory:")) text += str(self.output_path.get()) + "\n\n" self.label_all_review1.insert("end", text) self.label_all_review1.configure(state="disabled", text_color="gray70") else: self.directory_error() return def browse_path(self): filetypes = ( ("Text files", "*.xls"), # Display only .txt files ("All files", "*.*") # Display all files ) path = filedialog.askopenfilename(filetypes=filetypes, initialdir=os.path.dirname(self.path["DATA"]) if self.path["DATA"] != "" else os.path.join(os.path.expanduser('~'), 'Documents')) if path == "": return self.entry_path.delete(0, tk.END) # Clear the entry self.entry_path.insert(0, os.path.abspath(path)) self.path["DATA"] = path file = C_File(file_name=path) if file.existe_fichier(): self.reset_error1() def browse_path2(self): filetypes = ( ("Text files", "*.xlsx"), # Display only .txt files ("All files", "*.*") # Display all files ) path = filedialog.askopenfilename(filetypes=filetypes, initialdir=os.path.dirname(self.path["TEMPLATE"]) if self.path["TEMPLATE"] != "" else os.path.join(os.path.expanduser('~'), 'Documents')) if path == "": return self.entry_path2.delete(0, tk.END) # Clear the entry self.entry_path2.insert(0, os.path.abspath(path)) self.path["TEMPLATE"] = path file = C_File(file_name=path) if file.existe_fichier(): self.reset_error2() def browser_path3(self): filetypes = ( ("Text files", "*.xlsx"), # Display only .txt files 
("All files", "*.*") # Display all files ) path = filedialog.askopenfilename(filetypes=filetypes, initialdir=os.path.dirname(self.path["ABSENCE_FILE"]) if self.path["ABSENCE_FILE"] != "" else os.path.join(os.path.expanduser('~'), 'Documents')) if path == "": return self.path["ABSENCE_FILE"] = path self.entry_path_absence.delete(0, tk.END) # Clear the entry self.entry_path_absence.insert(0, os.path.abspath(path)) file = C_File(file_name=path) if file.existe_fichier(): self.reset_label(self.label_absence_data_file) self.entry_reset(self.entry_path_absence) def browse_folder(self): path = filedialog.askdirectory(initialdir=self.path["DIR"] if self.path["DIR"] != "" else os.path.join(os.path.expanduser('~'), 'Documents')) if path == "": return self.output_path.delete(0, tk.END) self.output_path.insert(0, os.path.abspath(path)) self.path["DIR"] = path
# https://stackoverflow.com/questions/31836104/pyinstaller-and-onefile-how-to-include-an-image-in-the-exe-file def resource_path(relative_path): """ Get absolute path to resource, works for dev and for PyInstaller """ try: # PyInstaller creates a temp folder and stores path in _MEIPASS base_path = sys._MEIPASS2 except Exception: base_path = os.path.abspath(".") return os.path.join(base_path, relative_path) logging.basicConfig(filename='app.log', level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s') customtkinter.set_appearance_mode("Dark") # Modes: "System" (standard), "Dark", "Light" customtkinter.set_default_color_theme("dark-blue") # Themes: "blue" (standard), "green", "dark-blue" dirPath = os.path.dirname(os.path.realpath(__file__)) class App(customtkinter.CTk): def __init__(self): super().__init__() self.tabview_generate_lists = None self.tabview_fill_bot= None self.generate_list_menu = None self.about_us_text = None self.fill_absence_menu = None self.try_again_generate = False self.try_again_fill = False self.progressbar_1 = None image_path = resource_path("images") self.main_logo_image = customtkinter.CTkImage( light_image=Image.open(os.path.join(image_path, "logo_black.png")), dark_image=Image.open(os.path.join(image_path, "logo_white.png")), size=(200,200)) self.about_us_image = customtkinter.CTkImage( light_image=Image.open(os.path.join(image_path, "logo_black.png")), dark_image=Image.open(os.path.join(image_path, "logo_white.png")), size=(150, 150)) # self.main_logo_photo = ImageTk.PhotoImage(self.main_logo_image) # configure window self.title("SagouBot Massar Direction") self.iconbitmap(resource_path("icon.ico")) self.geometry(f"{1100}x{580}") # configure grid layout (4x4) self.grid_columnconfigure(1, weight=1) self.grid_columnconfigure((2, 3), weight=0) self.grid_rowconfigure((0, 1, 2), weight=1) # create sidebar frame with widgets self.sidebar_frame = customtkinter.CTkFrame(self, width=200, corner_radius=0) self.sidebar_frame.grid(row=0, column=0, rowspan=4, sticky="nsew") self.sidebar_frame.grid_rowconfigure(5, weight=1) self.sidebar_frame.grid(row=0, column=0) self.sideBar_logo = customtkinter.CTkLabel(self.sidebar_frame, text="", image=self.main_logo_image) self.sideBar_logo.grid(row=5, column=0, padx=20, pady=20) self.entry_default_bordercolor = customtkinter.CTkEntry(self).cget("border_color") # self.logo_label = customtkinter.CTkLabel(self.sidebar_frame, text="SagouBot", font=customtkinter.CTkFont(size=40, weight="bold")) # self.logo_label.grid(row=1, column=0, padx=20, pady=(20, 10)) self.generate_list_menu_button_event() # Console (Text area) self.console_text = customtkinter.CTkTextbox(self, height=200, width=400, fg_color="gray1") self.console_text.insert("0.0", "CONSOLE") self.console_text.insert(F"{len('CONSOLE')}.0", "--------" * 28) self.console_text.configure(state="disabled") self.console_text.grid(row=1, column=1, padx=(20, 20), pady=(5, 15), sticky="nsew") self.console_text.tag_config("error", foreground="red") self.console_text.tag_config("note", foreground="orange") self.console_text.tag_config("successes", foreground="blue") # self.generate_progress_bar() # Progress Bar # progress_bar = customtkinter.CTkProgressBar(self, mode='determinate') # progress_bar.grid(row=1, column=1, padx=(20, 20), pady=(5, 0), sticky="nsew") # # Button to trigger updates # update_button = customtkinter.CTkButton(self, text="Start Processing", command=()) # update_button.grid(row=1, column=1, padx=(20, 20), pady=(5, 0), sticky="nsew") def 
high_school_switch(self): state = self.high_school_options.get() options = [self.TCS, self.TCSF, self.TCLSH, self.BACSC, self.BACSH, self.BACSE, self.BACSVT, self.BACSH2] if state: for option in options: option.configure(state="normal") else: for option in options: option.configure(state="disabled") return def college_switch(self): state = self.college_options.get() if state: self.college_generale.configure(state="normal") self.college_aspeb.configure(state="normal") self.college_inter.configure(state="normal") else: self.college_generale.configure(state="disabled") self.college_aspeb.configure(state="disabled") self.college_inter.configure(state="disabled") def college_label_error(self): current_text = self.label_college.cget("text") self.label_college.configure(text=current_text.replace("*", "") + "*", text_color="red") return def high_school_label_eroor(self): current_text = self.label_high_school.cget("text") self.label_high_school.configure(text=current_text.replace("*", "") + "*", text_color="red") return def reset_label_high_college(self): current_text1 = self.label_college.cget("text") current_text = self.label_high_school.cget("text") self.label_high_school.configure(text=current_text.replace("*", ""), text_color="gray90") self.label_college.configure(text=current_text1.replace("*", ""), text_color="gray90") def label_data_file_error(self): current_text = self.label_data_file.cget("text") self.label_data_file.configure(text=current_text.replace("*", "") + "*", text_color="red") return def label_template_file_error(self): current_text = self.label_template_entry.cget("text") self.label_template_entry.configure(text=current_text.replace("*", "") + "*", text_color="red") return def reset_error1(self): current_text = self.label_data_file.cget("text") self.label_data_file.configure(text=current_text.replace("*", ""), text_color="gray90") return def reset_error2(self): current_text = self.label_template_entry.cget("text") self.label_template_entry.configure(text=current_text.replace("*", ""), text_color="gray90") return def directory_error(self): current_text = self.label_output_folder.cget("text") self.label_output_folder.configure(text=current_text + "*", text_color="red") return def reset_error3(self): current_text = self.label_output_folder.cget("text") self.label_output_folder.configure(text=current_text.replace("*", ""), text_color="gray90") return def go_to_review2(self): if self.email_entry.get() == "" or self.password_entry.get() == "" or not self.validate_path(self.entry_path_absence) or not self.check_terms_and_condition.get(): if self.email_entry.get() == "": self.error_label(self.label_email_entry) self.entry_error(self.email_entry) if len(self.password_entry.get()) < 8: self.error_label(self.label_password_entry) self.entry_error(self.password_entry) if not self.validate_path(self.entry_path_absence): self.error_label(self.label_absence_data_file) self.entry_error(self.entry_path_absence) if not self.check_terms_and_condition.get(): self.check_terms_and_condition.configure(border_color="red", text_color="red") self.error_label(self.label_terms) else: paths = C_File(resource_path("db/paths.txt")) L = paths.fichier_to_Liste() L[3] = "ABSENCE_FILE" + "=" + self.entry_path_absence.get() +"\n" L[4] = "EMAIL" + "=" + self.email_entry.get() +"\n" paths.Liste_to_Fichier(L) set_key(dotenv_path=os.path.join(dirPath,".env"), key_to_set="EMAIL", value_to_set=self.email_entry.get()) set_key(dotenv_path=os.path.join(dirPath,".env"), key_to_set="PASSWORD", 
value_to_set=self.password_entry.get()) load_dotenv(dotenv_path=os.path.join(dirPath,".env")) self.tabview_fill_bot.set("Review & Submit") self.label_all_review2 = customtkinter.CTkTextbox(self.tabview_fill_bot.tab("Review & Submit")) self.label_all_review2.grid(row=0, column=0, columnspan=6, sticky="nsew") # self.label_all_review2.insert("1.0", text) text = f"Email:" text += " " * (30 - len("Email:")) text += str(self.email_entry.get()) + "\n\n" self.label_all_review2.insert("end", text) text = "Absence Excel File:" text += " " * (30 - len("Absence Excel File:")) text += str(self.entry_path_absence.get())+ "\n\n" self.label_all_review2.insert("end", text) text = "Browser:" text += " " * (30 - len("Browser:")) if self.browser_type.get() == 2: text += "FireFox" else: text += "Chrome" self.label_all_review2.insert("end", text) self.label_all_review2.configure(state="disabled", text_color="gray70") return def go_to_output_location(self): if self.tabview_generate_lists.grid_info(): tabview = self.tabview_generate_lists tab = tabview.get() optionsHighSchool = [self.TCS, self.TCSF, self.TCLSH, self.BACSC, self.BACSH, self.BACSE, self.BACSVT, self.BACSH2] optionsCollege = [ self.college_inter, self.college_aspeb, self.college_generale ] selected_classes = [] paths = C_File(resource_path("db/paths.txt")) if tab == "Setup": # path validation if self.validate_path(self.entry_path) and self.validate_path(self.entry_path2) and ( self.college_options.get() or self.high_school_options.get()): if self.high_school_options.get(): for option in optionsHighSchool: if option.get(): selected_classes.append((option.cget("text"))) if self.college_options.get(): for option in optionsCollege: if option.get(): selected_classes.append((option.cget("text"))) if len(selected_classes) == 0: self.college_label_error() self.high_school_label_eroor() else: self.selected_classes = selected_classes self.tabview_generate_lists.set("Output Location") L = paths.fichier_to_Liste() L[0] = "DATA" + "=" + self.entry_path.get() + "\n" L[1] = "TEMPLATE" + "=" + self.entry_path2.get() + "\n" paths.Liste_to_Fichier(L) else: if not self.validate_path(self.entry_path): self.label_data_file_error() if not self.validate_path(self.entry_path2): self.label_template_file_error() if self.high_school_options.get(): for option in optionsHighSchool: if option.get(): selected_classes.append((option.cget("text"))) if self.college_options.get(): for option in optionsCollege: if option.get(): selected_classes.append((option.cget("text"))) if len(selected_classes) == 0: self.college_label_error() self.high_school_label_eroor() if tab == "Output Location": if self.validate_dir(self.output_path): self.tabview_generate_lists.set("Review & Submit") L = paths.fichier_to_Liste() L[-1] = "DIR" + "=" + self.output_path.get() paths.Liste_to_Fichier(L) self.label_all_review1 = customtkinter.CTkTextbox(self.tabview_generate_lists.tab("Review & Submit")) self.label_all_review1.grid(row=0, column=0, columnspan=6, sticky="nsew") # self.label_all_review2.insert("1.0", text) text = f"Data file path:" text += " " * (30 - len("Data file path:")) text += str(self.entry_path.get()) + "\n\n" self.label_all_review1.insert("end", text) text = "Template file path:" text += " " * (30 - len("Template file path:")) text += str(self.entry_path2.get()) + "\n\n" self.label_all_review1.insert("end", text) text = "Classes:" text += " " * (30 - len("Classes:")) for c in self.selected_classes: text = text + c + ",\t" self.label_all_review1.insert("end", text + "\n\n") text = "Output 
directory:" text += " " * (30 - len("Output directory:")) text += str(self.output_path.get()) + "\n\n" self.label_all_review1.insert("end", text) self.label_all_review1.configure(state="disabled", text_color="gray70") else: self.directory_error() return def browse_path(self): filetypes = ( ("Text files", "*.xls"), # Display only .txt files ("All files", "*.*") # Display all files ) path = filedialog.askopenfilename(filetypes=filetypes, initialdir=os.path.dirname(self.path["DATA"]) if self.path["DATA"] != "" else os.path.join(os.path.expanduser('~'), 'Documents')) if path == "": return self.entry_path.delete(0, tk.END) # Clear the entry self.entry_path.insert(0, os.path.abspath(path)) self.path["DATA"] = path file = C_File(file_name=path) if file.existe_fichier(): self.reset_error1() def browse_path2(self): filetypes = ( ("Text files", "*.xlsx"), # Display only .txt files ("All files", "*.*") # Display all files ) path = filedialog.askopenfilename(filetypes=filetypes, initialdir=os.path.dirname(self.path["TEMPLATE"]) if self.path["TEMPLATE"] != "" else os.path.join(os.path.expanduser('~'), 'Documents')) if path == "": return self.entry_path2.delete(0, tk.END) # Clear the entry self.entry_path2.insert(0, os.path.abspath(path)) self.path["TEMPLATE"] = path file = C_File(file_name=path) if file.existe_fichier(): self.reset_error2() def browser_path3(self): filetypes = ( ("Text files", "*.xlsx"), # Display only .txt files ("All files", "*.*") # Display all files ) path = filedialog.askopenfilename(filetypes=filetypes, initialdir=os.path.dirname(self.path["ABSENCE_FILE"]) if self.path["ABSENCE_FILE"] != "" else os.path.join(os.path.expanduser('~'), 'Documents')) if path == "": return self.path["ABSENCE_FILE"] = path self.entry_path_absence.delete(0, tk.END) # Clear the entry self.entry_path_absence.insert(0, os.path.abspath(path)) file = C_File(file_name=path) if file.existe_fichier(): self.reset_label(self.label_absence_data_file) self.entry_reset(self.entry_path_absence) def browse_folder(self): path = filedialog.askdirectory(initialdir=self.path["DIR"] if self.path["DIR"] != "" else os.path.join(os.path.expanduser('~'), 'Documents')) if path == "": return self.output_path.delete(0, tk.END) self.output_path.insert(0, os.path.abspath(path)) self.path["DIR"] = path
dir = C_Dossier()
1
2023-10-29 18:10:27+00:00
16k
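The first record above centres on a PyInstaller resource helper: the snippet looks up sys._MEIPASS2 and falls back to the current directory, whereas the attribute PyInstaller actually sets on a frozen one-file bundle is sys._MEIPASS, so the lookup as written would most likely always take the fallback branch when bundled. The sketch below shows the commonly used form of that helper; the example asset path is an illustrative placeholder, not taken from the record.

# Minimal sketch of the resource-path helper discussed above (assumption:
# assets ship next to the script or are added with PyInstaller's --add-data).
import os
import sys

def resource_path(relative_path: str) -> str:
    """Resolve a bundled resource both in development and in a frozen one-file build."""
    # PyInstaller unpacks a one-file bundle into a temporary folder and exposes
    # that folder as sys._MEIPASS; during normal development the attribute is absent.
    base_path = getattr(sys, "_MEIPASS", os.path.abspath("."))
    return os.path.join(base_path, relative_path)

if __name__ == "__main__":
    # "images/logo_white.png" is a placeholder path for illustration only.
    print(resource_path(os.path.join("images", "logo_white.png")))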
hsma-programme/Teaching_DES_Concepts_Streamlit
pages/4_🏥_The_Full_Model.py
[ { "identifier": "add_logo", "path": "helper_functions.py", "snippet": "def add_logo():\n '''\n Add a logo at the top of the page navigation sidebar\n\n Approach written by blackary on\n https://discuss.streamlit.io/t/put-logo-and-title-above-on-top-of-page-navigation-in-sidebar-of-multipage-app/28213/5\n \n '''\n st.markdown(\n \"\"\"\n <style>\n [data-testid=\"stSidebarNav\"] {\n background-image: url(https://raw.githubusercontent.com/hsma-programme/Teaching_DES_Concepts_Streamlit/main/resources/hsma_logo_transparent_background_small.png);\n background-repeat: no-repeat;\n padding-top: 175px;\n background-position: 40px 30px;\n }\n [data-testid=\"stSidebarNav\"]::before {\n content: \"The DES Playground\";\n padding-left: 20px;\n margin-top: 50px;\n font-size: 30px;\n position: relative;\n top: 100px;\n }\n\n </style>\n \"\"\",\n unsafe_allow_html=True,\n )" }, { "identifier": "mermaid", "path": "helper_functions.py", "snippet": "def mermaid(code: str, height=600) -> None:\n components.html(\n f\"\"\"\n <link href='http://fonts.googleapis.com/css?family=Lexend' rel='stylesheet' type='text/css'>\n\n <pre class=\"mermaid\">\n {code}\n </pre>\n\n <script type=\"module\">\n import mermaid from 'https://cdn.jsdelivr.net/npm/mermaid@10/dist/mermaid.esm.min.mjs';\n mermaid.initialize({{ startOnLoad: true }});\n </script>\n \"\"\",\n height=height\n )" }, { "identifier": "center_running", "path": "helper_functions.py", "snippet": "def center_running():\n \"\"\"\n Have the \"running man\" animation in the center of the screen instead of the top right corner.\n \"\"\"\n st.markdown(\"\"\"\n<style>\n\ndiv[class*=\"StatusWidget\"]{\n\n position: fixed;\n margin: auto;\n top: 50%;\n left: 50%;\n marginRight: \"0px\"\n width: 50%;\n scale: 2.75;\n opacity: 1\n}\n\n</style>\n\"\"\", \n unsafe_allow_html=True)" }, { "identifier": "Scenario", "path": "model_classes.py", "snippet": "class Scenario:\n '''\n Container class for scenario parameters/arguments\n\n Passed to a model and its process classes\n '''\n\n def __init__(self,\n random_number_set=1,\n n_triage=DEFAULT_N_TRIAGE,\n n_reg=DEFAULT_N_REG,\n n_exam=DEFAULT_N_EXAM,\n n_trauma=DEFAULT_N_TRAUMA,\n n_cubicles_1=DEFAULT_N_CUBICLES_1,\n n_cubicles_2=DEFAULT_N_CUBICLES_2,\n triage_mean=DEFAULT_TRIAGE_MEAN,\n reg_mean=DEFAULT_REG_MEAN,\n reg_var=DEFAULT_REG_VAR,\n exam_mean=DEFAULT_EXAM_MEAN,\n exam_var=DEFAULT_EXAM_VAR,\n trauma_mean=DEFAULT_TRAUMA_MEAN,\n trauma_treat_mean=DEFAULT_TRAUMA_TREAT_MEAN,\n trauma_treat_var=DEFAULT_TRAUMA_TREAT_VAR,\n non_trauma_treat_mean=DEFAULT_NON_TRAUMA_TREAT_MEAN,\n non_trauma_treat_var=DEFAULT_NON_TRAUMA_TREAT_VAR,\n non_trauma_treat_p=DEFAULT_NON_TRAUMA_TREAT_P,\n prob_trauma=DEFAULT_PROB_TRAUMA,\n arrival_df=NSPP_PATH,\n override_arrival_rate=OVERRIDE_ARRIVAL_RATE,\n manual_arrival_rate=MANUAL_ARRIVAL_RATE_VALUE,\n model=\"full\"\n ):\n '''\n Create a scenario to parameterise the simulation model\n\n Parameters:\n -----------\n random_number_set: int, optional (default=DEFAULT_RNG_SET)\n Set to control the initial seeds of each stream of pseudo\n random numbers used in the model.\n\n n_triage: int\n The number of triage cubicles\n\n n_reg: int\n The number of registration clerks\n\n n_exam: int\n The number of examination rooms\n\n n_trauma: int\n The number of trauma bays for stablisation\n\n n_cubicles_1: int\n The number of non-trauma treatment cubicles\n\n n_cubicles_2: int\n The number of trauma treatment cubicles\n\n triage_mean: float\n Mean duration of the triage distribution (Exponential)\n\n reg_mean: 
float\n Mean duration of the registration distribution (Lognormal)\n\n reg_var: float\n Variance of the registration distribution (Lognormal)\n\n exam_mean: float\n Mean of the examination distribution (Normal)\n\n exam_var: float\n Variance of the examination distribution (Normal)\n\n trauma_mean: float\n Mean of the trauma stabilisation distribution (Exponential)\n\n trauma_treat_mean: float\n Mean of the trauma cubicle treatment distribution (Lognormal)\n\n trauma_treat_var: float\n Variance of the trauma cubicle treatment distribution (Lognormal)\n\n non_trauma_treat_mean: float\n Mean of the non trauma treatment distribution\n\n non_trauma_treat_var: float\n Variance of the non trauma treatment distribution\n\n non_trauma_treat_p: float\n Probability non trauma patient requires treatment\n\n prob_trauma: float\n probability that a new arrival is a trauma patient.\n\n model: string\n What model to run. Default is full. \n Options are \"full\", \"simplest\", \"simple_with_branch\"\n '''\n # sampling\n self.random_number_set = random_number_set\n\n # store parameters for sampling\n self.triage_mean = triage_mean\n self.reg_mean = reg_mean\n self.reg_var = reg_var\n self.exam_mean = exam_mean\n self.exam_var = exam_var\n self.trauma_mean = trauma_mean\n self.trauma_treat_mean = trauma_treat_mean\n self.trauma_treat_var = trauma_treat_var\n self.non_trauma_treat_mean = non_trauma_treat_mean\n self.non_trauma_treat_var = non_trauma_treat_var\n self.non_trauma_treat_p = non_trauma_treat_p\n self.prob_trauma = prob_trauma\n self.manual_arrival_rate = manual_arrival_rate\n self.arrival_df = arrival_df\n self.override_arrival_rate = override_arrival_rate\n self.model = model\n\n self.init_sampling()\n\n # count of each type of resource\n self.init_resource_counts(n_triage, n_reg, n_exam, n_trauma,\n n_cubicles_1, n_cubicles_2)\n\n def set_random_no_set(self, random_number_set):\n '''\n Controls the random sampling \n Parameters:\n ----------\n random_number_set: int\n Used to control the set of psuedo random numbers\n used by the distributions in the simulation.\n '''\n self.random_number_set = random_number_set\n self.init_sampling()\n\n def init_resource_counts(self, n_triage, n_reg, n_exam, n_trauma,\n n_cubicles_1, n_cubicles_2):\n '''\n Init the counts of resources to default values...\n '''\n self.n_triage = n_triage\n self.n_reg = n_reg\n self.n_exam = n_exam\n self.n_trauma = n_trauma\n\n # non-trauma (1), trauma (2) treatment cubicles\n self.n_cubicles_1 = n_cubicles_1\n self.n_cubicles_2 = n_cubicles_2\n\n def init_sampling(self):\n '''\n Create the distributions used by the model and initialise \n the random seeds of each.\n '''\n # create random number streams\n rng_streams = np.random.default_rng(self.random_number_set)\n self.seeds = rng_streams.integers(0, 999999999, size=N_STREAMS)\n\n # create distributions\n\n # Triage duration\n self.triage_dist = Exponential(self.triage_mean,\n random_seed=self.seeds[0])\n\n # Registration duration (non-trauma only)\n self.reg_dist = Lognormal(self.reg_mean,\n np.sqrt(self.reg_var),\n random_seed=self.seeds[1])\n\n # Evaluation (non-trauma only)\n self.exam_dist = Normal(self.exam_mean,\n np.sqrt(self.exam_var),\n random_seed=self.seeds[2])\n\n # Trauma/stablisation duration (trauma only)\n self.trauma_dist = Exponential(self.trauma_mean,\n random_seed=self.seeds[3])\n\n # Non-trauma treatment\n self.nt_treat_dist = Lognormal(self.non_trauma_treat_mean,\n np.sqrt(self.non_trauma_treat_var),\n random_seed=self.seeds[4])\n\n # treatment of 
trauma patients\n self.treat_dist = Lognormal(self.trauma_treat_mean,\n np.sqrt(self.non_trauma_treat_var),\n random_seed=self.seeds[5])\n\n # probability of non-trauma patient requiring treatment\n self.nt_p_treat_dist = Bernoulli(self.non_trauma_treat_p,\n random_seed=self.seeds[6])\n\n # probability of non-trauma versus trauma patient\n self.p_trauma_dist = Bernoulli(self.prob_trauma,\n random_seed=self.seeds[7])\n\n # init sampling for non-stationary poisson process\n self.init_nspp()\n\n def init_nspp(self):\n\n # read arrival profile\n self.arrivals = pd.read_csv(NSPP_PATH) # pylint: disable=attribute-defined-outside-init\n self.arrivals['mean_iat'] = 60 / self.arrivals['arrival_rate']\n\n # maximum arrival rate (smallest time between arrivals)\n self.lambda_max = self.arrivals['arrival_rate'].max() # pylint: disable=attribute-defined-outside-init\n\n # thinning exponential\n if self.override_arrival_rate is True:\n\n self.arrival_dist = Exponential(self.manual_arrival_rate, # pylint: disable=attribute-defined-outside-init\n random_seed=self.seeds[8])\n else:\n self.arrival_dist = Exponential(60.0 / self.lambda_max, # pylint: disable=attribute-defined-outside-init\n random_seed=self.seeds[8])\n\n # thinning uniform rng\n self.thinning_rng = Uniform(low=0.0, high=1.0, # pylint: disable=attribute-defined-outside-init\n random_seed=self.seeds[9])" }, { "identifier": "multiple_replications", "path": "model_classes.py", "snippet": "def multiple_replications(scenario,\n rc_period=DEFAULT_RESULTS_COLLECTION_PERIOD,\n n_reps=5,\n return_detailed_logs=False):\n '''\n Perform multiple replications of the model.\n\n Params:\n ------\n scenario: Scenario\n Parameters/arguments to configurethe model\n\n rc_period: float, optional (default=DEFAULT_RESULTS_COLLECTION_PERIOD)\n results collection period. 
\n the number of minutes to run the model to collect results\n\n n_reps: int, optional (default=DEFAULT_N_REPS)\n Number of independent replications to run.\n\n Returns:\n --------\n pandas.DataFrame\n '''\n\n # if return_full_log:\n # results = [single_run(scenario,\n # rc_period,\n # random_no_set=(scenario.random_number_set)+rep,\n # return_full_log=True,\n # return_event_log=False)\n # for rep in range(n_reps)]\n\n # format and return results in a dataframe\n # df_results = pd.concat(reesults)\n # df_results.index = np.arange(1, len(df_results)+1)\n # df_results.index.name = 'rep'\n # return df_results\n # return results\n\n if return_detailed_logs:\n results = [{'rep': rep+1,\n 'results': single_run(scenario,\n rc_period,\n random_no_set=(scenario.random_number_set)+rep,\n return_detailed_logs=True)}\n # .assign(Rep=rep+1)\n for rep in range(n_reps)]\n\n # format and return results in a dataframe\n\n return results\n # {\n # {df_results = [pd.concat(result) for result in results] }\n # }\n # return results\n\n results = [single_run(scenario,\n rc_period,\n random_no_set=(scenario.random_number_set)+rep)\n for rep in range(n_reps)]\n\n # format and return results in a dataframe\n df_results = pd.concat(results)\n df_results.index = np.arange(1, len(df_results)+1)\n df_results.index.name = 'rep'\n return df_results" }, { "identifier": "reshape_for_animations", "path": "output_animation_functions.py", "snippet": "def reshape_for_animations(full_event_log, every_x_minutes=10):\n minute_dfs = list()\n patient_dfs = list()\n\n for rep in range(1, max(full_event_log['rep'])+1):\n # print(\"Rep {}\".format(rep))\n # Start by getting data for a single rep\n filtered_log_rep = full_event_log[full_event_log['rep'] == rep].drop('rep', axis=1)\n pivoted_log = filtered_log_rep.pivot_table(values=\"time\", \n index=[\"patient\",\"event_type\",\"pathway\"], \n columns=\"event\").reset_index()\n\n for minute in range(10*60*24):\n # print(minute)\n # Get patients who arrived before the current minute and who left the system after the current minute\n # (or arrived but didn't reach the point of being seen before the model run ended)\n # When turning this into a function, think we will want user to pass\n # 'first step' and 'last step' or something similar\n # and will want to reshape the event log for this so that it has a clear start/end regardless\n # of pathway (move all the pathway stuff into a separate column?)\n\n # Think we maybe need a pathway order and pathway precedence column\n # But what about shared elements of each pathway?\n if minute % every_x_minutes == 0:\n\n try:\n current_patients_in_moment = pivoted_log[(pivoted_log['arrival'] <= minute) & \n (\n (pivoted_log['depart'] >= minute) |\n (pivoted_log['depart'].isnull() )\n )]['patient'].values\n except KeyError:\n current_patients_in_moment = None\n \n if current_patients_in_moment is not None:\n patient_minute_df = filtered_log_rep[filtered_log_rep['patient'].isin(current_patients_in_moment)]\n # print(len(patient_minute_df))\n # Grab just those clients from the filtered log (the unpivoted version)\n # Each person can only be in a single place at once, so filter out any events\n # that have taken place after the minute\n # then just take the latest event that has taken place for each client\n # most_recent_events_minute = patient_minute_df[patient_minute_df['time'] <= minute] \\\n # .sort_values('time', ascending=True) \\\n # .groupby(['patient',\"event_type\",\"pathway\"]) \\\n # .tail(1) \n\n most_recent_events_minute_ungrouped = 
patient_minute_df[patient_minute_df['time'] <= minute].reset_index() \\\n .sort_values(['time', 'index'], ascending=True) \\\n .groupby(['patient']) \\\n .tail(1) \n\n patient_dfs.append(most_recent_events_minute_ungrouped.assign(minute=minute, rep=rep))\n\n # Now count how many people are in each state\n # CHECK - I THINK THIS IS PROBABLY DOUBLE COUNTING PEOPLE BECAUSE OF THE PATHWAY AND EVENT TYPE. JUST JOIN PATHWAY/EVENT TYPE BACK IN INSTEAD?\n state_counts_minute = most_recent_events_minute_ungrouped[['event']].value_counts().rename(\"count\").reset_index().assign(minute=minute, rep=rep)\n \n minute_dfs.append(state_counts_minute)\n\n\n minute_counts_df = pd.concat(minute_dfs).merge(filtered_log_rep[['event','event_type', 'pathway']].drop_duplicates().reset_index(drop=True), on=\"event\")\n full_patient_df = pd.concat(patient_dfs).sort_values([\"rep\", \"minute\", \"event\"])\n\n # Add a final exit step for each client\n final_step = full_patient_df.sort_values([\"rep\", \"patient\", \"minute\"], ascending=True).groupby([\"rep\", \"patient\"]).tail(1)\n final_step['minute'] = final_step['minute'] + every_x_minutes\n final_step['event'] = \"exit\"\n # final_step['event_type'] = \"arrival_departure\"\n\n full_patient_df = full_patient_df.append(final_step)\n\n minute_counts_df_pivoted = minute_counts_df.pivot_table(values=\"count\", \n index=[\"minute\", \"rep\", \"event_type\", \"pathway\"], \n columns=\"event\").reset_index().fillna(0)\n\n minute_counts_df_complete = minute_counts_df_pivoted.melt(id_vars=[\"minute\", \"rep\",\"event_type\",\"pathway\"])\n\n return {\n \"minute_counts_df\": minute_counts_df,\n \"minute_counts_df_complete\": minute_counts_df_complete,\n \"full_patient_df\": full_patient_df.sort_values([\"rep\", \"minute\", \"event\"])\n \n }" }, { "identifier": "animate_activity_log", "path": "output_animation_functions.py", "snippet": "def animate_activity_log(\n full_patient_df,\n event_position_df,\n scenario,\n rep=1,\n plotly_height=900,\n plotly_width=None,\n wrap_queues_at=None,\n include_play_button=True,\n return_df_only=False,\n add_background_image=None,\n display_stage_labels=True,\n icon_and_text_size=24,\n override_x_max=None,\n override_y_max=None,\n time_display_units=None,\n setup_mode=False,\n frame_duration=400, #milliseconds\n frame_transition_duration=600 #milliseconds\n ):\n \"\"\"_summary_\n\n Args:\n full_patient_df (pd.Dataframe): \n \n event_position_dicts (pd.Dataframe): \n dataframe with three cols - event, x and y\n Can be more easily created by passing a list of dicts to pd.DataFrame\n list of dictionaries with one dicitionary per event type\n containing keys 'event', 'x' and 'y'\n This will determine the intial position of any entries in the animated log\n (think of it as the bottom right hand corner of any group of entities at each stage)\n\n scenario:\n Pass in an object that specifies the number of resources at different steps\n\n rep (int, optional): Defaults to 1.\n The replication of any model to include. Can only display one rep at a time, so will take\n the first rep if not otherwise specified. 
\n \n plotly_height (int, optional): Defaults to 900.\n\n Returns:\n Plotly fig object\n \"\"\" \n\n # Filter to only a single replication\n\n # TODO: Remove this from this function, and instead write a test\n # to ensure that no patient ID appears in multiple places at a single minute\n # and return an error if it does so\n # Move the step of ensuring there's only a single model run involved to outside\n # of this function as it's not really its job. \n\n full_patient_df = full_patient_df[full_patient_df['rep'] == rep].sort_values([\n 'event','minute','time'\n ])\n\n # full_patient_df['count'] = full_patient_df.groupby(['event','minute','rep'])['minute'] \\\n # .transform('count')\n \n # Order patients within event/minute/rep to determine their eventual position in the line\n full_patient_df['rank'] = full_patient_df.groupby(['event','minute','rep'])['minute'] \\\n .rank(method='first')\n\n full_patient_df_plus_pos = full_patient_df.merge(event_position_df, on=\"event\", how='left') \\\n .sort_values([\"rep\", \"event\", \"minute\", \"time\"])\n\n # Determine the position for any resource use steps\n resource_use = full_patient_df_plus_pos[full_patient_df_plus_pos['event_type'] == \"resource_use\"].copy()\n resource_use['y_final'] = resource_use['y']\n resource_use['x_final'] = resource_use['x'] - resource_use['resource_id']*10\n\n # Determine the position for any queuing steps\n queues = full_patient_df_plus_pos[full_patient_df_plus_pos['event_type']=='queue']\n queues['y_final'] = queues['y']\n queues['x_final'] = queues['x'] - queues['rank']*10\n\n # If we want people to wrap at a certain queue length, do this here\n # They'll wrap at the defined point and then the queue will start expanding upwards\n # from the starting row\n if wrap_queues_at is not None:\n queues['row'] = np.floor((queues['rank']) / (wrap_queues_at+1))\n queues['x_final'] = queues['x_final'] + (wrap_queues_at*queues['row']*10)\n queues['y_final'] = queues['y_final'] + (queues['row'] * 30)\n\n full_patient_df_plus_pos = pd.concat([queues, resource_use])\n\n # full_patient_df_plus_pos['icon'] = '🙍'\n\n individual_patients = full_patient_df['patient'].drop_duplicates().sort_values()\n \n # Recommend https://emojipedia.org/ for finding emojis to add to list\n # note that best compatibility across systems can be achieved by using \n # emojis from v12.0 and below - Windows 10 got no more updates after that point\n icon_list = [\n '🧔🏼', '👨🏿‍🦯', '👨🏻‍🦰', '🧑🏻', '👩🏿‍🦱', \n '🤰', '👳🏽', '👩🏼‍🦳', '👨🏿‍🦳', '👩🏼‍🦱', \n '🧍🏽‍♀️', '👨🏼‍🔬', '👩🏻‍🦰', '🧕🏿', '👨🏼‍🦽', \n '👴🏾', '👨🏼‍🦱', '👷🏾', '👧🏿', '🙎🏼‍♂️',\n '👩🏻‍🦲', '🧔🏾', '🧕🏻', '👨🏾‍🎓', '👨🏾‍🦲',\n '👨🏿‍🦰', '🙍🏼‍♂️', '🙋🏾‍♀️', '👩🏻‍🔧', '👨🏿‍🦽', \n '👩🏼‍🦳', '👩🏼‍🦼', '🙋🏽‍♂️', '👩🏿‍🎓', '👴🏻', \n '🤷🏻‍♀️', '👶🏾', '👨🏻‍✈️', '🙎🏿‍♀️', '👶🏻', \n '👴🏿', '👨🏻‍🦳', '👩🏽', '👩🏽‍🦳', '🧍🏼‍♂️', \n '👩🏽‍🎓', '👱🏻‍♀️', '👲🏼', '🧕🏾', '👨🏻‍🦯', \n '🧔🏿', '👳🏿', '🤦🏻‍♂️', '👩🏽‍🦰', '👨🏼‍✈️', \n '👨🏾‍🦲', '🧍🏾‍♂️', '👧🏼', '🤷🏿‍♂️', '👨🏿‍🔧', \n '👱🏾‍♂️', '👨🏼‍🎓', '👵🏼', '🤵🏿', '🤦🏾‍♀️',\n '👳🏻', '🙋🏼‍♂️', '👩🏻‍🎓', '👩🏼‍🌾', '👩🏾‍🔬',\n '👩🏿‍✈️', '🎅🏼', '👵🏿', '🤵🏻', '🤰'\n ]\n\n full_icon_list = icon_list * int(np.ceil(len(individual_patients)/len(icon_list)))\n\n full_icon_list = full_icon_list[0:len(individual_patients)]\n\n full_patient_df_plus_pos = full_patient_df_plus_pos.merge(\n pd.DataFrame({'patient':list(individual_patients),\n 'icon':full_icon_list}),\n on=\"patient\")\n\n if return_df_only:\n return full_patient_df_plus_pos\n\n if override_x_max is not None:\n x_max = override_x_max\n else:\n x_max = event_position_df['x'].max()*1.25\n\n if override_y_max is not 
None:\n y_max = override_x_max\n else:\n y_max = event_position_df['y'].max()*1.1\n\n # If we're displaying time as a clock instead of as units of whatever time our model\n # is working in, create a minute_display column that will display as a psuedo datetime\n \n # For now, it starts a few months after the current date, just to give the\n # idea of simulating some hypothetical future time. It might be nice to allow\n # the start point to be changed, particular if we're simulating something on\n # a larger timescale that includes a level of weekly or monthly seasonality.\n\n # We need to keep the original minute column in existance because it's important for sorting\n if time_display_units == \"dhm\":\n full_patient_df_plus_pos['minute'] = dt.date.today() + pd.DateOffset(days=165) + pd.TimedeltaIndex(full_patient_df_plus_pos['minute'], unit='m')\n # https://strftime.org/\n full_patient_df_plus_pos['minute_display'] = full_patient_df_plus_pos['minute'].apply(\n lambda x: dt.datetime.strftime(x, '%d %B %Y\\n%H:%M')\n )\n full_patient_df_plus_pos['minute'] = full_patient_df_plus_pos['minute'].apply(\n lambda x: dt.datetime.strftime(x, '%Y-%m-%d %H:%M')\n )\n else:\n full_patient_df_plus_pos['minute_display'] = full_patient_df_plus_pos['minute']\n\n # full_patient_df_plus_pos['size'] = 24\n\n # We are effectively making use of an animated plotly express scatterploy\n # to do all of the heavy lifting\n # Because of the way plots animate in this, it deals with all of the difficulty\n # of paths between individual positions - so we just have to tell it where to put\n # people at each defined step of the process, and the scattergraph will move them\n\n fig = px.scatter(\n full_patient_df_plus_pos.sort_values('minute'),\n x=\"x_final\",\n y=\"y_final\",\n # Each frame is one step of time, with the gap being determined\n # in the reshape_for_animation function\n animation_frame=\"minute_display\",\n # Important to group by patient here\n animation_group=\"patient\",\n text=\"icon\",\n # Can't have colours because it causes bugs with\n # lots of points failing to appear\n #color=\"event\",\n hover_name=\"event\",\n hover_data=[\"patient\", \"pathway\", \"time\", \"minute\", \"resource_id\"],\n # The approach of putting in the people as symbols didn't work\n # Went with making emoji text labels instead - this works better!\n # But leaving in as a reminder that the symbol approach doens't work.\n #symbol=\"rep\",\n #symbol_sequence=[\"⚽\"],\n #symbol_map=dict(rep_choice = \"⚽\"),\n range_x=[0, x_max],\n range_y=[0, y_max],\n height=plotly_height,\n width=plotly_width,\n # This sets the opacity of the points that sit behind\n opacity=0\n # size=\"size\"\n )\n\n # Now add labels identifying each stage (optional - can either be used\n # in conjunction with a background image or as a way to see stage names\n # without the need to create a background image)\n if display_stage_labels:\n fig.add_trace(go.Scatter(\n x=[pos+10 for pos in event_position_df['x'].to_list()],\n y=event_position_df['y'].to_list(),\n mode=\"text\",\n name=\"\",\n text=event_position_df['label'].to_list(),\n textposition=\"middle right\",\n hoverinfo='none'\n ))\n\n # Update the size of the icons and labels\n # This is what determines the size of the individual emojis that \n # represent our people!\n fig.update_traces(textfont_size=icon_and_text_size)\n\n # Finally add in icons to indicate the available resources\n # Make an additional dataframe that has one row per resource type\n # Then, starting from the initial position, make 
that many large circles\n # make them semi-transparent or you won't see the people using them! \n events_with_resources = event_position_df[event_position_df['resource'].notnull()].copy()\n events_with_resources['resource_count'] = events_with_resources['resource'].apply(lambda x: getattr(scenario, x))\n\n events_with_resources = events_with_resources.join(events_with_resources.apply(\n lambda r: pd.Series({'x_final': [r['x']-(10*(i+1)) for i in range(r['resource_count'])]}), axis=1).explode('x_final'),\n how='right')\n\n # This just adds an additional scatter trace that creates large dots\n # that represent the individual resources\n fig.add_trace(go.Scatter(\n x=events_with_resources['x_final'].to_list(),\n # Place these slightly below the y position for each entity\n # that will be using the resource\n y=[i-10 for i in events_with_resources['y'].to_list()],\n mode=\"markers\",\n # Define what the marker will look like\n marker=dict(\n color='LightSkyBlue',\n size=15),\n opacity=0.8,\n hoverinfo='none'\n ))\n\n # Optional step to add a background image\n # This can help to better visualise the layout/structure of a pathway\n # Simple FOSS tool for creating these background images is draw.io\n # Ideally your queueing steps should always be ABOVE your resource use steps\n # as this then results in people nicely flowing from the front of the queue \n # to the next stage\n if add_background_image is not None:\n fig.add_layout_image(\n dict(\n source=add_background_image,\n xref=\"x domain\",\n yref=\"y domain\",\n x=1,\n y=1,\n sizex=1,\n sizey=1,\n xanchor=\"right\",\n yanchor=\"top\",\n sizing=\"stretch\",\n opacity=0.5,\n layer=\"below\")\n )\n\n # We don't need any gridlines or tickmarks for the final output, so remove\n # However, can be useful for the initial setup phase of the outputs, so give the \n # option to inlcude\n if not setup_mode:\n fig.update_xaxes(showticklabels=False, showgrid=False, zeroline=False, \n # Prevent zoom\n fixedrange=True)\n fig.update_yaxes(showticklabels=False, showgrid=False, zeroline=False, \n # Prevent zoom\n fixedrange=True)\n\n fig.update_layout(yaxis_title=None, xaxis_title=None, showlegend=False,\n # Increase the size of the play button and animation timeline\n sliders=[dict(currentvalue=dict(font=dict(size=35) ,\n prefix=\"\"))]\n )\n\n # You can get rid of the play button if desired\n # Was more useful in older versions of the function\n if not include_play_button:\n fig[\"layout\"].pop(\"updatemenus\")\n\n # Adjust speed of animation\n fig.layout.updatemenus[0].buttons[0].args[1]['frame']['duration'] = frame_duration\n fig.layout.updatemenus[0].buttons[0].args[1]['transition']['duration'] = frame_transition_duration\n\n return fig" } ]
import gc import asyncio import pandas as pd import plotly.express as px import plotly.graph_objects as go import streamlit as st import numpy as np from helper_functions import add_logo, mermaid, center_running from model_classes import Scenario, multiple_replications from output_animation_functions import reshape_for_animations, animate_activity_log
11,412
current_state = st.session_state['session_results'] current_state.append(results_for_state) del results_for_state gc.collect() st.session_state['session_results'] = current_state del current_state gc.collect() # print(len(st.session_state['session_results'])) # UTILISATION AUDIT - BRING BACK WHEN NEEDED # full_utilisation_audit = pd.concat([detailed_outputs[i]['results']['utilisation_audit'].assign(Rep= i+1) # for i in range(n_reps)]) # animation_dfs_queue = reshape_for_animations( # full_event_log[ # (full_event_log['rep']==1) & # ((full_event_log['event_type']=='queue') | (full_event_log['event_type']=='arrival_departure')) # ] # ) my_bar.progress(80, text="Creating Animations...") animation_dfs_log = reshape_for_animations( full_event_log=full_event_log[ (full_event_log['rep']==1) & ((full_event_log['event_type']=='queue') | (full_event_log['event_type']=='resource_use') | (full_event_log['event_type']=='arrival_departure')) & # Limit to first 5 days (full_event_log['time'] <= 60*24*5) ], every_x_minutes=5 )['full_patient_df'] del full_event_log gc.collect() my_bar.progress(100, text="Simulation Complete!") # st.write(results.reset_index()) # st.write(pd.wide_to_long(results, stubnames=['util', 'wait'], # i="rep", j="metric_type", # sep='_', suffix='.*')) # st.write(results.reset_index().melt(id_vars="rep").set_index('variable').filter(like="util", axis=0)) # Add in a box plot showing utilisation tab_playground_results_1, tab_playground_results_2, tab_playground_results_3 = st.tabs([ "Simple Graphs", "Animated Log", "Advanced Graphs" ]) # st.markdown(""" # You can click on the three tabs below ("Simple Graphs", "Animated Log", and "Advanced Graphs") to view different outputs from the model. # """) # st.subheader("Look at Average Results Across Replications") with tab_playground_results_2: event_position_df = pd.DataFrame([ # {'event': 'arrival', 'x': 10, 'y': 250, 'label': "Arrival" }, # Triage - minor and trauma {'event': 'triage_wait_begins', 'x': 160, 'y': 400, 'label': "Waiting for<br>Triage" }, {'event': 'triage_begins', 'x': 160, 'y': 315, 'resource':'n_triage', 'label': "Being Triaged" }, # Minors (non-trauma) pathway {'event': 'MINORS_registration_wait_begins', 'x': 300, 'y': 145, 'label': "Waiting for<br>Registration" }, {'event': 'MINORS_registration_begins', 'x': 300, 'y': 85, 'resource':'n_reg', 'label':'Being<br>Registered' }, {'event': 'MINORS_examination_wait_begins', 'x': 465, 'y': 145, 'label': "Waiting for<br>Examination" }, {'event': 'MINORS_examination_begins', 'x': 465, 'y': 85, 'resource':'n_exam', 'label': "Being<br>Examined" }, {'event': 'MINORS_treatment_wait_begins', 'x': 630, 'y': 145, 'label': "Waiting for<br>Treatment" }, {'event': 'MINORS_treatment_begins', 'x': 630, 'y': 85, 'resource':'n_cubicles_1', 'label': "Being<br>Treated" }, # Trauma pathway {'event': 'TRAUMA_stabilisation_wait_begins', 'x': 300, 'y': 560, 'label': "Waiting for<br>Stabilisation" }, {'event': 'TRAUMA_stabilisation_begins', 'x': 300, 'y': 500, 'resource':'n_trauma', 'label': "Being<br>Stabilised" }, {'event': 'TRAUMA_treatment_wait_begins', 'x': 630, 'y': 560, 'label': "Waiting for<br>Treatment" }, {'event': 'TRAUMA_treatment_begins', 'x': 630, 'y': 500, 'resource':'n_cubicles_2', 'label': "Being<br>Treated" }, {'event': 'exit', 'x': 670, 'y': 330, 'label': "Exit"} ]) # st.dataframe(animation_dfs_log) st.markdown( """ The plot below shows a snapshot every 5 minutes of the position of everyone in our emergency department model. 
The buttons to the left of the slider below the plot can be used to start and stop the animation. Clicking on the bar below the plot and dragging your cursor to the left or right allows you to rapidly jump through to a different time in the simulation. Only the first replication of the simulation is shown. """ )
''' A Streamlit application based on Monks and Allows users to interact with an increasingly more complex treatment simulation ''' st.set_page_config( page_title="The Full Model", layout="wide", initial_sidebar_state="expanded", ) # Initialise session state if 'session_results' not in st.session_state: st.session_state['session_results'] = [] add_logo() center_running() with open("style.css") as css: st.markdown( f'<style>{css.read()}</style>' , unsafe_allow_html= True) ## We add in a title for our web app's page st.title("Discrete Event Simulation Playground") st.subheader("How can we optimise the full system?") st.markdown("Once you have run more than one scenario, try out the new tab 'compare scenario outputs'.") gc.collect() # tab1, tab2, tab3, tab4 = st.tabs(["Introduction", "Exercises", "Playground", "Compare Scenario Outputs"]) tab1, tab2, tab3, tab4 = st.tabs(["Playground", "Exercise", "Compare Scenario Outputs", "Information"]) with tab4: st.markdown(""" So now we have explored every component of the model: - Generating arrivals - Generating and using resources - Sending people down different paths So now let's create a version of the model that uses all of these aspects. For now, we won't consider nurses separately - we will assume that each nurse on shift has one room that is theirs to always use. """ ) mermaid(height=600, code= """ %%{ init: { 'flowchart': { 'curve': 'step' } } }%% %%{ init: { 'theme': 'base', 'themeVariables': {'lineColor': '#b4b4b4'} } }%% flowchart LR A[Arrival] --> BX[Triage] BX -.-> T([Triage Bay\n<b>RESOURCE</b>]) T -.-> BX BX --> BY{Trauma or non-trauma} BY ----> B1{Trauma Pathway} BY ----> B2{Non-Trauma Pathway} B1 --> C[Stabilisation] C --> E[Treatment] B2 --> D[Registration] D --> G[Examination] G --> H[Treat?] H ----> F H --> I[Non-Trauma Treatment] I --> F C -.-> Z([Trauma Room\n<b>RESOURCE</b>]) Z -.-> C E -.-> Y([Cubicle - 1\n<b>RESOURCE</b>]) Y -.-> E D -.-> X([Clerks\n<b>RESOURCE</b>]) X -.-> D G -.-> W([Exam Room\n<b>RESOURCE</b>]) W -.-> G I -.-> V([Cubicle - 2\n<b>RESOURCE</b>]) V -.-> I E ----> F[Discharge] classDef ZZ1 fill:#8B5E0F,font-family:lexend, color:#FFF classDef ZZ2 fill:#5DFDA0,font-family:lexend classDef ZZ2a fill:#02CD55,font-family:lexend, color:#FFF classDef ZZ3 fill: #D45E5E,font-family:lexend classDef ZZ3a fill: #932727,font-family:lexend, color:#FFF classDef ZZ4 fill: #611D67,font-family:lexend, color:#FFF classDef ZZ5 fill:#47D7FF,font-family:lexend classDef ZZ5a fill:#00AADA,font-family:lexend class A ZZ1 class C,E ZZ2 class D,G ZZ3 class X,W ZZ3a class Z,Y ZZ2a class I,V ZZ4 class BX ZZ5 class T ZZ5a ; """ ) with tab2: st.header("Things to Try") st.markdown( """ - First, just run the model with the default settings. - Look at the graphs and animated patient log. What is the performance of the system like? - Are the queues consistent throughout the day? --- - Due to building work taking place, the hospital will temporarily need to close several bays. It will be possible to have a maximum of 20 bays/cubicles/rooms in total across the whole system. - What is the best configuration you can find to keep the average wait times as low as possible across both trauma and non-trauma pathways? 
*Make sure you are using the default probabilities for trauma/non-trauma patients (0.3) and treatment of non-trauma patients (0.7)* """ ) with tab1: # n_triage: int # The number of triage cubicles # n_reg: int # The number of registration clerks # n_exam: int # The number of examination rooms # n_trauma: int # The number of trauma bays for stablisation # n_cubicles_1: int # The number of non-trauma treatment cubicles # n_cubicles_2: int # The number of trauma treatment cubicles # non_trauma_treat_p: float # Probability non trauma patient requires treatment # prob_trauma: float # probability that a new arrival is a trauma patient. col1, col2, col3, col4 = st.columns(4) with col1: st.subheader("Triage") n_triage = st.slider("👨‍⚕️👩‍⚕️ Number of Triage Cubicles", 1, 10, step=1, value=4) prob_trauma = st.slider("🚑 Probability that a new arrival is a trauma patient", 0.0, 1.0, step=0.01, value=0.3, help="0 = No arrivals are trauma patients\n\n1 = All arrivals are trauma patients") with col2: st.subheader("Trauma Pathway") n_trauma = st.slider("👨‍⚕️👩‍⚕️ Number of Trauma Bays for Stabilisation", 1, 10, step=1, value=6) n_cubicles_2 = st.slider("👨‍⚕️👩‍⚕️ Number of Treatment Cubicles for Trauma", 1, 10, step=1, value=6) with col3: st.subheader("Non-Trauma Pathway") n_reg = st.slider("👨‍⚕️👩‍⚕️ Number of Registration Cubicles", 1, 10, step=1, value=3) n_exam = st.slider("👨‍⚕️👩‍⚕️ Number of Examination Rooms for non-trauma patients", 1, 10, step=1, value=3) with col4: st.subheader("Non-Trauma Treatment") n_cubicles_1 = st.slider("👨‍⚕️👩‍⚕️ Number of Treatment Cubicles for Non-Trauma", 1, 10, step=1, value=2) non_trauma_treat_p = st.slider("🤕 Probability that a non-trauma patient will need treatment", 0.0, 1.0, step=0.01, value=0.7, help="0 = No non-trauma patients need treatment\n\n1 = All non-trauma patients need treatment") col5, col6 = st.columns(2) with col5: st.write("Total rooms in use is {}".format(n_cubicles_1+n_cubicles_2+n_exam+n_trauma+n_triage+n_reg)) with col6: with st.expander("Advanced Parameters"): seed = st.slider("🎲 Set a random number for the computer to start from", 1, 1000, step=1, value=42) n_reps = st.slider("🔁 How many times should the simulation run? 
WARNING: Fast/modern computer required to take this above 5 replications.", 1, 10, step=1, value=3) run_time_days = st.slider("🗓️ How many days should we run the simulation for each time?", 1, 60, step=1, value=5) args = Scenario( random_number_set=seed, n_triage=n_triage, n_reg=n_reg, n_exam=n_exam, n_trauma=n_trauma, n_cubicles_1=n_cubicles_1, n_cubicles_2=n_cubicles_2, non_trauma_treat_p=non_trauma_treat_p, prob_trauma=prob_trauma) # A user must press a streamlit button to run the model button_run_pressed = st.button("Run simulation") if button_run_pressed: # add a spinner and then display success box with st.spinner('Simulating the minor injuries unit...'): await asyncio.sleep(0.1) my_bar = st.progress(0, text="Simulating the minor injuries unit...") # run multiple replications of experment detailed_outputs = multiple_replications( args, n_reps=n_reps, rc_period=run_time_days*60*24, return_detailed_logs=True ) my_bar.progress(40, text="Collating Simulation Outputs...") results = pd.concat([detailed_outputs[i]['results']['summary_df'].assign(rep= i+1) for i in range(n_reps)]).set_index('rep') full_event_log = pd.concat([detailed_outputs[i]['results']['full_event_log'].assign(rep= i+1) for i in range(n_reps)]) del detailed_outputs gc.collect() my_bar.progress(60, text="Logging Results...") # print(len(st.session_state['session_results'])) # results_for_state = pd.DataFrame(results.median()).T.drop(['Rep'], axis=1) results_for_state = results original_cols = results_for_state.columns.values results_for_state['Triage\nCubicles'] = args.n_triage results_for_state['Registration\nClerks'] = args.n_reg results_for_state['Examination\nRooms'] = args.n_exam results_for_state['Non-Trauma\nTreatment Cubicles'] = args.n_cubicles_1 results_for_state['Trauma\nStabilisation Bays'] = args.n_trauma results_for_state['Trauma\nTreatment Cubicles'] = args.n_cubicles_2 results_for_state['Probability patient\nis a trauma patient'] = args.prob_trauma results_for_state['Probability non-trauma patients\nrequire treatment'] = args.non_trauma_treat_p results_for_state['Model Run'] = len(st.session_state['session_results']) + 1 results_for_state['Random Seed'] = seed # Reorder columns column_order = ['Model Run', 'Triage\nCubicles', 'Registration\nClerks', 'Examination\nRooms', 'Non-Trauma\nTreatment Cubicles', 'Trauma\nStabilisation Bays', 'Trauma\nTreatment Cubicles', 'Probability patient\nis a trauma patient', 'Probability non-trauma patients\nrequire treatment', 'Random Seed' ] + list(original_cols) results_for_state = results_for_state[column_order] current_state = st.session_state['session_results'] current_state.append(results_for_state) del results_for_state gc.collect() st.session_state['session_results'] = current_state del current_state gc.collect() # print(len(st.session_state['session_results'])) # UTILISATION AUDIT - BRING BACK WHEN NEEDED # full_utilisation_audit = pd.concat([detailed_outputs[i]['results']['utilisation_audit'].assign(Rep= i+1) # for i in range(n_reps)]) # animation_dfs_queue = reshape_for_animations( # full_event_log[ # (full_event_log['rep']==1) & # ((full_event_log['event_type']=='queue') | (full_event_log['event_type']=='arrival_departure')) # ] # ) my_bar.progress(80, text="Creating Animations...") animation_dfs_log = reshape_for_animations( full_event_log=full_event_log[ (full_event_log['rep']==1) & ((full_event_log['event_type']=='queue') | (full_event_log['event_type']=='resource_use') | (full_event_log['event_type']=='arrival_departure')) & # Limit to first 5 days 
(full_event_log['time'] <= 60*24*5) ], every_x_minutes=5 )['full_patient_df'] del full_event_log gc.collect() my_bar.progress(100, text="Simulation Complete!") # st.write(results.reset_index()) # st.write(pd.wide_to_long(results, stubnames=['util', 'wait'], # i="rep", j="metric_type", # sep='_', suffix='.*')) # st.write(results.reset_index().melt(id_vars="rep").set_index('variable').filter(like="util", axis=0)) # Add in a box plot showing utilisation tab_playground_results_1, tab_playground_results_2, tab_playground_results_3 = st.tabs([ "Simple Graphs", "Animated Log", "Advanced Graphs" ]) # st.markdown(""" # You can click on the three tabs below ("Simple Graphs", "Animated Log", and "Advanced Graphs") to view different outputs from the model. # """) # st.subheader("Look at Average Results Across Replications") with tab_playground_results_2: event_position_df = pd.DataFrame([ # {'event': 'arrival', 'x': 10, 'y': 250, 'label': "Arrival" }, # Triage - minor and trauma {'event': 'triage_wait_begins', 'x': 160, 'y': 400, 'label': "Waiting for<br>Triage" }, {'event': 'triage_begins', 'x': 160, 'y': 315, 'resource':'n_triage', 'label': "Being Triaged" }, # Minors (non-trauma) pathway {'event': 'MINORS_registration_wait_begins', 'x': 300, 'y': 145, 'label': "Waiting for<br>Registration" }, {'event': 'MINORS_registration_begins', 'x': 300, 'y': 85, 'resource':'n_reg', 'label':'Being<br>Registered' }, {'event': 'MINORS_examination_wait_begins', 'x': 465, 'y': 145, 'label': "Waiting for<br>Examination" }, {'event': 'MINORS_examination_begins', 'x': 465, 'y': 85, 'resource':'n_exam', 'label': "Being<br>Examined" }, {'event': 'MINORS_treatment_wait_begins', 'x': 630, 'y': 145, 'label': "Waiting for<br>Treatment" }, {'event': 'MINORS_treatment_begins', 'x': 630, 'y': 85, 'resource':'n_cubicles_1', 'label': "Being<br>Treated" }, # Trauma pathway {'event': 'TRAUMA_stabilisation_wait_begins', 'x': 300, 'y': 560, 'label': "Waiting for<br>Stabilisation" }, {'event': 'TRAUMA_stabilisation_begins', 'x': 300, 'y': 500, 'resource':'n_trauma', 'label': "Being<br>Stabilised" }, {'event': 'TRAUMA_treatment_wait_begins', 'x': 630, 'y': 560, 'label': "Waiting for<br>Treatment" }, {'event': 'TRAUMA_treatment_begins', 'x': 630, 'y': 500, 'resource':'n_cubicles_2', 'label': "Being<br>Treated" }, {'event': 'exit', 'x': 670, 'y': 330, 'label': "Exit"} ]) # st.dataframe(animation_dfs_log) st.markdown( """ The plot below shows a snapshot every 5 minutes of the position of everyone in our emergency department model. The buttons to the left of the slider below the plot can be used to start and stop the animation. Clicking on the bar below the plot and dragging your cursor to the left or right allows you to rapidly jump through to a different time in the simulation. Only the first replication of the simulation is shown. """ )
animated_plot = animate_activity_log(
6
2023-10-26 09:57:52+00:00
16k
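The Scenario.init_nspp snippet in the record above reads an hourly arrival profile, derives a mean inter-arrival time as 60 / arrival_rate, samples candidate arrivals at the maximum rate (lambda_max) and keeps a Uniform(0, 1) stream for thinning. The sketch below illustrates the standard thinning algorithm those pieces support; the hourly rates and function name are invented for illustration and this is not the repository's own implementation.

# Illustrative non-stationary Poisson arrivals by thinning; the rate profile
# below is a made-up placeholder, not the model's NSPP data file.
import numpy as np

def sample_nspp_arrivals(hourly_rates, run_length_minutes, seed=42):
    """Return arrival times in minutes for a piecewise-constant hourly rate profile."""
    rng = np.random.default_rng(seed)
    lambda_max = max(hourly_rates)                 # candidates generated at the peak rate
    t, arrivals = 0.0, []
    while True:
        t += rng.exponential(60.0 / lambda_max)    # candidate inter-arrival time (minutes)
        if t >= run_length_minutes:
            break
        hour = int(t // 60) % len(hourly_rates)
        # accept the candidate with probability lambda(t) / lambda_max
        if rng.uniform() < hourly_rates[hour] / lambda_max:
            arrivals.append(t)
    return arrivals

print(len(sample_nspp_arrivals([5, 10, 20, 12], run_length_minutes=60 * 24)))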
hyperspy/exspy
exspy/signals/eds_sem.py
[ { "identifier": "EDSSpectrum", "path": "exspy/signals/eds.py", "snippet": "class EDSSpectrum(Signal1D):\n \"\"\"General signal class for EDS spectra.\"\"\"\n\n _signal_type = \"EDS\"\n\n def __init__(self, *args, **kwards):\n super().__init__(*args, **kwards)\n if self.metadata.Signal.signal_type == \"EDS\":\n warnings.warn(\n \"The microscope type is not set. Use \"\n \"set_signal_type('EDS_TEM') \"\n \"or set_signal_type('EDS_SEM')\"\n )\n self.axes_manager.signal_axes[0].is_binned = True\n self._xray_markers = {}\n\n def _get_line_energy(self, Xray_line, FWHM_MnKa=None):\n \"\"\"\n Get the line energy and the energy resolution of a Xray line.\n\n The return values are in the same units than the signal axis\n\n Parameters\n ----------\n Xray_line : strings\n Valid element X-ray lines e.g. Fe_Kb\n FWHM_MnKa: {None, float, 'auto'}\n The energy resolution of the detector in eV\n if 'auto', used the one in\n 'self.metadata.Acquisition_instrument.SEM.Detector.EDS.energy_resolution_MnKa'\n\n Returns\n -------\n float: the line energy, if FWHM_MnKa is None\n (float,float): the line energy and the energy resolution, if FWHM_MnKa\n is not None\n \"\"\"\n\n units_name = self.axes_manager.signal_axes[0].units\n\n if FWHM_MnKa == \"auto\":\n if self.metadata.Signal.signal_type == \"EDS_SEM\":\n FWHM_MnKa = (\n self.metadata.Acquisition_instrument.SEM.Detector.EDS.energy_resolution_MnKa\n )\n elif self.metadata.Signal.signal_type == \"EDS_TEM\":\n FWHM_MnKa = (\n self.metadata.Acquisition_instrument.TEM.Detector.EDS.energy_resolution_MnKa\n )\n else:\n raise NotImplementedError(\n \"This method only works for EDS_TEM or EDS_SEM signals. \"\n \"You can use `set_signal_type('EDS_TEM')` or\"\n \"`set_signal_type('EDS_SEM')` to convert to one of these\"\n \"signal types.\"\n )\n line_energy = utils_eds._get_energy_xray_line(Xray_line)\n if units_name == \"eV\":\n line_energy *= 1000\n if FWHM_MnKa is not None:\n line_FWHM = (\n utils_eds.get_FWHM_at_Energy(FWHM_MnKa, line_energy / 1000) * 1000\n )\n elif units_name == \"keV\":\n if FWHM_MnKa is not None:\n line_FWHM = utils_eds.get_FWHM_at_Energy(FWHM_MnKa, line_energy)\n else:\n raise ValueError(\n f\"{units_name} is not a valid units for the energy axis. \"\n \"Only `eV` and `keV` are supported. \"\n \"If `s` is the variable containing this EDS spectrum:\\n \"\n \">>> s.axes_manager.signal_axes[0].units = 'keV' \\n\"\n )\n if FWHM_MnKa is None:\n return line_energy\n else:\n return line_energy, line_FWHM\n\n def _get_beam_energy(self):\n \"\"\"\n Get the beam energy.\n\n The return value is in the same units than the signal axis\n \"\"\"\n\n if \"Acquisition_instrument.SEM.beam_energy\" in self.metadata:\n beam_energy = self.metadata.Acquisition_instrument.SEM.beam_energy\n elif \"Acquisition_instrument.TEM.beam_energy\" in self.metadata:\n beam_energy = self.metadata.Acquisition_instrument.TEM.beam_energy\n else:\n raise AttributeError(\n \"The beam energy is not defined in `metadata`. 
\"\n \"Use `set_microscope_parameters` to set it.\"\n )\n\n units_name = self.axes_manager.signal_axes[0].units\n\n if units_name == \"eV\":\n beam_energy *= 1000\n return beam_energy\n\n def _get_xray_lines_in_spectral_range(self, xray_lines):\n \"\"\"\n Return the lines in the energy range\n\n Parameters\n ----------\n xray_lines: List of string\n The xray_lines\n\n Return\n ------\n The list of xray_lines in the energy range\n \"\"\"\n ax = self.axes_manager.signal_axes[0]\n low_value = ax.low_value\n high_value = ax.high_value\n try:\n if self._get_beam_energy() < high_value:\n high_value = self._get_beam_energy()\n except AttributeError:\n # in case the beam energy is not defined in the metadata\n pass\n xray_lines_in_range = []\n xray_lines_not_in_range = []\n for xray_line in xray_lines:\n line_energy = self._get_line_energy(xray_line)\n if low_value < line_energy < high_value:\n xray_lines_in_range.append(xray_line)\n else:\n xray_lines_not_in_range.append(xray_line)\n return xray_lines_in_range, xray_lines_not_in_range\n\n def sum(self, axis=None, out=None, rechunk=False):\n if axis is None:\n axis = self.axes_manager.navigation_axes\n s = super().sum(axis=axis, out=out, rechunk=rechunk)\n s = out or s\n\n # Update live time by the change in navigation axes dimensions\n time_factor = np.prod(\n [ax.size for ax in self.axes_manager.navigation_axes]\n ) / np.prod([ax.size for ax in s.axes_manager.navigation_axes])\n aimd = s.metadata.get_item(\"Acquisition_instrument\", None)\n if aimd is not None:\n aimd = s.metadata.Acquisition_instrument\n if \"SEM.Detector.EDS.live_time\" in aimd:\n aimd.SEM.Detector.EDS.live_time *= time_factor\n elif \"TEM.Detector.EDS.live_time\" in aimd:\n aimd.TEM.Detector.EDS.live_time *= time_factor\n else:\n _logger.info(\n \"Live_time could not be found in the metadata and \"\n \"has not been updated.\"\n )\n\n if out is None:\n return s\n\n sum.__doc__ = Signal1D.sum.__doc__\n\n def rebin(self, new_shape=None, scale=None, crop=True, dtype=None, out=None):\n factors = self._validate_rebin_args_and_get_factors(\n new_shape=new_shape,\n scale=scale,\n )\n m = super().rebin(\n new_shape=new_shape, scale=scale, crop=crop, dtype=dtype, out=out\n )\n m = out or m\n time_factor = np.prod(\n [factors[axis.index_in_array] for axis in m.axes_manager.navigation_axes]\n )\n aimd = m.metadata.Acquisition_instrument\n if \"Acquisition_instrument.SEM.Detector.EDS.real_time\" in m.metadata:\n aimd.SEM.Detector.EDS.real_time *= time_factor\n elif \"Acquisition_instrument.TEM.Detector.EDS.real_time\" in m.metadata:\n aimd.TEM.Detector.EDS.real_time *= time_factor\n else:\n _logger.info(\n \"real_time could not be found in the metadata and has not been updated.\"\n )\n if \"Acquisition_instrument.SEM.Detector.EDS.live_time\" in m.metadata:\n aimd.SEM.Detector.EDS.live_time *= time_factor\n elif \"Acquisition_instrument.TEM.Detector.EDS.live_time\" in m.metadata:\n aimd.TEM.Detector.EDS.live_time *= time_factor\n else:\n _logger.info(\n \"Live_time could not be found in the metadata and has not been updated.\"\n )\n\n if out is None:\n return m\n else:\n out.events.data_changed.trigger(obj=out)\n return m\n\n rebin.__doc__ = BaseSignal.rebin.__doc__\n\n def set_elements(self, elements):\n \"\"\"Erase all elements and set them.\n\n Parameters\n ----------\n elements : list of strings\n A list of chemical element symbols.\n\n See also\n --------\n add_elements, set_lines, add_lines\n\n Examples\n --------\n >>> s = exspy.data.EDS_SEM_TM002()\n >>> 
print(s.metadata.Sample.elements)\n >>> s.set_elements(['Al'])\n >>> print(s.metadata.Sample.elements)\n ['Al' 'C' 'Cu' 'Mn' 'Zr']\n ['Al']\n\n \"\"\"\n # Erase previous elements and X-ray lines\n if \"Sample.elements\" in self.metadata:\n del self.metadata.Sample.elements\n self.add_elements(elements)\n\n def add_elements(self, elements):\n \"\"\"Add elements and the corresponding X-ray lines.\n\n The list of elements is stored in `metadata.Sample.elements`\n\n Parameters\n ----------\n elements : list of strings\n The symbol of the elements.\n\n Examples\n --------\n >>> s = exspy.data.EDS_SEM_TM002()\n >>> print(s.metadata.Sample.elements)\n >>> s.add_elements(['Ar'])\n >>> print(s.metadata.Sample.elements)\n ['Al' 'C' 'Cu' 'Mn' 'Zr']\n ['Al', 'Ar', 'C', 'Cu', 'Mn', 'Zr']\n\n See also\n --------\n set_elements, add_lines, set_lines\n\n \"\"\"\n if not isiterable(elements) or isinstance(elements, str):\n raise ValueError(\n \"Input must be in the form of a list. For example, \"\n \"if `s` is the variable containing this EDS spectrum:\\n \"\n \">>> s.add_elements(('C',))\\n\"\n \"See the docstring for more information.\"\n )\n if \"Sample.elements\" in self.metadata:\n elements_ = set(self.metadata.Sample.elements)\n else:\n elements_ = set()\n for element in elements:\n if element in elements_db:\n elements_.add(element)\n else:\n raise ValueError(f\"{element} is not a valid chemical element symbol.\")\n self.metadata.set_item(\"Sample.elements\", sorted(list(elements_)))\n\n def _get_xray_lines(self, xray_lines=None, only_one=None, only_lines=(\"a\",)):\n if xray_lines is None:\n if \"Sample.xray_lines\" in self.metadata:\n xray_lines = self.metadata.Sample.xray_lines\n elif \"Sample.elements\" in self.metadata:\n xray_lines = self._get_lines_from_elements(\n self.metadata.Sample.elements,\n only_one=only_one,\n only_lines=only_lines,\n )\n else:\n raise ValueError(\"Not X-ray line, set them with `add_elements`.\")\n return xray_lines\n\n def set_lines(self, lines, only_one=True, only_lines=(\"a\",)):\n \"\"\"Erase all Xrays lines and set them.\n\n See add_lines for details.\n\n Parameters\n ----------\n lines : list of strings\n A list of valid element X-ray lines to add e.g. Fe_Kb.\n Additionally, if `metadata.Sample.elements` is\n defined, add the lines of those elements that where not\n given in this list.\n only_one: bool\n If False, add all the lines of each element in\n `metadata.Sample.elements` that has not line\n defined in lines. 
If True (default),\n only add the line at the highest energy\n above an overvoltage of 2 (< beam energy / 2).\n only_lines : {None, list of strings}\n If not None, only the given lines will be added.\n\n Examples\n --------\n >>> s = exspy.data.EDS_SEM_TM002()\n >>> s.add_lines()\n >>> print(s.metadata.Sample.xray_lines)\n >>> s.set_lines(['Cu_Ka'])\n >>> print(s.metadata.Sample.xray_lines)\n ['Al_Ka', 'C_Ka', 'Cu_La', 'Mn_La', 'Zr_La']\n ['Al_Ka', 'C_Ka', 'Cu_Ka', 'Mn_La', 'Zr_La']\n\n See also\n --------\n add_lines, add_elements, set_elements\n\n \"\"\"\n only_lines = utils_eds._parse_only_lines(only_lines)\n if \"Sample.xray_lines\" in self.metadata:\n del self.metadata.Sample.xray_lines\n self.add_lines(lines=lines, only_one=only_one, only_lines=only_lines)\n\n def add_lines(self, lines=(), only_one=True, only_lines=(\"a\",)):\n \"\"\"Add X-rays lines to the internal list.\n\n Although most functions do not require an internal list of\n X-ray lines because they can be calculated from the internal\n list of elements, ocassionally it might be useful to customize the\n X-ray lines to be use by all functions by default using this method.\n The list of X-ray lines is stored in\n `metadata.Sample.xray_lines`\n\n Parameters\n ----------\n lines : list of strings\n A list of valid element X-ray lines to add e.g. Fe_Kb.\n Additionally, if `metadata.Sample.elements` is\n defined, add the lines of those elements that where not\n given in this list. If the list is empty (default), and\n `metadata.Sample.elements` is\n defined, add the lines of all those elements.\n only_one: bool\n If False, add all the lines of each element in\n `metadata.Sample.elements` that has not line\n defined in lines. If True (default),\n only add the line at the highest energy\n above an overvoltage of 2 (< beam energy / 2).\n only_lines : {None, list of strings}\n If not None, only the given lines will be added.\n\n Examples\n --------\n >>> s = exspy.data.EDS_SEM_TM002()\n >>> s.add_lines()\n >>> print(s.metadata.Sample.xray_lines)\n ['Al_Ka', 'C_Ka', 'Cu_La', 'Mn_La', 'Zr_La']\n\n >>> s = exspy.data.EDS_SEM_TM002()\n >>> s.set_microscope_parameters(beam_energy=30)\n >>> s.add_lines()\n >>> print(s.metadata.Sample.xray_lines)\n ['Al_Ka', 'C_Ka', 'Cu_Ka', 'Mn_Ka', 'Zr_La']\n\n >>> s = exspy.data.EDS_SEM_TM002()\n >>> s.add_lines()\n >>> print(s.metadata.Sample.xray_lines)\n >>> s.add_lines(['Cu_Ka'])\n >>> print(s.metadata.Sample.xray_lines)\n ['Al_Ka', 'C_Ka', 'Cu_La', 'Mn_La', 'Zr_La']\n ['Al_Ka', 'C_Ka', 'Cu_Ka', 'Cu_La', 'Mn_La', 'Zr_La']\n\n See also\n --------\n set_lines, add_elements, set_elements\n\n \"\"\"\n only_lines = utils_eds._parse_only_lines(only_lines)\n if \"Sample.xray_lines\" in self.metadata:\n xray_lines = set(self.metadata.Sample.xray_lines)\n else:\n xray_lines = set()\n # Define the elements which Xray lines has been customized\n # So that we don't attempt to add new lines automatically\n elements = set()\n for line in xray_lines:\n elements.add(line.split(\"_\")[0])\n for line in lines:\n try:\n element, subshell = line.split(\"_\")\n except ValueError:\n raise ValueError(\n \"Invalid line symbol. \"\n \"Please provide a valid line symbol e.g. 
Fe_Ka\"\n )\n if element in elements_db:\n elements.add(element)\n if subshell in elements_db[element][\"Atomic_properties\"][\"Xray_lines\"]:\n lines_len = len(xray_lines)\n xray_lines.add(line)\n if lines_len != len(xray_lines):\n _logger.info(f\"{line} line added,\")\n else:\n _logger.info(f\"{line} line already in.\")\n else:\n raise ValueError(f\"{line} is not a valid line of {element}.\")\n else:\n raise ValueError(f\"{element} is not a valid symbol of an element.\")\n xray_not_here = self._get_xray_lines_in_spectral_range(xray_lines)[1]\n for xray in xray_not_here:\n warnings.warn(f\"{xray} is not in the data energy range.\", UserWarning)\n if \"Sample.elements\" in self.metadata:\n extra_elements = set(self.metadata.Sample.elements) - elements\n if extra_elements:\n new_lines = self._get_lines_from_elements(\n extra_elements, only_one=only_one, only_lines=only_lines\n )\n if new_lines:\n self.add_lines(list(new_lines) + list(lines))\n self.add_elements(elements)\n if not hasattr(self.metadata, \"Sample\"):\n self.metadata.add_node(\"Sample\")\n if \"Sample.xray_lines\" in self.metadata:\n xray_lines = xray_lines.union(self.metadata.Sample.xray_lines)\n self.metadata.Sample.xray_lines = sorted(list(xray_lines))\n\n def _get_lines_from_elements(self, elements, only_one=False, only_lines=(\"a\",)):\n \"\"\"Returns the X-ray lines of the given elements in spectral range\n of the data.\n\n Parameters\n ----------\n elements : list of strings\n A list containing the symbol of the chemical elements.\n only_one : bool\n If False, add all the lines of each element in the data spectral\n range. If True only add the line at the highest energy\n above an overvoltage of 2 (< beam energy / 2).\n only_lines : {None, list of strings}\n If not None, only the given lines will be returned.\n\n Returns\n -------\n list of X-ray lines alphabetically sorted\n\n \"\"\"\n\n only_lines = utils_eds._parse_only_lines(only_lines)\n try:\n beam_energy = self._get_beam_energy()\n except BaseException:\n # Fall back to the high_value of the energy axis\n beam_energy = self.axes_manager.signal_axes[0].high_value\n lines = []\n elements = [el if isinstance(el, str) else el.decode() for el in elements]\n for element in elements:\n # Possible line (existing and excited by electron)\n element_lines = []\n for subshell in list(\n elements_db[element][\"Atomic_properties\"][\"Xray_lines\"].keys()\n ):\n if only_lines and subshell not in only_lines:\n continue\n element_lines.append(element + \"_\" + subshell)\n element_lines = self._get_xray_lines_in_spectral_range(element_lines)[0]\n if only_one and element_lines:\n # Choose the best line\n select_this = -1\n element_lines.sort()\n for i, line in enumerate(element_lines):\n if self._get_line_energy(line) < beam_energy / 2:\n select_this = i\n break\n element_lines = [\n element_lines[select_this],\n ]\n\n if not element_lines:\n _logger.info(\n f\"There is no X-ray line for element {element} \"\n \"in the data spectral range\"\n )\n else:\n lines.extend(element_lines)\n lines.sort()\n return lines\n\n def _parse_xray_lines(self, xray_lines, only_one, only_lines):\n only_lines = utils_eds._parse_only_lines(only_lines)\n xray_lines = self._get_xray_lines(\n xray_lines, only_one=only_one, only_lines=only_lines\n )\n xray_lines, xray_not_here = self._get_xray_lines_in_spectral_range(xray_lines)\n for xray in xray_not_here:\n warnings.warn(\n f\"{xray} is not in the data energy range. 
\"\n \"You can remove it with: \"\n f\"`s.metadata.Sample.xray_lines.remove('{xray}')`\"\n )\n return xray_lines\n\n def get_lines_intensity(\n self,\n xray_lines=None,\n integration_windows=2.0,\n background_windows=None,\n plot_result=False,\n only_one=True,\n only_lines=(\"a\",),\n **kwargs,\n ):\n \"\"\"Return the intensity map of selected Xray lines.\n\n The intensities, the number of X-ray counts, are computed by\n suming the spectrum over the\n different X-ray lines. The sum window width\n is calculated from the energy resolution of the detector\n as defined in 'energy_resolution_MnKa' of the metadata.\n Backgrounds average in provided windows can be subtracted from the\n intensities.\n\n Parameters\n ----------\n xray_lines: {None, Iterable* of strings}\n If None,\n if `metadata.Sample.elements.xray_lines` contains a\n list of lines use those.\n If `metadata.Sample.elements.xray_lines` is undefined\n or empty but `metadata.Sample.elements` is defined,\n use the same syntax as `add_line` to select a subset of lines\n for the operation.\n Alternatively, provide an iterable containing\n a list of valid X-ray lines symbols.\n * Note that while dictionaries and strings are iterable,\n their use is ambiguous and specifically not allowed.\n integration_windows: Float or array\n If float, the width of the integration windows is the\n 'integration_windows_width' times the calculated FWHM of the line.\n Else provide an array for which each row corresponds to a X-ray\n line. Each row contains the left and right value of the window.\n background_windows: None or 2D array of float\n If None, no background subtraction. Else, the backgrounds average\n in the windows are subtracted from the return intensities.\n 'background_windows' provides the position of the windows in\n energy. Each line corresponds to a X-ray line. In a line, the two\n first values correspond to the limits of the left window and the\n two last values correspond to the limits of the right window.\n plot_result : bool\n If True, plot the calculated line intensities. If the current\n object is a single spectrum it prints the result instead.\n only_one : bool\n If False, use all the lines of each element in the data spectral\n range. If True use only the line at the highest energy\n above an overvoltage of 2 (< beam energy / 2).\n only_lines : {None, list of strings}\n If not None, use only the given lines.\n kwargs\n The extra keyword arguments for plotting. 
See\n `utils.plot.plot_signals`\n\n Returns\n -------\n intensities : list\n A list containing the intensities as BaseSignal subclasses.\n\n Examples\n --------\n >>> s = exspy.data.EDS_SEM_TM002()\n >>> s.get_lines_intensity(['Mn_Ka'], plot_result=True)\n Mn_La at 0.63316 keV : Intensity = 96700.00\n\n >>> s = exspy.data.EDS_SEM_TM002()\n >>> s.plot(['Mn_Ka'], integration_windows=2.1)\n >>> s.get_lines_intensity(['Mn_Ka'],\n >>> integration_windows=2.1, plot_result=True)\n Mn_Ka at 5.8987 keV : Intensity = 53597.00\n\n >>> s = exspy.data.EDS_SEM_TM002()\n >>> s.set_elements(['Mn'])\n >>> s.set_lines(['Mn_Ka'])\n >>> bw = s.estimate_background_windows()\n >>> s.plot(background_windows=bw)\n >>> s.get_lines_intensity(background_windows=bw, plot_result=True)\n Mn_Ka at 5.8987 keV : Intensity = 46716.00\n\n See also\n --------\n set_elements, add_elements, estimate_background_windows,\n plot\n\n \"\"\"\n if xray_lines is not None and (\n not isinstance(xray_lines, Iterable) or isinstance(xray_lines, (str, dict))\n ):\n raise TypeError(\n \"xray_lines must be a compatible iterable, but was \"\n f\"mistakenly provided as a {type(xray_lines)}.\"\n )\n\n xray_lines = self._parse_xray_lines(xray_lines, only_one, only_lines)\n if hasattr(integration_windows, \"__iter__\") is False:\n integration_windows = self.estimate_integration_windows(\n windows_width=integration_windows, xray_lines=xray_lines\n )\n intensities = []\n ax = self.axes_manager.signal_axes[0]\n # test Signal1D (0D problem)\n # signal_to_index = self.axes_manager.navigation_dimension - 2\n for i, (Xray_line, window) in enumerate(zip(xray_lines, integration_windows)):\n element, line = utils_eds._get_element_and_line(Xray_line)\n line_energy = self._get_line_energy(Xray_line)\n # Replace with `map` function for lazy large datasets\n img = self.isig[window[0] : window[1]].integrate1D(\n -1\n ) # integrate over window.\n if np.issubdtype(img.data.dtype, np.integer):\n # The operations below require a float dtype with the default\n # numpy casting rule ('same_kind')\n img.change_dtype(\"float\")\n if background_windows is not None:\n bw = background_windows[i]\n # TODO: test to prevent slicing bug. To be reomved when fixed\n indexes = [float(ax.value2index(de)) for de in list(bw) + window]\n if indexes[0] == indexes[1]:\n bck1 = self.isig[bw[0]]\n else:\n bck1 = self.isig[bw[0] : bw[1]].integrate1D(-1)\n if indexes[2] == indexes[3]:\n bck2 = self.isig[bw[2]]\n else:\n bck2 = self.isig[bw[2] : bw[3]].integrate1D(-1)\n corr_factor = (indexes[5] - indexes[4]) / (\n (indexes[1] - indexes[0]) + (indexes[3] - indexes[2])\n )\n img = img - (bck1 + bck2) * corr_factor\n img.metadata.General.title = (\n f\"X-ray line intensity of {self.metadata.General.title}: \"\n f\"{Xray_line} at {line_energy:.2f} \"\n f\"{self.axes_manager.signal_axes[0].units}\"\n )\n img = img.transpose(signal_axes=[])\n if plot_result and img.axes_manager.navigation_size == 1:\n if img._lazy:\n img.compute()\n print(\n f\"{Xray_line} at {line_energy} {ax.units} : \"\n f\"Intensity = {img.data[0]:.2f}\"\n )\n img.metadata.set_item(\"Sample.elements\", ([element]))\n img.metadata.set_item(\"Sample.xray_lines\", ([Xray_line]))\n intensities.append(img)\n if plot_result and img.axes_manager.navigation_size != 1:\n utils.plot.plot_signals(intensities, **kwargs)\n return intensities\n\n def get_take_off_angle(self):\n \"\"\"Calculate the take-off-angle (TOA).\n\n TOA is the angle with which the X-rays leave the surface towards\n the detector. 
Parameters are read in 'SEM.Stage.tilt_alpha',\n 'Acquisition_instrument.SEM.Detector.EDS.azimuth_angle' and\n 'SEM.Detector.EDS.elevation_angle' and 'SEM.Stage.tilt_beta in\n 'metadata'.\n\n Returns\n -------\n take_off_angle: float\n in Degree\n\n Examples\n --------\n >>> s = exspy.data.EDS_SEM_TM002()\n >>> s.get_take_off_angle()\n 37.0\n >>> s.set_microscope_parameters(tilt_stage=20.)\n >>> s.get_take_off_angle()\n 57.0\n\n See also\n --------\n hs.eds.take_off_angle\n \"\"\"\n if self.metadata.Signal.signal_type == \"EDS_SEM\":\n mp = self.metadata.Acquisition_instrument.SEM\n elif self.metadata.Signal.signal_type == \"EDS_TEM\":\n mp = self.metadata.Acquisition_instrument.TEM\n\n tilt_stage = mp.get_item(\"Stage.tilt_alpha\", None)\n azimuth_angle = mp.get_item(\"Detector.EDS.azimuth_angle\", None)\n elevation_angle = mp.get_item(\"Detector.EDS.elevation_angle\", None)\n beta_tilt = mp.get_item(\"Stage.tilt_beta\", 0.0)\n\n return utils_eds.take_off_angle(\n tilt_stage, azimuth_angle, elevation_angle, beta_tilt\n )\n\n def estimate_integration_windows(self, windows_width=2.0, xray_lines=None):\n \"\"\"\n Estimate a window of integration for each X-ray line.\n\n Parameters\n ----------\n windows_width: float\n The width of the integration windows is the 'windows_width' times\n the calculated FWHM of the line.\n xray_lines: None or list of string\n If None, use 'metadata.Sample.elements.xray_lines'. Else,\n provide an iterable containing a list of valid X-ray lines\n symbols.\n\n Return\n ------\n integration_windows: 2D array of float\n The positions of the windows in energy. Each row corresponds to a\n X-ray line. Each row contains the left and right value of the\n window.\n\n Examples\n --------\n >>> s = exspy.data.EDS_SEM_TM002()\n >>> s.add_lines()\n >>> iw = s.estimate_integration_windows()\n >>> s.plot(integration_windows=iw)\n >>> s.get_lines_intensity(integration_windows=iw, plot_result=True)\n Fe_Ka at 6.4039 keV : Intensity = 3710.00\n Pt_La at 9.4421 keV : Intensity = 15872.00\n\n See also\n --------\n plot, get_lines_intensity\n \"\"\"\n xray_lines = self._get_xray_lines(xray_lines)\n integration_windows = []\n for Xray_line in xray_lines:\n line_energy, line_FWHM = self._get_line_energy(Xray_line, FWHM_MnKa=\"auto\")\n element, line = utils_eds._get_element_and_line(Xray_line)\n det = windows_width * line_FWHM / 2.0\n integration_windows.append([line_energy - det, line_energy + det])\n return integration_windows\n\n def estimate_background_windows(\n self, line_width=[2, 2], windows_width=1, xray_lines=None\n ):\n \"\"\"\n Estimate two windows around each X-ray line containing only the\n background.\n\n Parameters\n ----------\n line_width: list of two floats\n The position of the two windows around the X-ray line is given by\n the `line_width` (left and right) times the calculated FWHM of the\n line.\n windows_width: float\n The width of the windows is is the `windows_width` times the\n calculated FWHM of the line.\n xray_lines: None or list of string\n If None, use `metadata.Sample.elements.xray_lines`. Else,\n provide an iterable containing a list of valid X-ray lines\n symbols.\n\n Return\n ------\n windows_position: 2D array of float\n The position of the windows in energy. Each line corresponds to a\n X-ray line. 
In a line, the two first values correspond to the\n limits of the left window and the two last values correspond to\n the limits of the right window.\n\n Examples\n --------\n >>> s = exspy.data.EDS_SEM_TM002()\n >>> s.add_lines()\n >>> bw = s.estimate_background_windows(line_width=[5.0, 2.0])\n >>> s.plot(background_windows=bw)\n >>> s.get_lines_intensity(background_windows=bw, plot_result=True)\n Fe_Ka at 6.4039 keV : Intensity = 2754.00\n Pt_La at 9.4421 keV : Intensity = 15090.00\n\n See also\n --------\n plot, get_lines_intensity\n \"\"\"\n xray_lines = self._get_xray_lines(xray_lines)\n windows_position = []\n for xray_line in xray_lines:\n line_energy, line_FWHM = self._get_line_energy(xray_line, FWHM_MnKa=\"auto\")\n tmp = [\n line_energy - line_FWHM * line_width[0] - line_FWHM * windows_width,\n line_energy - line_FWHM * line_width[0],\n line_energy + line_FWHM * line_width[1],\n line_energy + line_FWHM * line_width[1] + line_FWHM * windows_width,\n ]\n windows_position.append(tmp)\n windows_position = np.array(windows_position)\n # merge ovelapping windows\n index = windows_position.argsort(axis=0)[:, 0]\n for i in range(len(index) - 1):\n ia, ib = index[i], index[i + 1]\n if windows_position[ia, 2] > windows_position[ib, 0]:\n interv = np.append(windows_position[ia, :2], windows_position[ib, 2:])\n windows_position[ia] = interv\n windows_position[ib] = interv\n return windows_position\n\n def plot(\n self,\n xray_lines=False,\n only_lines=(\"a\", \"b\"),\n only_one=False,\n background_windows=None,\n integration_windows=None,\n navigator=\"auto\",\n plot_markers=True,\n autoscale=\"v\",\n norm=\"auto\",\n axes_manager=None,\n navigator_kwds={},\n **kwargs,\n ):\n \"\"\"Plot the EDS spectrum. The following markers can be added\n\n - The position of the X-ray lines and their names.\n - The background windows associated with each X-ray lines. A black line\n links the left and right window with the average value in each window.\n\n Parameters\n ----------\n xray_lines: {False, True, 'from_elements', list of string}\n If not False, indicate the position and the name of the X-ray\n lines.\n If True, if `metadata.Sample.elements.xray_lines` contains a\n list of lines use those. If `metadata.Sample.elements.xray_lines`\n is undefined or empty or if xray_lines equals 'from_elements' and\n `metadata.Sample.elements` is defined, use the same syntax as\n `add_line` to select a subset of lines for the operation.\n Alternatively, provide an iterable containing a list of valid X-ray\n lines symbols.\n only_lines : None or list of strings\n If not None, use only the given lines (eg. ('a','Kb')).\n If None, use all lines.\n only_one : bool\n If False, use all the lines of each element in the data spectral\n range. If True use only the line at the highest energy\n above an overvoltage of 2 (< beam energy / 2).\n background_windows: None or 2D array of float\n If not None, add markers at the position of the windows in energy.\n Each line corresponds to a X-ray lines. In a line, the two first\n value corresponds to the limit of the left window and the two\n last values corresponds to the limit of the right window.\n integration_windows: None or 'auto' or float or 2D array of float\n If not None, add markers at the position of the integration\n windows.\n If 'auto' (or float), the width of the integration windows is 2.0\n (or float) times the calculated FWHM of the line. see\n 'estimate_integration_windows'.\n Else provide an array for which each row corresponds to a X-ray\n line. 
Each row contains the left and right value of the window.\n %s\n %s\n\n Examples\n --------\n >>> s = exspy.data.EDS_SEM_TM002()\n >>> s.plot()\n\n >>> s = exspy.data.EDS_SEM_TM002()\n >>> s.plot(True)\n\n >>> s = exspy.data.EDS_SEM_TM002()\n >>> s.add_lines()\n >>> bw = s.estimate_background_windows()\n >>> s.plot(background_windows=bw)\n\n >>> s = exspy.data.EDS_SEM_TM002()\n >>> s.plot(['Mn_Ka'], integration_windows='auto')\n\n >>> s = exspy.data.EDS_SEM_TM002()\n >>> s.add_lines()\n >>> bw = s.estimate_background_windows()\n >>> s.plot(background_windows=bw, integration_windows=2.1)\n\n See also\n --------\n set_elements, add_elements, estimate_integration_windows,\n get_lines_intensity, estimate_background_windows\n \"\"\"\n super().plot(\n navigator=navigator,\n plot_markers=plot_markers,\n autoscale=autoscale,\n norm=norm,\n axes_manager=axes_manager,\n navigator_kwds=navigator_kwds,\n **kwargs,\n )\n self._plot_xray_lines(\n xray_lines,\n only_lines,\n only_one,\n background_windows,\n integration_windows,\n render_figure=False,\n )\n self._render_figure(plot=[\"signal_plot\"])\n\n plot.__doc__ %= (BASE_PLOT_DOCSTRING_PARAMETERS, PLOT1D_DOCSTRING)\n\n def _plot_xray_lines(\n self,\n xray_lines=False,\n only_lines=(\"a\", \"b\"),\n only_one=False,\n background_windows=None,\n integration_windows=None,\n render_figure=True,\n ):\n if (\n xray_lines is not False\n or background_windows is not None\n or integration_windows is not None\n ):\n if xray_lines is False:\n xray_lines = True\n only_lines = utils_eds._parse_only_lines(only_lines)\n if xray_lines is True or xray_lines == \"from_elements\":\n if (\n \"Sample.xray_lines\" in self.metadata\n and xray_lines != \"from_elements\"\n ):\n xray_lines = self.metadata.Sample.xray_lines\n elif \"Sample.elements\" in self.metadata:\n xray_lines = self._get_lines_from_elements(\n self.metadata.Sample.elements,\n only_one=only_one,\n only_lines=only_lines,\n )\n else:\n _logger.warning(\"No elements defined, set them with `add_elements`\")\n # No X-rays lines, nothing to do then\n return\n\n xray_lines, xray_not_here = self._get_xray_lines_in_spectral_range(\n xray_lines\n )\n for xray in xray_not_here:\n _logger.warning(f\"{xray} is not in the data energy range.\")\n\n xray_lines = np.unique(xray_lines)\n\n self.add_xray_lines_markers(xray_lines, render_figure=False)\n if background_windows is not None:\n self._add_background_windows_markers(\n background_windows, render_figure=False\n )\n if integration_windows is not None:\n if integration_windows == \"auto\":\n integration_windows = 2.0\n if hasattr(integration_windows, \"__iter__\") is False:\n integration_windows = self.estimate_integration_windows(\n windows_width=integration_windows, xray_lines=xray_lines\n )\n self._add_vertical_lines_groups(\n integration_windows, linestyle=\"--\", render_figure=False\n )\n # Render figure only at the end\n if render_figure:\n self._render_figure(plot=[\"signal_plot\"])\n\n def _add_vertical_lines_groups(self, position, render_figure=True, **kwargs):\n \"\"\"\n Add vertical markers for each group that shares the color.\n\n Parameters\n ----------\n position: 2D array of float\n The position on the signal axis. 
Each row corresponds to a\n group.\n kwargs\n keywords argument for :py:class:`~.api.plot.markers.VerticalLine`\n \"\"\"\n colors = itertools.cycle(\n np.sort(plt.rcParams[\"axes.prop_cycle\"].by_key()[\"color\"])\n )\n\n for x, color in zip(position, colors):\n line = VerticalLines(offsets=x, color=color, **kwargs)\n self.add_marker(line, render_figure=False)\n if render_figure:\n self._render_figure(plot=[\"signal_plot\"])\n\n def add_xray_lines_markers(self, xray_lines, render_figure=True):\n \"\"\"\n Add marker on a spec.plot() with the name of the selected X-ray\n lines\n\n Parameters\n ----------\n xray_lines: list of string\n A valid list of X-ray lines\n \"\"\"\n if self._plot is None or not self._plot.is_active:\n raise RuntimeError(\"The signal needs to be plotted.\")\n norm = self._plot.signal_plot.ax_lines[0].norm\n minimum_intensity = self.data[self.data > 0].min() if norm == \"log\" else 0\n line_names = []\n segments = np.empty((len(xray_lines), 2, 2))\n offsets = np.empty((len(xray_lines), 2))\n # might want to set the intensity based on the alpha line intensity\n for i, xray_line in enumerate(xray_lines):\n element, line = utils_eds._get_element_and_line(xray_line)\n relative_factor = elements_db[element][\"Atomic_properties\"][\"Xray_lines\"][\n line\n ][\"weight\"]\n eng = self._get_line_energy(f\"{element}_{line}\")\n segments[i] = [[eng, 0], [eng, 1]]\n offsets[i] = [eng, 1]\n line_names.append(\n r\"$\\mathrm{%s}_{\\mathrm{%s}}$\"\n % utils_eds._get_element_and_line(xray_line)\n )\n\n line_markers = Lines(\n segments=segments,\n transform=\"relative\",\n color=\"black\",\n )\n text_markers = Texts(\n offsets=offsets,\n texts=line_names,\n offset_transform=\"relative\",\n rotation=np.pi / 2,\n horizontalalignment=\"left\",\n verticalalignment=\"bottom\",\n facecolor=\"black\",\n shift=0.005,\n )\n\n self.add_marker(line_markers, render_figure=False)\n self.add_marker(text_markers, render_figure=False)\n\n # Connect events to remove the markers when the line is closed\n line_markers.events.closed.connect(self._xray_marker_closed)\n text_markers.events.closed.connect(self._xray_marker_closed)\n self._xray_markers[\"lines\"] = line_markers\n self._xray_markers[\"texts\"] = text_markers\n self._xray_markers[\"names\"] = xray_lines\n\n if render_figure:\n self._render_figure(plot=[\"signal_plot\"])\n\n def _xray_marker_closed(self, obj):\n self._xray_markers = {}\n\n def remove_xray_lines_markers(self, xray_lines, render_figure=True):\n \"\"\"\n Remove marker previously added on a spec.plot() with the name of the\n selected X-ray lines\n\n Parameters\n ----------\n xray_lines: list of string\n A valid list of X-ray lines to remove\n render_figure: bool\n If True, render the figure after removing the markers\n \"\"\"\n ind = np.where(np.isin(self._xray_markers[\"names\"], xray_lines))\n self._xray_markers[\"lines\"].remove_items(ind)\n self._xray_markers[\"texts\"].remove_items(ind)\n self._xray_markers[\"names\"] = np.delete(self._xray_markers[\"names\"], ind)\n if render_figure:\n self._render_figure(plot=[\"signal_plot\"])\n\n def _add_background_windows_markers(self, windows_position, render_figure=True):\n \"\"\"\n Plot the background windows associated with each X-ray lines.\n\n For X-ray lines, a black line links the left and right window with the\n average value in each window.\n\n Parameters\n ----------\n windows_position: 2D array of float\n The position of the windows in energy. Each line corresponds to a\n X-ray lines. 
In a line, the two first value corresponds to the\n limit of the left window and the two last values corresponds to the\n limit of the right window.\n\n See also\n --------\n estimate_background_windows, get_lines_intensity\n \"\"\"\n self._add_vertical_lines_groups(windows_position)\n ax = self.axes_manager.signal_axes[0]\n segments = []\n for bw in windows_position:\n # TODO: test to prevent slicing bug. To be removed when fixed\n if ax.value2index(bw[0]) == ax.value2index(bw[1]):\n y1 = self.isig[bw[0]].data\n else:\n y1 = self.isig[bw[0] : bw[1]].mean(-1).data\n if ax.value2index(bw[2]) == ax.value2index(bw[3]):\n y2 = self.isig[bw[2]].data\n else:\n y2 = self.isig[bw[2] : bw[3]].mean(-1).data\n x1 = (bw[0] + bw[1]) / 2.0\n x2 = (bw[2] + bw[3]) / 2.0\n segments.append([[x1, y1[0]], [x2, y2[0]]])\n segments = np.array(segments)\n lines = Lines(segments=segments, color=\"black\")\n self.add_marker(lines, render_figure=False)\n if render_figure:\n self._render_figure(plot=[\"signal_plot\"])" }, { "identifier": "LazyEDSSpectrum", "path": "exspy/signals/eds.py", "snippet": "class LazyEDSSpectrum(EDSSpectrum, LazySignal1D):\n \"\"\"Lazy general signal class for EDS spectra.\"\"\"\n\n __doc__ += LAZYSIGNAL_DOC.replace(\"__BASECLASS__\", \"EDSSpectrum\")" }, { "identifier": "preferences", "path": "exspy/_defaults_parser.py", "snippet": "def guess_gos_path():\ndef template2config(template, config):\ndef config2template(template, config):\n def save(self):\nclass EELSConfig(t.HasTraits):\nclass EDSConfig(t.HasTraits):\nclass Preferences(t.HasTraits):\n EELS = t.Instance(EELSConfig)\n EDS = t.Instance(EDSConfig)" } ]
import logging
import traits.api as t
from hyperspy.docstrings.signal import LAZYSIGNAL_DOC
from hyperspy.signal import BaseSetMetadataItems
from hyperspy.ui_registry import add_gui_method, DISPLAY_DT, TOOLKIT_DT
from .eds import EDSSpectrum, LazyEDSSpectrum
from exspy._defaults_parser import preferences
from exspy.models.edssemmodel import EDSSEMModel
13,729
            tem_par = EDSSEMParametersUI(self)
            return tem_par.gui(toolkit=toolkit, display=display)
        md = self.metadata
        if beam_energy is not None:
            md.set_item("Acquisition_instrument.SEM.beam_energy", beam_energy)
        if live_time is not None:
            md.set_item("Acquisition_instrument.SEM.Detector.EDS.live_time", live_time)
        if tilt_stage is not None:
            md.set_item("Acquisition_instrument.SEM.Stage.tilt_alpha", tilt_stage)
        if azimuth_angle is not None:
            md.set_item(
                "Acquisition_instrument.SEM.Detector.EDS.azimuth_angle", azimuth_angle
            )
        if elevation_angle is not None:
            md.set_item(
                "Acquisition_instrument.SEM.Detector.EDS.elevation_angle",
                elevation_angle,
            )
        if energy_resolution_MnKa is not None:
            md.set_item(
                "Acquisition_instrument.SEM.Detector.EDS." "energy_resolution_MnKa",
                energy_resolution_MnKa,
            )

    set_microscope_parameters.__doc__ = """
        Set the microscope parameters.

        If no arguments are given, raises an interactive mode to fill
        the values.

        Parameters
        ----------
        beam_energy: float
            The energy of the electron beam in keV
        live_time : float
            In second
        tilt_stage : float
            In degree
        azimuth_angle : float
            In degree
        elevation_angle : float
            In degree
        energy_resolution_MnKa : float
            In eV
        {}
        {}

        Examples
        --------
        >>> s = exspy.data.EDS_TEM_FePt_nanoparticles()
        >>> print('Default value %s eV' %
        >>>       s.metadata.Acquisition_instrument.
        >>>       SEM.Detector.EDS.energy_resolution_MnKa)
        >>> s.set_microscope_parameters(energy_resolution_MnKa=135.)
        >>> print('Now set to %s eV' %
        >>>       s.metadata.Acquisition_instrument.
        >>>       SEM.Detector.EDS.energy_resolution_MnKa)
        Default value 130.0 eV
        Now set to 135.0 eV
        """.format(
        DISPLAY_DT, TOOLKIT_DT
    )

    def _are_microscope_parameters_missing(self):
        """Check if the EDS parameters necessary for quantification
        are defined in metadata. If not, in interactive mode
        raises an UI item to fill the values
        """
        must_exist = (
            "Acquisition_instrument.SEM.beam_energy",
            "Acquisition_instrument.SEM.Detector.EDS.live_time",
        )
        missing_parameters = []
        for item in must_exist:
            exists = self.metadata.has_item(item)
            if exists is False:
                missing_parameters.append(item)
        if missing_parameters:
            _logger.info("Missing parameters {}".format(missing_parameters))
            return True
        else:
            return False

    def create_model(self, auto_background=True, auto_add_lines=True, *args, **kwargs):
        """Create a model for the current SEM EDS data.

        Parameters
        ----------
        auto_background : boolean, default True
            If True, adds automatically a polynomial order 6 to the model,
            using the edsmodel.add_polynomial_background method.
        auto_add_lines : boolean, default True
            If True, automatically add Gaussians for all X-rays generated
            in the energy range by an element using the
            edsmodel.add_family_lines method.
        dictionary : {None, dict}, optional
            A dictionary to be used to recreate a model. Usually generated
            using :meth:`hyperspy.model.as_dictionary`

        Returns
        -------
        model : `EDSSEMModel` instance.

        """
        model = EDSSEMModel(
            self,
            auto_background=auto_background,
            auto_add_lines=auto_add_lines,
            *args,
            **kwargs,
        )
        return model
# -*- coding: utf-8 -*- # Copyright 2007-2023 The exSpy developers # # This file is part of exSpy. # # exSpy is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # exSpy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with exSpy. If not, see <https://www.gnu.org/licenses/#GPL>. _logger = logging.getLogger(__name__) @add_gui_method(toolkey="exspy.microscope_parameters_EDS_SEM") class EDSSEMParametersUI(BaseSetMetadataItems): beam_energy = t.Float(t.Undefined, label="Beam energy (keV)") live_time = t.Float(t.Undefined, label="Live time (s)") tilt_stage = t.Float(t.Undefined, label="Stage tilt (degree)") azimuth_angle = t.Float(t.Undefined, label="Azimuth angle (degree)") elevation_angle = t.Float(t.Undefined, label="Elevation angle (degree)") energy_resolution_MnKa = t.Float(t.Undefined, label="Energy resolution MnKa (eV)") mapping = { "Acquisition_instrument.SEM.beam_energy": "beam_energy", "Acquisition_instrument.TEM.Stage.tilt_alpha": "tilt_stage", "Acquisition_instrument.SEM.Detector.EDS.live_time": "live_time", "Acquisition_instrument.SEM.Detector.EDS.azimuth_angle": "azimuth_angle", "Acquisition_instrument.SEM.Detector.EDS.elevation_angle": "elevation_angle", "Acquisition_instrument.SEM.Detector.EDS.energy_resolution_MnKa": "energy_resolution_MnKa", } class EDSSEMSpectrum(EDSSpectrum): """Signal class for EDS spectra measured in an SEM.""" _signal_type = "EDS_SEM" def __init__(self, *args, **kwards): super().__init__(*args, **kwards) # Attributes defaults if "Acquisition_instrument.SEM.Detector.EDS" not in self.metadata: if "Acquisition_instrument.TEM" in self.metadata: self.metadata.set_item( "Acquisition_instrument.SEM", self.metadata.Acquisition_instrument.TEM, ) del self.metadata.Acquisition_instrument.TEM self._set_default_param() def get_calibration_from(self, ref, nb_pix=1): """Copy the calibration and all metadata of a reference. Primary use: To add a calibration to ripple file from INCA software Parameters ---------- ref : signal The reference contains the calibration in its metadata nb_pix : int The live time (real time corrected from the "dead time") is divided by the number of pixel (spectrums), giving an average live time. Raises ------ NotImplementedError If the signal axis is a non-uniform axis. Examples -------- >>> ref = exspy.data.EDS_SEM_TM002() >>> s = exspy.signals.EDSSEMSpectrum(ref.data) >>> print(s.axes_manager[0].scale) >>> s.get_calibration_from(ref) >>> print(s.axes_manager[0].scale) 1.0 0.01 """ self._original_metadata = ref.original_metadata.deepcopy() # Setup the axes_manager ax_m = self.axes_manager.signal_axes[0] ax_ref = ref.axes_manager.signal_axes[0] for _axis in [ax_m, ax_ref]: if not _axis.is_uniform: raise NotImplementedError( "The function is not implemented for non-uniform axes." 
) ax_m.scale = ax_ref.scale ax_m.units = ax_ref.units ax_m.offset = ax_ref.offset # Setup metadata if "Acquisition_instrument.SEM" in ref.metadata: mp_ref = ref.metadata.Acquisition_instrument.SEM elif "Acquisition_instrument.TEM" in ref.metadata: mp_ref = ref.metadata.Acquisition_instrument.TEM else: raise ValueError( "The reference has no metadata.Acquisition_instrument.TEM" "\n nor metadata.Acquisition_instrument.SEM " ) mp = self.metadata mp.Acquisition_instrument.SEM = mp_ref.deepcopy() if hasattr(mp_ref.Detector.EDS, "live_time"): mp.Acquisition_instrument.SEM.Detector.EDS.live_time = ( mp_ref.Detector.EDS.live_time / nb_pix ) def _load_from_TEM_param(self): """Transfer metadata.Acquisition_instrument.TEM to metadata.Acquisition_instrument.SEM """ mp = self.metadata if mp.has_item("Acquisition_instrument.SEM") is False: mp.add_node("Acquisition_instrument.SEM") if mp.has_item("Acquisition_instrument.SEM.Detector.EDS") is False: mp.Acquisition_instrument.SEM.add_node("EDS") mp.Signal.signal_type = "EDS_SEM" # Transfer if "Acquisition_instrument.TEM" in mp: mp.Acquisition_instrument.SEM = mp.Acquisition_instrument.TEM del mp.Acquisition_instrument.TEM def _set_default_param(self): """Set to value to default (defined in preferences)""" mp = self.metadata if "Acquisition_instrument.SEM.Stage.tilt_alpha" not in mp: mp.set_item( "Acquisition_instrument.SEM.Stage.tilt_alpha", preferences.EDS.eds_tilt_stage, ) if "Acquisition_instrument.SEM.Detector.EDS.elevation_angle" not in mp: mp.set_item( "Acquisition_instrument.SEM.Detector.EDS.elevation_angle", preferences.EDS.eds_detector_elevation, ) if "Acquisition_instrument.SEM.Detector.EDS.energy_resolution_MnKa" not in mp: mp.set_item( "Acquisition_instrument.SEM.Detector.EDS." "energy_resolution_MnKa", preferences.EDS.eds_mn_ka, ) if "Acquisition_instrument.SEM.Detector.EDS.azimuth_angle" not in mp: mp.set_item( "Acquisition_instrument.SEM.Detector.EDS.azimuth_angle", preferences.EDS.eds_detector_azimuth, ) def set_microscope_parameters( self, beam_energy=None, live_time=None, tilt_stage=None, azimuth_angle=None, elevation_angle=None, energy_resolution_MnKa=None, display=True, toolkit=None, ): if set( [ beam_energy, live_time, tilt_stage, azimuth_angle, elevation_angle, energy_resolution_MnKa, ] ) == {None}: tem_par = EDSSEMParametersUI(self) return tem_par.gui(toolkit=toolkit, display=display) md = self.metadata if beam_energy is not None: md.set_item("Acquisition_instrument.SEM.beam_energy", beam_energy) if live_time is not None: md.set_item("Acquisition_instrument.SEM.Detector.EDS.live_time", live_time) if tilt_stage is not None: md.set_item("Acquisition_instrument.SEM.Stage.tilt_alpha", tilt_stage) if azimuth_angle is not None: md.set_item( "Acquisition_instrument.SEM.Detector.EDS.azimuth_angle", azimuth_angle ) if elevation_angle is not None: md.set_item( "Acquisition_instrument.SEM.Detector.EDS.elevation_angle", elevation_angle, ) if energy_resolution_MnKa is not None: md.set_item( "Acquisition_instrument.SEM.Detector.EDS." "energy_resolution_MnKa", energy_resolution_MnKa, ) set_microscope_parameters.__doc__ = """ Set the microscope parameters. If no arguments are given, raises an interactive mode to fill the values. 
Parameters ---------- beam_energy: float The energy of the electron beam in keV live_time : float In second tilt_stage : float In degree azimuth_angle : float In degree elevation_angle : float In degree energy_resolution_MnKa : float In eV {} {} Examples -------- >>> s = exspy.data.EDS_TEM_FePt_nanoparticles() >>> print('Default value %s eV' % >>> s.metadata.Acquisition_instrument. >>> SEM.Detector.EDS.energy_resolution_MnKa) >>> s.set_microscope_parameters(energy_resolution_MnKa=135.) >>> print('Now set to %s eV' % >>> s.metadata.Acquisition_instrument. >>> SEM.Detector.EDS.energy_resolution_MnKa) Default value 130.0 eV Now set to 135.0 eV """.format( DISPLAY_DT, TOOLKIT_DT ) def _are_microscope_parameters_missing(self): """Check if the EDS parameters necessary for quantification are defined in metadata. If not, in interactive mode raises an UI item to fill the values """ must_exist = ( "Acquisition_instrument.SEM.beam_energy", "Acquisition_instrument.SEM.Detector.EDS.live_time", ) missing_parameters = [] for item in must_exist: exists = self.metadata.has_item(item) if exists is False: missing_parameters.append(item) if missing_parameters: _logger.info("Missing parameters {}".format(missing_parameters)) return True else: return False def create_model(self, auto_background=True, auto_add_lines=True, *args, **kwargs): """Create a model for the current SEM EDS data. Parameters ---------- auto_background : boolean, default True If True, adds automatically a polynomial order 6 to the model, using the edsmodel.add_polynomial_background method. auto_add_lines : boolean, default True If True, automatically add Gaussians for all X-rays generated in the energy range by an element using the edsmodel.add_family_lines method. dictionary : {None, dict}, optional A dictionary to be used to recreate a model. Usually generated using :meth:`hyperspy.model.as_dictionary` Returns ------- model : `EDSSEMModel` instance. """ model = EDSSEMModel( self, auto_background=auto_background, auto_add_lines=auto_add_lines, *args, **kwargs, ) return model
class LazyEDSSEMSpectrum(EDSSEMSpectrum, LazyEDSSpectrum):
1
2023-10-28 20:04:10+00:00
16k
Sllambias/yucca
yucca/training/augmentation/YuccaAugmentationComposer.py
[ { "identifier": "get_max_rotated_size", "path": "yucca/image_processing/matrix_ops.py", "snippet": "def get_max_rotated_size(patch_size):\n if len(patch_size) == 2:\n max_dim = int(np.sqrt(patch_size[0] ** 2 + patch_size[1] ** 2))\n return (max_dim, max_dim)\n\n max_dim_0 = max(\n int(np.sqrt(patch_size[0] ** 2 + patch_size[1] ** 2)),\n int(np.sqrt(patch_size[0] ** 2 + patch_size[2] ** 2)),\n )\n\n max_dim_1 = max(\n int(np.sqrt(patch_size[1] ** 2 + patch_size[0] ** 2)),\n int(np.sqrt(patch_size[1] ** 2 + patch_size[2] ** 2)),\n )\n\n max_dim_2 = max(\n int(np.sqrt(patch_size[2] ** 2 + patch_size[0] ** 2)),\n int(np.sqrt(patch_size[2] ** 2 + patch_size[1] ** 2)),\n )\n\n return (max_dim_0, max_dim_1, max_dim_2)" }, { "identifier": "AddBatchDimension", "path": "yucca/image_processing/transforms/formatting.py", "snippet": "class AddBatchDimension(YuccaTransform):\n def __init__(self, data_key=\"image\", label_key=\"label\"):\n self.data_key = data_key\n self.label_key = label_key\n\n @staticmethod\n def get_params():\n pass\n\n def __unsqueeze__(self, data, label):\n data = data[np.newaxis]\n if label is None:\n return data, label\n if isinstance(label, list):\n label = [s[np.newaxis] for s in label]\n else:\n label = label[np.newaxis]\n return data, label\n\n def __call__(self, packed_data_dict=None, **unpacked_data_dict):\n data_dict = packed_data_dict if packed_data_dict else unpacked_data_dict\n data_dict[self.data_key], data_dict[self.label_key] = self.__unsqueeze__(\n data_dict[self.data_key], data_dict[self.label_key]\n )\n return data_dict" }, { "identifier": "RemoveBatchDimension", "path": "yucca/image_processing/transforms/formatting.py", "snippet": "class RemoveBatchDimension(YuccaTransform):\n def __init__(self, data_key=\"image\", label_key=\"label\"):\n self.data_key = data_key\n self.label_key = label_key\n\n @staticmethod\n def get_params():\n pass\n\n def __squeeze__(self, data, label):\n data = data[0]\n if isinstance(label, list):\n label = [s[0] for s in label]\n else:\n label = label[0]\n return data, label\n\n def __call__(self, packed_data_dict=None, **unpacked_data_dict):\n data_dict = packed_data_dict if packed_data_dict else unpacked_data_dict\n data_dict[self.data_key], data_dict[self.label_key] = self.__squeeze__(\n data_dict[self.data_key], data_dict[self.label_key]\n )\n return data_dict" }, { "identifier": "BiasField", "path": "yucca/image_processing/transforms/BiasField.py", "snippet": "class BiasField(YuccaTransform):\n \"\"\"\n variables in DIKU_3D_augmentation_params:\n do_biasField\n biasField_p_per_sample\n \"\"\"\n\n def __init__(self, data_key=\"image\", p_per_sample=1):\n self.data_key = data_key\n self.p_per_sample = p_per_sample\n\n @staticmethod\n def get_params():\n # No parameters to retrieve\n pass\n\n def __biasField__(self, imageVolume):\n if len(imageVolume.shape) == 3:\n x, y, z = imageVolume.shape\n X, Y, Z = np.meshgrid(\n np.linspace(0, x, x, endpoint=False),\n np.linspace(0, y, y, endpoint=False),\n np.linspace(0, z, z, endpoint=False),\n indexing=\"ij\",\n )\n x0 = np.random.randint(0, x)\n y0 = np.random.randint(0, y)\n z0 = np.random.randint(0, z)\n G = 1 - (np.power((X - x0), 2) / (x**2) + np.power((Y - y0), 2) / (y**2) + np.power((Z - z0), 2) / (z**2))\n else:\n x, y = imageVolume.shape\n X, Y = np.meshgrid(\n np.linspace(0, x, x, endpoint=False),\n np.linspace(0, y, y, endpoint=False),\n indexing=\"ij\",\n )\n x0 = np.random.randint(0, x)\n y0 = np.random.randint(0, y)\n G = 1 - (np.power((X - x0), 2) / (x**2) + np.power((Y - y0), 
2) / (y**2))\n return np.multiply(G, imageVolume)\n\n def __call__(self, packed_data_dict=None, **unpacked_data_dict):\n data_dict = packed_data_dict if packed_data_dict else unpacked_data_dict\n assert (\n len(data_dict[self.data_key].shape) == 5 or len(data_dict[self.data_key].shape) == 4\n ), f\"Incorrect data size or shape. \\nShould be (b, c, x, y, z) or (b, c, x, y) and is:\\\n {data_dict[self.data_key].shape}\"\n\n for b in range(data_dict[self.data_key].shape[0]):\n for c in range(data_dict[self.data_key][b].shape[0]):\n if np.random.uniform() < self.p_per_sample:\n data_dict[self.data_key][b, c] = self.__biasField__(data_dict[self.data_key][b, c])\n return data_dict" }, { "identifier": "Blur", "path": "yucca/image_processing/transforms/Blur.py", "snippet": "class Blur(YuccaTransform):\n \"\"\"\n WRAPPER FOR NNUNET AUGMENT GAMMA: https://github.com/MIC-DKFZ/batchgenerators/blob/8822a08a7dbfa4986db014e6a74b040778164ca6/batchgenerators/augmentations/color_augmentations.py\n\n Augments by changing 'gamma' of the image (same as gamma correction in photos or computer monitors\n\n :param gamma_range: range to sample gamma from. If one value is smaller than 1 and the other one is\n larger then half the samples will have gamma <1 and the other >1 (in the inverval that was specified).\n Tuple of float. If one value is < 1 and the other > 1 then half the images will be augmented with gamma values\n smaller than 1 and the other half with > 1\n :param invert_image: whether to invert the image before applying gamma augmentation\n :param retain_stats: Gamma transformation will alter the mean and std of the data in the patch. If retain_stats=True,\n the data will be transformed to match the mean and standard deviation before gamma augmentation. retain_stats\n can also be callable (signature retain_stats() -> bool)\n \"\"\"\n\n def __init__(self, data_key=\"image\", p_per_sample=1, p_per_channel=0.5, sigma=(0.5, 1.0)):\n self.data_key = data_key\n self.p_per_sample = p_per_sample\n self.p_per_channel = p_per_channel\n self.sigma = sigma\n\n @staticmethod\n def get_params(sigma: Tuple[float]):\n sigma = np.random.uniform(*sigma)\n return sigma\n\n def __blur__(self, imageVolume, sigma):\n for c in range(imageVolume.shape[0]):\n if np.random.uniform() < self.p_per_channel:\n imageVolume[c] = gaussian_filter(imageVolume[c], sigma, order=0)\n return imageVolume\n\n def __call__(self, packed_data_dict=None, **unpacked_data_dict):\n data_dict = packed_data_dict if packed_data_dict else unpacked_data_dict\n assert (\n len(data_dict[self.data_key].shape) == 5 or len(data_dict[self.data_key].shape) == 4\n ), f\"Incorrect data size or shape.\\\n \\nShould be (b, c, x, y, z) or (b, c, x, y) and is: {data_dict[self.data_key].shape}\"\n\n for b in range(data_dict[self.data_key].shape[0]):\n if np.random.uniform() < self.p_per_sample:\n sigma = self.get_params(self.sigma)\n data_dict[self.data_key][b] = self.__blur__(data_dict[self.data_key][b], sigma)\n return data_dict" }, { "identifier": "CopyImageToSeg", "path": "yucca/image_processing/transforms/CopyImageToSeg.py", "snippet": "class CopyImageToSeg(YuccaTransform):\n \"\"\"\n variables in CopyImageToSeg\n data_key\n label_key\n\n \"\"\"\n\n def __init__(self, copy=False, data_key=\"image\", label_key=\"label\"):\n self.copy = copy\n self.data_key = data_key\n self.label_key = label_key\n\n @staticmethod\n def get_params():\n # No parameters to retrieve\n pass\n\n def __copy__(self, imageVolume):\n return imageVolume, imageVolume.copy()\n\n def 
__call__(self, packed_data_dict=None, **unpacked_data_dict):\n data_dict = packed_data_dict if packed_data_dict else unpacked_data_dict\n assert (\n len(data_dict[self.data_key].shape) == 5 or len(data_dict[self.data_key].shape) == 4\n ), f\"Incorrect data size or shape.\\\n \\nShould be (b, c, x, y, z) or (b, c, x, y) and is: {data_dict[self.data_key].shape}\"\n if self.copy:\n data_dict[self.data_key], data_dict[self.label_key] = self.__copy__(data_dict[self.data_key])\n return data_dict" }, { "identifier": "Gamma", "path": "yucca/image_processing/transforms/Gamma.py", "snippet": "class Gamma(YuccaTransform):\n \"\"\"\n WRAPPER FOR NNUNET AUGMENT GAMMA: https://github.com/MIC-DKFZ/batchgenerators/blob/8822a08a7dbfa4986db014e6a74b040778164ca6/batchgenerators/augmentations/color_augmentations.py\n\n Augments by changing 'gamma' of the image (same as gamma correction in photos or computer monitors\n\n :param gamma_range: range to sample gamma from. If one value is smaller than 1 and the other one is\n larger then half the samples will have gamma <1 and the other >1 (in the inverval that was specified).\n Tuple of float. If one value is < 1 and the other > 1 then half the images will be augmented with gamma values\n smaller than 1 and the other half with > 1\n :param invert_image: whether to invert the image before applying gamma augmentation\n :param retain_stats: Gamma transformation will alter the mean and std of the data in the patch. If retain_stats=True,\n the data will be transformed to match the mean and standard deviation before gamma augmentation. retain_stats\n can also be callable (signature retain_stats() -> bool)\n \"\"\"\n\n def __init__(\n self,\n data_key=\"image\",\n p_per_sample=1,\n p_invert_image=0.05,\n gamma_range=(0.5, 2.0),\n per_channel=True,\n ):\n self.data_key = data_key\n self.p_per_sample = p_per_sample\n self.gamma_range = gamma_range\n self.p_invert_image = p_invert_image\n self.per_channel = per_channel\n\n @staticmethod\n def get_params(p_invert_image):\n # No parameters to retrieve\n do_invert = False\n if np.random.uniform() < p_invert_image:\n do_invert = True\n return do_invert\n\n def __gamma__(self, imageVolume, gamma_range, invert_image, per_channel):\n return augment_gamma(imageVolume, gamma_range, invert_image, per_channel, retain_stats=False)\n\n def __call__(self, packed_data_dict=None, **unpacked_data_dict):\n data_dict = packed_data_dict if packed_data_dict else unpacked_data_dict\n assert (\n len(data_dict[self.data_key].shape) == 5 or len(data_dict[self.data_key].shape) == 4\n ), f\"Incorrect data size or shape.\\\n \\nShould be (b, c, x, y, z) or (b, c, x, y) and is: {data_dict[self.data_key].shape}\"\n\n for b in range(data_dict[self.data_key].shape[0]):\n if np.random.uniform() < self.p_per_sample:\n do_invert = self.get_params(self.p_invert_image)\n data_dict[self.data_key][b] = self.__gamma__(\n data_dict[self.data_key][b],\n self.gamma_range,\n do_invert,\n per_channel=self.per_channel,\n )\n return data_dict" }, { "identifier": "MotionGhosting", "path": "yucca/image_processing/transforms/Ghosting.py", "snippet": "class MotionGhosting(YuccaTransform):\n \"\"\"\n variables in DIKU_3D_augmentation_params:\n do_motionGhosting\n motionGhosting_p_per_sample\n motionGhosting_alpha\n motionGhosting_numReps\n motionGhosting_axes\n \"\"\"\n\n def __init__(\n self,\n data_key=\"image\",\n p_per_sample=1,\n alpha=(0.85, 0.95),\n numReps=(2, 5),\n axes=(0, 3),\n ):\n self.data_key = data_key\n self.p_per_sample = p_per_sample\n self.alpha = alpha\n 
self.numReps = numReps\n self.axes = axes\n\n @staticmethod\n def get_params(alpha: Tuple[float], numReps: Tuple[float], axes: Tuple[float]) -> Tuple[float]:\n alpha = np.random.uniform(*alpha)\n numReps = np.random.randint(*numReps)\n axis = np.random.randint(*axes)\n return alpha, numReps, axis\n\n def __motionGhosting__(self, imageVolume, alpha, numReps, axis):\n if len(imageVolume.shape) == 3:\n assert axis in [0, 1, 2], \"Incorrect or no axis\"\n\n h, w, d = imageVolume.shape\n\n imageVolume = np.fft.fftn(imageVolume, s=[h, w, d])\n\n if axis == 0:\n imageVolume[0:-1:numReps, :, :] = alpha * imageVolume[0:-1:numReps, :, :]\n elif axis == 1:\n imageVolume[:, 0:-1:numReps, :] = alpha * imageVolume[:, 0:-1:numReps, :]\n else:\n imageVolume[:, :, 0:-1:numReps] = alpha * imageVolume[:, :, 0:-1:numReps]\n\n imageVolume = abs(np.fft.ifftn(imageVolume, s=[h, w, d]))\n if len(imageVolume.shape) == 2:\n assert axis in [0, 1], \"Incorrect or no axis\"\n h, w = imageVolume.shape\n imageVolume = np.fft.fftn(imageVolume, s=[h, w])\n\n if axis == 0:\n imageVolume[0:-1:numReps, :] = alpha * imageVolume[0:-1:numReps, :]\n else:\n imageVolume[:, 0:-1:numReps] = alpha * imageVolume[:, 0:-1:numReps]\n imageVolume = abs(np.fft.ifftn(imageVolume, s=[h, w]))\n return imageVolume\n\n def __call__(self, packed_data_dict=None, **unpacked_data_dict):\n data_dict = packed_data_dict if packed_data_dict else unpacked_data_dict\n assert (\n len(data_dict[self.data_key].shape) == 5 or len(data_dict[self.data_key].shape) == 4\n ), f\"Incorrect data size or shape.\\\n \\nShould be (b, c, x, y, z) or (b, c, x, y) and is: {data_dict[self.data_key].shape}\"\n\n for b in range(data_dict[self.data_key].shape[0]):\n for c in range(data_dict[self.data_key][b].shape[0]):\n if np.random.uniform() < self.p_per_sample:\n alpha, numReps, axis = self.get_params(self.alpha, self.numReps, self.axes)\n data_dict[self.data_key][b, c] = self.__motionGhosting__(\n data_dict[self.data_key][b, c], alpha, numReps, axis\n )\n return data_dict" }, { "identifier": "Masking", "path": "yucca/image_processing/transforms/Masking.py", "snippet": "class Masking(YuccaTransform):\n \"\"\"\n CURRENTLY NOT IMPLEMENTED\n \"\"\"\n\n def __init__(self, mask=False, data_key=\"image\", mask_ratio: tuple | float = 0.25):\n self.mask = mask\n self.data_key = data_key\n self.mask_ratio = mask_ratio\n\n @staticmethod\n def get_params(shape, ratio, start_idx):\n pass\n\n def __mask__(self, image, label, crop_start_idx):\n pass\n\n def __call__(self, packed_data_dict=None, **unpacked_data_dict):\n data_dict = packed_data_dict if packed_data_dict else unpacked_data_dict\n if self.mask:\n raise NotImplementedError(\"Masking is not implemented yet. 
It should not be enabled\")\n return data_dict" }, { "identifier": "Mirror", "path": "yucca/image_processing/transforms/Mirror.py", "snippet": "class Mirror(YuccaTransform):\n \"\"\"\n variables in DIKU_3D_augmentation_params:\n do_multiplicativeNoise\n multiplicativeNoise_p_per_sample\n multiplicativeNoise_mean\n multiplicativeNoise_sigma\n \"\"\"\n\n def __init__(\n self,\n data_key=\"image\",\n label_key=\"label\",\n p_per_sample=1,\n axes=(0, 1, 2),\n p_mirror_per_axis=0.33,\n skip_label=False,\n ):\n self.data_key = data_key\n self.label_key = label_key\n self.p_per_sample = p_per_sample\n self.p_mirror_per_axis = p_mirror_per_axis\n self.axes = axes\n self.skip_label = skip_label\n\n @staticmethod\n def get_params():\n # No parameters to retrieve\n pass\n\n def __mirror__(self, imageVolume, labelVolume, axes):\n # Input will be [c, x, y, z] or [c, x, y]\n if 0 in axes and np.random.uniform() < self.p_mirror_per_axis:\n imageVolume[:, :] = imageVolume[:, ::-1]\n labelVolume[:, :] = labelVolume[:, ::-1]\n if 1 in axes and np.random.uniform() < self.p_mirror_per_axis:\n imageVolume[:, :, :] = imageVolume[:, :, ::-1]\n labelVolume[:, :, :] = labelVolume[:, :, ::-1]\n if 2 in axes and np.random.uniform() < self.p_mirror_per_axis:\n imageVolume[:, :, :, :] = imageVolume[:, :, :, ::-1]\n labelVolume[:, :, :, :] = labelVolume[:, :, :, ::-1]\n return imageVolume, labelVolume\n\n def __mirrorimage__(self, imageVolume, axes):\n # Input will be [c, x, y, z] or [c, x, y]\n if 0 in axes and np.random.uniform() < self.p_mirror_per_axis:\n imageVolume[:, :] = imageVolume[:, ::-1]\n if 1 in axes and np.random.uniform() < self.p_mirror_per_axis:\n imageVolume[:, :, :] = imageVolume[:, :, ::-1]\n if 2 in axes and np.random.uniform() < self.p_mirror_per_axis:\n imageVolume[:, :, :, :] = imageVolume[:, :, :, ::-1]\n return imageVolume\n\n def __call__(self, packed_data_dict=None, **unpacked_data_dict):\n data_dict = packed_data_dict if packed_data_dict else unpacked_data_dict\n assert (\n len(data_dict[self.data_key].shape) == 5 or len(data_dict[self.data_key].shape) == 4\n ), f\"Incorrect data size or shape.\\\n \\nShould be (b, c, x, y, z) or (b, c, x, y) and is: {data_dict[self.data_key].shape}\"\n\n for b in range(data_dict[self.data_key].shape[0]):\n if np.random.uniform() < self.p_per_sample:\n if self.skip_label:\n data_dict[self.data_key][b] = self.__mirrorimage__(data_dict[self.data_key][b], self.axes)\n else:\n (\n data_dict[self.data_key][b],\n data_dict[self.label_key][b],\n ) = self.__mirror__(\n data_dict[self.data_key][b],\n data_dict[self.label_key][b],\n self.axes,\n )\n return data_dict" }, { "identifier": "AdditiveNoise", "path": "yucca/image_processing/transforms/Noise.py", "snippet": "class AdditiveNoise(YuccaTransform):\n \"\"\"\n variables in DIKU_3D_augmentation_params:\n do_additiveNoise\n additiveNoise_p_per_sample\n additiveNoise_mean\n additiveNoise_sigma\n \"\"\"\n\n def __init__(self, data_key=\"image\", p_per_sample=1, mean=(0.0, 0.0), sigma=(1e-3, 1e-4)):\n self.data_key = data_key\n self.p_per_sample = p_per_sample\n self.mean = mean\n self.sigma = sigma\n\n @staticmethod\n def get_params(mean: Tuple[float], sigma: Tuple[float]) -> Tuple[float]:\n mean = float(np.random.uniform(*mean))\n sigma = float(np.random.uniform(*sigma))\n return mean, sigma\n\n def __additiveNoise__(self, imageVolume, mean, sigma):\n # J = I+n\n gauss = np.random.normal(mean, sigma, imageVolume.shape)\n return imageVolume + gauss\n\n def __call__(self, packed_data_dict=None, 
**unpacked_data_dict):\n data_dict = packed_data_dict if packed_data_dict else unpacked_data_dict\n assert (\n len(data_dict[self.data_key].shape) == 5 or len(data_dict[self.data_key].shape) == 4\n ), f\"Incorrect data size or shape.\\\n \\nShould be (c, x, y, z) or (c, x, y) and is: {data_dict[self.data_key].shape}\"\n\n for b in range(data_dict[self.data_key].shape[0]):\n for c in range(data_dict[self.data_key][b].shape[0]):\n mean, sigma = self.get_params(self.mean, self.sigma)\n if np.random.uniform() < self.p_per_sample:\n data_dict[self.data_key][b, c] = self.__additiveNoise__(data_dict[self.data_key][b, c], mean, sigma)\n return data_dict" }, { "identifier": "MultiplicativeNoise", "path": "yucca/image_processing/transforms/Noise.py", "snippet": "class MultiplicativeNoise(YuccaTransform):\n \"\"\"\n variables in DIKU_3D_augmentation_params:\n do_multiplicativeNoise\n multiplicativeNoise_p_per_sample\n multiplicativeNoise_mean\n multiplicativeNoise_sigma\n \"\"\"\n\n def __init__(self, data_key=\"image\", p_per_sample=1, mean=(0.0, 0.0), sigma=(1e-3, 1e-4)):\n self.data_key = data_key\n self.p_per_sample = p_per_sample\n self.mean = mean\n self.sigma = sigma\n\n @staticmethod\n def get_params(mean: Tuple[float], sigma: Tuple[float]) -> Tuple[float]:\n mean = float(np.random.uniform(*mean))\n sigma = float(np.random.uniform(*sigma))\n return mean, sigma\n\n def __multiplicativeNoise__(self, imageVolume, mean, sigma):\n # J = I + I*n\n gauss = np.random.normal(mean, sigma, imageVolume.shape)\n return imageVolume + imageVolume * gauss\n\n def __call__(self, packed_data_dict=None, **unpacked_data_dict):\n data_dict = packed_data_dict if packed_data_dict else unpacked_data_dict\n assert (\n len(data_dict[self.data_key].shape) == 5 or len(data_dict[self.data_key].shape) == 4\n ), f\"Incorrect data size or shape.\\\n \\nShould be (b, c, x, y, z) or (b, c, x, y) and is: {data_dict[self.data_key].shape}\"\n\n for b in range(data_dict[self.data_key].shape[0]):\n for c in range(data_dict[self.data_key][b].shape[0]):\n if np.random.uniform() < self.p_per_sample:\n mean, sigma = self.get_params(self.mean, self.sigma)\n data_dict[self.data_key][b, c] = self.__multiplicativeNoise__(data_dict[self.data_key][b, c], mean, sigma)\n return data_dict" }, { "identifier": "GibbsRinging", "path": "yucca/image_processing/transforms/Ringing.py", "snippet": "class GibbsRinging(YuccaTransform):\n \"\"\"\n variables in DIKU_3D_augmentation_params:\n do_gibbsRinging\n gibbsRinging_p_per_sample\n gibbsRinging_cutFreq\n gibbsRinging_axes\n \"\"\"\n\n def __init__(self, data_key=\"image\", p_per_sample=1, cutFreq=(96, 129), axes=(0, 3)):\n self.data_key = data_key\n self.p_per_sample = p_per_sample\n self.cutFreq = cutFreq\n self.axes = axes\n\n @staticmethod\n def get_params(cutFreq, axes):\n cutFreq = np.random.randint(*cutFreq)\n axis = np.random.randint(*axes)\n return cutFreq, axis\n\n def __gibbsRinging__(self, imageVolume, numSample, axis):\n if len(imageVolume.shape) == 3:\n assert axis in [0, 1, 2], \"Incorrect or no axis\"\n\n h, w, d = imageVolume.shape\n if axis == 0:\n imageVolume = imageVolume.transpose(0, 2, 1)\n imageVolume = np.fft.fftshift(np.fft.fftn(imageVolume, s=[h, d, w]))\n imageVolume[:, :, 0 : int(np.ceil(w / 2) - np.ceil(numSample / 2))] = 0\n imageVolume[:, :, int(np.ceil(w / 2) + np.ceil(numSample / 2)) : w] = 0\n imageVolume = abs(np.fft.ifftn(np.fft.ifftshift(imageVolume), s=[h, d, w]))\n imageVolume = imageVolume.transpose(0, 2, 1)\n elif axis == 1:\n imageVolume = 
imageVolume.transpose(1, 2, 0)\n imageVolume = np.fft.fftshift(np.fft.fftn(imageVolume, s=[w, d, h]))\n imageVolume[:, :, 0 : int(np.ceil(h / 2) - np.ceil(numSample / 2))] = 0\n imageVolume[:, :, int(np.ceil(h / 2) + np.ceil(numSample / 2)) : h] = 0\n imageVolume = abs(np.fft.ifftn(np.fft.ifftshift(imageVolume), s=[w, d, h]))\n imageVolume = imageVolume.transpose(2, 0, 1)\n else:\n imageVolume = np.fft.fftshift(np.fft.fftn(imageVolume, s=[h, w, d]))\n imageVolume[:, :, 0 : int(np.ceil(d / 2) - np.ceil(numSample / 2))] = 0\n imageVolume[:, :, int(np.ceil(d / 2) + np.ceil(numSample / 2)) : d] = 0\n imageVolume = abs(np.fft.ifftn(np.fft.ifftshift(imageVolume), s=[h, w, d]))\n elif len(imageVolume.shape) == 2:\n assert axis in [0, 1], \"incorrect or no axis\"\n h, w = imageVolume.shape\n if axis == 0:\n imageVolume = np.fft.fftshift(np.fft.fftn(imageVolume, s=[h, w]))\n imageVolume[:, 0 : int(np.ceil(w / 2) - np.ceil(numSample / 2))] = 0\n imageVolume[:, int(np.ceil(w / 2) + np.ceil(numSample / 2)) : w] = 0\n imageVolume = abs(np.fft.ifftn(np.fft.ifftshift(imageVolume), s=[h, w]))\n else:\n imageVolume = imageVolume.conj().T\n imageVolume = np.fft.fftshift(np.fft.fftn(imageVolume, s=[w, h]))\n imageVolume[:, 0 : int(np.ceil(h / 2) - np.ceil(numSample / 2))] = 0\n imageVolume[:, int(np.ceil(h / 2) + np.ceil(numSample / 2)) : h] = 0\n imageVolume = abs(np.fft.ifftn(np.fft.ifftshift(imageVolume), s=[w, h]))\n imageVolume = imageVolume.conj().T\n return imageVolume\n\n def __call__(self, packed_data_dict=None, **unpacked_data_dict):\n data_dict = packed_data_dict if packed_data_dict else unpacked_data_dict\n assert (\n len(data_dict[self.data_key].shape) == 5 or len(data_dict[self.data_key].shape) == 4\n ), f\"Incorrect data size or shape.\\\n \\nShould be (b, c, x, y, z) or (b, c, x, y) and is: {data_dict[self.data_key].shape}\"\n\n for b in range(data_dict[self.data_key].shape[0]):\n for c in range(data_dict[self.data_key][b].shape[0]):\n if np.random.uniform() < self.p_per_sample:\n cutFreq, axis = self.get_params(self.cutFreq, self.axes)\n data_dict[self.data_key][b, c] = self.__gibbsRinging__(data_dict[self.data_key][b, c], cutFreq, axis)\n return data_dict" }, { "identifier": "DownsampleSegForDS", "path": "yucca/image_processing/transforms/sampling.py", "snippet": "class DownsampleSegForDS(YuccaTransform):\n \"\"\" \"\"\"\n\n def __init__(self, deep_supervision: bool = False, label_key=\"label\", factors=(1, 0.5, 0.25, 0.125, 0.0625)):\n self.deep_supervision = deep_supervision\n self.label_key = label_key\n self.factors = factors\n\n @staticmethod\n def get_params():\n # No parameters to retrieve\n pass\n\n def __downsample__(self, label, factors):\n orig_type = label.dtype\n orig_shape = label.shape\n downsampled_labels = []\n for factor in factors:\n target_shape = np.array(orig_shape).astype(int)\n for i in range(2, len(orig_shape)):\n target_shape[i] *= factor\n if np.all(target_shape == orig_shape):\n downsampled_labels.append(label)\n else:\n canvas = np.zeros(target_shape)\n for b in range(label.shape[0]):\n for c in range(label[b].shape[0]):\n canvas[b, c] = resize(\n label[b, c].astype(float),\n target_shape[2:],\n 0,\n mode=\"edge\",\n clip=True,\n anti_aliasing=False,\n ).astype(orig_type)\n downsampled_labels.append(canvas)\n return downsampled_labels\n\n def __call__(self, packed_data_dict=None, **unpacked_data_dict):\n data_dict = packed_data_dict if packed_data_dict else unpacked_data_dict\n if self.deep_supervision:\n data_dict[self.label_key] = 
self.__downsample__(data_dict[self.label_key], self.factors)\n return data_dict" }, { "identifier": "SimulateLowres", "path": "yucca/image_processing/transforms/SimulateLowres.py", "snippet": "class SimulateLowres(YuccaTransform):\n \"\"\" \"\"\"\n\n def __init__(\n self,\n data_key=\"image\",\n p_per_sample=1,\n p_per_channel=0.5,\n p_per_axis=0.33,\n zoom_range=(0.5, 1.0),\n ):\n self.data_key = data_key\n self.p_per_sample = p_per_sample\n self.p_per_channel = p_per_channel\n self.p_per_axis = p_per_axis\n self.zoom_range = zoom_range\n\n @staticmethod\n def get_params(zoom_range, shape, p_per_axis):\n # No parameters to retrieve\n if isinstance(shape, (list, tuple)):\n shape = np.array(shape)\n zoom = np.random.uniform(*zoom_range)\n dim = len(shape)\n zoomed_shape = np.round(shape * zoom).astype(int)\n for i in range(dim):\n if np.random.uniform() < p_per_axis:\n shape[i] = zoomed_shape[i]\n return shape\n\n def __simulatelowres__(self, imageVolume, target_shape):\n shape = imageVolume.shape\n downsampled = resize(\n imageVolume.astype(float),\n target_shape,\n order=0,\n mode=\"edge\",\n anti_aliasing=False,\n )\n imageVolume = resize(downsampled, shape, order=3, mode=\"edge\", anti_aliasing=False)\n return imageVolume\n\n def __call__(self, packed_data_dict=None, **unpacked_data_dict):\n data_dict = packed_data_dict if packed_data_dict else unpacked_data_dict\n assert (\n len(data_dict[self.data_key].shape) == 5 or len(data_dict[self.data_key].shape) == 4\n ), f\"Incorrect data size or shape.\\\n \\nShould be (b, c, x, y, z) or (b, c, x, y) and is: {data_dict[self.data_key].shape}\"\n\n for b in range(data_dict[self.data_key].shape[0]):\n if np.random.uniform() < self.p_per_sample:\n for c in range(data_dict[self.data_key][b].shape[0]):\n if np.random.uniform() < self.p_per_channel:\n target_shape = self.get_params(\n self.zoom_range,\n data_dict[self.data_key][b, c].shape,\n self.p_per_axis,\n )\n data_dict[self.data_key][b, c] = self.__simulatelowres__(data_dict[self.data_key][b, c], target_shape)\n return data_dict" }, { "identifier": "Spatial", "path": "yucca/image_processing/transforms/Spatial.py", "snippet": "class Spatial(YuccaTransform):\n \"\"\"\n variables in aug_params:\n do_Rotation\n Rotation_p_per_sample\n Rotation_p_per_channel\n Rotation_x_rot\n Rotation_y_rot\n Rotation_z_rot\n \"\"\"\n\n def __init__(\n self,\n data_key=\"image\",\n label_key=\"label\",\n crop=False,\n patch_size: Tuple[int] = None,\n random_crop=True,\n p_deform_per_sample=1,\n deform_sigma=(20, 30),\n deform_alpha=(300, 600),\n p_rot_per_sample=1,\n p_rot_per_axis=1,\n x_rot_in_degrees=(0.0, 10.0),\n y_rot_in_degrees=(0.0, 10.0),\n z_rot_in_degrees=(0.0, 10.0),\n p_scale_per_sample=1,\n scale_factor=(0.85, 1.15),\n skip_label=False,\n ):\n self.data_key = data_key\n self.label_key = label_key\n self.skip_label = skip_label\n self.do_crop = crop\n self.patch_size = patch_size\n self.random_crop = random_crop\n\n self.p_deform_per_sample = p_deform_per_sample\n self.deform_sigma = deform_sigma\n self.deform_alpha = deform_alpha\n\n self.p_rot_per_sample = p_rot_per_sample\n self.p_rot_per_axis = p_rot_per_axis\n self.x_rot_in_degrees = x_rot_in_degrees\n self.y_rot_in_degrees = y_rot_in_degrees\n self.z_rot_in_degrees = z_rot_in_degrees\n\n self.p_scale_per_sample = p_scale_per_sample\n self.scale_factor = scale_factor\n\n @staticmethod\n def get_params(\n deform_alpha: Tuple[float],\n deform_sigma: Tuple[float],\n x_rot: Tuple[float],\n y_rot: Tuple[float],\n z_rot: Tuple[float],\n scale_factor: 
Tuple[float],\n ) -> Tuple[float]:\n if deform_alpha:\n deform_alpha = float(np.random.uniform(*deform_alpha))\n if deform_sigma:\n deform_sigma = float(np.random.uniform(*deform_sigma))\n\n if x_rot:\n x_rot = float(np.random.uniform(*x_rot)) * (np.pi / 180)\n if y_rot:\n y_rot = float(np.random.uniform(*y_rot)) * (np.pi / 180)\n if z_rot:\n z_rot = float(np.random.uniform(*z_rot)) * (np.pi / 180)\n\n if scale_factor:\n scale_factor = float(np.random.uniform(*scale_factor))\n\n return deform_alpha, deform_sigma, x_rot, y_rot, z_rot, scale_factor\n\n def __CropDeformRotateScale__(\n self,\n imageVolume,\n labelVolume,\n patch_size,\n alpha,\n sigma,\n x_rot,\n y_rot,\n z_rot,\n scale_factor,\n skip_label,\n ):\n if not self.do_crop:\n patch_size = imageVolume.shape[2:]\n\n coords = create_zero_centered_coordinate_matrix(patch_size)\n imageCanvas = np.zeros((imageVolume.shape[0], imageVolume.shape[1], *patch_size), dtype=np.float32)\n\n # First we apply deformation to the coordinate matrix\n if np.random.uniform() < self.p_deform_per_sample:\n coords = deform_coordinate_matrix(coords, alpha=alpha, sigma=sigma)\n\n # Then we rotate the coordinate matrix around one or more axes\n if np.random.uniform() < self.p_rot_per_sample:\n rot_matrix = np.eye(len(patch_size))\n if len(patch_size) == 2:\n rot_matrix = np.dot(rot_matrix, Rz2D(z_rot))\n else:\n if np.random.uniform() < self.p_rot_per_axis:\n rot_matrix = np.dot(rot_matrix, Rx(x_rot))\n if np.random.uniform() < self.p_rot_per_axis:\n rot_matrix = np.dot(rot_matrix, Ry(y_rot))\n if np.random.uniform() < self.p_rot_per_axis:\n rot_matrix = np.dot(rot_matrix, Rz(z_rot))\n\n coords = np.dot(coords.reshape(len(patch_size), -1).transpose(), rot_matrix).transpose().reshape(coords.shape)\n\n # And finally scale it\n # Scaling effect is \"inverted\"\n # i.e. 
a scale factor of 0.9 will zoom in\n if np.random.uniform() < self.p_scale_per_sample:\n coords *= scale_factor\n\n if self.random_crop and self.do_crop:\n for d in range(len(patch_size)):\n crop_center_idx = [\n np.random.randint(\n int(patch_size[d] / 2),\n imageVolume.shape[d + 2] - int(patch_size[d] / 2) + 1,\n )\n ]\n coords[d] += crop_center_idx\n else:\n # Reversing the zero-centering of the coordinates\n for d in range(len(patch_size)):\n coords[d] += imageVolume.shape[d + 2] / 2.0 - 0.5\n\n # Mapping the images to the distorted coordinates\n for b in range(imageVolume.shape[0]):\n for c in range(imageVolume.shape[1]):\n imageCanvas[b, c] = map_coordinates(\n imageVolume[b, c].astype(float),\n coords,\n order=3,\n mode=\"constant\",\n cval=0.0,\n ).astype(imageVolume.dtype)\n\n if not skip_label:\n labelCanvas = np.zeros(\n (labelVolume.shape[0], labelVolume.shape[1], *patch_size),\n dtype=np.float32,\n )\n\n # Mapping the labelmentations to the distorted coordinates\n for b in range(labelVolume.shape[0]):\n for c in range(labelVolume.shape[1]):\n labelCanvas[b, c] = map_coordinates(labelVolume[b, c], coords, order=0, mode=\"constant\", cval=0.0).astype(\n labelVolume.dtype\n )\n return imageCanvas, labelCanvas\n return imageCanvas, labelVolume\n\n def __call__(self, packed_data_dict=None, **unpacked_data_dict):\n data_dict = packed_data_dict if packed_data_dict else unpacked_data_dict\n assert (\n len(data_dict[self.data_key].shape) == 5 or len(data_dict[self.data_key].shape) == 4\n ), f\"Incorrect data size or shape.\\\n\t\t\t\\nShould be (c, x, y, z) or (c, x, y) and is: {data_dict[self.data_key].shape}\"\n\n (\n deform_alpha,\n deform_sigma,\n x_rot_rad,\n y_rot_rad,\n z_rot_rad,\n scale_factor,\n ) = self.get_params(\n deform_alpha=self.deform_alpha,\n deform_sigma=self.deform_sigma,\n x_rot=self.x_rot_in_degrees,\n y_rot=self.y_rot_in_degrees,\n z_rot=self.z_rot_in_degrees,\n scale_factor=self.scale_factor,\n )\n\n (\n data_dict[self.data_key],\n data_dict[self.label_key],\n ) = self.__CropDeformRotateScale__(\n data_dict[self.data_key],\n data_dict[self.label_key],\n self.patch_size,\n deform_alpha,\n deform_sigma,\n x_rot_rad,\n y_rot_rad,\n z_rot_rad,\n scale_factor,\n self.skip_label,\n )\n return data_dict" }, { "identifier": "find_optimal_tensor_dims", "path": "yucca/network_architectures/utils/model_memory_estimation.py", "snippet": "def find_optimal_tensor_dims(\n dimensionality,\n num_classes,\n modalities,\n model_name,\n max_patch_size,\n fixed_patch_size: tuple | list = None,\n fixed_batch_size: tuple | list = None,\n max_memory_usage_in_gb=None,\n):\n if max_memory_usage_in_gb is None:\n try:\n gpu_vram_in_gb = int(torch.cuda.get_device_properties(0).total_memory / 1024**2 * 0.001)\n except RuntimeError:\n gpu_vram_in_gb = 12\n # Don't wanna utilize more than 12GB, to ensure epoch times are kept relatively low\n max_memory_usage_in_gb = min(12, gpu_vram_in_gb)\n\n # Use this offset to factor the overhead from CUDA and other libraries taking a substantial amount of VRAM\n offset = 2.5\n\n OOM_OR_MAXED = False\n final_batch_size = None\n final_patch_size = None\n\n if dimensionality == \"2D\":\n if len(max_patch_size) == 3:\n max_patch_size = max_patch_size[1:]\n conv = nn.Conv2d\n dropout = nn.Dropout2d\n norm = nn.InstanceNorm2d\n batch_size = 16\n max_batch_size = 512\n patch_size = [32, 32] if not model_name == \"UNetR\" else [64, 64]\n if dimensionality == \"3D\":\n conv = nn.Conv3d\n dropout = nn.Dropout3d\n norm = nn.InstanceNorm3d\n batch_size = 2\n 
max_batch_size = 2\n patch_size = [32, 32, 32] if not model_name == \"UNetR\" else [64, 64, 64]\n\n if fixed_batch_size:\n batch_size = fixed_batch_size\n max_batch_size = fixed_batch_size\n\n absolute_max = 128**3\n\n model = recursive_find_python_class(\n folder=[join(yucca.__path__[0], \"network_architectures\")],\n class_name=model_name,\n current_module=\"yucca.network_architectures\",\n )\n model_kwargs = {\n \"input_channels\": modalities,\n \"num_classes\": num_classes,\n \"conv_op\": conv,\n \"patch_size\": patch_size,\n \"dropout_op\": dropout,\n \"norm_op\": norm,\n }\n model_kwargs = filter_kwargs(model, model_kwargs)\n model = model(**model_kwargs)\n\n est = 0\n idx = 0\n maxed_idxs = []\n if fixed_patch_size is not None:\n patch_size = fixed_patch_size\n # first fix dimensions so they are divisible by 16 (otherwise issues with standard pools and strides)\n patch_size = [math.ceil(i / 16) * 16 for i in patch_size]\n max_patch_size = patch_size\n while not OOM_OR_MAXED:\n try:\n if np.prod(patch_size) >= absolute_max:\n max_patch_size = patch_size\n\n inp = torch.zeros((batch_size, modalities, *patch_size))\n est = estimate_memory_training(model, inp)\n\n # If estimated usage is still within acceptable bounds we set the (maybe temporary) final dimensions\n if est < max_memory_usage_in_gb - offset:\n final_batch_size = batch_size\n final_patch_size = tuple(patch_size)\n else:\n OOM_OR_MAXED = True\n\n if patch_size[idx] + 16 < max_patch_size[idx]:\n patch_size[idx] += 16\n if model_name == \"UNetR\": # we need to re-instantiate it because of the ViT\n model = recursive_find_python_class(\n folder=[join(yucca.__path__[0], \"network_architectures\")],\n class_name=model_name,\n current_module=\"yucca.network_architectures\",\n )\n model = model(**model_kwargs)\n\n if idx < len(patch_size) - 1:\n idx += 1\n else:\n idx = 0\n else:\n # here we mark that one dimension has been maxed out\n if idx not in maxed_idxs:\n maxed_idxs.append(idx)\n # if not all dimensions are maxed out for the patch_size,\n # we try the next dimension\n if not len(maxed_idxs) == len(patch_size):\n if idx < len(patch_size) - 1:\n idx += 1\n else:\n idx = 0\n\n # when all dimensions of the patch are maxed\n # we try increasing the batch_size instead\n if len(maxed_idxs) == len(patch_size):\n # Unless batch_size is maxed\n if not max_batch_size > batch_size:\n final_batch_size = batch_size\n final_patch_size = tuple(patch_size)\n OOM_OR_MAXED = True\n if len(patch_size) == 3:\n batch_size += 2\n else:\n batch_size += 8\n except torch.cuda.OutOfMemoryError:\n OOM_OR_MAXED = True\n if final_batch_size is None or final_batch_size is None:\n print(\n \"\\n\"\n \"Final batch and/or patch size was not found. \\n\"\n \"This is likely caused by supplying large fixed parameters causing (or almost causing) OOM errors. \\n\"\n \"Will attempt to run with supplied parameters, but this might cause issues.\"\n )\n print(\n f\"Estimated GPU memory usage for parameters is: {est}GB and the max requested vram is: {max_memory_usage_in_gb-offset}GB. \\n\"\n f\"This includes an offset of {offset}GB to account for vram used by PyTorch and CUDA. \\n\"\n \"Consider increasing the max vram or working with a smaller batch and/or patch size.\"\n \"\\n\"\n )\n if final_batch_size is None:\n final_batch_size = batch_size\n if final_patch_size is None:\n final_patch_size = tuple(patch_size)\n return final_batch_size, final_patch_size" } ]
from torchvision import transforms from yucca.image_processing.matrix_ops import get_max_rotated_size from yucca.image_processing.transforms.formatting import ( AddBatchDimension, RemoveBatchDimension, ) from yucca.image_processing.transforms.BiasField import BiasField from yucca.image_processing.transforms.Blur import Blur from yucca.image_processing.transforms.CopyImageToSeg import CopyImageToSeg from yucca.image_processing.transforms.Gamma import Gamma from yucca.image_processing.transforms.Ghosting import MotionGhosting from yucca.image_processing.transforms.Masking import Masking from yucca.image_processing.transforms.Mirror import Mirror from yucca.image_processing.transforms.Noise import ( AdditiveNoise, MultiplicativeNoise, ) from yucca.image_processing.transforms.Ringing import GibbsRinging from yucca.image_processing.transforms.sampling import DownsampleSegForDS from yucca.image_processing.transforms.SimulateLowres import SimulateLowres from yucca.image_processing.transforms.Spatial import Spatial from yucca.network_architectures.utils.model_memory_estimation import ( find_optimal_tensor_dims, )
12,875
class YuccaAugmentationComposer: def __init__( self, patch_size: list | tuple, deep_supervision: bool = False, is_2D: bool = False, parameter_dict: dict = {}, task_type_preset: str = None, ): self._pre_aug_patch_size = None self.deep_supervision = deep_supervision self.setup_default_params(is_2D, patch_size) self.apply_task_type_specific_preset(task_type_preset) self.overwrite_params(parameter_dict) self.train_transforms = self.compose_train_transforms() self.val_transforms = self.compose_val_transforms() def setup_default_params(self, is_2d, patch_size): print("Composing Transforms") # Define whether we crop before or after applying augmentations # Define if cropping is random or always centered self.random_crop = True self.mask_image_for_reconstruction = False self.patch_size = patch_size # label/segmentation transforms self.skip_label = False self.label_dtype = int self.copy_image_to_label = False self.additive_noise_p_per_sample = 0.2 self.additive_noise_mean = (0.0, 0.0) self.additive_noise_sigma = (1e-3, 1e-4) self.biasfield_p_per_sample = 0.33 self.blurring_p_per_sample = 0.2 self.blurring_sigma = (0.0, 1.0) self.blurring_p_per_channel = 0.5 self.elastic_deform_p_per_sample = 0.33 self.elastic_deform_alpha = (200, 600) self.elastic_deform_sigma = (20, 30) self.gamma_p_per_sample = 0.2 self.gamma_p_invert_image = 0.05 self.gamma_range = (0.5, 2.0) self.gibbs_ringing_p_per_sample = 0.2 self.gibbs_ringing_cutfreq = (96, 129) self.gibbs_ringing_axes = (0, 2) if is_2d else (0, 3) self.mirror_p_per_sample = 0.0 self.mirror_p_per_axis = 0.33 self.mirror_axes = (0, 1) if is_2d else (0, 1, 2) self.motion_ghosting_p_per_sample = 0.2 self.motion_ghosting_alpha = (0.85, 0.95) self.motion_ghosting_numreps = (2, 11) self.motion_ghosting_axes = (0, 2) if is_2d else (0, 3) self.multiplicative_noise_p_per_sample = 0.2 self.multiplicative_noise_mean = (0, 0) self.multiplicative_noise_sigma = (1e-3, 1e-4) self.rotation_p_per_sample = 0.2 self.rotation_p_per_axis = 0.66 self.rotation_x = (-30.0, 30.0) self.rotation_y = (-0.0, 0.0) if is_2d else (-30.0, 30.0) self.rotation_z = (-0.0, 0.0) if is_2d else (-30.0, 30.0) self.scale_p_per_sample = 0.2 self.scale_factor = (0.9, 1.1) self.simulate_lowres_p_per_sample = 0.2 self.simulate_lowres_p_per_channel = 0.5 self.simulate_lowres_p_per_axis = 0.33 self.simulate_lowres_zoom_range = (0.5, 1.0) @property def pre_aug_patch_size(self): # First check if any spatial transforms are included if self.elastic_deform_p_per_sample > 0 or self.rotation_p_per_sample > 0 or self.scale_p_per_sample > 0: self._pre_aug_patch_size = get_max_rotated_size(self.patch_size) return self._pre_aug_patch_size def apply_task_type_specific_preset(self, task_type_preset): if task_type_preset == "classification": self.skip_label = True if task_type_preset == "unsupervised": self.skip_label = True self.copy_image_to_label = True # This should be uncommented when masking is properly implemented # augmentation_parameter_dict["mask_image_for_reconstruction"] = True def overwrite_params(self, parameter_dict): for key, value in parameter_dict.items(): setattr(self, key, value) def compose_train_transforms(self): tr_transforms = transforms.Compose( [
class YuccaAugmentationComposer: def __init__( self, patch_size: list | tuple, deep_supervision: bool = False, is_2D: bool = False, parameter_dict: dict = {}, task_type_preset: str = None, ): self._pre_aug_patch_size = None self.deep_supervision = deep_supervision self.setup_default_params(is_2D, patch_size) self.apply_task_type_specific_preset(task_type_preset) self.overwrite_params(parameter_dict) self.train_transforms = self.compose_train_transforms() self.val_transforms = self.compose_val_transforms() def setup_default_params(self, is_2d, patch_size): print("Composing Transforms") # Define whether we crop before or after applying augmentations # Define if cropping is random or always centered self.random_crop = True self.mask_image_for_reconstruction = False self.patch_size = patch_size # label/segmentation transforms self.skip_label = False self.label_dtype = int self.copy_image_to_label = False self.additive_noise_p_per_sample = 0.2 self.additive_noise_mean = (0.0, 0.0) self.additive_noise_sigma = (1e-3, 1e-4) self.biasfield_p_per_sample = 0.33 self.blurring_p_per_sample = 0.2 self.blurring_sigma = (0.0, 1.0) self.blurring_p_per_channel = 0.5 self.elastic_deform_p_per_sample = 0.33 self.elastic_deform_alpha = (200, 600) self.elastic_deform_sigma = (20, 30) self.gamma_p_per_sample = 0.2 self.gamma_p_invert_image = 0.05 self.gamma_range = (0.5, 2.0) self.gibbs_ringing_p_per_sample = 0.2 self.gibbs_ringing_cutfreq = (96, 129) self.gibbs_ringing_axes = (0, 2) if is_2d else (0, 3) self.mirror_p_per_sample = 0.0 self.mirror_p_per_axis = 0.33 self.mirror_axes = (0, 1) if is_2d else (0, 1, 2) self.motion_ghosting_p_per_sample = 0.2 self.motion_ghosting_alpha = (0.85, 0.95) self.motion_ghosting_numreps = (2, 11) self.motion_ghosting_axes = (0, 2) if is_2d else (0, 3) self.multiplicative_noise_p_per_sample = 0.2 self.multiplicative_noise_mean = (0, 0) self.multiplicative_noise_sigma = (1e-3, 1e-4) self.rotation_p_per_sample = 0.2 self.rotation_p_per_axis = 0.66 self.rotation_x = (-30.0, 30.0) self.rotation_y = (-0.0, 0.0) if is_2d else (-30.0, 30.0) self.rotation_z = (-0.0, 0.0) if is_2d else (-30.0, 30.0) self.scale_p_per_sample = 0.2 self.scale_factor = (0.9, 1.1) self.simulate_lowres_p_per_sample = 0.2 self.simulate_lowres_p_per_channel = 0.5 self.simulate_lowres_p_per_axis = 0.33 self.simulate_lowres_zoom_range = (0.5, 1.0) @property def pre_aug_patch_size(self): # First check if any spatial transforms are included if self.elastic_deform_p_per_sample > 0 or self.rotation_p_per_sample > 0 or self.scale_p_per_sample > 0: self._pre_aug_patch_size = get_max_rotated_size(self.patch_size) return self._pre_aug_patch_size def apply_task_type_specific_preset(self, task_type_preset): if task_type_preset == "classification": self.skip_label = True if task_type_preset == "unsupervised": self.skip_label = True self.copy_image_to_label = True # This should be uncommented when masking is properly implemented # augmentation_parameter_dict["mask_image_for_reconstruction"] = True def overwrite_params(self, parameter_dict): for key, value in parameter_dict.items(): setattr(self, key, value) def compose_train_transforms(self): tr_transforms = transforms.Compose( [
AddBatchDimension(),
1
2023-10-26 08:13:03+00:00
16k
Elfenreigen/UniChest
optim/optim_factory.py
[ { "identifier": "Adafactor", "path": "optim/adafactor.py", "snippet": "class Adafactor(torch.optim.Optimizer):\n \"\"\"Implements Adafactor algorithm.\n This implementation is based on: `Adafactor: Adaptive Learning Rates with Sublinear Memory Cost`\n (see https://arxiv.org/abs/1804.04235)\n\n Note that this optimizer internally adjusts the learning rate depending on the\n *scale_parameter*, *relative_step* and *warmup_init* options.\n\n To use a manual (external) learning rate schedule you should set `scale_parameter=False` and\n `relative_step=False`.\n\n Arguments:\n params (iterable): iterable of parameters to optimize or dicts defining parameter groups\n lr (float, optional): external learning rate (default: None)\n eps (tuple[float, float]): regularization constants for square gradient\n and parameter scale respectively (default: (1e-30, 1e-3))\n clip_threshold (float): threshold of root mean square of final gradient update (default: 1.0)\n decay_rate (float): coefficient used to compute running averages of square gradient (default: -0.8)\n beta1 (float): coefficient used for computing running averages of gradient (default: None)\n weight_decay (float, optional): weight decay (L2 penalty) (default: 0)\n scale_parameter (bool): if True, learning rate is scaled by root mean square of parameter (default: True)\n relative_step (bool): if True, time-dependent learning rate is computed\n instead of external learning rate (default: True)\n warmup_init (bool): time-dependent learning rate computation depends on\n whether warm-up initialization is being used (default: False)\n \"\"\"\n\n def __init__(self, params, lr=None, eps=1e-30, eps_scale=1e-3, clip_threshold=1.0,\n decay_rate=-0.8, betas=None, weight_decay=0.0, scale_parameter=True, warmup_init=False):\n relative_step = lr is None\n if warmup_init and not relative_step:\n raise ValueError('warmup_init requires relative_step=True')\n\n beta1 = None if betas is None else betas[0] # make it compat with standard betas arg\n defaults = dict(lr=lr, eps=eps, eps_scale=eps_scale, clip_threshold=clip_threshold, decay_rate=decay_rate,\n beta1=beta1, weight_decay=weight_decay, scale_parameter=scale_parameter,\n relative_step=relative_step, warmup_init=warmup_init)\n super(Adafactor, self).__init__(params, defaults)\n\n @staticmethod\n def _get_lr(param_group, param_state):\n if param_group['relative_step']:\n min_step = 1e-6 * param_state['step'] if param_group['warmup_init'] else 1e-2\n lr_t = min(min_step, 1.0 / math.sqrt(param_state['step']))\n param_scale = 1.0\n if param_group['scale_parameter']:\n param_scale = max(param_group['eps_scale'], param_state['RMS'])\n param_group['lr'] = lr_t * param_scale\n return param_group['lr']\n\n @staticmethod\n def _get_options(param_group, param_shape):\n factored = len(param_shape) >= 2\n use_first_moment = param_group['beta1'] is not None\n return factored, use_first_moment\n\n @staticmethod\n def _rms(tensor):\n return tensor.norm(2) / (tensor.numel() ** 0.5)\n\n def _approx_sq_grad(self, exp_avg_sq_row, exp_avg_sq_col):\n r_factor = (exp_avg_sq_row / exp_avg_sq_row.mean(dim=-1, keepdim=True)).rsqrt_().unsqueeze(-1)\n c_factor = exp_avg_sq_col.unsqueeze(-2).rsqrt()\n return torch.mul(r_factor, c_factor)\n\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n Arguments:\n closure (callable, optional): A closure that reevaluates the model and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n for p in 
group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data\n if grad.dtype in {torch.float16, torch.bfloat16}:\n grad = grad.float()\n if grad.is_sparse:\n raise RuntimeError('Adafactor does not support sparse gradients.')\n\n state = self.state[p]\n grad_shape = grad.shape\n\n factored, use_first_moment = self._get_options(group, grad_shape)\n # State Initialization\n if len(state) == 0:\n state['step'] = 0\n\n if use_first_moment:\n # Exponential moving average of gradient values\n state['exp_avg'] = torch.zeros_like(grad)\n if factored:\n state['exp_avg_sq_row'] = torch.zeros(grad_shape[:-1]).to(grad)\n state['exp_avg_sq_col'] = torch.zeros(grad_shape[:-2] + grad_shape[-1:]).to(grad)\n else:\n state['exp_avg_sq'] = torch.zeros_like(grad)\n\n state['RMS'] = 0\n else:\n if use_first_moment:\n state['exp_avg'] = state['exp_avg'].to(grad)\n if factored:\n state['exp_avg_sq_row'] = state['exp_avg_sq_row'].to(grad)\n state['exp_avg_sq_col'] = state['exp_avg_sq_col'].to(grad)\n else:\n state['exp_avg_sq'] = state['exp_avg_sq'].to(grad)\n\n p_data_fp32 = p.data\n if p.data.dtype in {torch.float16, torch.bfloat16}:\n p_data_fp32 = p_data_fp32.float()\n\n state['step'] += 1\n state['RMS'] = self._rms(p_data_fp32)\n lr_t = self._get_lr(group, state)\n\n beta2t = 1.0 - math.pow(state['step'], group['decay_rate'])\n update = grad ** 2 + group['eps']\n if factored:\n exp_avg_sq_row = state['exp_avg_sq_row']\n exp_avg_sq_col = state['exp_avg_sq_col']\n\n exp_avg_sq_row.mul_(beta2t).add_(1.0 - beta2t, update.mean(dim=-1))\n exp_avg_sq_col.mul_(beta2t).add_(1.0 - beta2t, update.mean(dim=-2))\n #exp_avg_sq_row.mul_(beta2t).add_(update.mean(dim=-1), alpha=1.0 - beta2t) # pytorch 1.6+\n #exp_avg_sq_col.mul_(beta2t).add_(update.mean(dim=-2), alpha=1.0 - beta2t)\n\n # Approximation of exponential moving average of square of gradient\n update = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col)\n update.mul_(grad)\n else:\n exp_avg_sq = state['exp_avg_sq']\n\n exp_avg_sq.mul_(beta2t).add_(1.0 - beta2t, update)\n #exp_avg_sq.mul_(beta2t).add_(update, alpha=1.0 - beta2t) # pytorch 1.6+\n update = exp_avg_sq.rsqrt().mul_(grad)\n\n update.div_((self._rms(update) / group['clip_threshold']).clamp_(min=1.0))\n update.mul_(lr_t)\n\n if use_first_moment:\n exp_avg = state['exp_avg']\n exp_avg.mul_(group[\"beta1\"]).add_(1 - group[\"beta1\"], update)\n #exp_avg.mul_(group['beta1']).add_(update, alpha=1 - group['beta1']) # pytorch 1.6+\n update = exp_avg\n\n if group['weight_decay'] != 0:\n p_data_fp32.add_(-group[\"weight_decay\"] * lr_t, p_data_fp32)\n #p_data_fp32.add_(p_data_fp32, alpha=-group['weight_decay'] * lr_t) # pytorch 1.6+\n\n p_data_fp32.add_(-update)\n\n if p.data.dtype in {torch.float16, torch.bfloat16}:\n p.data.copy_(p_data_fp32)\n\n return loss" }, { "identifier": "Adahessian", "path": "optim/adahessian.py", "snippet": "class Adahessian(torch.optim.Optimizer):\n \"\"\"\n Implements the AdaHessian algorithm from \"ADAHESSIAN: An Adaptive Second OrderOptimizer for Machine Learning\"\n\n Arguments:\n params (iterable): iterable of parameters to optimize or dicts defining parameter groups\n lr (float, optional): learning rate (default: 0.1)\n betas ((float, float), optional): coefficients used for computing running averages of gradient and the\n squared hessian trace (default: (0.9, 0.999))\n eps (float, optional): term added to the denominator to improve numerical stability (default: 1e-8)\n weight_decay (float, optional): weight decay (L2 penalty) (default: 0.0)\n hessian_power (float, 
optional): exponent of the hessian trace (default: 1.0)\n update_each (int, optional): compute the hessian trace approximation only after *this* number of steps\n (to save time) (default: 1)\n n_samples (int, optional): how many times to sample `z` for the approximation of the hessian trace (default: 1)\n \"\"\"\n\n def __init__(self, params, lr=0.1, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0,\n hessian_power=1.0, update_each=1, n_samples=1, avg_conv_kernel=False):\n if not 0.0 <= lr:\n raise ValueError(f\"Invalid learning rate: {lr}\")\n if not 0.0 <= eps:\n raise ValueError(f\"Invalid epsilon value: {eps}\")\n if not 0.0 <= betas[0] < 1.0:\n raise ValueError(f\"Invalid beta parameter at index 0: {betas[0]}\")\n if not 0.0 <= betas[1] < 1.0:\n raise ValueError(f\"Invalid beta parameter at index 1: {betas[1]}\")\n if not 0.0 <= hessian_power <= 1.0:\n raise ValueError(f\"Invalid Hessian power value: {hessian_power}\")\n\n self.n_samples = n_samples\n self.update_each = update_each\n self.avg_conv_kernel = avg_conv_kernel\n\n # use a separate generator that deterministically generates the same `z`s across all GPUs in case of distributed training\n self.seed = 2147483647\n self.generator = torch.Generator().manual_seed(self.seed)\n\n defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, hessian_power=hessian_power)\n super(Adahessian, self).__init__(params, defaults)\n\n for p in self.get_params():\n p.hess = 0.0\n self.state[p][\"hessian step\"] = 0\n\n @property\n def is_second_order(self):\n return True\n\n def get_params(self):\n \"\"\"\n Gets all parameters in all param_groups with gradients\n \"\"\"\n\n return (p for group in self.param_groups for p in group['params'] if p.requires_grad)\n\n def zero_hessian(self):\n \"\"\"\n Zeros out the accumalated hessian traces.\n \"\"\"\n\n for p in self.get_params():\n if not isinstance(p.hess, float) and self.state[p][\"hessian step\"] % self.update_each == 0:\n p.hess.zero_()\n\n @torch.no_grad()\n def set_hessian(self):\n \"\"\"\n Computes the Hutchinson approximation of the hessian trace and accumulates it for each trainable parameter.\n \"\"\"\n\n params = []\n for p in filter(lambda p: p.grad is not None, self.get_params()):\n if self.state[p][\"hessian step\"] % self.update_each == 0: # compute the trace only each `update_each` step\n params.append(p)\n self.state[p][\"hessian step\"] += 1\n\n if len(params) == 0:\n return\n\n if self.generator.device != params[0].device: # hackish way of casting the generator to the right device\n self.generator = torch.Generator(params[0].device).manual_seed(self.seed)\n\n grads = [p.grad for p in params]\n\n for i in range(self.n_samples):\n # Rademacher distribution {-1.0, 1.0}\n zs = [torch.randint(0, 2, p.size(), generator=self.generator, device=p.device) * 2.0 - 1.0 for p in params]\n h_zs = torch.autograd.grad(\n grads, params, grad_outputs=zs, only_inputs=True, retain_graph=i < self.n_samples - 1)\n for h_z, z, p in zip(h_zs, zs, params):\n p.hess += h_z * z / self.n_samples # approximate the expected values of z*(H@z)\n\n @torch.no_grad()\n def step(self, closure=None):\n \"\"\"\n Performs a single optimization step.\n Arguments:\n closure (callable, optional) -- a closure that reevaluates the model and returns the loss (default: None)\n \"\"\"\n\n loss = None\n if closure is not None:\n loss = closure()\n\n self.zero_hessian()\n self.set_hessian()\n\n for group in self.param_groups:\n for p in group['params']:\n if p.grad is None or p.hess is None:\n continue\n\n if 
self.avg_conv_kernel and p.dim() == 4:\n p.hess = torch.abs(p.hess).mean(dim=[2, 3], keepdim=True).expand_as(p.hess).clone()\n\n # Perform correct stepweight decay as in AdamW\n p.mul_(1 - group['lr'] * group['weight_decay'])\n\n state = self.state[p]\n\n # State initialization\n if len(state) == 1:\n state['step'] = 0\n # Exponential moving average of gradient values\n state['exp_avg'] = torch.zeros_like(p)\n # Exponential moving average of Hessian diagonal square values\n state['exp_hessian_diag_sq'] = torch.zeros_like(p)\n\n exp_avg, exp_hessian_diag_sq = state['exp_avg'], state['exp_hessian_diag_sq']\n beta1, beta2 = group['betas']\n state['step'] += 1\n\n # Decay the first and second moment running average coefficient\n exp_avg.mul_(beta1).add_(p.grad, alpha=1 - beta1)\n exp_hessian_diag_sq.mul_(beta2).addcmul_(p.hess, p.hess, value=1 - beta2)\n\n bias_correction1 = 1 - beta1 ** state['step']\n bias_correction2 = 1 - beta2 ** state['step']\n\n k = group['hessian_power']\n denom = (exp_hessian_diag_sq / bias_correction2).pow_(k / 2).add_(group['eps'])\n\n # make update\n step_size = group['lr'] / bias_correction1\n p.addcdiv_(exp_avg, denom, value=-step_size)\n\n return loss" }, { "identifier": "AdamP", "path": "optim/adamp.py", "snippet": "class AdamP(Optimizer):\n def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,\n weight_decay=0, delta=0.1, wd_ratio=0.1, nesterov=False):\n defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay,\n delta=delta, wd_ratio=wd_ratio, nesterov=nesterov)\n super(AdamP, self).__init__(params, defaults)\n\n def _channel_view(self, x):\n return x.view(x.size(0), -1)\n\n def _layer_view(self, x):\n return x.view(1, -1)\n\n def _cosine_similarity(self, x, y, eps, view_func):\n x = view_func(x)\n y = view_func(y)\n\n x_norm = x.norm(dim=1).add_(eps)\n y_norm = y.norm(dim=1).add_(eps)\n dot = (x * y).sum(dim=1)\n\n return dot.abs() / x_norm / y_norm\n\n def _projection(self, p, grad, perturb, delta, wd_ratio, eps):\n wd = 1\n expand_size = [-1] + [1] * (len(p.shape) - 1)\n for view_func in [self._channel_view, self._layer_view]:\n\n cosine_sim = self._cosine_similarity(grad, p.data, eps, view_func)\n\n if cosine_sim.max() < delta / math.sqrt(view_func(p.data).size(1)):\n p_n = p.data / view_func(p.data).norm(dim=1).view(expand_size).add_(eps)\n perturb -= p_n * view_func(p_n * perturb).sum(dim=1).view(expand_size)\n wd = wd_ratio\n\n return perturb, wd\n\n return perturb, wd\n\n def step(self, closure=None):\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n for p in group['params']:\n if p.grad is None:\n continue\n\n grad = p.grad.data\n beta1, beta2 = group['betas']\n nesterov = group['nesterov']\n\n state = self.state[p]\n\n # State initialization\n if len(state) == 0:\n state['step'] = 0\n state['exp_avg'] = torch.zeros_like(p.data)\n state['exp_avg_sq'] = torch.zeros_like(p.data)\n\n # Adam\n exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']\n\n state['step'] += 1\n bias_correction1 = 1 - beta1 ** state['step']\n bias_correction2 = 1 - beta2 ** state['step']\n\n exp_avg.mul_(beta1).add_(1 - beta1, grad)\n exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)\n\n denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])\n step_size = group['lr'] / bias_correction1\n\n if nesterov:\n perturb = (beta1 * exp_avg + (1 - beta1) * grad) / denom\n else:\n perturb = exp_avg / denom\n\n # Projection\n wd_ratio = 1\n if len(p.shape) > 1:\n perturb, wd_ratio = 
self._projection(p, grad, perturb, group['delta'], group['wd_ratio'], group['eps'])\n\n # Weight decay\n if group['weight_decay'] > 0:\n p.data.mul_(1 - group['lr'] * group['weight_decay'] * wd_ratio)\n\n # Step\n p.data.add_(-step_size, perturb)\n\n return loss" }, { "identifier": "Lookahead", "path": "optim/lookahead.py", "snippet": "class Lookahead(Optimizer):\n def __init__(self, base_optimizer, alpha=0.5, k=6):\n if not 0.0 <= alpha <= 1.0:\n raise ValueError(f'Invalid slow update rate: {alpha}')\n if not 1 <= k:\n raise ValueError(f'Invalid lookahead steps: {k}')\n defaults = dict(lookahead_alpha=alpha, lookahead_k=k, lookahead_step=0)\n self.base_optimizer = base_optimizer\n self.param_groups = self.base_optimizer.param_groups\n self.defaults = base_optimizer.defaults\n self.defaults.update(defaults)\n self.state = defaultdict(dict)\n # manually add our defaults to the param groups\n for name, default in defaults.items():\n for group in self.param_groups:\n group.setdefault(name, default)\n\n def update_slow(self, group):\n for fast_p in group[\"params\"]:\n if fast_p.grad is None:\n continue\n param_state = self.state[fast_p]\n if 'slow_buffer' not in param_state:\n param_state['slow_buffer'] = torch.empty_like(fast_p.data)\n param_state['slow_buffer'].copy_(fast_p.data)\n slow = param_state['slow_buffer']\n slow.add_(group['lookahead_alpha'], fast_p.data - slow)\n fast_p.data.copy_(slow)\n\n def sync_lookahead(self):\n for group in self.param_groups:\n self.update_slow(group)\n\n def step(self, closure=None):\n #assert id(self.param_groups) == id(self.base_optimizer.param_groups)\n loss = self.base_optimizer.step(closure)\n for group in self.param_groups:\n group['lookahead_step'] += 1\n if group['lookahead_step'] % group['lookahead_k'] == 0:\n self.update_slow(group)\n return loss\n\n def state_dict(self):\n fast_state_dict = self.base_optimizer.state_dict()\n slow_state = {\n (id(k) if isinstance(k, torch.Tensor) else k): v\n for k, v in self.state.items()\n }\n fast_state = fast_state_dict['state']\n param_groups = fast_state_dict['param_groups']\n return {\n 'state': fast_state,\n 'slow_state': slow_state,\n 'param_groups': param_groups,\n }\n\n def load_state_dict(self, state_dict):\n fast_state_dict = {\n 'state': state_dict['state'],\n 'param_groups': state_dict['param_groups'],\n }\n self.base_optimizer.load_state_dict(fast_state_dict)\n\n # We want to restore the slow state, but share param_groups reference\n # with base_optimizer. 
This is a bit redundant but least code\n slow_state_new = False\n if 'slow_state' not in state_dict:\n print('Loading state_dict from optimizer without Lookahead applied.')\n state_dict['slow_state'] = defaultdict(dict)\n slow_state_new = True\n slow_state_dict = {\n 'state': state_dict['slow_state'],\n 'param_groups': state_dict['param_groups'], # this is pointless but saves code\n }\n super(Lookahead, self).load_state_dict(slow_state_dict)\n self.param_groups = self.base_optimizer.param_groups # make both ref same container\n if slow_state_new:\n # reapply defaults to catch missing lookahead specific ones\n for name, default in self.defaults.items():\n for group in self.param_groups:\n group.setdefault(name, default)" }, { "identifier": "Nadam", "path": "optim/nadam.py", "snippet": "class Nadam(Optimizer):\n \"\"\"Implements Nadam algorithm (a variant of Adam based on Nesterov momentum).\n\n It has been proposed in `Incorporating Nesterov Momentum into Adam`__.\n\n Arguments:\n params (iterable): iterable of parameters to optimize or dicts defining\n parameter groups\n lr (float, optional): learning rate (default: 2e-3)\n betas (Tuple[float, float], optional): coefficients used for computing\n running averages of gradient and its square\n eps (float, optional): term added to the denominator to improve\n numerical stability (default: 1e-8)\n weight_decay (float, optional): weight decay (L2 penalty) (default: 0)\n schedule_decay (float, optional): momentum schedule decay (default: 4e-3)\n\n __ http://cs229.stanford.edu/proj2015/054_report.pdf\n __ http://www.cs.toronto.edu/~fritz/absps/momentum.pdf\n\n Originally taken from: https://github.com/pytorch/pytorch/pull/1408\n NOTE: Has potential issues but does work well on some problems.\n \"\"\"\n\n def __init__(self, params, lr=2e-3, betas=(0.9, 0.999), eps=1e-8,\n weight_decay=0, schedule_decay=4e-3):\n defaults = dict(lr=lr, betas=betas, eps=eps,\n weight_decay=weight_decay, schedule_decay=schedule_decay)\n super(Nadam, self).__init__(params, defaults)\n\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data\n state = self.state[p]\n\n # State initialization\n if len(state) == 0:\n state['step'] = 0\n state['m_schedule'] = 1.\n state['exp_avg'] = grad.new().resize_as_(grad).zero_()\n state['exp_avg_sq'] = grad.new().resize_as_(grad).zero_()\n\n # Warming momentum schedule\n m_schedule = state['m_schedule']\n schedule_decay = group['schedule_decay']\n exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']\n beta1, beta2 = group['betas']\n eps = group['eps']\n state['step'] += 1\n t = state['step']\n\n if group['weight_decay'] != 0:\n grad = grad.add(group['weight_decay'], p.data)\n\n momentum_cache_t = beta1 * \\\n (1. - 0.5 * (0.96 ** (t * schedule_decay)))\n momentum_cache_t_1 = beta1 * \\\n (1. - 0.5 * (0.96 ** ((t + 1) * schedule_decay)))\n m_schedule_new = m_schedule * momentum_cache_t\n m_schedule_next = m_schedule * momentum_cache_t * momentum_cache_t_1\n state['m_schedule'] = m_schedule_new\n\n # Decay the first and second moment running average coefficient\n exp_avg.mul_(beta1).add_(1. - beta1, grad)\n exp_avg_sq.mul_(beta2).addcmul_(1. - beta2, grad, grad)\n exp_avg_sq_prime = exp_avg_sq / (1. 
- beta2 ** t)\n denom = exp_avg_sq_prime.sqrt_().add_(eps)\n\n p.data.addcdiv_(-group['lr'] * (1. - momentum_cache_t) / (1. - m_schedule_new), grad, denom)\n p.data.addcdiv_(-group['lr'] * momentum_cache_t_1 / (1. - m_schedule_next), exp_avg, denom)\n\n return loss" }, { "identifier": "NovoGrad", "path": "optim/novograd.py", "snippet": "class NovoGrad(Optimizer):\n def __init__(self, params, grad_averaging=False, lr=0.1, betas=(0.95, 0.98), eps=1e-8, weight_decay=0):\n defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)\n super(NovoGrad, self).__init__(params, defaults)\n self._lr = lr\n self._beta1 = betas[0]\n self._beta2 = betas[1]\n self._eps = eps\n self._wd = weight_decay\n self._grad_averaging = grad_averaging\n\n self._momentum_initialized = False\n\n def step(self, closure=None):\n loss = None\n if closure is not None:\n loss = closure()\n\n if not self._momentum_initialized:\n for group in self.param_groups:\n for p in group['params']:\n if p.grad is None:\n continue\n state = self.state[p]\n grad = p.grad.data\n if grad.is_sparse:\n raise RuntimeError('NovoGrad does not support sparse gradients')\n\n v = torch.norm(grad)**2\n m = grad/(torch.sqrt(v) + self._eps) + self._wd * p.data\n state['step'] = 0\n state['v'] = v\n state['m'] = m\n state['grad_ema'] = None\n self._momentum_initialized = True\n\n for group in self.param_groups:\n for p in group['params']:\n if p.grad is None:\n continue\n state = self.state[p]\n state['step'] += 1\n\n step, v, m = state['step'], state['v'], state['m']\n grad_ema = state['grad_ema']\n\n grad = p.grad.data\n g2 = torch.norm(grad)**2\n grad_ema = g2 if grad_ema is None else grad_ema * \\\n self._beta2 + g2 * (1. - self._beta2)\n grad *= 1.0 / (torch.sqrt(grad_ema) + self._eps)\n\n if self._grad_averaging:\n grad *= (1. - self._beta1)\n\n g2 = torch.norm(grad)**2\n v = self._beta2*v + (1. 
- self._beta2)*g2\n m = self._beta1*m + (grad / (torch.sqrt(v) + self._eps) + self._wd * p.data)\n bias_correction1 = 1 - self._beta1 ** step\n bias_correction2 = 1 - self._beta2 ** step\n step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1\n\n state['v'], state['m'] = v, m\n state['grad_ema'] = grad_ema\n p.data.add_(-step_size, m)\n return loss" }, { "identifier": "NvNovoGrad", "path": "optim/nvnovograd.py", "snippet": "class NvNovoGrad(Optimizer):\n \"\"\"\n Implements Novograd algorithm.\n\n Args:\n params (iterable): iterable of parameters to optimize or dicts defining\n parameter groups\n lr (float, optional): learning rate (default: 1e-3)\n betas (Tuple[float, float], optional): coefficients used for computing\n running averages of gradient and its square (default: (0.95, 0.98))\n eps (float, optional): term added to the denominator to improve\n numerical stability (default: 1e-8)\n weight_decay (float, optional): weight decay (L2 penalty) (default: 0)\n grad_averaging: gradient averaging\n amsgrad (boolean, optional): whether to use the AMSGrad variant of this\n algorithm from the paper `On the Convergence of Adam and Beyond`_\n (default: False)\n \"\"\"\n\n def __init__(self, params, lr=1e-3, betas=(0.95, 0.98), eps=1e-8,\n weight_decay=0, grad_averaging=False, amsgrad=False):\n if not 0.0 <= lr:\n raise ValueError(\"Invalid learning rate: {}\".format(lr))\n if not 0.0 <= eps:\n raise ValueError(\"Invalid epsilon value: {}\".format(eps))\n if not 0.0 <= betas[0] < 1.0:\n raise ValueError(\"Invalid beta parameter at index 0: {}\".format(betas[0]))\n if not 0.0 <= betas[1] < 1.0:\n raise ValueError(\"Invalid beta parameter at index 1: {}\".format(betas[1]))\n defaults = dict(lr=lr, betas=betas, eps=eps,\n weight_decay=weight_decay,\n grad_averaging=grad_averaging,\n amsgrad=amsgrad)\n\n super(NvNovoGrad, self).__init__(params, defaults)\n\n def __setstate__(self, state):\n super(NvNovoGrad, self).__setstate__(state)\n for group in self.param_groups:\n group.setdefault('amsgrad', False)\n\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data\n if grad.is_sparse:\n raise RuntimeError('Sparse gradients are not supported.')\n amsgrad = group['amsgrad']\n\n state = self.state[p]\n\n # State initialization\n if len(state) == 0:\n state['step'] = 0\n # Exponential moving average of gradient values\n state['exp_avg'] = torch.zeros_like(p.data)\n # Exponential moving average of squared gradient values\n state['exp_avg_sq'] = torch.zeros([]).to(state['exp_avg'].device)\n if amsgrad:\n # Maintains max of all exp. moving avg. of sq. grad. values\n state['max_exp_avg_sq'] = torch.zeros([]).to(state['exp_avg'].device)\n\n exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']\n if amsgrad:\n max_exp_avg_sq = state['max_exp_avg_sq']\n beta1, beta2 = group['betas']\n\n state['step'] += 1\n\n norm = torch.sum(torch.pow(grad, 2))\n\n if exp_avg_sq == 0:\n exp_avg_sq.copy_(norm)\n else:\n exp_avg_sq.mul_(beta2).add_(1 - beta2, norm)\n\n if amsgrad:\n # Maintains the maximum of all 2nd moment running avg. till now\n torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)\n # Use the max. for normalizing running avg. 
of gradient\n denom = max_exp_avg_sq.sqrt().add_(group['eps'])\n else:\n denom = exp_avg_sq.sqrt().add_(group['eps'])\n\n grad.div_(denom)\n if group['weight_decay'] != 0:\n grad.add_(group['weight_decay'], p.data)\n if group['grad_averaging']:\n grad.mul_(1 - beta1)\n exp_avg.mul_(beta1).add_(grad)\n\n p.data.add_(-group['lr'], exp_avg)\n\n return loss" }, { "identifier": "RAdam", "path": "optim/radam.py", "snippet": "class RAdam(Optimizer):\n\n def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0):\n defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)\n self.buffer = [[None, None, None] for ind in range(10)]\n super(RAdam, self).__init__(params, defaults)\n\n def __setstate__(self, state):\n super(RAdam, self).__setstate__(state)\n\n def step(self, closure=None):\n\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data.float()\n if grad.is_sparse:\n raise RuntimeError('RAdam does not support sparse gradients')\n\n p_data_fp32 = p.data.float()\n\n state = self.state[p]\n\n if len(state) == 0:\n state['step'] = 0\n state['exp_avg'] = torch.zeros_like(p_data_fp32)\n state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)\n else:\n state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)\n state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)\n\n exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']\n beta1, beta2 = group['betas']\n\n exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)\n exp_avg.mul_(beta1).add_(1 - beta1, grad)\n\n state['step'] += 1\n buffered = self.buffer[int(state['step'] % 10)]\n if state['step'] == buffered[0]:\n N_sma, step_size = buffered[1], buffered[2]\n else:\n buffered[0] = state['step']\n beta2_t = beta2 ** state['step']\n N_sma_max = 2 / (1 - beta2) - 1\n N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)\n buffered[1] = N_sma\n\n # more conservative since it's an approximated value\n if N_sma >= 5:\n step_size = group['lr'] * math.sqrt(\n (1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (\n N_sma_max - 2)) / (1 - beta1 ** state['step'])\n else:\n step_size = group['lr'] / (1 - beta1 ** state['step'])\n buffered[2] = step_size\n\n if group['weight_decay'] != 0:\n p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)\n\n # more conservative since it's an approximated value\n if N_sma >= 5:\n denom = exp_avg_sq.sqrt().add_(group['eps'])\n p_data_fp32.addcdiv_(-step_size, exp_avg, denom)\n else:\n p_data_fp32.add_(-step_size, exp_avg)\n\n p.data.copy_(p_data_fp32)\n\n return loss" }, { "identifier": "RMSpropTF", "path": "optim/rmsprop_tf.py", "snippet": "class RMSpropTF(Optimizer):\n \"\"\"Implements RMSprop algorithm (TensorFlow style epsilon)\n\n NOTE: This is a direct cut-and-paste of PyTorch RMSprop with eps applied before sqrt\n and a few other modifications to closer match Tensorflow for matching hyper-params.\n\n Noteworthy changes include:\n 1. Epsilon applied inside square-root\n 2. square_avg initialized to ones\n 3. LR scaling of update accumulated in momentum buffer\n\n Proposed by G. 
Hinton in his\n `course <http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf>`_.\n\n The centered version first appears in `Generating Sequences\n With Recurrent Neural Networks <https://arxiv.org/pdf/1308.0850v5.pdf>`_.\n\n Arguments:\n params (iterable): iterable of parameters to optimize or dicts defining\n parameter groups\n lr (float, optional): learning rate (default: 1e-2)\n momentum (float, optional): momentum factor (default: 0)\n alpha (float, optional): smoothing (decay) constant (default: 0.9)\n eps (float, optional): term added to the denominator to improve\n numerical stability (default: 1e-10)\n centered (bool, optional) : if ``True``, compute the centered RMSProp,\n the gradient is normalized by an estimation of its variance\n weight_decay (float, optional): weight decay (L2 penalty) (default: 0)\n decoupled_decay (bool, optional): decoupled weight decay as per https://arxiv.org/abs/1711.05101\n lr_in_momentum (bool, optional): learning rate scaling is included in the momentum buffer\n update as per defaults in Tensorflow\n\n \"\"\"\n\n def __init__(self, params, lr=1e-2, alpha=0.9, eps=1e-10, weight_decay=0, momentum=0., centered=False,\n decoupled_decay=False, lr_in_momentum=True):\n if not 0.0 <= lr:\n raise ValueError(\"Invalid learning rate: {}\".format(lr))\n if not 0.0 <= eps:\n raise ValueError(\"Invalid epsilon value: {}\".format(eps))\n if not 0.0 <= momentum:\n raise ValueError(\"Invalid momentum value: {}\".format(momentum))\n if not 0.0 <= weight_decay:\n raise ValueError(\"Invalid weight_decay value: {}\".format(weight_decay))\n if not 0.0 <= alpha:\n raise ValueError(\"Invalid alpha value: {}\".format(alpha))\n\n defaults = dict(lr=lr, momentum=momentum, alpha=alpha, eps=eps, centered=centered, weight_decay=weight_decay,\n decoupled_decay=decoupled_decay, lr_in_momentum=lr_in_momentum)\n super(RMSpropTF, self).__init__(params, defaults)\n\n def __setstate__(self, state):\n super(RMSpropTF, self).__setstate__(state)\n for group in self.param_groups:\n group.setdefault('momentum', 0)\n group.setdefault('centered', False)\n\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data\n if grad.is_sparse:\n raise RuntimeError('RMSprop does not support sparse gradients')\n state = self.state[p]\n\n # State initialization\n if len(state) == 0:\n state['step'] = 0\n state['square_avg'] = torch.ones_like(p.data) # PyTorch inits to zero\n if group['momentum'] > 0:\n state['momentum_buffer'] = torch.zeros_like(p.data)\n if group['centered']:\n state['grad_avg'] = torch.zeros_like(p.data)\n\n square_avg = state['square_avg']\n one_minus_alpha = 1. 
- group['alpha']\n\n state['step'] += 1\n\n if group['weight_decay'] != 0:\n if 'decoupled_decay' in group and group['decoupled_decay']:\n p.data.add_(-group['weight_decay'], p.data)\n else:\n grad = grad.add(group['weight_decay'], p.data)\n\n # Tensorflow order of ops for updating squared avg\n square_avg.add_(one_minus_alpha, grad.pow(2) - square_avg)\n # square_avg.mul_(alpha).addcmul_(1 - alpha, grad, grad) # PyTorch original\n\n if group['centered']:\n grad_avg = state['grad_avg']\n grad_avg.add_(one_minus_alpha, grad - grad_avg)\n # grad_avg.mul_(alpha).add_(1 - alpha, grad) # PyTorch original\n avg = square_avg.addcmul(-1, grad_avg, grad_avg).add(group['eps']).sqrt_() # eps moved in sqrt\n else:\n avg = square_avg.add(group['eps']).sqrt_() # eps moved in sqrt\n\n if group['momentum'] > 0:\n buf = state['momentum_buffer']\n # Tensorflow accumulates the LR scaling in the momentum buffer\n if 'lr_in_momentum' in group and group['lr_in_momentum']:\n buf.mul_(group['momentum']).addcdiv_(group['lr'], grad, avg)\n p.data.add_(-buf)\n else:\n # PyTorch scales the param update by LR\n buf.mul_(group['momentum']).addcdiv_(grad, avg)\n p.data.add_(-group['lr'], buf)\n else:\n p.data.addcdiv_(-group['lr'], grad, avg)\n\n return loss" }, { "identifier": "SGDP", "path": "optim/sgdp.py", "snippet": "class SGDP(Optimizer):\n def __init__(self, params, lr=required, momentum=0, dampening=0,\n weight_decay=0, nesterov=False, eps=1e-8, delta=0.1, wd_ratio=0.1):\n defaults = dict(lr=lr, momentum=momentum, dampening=dampening, weight_decay=weight_decay,\n nesterov=nesterov, eps=eps, delta=delta, wd_ratio=wd_ratio)\n super(SGDP, self).__init__(params, defaults)\n\n def _channel_view(self, x):\n return x.view(x.size(0), -1)\n\n def _layer_view(self, x):\n return x.view(1, -1)\n\n def _cosine_similarity(self, x, y, eps, view_func):\n x = view_func(x)\n y = view_func(y)\n\n x_norm = x.norm(dim=1).add_(eps)\n y_norm = y.norm(dim=1).add_(eps)\n dot = (x * y).sum(dim=1)\n\n return dot.abs() / x_norm / y_norm\n\n def _projection(self, p, grad, perturb, delta, wd_ratio, eps):\n wd = 1\n expand_size = [-1] + [1] * (len(p.shape) - 1)\n for view_func in [self._channel_view, self._layer_view]:\n\n cosine_sim = self._cosine_similarity(grad, p.data, eps, view_func)\n\n if cosine_sim.max() < delta / math.sqrt(view_func(p.data).size(1)):\n p_n = p.data / view_func(p.data).norm(dim=1).view(expand_size).add_(eps)\n perturb -= p_n * view_func(p_n * perturb).sum(dim=1).view(expand_size)\n wd = wd_ratio\n\n return perturb, wd\n\n return perturb, wd\n\n def step(self, closure=None):\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n weight_decay = group['weight_decay']\n momentum = group['momentum']\n dampening = group['dampening']\n nesterov = group['nesterov']\n\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data\n state = self.state[p]\n\n # State initialization\n if len(state) == 0:\n state['momentum'] = torch.zeros_like(p.data)\n\n # SGD\n buf = state['momentum']\n buf.mul_(momentum).add_(1 - dampening, grad)\n if nesterov:\n d_p = grad + momentum * buf\n else:\n d_p = buf\n\n # Projection\n wd_ratio = 1\n if len(p.shape) > 1:\n d_p, wd_ratio = self._projection(p, grad, d_p, group['delta'], group['wd_ratio'], group['eps'])\n\n # Weight decay\n if weight_decay != 0:\n p.data.mul_(1 - group['lr'] * group['weight_decay'] * wd_ratio / (1-momentum))\n\n # Step\n p.data.add_(-group['lr'], d_p)\n\n return loss" } ]
import torch from torch import optim as optim from .adafactor import Adafactor from .adahessian import Adahessian from .adamp import AdamP from .lookahead import Lookahead from .nadam import Nadam from .novograd import NovoGrad from .nvnovograd import NvNovoGrad from .radam import RAdam from .rmsprop_tf import RMSpropTF from .sgdp import SGDP from apex.optimizers import FusedNovoGrad, FusedAdam, FusedLAMB, FusedSGD
12,344
""" Optimizer Factory w/ Custom Weight Decay Hacked together by / Copyright 2020 Ross Wightman """ try: has_apex = True except ImportError: has_apex = False def add_weight_decay(model, image_encoder,text_encoder, weight_decay=1e-5, skip_list=()): decay = [] no_decay = [] for name, param in model.named_parameters(): if not param.requires_grad: continue # frozen weights if len(param.shape) == 1 or name.endswith(".bias") or name in skip_list: no_decay.append(param) else: decay.append(param) for name, param in image_encoder.named_parameters(): if not param.requires_grad: continue # frozen weights if len(param.shape) == 1 or name.endswith(".bias") or name in skip_list: no_decay.append(param) else: decay.append(param) for name, param in text_encoder.named_parameters(): if not param.requires_grad: continue # frozen weights if len(param.shape) == 1 or name.endswith(".bias") or name in skip_list: no_decay.append(param) else: decay.append(param) return [ {'params': no_decay, 'weight_decay': 0.}, {'params': decay, 'weight_decay': weight_decay}] def create_optimizer(args, model, image_encoder,text_encoder, filter_bias_and_bn=True): opt_lower = args.opt.lower() weight_decay = args.weight_decay if weight_decay and filter_bias_and_bn: skip = {} if hasattr(model, 'no_weight_decay'): skip = model.no_weight_decay() parameters = add_weight_decay(model,image_encoder,text_encoder, weight_decay, skip) weight_decay = 0. else: parameters = [filter(lambda p: p.requires_grad, model.parameters()),filter(lambda p: p.requires_grad, image_encoder.parameters()),filter(lambda p: p.requires_grad, text_encoder.parameters())] #model.parameters() # print(parameters) if 'fused' in opt_lower: assert has_apex and torch.cuda.is_available(), 'APEX and CUDA required for fused optimizers' opt_args = dict(lr=args.lr, weight_decay=weight_decay) if hasattr(args, 'opt_eps') and args.opt_eps is not None: opt_args['eps'] = args.opt_eps if hasattr(args, 'opt_betas') and args.opt_betas is not None: opt_args['betas'] = args.opt_betas if hasattr(args, 'opt_args') and args.opt_args is not None: opt_args.update(args.opt_args) opt_split = opt_lower.split('_') opt_lower = opt_split[-1] if opt_lower == 'sgd' or opt_lower == 'nesterov': opt_args.pop('eps', None) optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=True, **opt_args) elif opt_lower == 'momentum': opt_args.pop('eps', None) optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=False, **opt_args) elif opt_lower == 'adam': optimizer = optim.Adam(parameters, **opt_args) elif opt_lower == 'adamw': optimizer = optim.AdamW(parameters, **opt_args) elif opt_lower == 'nadam':
""" Optimizer Factory w/ Custom Weight Decay Hacked together by / Copyright 2020 Ross Wightman """ try: has_apex = True except ImportError: has_apex = False def add_weight_decay(model, image_encoder,text_encoder, weight_decay=1e-5, skip_list=()): decay = [] no_decay = [] for name, param in model.named_parameters(): if not param.requires_grad: continue # frozen weights if len(param.shape) == 1 or name.endswith(".bias") or name in skip_list: no_decay.append(param) else: decay.append(param) for name, param in image_encoder.named_parameters(): if not param.requires_grad: continue # frozen weights if len(param.shape) == 1 or name.endswith(".bias") or name in skip_list: no_decay.append(param) else: decay.append(param) for name, param in text_encoder.named_parameters(): if not param.requires_grad: continue # frozen weights if len(param.shape) == 1 or name.endswith(".bias") or name in skip_list: no_decay.append(param) else: decay.append(param) return [ {'params': no_decay, 'weight_decay': 0.}, {'params': decay, 'weight_decay': weight_decay}] def create_optimizer(args, model, image_encoder,text_encoder, filter_bias_and_bn=True): opt_lower = args.opt.lower() weight_decay = args.weight_decay if weight_decay and filter_bias_and_bn: skip = {} if hasattr(model, 'no_weight_decay'): skip = model.no_weight_decay() parameters = add_weight_decay(model,image_encoder,text_encoder, weight_decay, skip) weight_decay = 0. else: parameters = [filter(lambda p: p.requires_grad, model.parameters()),filter(lambda p: p.requires_grad, image_encoder.parameters()),filter(lambda p: p.requires_grad, text_encoder.parameters())] #model.parameters() # print(parameters) if 'fused' in opt_lower: assert has_apex and torch.cuda.is_available(), 'APEX and CUDA required for fused optimizers' opt_args = dict(lr=args.lr, weight_decay=weight_decay) if hasattr(args, 'opt_eps') and args.opt_eps is not None: opt_args['eps'] = args.opt_eps if hasattr(args, 'opt_betas') and args.opt_betas is not None: opt_args['betas'] = args.opt_betas if hasattr(args, 'opt_args') and args.opt_args is not None: opt_args.update(args.opt_args) opt_split = opt_lower.split('_') opt_lower = opt_split[-1] if opt_lower == 'sgd' or opt_lower == 'nesterov': opt_args.pop('eps', None) optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=True, **opt_args) elif opt_lower == 'momentum': opt_args.pop('eps', None) optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=False, **opt_args) elif opt_lower == 'adam': optimizer = optim.Adam(parameters, **opt_args) elif opt_lower == 'adamw': optimizer = optim.AdamW(parameters, **opt_args) elif opt_lower == 'nadam':
optimizer = Nadam(parameters, **opt_args)
4
2023-10-30 00:24:16+00:00
16k
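The record above centres on an optimizer factory whose add_weight_decay helper routes 1-D tensors, ".bias" parameters, and anything in skip_list into a zero-weight-decay group before the optimizer is built. The sketch below reproduces that grouping pattern in standalone form, simplified to a single model (the record applies the same split to model, image_encoder, and text_encoder) and fed to torch.optim.AdamW; the toy model and hyperparameters are illustrative, not the record's configuration.

import torch
import torch.nn as nn

def add_weight_decay(model: nn.Module, weight_decay: float = 1e-5, skip_list=()):
    """Split params into a decay group and a no-decay group (biases / 1-D tensors / skip_list)."""
    decay, no_decay = [], []
    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue  # frozen weights are skipped entirely
        if len(param.shape) == 1 or name.endswith(".bias") or name in skip_list:
            no_decay.append(param)
        else:
            decay.append(param)
    return [
        {"params": no_decay, "weight_decay": 0.0},
        {"params": decay, "weight_decay": weight_decay},
    ]

model = nn.Sequential(nn.Linear(32, 64), nn.LayerNorm(64), nn.Linear(64, 2))
param_groups = add_weight_decay(model, weight_decay=1e-5)
optimizer = torch.optim.AdamW(param_groups, lr=3e-4)  # weight decay now applied per group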
YichenZW/Coh-MGT-Detection
run_detector.py
[ { "identifier": "glue_compute_metrics", "path": "util.py", "snippet": "def glue_compute_metrics(task_name, preds, labels):\n assert len(preds) == len(labels)\n if task_name == \"cola\":\n return {\"mcc\": matthews_corrcoef(labels, preds)}\n elif task_name == \"sst-2\":\n return {\"acc\": simple_accuracy(preds, labels)}\n elif task_name == \"mrpc\" or task_name == \"deepfake\":\n return acc_and_f1(preds, labels)\n elif task_name == \"sts-b\":\n return pearson_and_spearman(preds, labels)\n elif task_name == \"qqp\":\n return acc_and_f1(preds, labels)\n elif task_name == \"mnli\":\n return {\"acc\": simple_accuracy(preds, labels)}\n elif task_name == \"mnli-mm\":\n return {\"acc\": simple_accuracy(preds, labels)}\n elif task_name == \"qnli\":\n return {\"acc\": simple_accuracy(preds, labels)}\n elif task_name == \"rte\":\n return {\"acc\": simple_accuracy(preds, labels)}\n elif task_name == \"wnli\":\n return {\"acc\": simple_accuracy(preds, labels)}\n elif task_name == \"hans\":\n return {\"acc\": simple_accuracy(preds, labels)}\n else:\n raise KeyError(task_name)" }, { "identifier": "glue_convert_examples_to_features", "path": "util.py", "snippet": "def glue_convert_examples_to_features(\n examples,\n tokenizer,\n max_length=512,\n task=None,\n label_list=None,\n output_mode=None,\n pad_on_left=False,\n pad_token=0,\n pad_token_segment_id=0,\n mask_padding_with_zero=True,\n):\n \"\"\"\n Loads a data file into a list of ``InputFeatures``\n Args:\n examples: List of ``InputExamples`` or ``tf.data.Dataset`` containing the examples.\n tokenizer: Instance of a tokenizer that will tokenize the examples\n max_length: Maximum example length\n task: GLUE task\n label_list: List of labels. Can be obtained from the processor using the ``processor.get_labels()`` method\n output_mode: String indicating the output mode. Either ``regression`` or ``classification``\n pad_on_left: If set to ``True``, the examples will be padded on the left rather than on the right (default)\n pad_token: Padding token\n pad_token_segment_id: The segment ID for the padding token (It is usually 0, but can vary such as for XLNet where it is 4)\n mask_padding_with_zero: If set to ``True``, the attention mask will be filled by ``1`` for actual values\n and by ``0`` for padded values. If set to ``False``, inverts it (``1`` for padded values, ``0`` for\n actual values)\n Returns:\n If the ``examples`` input is a ``tf.data.Dataset``, will return a ``tf.data.Dataset``\n containing the task-specific features. If the input is a list of ``InputExamples``, will return\n a list of task-specific ``InputFeatures`` which can be fed to the model.\n \"\"\"\n\n if task is not None:\n processor = glue_processors[task]()\n if label_list is None:\n label_list = processor.get_labels()\n logger.info(\"Using label list %s for task %s\" % (label_list, task))\n if output_mode is None:\n output_mode = glue_output_modes[task]\n logger.info(\"Using output mode %s for task %s\" % (output_mode, task))\n\n label_map = {label: i for i, label in enumerate(label_list)}\n\n features = []\n for ex_index, example in enumerate(examples):\n len_examples = 0\n\n len_examples = len(examples)\n if ex_index % 10000 == 0:\n logger.info(\"Writing example %d/%d\" % (ex_index, len_examples))\n\n inputs = tokenizer.encode_plus(\n example.text_a,\n add_special_tokens=True,\n max_length=max_length,\n return_token_type_ids=True,\n )\n input_ids, token_type_ids = inputs[\"input_ids\"], inputs[\"token_type_ids\"]\n\n # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n # Tokens are attended to.\n attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n padding_length = max_length - len(input_ids)\n if pad_on_left:\n input_ids = ([pad_token] * padding_length) + input_ids\n attention_mask = (\n [0 if mask_padding_with_zero else 1] * padding_length\n ) + attention_mask\n token_type_ids = ([pad_token_segment_id] * padding_length) + token_type_ids\n else:\n input_ids = input_ids + ([pad_token] * padding_length)\n attention_mask = attention_mask + (\n [0 if mask_padding_with_zero else 1] * padding_length\n )\n token_type_ids = token_type_ids + ([pad_token_segment_id] * padding_length)\n\n assert len(input_ids) == max_length, \"Error with input length {} vs {}\".format(\n len(input_ids), max_length\n )\n assert (\n len(attention_mask) == max_length\n ), \"Error with input length {} vs {}\".format(len(attention_mask), max_length)\n assert (\n len(token_type_ids) == max_length\n ), \"Error with input length {} vs {}\".format(len(token_type_ids), max_length)\n\n if output_mode == \"classification\":\n label = label_map[example.label]\n elif output_mode == \"regression\":\n label = float(example.label)\n else:\n raise KeyError(output_mode)\n\n if ex_index < 5:\n logger.info(\"*** Example ***\")\n logger.info(\"guid: %s\" % (example.guid))\n logger.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n logger.info(\n \"attention_mask: %s\" % \" \".join([str(x) for x in attention_mask])\n )\n logger.info(\n \"token_type_ids: %s\" % \" \".join([str(x) for x in token_type_ids])\n )\n logger.info(\"label: %s (id = %d)\" % (example.label, label))\n\n features.append(\n InputFeatures(\n input_ids=input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n label=label,\n nodes_index=example.nodes_index,\n adj_metric=example.adj_metric,\n sen2node=example.sen2node,\n nodes_ent=example.nodes_ent,\n )\n )\n\n return features" }, { "identifier": "glue_output_modes", "path": "util.py", "snippet": "class InputExample(object):\nclass InputFeatures(object):\nclass DeepFakeProcessor(DataProcessor):\n def __init__(\n self,\n guid,\n text_a,\n text_b=None,\n label=None,\n nodes_index=None,\n adj_metric=None,\n all_tokens=None,\n sen2node=None,\n nodes_ent=None,\n ):\n def __repr__(self):\n def to_dict(self):\n def to_json_string(self):\n def __init__(\n self,\n input_ids,\n attention_mask=None,\n token_type_ids=None,\n label=None,\n nodes_index=None,\n adj_metric=None,\n sen2node=None,\n nodes_ent=None,\n ):\n def __repr__(self):\n def to_dict(self):\n def to_json_string(self):\ndef glue_convert_examples_to_features(\n examples,\n tokenizer,\n max_length=512,\n task=None,\n label_list=None,\n output_mode=None,\n pad_on_left=False,\n pad_token=0,\n pad_token_segment_id=0,\n mask_padding_with_zero=True,\n):\n def get_example_from_tensor_dict(self, tensor_dict):\n def _read_jsonl(self, path):\n def get_train_examples(\n self, with_relation, data_dir, train_file=\"gpt2_500_train_Graph.jsonl\"\n ):\n def get_dev_examples(\n self, with_relation, data_dir, dev_file=\"gpt2_dev_Graph.jsonl\"\n ):\n def get_test_examples(\n self, with_relation, data_dir, test_file=\"gpt2_test_Graph.jsonl\"\n ):\n def get_labels(self):\n def _get_nodes(self, nodes):\n def _get_adj_metric(self, edges, drop_nodes, node_num, with_relation):\n def clean_string(self, string):\n def _create_examples(self, with_relation, inputs, set_type):\ndef simple_accuracy(preds, labels):\ndef acc_and_f1(preds, labels):\ndef 
pearson_and_spearman(preds, labels):\ndef glue_compute_metrics(task_name, preds, labels):\ndef xnli_compute_metrics(task_name, preds, labels):" }, { "identifier": "glue_processors", "path": "util.py", "snippet": "class InputExample(object):\nclass InputFeatures(object):\nclass DeepFakeProcessor(DataProcessor):\n def __init__(\n self,\n guid,\n text_a,\n text_b=None,\n label=None,\n nodes_index=None,\n adj_metric=None,\n all_tokens=None,\n sen2node=None,\n nodes_ent=None,\n ):\n def __repr__(self):\n def to_dict(self):\n def to_json_string(self):\n def __init__(\n self,\n input_ids,\n attention_mask=None,\n token_type_ids=None,\n label=None,\n nodes_index=None,\n adj_metric=None,\n sen2node=None,\n nodes_ent=None,\n ):\n def __repr__(self):\n def to_dict(self):\n def to_json_string(self):\ndef glue_convert_examples_to_features(\n examples,\n tokenizer,\n max_length=512,\n task=None,\n label_list=None,\n output_mode=None,\n pad_on_left=False,\n pad_token=0,\n pad_token_segment_id=0,\n mask_padding_with_zero=True,\n):\n def get_example_from_tensor_dict(self, tensor_dict):\n def _read_jsonl(self, path):\n def get_train_examples(\n self, with_relation, data_dir, train_file=\"gpt2_500_train_Graph.jsonl\"\n ):\n def get_dev_examples(\n self, with_relation, data_dir, dev_file=\"gpt2_dev_Graph.jsonl\"\n ):\n def get_test_examples(\n self, with_relation, data_dir, test_file=\"gpt2_test_Graph.jsonl\"\n ):\n def get_labels(self):\n def _get_nodes(self, nodes):\n def _get_adj_metric(self, edges, drop_nodes, node_num, with_relation):\n def clean_string(self, string):\n def _create_examples(self, with_relation, inputs, set_type):\ndef simple_accuracy(preds, labels):\ndef acc_and_f1(preds, labels):\ndef pearson_and_spearman(preds, labels):\ndef glue_compute_metrics(task_name, preds, labels):\ndef xnli_compute_metrics(task_name, preds, labels):" }, { "identifier": "RobertaForGraphBasedSequenceClassification", "path": "modeling_roberta.py", "snippet": "class RobertaForGraphBasedSequenceClassification(\n BertPreTrainedModel\n): \n def __init__(self, config):\n config.output_hidden_states = True\n config.output_attentions = True\n\n super(RobertaForGraphBasedSequenceClassification, self).__init__(config)\n self.num_labels = config.num_labels\n self.classifier = RobertaClassificationHead(config, graph_node_size=None)\n self.graph_aggregation = GCNGraphAgg(\n config.hidden_size, self.node_size, self.max_sentence_size\n )\n\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n nodes_index_mask=None,\n adj_metric=None,\n node_mask=None,\n sen2node=None,\n sentence_mask=None,\n sentence_length=None,\n ):\n outputs = self.roberta(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n )\n sequence_output = outputs[0][:, 0, :]\n \n hidden_states = outputs[2][0]\n\n graph_rep = self.graph_aggregation(\n hidden_states,\n nodes_index_mask,\n adj_metric,\n node_mask,\n sen2node,\n sentence_mask,\n sentence_length,\n )\n whole_rep = torch.cat([sequence_output, graph_rep], dim=-1)\n\n logits = self.classifier(whole_rep, dim=-1)\n\n outputs = (logits,) + outputs[2:]\n if labels is not None:\n if self.num_labels == 1:\n loss_fct = MSELoss()\n loss = loss_fct(logits.view(-1), labels.view(-1))\n else:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n\n 
outputs = (loss,) + outputs\n\n return outputs, whole_rep " }, { "identifier": "RobertaForGraphBasedSequenceClassification_CL", "path": "modeling_roberta.py", "snippet": "class RobertaForGraphBasedSequenceClassification_CL(BertPreTrainedModel):\n def __init__(self, config):\n config.output_hidden_states = True\n config.output_attentions = True\n\n super(RobertaForGraphBasedSequenceClassification_CL, self).__init__(config)\n self.temperature = 0.2\n self.num_labels = config.num_labels\n self.gcn_layer = config.task_specific_params[\"gcn_layer\"]\n self.max_node_num = config.task_specific_params[\"max_nodes_num\"]\n self.max_sentences = config.task_specific_params[\"max_sentences\"]\n self.max_sen_replen = config.task_specific_params[\"max_sen_replen\"]\n self.attention_maxscore = config.task_specific_params[\"attention_maxscore\"]\n self.relation_num = config.task_specific_params[\"relation_num\"]\n\n self.roberta = RobertaModel(config)\n self.classifier = RobertaClassificationHead(\n config, graph_node_size=self.max_sen_replen\n )\n self.graph_aggregation = GCNGraphAgg(\n config.hidden_size,\n self.max_sentences,\n self.gcn_layer,\n self.max_sen_replen,\n self.attention_maxscore,\n self.relation_num,\n )\n\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n nodes_index_mask=None,\n adj_metric=None,\n node_mask=None,\n sen2node=None,\n sentence_mask=None,\n sentence_length=None,\n batch_id=None,\n ):\n outputs = self.roberta(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n )\n sequence_output = outputs[0][:, 0, :]\n hidden_states = outputs[2][0]\n\n graph_rep = self.graph_aggregation(\n hidden_states,\n nodes_index_mask,\n adj_metric,\n node_mask,\n sen2node,\n sentence_mask,\n sentence_length,\n )\n whole_rep = torch.cat([sequence_output, graph_rep], dim=-1)\n\n logits = self.classifier(torch.cat([sequence_output, graph_rep], dim=-1))\n\n outputs = (logits,) + outputs[2:]\n if labels is not None:\n if self.num_labels == 1:\n loss_fct = MSELoss()\n loss = loss_fct(logits.view(-1), labels.view(-1))\n else:\n batch_size = len(labels)\n batch_idx_by_label = {}\n for i in range(2):\n batch_idx_by_label[i] = [\n idx\n for idx in range(batch_size)\n if int(labels.view(-1)[idx]) == i\n ] \n\n contraloss = self.contrastive_loss_labelwise_winslide(\n batch_size, batch_idx_by_label, whole_rep\n )\n\n loss_fct = CrossEntropyLoss()\n ce_loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n\n contraloss_weight = 0.6\n loss = (\n 1.0 - contraloss_weight\n ) * ce_loss + contraloss_weight * contraloss\n outputs = (loss,) + outputs\n return outputs, whole_rep \n\n def get_key(self, dic, value):\n return [k for k, v in dic.items() if value in v]\n\n def contrastive_loss_labelwise_winslide(\n self, batch_size, batch_idx_by_label, hidden_feats\n ):\n \"\"\"\n Hidden feats must be normalized\n\n \"\"\"\n hidden_feats = F.normalize(hidden_feats, dim=1)\n sim_matrix = torch.mm(hidden_feats, hidden_feats.T) \n loss = 0.0\n\n for i in range(batch_size):\n label_list = self.get_key(batch_idx_by_label, i)\n label = label_list[0]\n one_same_label = (\n torch.zeros((batch_size,))\n .to(sim_matrix.device)\n .scatter_(\n 0,\n torch.tensor(batch_idx_by_label[label]).to(sim_matrix.device),\n 1.0,\n )\n )\n one_diff_label = (\n torch.ones((batch_size,))\n .to(sim_matrix.device)\n 
.scatter_(\n 0,\n torch.tensor(batch_idx_by_label[label]).to(sim_matrix.device),\n 0.0,\n )\n )\n one_for_not_i = (\n torch.ones((batch_size,))\n .to(sim_matrix.device)\n .scatter_(0, torch.tensor([i]).to(sim_matrix.device), 0.0)\n ) \n one_for_numerator = one_same_label.mul(one_for_not_i)\n\n numerator = torch.sum(\n one_for_numerator * torch.exp(sim_matrix[i, :] / self.temperature)\n )\n denominator = torch.sum(\n one_for_not_i * torch.exp(sim_matrix[i, :] / self.temperature)\n )\n\n if numerator == 0:\n numerator += 1e-6\n if denominator == 0:\n denominator += 1e-6\n\n loss += -torch.log(numerator / denominator)\n\n return loss / batch_size" }, { "identifier": "RobertaForGraphBasedSequenceClassification_MBCL", "path": "modeling_roberta.py", "snippet": "class RobertaForGraphBasedSequenceClassification_MBCL(BertPreTrainedModel):\n def __init__(self, config, mb_dataloader, train_idx_by_label):\n config.output_hidden_states = True\n config.output_attentions = True\n\n super(RobertaForGraphBasedSequenceClassification_MBCL, self).__init__(config)\n self.temperature = 0.2\n self.num_labels = config.num_labels\n self.gcn_layer = config.task_specific_params[\"gcn_layer\"]\n self.max_node_num = config.task_specific_params[\"max_nodes_num\"]\n self.max_sentences = config.task_specific_params[\"max_sentences\"]\n self.max_sen_replen = config.task_specific_params[\"max_sen_replen\"]\n self.attention_maxscore = config.task_specific_params[\"attention_maxscore\"]\n self.relation_num = config.task_specific_params[\"relation_num\"]\n self.train_idx_by_label = train_idx_by_label\n self.classifier = RobertaClassificationHead(\n config, graph_node_size=self.max_sen_replen\n )\n self.model_q = EncoderForMBCL(config)\n self.model_k = EncoderForMBCL(config)\n for param_q, param_k in zip(\n self.model_q.parameters(), self.model_k.parameters()\n ):\n param_k.data.copy_(param_q.data) \n self.model_q.cuda()\n self.model_k.cuda()\n with torch.no_grad():\n for k, item in enumerate(mb_dataloader):\n input_ids = item[0].cuda()\n attention_mask = item[1].cuda()\n labels = item[3].cuda()\n nodes_index_mask = item[4].cuda()\n adj_metric = item[5].cuda()\n node_mask = item[6].cuda()\n sen2node = item[7].cuda()\n sentence_mask = item[8].cuda()\n sentence_length = item[9].cuda()\n\n output = self.model_q(\n input_ids=input_ids,\n attention_mask=attention_mask,\n labels=labels,\n nodes_index_mask=nodes_index_mask,\n adj_metric=adj_metric,\n node_mask=node_mask,\n sen2node=sen2node,\n sentence_mask=sentence_mask,\n sentence_length=sentence_length,\n )\n init_feat = F.normalize(output[1], dim=1)\n if k == 0:\n self.queue = init_feat\n else:\n self.queue = torch.vstack((self.queue, init_feat))\n\n print(self.queue.size())\n print(\"***queue already builded***\")\n\n self.config = self.model_q.config\n self.feat_dim = self.config.hidden_size\n\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n nodes_index_mask=None,\n adj_metric=None,\n node_mask=None,\n sen2node=None,\n sentence_mask=None,\n sentence_length=None,\n batch_id=None,\n ):\n if self.training:\n batch_size = int(input_ids.size(0))\n output_q = self.model_q(\n input_ids,\n attention_mask,\n token_type_ids,\n position_ids,\n head_mask,\n inputs_embeds,\n labels,\n nodes_index_mask,\n adj_metric,\n node_mask,\n sen2node,\n sentence_mask,\n sentence_length,\n batch_id,\n ) \n q_feat = output_q[1]\n logits = self.classifier(output_q[1])\n outputs = (logits,) 
+ output_q[0]\n loss_fct = CrossEntropyLoss()\n q_ce_loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n output_k = self.model_k(\n input_ids,\n attention_mask,\n token_type_ids,\n position_ids,\n head_mask,\n inputs_embeds,\n labels,\n nodes_index_mask,\n adj_metric,\n node_mask,\n sen2node,\n sentence_mask,\n sentence_length,\n batch_id,\n )\n k_feat = output_k[1]\n self.dequeue_and_enqueue(k_feat, batch_id)\n batch_idx_by_label = {}\n for i in range(2):\n batch_idx_by_label[i] = [\n idx for idx in range(batch_size) if labels[idx] == i\n ] \n contraloss = self.contrastive_loss_es(\n batch_size, batch_idx_by_label, q_feat\n )\n self.momentum_update(m=0.999)\n contraloss_weight = 0.6\n loss = (\n 1.0 - contraloss_weight\n ) * q_ce_loss + contraloss_weight * contraloss\n\n outputs = (loss,) + outputs\n\n return outputs, output_q[1] \n else:\n batch_size = int(input_ids.size(0))\n output_q = self.model_q(\n input_ids,\n attention_mask,\n token_type_ids,\n position_ids,\n head_mask,\n inputs_embeds,\n labels,\n nodes_index_mask,\n adj_metric,\n node_mask,\n sen2node,\n sentence_mask,\n sentence_length,\n batch_id,\n ) \n q_feat = output_q[1]\n logits = self.classifier(output_q[1])\n outputs = (logits,) + output_q[0]\n loss_fct = CrossEntropyLoss()\n q_ce_loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n\n batch_idx_by_label = {}\n for i in range(2):\n batch_idx_by_label[i] = [\n idx for idx in range(batch_size) if labels[idx] == i\n ] \n contraloss = self.contrastive_loss_es(\n batch_size, batch_idx_by_label, q_feat\n )\n contraloss_weight = 0.6\n loss = (\n 1.0 - contraloss_weight\n ) * q_ce_loss + contraloss_weight * contraloss\n\n outputs = (loss,) + outputs\n\n return outputs, output_q[1] \n\n def get_key(self, dic, value):\n return [k for k, v in dic.items() if value in v]\n\n def contrastive_loss_es(self, batch_size, batch_idx_by_label, hidden_feats):\n hidden_feats = F.normalize(hidden_feats, dim=1)\n change_dic = {0: 1, 1: 0}\n loss = 0\n\n for i in batch_idx_by_label:\n q = hidden_feats[batch_idx_by_label[i]]\n pos_bank = self.queue[self.train_idx_by_label[i]]\n pos_pair = torch.mm(q, pos_bank.transpose(0, 1))\n bottom_k = torch.topk(pos_pair, k=100, dim=1, largest=False).values\n neg_bank = self.queue[self.train_idx_by_label[change_dic[i]]]\n neg_pair = torch.mm(q, neg_bank.transpose(0, 1))\n top_k = torch.topk(neg_pair, k=100, dim=1).values\n numerator = torch.sum(torch.exp(bottom_k / self.temperature), dim=1)\n denominator = (\n torch.sum(torch.exp(top_k / self.temperature), dim=1) + numerator\n )\n\n for nid in range(len(numerator)):\n if numerator[nid] == 0:\n numerator[nid] += 1e-6\n for did in range(len(denominator)):\n if denominator[did] == 0:\n denominator[did] += 1e-6\n loss += torch.sum(-1.0 * torch.log(numerator / denominator))\n\n return loss / batch_size\n\n @torch.no_grad()\n def momentum_update(self, m=0.999):\n \"\"\"\n encoder_k = m * encoder_k + (1 - m) encoder_q\n \"\"\"\n for param_q, param_k in zip(\n self.model_q.parameters(), self.model_k.parameters()\n ):\n param_k.data = param_k.data * m + param_q.data * (1.0 - m)\n\n def dequeue_and_enqueue(self, hidden_batch_feats, selected_batch_idx):\n \"\"\"\n Update memory bank by batch window slide; hidden_batch_feats must be normalized\n \"\"\"\n assert hidden_batch_feats.size()[1] == self.queue.size()[1]\n\n self.queue[selected_batch_idx] = F.normalize(hidden_batch_feats, dim=1)" }, { "identifier": "EncoderForMBCL", "path": "modeling_roberta.py", "snippet": "class 
EncoderForMBCL(BertPreTrainedModel):\n def __init__(self, config):\n super(EncoderForMBCL, self).__init__(config)\n self.max_sen_replen = config.task_specific_params[\"max_sen_replen\"]\n self.max_sentences = config.task_specific_params[\"max_sentences\"]\n self.gcn_layer = config.task_specific_params[\"gcn_layer\"]\n self.attention_maxscore = config.task_specific_params[\"attention_maxscore\"]\n self.relation_num = config.task_specific_params[\"relation_num\"]\n\n self.roberta = RobertaModel(config)\n self.graph_aggregation = GCNGraphAgg(\n config.hidden_size,\n self.max_sentences,\n self.gcn_layer,\n self.max_sen_replen,\n self.attention_maxscore,\n self.relation_num,\n )\n\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n nodes_index_mask=None,\n adj_metric=None,\n node_mask=None,\n sen2node=None,\n sentence_mask=None,\n sentence_length=None,\n batch_id=None,\n ):\n outputs = self.roberta(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n )\n sequence_output = outputs[0][:, 0, :] \n hidden_states = outputs[2][0] \n\n graph_rep = self.graph_aggregation(\n hidden_states,\n nodes_index_mask,\n adj_metric,\n node_mask,\n sen2node,\n sentence_mask,\n sentence_length,\n )\n\n whole_rep = torch.cat([sequence_output, graph_rep], dim=-1) \n\n return outputs[2:], whole_rep" }, { "identifier": "RobertaForGraphBasedSequenceClassification_RFCL", "path": "modeling_roberta.py", "snippet": "class RobertaForGraphBasedSequenceClassification_RFCL(BertPreTrainedModel):\n def __init__(self, config):\n config.output_hidden_states = True\n config.output_attentions = True\n\n super(RobertaForGraphBasedSequenceClassification_RFCL, self).__init__(config)\n self.temperature = 0.2\n self.num_labels = config.num_labels\n self.gcn_layer = config.task_specific_params[\"gcn_layer\"]\n self.max_node_num = config.task_specific_params[\"max_nodes_num\"]\n self.max_sentences = config.task_specific_params[\"max_sentences\"]\n self.max_sen_replen = config.task_specific_params[\"max_sen_replen\"]\n self.attention_maxscore = config.task_specific_params[\"attention_maxscore\"]\n self.relation_num = config.task_specific_params[\"relation_num\"]\n\n self.roberta = RobertaModel(config)\n self.classifier = RobertaClassificationHead(\n config, graph_node_size=self.max_sen_replen\n )\n self.graph_aggregation = GCNGraphAgg(\n config.hidden_size,\n self.max_sentences,\n self.gcn_layer,\n self.max_sen_replen,\n self.attention_maxscore,\n self.relation_num,\n )\n\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n nodes_index_mask=None,\n adj_metric=None,\n node_mask=None,\n sen2node=None,\n sentence_mask=None,\n sentence_length=None,\n batch_id=None,\n ):\n outputs = self.roberta(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n )\n sequence_output = outputs[0][:, 0, :]\n hidden_states = outputs[2][0]\n\n graph_rep = self.graph_aggregation(\n hidden_states,\n nodes_index_mask,\n adj_metric,\n node_mask,\n sen2node,\n sentence_mask,\n sentence_length,\n )\n whole_rep = torch.cat([sequence_output, graph_rep], dim=-1)\n\n logits = self.classifier(torch.cat([sequence_output, graph_rep], 
dim=-1))\n\n outputs = (logits,) + outputs[2:]\n\n if labels is not None:\n if self.num_labels == 1:\n loss_fct = MSELoss()\n loss = loss_fct(logits.view(-1), labels.view(-1))\n else:\n batch_size = len(labels)\n batch_idx_by_label = {}\n for i in range(2):\n batch_idx_by_label[i] = [\n idx\n for idx in range(batch_size)\n if int(labels.view(-1)[idx]) == i\n ] \n\n contraloss = self.contrastive_loss_es(\n batch_size, batch_idx_by_label, whole_rep\n )\n\n loss_fct = CrossEntropyLoss()\n ce_loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n\n contraloss_weight = 0.6\n loss = (\n 1.0 - contraloss_weight\n ) * ce_loss + contraloss_weight * contraloss\n\n outputs = (loss,) + outputs\n\n return outputs, whole_rep \n\n def get_key(self, dic, value):\n return [k for k, v in dic.items() if value in v]\n\n def contrastive_loss_es(self, batch_size, batch_idx_by_label, hidden_feats):\n hidden_feats = F.normalize(hidden_feats, dim=1)\n loss = 0\n sim_matrix = torch.mm(hidden_feats, hidden_feats.T) \n loss = 0.0\n\n for i in range(batch_size):\n label_list = self.get_key(batch_idx_by_label, i)\n label = label_list[0]\n one_same_label = (\n torch.zeros((batch_size,))\n .to(sim_matrix.device)\n .scatter_(\n 0,\n torch.tensor(batch_idx_by_label[label]).to(sim_matrix.device),\n 1.0,\n )\n )\n one_diff_label = (\n torch.ones((batch_size,))\n .to(sim_matrix.device)\n .scatter_(\n 0,\n torch.tensor(batch_idx_by_label[label]).to(sim_matrix.device),\n 0.0,\n )\n )\n one_for_not_i = (\n torch.ones((batch_size,))\n .to(sim_matrix.device)\n .scatter_(0, torch.tensor([i]).to(sim_matrix.device), 0.0)\n ) \n one_for_numerator = one_same_label.mul(one_for_not_i)\n one_for_neg = one_diff_label.mul(one_for_not_i)\n\n numerator = torch.sum(\n one_for_numerator * torch.exp(sim_matrix[i, :] / self.temperature)\n )\n denominator = torch.sum(\n one_for_not_i * torch.exp(sim_matrix[i, :] / self.temperature)\n )\n\n if numerator == 0:\n numerator += 1e-6\n if denominator == 0:\n denominator += 1e-6\n\n loss += -torch.log(numerator / denominator)\n\n return loss / batch_size" } ]
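The _CL, _MBCL, and _RFCL heads in the context above all mix a cross-entropy term with an in-batch, label-wise contrastive term (temperature 0.2, combined at weight 0.6 in the snippets). The sketch below restates that contrastive term in vectorized form rather than the per-anchor loop of contrastive_loss_labelwise_winslide; the batch size, feature dimension, and toy tensors are illustrative only.

import torch
import torch.nn.functional as F

def labelwise_contrastive_loss(feats: torch.Tensor, labels: torch.Tensor, temperature: float = 0.2):
    """For each anchor, positives are the other in-batch samples with the same label;
    the denominator runs over every other sample, as in the snippet's loop."""
    feats = F.normalize(feats, dim=1)
    sim = torch.exp(feats @ feats.T / temperature)                     # (B, B) similarity kernel
    not_self = 1.0 - torch.eye(len(labels), device=feats.device)       # exclude the anchor itself
    same_label = (labels.unsqueeze(0) == labels.unsqueeze(1)).float() * not_self
    numerator = (sim * same_label).sum(dim=1).clamp_min(1e-6)          # positives only
    denominator = (sim * not_self).sum(dim=1).clamp_min(1e-6)          # all other samples
    return (-torch.log(numerator / denominator)).mean()

# Illustrative combination with cross-entropy at the 0.6 weighting used in the snippets:
feats, labels = torch.randn(8, 16), torch.randint(0, 2, (8,))
contra = labelwise_contrastive_loss(feats, labels)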
import os import torch import argparse import logging import random import wandb import numpy as np import ray from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset from torch.utils.data.distributed import DistributedSampler from tqdm import tqdm, trange from torch.optim import AdamW from transformers import ( set_seed, AutoTokenizer, AutoConfig, AutoModel, AutoModelForSequenceClassification, get_linear_schedule_with_warmup, ) from functools import partial from util import glue_compute_metrics as compute_metrics from util import ( glue_convert_examples_to_features as convert_examples_to_features, ) from util import glue_output_modes as output_modes from util import glue_processors as processors from modeling_roberta import ( RobertaForGraphBasedSequenceClassification, RobertaForGraphBasedSequenceClassification_CL, RobertaForGraphBasedSequenceClassification_MBCL, EncoderForMBCL, RobertaForGraphBasedSequenceClassification_RFCL, ) from ray import tune from ray.tune import CLIReporter from ray.tune.schedulers import ASHAScheduler from apex import amp
11,049
torch.save( scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt") ) logger.info("Saving optimizer and scheduler states to %s", output_dir) if args.max_steps > 0 and global_step > args.max_steps: train_iterator.close() break return_res = { "max_acc": max_acc, "max_acc_f1": max_acc_f1, "max_f1": max_f1, "max_f1_acc": max_f1_acc, } if args.do_ray: tune.report( accuracy=max_acc, max_acc_f1=max_acc_f1, f1=max_f1, max_f1_acc=max_f1_acc ) return global_step, tr_loss / global_step, return_res, output_dir def evaluate(args, model, tokenizer, checkpoint=None, prefix="", mode="dev"): eval_task_names = (args.task_name,) eval_outputs_dirs = (args.output_dir,) results = {} for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs): eval_dataset = load_and_cache_examples( args, eval_task, tokenizer, evaluate=True, mode=mode ) if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]: os.makedirs(eval_output_dir) args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) # Note that DistributedSampler samples randomly. eval_sampler = SequentialSampler(eval_dataset) eval_dataloader = DataLoader( eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size ) if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel): model = torch.nn.DataParallel(model) # Evaluation logger.info("***** Running evaluation {} *****".format(prefix)) logger.info(" Num examples = %d", len(eval_dataset)) logger.info(" Batch size = %d", args.eval_batch_size) eval_loss = 0.0 nb_eval_steps = 0 preds, out_label_ids = None, None for batch in tqdm(eval_dataloader, desc="Evaluating"): model.eval() batch = tuple(t.to(args.device) for t in batch) with torch.no_grad(): inputs = { "input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3], "nodes_index_mask": batch[4], "adj_metric": batch[5], "node_mask": batch[6], "sen2node": batch[7], "sentence_mask": batch[8], "sentence_length": batch[9], } if args.model_type != "distilbert": inputs["token_type_ids"] = ( batch[2] if args.model_type in ["bert", "xlnet", "albert"] else None ) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids outputs, _ = model(**inputs) tmp_eval_loss, logits = outputs[:2] eval_loss += tmp_eval_loss.mean().item() nb_eval_steps += 1 if preds is None: preds = logits.detach().cpu().numpy() out_label_ids = inputs["labels"].detach().cpu().numpy() else: preds = np.append(preds, logits.detach().cpu().numpy(), axis=0) out_label_ids = np.append( out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0 ) probs = preds eval_loss = eval_loss / nb_eval_steps if args.output_mode == "classification": preds = np.argmax(preds, axis=1) elif args.output_mode == "regression": preds = np.squeeze(preds) result = compute_metrics(eval_task, preds, out_label_ids) results.update(result) output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt") with open(output_eval_file, "w") as writer: logger.info("***** Eval results {} *****".format(prefix)) for key in sorted(result.keys()): logger.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) wandb.log( { "eval/acc": result["acc"], "eval/f1": result["f1"], "eval/acc_and_f1": result["acc_and_f1"], } ) return results def load_and_cache_examples( args, task, tokenizer, evaluate=False, mode="train", dataset_name="", rel="" ): if args.local_rank not in [-1, 0] and not evaluate: torch.distributed.barrier() processor = processors[task]()
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Based on code from the above authors, modifications made by Xi'an Jiaotong University. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. logger = logging.getLogger(__name__) def set_seed(args): random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed) def number_h(num): for unit in ["", "K", "M", "G", "T", "P", "E", "Z"]: if abs(num) < 1000.0: return "%3.1f%s" % (num, unit) num /= 1000.0 return "%.1f%s" % (num, "Yi") def generate_shaped_nodes_mask(nodes, max_seq_length, max_nodes_num): nodes_mask = np.zeros(shape=(max_nodes_num, max_seq_length)) nodes_num = min(len(nodes), max_nodes_num) for i in range(nodes_num): span = nodes[i] if span[0] != -1: if span[0] < max_seq_length - 1: end_pos = ( span[1] if span[1] < max_seq_length - 1 else max_seq_length - 1 ) nodes_mask[i, span[0] + 1 : end_pos + 1] = 1 else: continue return nodes_mask, nodes_num def generate_shaped_edge_mask(adj_metric, nodes_num, max_nodes_num, relation_n): if nodes_num != 0: if relation_n != 0: new_adj_metric = np.zeros(shape=(relation_n, max_nodes_num, max_nodes_num)) for i in range(relation_n): new_adj_metric[i][:nodes_num, :nodes_num] = adj_metric[i][ :nodes_num, :nodes_num ] else: new_adj_metric = np.zeros(shape=(max_nodes_num, max_nodes_num)) new_adj_metric[:nodes_num, :nodes_num] = adj_metric[:nodes_num, :nodes_num] return new_adj_metric def train(args, train_dataset, model, tokenizer): """Train the model""" total_params = sum(p.numel() for p in model.parameters()) total_trainable_params = sum( p.numel() for p in model.parameters() if p.requires_grad ) print("Total Params:", number_h(total_params)) print("Total Trainable Params:", number_h(total_trainable_params)) args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) train_sampler = ( RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) ) train_dataloader = DataLoader( train_dataset, sampler=train_sampler, batch_size=args.train_batch_size ) if args.max_steps > 0: t_total = args.max_steps args.num_train_epochs = ( args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 ) else: t_total = ( len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs ) # Prepare optimizer and schedule (linear warmup and decay) no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [ p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay) ], "weight_decay": args.weight_decay, }, { "params": [ p for n, p in model.named_parameters() if any(nd in n for nd in no_decay) ], "weight_decay": 0.01, }, ] optimizer = AdamW( optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon ) scheduler = get_linear_schedule_with_warmup( optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total ) # 
Check if saved optimizer or scheduler states exist if os.path.isfile( os.path.join(args.model_name_or_path, "optimizer.pt") ) and os.path.isfile(os.path.join(args.model_name_or_path, "scheduler.pt")): optimizer.load_state_dict( torch.load(os.path.join(args.model_name_or_path, "optimizer.pt")) ) scheduler.load_state_dict( torch.load(os.path.join(args.model_name_or_path, "scheduler.pt")) ) if args.fp16: try: except ImportError: raise ImportError( "Please install apex from https://www.github.com/nvidia/apex to use fp16 training." ) model, optimizer = amp.initialize( model, optimizer, opt_level=args.fp16_opt_level ) # Multi-gpu training (should be after apex fp16 initialization) if args.n_gpu > 1: model = torch.nn.DataParallel(model) # Distributed training (should be after apex fp16 initialization) if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel( model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True, ) # Training logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_dataset)) logger.info(" Num Epochs = %d", args.num_train_epochs) logger.info( " Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size ) logger.info( " Total train batch size (w. parallel, distributed & accumulation) = %d", args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1), ) logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", t_total) best_acc, best_f1 = 0.0, 0.0 global_step, epochs_trained, steps_trained_in_current_epoch = 0, 0, 0 # Check if continuing training from a checkpoint if os.path.exists(args.model_name_or_path): # set global_step to gobal_step of last saved checkpoint from model path global_step = int(args.model_name_or_path.split("-")[-1].split("/")[0]) epochs_trained = global_step // ( len(train_dataloader) // args.gradient_accumulation_steps ) steps_trained_in_current_epoch = global_step % ( len(train_dataloader) // args.gradient_accumulation_steps ) logger.info( " Continuing training from checkpoint, will skip to saved global_step" ) logger.info(" Continuing training from epoch %d", epochs_trained) logger.info(" Continuing training from global step %d", global_step) logger.info( " Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch, ) tr_loss, logging_loss = 0.0, 0.0 model.zero_grad() train_iterator = trange( epochs_trained, int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0], ) set_seed(args) max_acc, max_acc_f1, max_f1, max_f1_acc = 0.0, 0.0, 0.0, 0.0 for idx, _ in enumerate(train_iterator): tr_loss = 0.0 epoch_iterator = tqdm( train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0] ) for step, batch in enumerate(epoch_iterator): # Skip past any already trained steps if resuming training if steps_trained_in_current_epoch > 0: steps_trained_in_current_epoch -= 1 continue model.train() batch = tuple(t.to(args.device) for t in batch) inputs = { "input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3], "nodes_index_mask": batch[4], "adj_metric": batch[5], "node_mask": batch[6], "sen2node": batch[7], "sentence_mask": batch[8], "sentence_length": batch[9], "batch_id": batch[10], } if args.model_type != "distilbert": inputs["token_type_ids"] = ( batch[2] if args.model_type in ["bert", "xlnet", "albert"] else None ) outputs, _ = model(**inputs) loss = outputs[0] 
wandb.log({"train/loss": loss}) if args.n_gpu > 1: loss = loss.mean() if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() tr_loss += loss.item() epoch_iterator.set_description( "loss {}".format( round(tr_loss * args.gradient_accumulation_steps / (step + 1), 4) ) ) if (step + 1) % args.gradient_accumulation_steps == 0: if args.fp16: torch.nn.utils.clip_grad_norm_( amp.master_params(optimizer), args.max_grad_norm ) else: torch.nn.utils.clip_grad_norm_( model.parameters(), args.max_grad_norm ) optimizer.step() scheduler.step() model.zero_grad() global_step += 1 if ( args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0 ): logs = {} if ( args.local_rank == -1 and args.evaluate_during_training ): results = evaluate(args, model, tokenizer) for key, value in results.items(): eval_key = "eval_{}".format(key) logs[eval_key] = value loss_scalar = (tr_loss - logging_loss) / args.logging_steps learning_rate_scalar = scheduler.get_lr()[0] logs["learning_rate"] = learning_rate_scalar logs["loss"] = loss_scalar logging_loss = tr_loss wandb.log({"eval/loss": loss_scalar}) if args.max_steps > 0 and global_step > args.max_steps: epoch_iterator.close() break if args.local_rank in [-1, 0] and args.save_steps > 0 and args.do_eval: results = evaluate(args, model, tokenizer, checkpoint=str(idx)) logger.info("the results is {}".format(results)) if results["acc"] > max_acc: max_acc = results["acc"] max_acc_f1 = results["f1"] if results["f1"] > max_f1: max_f1 = results["f1"] max_f1_acc = results["acc"] if results["f1"] > best_f1: best_f1 = results["f1"] output_dir = os.path.join( args.output_dir, "seed-{}".format(args.seed), "checkpoint-{}-{}".format(idx, best_f1), ) if not os.path.exists(output_dir): os.makedirs(output_dir) model_to_save = ( model.module if hasattr(model, "module") else model ) # Take care of distributed/parallel training model_to_save.save_pretrained(output_dir) torch.save( args, os.path.join(output_dir, "training_{}.bin".format(idx)) ) logger.info("Saving model checkpoint to %s", output_dir) torch.save( optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt") ) torch.save( scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt") ) logger.info("Saving optimizer and scheduler states to %s", output_dir) if args.max_steps > 0 and global_step > args.max_steps: train_iterator.close() break return_res = { "max_acc": max_acc, "max_acc_f1": max_acc_f1, "max_f1": max_f1, "max_f1_acc": max_f1_acc, } if args.do_ray: tune.report( accuracy=max_acc, max_acc_f1=max_acc_f1, f1=max_f1, max_f1_acc=max_f1_acc ) return global_step, tr_loss / global_step, return_res, output_dir def mb_train(args, train_dataset, encoder_q, encoder_k, dataloader, tokenizer): """Train the model""" global memory_queue encoder_q.train() total_params = sum(p.numel() for p in encoder_q.parameters()) total_trainable_params = sum( p.numel() for p in encoder_q.parameters() if p.requires_grad ) print("Encoder Params:", number_h(total_params)) print("Encoder Trainable Params:", number_h(total_trainable_params)) args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) train_sampler = ( RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) ) train_dataloader = DataLoader( train_dataset, sampler=train_sampler, batch_size=args.train_batch_size ) if args.max_steps > 0: t_total = 
args.max_steps args.num_train_epochs = ( args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 ) else: t_total = ( len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs ) # Prepare optimizer and schedule (linear warmup and decay) no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [ p for n, p in encoder_q.named_parameters() if not any(nd in n for nd in no_decay) ], "weight_decay": args.weight_decay, }, { "params": [ p for n, p in encoder_q.named_parameters() if any(nd in n for nd in no_decay) ], "weight_decay": 0.01, }, ] optimizer = AdamW( optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon ) scheduler = get_linear_schedule_with_warmup( optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total ) # Training logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_dataset)) logger.info(" Num Epochs = %d", args.num_train_epochs) logger.info( " Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size ) logger.info( " Total train batch size (w. parallel, distributed & accumulation) = %d", args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1), ) logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", t_total) best_f1 = 0.0 global_step, epochs_trained, steps_trained_in_current_epoch = 0, 0, 0 tr_loss, logging_loss = 0.0, 0.0 encoder_q.zero_grad() train_iterator = trange( epochs_trained, int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0], ) set_seed(args) max_acc, max_acc_f1, max_f1, max_f1_acc = 0.0, 0.0, 0.0, 0.0 for idx, _ in enumerate(train_iterator): tr_loss = 0.0 epoch_iterator = tqdm( train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0] ) for step, batch in enumerate(epoch_iterator): # Skip past any already trained steps if resuming training if steps_trained_in_current_epoch > 0: steps_trained_in_current_epoch -= 1 continue encoder_q.train() batch = tuple(t.to(args.device) for t in batch) inputs = { "input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3], "nodes_index_mask": batch[4], "adj_metric": batch[5], "node_mask": batch[6], "sen2node": batch[7], "sentence_mask": batch[8], "sentence_length": batch[9], "batch_id": batch[10], } if args.model_type != "distilbert": inputs["token_type_ids"] = ( batch[2] if args.model_type in ["bert", "xlnet", "albert"] else None ) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids q_outputs, q_rep = encoder_q(**inputs) # Model outputs are always tuple in transformers (see doc). 
if args.n_gpu > 1: loss = loss.mean() if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps loss.backward() tr_loss += loss.item() epoch_iterator.set_description( "loss {}".format( round(tr_loss * args.gradient_accumulation_steps / (step + 1), 4) ) ) if (step + 1) % args.gradient_accumulation_steps == 0: if args.fp16: torch.nn.utils.clip_grad_norm_( amp.master_params(optimizer), args.max_grad_norm ) else: torch.nn.utils.clip_grad_norm_( encoder_q.parameters(), args.max_grad_norm ) optimizer.step() scheduler.step() encoder_q.zero_grad() global_step += 1 if ( args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0 ): logs = {} if ( args.local_rank == -1 and args.evaluate_during_training ): # Only evaluate when single GPU otherwise metrics may not average well results = evaluate(args, encoder_q, tokenizer) for key, value in results.items(): eval_key = "eval_{}".format(key) logs[eval_key] = value loss_scalar = (tr_loss - logging_loss) / args.logging_steps learning_rate_scalar = scheduler.get_lr()[0] logs["learning_rate"] = learning_rate_scalar logs["loss"] = loss_scalar logging_loss = tr_loss wandb.log({"train/loss": loss_scalar}) if args.max_steps > 0 and global_step > args.max_steps: epoch_iterator.close() break if args.local_rank in [-1, 0] and args.save_steps > 0 and args.do_eval: results = evaluate(args, encoder_q, tokenizer, checkpoint=str(idx)) logger.info("the results is {}".format(results)) if results["f1"] > max_f1: max_f1 = results["f1"] max_f1_acc = results["acc"] if results["acc"] > max_acc: max_acc = results["acc"] max_acc_f1 = results["f1"] if results["f1"] > best_f1: best_f1 = results["f1"] output_dir = os.path.join( args.output_dir, "seed-{}".format(args.seed), "checkpoint-{}-{}".format(idx, best_f1), ) if not os.path.exists(output_dir): os.makedirs(output_dir) model_to_save = ( encoder_q.module if hasattr(encoder_q, "module") else encoder_q ) # Take care of distributed/parallel training model_to_save.save_pretrained(output_dir) torch.save( args, os.path.join(output_dir, "training_{}.bin".format(idx)) ) logger.info("Saving model checkpoint to %s", output_dir) torch.save( optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt") ) torch.save( scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt") ) logger.info("Saving optimizer and scheduler states to %s", output_dir) if args.max_steps > 0 and global_step > args.max_steps: train_iterator.close() break return_res = { "max_acc": max_acc, "max_acc_f1": max_acc_f1, "max_f1": max_f1, "max_f1_acc": max_f1_acc, } if args.do_ray: tune.report( accuracy=max_acc, max_acc_f1=max_acc_f1, f1=max_f1, max_f1_acc=max_f1_acc ) return global_step, tr_loss / global_step, return_res, output_dir def evaluate(args, model, tokenizer, checkpoint=None, prefix="", mode="dev"): eval_task_names = (args.task_name,) eval_outputs_dirs = (args.output_dir,) results = {} for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs): eval_dataset = load_and_cache_examples( args, eval_task, tokenizer, evaluate=True, mode=mode ) if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]: os.makedirs(eval_output_dir) args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) # Note that DistributedSampler samples randomly. 
eval_sampler = SequentialSampler(eval_dataset) eval_dataloader = DataLoader( eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size ) if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel): model = torch.nn.DataParallel(model) # Evaluation logger.info("***** Running evaluation {} *****".format(prefix)) logger.info(" Num examples = %d", len(eval_dataset)) logger.info(" Batch size = %d", args.eval_batch_size) eval_loss = 0.0 nb_eval_steps = 0 preds, out_label_ids = None, None for batch in tqdm(eval_dataloader, desc="Evaluating"): model.eval() batch = tuple(t.to(args.device) for t in batch) with torch.no_grad(): inputs = { "input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3], "nodes_index_mask": batch[4], "adj_metric": batch[5], "node_mask": batch[6], "sen2node": batch[7], "sentence_mask": batch[8], "sentence_length": batch[9], } if args.model_type != "distilbert": inputs["token_type_ids"] = ( batch[2] if args.model_type in ["bert", "xlnet", "albert"] else None ) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids outputs, _ = model(**inputs) tmp_eval_loss, logits = outputs[:2] eval_loss += tmp_eval_loss.mean().item() nb_eval_steps += 1 if preds is None: preds = logits.detach().cpu().numpy() out_label_ids = inputs["labels"].detach().cpu().numpy() else: preds = np.append(preds, logits.detach().cpu().numpy(), axis=0) out_label_ids = np.append( out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0 ) probs = preds eval_loss = eval_loss / nb_eval_steps if args.output_mode == "classification": preds = np.argmax(preds, axis=1) elif args.output_mode == "regression": preds = np.squeeze(preds) result = compute_metrics(eval_task, preds, out_label_ids) results.update(result) output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt") with open(output_eval_file, "w") as writer: logger.info("***** Eval results {} *****".format(prefix)) for key in sorted(result.keys()): logger.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) wandb.log( { "eval/acc": result["acc"], "eval/f1": result["f1"], "eval/acc_and_f1": result["acc_and_f1"], } ) return results def load_and_cache_examples( args, task, tokenizer, evaluate=False, mode="train", dataset_name="", rel="" ): if args.local_rank not in [-1, 0] and not evaluate: torch.distributed.barrier() processor = processors[task]()
output_mode = output_modes[task]
0
2023-10-24 14:03:11+00:00
16k
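Both training loops in this record follow the same accumulate-then-step pattern: the loss is divided by gradient_accumulation_steps, gradients accumulate every micro-batch, and clipping, optimizer.step(), scheduler.step(), and zero_grad() fire only once per accumulation window. A minimal framework-free sketch of that pattern follows; the model, data, and scheduler (a placeholder standing in for get_linear_schedule_with_warmup) are illustrative, and the apex fp16 branch is omitted.

import torch
import torch.nn as nn

model = nn.Linear(16, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=2e-5)
scheduler = torch.optim.lr_scheduler.LinearLR(optimizer)  # stand-in for the warmup/decay schedule
accum_steps, max_grad_norm = 4, 1.0

data = [(torch.randn(8, 16), torch.randint(0, 2, (8,))) for _ in range(8)]
model.zero_grad()
for step, (x, y) in enumerate(data):
    loss = nn.functional.cross_entropy(model(x), y)
    loss = loss / accum_steps                 # scale so accumulated grads match one large batch
    loss.backward()                           # gradients accumulate across micro-batches
    if (step + 1) % accum_steps == 0:
        torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)
        optimizer.step()
        scheduler.step()
        model.zero_grad()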
samholt/ActiveObservingInContinuous-timeControl
mppi_dataset_collector.py
[ { "identifier": "dotdict", "path": "config.py", "snippet": "class dotdict(dict):\n \"\"\"dot.notation access to dictionary attributes\"\"\"\n\n __getattr__ = dict.get\n __setattr__ = dict.__setitem__\n __delattr__ = dict.__delitem__" }, { "identifier": "create_env", "path": "overlay.py", "snippet": "def create_env(env_name, dt=0.05, ts_grid=\"fixed\", noise=0.0, friction=False, device=device):\n if \"oderl\" in env_name:\n env = create_oderl_env(env_name, dt=dt, ts_grid=ts_grid, noise=noise, friction=friction, device=device)\n else:\n env = gym.make(env_name)\n return env" }, { "identifier": "setup_logger", "path": "overlay.py", "snippet": "def setup_logger(file, log_folder=\"logs\", return_path_to_log=False):\n import logging\n import os\n import time\n\n file_name = os.path.basename(os.path.realpath(file)).split(\".py\")[0]\n from pathlib import Path\n\n Path(f\"./{log_folder}\").mkdir(parents=True, exist_ok=True)\n path_run_name = \"{}-{}\".format(file_name, time.strftime(\"%Y%m%d-%H%M%S\"))\n logging.basicConfig(\n format=\"%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s\",\n handlers=[\n logging.FileHandler(f\"{log_folder}/{path_run_name}_log.txt\"),\n logging.StreamHandler(),\n ],\n datefmt=\"%H:%M:%S\",\n level=logging.INFO,\n )\n logger = logging.getLogger()\n logger.info(f\"Starting: Log file at: {log_folder}/{path_run_name}_log.txt\")\n if return_path_to_log:\n return logger, f\"{log_folder}/{path_run_name}_log.txt\"\n else:\n return logger" }, { "identifier": "start_virtual_display", "path": "overlay.py", "snippet": "def start_virtual_display():\n import pyvirtualdisplay\n\n return pyvirtualdisplay.Display(visible=0, size=(1400, 900)).start()" }, { "identifier": "step_env", "path": "overlay.py", "snippet": "def step_env(env, action, obs_noise):\n at = torch.from_numpy(action).to(device)\n\n def g(state, t):\n return at\n\n returns = env.integrate_system(2, g, s0=torch.tensor(env.state).to(device), return_states=True)\n state = returns[-1][-1]\n reward = returns[2][-1]\n tsn = returns[-2][-1, -1]\n env.set_state_(state.cpu().numpy())\n state_out = env.get_obs()\n state_out = torch.from_numpy(state_out).to(device)\n state_out += torch.randn_like(state_out) * obs_noise\n env.time_step += 1\n done = True if env.time_step >= env.n_steps else False\n state_out = state_out.cpu().numpy()\n return state_out, reward, done, tsn" }, { "identifier": "MPPI", "path": "planners/mppi.py", "snippet": "class MPPI:\n \"\"\"\n Model Predictive Path Integral control\n This implementation batch samples the trajectories and so scales well with the number of samples K.\n\n Implemented according to algorithm 2 in Williams et al., 2017\n 'Information Theoretic MPC for Model-Based Reinforcement Learning',\n based off of https://github.com/ferreirafabio/mppi_pendulum\n \"\"\"\n\n def __init__(\n self,\n dynamics,\n running_cost,\n nx,\n noise_sigma,\n num_samples=100,\n horizon=15,\n device=\"cuda:0\",\n terminal_state_cost=None,\n lambda_=1.0,\n noise_mu=None,\n u_min=None,\n u_max=None,\n u_init=None,\n U_init=None,\n u_scale=1,\n u_per_command=1,\n step_dependent_dynamics=False,\n rollout_samples=1, # Ensemble size\n rollout_var_cost=0,\n rollout_var_discount=0.95,\n dt=0.05,\n sample_null_action=False,\n noise_abs_cost=False,\n ):\n \"\"\"\n :param dynamics: function(state, action) -> next_state (K x nx) taking in batch state (K x nx) and action (K x nu)\n :param running_cost: function(state, action) -> cost (K) taking in batch state and action (same as dynamics)\n :param nx: state 
dimension\n :param noise_sigma: (nu x nu) control noise covariance (assume v_t ~ N(u_t, noise_sigma))\n :param num_samples: K, number of trajectories to sample\n :param horizon: T, length of each trajectory\n :param device: pytorch device\n :param terminal_state_cost: function(state) -> cost (K x 1) taking in batch state\n :param lambda_: temperature, positive scalar where larger values will allow more exploration\n :param noise_mu: (nu) control noise mean (used to bias control samples); defaults to zero mean\n :param u_min: (nu) minimum values for each dimension of control to pass into dynamics\n :param u_max: (nu) maximum values for each dimension of control to pass into dynamics\n :param u_init: (nu) what to initialize new end of trajectory control to be; defeaults to zero\n :param U_init: (T x nu) initial control sequence; defaults to noise\n :param step_dependent_dynamics: whether the passed in dynamics needs horizon step passed in (as 3rd arg)\n :param rollout_samples: M, number of state trajectories to rollout for each control trajectory\n (should be 1 for deterministic dynamics and more for models that output a distribution)\n :param rollout_var_cost: Cost attached to the variance of costs across trajectory rollouts\n :param rollout_var_discount: Discount of variance cost over control horizon\n :param sample_null_action: Whether to explicitly sample a null action (bad for starting in a local minima)\n :param noise_abs_cost: Whether to use the absolute value of the action noise to avoid bias when all states have the same cost\n \"\"\"\n self.d = device\n self.dtype = noise_sigma.dtype\n self.K = num_samples # N_SAMPLES\n self.T = horizon # TIMESTEPS\n self.dt = dt\n\n # dimensions of state and control\n self.nx = nx\n self.nu = 1 if len(noise_sigma.shape) == 0 else noise_sigma.shape[0]\n self.lambda_ = lambda_\n\n if noise_mu is None:\n noise_mu = torch.zeros(self.nu, dtype=self.dtype)\n\n if u_init is None:\n u_init = torch.zeros_like(noise_mu)\n\n # handle 1D edge case\n if self.nu == 1:\n noise_mu = noise_mu.view(-1)\n noise_sigma = noise_sigma.view(-1, 1)\n\n # bounds\n self.u_min = u_min\n self.u_max = u_max\n self.u_scale = u_scale\n self.u_per_command = u_per_command\n # make sure if any of them is specified, both are specified\n if self.u_max is not None and self.u_min is None:\n if not torch.is_tensor(self.u_max):\n self.u_max = torch.tensor(self.u_max)\n self.u_min = -self.u_max\n if self.u_min is not None and self.u_max is None:\n if not torch.is_tensor(self.u_min):\n self.u_min = torch.tensor(self.u_min)\n self.u_max = -self.u_min\n if self.u_min is not None:\n self.u_min = self.u_min.to(device=self.d)\n self.u_max = self.u_max.to(device=self.d)\n\n self.noise_mu = noise_mu.to(self.d)\n self.noise_sigma = noise_sigma.to(self.d)\n self.noise_sigma_inv = torch.inverse(self.noise_sigma)\n self.noise_dist = MultivariateNormal(self.noise_mu, covariance_matrix=self.noise_sigma)\n # T x nu control sequence\n self.U = U_init\n self.u_init = u_init.to(self.d)\n\n if self.U is None:\n self.U = self.noise_dist.sample((self.T,))\n\n self.step_dependency = step_dependent_dynamics\n self.F = dynamics\n self.running_cost = running_cost\n self.terminal_state_cost = terminal_state_cost\n self.sample_null_action = sample_null_action\n self.noise_abs_cost = noise_abs_cost\n self.state = None\n\n # handling dynamics models that output a distribution (take multiple trajectory samples)\n self.M = rollout_samples\n self.rollout_var_cost = rollout_var_cost\n self.rollout_var_discount = 
rollout_var_discount\n\n # sampled results from last command\n self.cost_total = None\n self.cost_total_non_zero = None\n self.omega = None\n self.states_mu = None\n self.states_var = None\n self.actions = None\n\n def _dynamics(self, state, u, t):\n return self.F(state, u, t) if self.step_dependency else self.F(state, u)\n\n # @handle_batch_input\n def _running_cost(self, state, u):\n return self.running_cost(state, u)\n\n def command(self, state):\n \"\"\"\n :param state: (nx) or (K x nx) current state, or samples of states (for propagating a distribution of states)\n :returns action: (nu) best action\n \"\"\"\n # shift command 1 time step\n self.U = torch.roll(self.U, -1, dims=0)\n self.U[-1] = self.u_init\n\n if not torch.is_tensor(state):\n state = torch.tensor(state)\n self.state = state.to(dtype=self.dtype, device=self.d)\n\n cost_total = self._compute_total_cost_batch()\n logger.debug(f\"cost_total: {cost_total.shape}\")\n\n beta = torch.min(cost_total)\n self.cost_total_non_zero = _ensure_non_zero(cost_total, beta, 1 / self.lambda_)\n\n eta = torch.sum(self.cost_total_non_zero)\n self.omega = (1.0 / eta) * self.cost_total_non_zero\n for t in range(self.T):\n self.U[t] += torch.sum(self.omega.view(-1, 1) * self.noise[:, t], dim=0)\n action = self.U[: self.u_per_command]\n # reduce dimensionality if we only need the first command\n if self.u_per_command == 1:\n action = action[0]\n\n logger.debug(f\"action: {action}\")\n return action * self.u_scale\n\n def reset(self):\n \"\"\"\n Clear controller state after finishing a trial\n \"\"\"\n self.U = self.noise_dist.sample((self.T,))\n\n def _compute_rollout_costs(self, perturbed_actions):\n K, T, nu = perturbed_actions.shape\n assert nu == self.nu\n\n cost_total = torch.zeros(K, device=self.d, dtype=self.dtype)\n cost_samples = cost_total.repeat(self.M, 1)\n cost_var = torch.zeros_like(cost_total)\n\n # allow propagation of a sample of states (ex. 
to carry a distribution), or to start with a single state\n if self.state.shape == (K, self.nx):\n state_mu = self.state\n else:\n state_mu = self.state.view(1, -1).repeat(K, 1)\n\n logger.debug(f\"state: {state_mu.shape}\")\n\n states_mu = []\n states_var = []\n actions = []\n perturbed_actions = self.u_scale * perturbed_actions\n for t in range(T):\n u = perturbed_actions[:, t, :]\n state_mu, state_var = self._dynamics(state_mu, u, t)\n c = self._running_cost(state_mu, u)\n cost_samples += c\n if self.M > 1:\n cost_var += c.var(dim=0) * (self.rollout_var_discount**t)\n\n # Save total states/actions\n states_mu.append(state_mu)\n states_var.append(state_var)\n actions.append(u)\n\n # Actions is K x T x nu\n # States is K x T x nx\n actions = torch.stack(actions, dim=-2)\n states_mu = torch.stack(states_mu, dim=-2)\n states_var = torch.stack(states_var, dim=-2)\n logger.debug(f\"states: {states_mu.shape}\")\n\n # action perturbation cost\n if self.terminal_state_cost:\n c = self.terminal_state_cost(states_mu, actions)\n cost_samples += c\n cost_total += cost_samples.mean(dim=0)\n cost_total += cost_var * self.rollout_var_cost\n logger.debug(f\"{cost_total.shape} | {states_mu.shape} | {actions.shape}\")\n return cost_total, states_mu, states_var, actions\n\n def _compute_total_cost_batch(self):\n # parallelize sampling across trajectories\n # resample noise each time we take an action\n self.noise = self.noise_dist.sample((self.K, self.T)) # K x T x nu\n self.perturbed_action = self.U + self.noise\n if self.sample_null_action:\n self.perturbed_action[self.K - 1] = 0\n # naively bound control\n self.perturbed_action = self._bound_action(self.perturbed_action * self.u_scale)\n self.perturbed_action /= self.u_scale\n # bounded noise after bounding (some got cut off, so we don't penalize that in action cost)\n self.noise = self.perturbed_action - self.U\n if self.noise_abs_cost:\n action_cost = self.lambda_ * torch.abs(self.noise) @ self.noise_sigma_inv\n # NOTE: The original paper does self.lambda_ * self.noise @ self.noise_sigma_inv, but this biases\n # the actions with low noise if all states have the same cost. 
With abs(noise) we prefer actions close to the\n # nominal trajectory.\n else:\n action_cost = self.lambda_ * self.noise @ self.noise_sigma_inv # Like original paper\n logger.debug(f\"action_cost: {action_cost.shape}\")\n\n self.cost_total, self.states_mu, self.states_var, self.actions = self._compute_rollout_costs(\n self.perturbed_action\n )\n self.actions /= self.u_scale\n\n # action perturbation cost\n perturbation_cost = torch.sum(self.U * action_cost, dim=(1, 2))\n self.cost_total += perturbation_cost\n return self.cost_total\n\n def _bound_action(self, action):\n if self.u_max is not None:\n action = torch.clamp(action, min=self.u_min, max=self.u_max)\n return action\n\n def get_rollouts(self, state, num_rollouts=1):\n \"\"\"\n :param state: either (nx) vector or (num_rollouts x nx) for sampled initial states\n :param num_rollouts: Number of rollouts with same action sequence - for generating samples with stochastic\n dynamics\n :returns states: num_rollouts x T x nx vector of trajectories\n\n \"\"\"\n state = state.view(-1, self.nx)\n if state.size(0) == 1:\n state = state.repeat(num_rollouts, 1)\n\n T = self.U.shape[0]\n states = torch.zeros((num_rollouts, T + 1, self.nx), dtype=self.U.dtype, device=self.U.device)\n states[:, 0] = state\n for t in range(T):\n states[:, t + 1] = self._dynamics(\n states[:, t].view(num_rollouts, -1), self.u_scale * self.U[t].view(num_rollouts, -1), t\n )\n return states[:, 1:]" }, { "identifier": "MPPIActiveObserving", "path": "planners/mppi_active_observing.py", "snippet": "class MPPIActiveObserving:\n \"\"\"\n Model Predictive Path Integral control\n This implementation batch samples the trajectories and so scales well with the number of samples K.\n\n Implemented according to algorithm 2 in Williams et al., 2017\n 'Information Theoretic MPC for Model-Based Reinforcement Learning',\n based off of https://github.com/ferreirafabio/mppi_pendulum\n \"\"\"\n\n def __init__(\n self,\n dynamics,\n running_cost,\n nx,\n noise_sigma,\n cost_var_from_state_var=None,\n num_samples=100,\n horizon=15,\n device=\"cuda:0\",\n terminal_state_cost=None,\n observing_var_threshold=1.0,\n lambda_=1.0,\n noise_mu=None,\n u_min=None,\n u_max=None,\n u_init=None,\n U_init=None,\n u_scale=1,\n u_per_command=1,\n rollout_samples=1, # Ensemble size\n rollout_var_cost=0,\n rollout_var_discount=0.95,\n dt_simulation=0.01,\n dt=0.05,\n sampling_policy=\"discrete_planning\",\n continuous_time_threshold=0.5,\n observing_cost=1.0,\n sample_null_action=False,\n observing_fixed_frequency=1,\n discrete_planning=False,\n discrete_interval=1,\n limit_actions_to_only_positive=False,\n fixed_continuous_planning_steps=None,\n debug_mode_return_full_cost_std=False,\n debug_mode_cp_return_continuous_reward_unc=False,\n noise_abs_cost=False,\n ):\n \"\"\"\n :param dynamics: function(state, action) -> next_state (K x nx) taking in batch state (K x nx) and action (K x nu)\n :param running_cost: function(state, action) -> cost (K) taking in batch state and action (same as dynamics)\n :param nx: state dimension\n :param noise_sigma: (nu x nu) control noise covariance (assume v_t ~ N(u_t, noise_sigma))\n :param num_samples: K, number of trajectories to sample\n :param horizon: T, length of each trajectory\n :param device: pytorch device\n :param terminal_state_cost: function(state) -> cost (K x 1) taking in batch state\n :param lambda_: temperature, positive scalar where larger values will allow more exploration\n :param noise_mu: (nu) control noise mean (used to bias control samples); defaults 
to zero mean\n :param u_min: (nu) minimum values for each dimension of control to pass into dynamics\n :param u_max: (nu) maximum values for each dimension of control to pass into dynamics\n :param u_init: (nu) what to initialize new end of trajectory control to be; defeaults to zero\n :param U_init: (T x nu) initial control sequence; defaults to noise\n :param rollout_samples: M, number of state trajectories to rollout for each control trajectory\n (should be 1 for deterministic dynamics and more for models that output a distribution)\n :param rollout_var_cost: Cost attached to the variance of costs across trajectory rollouts\n :param rollout_var_discount: Discount of variance cost over control horizon\n :param sample_null_action: Whether to explicitly sample a null action (bad for starting in a local minima)\n :param noise_abs_cost: Whether to use the absolute value of the action noise to avoid bias when all states have the same cost\n \"\"\"\n self.d = device\n self.dt_simulation = dt_simulation\n if discrete_planning:\n dt_plan = dt_simulation * discrete_interval\n else:\n dt_plan = dt\n self.discrete_planning = discrete_planning\n self.discrete_interval = discrete_interval\n self.limit_actions_to_only_positive = limit_actions_to_only_positive\n self.continuous_time_interval = max(int(continuous_time_threshold * discrete_interval), 1)\n self.dtype = noise_sigma.dtype\n self.K = num_samples # N_SAMPLES\n self.T = horizon # TIMESTEPS\n self.dt = dt_plan\n self.observing_cost = observing_cost # Hyperparameter to be tuned\n self.observing_var_threshold = observing_var_threshold # Hyperparameter to be tuned\n self.observing_fixed_frequency = observing_fixed_frequency\n\n # dimensions of state and control\n self.nx = nx\n self.nu = 1 if len(noise_sigma.shape) == 0 else noise_sigma.shape[0]\n self.lambda_ = lambda_\n\n if noise_mu is None:\n noise_mu = torch.zeros(self.nu, dtype=self.dtype)\n\n if u_init is None:\n u_init = torch.zeros_like(noise_mu)\n\n # handle 1D edge case\n if self.nu == 1:\n noise_mu = noise_mu.view(-1)\n noise_sigma = noise_sigma.view(-1, 1)\n\n # bounds\n self.u_min = u_min\n self.u_max = u_max\n self.u_scale = u_scale\n self.u_per_command = u_per_command\n # make sure if any of them is specified, both are specified\n if self.u_max is not None and self.u_min is None:\n if not torch.is_tensor(self.u_max):\n self.u_max = torch.tensor(self.u_max)\n self.u_min = -self.u_max\n if self.u_min is not None and self.u_max is None:\n if not torch.is_tensor(self.u_min):\n self.u_min = torch.tensor(self.u_min)\n self.u_max = -self.u_min\n if self.u_min is not None:\n self.u_min = self.u_min.to(device=self.d)\n self.u_max = self.u_max.to(device=self.d)\n\n self.noise_mu = noise_mu.to(self.d)\n self.noise_sigma = noise_sigma.to(self.d)\n self.noise_sigma_inv = torch.inverse(self.noise_sigma)\n self.noise_dist = MultivariateNormal(self.noise_mu, covariance_matrix=self.noise_sigma)\n # T x nu control sequence\n self.U = U_init\n self.u_init = u_init.to(self.d)\n\n if self.U is None:\n self.U = self.noise_dist.sample((self.T,))\n\n self.F = dynamics\n self.running_cost = running_cost\n self.terminal_state_cost = terminal_state_cost\n self.sample_null_action = sample_null_action\n self.noise_abs_cost = noise_abs_cost\n self.state = None\n\n # handling dynamics models that output a distribution (take multiple trajectory samples)\n self.M = rollout_samples\n self.rollout_var_cost = rollout_var_cost\n self.rollout_var_discount = rollout_var_discount\n\n # sampled results from last command\n 
self.cost_total = None\n self.cost_total_non_zero = None\n self.omega = None\n self.states_mu = None\n self.states_var = None\n self.actions = None\n\n self.sampling_policy = sampling_policy\n self.cost_var_from_state_var = cost_var_from_state_var\n\n self.previous_step = 0\n self.fixed_continuous_planning_steps = fixed_continuous_planning_steps\n self.debug_mode_return_full_cost_std = debug_mode_return_full_cost_std\n self.debug_mode_cp_return_continuous_reward_unc = debug_mode_cp_return_continuous_reward_unc\n\n def _dynamics(self, state, u, ts_pred, return_var=True):\n if self.limit_actions_to_only_positive:\n u[u <= 0] = 0\n return self.F(state, u, ts_pred, return_var=return_var)\n\n def _cost_var_from_state_var(self, state_var):\n if not self.cost_var_from_state_var is None:\n return self.cost_var_from_state_var(state_var)\n else:\n return state_var.sum()\n\n # @handle_batch_input\n def _running_cost(self, state, u):\n return self.running_cost(state, u)\n\n def reset(self):\n \"\"\"\n Clear controller state after finishing a trial\n \"\"\"\n self.U = self.noise_dist.sample((self.T,))\n\n def _compute_rollout_costs(self, perturbed_actions):\n K, T, nu = perturbed_actions.shape\n assert nu == self.nu\n\n cost_total = torch.zeros(K, device=self.d, dtype=self.dtype)\n cost_samples = cost_total.repeat(self.M, 1)\n cost_var = torch.zeros_like(cost_total)\n\n # allow propagation of a sample of states (ex. to carry a distribution), or to start with a single state\n if self.state.shape == (K, self.nx):\n state_mu = self.state\n else:\n state_mu = self.state.view(1, -1).repeat(K, 1)\n\n logger.debug(f\"state: {state_mu.shape}\")\n\n states_mu = []\n # states_var = []\n actions = []\n perturbed_actions = self.u_scale * perturbed_actions\n ts_pred = torch.tensor(self.dt, device=self.d, dtype=self.dtype).view(1, 1).repeat(K, 1)\n\n for t in range(T):\n u = perturbed_actions[:, t, :]\n state_mu, _ = self._dynamics(state_mu, u, ts_pred, return_var=False)\n c = self._running_cost(state_mu, u)\n cost_samples += c\n if self.M > 1:\n cost_var += c.var(dim=0) * (self.rollout_var_discount**t)\n\n # Save total states/actions\n states_mu.append(state_mu)\n actions.append(u)\n\n # Actions is K x T x nu\n # States is K x T x nx\n actions = torch.stack(actions, dim=-2)\n states_mu = torch.stack(states_mu, dim=-2)\n logger.debug(f\"states: {states_mu.shape}\")\n\n # action perturbation cost\n if self.terminal_state_cost:\n c = self.terminal_state_cost(states_mu, actions)\n cost_samples += c\n cost_total += cost_samples.mean(dim=0)\n cost_total += cost_var * self.rollout_var_cost\n logger.debug(f\"{cost_total.shape} | {states_mu.shape} | {actions.shape}\")\n return cost_total, states_mu, actions\n\n def _compute_total_cost_batch(self):\n # parallelize sampling across trajectories\n # resample noise each time we take an action\n self.noise = self.noise_dist.sample((self.K, self.T)) # K x T x nu\n self.perturbed_action = self.U + self.noise\n if self.sample_null_action:\n self.perturbed_action[self.K - 1] = 0\n # naively bound control\n self.perturbed_action = self._bound_action(self.perturbed_action * self.u_scale)\n self.perturbed_action /= self.u_scale\n # bounded noise after bounding (some got cut off, so we don't penalize that in action cost)\n self.noise = self.perturbed_action - self.U\n if self.noise_abs_cost:\n action_cost = self.lambda_ * torch.abs(self.noise) @ self.noise_sigma_inv\n # NOTE: The original paper does self.lambda_ * self.noise @ self.noise_sigma_inv, but this biases\n # the actions with 
low noise if all states have the same cost. With abs(noise) we prefer actions close to the\n # nominal trajectory.\n else:\n action_cost = self.lambda_ * self.noise @ self.noise_sigma_inv # Like original paper\n logger.debug(f\"action_cost: {action_cost.shape}\")\n\n self.cost_total, self.states_mu, self.actions = self._compute_rollout_costs(self.perturbed_action)\n self.actions /= self.u_scale\n\n # action perturbation cost\n perturbation_cost = torch.sum(self.U * action_cost, dim=(1, 2)) # wonder if can remove?\n self.cost_total += perturbation_cost\n return self.cost_total\n\n def _bound_action(self, action):\n if self.u_max is not None:\n action = torch.clamp(action, min=self.u_min, max=self.u_max)\n return action\n\n def get_rollouts(self, state, num_rollouts=1):\n \"\"\"\n :param state: either (nx) vector or (num_rollouts x nx) for sampled initial states\n :param num_rollouts: Number of rollouts with same action sequence - for generating samples with stochastic\n dynamics\n :returns states: num_rollouts x T x nx vector of trajectories\n\n \"\"\"\n state = state.view(-1, self.nx)\n if state.size(0) == 1:\n state = state.repeat(num_rollouts, 1)\n\n T = self.U.shape[0]\n states = torch.zeros((num_rollouts, T + 1, self.nx), dtype=self.U.dtype, device=self.U.device)\n states[:, 0] = state\n ts_pred = torch.tensor(self.dt, device=self.d, dtype=self.dtype).view(1, 1).repeat(num_rollouts, 1)\n for t in range(T):\n states[:, t + 1] = self._dynamics(\n states[:, t].view(num_rollouts, -1), self.u_scale * self.U[t].view(num_rollouts, -1), ts_pred\n )\n return states[:, 1:]\n\n def command(self, state):\n \"\"\"\n :param state: (nx) or (K x nx) current state, or samples of states (for propagating a distribution of states)\n :returns action: (nu) best action\n \"\"\"\n self.U = torch.zeros_like(self.U)\n\n if not torch.is_tensor(state):\n state = torch.tensor(state)\n self.state = state.to(dtype=self.dtype, device=self.d)\n assert not torch.isnan(state).any(), \"Nan detected in state\"\n\n cost_total = self._compute_total_cost_batch()\n logger.debug(f\"cost_total: {cost_total.shape}\")\n\n beta = torch.min(cost_total)\n self.cost_total_non_zero = _ensure_non_zero(cost_total, beta, 1 / self.lambda_)\n\n eta = torch.sum(self.cost_total_non_zero)\n self.omega = (1.0 / eta) * self.cost_total_non_zero\n for t in range(self.T):\n self.U[t] += torch.sum(self.omega.view(-1, 1) * self.noise[:, t], dim=0)\n\n # Calculate the state estimate of the reward here, then use that for planning etc.\n if self.debug_mode_cp_return_continuous_reward_unc and self.sampling_policy == \"continuous_planning\":\n # Monte Carlo Simulation of latest reward variance\n L = self.K * 10\n ts_pred = torch.tensor(self.dt, device=self.d, dtype=self.dtype).view(1, 1).repeat(L, 1)\n ts_pred_increment = (\n torch.arange(self.dt_simulation, self.dt, self.dt_simulation, device=self.d, dtype=self.dtype)\n .repeat_interleave(L)\n .view(-1, 1)\n )\n cost_var = torch.zeros_like(cost_total)\n if self.state.shape == (L, self.nx):\n state_mu = self.state\n else:\n state_mu = self.state.view(1, -1).repeat(L, 1)\n state_mu_in = state_mu\n costs_std = []\n costs_std.append(torch.tensor(0, device=self.d, dtype=self.dtype).view(1))\n same_actions = self.U.unsqueeze(0).repeat(L, 1, 1)\n for t in range(self.T):\n u = same_actions[:, t, :]\n # Core parts\n state_mu_pred, state_var_pred = self._dynamics(state_mu_in, u, ts_pred, return_var=True)\n state_mu_final = state_mu_pred + torch.normal(0, 1, size=state_mu_pred.shape).to(self.d) * torch.sqrt(\n 
state_var_pred\n )\n c = self._running_cost(state_mu_final, u)\n # Intermediate states\n intermediate_state_count = self.discrete_interval - 1\n state_mu_pred_increment, state_var_pred_increment = self._dynamics(\n state_mu_in.repeat(intermediate_state_count, 1),\n u.repeat(intermediate_state_count, 1),\n ts_pred_increment,\n return_var=True,\n )\n state_mu_increment = state_mu_pred_increment + torch.normal(\n 0, 1, size=state_mu_pred_increment.shape\n ).to(self.d) * torch.sqrt(state_var_pred_increment)\n c_increment = self._running_cost(state_mu_increment, u.repeat(intermediate_state_count, 1))\n inter_c_stds = c_increment.view(intermediate_state_count, -1).std(dim=1)\n costs_std.append(torch.cat((inter_c_stds, c.std().view(1))))\n state_mu_in = state_mu_final\n # States is K x T x nx\n costs_std_continuous = torch.cat(costs_std)[1:]\n stats = {\n \"costs_std_median\": costs_std_continuous.median().item(),\n \"costs_std_mean\": costs_std_continuous.mean().item(),\n \"costs_std_max\": costs_std_continuous.max().item(),\n }\n if self.debug_mode_return_full_cost_std:\n return torch.cat(costs_std).cpu()\n elif self.sampling_policy == \"active_observing_control\":\n # Monte Carlo Simulation of latest reward variance\n L = self.K * 10\n ts_pred = torch.tensor(self.dt, device=self.d, dtype=self.dtype).view(1, 1).repeat(L, 1)\n ts_pred_increment = (\n torch.arange(self.dt_simulation, self.dt, self.dt_simulation, device=self.d, dtype=self.dtype)\n .repeat_interleave(L)\n .view(-1, 1)\n )\n cost_var = torch.zeros_like(cost_total)\n if self.state.shape == (L, self.nx):\n state_mu = self.state\n else:\n state_mu = self.state.view(1, -1).repeat(L, 1)\n state_mu_in = state_mu\n costs_std = []\n costs_std.append(torch.tensor(0, device=self.d, dtype=self.dtype).view(1))\n same_actions = self.U.unsqueeze(0).repeat(L, 1, 1)\n select_actions_up_to = self.T * self.discrete_interval # Initial default value\n for t in range(self.T):\n u = same_actions[:, t, :]\n # Core parts\n state_mu_pred, state_var_pred = self._dynamics(state_mu_in, u, ts_pred, return_var=True)\n state_mu_final = state_mu_pred + torch.normal(0, 1, size=state_mu_pred.shape).to(self.d) * torch.sqrt(\n state_var_pred\n )\n c = self._running_cost(state_mu_final, u)\n if c.std() >= self.observing_var_threshold:\n t_upper = ts_pred.view(-1)[0]\n t_lower = torch.tensor(0.0).to(self.d)\n while (t_upper - t_lower) > self.dt_simulation:\n t_mid = (t_upper + t_lower) / 2.0\n state_mu_pred_increment, state_var_pred_increment = self._dynamics(\n state_mu_in, u, torch.ones_like(ts_pred) * t_mid, return_var=True\n )\n state_mu_increment = state_mu_pred_increment + torch.normal(\n 0, 1, size=state_mu_pred_increment.shape\n ).to(self.d) * torch.sqrt(state_var_pred_increment)\n c_increment = self._running_cost(state_mu_increment, u)\n if c_increment.std() >= self.observing_var_threshold:\n t_upper = t_mid\n else:\n t_lower = t_mid\n select_actions_up_to = (\n t * self.discrete_interval\n + torch.floor((t_mid / ts_pred.view(-1)[0]) * self.discrete_interval).int().item()\n )\n break\n state_mu_in = state_mu_final\n stats = {}\n else:\n # Monte Carlo Simulation of latest reward variance\n L = self.K * 10\n ts_pred = torch.tensor(self.dt, device=self.d, dtype=self.dtype).view(1, 1).repeat(L, 1)\n cost_var = torch.zeros_like(cost_total)\n if self.state.shape == (L, self.nx):\n state_mu = self.state\n else:\n state_mu = self.state.view(1, -1).repeat(L, 1)\n states_mu = []\n states_var = []\n costs = []\n same_actions = self.U.unsqueeze(0).repeat(L, 1, 1)\n for t 
in range(self.T):\n u = same_actions[:, t, :]\n state_mu, state_var = self._dynamics(state_mu, u, ts_pred, return_var=True)\n state_mu = state_mu + torch.normal(0, 1, size=state_mu.shape).to(self.d) * torch.sqrt(state_var)\n c = self._running_cost(state_mu, u)\n if self.M > 1: # Untested, however should underperform - MPPI with uncertaintity paper\n cost_var += c.var(dim=0) * (self.rollout_var_discount**t)\n\n # Save total states/actions\n costs.append(c)\n states_mu.append(state_mu)\n states_var.append(state_var)\n\n # States is K x T x nx\n states_mu = torch.stack(states_mu, dim=-2)\n states_var = torch.stack(states_var, dim=-2)\n costs = torch.stack(costs, dim=-2)\n costs_std_discrete = torch.cat(\n (torch.tensor(0, device=self.d, dtype=self.dtype).view(1), costs.std(dim=1))\n )[1:]\n stats = {\n \"costs_std_median\": costs_std_discrete.median().item(),\n \"costs_std_mean\": costs_std_discrete.mean().item(),\n \"costs_std_max\": costs_std_discrete.max().item(),\n }\n if self.debug_mode_return_full_cost_std:\n return (\n torch.cat((torch.tensor(0, device=self.d, dtype=self.dtype).view(1), costs.std(dim=1)))\n .repeat_interleave(self.discrete_interval)\n .cpu()\n )\n\n if self.sampling_policy == \"discrete_monitoring\":\n actions = self.U[costs_std_discrete < self.observing_var_threshold]\n if actions.shape[0] == 0:\n actions = self.U[: self.u_per_command]\n costs_std_discrete = costs_std_discrete[: self.u_per_command]\n else:\n costs_std_discrete = costs_std_discrete[costs_std_discrete < self.observing_var_threshold]\n elif self.sampling_policy == \"discrete_planning\" or self.sampling_policy == \"continuous_planning\":\n if self.fixed_continuous_planning_steps is None:\n if not self.debug_mode_cp_return_continuous_reward_unc:\n actions = self.U[: self.observing_fixed_frequency]\n costs_std_discrete = costs_std_discrete[: self.observing_fixed_frequency]\n else:\n actions = self.U[: self.observing_fixed_frequency]\n costs_std_continuous = costs_std_continuous[\n : self.observing_fixed_frequency * self.continuous_time_interval\n ]\n costs_std_discrete = torch.tensor(0, device=self.d, dtype=self.dtype).view(1)\n else:\n actions = self.U\n costs_std_discrete = costs_std_discrete\n elif self.sampling_policy == \"active_observing_control\":\n actions = self.U\n actions = actions.repeat_interleave(self.discrete_interval, dim=0)\n slice_to_take_holder = torch.zeros((actions.shape[0])).bool()\n slice_to_take_holder[:select_actions_up_to] = True\n actions = actions[slice_to_take_holder]\n if actions.shape[0] <= (self.continuous_time_interval - 1):\n self.previous_step = int(np.ceil(actions.shape[0] / self.discrete_interval))\n actions = self.U.repeat_interleave(self.discrete_interval, dim=0)\n actions = actions[: self.continuous_time_interval]\n else:\n self.previous_step = int(actions.shape[0] / self.discrete_interval)\n assert not torch.isnan(actions).any(), \"Nan detected in actions\"\n costs_std_continuous = torch.ones_like(actions).to(self.d)\n return actions * self.u_scale, costs_std_continuous, stats\n else:\n raise NotImplementedError(f\"sampling_policy: {self.sampling_policy} not recognized\")\n self.previous_step = actions.shape[0]\n assert not torch.isnan(actions).any(), \"Nan detected in actions\"\n if self.discrete_planning:\n actions = actions.repeat_interleave(self.discrete_interval, dim=0)\n costs_std_discrete = costs_std_discrete.repeat_interleave(self.discrete_interval, dim=0)\n if self.sampling_policy == \"continuous_planning\":\n if self.fixed_continuous_planning_steps is 
None:\n actions = actions[: self.continuous_time_interval]\n if not self.debug_mode_cp_return_continuous_reward_unc:\n costs_std_discrete = costs_std_discrete[: self.continuous_time_interval]\n else:\n costs_std_discrete = costs_std_continuous\n self.previous_step = int(np.ceil(actions.shape[0] / self.discrete_interval))\n else:\n actions = actions[: self.fixed_continuous_planning_steps]\n costs_std_discrete = costs_std_discrete[: self.fixed_continuous_planning_steps]\n self.previous_step = int(np.ceil(actions.shape[0] / self.discrete_interval))\n return actions * self.u_scale, costs_std_discrete, stats" } ]
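The MPPI snippets quoted in the context above implement algorithm 2 of Williams et al. (2017): sample K perturbed control sequences, roll them out through the dynamics, and recombine them with exponentiated-cost weights. The standalone sketch below illustrates only that weighting step; the function name and the toy rollout cost are hypothetical and are not part of the quoted `planners.mppi` module.

```python
import torch

def mppi_weighted_update(U, noise, costs, lambda_=1.0):
    """Recombine sampled control perturbations with exponentiated-cost weights.

    U:      (T, nu) nominal control sequence
    noise:  (K, T, nu) sampled perturbations of U
    costs:  (K,) total rollout cost of each perturbed sequence
    """
    beta = costs.min()                              # shift for numerical stability
    weights = torch.exp(-(costs - beta) / lambda_)  # lower cost -> larger weight
    omega = weights / weights.sum()                 # normalize so the weights sum to 1
    # weighted average of the perturbations, added to the nominal plan
    return U + torch.einsum("k,ktu->tu", omega, noise)

# toy usage with hypothetical sizes: K=100 samples, T=15 steps, nu=1 control dimension
K, T, nu = 100, 15, 1
U = torch.zeros(T, nu)
noise = torch.randn(K, T, nu)
costs = (noise ** 2).sum(dim=(1, 2))                # stand-in for a real rollout cost
U_new = mppi_weighted_update(U, noise, costs, lambda_=1.0)
print(U_new.shape)  # torch.Size([15, 1])
```

This mirrors the `beta`, `cost_total_non_zero`, and `omega` computation in the quoted `command` methods, which apply the same weights time step by time step via `torch.sum(self.omega.view(-1, 1) * self.noise[:, t], dim=0)`.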
import logging import os import time import imageio import numpy as np import torch import torch.multiprocessing as multiprocessing from functools import partial from tqdm import tqdm from config import dotdict from overlay import create_env, setup_logger, start_virtual_display, step_env from planners.mppi import MPPI from planners.mppi_active_observing import MPPIActiveObserving from oracle import pendulum_dynamics_dt from oracle import cartpole_dynamics_dt from oracle import acrobot_dynamics_dt from oracle import cancer_dynamics_dt from pathlib import Path from config import get_config, seed_all
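Among the imports above, `MPPIActiveObserving` is the planner whose `active_observing_control` branch (quoted in the context) bisects the planning interval to find the earliest time at which the Monte-Carlo reward standard deviation crosses `observing_var_threshold`. A minimal sketch of that bisection idea, assuming a hypothetical monotone `reward_std(t)` in place of the batched model rollout:

```python
def earliest_threshold_crossing(reward_std, dt_plan, dt_sim, threshold):
    """Bisect [0, dt_plan] for the first time reward_std(t) >= threshold,
    down to a resolution of dt_sim. reward_std is assumed monotone here."""
    t_lower, t_upper = 0.0, dt_plan
    while (t_upper - t_lower) > dt_sim:
        t_mid = 0.5 * (t_lower + t_upper)
        if reward_std(t_mid) >= threshold:
            t_upper = t_mid     # uncertainty already too high: look earlier
        else:
            t_lower = t_mid     # still certain enough: look later
    return t_upper

# toy usage: uncertainty grows linearly with time, crossing 0.06 at t = 0.03
print(earliest_threshold_crossing(lambda t: 2.0 * t, dt_plan=0.05, dt_sim=0.01, threshold=0.06))
```

In the quoted planner the same loop runs on batched model predictions, and the crossing time is then converted into a number of simulation steps (`select_actions_up_to`) before falling back to the discrete plan.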
12,339
return episodes def mppi_with_model_collect_data( model_name, env_name, roll_outs=1000, time_steps=30, lambda_=1.0, sigma=1.0, dt=0.05, model_seed=11, save_video=False, state_constraint=False, change_goal=False, encode_obs_time=False, model=None, uniq=None, log_debug=False, collect_samples=1e6, config_in={}, debug_main=False, ts_grid="exp", intermediate_run=False, ): config = dotdict(dict(config_in)) file_name = f"replay_buffer_env-name-{env_name}_model-name-{model_name}_encode-obs-time-{encode_obs_time}_ts-grid-{ts_grid}_random-action-noise-{config.collect_expert_random_action_noise}_observation-noise-{config.observation_noise}_friction-{config.friction}.pt" if not config.collect_expert_force_generate_new_data: final_data = torch.load(f"./offlinedata/{file_name}") return final_data global change_goal_flipped change_goal_flipped = False timelen = 10 # seconds if change_goal: timelen = timelen * 2.0 iter_ = timelen / dt change_goal_flipped_iter_ = iter_ / 2.0 multi_inner_mppi_with_model_collect_data = partial( inner_mppi_with_model_collect_data, model_name=model_name, env_name=env_name, roll_outs=roll_outs, time_steps=time_steps, lambda_=lambda_, sigma=sigma, dt=dt, model_seed=model_seed, save_video=save_video, state_constraint=state_constraint, change_goal=change_goal, encode_obs_time=encode_obs_time, model=model, uniq=uniq, log_debug=log_debug, episodes_per_sampler_task=config.collect_expert_episodes_per_sampler_task, config=dict(config), ts_grid=ts_grid, iter_=iter_, change_goal_flipped_iter_=change_goal_flipped_iter_, intermediate_run=intermediate_run, ) total_episodes_needed = int(collect_samples / iter_) task_inputs = [ run_seed for run_seed in range(int(total_episodes_needed / config.collect_expert_episodes_per_sampler_task)) ] episodes = [] if not debug_main: pool_outer = multiprocessing.Pool(config.collect_expert_cores_per_env_sampler) for i, result in tqdm( enumerate(pool_outer.imap_unordered(multi_inner_mppi_with_model_collect_data, task_inputs)), total=len(task_inputs), smoothing=0, ): episodes.extend(result) else: for i, task in tqdm(enumerate(task_inputs), total=len(task_inputs)): result = multi_inner_mppi_with_model_collect_data(task) episodes.extend(result) s0 = [] sn = [] a0 = [] ts = [] for episode in episodes: (es0, ea0, esn, ets) = episode s0.append(es0) sn.append(esn) a0.append(ea0) ts.append(ets) s0 = torch.cat(s0, dim=0) sn = torch.cat(sn, dim=0) a0 = torch.cat(a0, dim=0) ts = torch.cat(ts, dim=0).view(-1, 1) final_data = (s0, a0, sn, ts) if not os.path.exists("./offlinedata/"): os.makedirs("./offlinedata/") torch.save(final_data, f"./offlinedata/{file_name}") pool_outer.close() return final_data if __name__ == "__main__": torch.multiprocessing.set_start_method("spawn") defaults = get_config() debug_collector = False defaults["save_video"] = False defaults["mppi_time_steps"] = 40 defaults["collect_expert_force_generate_new_data"] = True defaults["collect_expert_cores_per_env_sampler"] = 6 defaults["sampling_policy"] = "discrete_planning" defaults["observing_fixed_frequency"] = 1 defaults["planner"] = "mppi_active_observing" # 'mppi' defaults["dt"] = 0.05 config = dotdict(defaults) seed_all(0)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu") logger = logging.getLogger() def inner_mppi_with_model_collect_data( seed, model_name, env_name, roll_outs=1000, time_steps=30, lambda_=1.0, sigma=1.0, dt=0.05, model_seed=11, save_video=False, state_constraint=False, change_goal=False, encode_obs_time=False, model=None, uniq=None, log_debug=False, episodes_per_sampler_task=10, config={}, iter_=200, change_goal_flipped_iter_=False, ts_grid="exp", intermediate_run=False, ): config = dotdict(config) env = create_env(env_name, dt=dt, ts_grid=ts_grid, friction=config.friction) ACTION_LOW = env.action_space.low[0] ACTION_HIGH = env.action_space.high[0] if env_name == "oderl-cancer": limit_actions_to_only_positive = True else: limit_actions_to_only_positive = False nx = env.get_obs().shape[0] nu = env.action_space.shape[0] dtype = torch.float32 gamma = sigma**2 off_diagonal = 0.5 * gamma mppi_noise_sigma = torch.ones((nu, nu), device=device, dtype=dtype) * off_diagonal + torch.eye( nu, device=device, dtype=dtype ) * (gamma - off_diagonal) logger.info(mppi_noise_sigma) mppi_lambda_ = 1.0 random_action_noise = config.collect_expert_random_action_noise if model_name == "random": def dynamics(state, perturbed_action): pass elif model_name == "oracle": oracle_sigma = config.observation_noise if env_name == "oderl-pendulum": dynamics_oracle = pendulum_dynamics_dt elif env_name == "oderl-cartpole": dynamics_oracle = cartpole_dynamics_dt elif env_name == "oderl-acrobot": dynamics_oracle = acrobot_dynamics_dt elif env_name == "oderl-cancer": dynamics_oracle = cancer_dynamics_dt def dynamics(*args, **kwargs): state_mu = dynamics_oracle(*args, **kwargs) return state_mu, torch.ones_like(state_mu) * oracle_sigma dynamics = partial(dynamics, friction=config.friction) def running_cost(state, action): if state_constraint: reward = env.diff_obs_reward_( state, exp_reward=False, state_constraint=state_constraint ) + env.diff_ac_reward_(action) elif change_goal: global change_goal_flipped reward = env.diff_obs_reward_( state, exp_reward=False, change_goal=change_goal, change_goal_flipped=change_goal_flipped ) + env.diff_ac_reward_(action) else: reward = env.diff_obs_reward_(state, exp_reward=False) + env.diff_ac_reward_(action) cost = -reward return cost if config.planner == "mppi": mppi_gym = MPPI( dynamics, running_cost, nx, mppi_noise_sigma, num_samples=roll_outs, horizon=time_steps, device=device, lambda_=mppi_lambda_, u_min=torch.tensor(ACTION_LOW), u_max=torch.tensor(ACTION_HIGH), u_scale=ACTION_HIGH, ) elif config.planner == "mppi_active_observing": mppi_gym = MPPIActiveObserving( dynamics, running_cost, nx, mppi_noise_sigma, num_samples=roll_outs, horizon=time_steps, device=device, lambda_=mppi_lambda_, u_min=torch.tensor(ACTION_LOW), u_max=torch.tensor(ACTION_HIGH), u_scale=ACTION_HIGH, observing_cost=config.observing_cost, sampling_policy=config.sampling_policy, observing_var_threshold=config.observing_var_threshold, limit_actions_to_only_positive=limit_actions_to_only_positive, dt=dt, ) if save_video: start_virtual_display() videos_folder = "./logs/new_videos" Path(videos_folder).mkdir(parents=True, exist_ok=True) filename = f"{videos_folder}/{env_name}_{model_name}_{uniq}.mp4" fps = int(1 / dt) def loop(): s0 = [] a0 = [] sn = [] ts = [] ACTION_LOW = env.action_space.low[0] ACTION_HIGH = env.action_space.high[0] it = 0 total_reward = 0 env.reset() start_time = time.perf_counter() mppi_gym.reset() while it < iter_: if change_goal_flipped_iter_ < it: change_goal_flipped = True state = 
env.get_obs() s0.append(state) command_start = time.perf_counter() if model_name != "random": action, costs_std = mppi_gym.command(state) if random_action_noise is not None: action += ( (torch.rand(nu, device=device) - 0.5) * 2.0 * env.action_space.high[0] ) * random_action_noise action = action.clip(min=ACTION_LOW, max=ACTION_HIGH) action = action.view(nu) else: action = torch.from_numpy(env.action_space.sample()) elapsed = time.perf_counter() - command_start state, reward, done, tsn = step_env(env, action.detach().cpu().numpy(), obs_noise=config.observation_noise) sn.append(state) a0.append(action) ts.append(tsn) total_reward += reward if log_debug: logger.info( f"action taken: {action.detach().cpu().numpy()} cost received: {-reward} | state {state.flatten()} ts {tsn.detach().cpu().numpy()} | time taken: {elapsed}s | {int(it/iter_*100)}% Complete \t | iter={it}" ) if save_video: video.append_data(env.render(mode="rgb_array", last_act=action.detach().cpu().numpy())) it += 1 total_reward = total_reward.detach().cpu().item() ddict = { "model_name": model_name, "env_name": env_name, "roll_outs": roll_outs, "time_steps": time_steps, "uniq": uniq, "episode_elapsed_time": time.perf_counter() - start_time, "dt": dt, "planner": "mpc", "total_reward": total_reward, } if save_video: logger.info(f"[Video] Watch video at : {filename}") if intermediate_run: logger.info(f"[Intermediate Result] {str(ddict)}") else: logger.info(f"[Result] {str(ddict)}") s0 = torch.from_numpy(np.stack(s0)) sn = torch.from_numpy(np.stack(sn)) a0 = torch.stack(a0).cpu() ts = torch.stack(ts).cpu() return ddict, (s0, a0, sn, ts) episodes = [] for j in range(episodes_per_sampler_task): with torch.no_grad(): if save_video: with imageio.get_writer(filename, fps=fps) as video: try: result, episode_buffer = loop() episodes.append(episode_buffer) except Exception as e: logger.info(f"[Error] Error collecting episode : {e}") else: try: result, episode_buffer = loop() episodes.append(episode_buffer) except Exception as e: logger.info(f"[Error] Error collecting episode : {e}") return episodes def mppi_with_model_collect_data( model_name, env_name, roll_outs=1000, time_steps=30, lambda_=1.0, sigma=1.0, dt=0.05, model_seed=11, save_video=False, state_constraint=False, change_goal=False, encode_obs_time=False, model=None, uniq=None, log_debug=False, collect_samples=1e6, config_in={}, debug_main=False, ts_grid="exp", intermediate_run=False, ): config = dotdict(dict(config_in)) file_name = f"replay_buffer_env-name-{env_name}_model-name-{model_name}_encode-obs-time-{encode_obs_time}_ts-grid-{ts_grid}_random-action-noise-{config.collect_expert_random_action_noise}_observation-noise-{config.observation_noise}_friction-{config.friction}.pt" if not config.collect_expert_force_generate_new_data: final_data = torch.load(f"./offlinedata/{file_name}") return final_data global change_goal_flipped change_goal_flipped = False timelen = 10 # seconds if change_goal: timelen = timelen * 2.0 iter_ = timelen / dt change_goal_flipped_iter_ = iter_ / 2.0 multi_inner_mppi_with_model_collect_data = partial( inner_mppi_with_model_collect_data, model_name=model_name, env_name=env_name, roll_outs=roll_outs, time_steps=time_steps, lambda_=lambda_, sigma=sigma, dt=dt, model_seed=model_seed, save_video=save_video, state_constraint=state_constraint, change_goal=change_goal, encode_obs_time=encode_obs_time, model=model, uniq=uniq, log_debug=log_debug, episodes_per_sampler_task=config.collect_expert_episodes_per_sampler_task, config=dict(config), ts_grid=ts_grid, 
iter_=iter_, change_goal_flipped_iter_=change_goal_flipped_iter_, intermediate_run=intermediate_run, ) total_episodes_needed = int(collect_samples / iter_) task_inputs = [ run_seed for run_seed in range(int(total_episodes_needed / config.collect_expert_episodes_per_sampler_task)) ] episodes = [] if not debug_main: pool_outer = multiprocessing.Pool(config.collect_expert_cores_per_env_sampler) for i, result in tqdm( enumerate(pool_outer.imap_unordered(multi_inner_mppi_with_model_collect_data, task_inputs)), total=len(task_inputs), smoothing=0, ): episodes.extend(result) else: for i, task in tqdm(enumerate(task_inputs), total=len(task_inputs)): result = multi_inner_mppi_with_model_collect_data(task) episodes.extend(result) s0 = [] sn = [] a0 = [] ts = [] for episode in episodes: (es0, ea0, esn, ets) = episode s0.append(es0) sn.append(esn) a0.append(ea0) ts.append(ets) s0 = torch.cat(s0, dim=0) sn = torch.cat(sn, dim=0) a0 = torch.cat(a0, dim=0) ts = torch.cat(ts, dim=0).view(-1, 1) final_data = (s0, a0, sn, ts) if not os.path.exists("./offlinedata/"): os.makedirs("./offlinedata/") torch.save(final_data, f"./offlinedata/{file_name}") pool_outer.close() return final_data if __name__ == "__main__": torch.multiprocessing.set_start_method("spawn") defaults = get_config() debug_collector = False defaults["save_video"] = False defaults["mppi_time_steps"] = 40 defaults["collect_expert_force_generate_new_data"] = True defaults["collect_expert_cores_per_env_sampler"] = 6 defaults["sampling_policy"] = "discrete_planning" defaults["observing_fixed_frequency"] = 1 defaults["planner"] = "mppi_active_observing" # 'mppi' defaults["dt"] = 0.05 config = dotdict(defaults) seed_all(0)
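The collection script above stacks every episode into a single `(s0, a0, sn, ts)` tuple (states, actions, next states, step durations) and writes it with `torch.save` under `./offlinedata/`. A minimal sketch of reading such a buffer back, using a hypothetical file name (the real one is assembled in `file_name` from the environment, model, time-grid, and noise settings):

```python
import torch

# hypothetical path; the actual name encodes env, model, ts-grid and noise settings
buffer_path = "./offlinedata/replay_buffer_example.pt"

s0, a0, sn, ts = torch.load(buffer_path)  # states, actions, next states, step durations
print(s0.shape, a0.shape, sn.shape, ts.shape)

# one transition: from state s0[i], holding action a0[i] for ts[i], the env moved to sn[i]
i = 0
print("state:", s0[i], "action:", a0[i], "dt:", ts[i].item(), "next state:", sn[i])
```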
logger = setup_logger(__file__)
2
2023-10-24 16:19:14+00:00
16k
s1tools/s1-etad
s1etad/_jupyter_support.py
[ { "identifier": "Sentinel1Etad", "path": "s1etad/product.py", "snippet": "class Sentinel1Etad:\n \"\"\"Sentinel-1 ETAD product.\n\n Class to decode and access the elements of the Sentinel ETAD product\n which specification is governed by ETAD-DLR-PS-0014.\n\n The index operator [] (implemented with the __getitem__ method) returns\n a Sentinel1EtadSwath instance.\n\n Parameters\n ----------\n product : str or pathlib.Path\n path of the S1-ETAD product (it is a directory)\n\n Attributes\n ----------\n product : pathlib.Path\n path of the S1-ETAD product (it is a directory)\n burst_catalogue : pandas.DataFrame\n dataframe containing main information of all bursts present in\n the product\n ds : netCDF.Dataset\n (provisional) the NetCDF.Dataset in which data are stored\n \"\"\"\n\n def __init__(self, product):\n # TODO: make this read-only (property)\n self.product = pathlib.Path(product)\n # TODO: ds should not be exposed\n self.ds = self._init_measurement_dataset()\n self._annot = self._init_annotation_dataset()\n self.burst_catalogue = self._init_burst_catalogue()\n\n def _init_measurement_dataset(self):\n \"\"\"Open the nc dataset.\"\"\"\n # @TODO: retrieve form manifest\n netcdf_file = next(self.product.glob(\"measurement/*.nc\"))\n rootgrp = Dataset(netcdf_file, \"r\")\n rootgrp.set_auto_mask(False)\n return rootgrp\n\n def _init_annotation_dataset(self):\n \"\"\"Open the xml annotation dataset.\"\"\"\n list_ = [i for i in self.product.glob(\"annotation/*.xml\")]\n xml_file = str(list_[0])\n root = etree.parse(xml_file).getroot()\n return root\n\n @functools.lru_cache()\n def __getitem__(self, index):\n assert index in self.swath_list, f\"{index} is not in {self.swath_list}\"\n return Sentinel1EtadSwath(self.ds[index])\n\n def __iter__(self):\n yield from self.iter_swaths()\n\n def __repr__(self):\n return f'{self.__class__.__name__}(\"{self.product}\") # 0x{id(self):x}'\n\n def __str__(self):\n return f'{self.__class__.__name__}(\"{self.product.name}\")'\n\n @property\n def number_of_swath(self):\n \"\"\"The number of swaths in the product.\"\"\"\n return len(self.ds.groups)\n\n @property\n def swath_list(self):\n \"\"\"The list of swath identifiers (str) in the product.\"\"\"\n return list(self.ds.groups.keys())\n\n def s1_product_list(self):\n \"\"\"Return the list of S-1 products used to compose the ETAD one.\"\"\"\n df = self.burst_catalogue\n\n # this ensures that each product name is located at the correct pIndex\n product_list = [\n item[1] for item in sorted(set(zip(df[\"pIndex\"], df[\"productID\"])))\n ]\n\n return product_list\n\n @property\n def grid_spacing(self):\n \"\"\"Return the grid spacing in meters.\"\"\"\n xp_list = {\n \"x\": \".//correctionGridRangeSampling\",\n \"y\": \".//correctionGridAzimuthSampling\",\n }\n dd = {}\n for tag, xp in xp_list.items():\n dd[tag] = self._xpath_to_list(self._annot, xp, dtype=float)\n dd[\"unit\"] = \"m\"\n return dd\n\n @property\n def grid_sampling(self):\n \"\"\"Return the grid spacing in s.\"\"\"\n xp_list = {\n \"x\": \".//productInformation/gridSampling/range\",\n \"y\": \".//productInformation/gridSampling/azimuth\",\n }\n dd = {}\n for tag, xp in xp_list.items():\n dd[tag] = self._xpath_to_list(self._annot, xp, dtype=float)\n dd[\"unit\"] = \"s\"\n return dd\n\n @property\n def min_azimuth_time(self):\n \"\"\"The minimum azimuth time of all bursts in the product.\"\"\"\n return datetime.datetime.fromisoformat(self.ds.azimuthTimeMin)\n\n @property\n def max_azimuth_time(self):\n \"\"\"The maximum azimuth time of all bursts 
in the product.\"\"\"\n return datetime.datetime.fromisoformat(self.ds.azimuthTimeMax)\n\n @property\n def min_range_time(self):\n \"\"\"The minimum range time of all bursts in the product.\"\"\"\n return self.ds.rangeTimeMin\n\n @property\n def max_range_time(self):\n \"\"\"The maximum range time of all bursts in the product.\"\"\"\n return self.ds.rangeTimeMax\n\n @property\n def vg(self):\n \"\"\"Mean ground velocity [m/s].\"\"\"\n try:\n xp = (\n \"productInformation/gridGroundSampling/\"\n \"averageZeroDopplerVelocity\"\n )\n vg = float(self._annot.find(xp).taxt)\n except (AttributeError, ValueError):\n vg = self.grid_spacing[\"y\"] / self.grid_sampling[\"y\"]\n return vg\n\n def processing_setting(self):\n \"\"\"Return the corrections performed.\n\n Read the xml file to identify the corrections performed.\n If a correction is not performed the matrix is filled with zeros.\n \"\"\"\n correction_list = [\n \"troposphericDelayCorrection\",\n \"ionosphericDelayCorrection\",\n \"solidEarthTideCorrection\",\n \"bistaticAzimuthCorrection\",\n \"dopplerShiftRangeCorrection\",\n \"FMMismatchAzimuthCorrection\",\n ]\n dd = {}\n xp_root = (\n \"processingInformation/processor/setapConfigurationFile/\"\n \"processorSettings/\"\n )\n for correction in correction_list:\n xp = xp_root + correction\n ret = self._xpath_to_list(self._annot, xp)\n if ret == \"true\":\n ret = True\n else:\n ret = False\n dd[correction] = ret\n return dd\n\n def _init_burst_catalogue(self):\n \"\"\"Build the burst catalog.\n\n Using information stored in the NetCDF file create a\n pandas.DataFrame containing all the elements allowing to index\n properly a burst.\n \"\"\"\n\n def _to_tdelta64(t):\n return np.float64(t * 1e9).astype(\"timedelta64[ns]\")\n\n data = collections.defaultdict(list)\n t0 = np.datetime64(self.ds.azimuthTimeMin, \"ns\")\n for swath in self.ds.groups.values():\n for burst in swath.groups.values():\n ax = burst.variables[\"azimuth\"]\n tmin = t0 + _to_tdelta64(ax[0])\n tmax = t0 + _to_tdelta64(ax[-1])\n\n data[\"bIndex\"].append(burst.bIndex)\n data[\"pIndex\"].append(burst.pIndex)\n data[\"sIndex\"].append(burst.sIndex)\n data[\"productID\"].append(burst.productID)\n data[\"swathID\"].append(burst.swathID)\n data[\"azimuthTimeMin\"].append(tmin)\n data[\"azimuthTimeMax\"].append(tmax)\n\n df = pd.DataFrame(data=data)\n\n return df\n\n def query_burst(\n self,\n first_time=None,\n product_name=None,\n last_time=None,\n swath=None,\n geometry=None,\n ):\n \"\"\"Query the burst catalogue to retrieve the burst matching by time.\n\n Parameters\n ----------\n first_time : datetime\n is set to None then set to the first time\n last_time : datetime\n if set to None the last_time = first_time\n product_name : str\n Name of a real S1 product e.g.\n S1B_IW_SLC__1SDV_20190805T162509_20190805T162...SAFE\n swath : str or list\n list of swathID e.g. 
'IW1' or ['IW1'] or ['IW1', 'IW2']\n geometry : shapely.geometry.[Point, Polygon, ...]\n A shapely geometry for which interstion will be searched\n\n Returns\n -------\n pandas.DataFrame\n Filtered panda dataframe\n \"\"\"\n # first sort the burst by time\n df = self.burst_catalogue.sort_values(by=[\"azimuthTimeMin\"])\n if first_time is None:\n first_time = df.iloc[0].azimuthTimeMin\n if last_time is None:\n last_time = df.iloc[-1].azimuthTimeMax\n\n ix0 = (df.azimuthTimeMin >= first_time) & (\n df.azimuthTimeMax <= last_time\n )\n\n if product_name is not None:\n # build a regex based on the name to avoid issues with annotation\n # products and CRC\n product_name = Sentinel1ProductName(product_name)\n product_name.to_annotation(value=\"[AS]\")\n product_name.crc = \"\"\n filter_ = product_name.recompose(with_suffix=False)\n ix0 = ix0 & self.burst_catalogue.productID.str.contains(\n filter_, regex=True\n )\n\n if swath is not None:\n if isinstance(swath, str):\n swath = [swath]\n ix0 = ix0 & df.swathID.isin(swath)\n\n if geometry is not None:\n bix_list = self.intersects(geometry)\n ix0 = ix0 & df.bIndex.isin(bix_list)\n\n return df.loc[ix0]\n\n def _selection_to_swath_list(self, selection=None):\n if selection is None:\n selection = self.burst_catalogue\n\n if isinstance(selection, pd.DataFrame):\n burst_selection = selection\n swath_list = selection.swathID.unique()\n elif isinstance(selection, str):\n burst_selection = None\n swath_list = [selection]\n else:\n # assume it is a list of swaths already\n import collections.abc\n\n assert isinstance(selection, collections.abc.Iterable)\n assert all(isinstance(item, str) for item in selection)\n burst_selection = None\n swath_list = selection\n\n return swath_list, burst_selection\n\n def iter_swaths(self, selection=None):\n \"\"\"Iterate over swaths according to the specified selection.\n\n Parameters\n ----------\n selection : list(str) or pd.Dataframe, optional\n the list of selected swath IDs or the result of a\n Sentinel1Etad.query_burst query.\n If the selection is None (default) the iteration is performed\n on all the swaths of the product.\n \"\"\"\n swath_list, _ = self._selection_to_swath_list(selection)\n for swath_name in swath_list:\n yield self[swath_name]\n\n def iter_bursts(self, selection=None):\n \"\"\"Iterate over burst according to the specified selection.\n\n Parameters\n ----------\n selection : list(int) or pd.Dataframe, optional\n the list of selected burst indexes or the result of a\n Sentinel1Etad.query_burst query.\n If the selection is None (default) the iteration is performed\n on all the bursts of the product.\n \"\"\"\n if selection is None:\n selection = self.burst_catalogue\n elif not isinstance(selection, pd.DataFrame):\n # assume it is a list of burst indexes\n bursts = selection\n if isinstance(bursts, int):\n bursts = [selection]\n # NOTE: preserve the order\n selection = self.burst_catalogue.bIndex.isin(bursts)\n\n assert isinstance(selection, pd.DataFrame)\n\n for idx, row in selection.iterrows():\n burst = self[row.swathID][row.bIndex]\n yield burst\n\n @staticmethod\n def _xpath_to_list(\n root, xpath, dtype=None, namespace=None, parse_time_func=None\n ):\n ll = [elt.text for elt in root.findall(xpath, namespace)]\n if parse_time_func is not None:\n ll = [datetime.datetime.fromisoformat(t) for t in ll]\n ll = parse_time_func(ll) # TODO: check\n ll = np.asarray(ll, dtype=dtype)\n\n if ll.size == 1:\n return ll.item(0)\n else:\n return ll\n\n def get_statistics(self, correction, meter=False):\n 
\"\"\"Return the global statistic value of the specified correction.\n\n The returned value is the pre-computed one that is stored in the\n XML annotation file of the product.\n\n Parameters\n ----------\n correction : str or ECorrectionType\n the corrections for which the statistic value is requested\n meter : bool\n if set to True then the returned value is expressed in meters,\n otherwise it is expressed in seconds (default: False)\n\n Returns\n -------\n dict\n a dictionary containing :class:`Statistics` (min, mean and max)\n for all available components of the specified correction:\n\n :x:\n a :class:`Statistics` instance relative to the range\n component of the specified correction\n :y:\n a :class:`Statistics` instance relative to the azimuth\n component of the specified correction\n :unit:\n the units of the returned statistics (\"m\" or \"s\")\n \"\"\"\n units = \"m\" if meter else \"s\"\n\n stat_xp = \"./qualityAndStatistics\"\n target = ECorrectionType(correction)\n target_tag = _STATS_TAG_MAP[target]\n\n statistics = {\"unit\": units}\n\n # NOTE: looping on element and heuristic test on tags is necessary\n # due to inconsistent naming of range and azimuth element\n # TODO: report the inconsistency to DLR? (TBD)\n correction_elem = self._annot.find(f\"{stat_xp}/{target_tag}\")\n for elem in correction_elem:\n if \"range\" in elem.tag:\n direction = \"x\"\n elif \"azimuth\" in elem.tag:\n direction = \"y\"\n else:\n continue\n\n statistics[direction] = Statistics(\n float(elem.findtext(f'min[@unit=\"{units}\"]')),\n float(elem.findtext(f'mean[@unit=\"{units}\"]')),\n float(elem.findtext(f'max[@unit=\"{units}\"]')),\n )\n\n return statistics\n\n def get_footprint(self, selection=None, merge=False):\n \"\"\"Return the footprints of all the bursts as MultiPolygon.\n\n It calls in the back the get_footprint of the Sentinel1EtadBurst class.\n\n Parameters\n ----------\n selection : list(str) or pd.Dataframe, optional\n the list of selected swath IDs or the result of a\n Sentinel1Etad.query_burst query.\n If the selection is None (default) the iteration is performed\n on all the swaths of the product.\n merge : bool\n if set to True return a single polygon that is the union of the\n footprints of all bursts\n \"\"\"\n polys = []\n swath_list, burst_selection = self._selection_to_swath_list(selection)\n for swath in self.iter_swaths(swath_list):\n polys.extend(swath.get_footprint(burst_selection))\n\n if merge:\n polys = shapely.ops.cascaded_union(polys)\n else:\n polys = MultiPolygon(polys)\n\n return polys\n\n def intersects(self, geometry: BaseGeometry):\n \"\"\"Return the list of burst indexes intersecting the input geometry.\n\n Computes the intersection of the footprint of the swath (all bursts)\n with the input geometry.\n\n Parameters\n ----------\n geometry : shapely.geometry.[Point, Polygon, MultiPolygon, line]\n\n Returns\n -------\n list\n list of all the burst intersecting with the input shape geometry\n \"\"\"\n lists_of_burst_indexes = [\n swath.intersects(geometry) for swath in self.iter_swaths()\n ]\n # return the flattened list\n return list(itertools.chain(*lists_of_burst_indexes))\n\n def _swath_merger(\n self,\n burst_var,\n selection=None,\n set_auto_mask=False,\n meter=False,\n fill_value=0.0,\n ):\n if selection is None:\n df = self.burst_catalogue\n elif not isinstance(selection, pd.DataFrame):\n df = self.query_burst(swath=selection)\n else:\n assert isinstance(selection, pd.DataFrame)\n df = selection\n\n # NOTE: assume a specific order of swath IDs\n 
first_swath = self[df.swathID.min()]\n near_burst = first_swath[first_swath.burst_list[0]]\n last_swath = self[df.swathID.max()]\n far_burst = last_swath[last_swath.burst_list[0]]\n\n rg_first_time = near_burst.sampling_start[\"x\"]\n rg_last_time = (\n far_burst.sampling_start[\"x\"]\n + far_burst.sampling[\"x\"] * far_burst.samples\n )\n az_first_time = df.azimuthTimeMin.min()\n az_last_time = df.azimuthTimeMax.max()\n az_ref_time = self.min_azimuth_time\n az_first_time_rel = (az_first_time - az_ref_time).total_seconds()\n\n sampling = self.grid_sampling\n dx = sampling[\"x\"]\n dy = sampling[\"y\"]\n\n num_samples = (\n np.round((rg_last_time - rg_first_time) / dx).astype(int) + 1\n )\n num_lines = (\n np.round(\n (az_last_time - az_first_time).total_seconds() / dy\n ).astype(int)\n + 1\n )\n\n img = np.full((num_lines, num_samples), fill_value=fill_value)\n # TODO: add some control option\n img = np.ma.array(img, mask=True, fill_value=fill_value)\n\n for swath in self.iter_swaths(df):\n # NOTE: use the private \"Sentinel1EtadSwath._burst_merger\" method\n # to be able to work only on the specified NetCDF variable\n dd_ = swath._burst_merger(\n burst_var,\n selection=df, # noqa\n set_auto_mask=set_auto_mask,\n meter=meter,\n )\n yoffset = dd_[\"first_azimuth_time\"] - az_first_time_rel\n xoffset = dd_[\"first_slant_range_time\"] - rg_first_time\n line_ofs = np.round(yoffset / dy).astype(int)\n sample_ofs = np.round(xoffset / dx).astype(int)\n\n slice_y = slice(line_ofs, line_ofs + dd_[burst_var].shape[0])\n slice_x = slice(sample_ofs, sample_ofs + dd_[burst_var].shape[1])\n\n img[slice_y, slice_x] = dd_[burst_var]\n\n return {\n burst_var: img,\n \"first_azimuth_time\": az_first_time,\n \"first_slant_range_time\": rg_first_time,\n \"sampling\": sampling,\n }\n\n def _core_merge_correction(\n self, prm_list, selection=None, set_auto_mask=True, meter=False\n ):\n dd = {}\n for dim, field in prm_list.items():\n dd_ = self._swath_merger(\n field,\n selection=selection,\n set_auto_mask=set_auto_mask,\n meter=meter,\n )\n dd[dim] = dd_[field]\n dd[\"sampling\"] = dd_[\"sampling\"]\n dd[\"first_azimuth_time\"] = dd_[\"first_azimuth_time\"]\n dd[\"first_slant_range_time\"] = dd_[\"first_slant_range_time\"]\n\n dd[\"unit\"] = \"m\" if meter else \"s\"\n\n # To compute lat/lon/h make a new selection with all gaps filled\n swath_list, _ = self._selection_to_swath_list(selection)\n near_swath = min(swath_list)\n far_swath = max(swath_list)\n idx = self.burst_catalogue.swathID >= near_swath\n idx &= self.burst_catalogue.swathID <= far_swath\n swaths = self.burst_catalogue.swathID[idx].unique()\n\n data = dd[\"x\" if \"x\" in prm_list else \"y\"]\n lines = data.shape[0]\n duration = lines * self.grid_sampling[\"y\"]\n duration = np.float64(duration * 1e9).astype(\"timedelta64[ns]\")\n first_time = dd[\"first_azimuth_time\"]\n last_time = first_time + duration\n\n filled_selection = self.query_burst(\n first_time=first_time, last_time=last_time, swath=swaths\n )\n\n dd[\"lats\"] = self._swath_merger(\n \"lats\",\n selection=filled_selection,\n set_auto_mask=set_auto_mask,\n meter=False,\n fill_value=np.nan,\n )[\"lats\"]\n dd[\"lons\"] = self._swath_merger(\n \"lons\",\n selection=filled_selection,\n set_auto_mask=set_auto_mask,\n meter=False,\n fill_value=np.nan,\n )[\"lons\"]\n dd[\"height\"] = self._swath_merger(\n \"height\",\n selection=filled_selection,\n set_auto_mask=set_auto_mask,\n meter=False,\n fill_value=np.nan,\n )[\"height\"]\n return dd\n\n def merge_correction(\n self,\n name: 
CorrectionType = ECorrectionType.SUM,\n selection=None,\n set_auto_mask=True,\n meter=False,\n direction=None,\n ):\n \"\"\"Merge multiple swaths of the specified correction variable.\n\n Data of the selected swaths (typically overlapped) are merged\n together to form a single data matrix with a consistent (range and\n azimuth) time axis.\n\n Note\n ----\n\n The current implementation uses a very simple algorithm that\n iterates over selected swaths and bursts and stitches correction\n data together.\n\n In overlapping regions, new data simpy overwrite the old ones.\n This is an easy algorithm and perfectly correct for atmospheric\n and geodetic correction.\n\n It is, instead, sub-optimal for system corrections (bi-static,\n Doppler, FM Rate) which have different values in overlapping\n regions. In this case results are *not* correct.\n\n Parameters\n ----------\n name : str or CorrectionType\n the name of the desired correction\n selection : list or pandas.DataFrame\n list of selected bursts (by default all bursts are selected)\n set_auto_mask : bool\n requested for netCDF4 to avoid retrieving a masked array\n meter : bool\n transform the result in meters\n direction : str or None\n if set to \"x\" (for range) or \"y\" (for \"azimuth\") only extracts\n the specified correction component.\n By default (None) all available components are returned.\n\n Returns\n -------\n dict\n a dictionary containing merged data and sampling information:\n\n :<burst_var_name>:\n merged data for the selected burst_var\n :first_azimuth_time:\n the relative azimuth first time\n :first_slant_range_time:\n the relative (slant) range first time\n :sampling:\n a dictionary containing the sampling along the\n 'x' and 'y' directions and the 'unit'\n :units:\n of the correction (seconds or meters)\n :lats:\n the matrix of latitude values (in degrees) for each point\n :lons:\n the matrix of longitude values (in degrees) for each point\n :height:\n the matrix of height values (in meters) for each point\n \"\"\"\n correction_type = ECorrectionType(name) # check values\n prm_list = _CORRECTION_NAMES_MAP[correction_type.value]\n if direction is not None:\n prm_list = {direction: prm_list[direction]}\n correction = self._core_merge_correction(\n prm_list,\n selection=selection,\n set_auto_mask=set_auto_mask,\n meter=meter,\n )\n correction[\"name\"] = correction_type.value\n return correction" }, { "identifier": "Sentinel1EtadSwath", "path": "s1etad/product.py", "snippet": "class Sentinel1EtadSwath:\n \"\"\"Object representing a swath in the S1-ETAD product.\n\n This objects are returned by methods of the :class:`Sentine1Etad` class.\n It is not expected that the user instantiates this objects directly.\n \"\"\"\n\n def __init__(self, nc_group):\n self._grp = nc_group\n\n @functools.lru_cache()\n def __getitem__(self, burst_index):\n burst_name = f\"Burst{burst_index:04d}\"\n return Sentinel1EtadBurst(self._grp[burst_name])\n\n def __iter__(self):\n yield from self.iter_bursts()\n\n def __repr__(self):\n return f'{self.__class__.__name__}(\"{self._grp.path}\") 0x{id(self):x}'\n\n @property\n def burst_list(self):\n \"\"\"The list of burst identifiers (str) of all bursts in the swath.\"\"\"\n return [burst.bIndex for burst in self._grp.groups.values()]\n\n @property\n def number_of_burst(self):\n \"\"\"The number of bursts in the swath.\"\"\"\n return len(self._grp.groups)\n\n @property\n def swath_id(self):\n \"\"\"The swath identifier (str).\"\"\"\n return self._grp.swathID\n\n @property\n def swath_index(self):\n 
\"\"\"The swath index (int).\"\"\"\n return self._grp.sIndex\n\n @property\n def sampling_start(self):\n \"\"\"Relative sampling start times.\"\"\"\n first_burst_index = self.burst_list[0]\n first_burst = self[first_burst_index]\n return first_burst.sampling_start\n\n @property\n def sampling(self):\n \"\"\"Sampling in seconds used for all bursts of the swath.\n\n A dictionary containing the following keys:\n\n * \"x\": range spacing,\n * \"y\": azimuth spacing,\n * \"units\": the measurement units used for \"x' and \"y\"\n \"\"\"\n first_burst_index = self.burst_list[0]\n first_burst = self[first_burst_index]\n return first_burst.sampling\n\n def _selection_to_burst_index_list(self, selection=None):\n if selection is None:\n index_list = self.burst_list\n elif isinstance(selection, pd.DataFrame):\n idx = selection.swathID == self.swath_id\n index_list = selection.bIndex[idx].values\n else:\n index_list = selection\n return index_list\n\n def iter_bursts(self, selection=None):\n \"\"\"Iterate over bursts according to the specified selection.\n\n Parameters\n ----------\n selection : list(int) or pd.Dataframe, optional\n the list of selected bursts or result of a\n Sentinel1Etad.query_burst query.\n If the selection is None (default) the iteration is performed\n on all the burst of the swath.\n \"\"\"\n index_list = self._selection_to_burst_index_list(selection)\n for burst_index in index_list:\n yield self[burst_index]\n\n def get_footprint(self, selection=None, merge=False):\n \"\"\"Return the footprints of all the bursts as MultiPolygon.\n\n It calls in the back the get_footprint of the Sentinel1EtadBurst class.\n\n Parameters\n ----------\n selection : list(int) or pd.Dataframe, optional\n the list of selected bursts or result of a\n Sentinel1Etad.query_burst query.\n If the selection is None (default) the iteration is performed\n on all the burst of the swath.\n merge : bool\n if set to True return a single polygon that is the union of the\n footprints of all bursts\n \"\"\"\n polys = [\n burst.get_footprint() for burst in self.iter_bursts(selection)\n ]\n if merge:\n polys = shapely.ops.cascaded_union(polys)\n else:\n polys = MultiPolygon(polys)\n\n return polys\n\n def intersects(self, geometry: BaseGeometry):\n \"\"\"Return the list of burst indexes intersecting the input geometry.\n\n Computes the intersection of the footprint of the swath (all bursts)\n with the input Geometry\n\n Parameters\n ----------\n geometry : shapely.geometry.[Point, Polygon, MultiPolygon, line]\n\n Returns\n -------\n list\n list of the indexes of all bursts intersecting with the input\n geometry\n \"\"\"\n assert isinstance(\n geometry, BaseGeometry\n ), \"The input shape is not a shapely BaseGeometry object\"\n burst_index_list = []\n swath_footprint = self.get_footprint(merge=True)\n if swath_footprint.intersects(geometry):\n burst_index_list = [\n b.burst_index\n for b in self.iter_bursts()\n if b.intersects(geometry)\n ]\n return burst_index_list\n\n def _burst_merger(\n self,\n burst_var,\n selection=None,\n az_time_min=None,\n az_time_max=None,\n set_auto_mask=False,\n meter=False,\n fill_value=0.0,\n ):\n \"\"\"Low level method to de-burst a NetCDF variable.\n\n The de-burst strategy is simple as the latest line is on top of the\n oldest.\n\n Parameters\n ----------\n burst_var : str\n one of the burst netcdf variables\n selection : list or pandas.DataFrame\n list of selected bursts (by default all bursts are selected)\n az_time_min : float\n minimum azimuth time of the merged swath\n (relative to 
the reference annotated in the NetCDF root)\n az_time_max : float\n maximum azimuth tim eof the merged swath\n (relative to the reference annotated in the NetCDF root)\n set_auto_mask : bool\n requested for netCDF4 to avoid retrieving a masked array\n meter : bool\n transform the result in meters\n\n Returns\n -------\n dict\n a dictionary containing merged data and sampling information:\n\n :<burst_var_name>: merged data for the selected burst_var\n :first_azimuth_time: the relative azimuth first time\n :first_slant_range_time: the relative (slant) range first time\n :sampling: a dictionary containing the sampling along the\n 'x' and 'y' directions and the 'unit'\n \"\"\"\n burst_index_list = self._selection_to_burst_index_list(selection)\n\n # Find what is the extent of the acquisition in azimuth\n first_burst = self[burst_index_list[0]]\n last_burst = self[burst_index_list[-1]]\n\n if az_time_min is None:\n t0 = first_burst.sampling_start[\"y\"]\n else:\n t0 = az_time_min\n\n last_azimuth, _ = last_burst.get_burst_grid()\n if az_time_max is None:\n t1 = last_azimuth[-1]\n else:\n t1 = az_time_max\n\n tau0 = min(\n burst.sampling_start[\"x\"]\n for burst in self.iter_bursts(burst_index_list)\n )\n\n # grid sampling\n dt = first_burst.sampling[\"y\"]\n dtau = first_burst.sampling[\"x\"]\n\n num_lines = np.round((t1 - t0) / dt).astype(int) + 1\n num_samples = max(\n burst.samples for burst in self.iter_bursts(burst_index_list)\n )\n\n debursted_var = np.full(\n (num_lines, num_samples), fill_value=fill_value\n )\n # TODO: add some control option\n debursted_var = np.ma.array(\n debursted_var, mask=True, fill_value=fill_value\n )\n\n for burst_ in self.iter_bursts(burst_index_list):\n assert (\n dt == burst_.sampling[\"y\"]\n ), \"The azimuth sampling is changing long azimuth\"\n assert (\n first_burst.sampling_start[\"x\"] == burst_.sampling_start[\"x\"]\n ), \"The 2-way range gridStartRangeTime is changing long azimuth\"\n\n # get the timing of the burst and convert into line index\n az_time_, rg_time_ = burst_.get_burst_grid()\n line_index_ = np.round((az_time_ - t0) / dt).astype(int)\n p0 = np.round((rg_time_[0] - tau0) / dtau).astype(int)\n\n # NOTE: use the private \"Sentinel1EtadBurst._get_etad_param\" method\n # to be able to work only on the specified NetCDF variable\n var_ = burst_._get_etad_param(\n burst_var, set_auto_mask=set_auto_mask, meter=meter # noqa\n )\n\n _, burst_samples = var_.shape\n debursted_var[line_index_, p0 : p0 + burst_samples] = var_\n\n return {\n burst_var: debursted_var,\n \"first_azimuth_time\": t0,\n \"first_slant_range_time\": first_burst.sampling_start[\"x\"],\n \"sampling\": first_burst.sampling,\n }\n\n def _core_merge_correction(\n self, prm_list, selection=None, set_auto_mask=True, meter=False\n ):\n dd = {}\n for dim, field in prm_list.items():\n dd_ = self._burst_merger(\n field,\n selection=selection,\n set_auto_mask=set_auto_mask,\n meter=meter,\n )\n dd[dim] = dd_[field]\n dd[\"sampling\"] = dd_[\"sampling\"]\n dd[\"first_azimuth_time\"] = dd_[\"first_azimuth_time\"]\n dd[\"first_slant_range_time\"] = dd_[\"first_slant_range_time\"]\n\n dd[\"unit\"] = \"m\" if meter else \"s\"\n dd[\"lats\"] = self._burst_merger(\n \"lats\", set_auto_mask=set_auto_mask, meter=False\n )[\"lats\"]\n dd[\"lons\"] = self._burst_merger(\n \"lons\", set_auto_mask=set_auto_mask, meter=False\n )[\"lons\"]\n dd[\"height\"] = self._burst_merger(\n \"height\", set_auto_mask=set_auto_mask, meter=False\n )[\"height\"]\n return dd\n\n def merge_correction(\n self,\n name: 
CorrectionType = ECorrectionType.SUM,\n selection=None,\n set_auto_mask=True,\n meter=False,\n direction=None,\n ):\n \"\"\"Merge multiple bursts of the specified correction variable.\n\n Data of the selected bursts (typically overlapped) are merged\n together to form a single data matrix with a consistent (azimuth)\n time axis.\n\n Note\n ----\n\n The current implementation uses a very simple algorithm that\n iterates over selected bursts and stitches correction data\n together.\n\n In overlapping regions, new data simpy overwrite the old ones.\n This is an easy algorithm and perfectly correct for atmospheric\n and geodetic correction.\n\n It is, instead, sub-optimal for system corrections (bi-static,\n Doppler, FM Rate) which have different values in overlapping\n regions. In this case results are *not* correct.\n\n Parameters\n ----------\n name : str or CorrectionType\n the name of the desired correction\n selection : list or pandas.DataFrame\n list of selected bursts (by default all bursts are selected)\n set_auto_mask : bool\n requested for netCDF4 to avoid retrieving a masked array\n meter : bool\n transform the result in meters\n direction : str or None\n if set to \"x\" (for range) or \"y\" (for \"azimuth\") only extracts\n the specified correction component.\n By default (None) all available components are returned.\n\n Returns\n -------\n dict\n a dictionary containing merged data and sampling information:\n\n :<burst_var_name>:\n merged data for the selected burst_var\n :first_azimuth_time:\n the relative azimuth first time\n :first_slant_range_time:\n the relative (slant) range first time\n :sampling:\n a dictionary containing the sampling along the\n 'x' and 'y' directions and the 'unit'\n :units:\n of the correction (seconds or meters)\n :lats:\n the matrix of latitude values (in degrees) for each point\n :lons:\n the matrix of longitude values (in degrees) for each point\n :height:\n the matrix of height values (in meters) for each point\n \"\"\"\n correction_type = ECorrectionType(name) # check values\n prm_list = _CORRECTION_NAMES_MAP[correction_type.value]\n if direction is not None:\n prm_list = {direction: prm_list[direction]}\n correction = self._core_merge_correction(\n prm_list,\n selection=selection,\n set_auto_mask=set_auto_mask,\n meter=meter,\n )\n correction[\"name\"] = correction_type.value\n return correction" }, { "identifier": "Sentinel1EtadBurst", "path": "s1etad/product.py", "snippet": "class Sentinel1EtadBurst:\n \"\"\"Object representing a burst in the S1-ETAD product.\n\n This objects are returned by methods of the :class:`Sentinel1EtadSwath`\n class.\n It is not expected that the user instantiates this objects directly.\n \"\"\"\n\n def __init__(self, nc_group):\n self._grp = nc_group\n self._geocoder = None\n\n def __repr__(self):\n return f'{self.__class__.__name__}(\"{self._grp.path}\") 0x{id(self):x}'\n\n @property\n def product_id(self):\n \"\"\"The S1 product (str) to which the burst object is associated.\"\"\"\n return self._grp.productID\n\n @property\n def swath_id(self):\n \"\"\"The swath identifier (str) to which the burst belongs.\"\"\"\n return self._grp.swathID\n\n @property\n def burst_id(self):\n \"\"\"The burst identifier (str).\"\"\"\n return self._grp.name\n\n @property\n def product_index(self):\n \"\"\"Index (int) of the S1 product to which the burst is associated.\"\"\"\n return self._grp.pIndex\n\n @property\n def swath_index(self):\n \"\"\"The index (int) of the swath to which the burst belongs.\"\"\"\n return 
self._grp.sIndex\n\n @property\n def burst_index(self):\n \"\"\"The index (int) of the burst.\"\"\"\n return self._grp.bIndex\n\n @functools.lru_cache()\n def get_footprint(self):\n \"\"\"Return the footprint of ghe bursts as shapely.Polygon.\n\n It gets the lat/lon/height grid and extract the 4 corners.\n \"\"\"\n lats, lons, heights = self.get_lat_lon_height()\n corner_list = [(0, 0), (0, -1), (-1, -1), (-1, 0)]\n etaf_burst_footprint = []\n for corner in corner_list:\n lat_, lon_, h_ = lats[corner], lons[corner], heights[corner]\n etaf_burst_footprint.append((lon_, lat_, h_))\n etaf_burst_footprint = Polygon(etaf_burst_footprint)\n return etaf_burst_footprint\n\n def intersects(self, geometry: BaseGeometry):\n \"\"\"Intersects the footprint of the burst with the provided shape\n\n Parameters\n ----------\n geometry : shapely.geometry.[Point, Polygon, MultiPolygon, line]\n\n Returns\n -------\n bool\n True if intersects, False otherwise\n \"\"\"\n assert isinstance(\n geometry, BaseGeometry\n ), \"Not a shapely BaseGeometry object\"\n return self.get_footprint().intersects(geometry)\n\n def get_burst_grid(self):\n \"\"\"Return the t, tau grid of the burst.\"\"\"\n azimuth = self._get_etad_param(\"azimuth\", set_auto_mask=True)\n range_ = self._get_etad_param(\"range\", set_auto_mask=True)\n return azimuth, range_\n\n @property\n def sampling_start(self):\n \"\"\"Relative sampling start times.\n\n Value in seconds relative to the beginning of the product.\n \"\"\"\n # TODO: put a reference in the docstring to the proper\n # Sentinel1Etad property.\n return dict(\n x=self._grp.gridStartRangeTime,\n y=self._grp.gridStartAzimuthTime,\n units=\"s\",\n )\n\n @property\n def sampling(self):\n \"\"\"Sampling in seconds used for all bursts of the swath.\n\n A dictionary containing the following keys:\n\n * \"x\": range spacing,\n * \"y\": azimuth spacing,\n * \"units\": the measurement units used for \"x' and \"y\"\n \"\"\"\n return dict(\n x=self._grp.gridSamplingRange,\n y=self._grp.gridSamplingAzimuth,\n units=\"s\",\n )\n\n @property\n def lines(self):\n \"\"\"The number of lines in the burst.\"\"\"\n return self._grp.dimensions[\"azimuthExtent\"].size\n\n @property\n def samples(self):\n \"\"\"The number of samples in the burst.\"\"\"\n return self._grp.dimensions[\"rangeExtent\"].size\n\n @property\n def vg(self) -> float:\n \"\"\"Average zero-Doppler ground velocity [m/s].\"\"\"\n return self._grp.averageZeroDopplerVelocity\n\n @property\n def reference_polarization(self) -> str:\n \"\"\"Reverence polarization (string).\"\"\"\n return self._grp.referencePolarization\n\n def get_polarimetric_channel_offset(self, channel: str) -> dict:\n \"\"\"Polarimetric channel delay.\n\n Return the electronic delay of the specified polarimetric channel\n w.r.t. 
the reference one (see\n :data:`Sentinel1EtadBurst.reference_polarization`).\n\n channel : str\n the string ID of the requested polarimetric channel:\n * 'VV' or 'VH' for DV products\n * 'HH' or 'HV' for DH products\n \"\"\"\n if channel not in {\"HH\", \"HV\", \"VV\", \"VH\"}:\n raise ValueError(f\"invalid channel ID: {channel!r}\")\n\n if channel[0] != self._grp.referencePolarization[0]:\n raise ValueError(\n f\"polarimetric channel not available: {channel!r}\"\n )\n\n data = dict(units=\"s\")\n\n if channel == \"HH\":\n data[\"x\"] = (self._grp.rangeOffsetHH,)\n data[\"y\"] = (self._grp.rangeOffsetHH,)\n elif channel == \"HV\":\n data[\"x\"] = (self._grp.rangeOffsetHV,)\n data[\"y\"] = (self._grp.rangeOffsetHV,)\n elif channel == \"VH\":\n data[\"x\"] = (self._grp.rangeOffsetVH,)\n data[\"y\"] = (self._grp.rangeOffsetVH,)\n elif channel == \"VV\":\n data[\"x\"] = (self._grp.rangeOffsetVV,)\n data[\"y\"] = (self._grp.rangeOffsetVV,)\n\n return data\n\n def get_timing_calibration_constants(self) -> dict:\n try:\n return dict(\n x=self._grp.instrumentTimingCalibrationRange,\n y=self._grp.instrumentTimingCalibrationAzimuth,\n units=\"s\",\n )\n except AttributeError:\n # @COMPATIBILITY: with SETAP , v1.6\n warnings.warn(\n \"instrument timing calibration constants are not available \"\n \"in the NetCDF data component this product. \"\n \"Calibration constants have been added to the NetCDF \"\n \"component in SETAP v1.6 (ETAD-DLR-PS-0014 - \"\n '\"ETAD Product Format Specification\" Issue 1.5).'\n )\n return dict(x=0, y=0, units=\"s\")\n\n def _get_etad_param(\n self, name, set_auto_mask=False, transpose=False, meter=False\n ):\n assert (\n name in self._grp.variables\n ), f\"Parameter {name!r} is not allowed\"\n\n self._grp.set_auto_mask(set_auto_mask)\n\n # TODO: avoid double copies\n # TODO: decimation factor\n field = np.asarray(self._grp[name])\n if transpose:\n field = np.transpose(field)\n\n if meter:\n if name.endswith(\"Az\"):\n k = self._grp.averageZeroDopplerVelocity\n elif name.endswith(\"Rg\"):\n k = constants.c / 2\n else:\n # it is not a correction (azimuth, range, lats, lons, height)\n k = 1\n warnings.warn(\n f\"the {name} is not a correction: \"\n 'the \"meter\" parameter will be ignored'\n )\n field *= k\n\n return field\n\n def get_lat_lon_height(self, transpose=False):\n \"\"\"Return the latitude, longitude and height for each point.\n\n Data are returned as (3) matrices (lines x samples).\n Latitude and longitude are expressed in degrees, height is\n expressed in meters.\n \"\"\"\n lats = self._get_etad_param(\n \"lats\", transpose=transpose, meter=False, set_auto_mask=True\n )\n lons = self._get_etad_param(\n \"lons\", transpose=transpose, meter=False, set_auto_mask=True\n )\n h = self._get_etad_param(\n \"height\", transpose=transpose, meter=False, set_auto_mask=True\n )\n return lats, lons, h\n\n def _core_get_correction(\n self, prm_list, set_auto_mask=False, transpose=False, meter=False\n ):\n correction = {}\n for dim, field in prm_list.items():\n correction[dim] = self._get_etad_param(\n field,\n set_auto_mask=set_auto_mask,\n transpose=transpose,\n meter=meter,\n )\n\n correction[\"unit\"] = \"m\" if meter else \"s\"\n\n return correction\n\n def get_correction(\n self,\n name: CorrectionType = ECorrectionType.SUM,\n set_auto_mask=False,\n transpose=False,\n meter=False,\n direction=None,\n ):\n \"\"\"Retrieve the correction for the specified correction \"name\".\n\n Puts the results in a dict.\n\n Parameters\n ----------\n name : ECorrectionType or str\n the 
desired correction\n set_auto_mask : bool\n requested for netCDF4 to avoid retrieving a masked array\n transpose : bool\n requested to retrieve the correction in array following the\n numpy convention for dimensions (default: False)\n meter : bool\n transform the result in meters\n direction : str or None\n if set to \"x\" (for range) or \"y\" (for \"azimuth\") only extracts\n the specified correction component.\n By default (None) all available components are returned.\n\n Returns\n -------\n dict\n a dictionary containing the following items for the\n requested correction:\n\n :x: correction in range (if applicable)\n :y: correction in azimuth (if applicable)\n :unit: 'm' or 's'\n :name: name of the correction\n \"\"\"\n correction_type = ECorrectionType(name) # check values\n name = correction_type.value\n prm_list = _CORRECTION_NAMES_MAP[name]\n if direction is not None:\n prm_list = {direction: prm_list[direction]}\n correction = self._core_get_correction(\n prm_list,\n set_auto_mask=set_auto_mask,\n transpose=transpose,\n meter=meter,\n )\n correction[\"name\"] = name\n return correction\n\n def _get_geocoder(self):\n if self._geocoder is None:\n from .geometry import GridGeocoding\n\n azimuth, range_ = self.get_burst_grid()\n lats, lons, heights = self.get_lat_lon_height()\n self._geocoder = GridGeocoding(\n lats, lons, heights, xaxis=range_, yaxis=azimuth\n )\n return self._geocoder\n\n def radar_to_geodetic(self, tau, t, deg=True):\n \"\"\"Convert RADAR coordinates into geodetic coordinates.\n\n Compute the geodetic coordinates (lat, lon, h) corresponding to\n RADAR coordinates (tau, t), i.e. fast time (range time) and slow\n time (azimuth time expressed in seconds form the reference\n :data:`Sentinel1Etad.min_azimuth_time`)::\n\n (tau, t) -> (lat, lon, h)\n\n If ``deg`` is True the output ``lat`` and ``lon`` are expressed\n in degrees, otherwise in radians.\n\n The implementation is approximated and exploits pre-computed grids\n of latitude, longitude and height values.\n\n The method is not as accurate as solving the range-Doppler equations.\n\n .. seealso:: :class:`s1etad.geometry.GridGeocoding`.\n \"\"\"\n return self._get_geocoder().forward_geocode(tau, t, deg=deg)\n\n def geodetic_to_radar(self, lat, lon, h=0, deg=True):\n \"\"\"Convert geodetic coordinates into RADAR coordinates.\n\n Compute the RADAR coordinates (tau, t), i.e. fast time (range time)\n and slow time (azimuth time expressed in seconds form the reference\n :data:`Sentinel1Etad.min_azimuth_time`) corresponding to\n geodetic coordinates (lat, lon, h)::\n\n (lat, lon, h) -> (tau, t)\n\n If ``deg`` is True it is assumed that input ``lat`` and ``lon``\n are expressed in degrees, otherwise it is assumed that angles\n are expressed in radians.\n\n The implementation is approximated and exploits pre-computed grids\n of latitude, longitude and height values.\n\n The method is not as accurate as solving the range-Doppler equations.\n\n .. seealso:: :class:`s1etad.geometry.GridGeocoding`.\n \"\"\"\n return self._get_geocoder().backward_geocode(lat, lon, h, deg=deg)\n\n def radar_to_image(self, t, tau):\n \"\"\"Convert RADAR coordinates into image coordinates.\n\n Compute the image coordinates (line, sample) corresponding\n to RADAR coordinates (tau, t), i.e. 
fast time (range time) and\n slow time (azimuth time expressed in seconds form the reference\n :data:`Sentinel1Etad.min_azimuth_time`)::\n\n (tau, t) -> (line, sample)\n \"\"\"\n line = (t - self.sampling_start[\"y\"]) / self.sampling[\"y\"]\n sample = (tau - self.sampling_start[\"x\"]) / self.sampling[\"x\"]\n return line, sample\n\n def image_to_radar(self, line, sample):\n \"\"\"Convert image coordinates into RADAR coordinates.\n\n Compute the RADAR coordinates (tau, t), i.e. fast time (range time)\n and slow time (azimuth time expressed in seconds form the reference\n :data:`Sentinel1Etad.min_azimuth_time`) corresponding to\n image coordinates (line, sample)::\n\n (line, sample) -> (t, tau)\n \"\"\"\n t = self.sampling_start[\"y\"] + line * self.sampling[\"y\"]\n tau = self.sampling_start[\"x\"] + sample * self.sampling[\"x\"]\n return t, tau" } ]
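The context snippets above document the public API of Sentinel1Etad, Sentinel1EtadSwath and Sentinel1EtadBurst (burst queries, footprints, correction merging, coordinate conversions). As a rough orientation only, a minimal usage sketch follows; the top-level import path, the product file name and the "IW2" swath ID are assumptions made for illustration and are not taken from this record.

# Minimal usage sketch (assumed import path and product name).
from s1etad import Sentinel1Etad, ECorrectionType

eta = Sentinel1Etad("S1A_IW_ETA__AXDV_example.SAFE")  # hypothetical product path

# Select the bursts of one swath, inspect the merged footprint and retrieve
# the total ("sum") correction expressed in meters, as documented above.
df = eta.query_burst(swath=["IW2"])
footprint = eta.get_footprint(selection=df, merge=True)
correction = eta.merge_correction(ECorrectionType.SUM, selection=df, meter=True)
print(footprint.bounds, correction["unit"])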
from .product import Sentinel1Etad, Sentinel1EtadSwath, Sentinel1EtadBurst
13447
# -*- coding: utf-8 -*-


def _sentinel1_etad_repr_pretty_(obj, p, cycle):
    if cycle:
        p.text(repr(obj))
    else:
        p.text(repr(obj))
        p.break_()
        plist = obj.s1_product_list()
        if isinstance(plist, str):
            plist = [plist]
        p.text(f"Number of Sentinel-1 slices: {len(plist)}")
        p.break_()
        with p.group(2, "Sentinel-1 products list:"):
            for name in plist:
                p.break_()
                p.text(name)
        p.break_()
        p.text(f"Number of swaths: {obj.number_of_swath}")
        p.break_()
        p.text("Swath list: {}".format(", ".join(obj.swath_list)))
        p.break_()
        with p.group(2, "Azimuth time:"):
            p.break_()
            p.text(f"min: {obj.min_azimuth_time}")
            p.break_()
            p.text(f"max: {obj.max_azimuth_time}")
        p.break_()
        with p.group(2, "Range time:"):
            p.break_()
            p.text(f"min: {obj.min_range_time}")
            p.break_()
            p.text(f"max: {obj.max_range_time}")
        p.break_()
        with p.group(2, "Grid sampling:"):
            for key, value in obj.grid_sampling.items():
                p.break_()
                p.text(f"{key}: {value}")
        p.break_()
        with p.group(2, "Grid spacing:"):
            for key, value in obj.grid_spacing.items():
                p.break_()
                p.text(f"{key}: {value}")
        p.break_()
        with p.group(2, "Processing settings:"):
            for key, value in obj.processing_setting().items():
                p.break_()
                p.text(f"{key}: {value}")


def _sentinel1_etad_swath_repr_pretty_(obj, p, cycle):
    if cycle:
        p.text(repr(obj))
    else:
        p.text(repr(obj))
        p.break_()
        p.text(f"Swaths ID: {obj.swath_id}")
        p.break_()
        p.text(f"Number of bursts: {obj.number_of_burst}")
        p.break_()
        p.text("Burst list: " + str(obj.burst_list))
        p.break_()
        with p.group(2, "Sampling start:"):
            for key, value in obj.sampling_start.items():
                p.break_()
                p.text(f"{key}: {value}")
        p.break_()
        with p.group(2, "Sampling:"):
            for key, value in obj.sampling.items():
                p.break_()
                p.text(f"{key}: {value}")


def _sentinel1_etad_burst_repr_pretty_(obj, p, cycle):
    if cycle:
        p.text(repr(obj))
    else:
        p.text(repr(obj))
        p.break_()
        p.text(f"Swaths ID: {obj.swath_id}")
        p.break_()
        p.text(f"Burst index: {obj.burst_index}")
        p.break_()
        p.text(f"Shape: ({obj.lines}, {obj.samples})")
        p.break_()
        with p.group(2, "Sampling start:"):
            for key, value in obj.sampling_start.items():
                p.break_()
                p.text(f"{key}: {value}")
        p.break_()
        with p.group(2, "Sampling:"):
            for key, value in obj.sampling.items():
                p.break_()
                p.text(f"{key}: {value}")


def _register_jupyter_formatters():
    try:
        ipy = get_ipython()  # noqa
    except NameError:
        return False
    else:
        formatter = ipy.display_formatter.formatters["text/plain"]
        formatter.for_type(Sentinel1Etad, _sentinel1_etad_repr_pretty_)
        formatter.for_type(
# -*- coding: utf-8 -*-


def _sentinel1_etad_repr_pretty_(obj, p, cycle):
    if cycle:
        p.text(repr(obj))
    else:
        p.text(repr(obj))
        p.break_()
        plist = obj.s1_product_list()
        if isinstance(plist, str):
            plist = [plist]
        p.text(f"Number of Sentinel-1 slices: {len(plist)}")
        p.break_()
        with p.group(2, "Sentinel-1 products list:"):
            for name in plist:
                p.break_()
                p.text(name)
        p.break_()
        p.text(f"Number of swaths: {obj.number_of_swath}")
        p.break_()
        p.text("Swath list: {}".format(", ".join(obj.swath_list)))
        p.break_()
        with p.group(2, "Azimuth time:"):
            p.break_()
            p.text(f"min: {obj.min_azimuth_time}")
            p.break_()
            p.text(f"max: {obj.max_azimuth_time}")
        p.break_()
        with p.group(2, "Range time:"):
            p.break_()
            p.text(f"min: {obj.min_range_time}")
            p.break_()
            p.text(f"max: {obj.max_range_time}")
        p.break_()
        with p.group(2, "Grid sampling:"):
            for key, value in obj.grid_sampling.items():
                p.break_()
                p.text(f"{key}: {value}")
        p.break_()
        with p.group(2, "Grid spacing:"):
            for key, value in obj.grid_spacing.items():
                p.break_()
                p.text(f"{key}: {value}")
        p.break_()
        with p.group(2, "Processing settings:"):
            for key, value in obj.processing_setting().items():
                p.break_()
                p.text(f"{key}: {value}")


def _sentinel1_etad_swath_repr_pretty_(obj, p, cycle):
    if cycle:
        p.text(repr(obj))
    else:
        p.text(repr(obj))
        p.break_()
        p.text(f"Swaths ID: {obj.swath_id}")
        p.break_()
        p.text(f"Number of bursts: {obj.number_of_burst}")
        p.break_()
        p.text("Burst list: " + str(obj.burst_list))
        p.break_()
        with p.group(2, "Sampling start:"):
            for key, value in obj.sampling_start.items():
                p.break_()
                p.text(f"{key}: {value}")
        p.break_()
        with p.group(2, "Sampling:"):
            for key, value in obj.sampling.items():
                p.break_()
                p.text(f"{key}: {value}")


def _sentinel1_etad_burst_repr_pretty_(obj, p, cycle):
    if cycle:
        p.text(repr(obj))
    else:
        p.text(repr(obj))
        p.break_()
        p.text(f"Swaths ID: {obj.swath_id}")
        p.break_()
        p.text(f"Burst index: {obj.burst_index}")
        p.break_()
        p.text(f"Shape: ({obj.lines}, {obj.samples})")
        p.break_()
        with p.group(2, "Sampling start:"):
            for key, value in obj.sampling_start.items():
                p.break_()
                p.text(f"{key}: {value}")
        p.break_()
        with p.group(2, "Sampling:"):
            for key, value in obj.sampling.items():
                p.break_()
                p.text(f"{key}: {value}")


def _register_jupyter_formatters():
    try:
        ipy = get_ipython()  # noqa
    except NameError:
        return False
    else:
        formatter = ipy.display_formatter.formatters["text/plain"]
        formatter.for_type(Sentinel1Etad, _sentinel1_etad_repr_pretty_)
        formatter.for_type(
Sentinel1EtadSwath, _sentinel1_etad_swath_repr_pretty_
1
2023-10-27 13:47:30+00:00
16k
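For this record the gold next_line continues the formatter.for_type( call on which cropped_code is truncated. Spliced together, the registration function reads roughly as sketched below; the closing of the call, the burst-formatter registration and the final return value are assumptions about the remainder of the file, not values taken from the record.

# Sketch of the completed function, assuming the continuation past the gold line.
def _register_jupyter_formatters():
    try:
        ipy = get_ipython()  # noqa
    except NameError:
        return False
    else:
        formatter = ipy.display_formatter.formatters["text/plain"]
        formatter.for_type(Sentinel1Etad, _sentinel1_etad_repr_pretty_)
        formatter.for_type(
            Sentinel1EtadSwath, _sentinel1_etad_swath_repr_pretty_
        )
        # Assumed continuation beyond the gold next_line (not part of the record):
        formatter.for_type(Sentinel1EtadBurst, _sentinel1_etad_burst_repr_pretty_)
        return True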
ifrit98/storage-subnet
neurons/api.py
[ { "identifier": "protocol", "path": "storage/protocol.py", "snippet": "class Store(bt.Synapse):\nclass StoreUser(bt.Synapse):\nclass Challenge(bt.Synapse):\nclass Retrieve(bt.Synapse):\nclass RetrieveUser(bt.Synapse):\n def __str__(self):\n def __str__(self):\n def __str__(self):" }, { "identifier": "hash_data", "path": "storage/shared/ecc.py", "snippet": "def hash_data(data):\n \"\"\"\n Compute a SHA3-256 hash of the input data and return its integer representation.\n\n The function handles both byte-like and non-byte-like inputs by converting non-byte inputs to\n strings and then encoding to bytes before hashing.\n\n Parameters:\n - data (bytes | bytearray | object): Data to be hashed.\n\n Returns:\n - int: Integer representation of the SHA3-256 hash of the input data.\n\n Raises:\n - TypeError: If the hashing operation encounters an incompatible data type.\n \"\"\"\n if not isinstance(data, (bytes, bytearray)):\n data_str = str(data)\n data = data_str.encode()\n h = hashlib.sha3_256(data).hexdigest()\n return int(h, 16)" }, { "identifier": "get_current_block", "path": "storage/shared/subtensor.py", "snippet": "@ttl_cache(maxsize=1, ttl=12)\ndef get_current_block(subtensor) -> int:\n return subtensor.get_current_block()" }, { "identifier": "config", "path": "storage/validator/config.py", "snippet": "def config(cls):\n parser = argparse.ArgumentParser()\n bt.wallet.add_args(parser)\n bt.subtensor.add_args(parser)\n bt.logging.add_args(parser)\n bt.axon.add_args(parser)\n cls.add_args(parser)\n return bt.config(parser)" }, { "identifier": "check_config", "path": "storage/validator/config.py", "snippet": "def check_config(cls, config: \"bt.Config\"):\n r\"\"\"Checks/validates the config namespace object.\"\"\"\n bt.logging.check_config(config)\n\n if config.mock:\n config.wallet._mock = True\n\n timestamp = datetime.datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n full_path = os.path.expanduser(\n \"{}/{}/{}/netuid{}/{}\".format(\n config.logging.logging_dir,\n config.wallet.name,\n config.wallet.hotkey,\n config.netuid,\n config.neuron.name,\n )\n )\n log_path = os.path.join(full_path, \"logs\", timestamp)\n\n config.neuron.full_path = os.path.expanduser(full_path)\n config.neuron.log_path = log_path\n\n if not os.path.exists(config.neuron.full_path):\n os.makedirs(config.neuron.full_path, exist_ok=True)\n if not os.path.exists(config.neuron.log_path):\n os.makedirs(config.neuron.log_path, exist_ok=True)\n\n if not config.neuron.dont_save_events:\n # Add custom event logger for the events.\n logger.level(\"EVENTS\", no=38, icon=\"📝\")\n logger.add(\n config.neuron.log_path + \"/\" + \"EVENTS.log\",\n rotation=config.neuron.events_retention_size,\n serialize=True,\n enqueue=True,\n backtrace=False,\n diagnose=False,\n level=\"EVENTS\",\n format=\"{time:YYYY-MM-DD at HH:mm:ss} | {level} | {message}\",\n )\n\n logger.add(\n config.neuron.log_path + \"/\" + \"INFO.log\",\n rotation=config.neuron.events_retention_size,\n serialize=True,\n enqueue=True,\n backtrace=False,\n diagnose=False,\n level=\"INFO\",\n format=\"{time:YYYY-MM-DD at HH:mm:ss} | {level} | {message}\",\n )\n\n logger.add(\n config.neuron.log_path + \"/\" + \"DEBUG.log\",\n rotation=config.neuron.events_retention_size,\n serialize=True,\n enqueue=True,\n backtrace=False,\n diagnose=False,\n level=\"DEBUG\",\n format=\"{time:YYYY-MM-DD at HH:mm:ss} | {level} | {message}\",\n )\n\n logger.add(\n config.neuron.log_path + \"/\" + \"TRACE.log\",\n rotation=config.neuron.events_retention_size,\n serialize=True,\n enqueue=True,\n 
backtrace=False,\n diagnose=False,\n level=\"TRACE\",\n format=\"{time:YYYY-MM-DD at HH:mm:ss} | {level} | {message}\",\n )\n\n # Set miner stats and total storage save path\n config.neuron.miner_stats_path = os.path.expanduser(\n os.path.join(config.neuron.full_path + \"/\" + \"miner_stats.json\")\n )\n config.neuron.hash_map_path = os.path.expanduser(\n os.path.join(config.neuron.full_path + \"/\" + \"hash_map.json\")\n )\n config.neuron.total_storage_path = os.path.expanduser(\n os.path.join(config.neuron.full_path + \"/\" + \"total_storage.csv\")\n )\n\n if config.database.purge_challenges:\n bt.logging.warning(\n \"Purging all challenges from ALL miners! Waiting 60 sec in case this is unintentional...\"\n )\n bt.logging.warning(\n \"Please abort the process if you are not intending to purge all your challenge data!\"\n )\n time.sleep(60)\n\n bt.logging.info(f\"Loaded config in fullpath: {config.neuron.full_path}\")" }, { "identifier": "add_args", "path": "storage/validator/config.py", "snippet": "def add_args(cls, parser):\n # Netuid Arg\n parser.add_argument(\"--netuid\", type=int, help=\"Storage network netuid\", default=21)\n\n parser.add_argument(\n \"--neuron.name\",\n type=str,\n help=\"Trials for this miner go in miner.root / (wallet_cold - wallet_hot) / miner.name. \",\n default=\"core_storage_validator\",\n )\n parser.add_argument(\n \"--neuron.device\",\n type=str,\n help=\"Device to run the validator on.\",\n default=\"cuda\" if torch.cuda.is_available() else \"cpu\",\n )\n parser.add_argument(\n \"--neuron.curve\",\n default=\"P-256\",\n help=\"Curve for elliptic curve cryptography.\",\n choices=[\"P-256\"], # TODO: expand this list\n )\n parser.add_argument(\n \"--neuron.maxsize\",\n default=None, # Use lognormal random gaussian if None (2**16, # 64KB)\n type=int,\n help=\"Maximum size of random data to store.\",\n )\n parser.add_argument(\n \"--neuron.min_chunk_size\",\n default=256,\n type=int,\n help=\"Minimum chunk size of random data to challenge (bytes).\",\n )\n parser.add_argument(\n \"--neuron.override_chunk_size\",\n default=0,\n type=int,\n help=\"Override random chunk size to split data into for challenges.\",\n )\n parser.add_argument(\n \"--neuron.reward_mode\",\n default=\"sigmoid\",\n type=str,\n choices=[\"minmax\", \"sigmoid\"],\n help=\"Reward mode for the validator.\",\n )\n parser.add_argument(\n \"--neuron.store_redundancy\",\n type=int,\n default=4,\n help=\"Number of miners to store each piece of data on.\",\n )\n parser.add_argument(\n \"--neuron.store_step_length\",\n type=int,\n default=2,\n help=\"Number of steps before random store epoch is complete.\",\n )\n parser.add_argument(\n \"--neuron.store_sample_size\",\n type=int,\n default=10,\n help=\"Number of miners to store each piece of data on.\",\n )\n parser.add_argument(\n \"--neuron.challenge_sample_size\",\n type=int,\n default=10,\n help=\"Number of miners to challenge at a time. 
Target is ~90 miners per epoch.\",\n )\n parser.add_argument(\n \"--neuron.retrieve_step_length\",\n type=int,\n default=5,\n help=\"Number of steps before random retrieve epoch is complete.\",\n )\n parser.add_argument(\n \"--neuron.compute_stats_interval\",\n type=int,\n default=360,\n help=\"Number of steps before computing and logging all stats.\",\n )\n parser.add_argument(\n \"--neuron.monitor_step_length\",\n type=int,\n default=5,\n help=\"Number of steps before calling monitor for down uids.\",\n )\n parser.add_argument(\n \"--neuron.monitor_sample_size\",\n type=int,\n default=20,\n help=\"Number of miners to monitor each interval.\",\n )\n parser.add_argument(\n \"--neuron.max_failed_pings\",\n type=int,\n default=10,\n help=\"Number of failed periodic pings before a miner is considered offline.\",\n )\n parser.add_argument(\n \"--neuron.set_weights_epoch_length\",\n type=int,\n help=\"Blocks until the miner sets weights on chain\",\n default=200,\n )\n parser.add_argument(\n \"--neuron.disable_log_rewards\",\n action=\"store_true\",\n help=\"Disable all reward logging, suppresses reward functions and their values from being logged to wandb.\",\n default=False,\n )\n parser.add_argument(\n \"--neuron.subscription_logging_path\",\n type=str,\n help=\"The path to save subscription logs.\",\n default=\"subscription_logs.txt\",\n )\n parser.add_argument(\n \"--neuron.chunk_factor\",\n type=int,\n help=\"The chunk factor to divide data.\",\n default=4,\n )\n parser.add_argument(\n \"--neuron.num_concurrent_forwards\",\n type=int,\n help=\"The number of concurrent forwards running at any time.\",\n default=1,\n )\n parser.add_argument(\n \"--neuron.disable_set_weights\",\n action=\"store_true\",\n help=\"Disables setting weights.\",\n default=False,\n )\n parser.add_argument(\n \"--neuron.moving_average_alpha\",\n type=float,\n help=\"Moving average alpha parameter, how much to add of the new observation.\",\n default=0.05,\n )\n parser.add_argument(\n \"--neuron.semaphore_size\",\n type=int,\n help=\"How many async calls to limit concurrently.\",\n default=256,\n )\n parser.add_argument(\n \"--neuron.store_timeout\",\n type=float,\n help=\"Store data query timeout.\",\n default=60,\n )\n parser.add_argument(\n \"--neuron.challenge_timeout\",\n type=float,\n help=\"Challenge data query timeout.\",\n default=30,\n )\n parser.add_argument(\n \"--neuron.retrieve_timeout\",\n type=float,\n help=\"Retreive data query timeout.\",\n default=60,\n )\n parser.add_argument(\n \"--neuron.checkpoint_block_length\",\n type=int,\n help=\"Blocks before a checkpoint is saved.\",\n default=100,\n )\n parser.add_argument(\n \"--neuron.distribute_step_length\",\n type=int,\n help=\"Blocks before a distribute step is taken.\",\n default=10,\n )\n parser.add_argument(\n \"--neuron.blocks_per_step\",\n type=int,\n help=\"Blocks before a step is taken.\",\n default=3,\n )\n parser.add_argument(\n \"--neuron.events_retention_size\",\n type=str,\n help=\"Events retention size.\",\n default=\"2 GB\",\n )\n parser.add_argument(\n \"--neuron.dont_save_events\",\n action=\"store_true\",\n help=\"If set, we dont save events to a log file.\",\n default=False,\n )\n parser.add_argument(\n \"--neuron.vpermit_tao_limit\",\n type=int,\n help=\"The maximum number of TAO allowed to query a validator with a vpermit.\",\n default=500,\n )\n parser.add_argument(\n \"--neuron.verbose\",\n action=\"store_true\",\n help=\"If set, we will print verbose detailed logs.\",\n default=False,\n )\n parser.add_argument(\n 
\"--neuron.log_responses\",\n action=\"store_true\",\n help=\"If set, we will log responses. These can be LONG.\",\n default=False,\n )\n parser.add_argument(\n \"--neuron.data_ttl\",\n type=int,\n help=\"The number of blocks before data expires.\",\n default=50000, # 7 days\n )\n parser.add_argument(\n \"--neuron.profile\",\n action=\"store_true\",\n help=\"If set, we will profile the neuron network and I/O actions.\",\n default=False,\n )\n parser.add_argument(\n \"--neuron.debug_logging_path\",\n type=str,\n help=\"The path to save debug logs.\",\n default=\"debug_logs.txt\",\n )\n\n # Redis arguments\n parser.add_argument(\n \"--database.host\", default=\"localhost\", help=\"The host of the redis database.\"\n )\n parser.add_argument(\n \"--database.port\", default=6379, help=\"The port of the redis database.\"\n )\n parser.add_argument(\n \"--database.index\",\n default=1,\n help=\"The database number of the redis database.\",\n )\n parser.add_argument(\n \"--database.purge_challenges\",\n action=\"store_true\",\n help=\"If set, we will purge all challenges from ALL miners on start.\",\n default=False,\n )\n\n # Wandb args\n parser.add_argument(\n \"--wandb.off\", action=\"store_true\", help=\"Turn off wandb.\", default=False\n )\n parser.add_argument(\n \"--wandb.project_name\",\n type=str,\n help=\"The name of the project where you are sending the new run.\",\n default=\"philanthropic-thunder\",\n )\n parser.add_argument(\n \"--wandb.entity\",\n type=str,\n help=\"An entity is a username or team name where youre sending runs.\",\n default=\"philanthrope\",\n )\n parser.add_argument(\n \"--wandb.offline\",\n action=\"store_true\",\n help=\"Runs wandb in offline mode.\",\n default=False,\n )\n parser.add_argument(\n \"--wandb.weights_step_length\",\n type=int,\n help=\"How many steps before we log the weights.\",\n default=10,\n )\n parser.add_argument(\n \"--wandb.run_step_length\",\n type=int,\n help=\"How many steps before we rollover to a new run.\",\n default=1500,\n )\n parser.add_argument(\n \"--wandb.notes\",\n type=str,\n help=\"Notes to add to the wandb run.\",\n default=\"\",\n )\n\n # Mocks\n parser.add_argument(\n \"--mock\", action=\"store_true\", help=\"Mock all items.\", default=False\n )\n\n # API specific\n parser.add_argument(\n \"--api.store_timeout\",\n type=int,\n help=\"Store data query timeout.\",\n default=60,\n )\n parser.add_argument(\n \"--api.retrieve_timeout\",\n type=int,\n help=\"Retrieve data query timeout.\",\n default=60,\n )\n parser.add_argument(\n \"--api.ping_timeout\",\n type=int,\n help=\"Ping data query timeout.\",\n default=5,\n )\n parser.add_argument(\n \"--api.whitelisted_hotkeys\",\n nargs=\"+\",\n type=list,\n help=\"List of whitelisted hotkeys.\",\n default=[],\n )\n parser.add_argument(\n \"--api.debug\",\n action=\"store_true\",\n help=\"If set, we whitelist by default to test easily.\",\n )\n\n # Encryption wallet\n parser.add_argument(\n \"--encryption.wallet_name\",\n type=str,\n help=\"The name of the wallet to use for encryption.\",\n default=\"core_storage_coldkey\",\n )\n parser.add_argument(\n \"--encryption.wallet_hotkey\",\n type=str,\n help=\"The hotkey name of the wallet to use for encryption.\",\n default=\"core_storage_hotkey\",\n )\n parser.add_argument(\n \"--encryption.password\",\n type=str,\n help=\"The password of the wallet to use for encryption.\",\n default=\"dummy_password\",\n )" }, { "identifier": "should_checkpoint", "path": "storage/validator/state.py", "snippet": "def should_checkpoint(current_block, 
prev_step_block, checkpoint_block_length):\n # Check if enough epoch blocks have elapsed since the last checkpoint.\n return current_block - prev_step_block >= checkpoint_block_length" }, { "identifier": "encrypt_data", "path": "storage/validator/encryption.py", "snippet": "NACL_SALT = b\"\\x13q\\x83\\xdf\\xf1Z\\t\\xbc\\x9c\\x90\\xb5Q\\x879\\xe9\\xb1\"\ndef encrypt_aes(filename: typing.Union[bytes, str], key: bytes) -> bytes:\ndef decrypt_aes(cipher_text: bytes, key: bytes, nonce: bytes, tag: bytes) -> bytes:\ndef encrypt_data_with_wallet(data: bytes, wallet) -> bytes:\ndef decrypt_data_with_coldkey_private_key(\n encrypted_data: bytes, private_key: typing.Union[str, bytes]\n) -> bytes:\ndef decrypt_data_with_wallet(encrypted_data: bytes, wallet) -> bytes:\ndef encrypt_data_with_aes_and_serialize(\n data: bytes, wallet: bt.wallet\n) -> typing.Tuple[bytes, bytes]:\ndef decrypt_data_and_deserialize(\n encrypted_data: bytes, encryption_payload: bytes, wallet: bt.wallet\n) -> bytes:\ndef decrypt_data_and_deserialize_with_coldkey_private_key(\n encrypted_data: bytes,\n encryption_payload: bytes,\n private_key: typing.Union[str, bytes],\n) -> bytes:\ndef serialize_nacl_encrypted_message(encrypted_message: EncryptedMessage) -> str:\ndef deserialize_nacl_encrypted_message(serialized_data: str) -> EncryptedMessage:\ndef setup_encryption_wallet(\n wallet_name=\"encryption\",\n wallet_hotkey=\"encryption\",\n password=\"dummy_password\",\n n_words=12,\n use_encryption=False,\n overwrite=False,\n):" }, { "identifier": "store_broadband", "path": "storage/validator/store.py", "snippet": "async def store_broadband(\n self,\n encrypted_data,\n encryption_payload,\n R=3,\n k=10,\n data_hash=None,\n exclude_uids=None,\n):\n \"\"\"\n Asynchronously stores encrypted data across a distributed network by splitting it into chunks and\n assigning these chunks to various miners for storage. This method ensures redundancy and efficient\n data distribution while handling network requests concurrently.\n\n The process includes chunking the data, selecting miners for storage, and verifying the integrity\n of stored data through response validation.\n\n Parameters:\n encrypted_data (bytes): The encrypted data to be stored across the network.\n encryption_payload (dict): Additional payload information required for encryption.\n R (int, optional): The redundancy factor, denoting how many times each chunk is replicated. Default is 3.\n k (int, optional): The number of miners to query for each chunk. Default is 10.\n data_hash (str, optional): The hash of the data to be stored. If not provided, compute it. Default is None.\n exclude_uids: (list of int, optional): A list of UIDs to exclude from the storage process. 
Default is None.\n\n Returns:\n str: The hash of the full data, representing its unique identifier in the network.\n\n Raises:\n Exception: If the process of creating initial distributions fails after multiple retries.\n\n Note:\n - Uses a semaphore to limit the number of concurrent network requests.\n - Employs a retry mechanism for handling network and miner availability issues.\n - Logs various stages of the process for debugging and monitoring purposes.\n \"\"\"\n if self.config.neuron.profile:\n # Create a profiler instance\n profiler = Profiler()\n profiler.start()\n\n semaphore = asyncio.Semaphore(self.config.neuron.semaphore_size)\n\n async def store_chunk_group(chunk_hash, chunk, uids):\n event = EventSchema(\n task_name=\"Store\",\n successful=[],\n completion_times=[],\n task_status_messages=[],\n task_status_codes=[],\n block=self.subtensor.get_current_block(),\n uids=[],\n step_length=0.0,\n best_uid=\"\",\n best_hotkey=\"\",\n rewards=[],\n moving_averaged_scores=[],\n )\n\n g, h = setup_CRS(curve=self.config.neuron.curve)\n\n bt.logging.debug(f\"type(chunk): {type(chunk)}\")\n bt.logging.debug(f\"chunk: {chunk[:100]}\")\n chunk = chunk.encode(\"utf-8\") if isinstance(chunk, str) else chunk\n b64_encoded_chunk = await asyncio.to_thread(base64.b64encode, chunk)\n b64_encoded_chunk = b64_encoded_chunk.decode(\"utf-8\")\n bt.logging.debug(f\"b64_encoded_chunk: {b64_encoded_chunk[:100]}\")\n random_seed = get_random_bytes(32).hex()\n\n synapse = protocol.Store(\n encrypted_data=b64_encoded_chunk,\n curve=self.config.neuron.curve,\n g=ecc_point_to_hex(g),\n h=ecc_point_to_hex(h),\n seed=random_seed,\n )\n\n uids = [\n uid\n for uid in uids\n if not await hotkey_at_capacity(self.metagraph.hotkeys[uid], self.database)\n ]\n\n axons = [self.metagraph.axons[uid] for uid in uids]\n responses = await self.dendrite(\n axons,\n synapse,\n deserialize=False,\n timeout=self.config.neuron.store_timeout,\n )\n\n # Compute the rewards for the responses given proc time.\n rewards: torch.FloatTensor = torch.zeros(\n len(responses), dtype=torch.float32\n ).to(self.device)\n\n async def success(hotkey, idx, uid, response):\n bt.logging.debug(f\"Stored data in database with key: {hotkey}\")\n\n failed_uids = []\n\n def failure(uid):\n failed_uids.append(uid)\n\n await create_reward_vector(\n self, synapse, rewards, uids, responses, event, success, failure\n )\n event.rewards.extend(rewards.tolist())\n\n apply_reward_scores(\n self,\n uids,\n responses,\n rewards,\n timeout=self.config.neuron.store_timeout,\n mode=self.config.neuron.reward_mode,\n )\n\n bt.logging.debug(f\"Updated reward scores: {rewards.tolist()}\")\n\n # Determine the best UID based on rewards\n if event.rewards:\n best_index = max(range(len(event.rewards)), key=event.rewards.__getitem__)\n event.best_uid = event.uids[best_index]\n event.best_hotkey = self.metagraph.hotkeys[event.best_uid]\n\n chunk_size = sys.getsizeof(chunk) # chunk size in bytes\n bt.logging.debug(f\"chunk size: {chunk_size}\")\n\n await store_chunk_metadata(\n full_hash,\n chunk_hash,\n [self.metagraph.hotkeys[uid] for uid in uids],\n chunk_size, # this should be len(chunk) but we need to fix the chunking\n self.database,\n )\n\n return responses, b64_encoded_chunk, random_seed\n\n async def handle_uid_operations(\n uid, response, b64_encoded_chunk, random_seed, chunk_hash, chunk_size\n ):\n ss = time.time()\n start = time.time()\n\n # Offload the CPU-intensive verification to a separate thread\n verified = await asyncio.to_thread(\n verify_store_with_seed, 
response, b64_encoded_chunk, random_seed\n )\n\n end = time.time()\n bt.logging.debug(f\"verify_store_with_seed time for uid {uid} : {end-start}\")\n if verified:\n # Prepare storage for the data for particular miner\n response_storage = {\n \"prev_seed\": response.seed,\n \"size\": chunk_size,\n \"encryption_payload\": encryption_payload,\n }\n start = time.time()\n # Store in the database according to the data hash and the miner hotkey\n await add_metadata_to_hotkey(\n self.metagraph.hotkeys[uid],\n chunk_hash,\n response_storage, # seed + size + encryption keys\n self.database,\n )\n end = time.time()\n bt.logging.debug(\n f\"Stored data in database for uid: {uid} | {str(chunk_hash)}\"\n )\n else:\n bt.logging.error(f\"Failed to verify store commitment from UID: {uid}\")\n\n # Update the storage statistics\n await update_statistics(\n ss58_address=self.metagraph.hotkeys[uid],\n success=verified,\n task_type=\"store\",\n database=self.database,\n )\n bt.logging.debug(f\"handle_uid_operations time for uid {uid} : {time.time()-ss}\")\n\n return {\"chunk_hash\": chunk_hash, \"uid\": uid, \"verified\": verified}\n\n async def semaphore_query_miners(distributions):\n tasks = []\n async with semaphore:\n for i, dist in enumerate(distributions):\n bt.logging.trace(\n f\"Start index: {dist['start_idx']}, End index: {dist['end_idx']}\"\n )\n chunk = encrypted_data[dist[\"start_idx\"] : dist[\"end_idx\"]]\n bt.logging.trace(f\"chunk: {chunk[:12]}\")\n dist[\"chunk_hash\"] = hash_data(chunk)\n bt.logging.debug(\n f\"Chunk {i} | uid distribution: {dist['uids']} | size: {dist['chunk_size']}\"\n )\n\n # Create an asyncio task for each chunk processing\n task = asyncio.create_task(\n store_chunk_group(dist[\"chunk_hash\"], chunk, dist[\"uids\"])\n )\n tasks.append(task)\n\n bt.logging.debug(f\"gathering broadband tasks: {pformat(tasks)}\")\n results = await asyncio.gather(*tasks)\n bt.logging.debug(f\"store_chunk_group() results: {pformat(results)}\")\n # Grab the responses and relevant data necessary for verify from the results\n for i, result_group in enumerate(results):\n responses, b64_encoded_chunk, random_seed = result_group\n bt.logging.debug(f\"-- responses_nested: {pformat(responses)}\")\n bt.logging.debug(f\"-- b64_encoded_chunk: {b64_encoded_chunk[:100]}\")\n bt.logging.debug(f\"-- random_seed: {random_seed}\")\n\n # Update the distributions with respones\n distributions[i][\"responses\"] = responses\n distributions[i][\"b64_encoded_chunk\"] = b64_encoded_chunk\n distributions[i][\"random_seed\"] = random_seed\n\n return distributions\n\n async def semaphore_query_uid_operations(distributions):\n tasks = []\n for dist in distributions:\n chunk_hash = dist[\"chunk_hash\"]\n chunk_size = dist[\"chunk_size\"]\n random_seed = dist[\"random_seed\"]\n b64_encoded_chunk = dist[\"b64_encoded_chunk\"]\n for uid, response in zip(dist[\"uids\"], dist[\"responses\"]):\n task = asyncio.create_task(\n handle_uid_operations(\n uid,\n response,\n b64_encoded_chunk,\n random_seed,\n chunk_hash,\n chunk_size,\n )\n )\n tasks.append(task)\n uid_verified_dict_list = await asyncio.gather(*tasks)\n return uid_verified_dict_list\n\n async def create_initial_distributions(encrypted_data, R, k):\n dist_gen = compute_chunk_distribution_mut_exclusive_numpy_reuse_uids(\n self,\n data_size=sys.getsizeof(encrypted_data),\n R=R,\n k=k,\n exclude=exclude_uids,\n )\n # Ping first to see if we need to reroll instead of waiting for the timeout\n distributions = [dist async for dist in dist_gen]\n distributions = await 
compute_and_ping_chunks(self, distributions)\n return distributions\n\n bt.logging.debug(f\"store_broadband() {encrypted_data[:100]}\")\n\n full_hash = data_hash or hash_data(encrypted_data)\n bt.logging.debug(f\"full hash: {full_hash}\")\n\n # Check and see if hash already exists, reject if so.\n if await get_ordered_metadata(full_hash, self.database):\n bt.logging.warning(f\"Hash {full_hash} already exists on the network.\")\n return full_hash\n\n exclude_uids = copy.deepcopy(exclude_uids)\n\n full_size = sys.getsizeof(encrypted_data)\n bt.logging.debug(f\"full size: {full_size}\")\n\n # Sometimes this can fail, try/catch and retry for starters...\n # Compute the chunk distribution\n retries = 0\n while retries < 3:\n try:\n distributions = await create_initial_distributions(encrypted_data, R, k)\n break\n except websocket._exceptions.WebSocketConnectionClosedException:\n bt.logging.warning(f\"Failed to create initial distributions, retrying...\")\n retries += 1\n except Exception as e:\n bt.logging.warning(\n f\"Failed to create initial distributions: {e}, retrying...\"\n )\n retries += 1\n\n bt.logging.trace(f\"computed distributions: {pformat(distributions)}\")\n\n chunk_hashes = []\n retry_dists = [None] # sentinel for first iteration\n retries = 0\n while len(distributions) > 0 and retries < 3:\n async with semaphore:\n # Store on the network: query miners for each chunk\n # Updated distributions now contain responses from the network\n updated_distributions = await semaphore_query_miners(distributions)\n # Verify the responses and store the metadata for each verified response\n verifications = await semaphore_query_uid_operations(updated_distributions)\n if (\n chunk_hashes == []\n ): # First time only. Grab all hashes in order after processed.\n chunk_hashes.extend(\n [dist[\"chunk_hash\"] for dist in updated_distributions]\n )\n\n # Process verification results and reroll failed distributions in a single loop\n distributions = (\n []\n ) # reset original distributions to populate for next round\n for i, dist in enumerate(updated_distributions):\n # Get verification status for the current distribution\n bt.logging.trace(f\"verifications: {pformat(verifications)}\")\n\n # Check if any UID in the distribution failed verification\n if any(not v[\"verified\"] for v in verifications):\n # Extract failed UIDs\n failed_uids = [v[\"uid\"] for v in verifications if not v[\"verified\"]]\n bt.logging.trace(f\"failed uids: {pformat(failed_uids)}\")\n # Reroll distribution with failed UIDs\n rerolled_dist = await reroll_distribution(self, dist, failed_uids)\n bt.logging.trace(f\"rerolled uids: {pformat(rerolled_dist['uids'])}\")\n # Replace the original distribution with the rerolled one\n distributions.append(rerolled_dist)\n\n retries += 1\n\n # Update the chunk hash mapping for this entire file\n # TODO: change this to store_file_chunk_mapping_ordered\n # to append rather than overwrite\n await store_file_chunk_mapping_ordered(\n full_hash=full_hash,\n chunk_hashes=chunk_hashes,\n chunk_indices=list(range(len(chunk_hashes))),\n encryption_payload=encryption_payload,\n database=self.database,\n )\n\n if self.config.neuron.profile:\n # Stop the profiler\n profiler.stop()\n # Print the results\n print(profiler.output_text(unicode=True, color=True))\n\n return full_hash" }, { "identifier": "retrieve_broadband", "path": "storage/validator/retrieve.py", "snippet": "async def retrieve_broadband(self, full_hash: str):\n \"\"\"\n Asynchronously retrieves and verifies data from the network based on a 
given hash, ensuring\n the integrity and correctness of the data. This method orchestrates the retrieval process across\n multiple miners, reconstructs the data from chunks, and verifies its integrity.\n\n Parameters:\n full_hash (str): The hash of the data to be retrieved, representing its unique identifier on the network.\n\n Returns:\n tuple: A tuple containing the reconstructed data and its associated encryption payload.\n\n Raises:\n Exception: If no metadata is found for the given hash or if there are issues during the retrieval process.\n\n Note:\n - This function is a critical component of data retrieval in a distributed storage system.\n - It handles concurrent requests to multiple miners and assembles the data chunks based on\n ordered metadata.\n - In case of discrepancies in data size, the function logs a warning for potential data integrity issues.\n \"\"\"\n semaphore = asyncio.Semaphore(self.config.neuron.semaphore_size)\n\n async def retrieve_chunk_group(chunk_hash, uids):\n event = EventSchema(\n task_name=\"Store\",\n successful=[],\n completion_times=[],\n task_status_messages=[],\n task_status_codes=[],\n block=self.subtensor.get_current_block(),\n uids=[],\n step_length=0.0,\n best_uid=\"\",\n best_hotkey=\"\",\n rewards=[],\n moving_averaged_scores=[],\n )\n\n synapse = protocol.Retrieve(\n data_hash=chunk_hash,\n seed=get_random_bytes(32).hex(),\n )\n\n axons = [self.metagraph.axons[uid] for uid in uids]\n responses = await self.dendrite(\n axons,\n synapse,\n deserialize=False,\n timeout=self.config.api.retrieve_timeout,\n )\n\n # Compute the rewards for the responses given proc time.\n rewards: torch.FloatTensor = torch.zeros(\n len(responses), dtype=torch.float32\n ).to(self.device)\n\n async def success(hotkey, idx, uid, response):\n bt.logging.debug(f\"Stored data in database with key: {hotkey}\")\n\n failed_uids = []\n\n def failure(uid):\n failed_uids.append(uid)\n\n await create_reward_vector(\n self, synapse, rewards, uids, responses, event, success, failure\n )\n event.rewards.extend(rewards.tolist())\n bt.logging.debug(f\"Updated reward scores: {rewards.tolist()}\")\n\n apply_reward_scores(\n self,\n uids,\n responses,\n rewards,\n timeout=self.config.neuron.retrieve_timeout,\n mode=self.config.neuron.reward_mode,\n )\n\n # Determine the best UID based on rewards\n if event.rewards:\n best_index = max(range(len(event.rewards)), key=event.rewards.__getitem__)\n event.best_uid = event.uids[best_index]\n event.best_hotkey = self.metagraph.hotkeys[event.best_uid]\n\n return responses, synapse.seed\n\n # Get the chunks you need to reconstruct IN order\n ordered_metadata = await get_ordered_metadata(full_hash, self.database)\n if ordered_metadata == []:\n bt.logging.error(f\"No metadata found for full hash: {full_hash}\")\n return None\n\n # Get the hotkeys/uids to query\n tasks = []\n total_size = 0\n bt.logging.debug(f\"ordered metadata: {pformat(ordered_metadata)}\")\n # TODO: change this to use retrieve_mutually_exclusive_hotkeys_full_hash\n # to avoid possibly double querying miners for greater retrieval efficiency\n\n async with semaphore:\n for chunk_metadata in ordered_metadata:\n bt.logging.debug(f\"chunk metadata: {chunk_metadata}\")\n\n # Ensure still registered before trying to retrieve\n uids = [\n self.metagraph.hotkeys.index(hotkey)\n for hotkey in chunk_metadata[\"hotkeys\"]\n if hotkey in self.metagraph.hotkeys\n ]\n\n # Don't waste time waiting on UIDs that are nonresponsive anyway\n uids, _ = await ping_uids(self, uids=uids)\n total_size += 
chunk_metadata[\"size\"]\n tasks.append(\n asyncio.create_task(\n retrieve_chunk_group(chunk_metadata[\"chunk_hash\"], uids)\n )\n )\n responses = await asyncio.gather(*tasks)\n\n chunks = {}\n # TODO: make these asyncio tasks and use .to_thread() to avoid blocking\n for i, (response_group, seed) in enumerate(responses):\n for response in response_group:\n if response.dendrite.status_code != 200:\n bt.logging.debug(f\"failed response: {response.dendrite.dict()}\")\n continue\n verified = verify_retrieve_with_seed(response, seed)\n if verified:\n # Add to final chunks dict\n if i not in list(chunks.keys()):\n bt.logging.debug(\n f\"Adding chunk {i} to chunks, size: {sys.getsizeof(response.data)}\"\n )\n chunks[i] = base64.b64decode(response.data)\n bt.logging.debug(f\"chunk {i} | {chunks[i][:10]}\")\n else:\n uid = self.metagraph.hotkeys.index(response.axon.hotkey)\n bt.logging.error(\n f\"Failed to verify store commitment from UID: {uid}\"\n )\n\n bt.logging.trace(f\"chunks after: {[chunk[:12] for chunk in chunks.values()]}\")\n bt.logging.trace(f\"len(chunks) after: {[len(chunk) for chunk in chunks.values()]}\")\n\n # Reconstruct the data\n encrypted_data = b\"\".join(chunks.values())\n bt.logging.trace(f\"retrieved data: {encrypted_data[:12]}\")\n\n # Retrieve user encryption payload (if exists)\n encryption_payload = await retrieve_encryption_payload(full_hash, self.database)\n bt.logging.debug(f\"retrieved encryption_payload: {encryption_payload}\")\n\n return encrypted_data, encryption_payload" }, { "identifier": "reroll_distribution", "path": "storage/validator/network.py", "snippet": "async def reroll_distribution(self, distribution, failed_uids):\n \"\"\"\n Asynchronously rerolls a single data chunk distribution by replacing failed miner UIDs with new, available ones.\n This is part of the error handling process in data distribution to ensure that each chunk is reliably stored.\n\n Parameters:\n distribution (dict): The original chunk distribution dictionary, containing chunk information and miner UIDs.\n failed_uids (list of int): List of UIDs that failed in the original distribution and need replacement.\n\n Returns:\n dict: The updated chunk distribution with new miner UIDs replacing the failed ones.\n\n Note:\n - This function is typically used when certain miners are unresponsive or unable to store the chunk.\n - Ensures that each chunk has the required number of active miners for redundancy.\n \"\"\"\n # Get new UIDs to replace the failed ones\n new_uids = await get_available_query_miners(\n self, k=len(failed_uids), exclude=failed_uids\n )\n distribution[\"uids\"] = new_uids\n return distribution" }, { "identifier": "compute_and_ping_chunks", "path": "storage/validator/network.py", "snippet": "async def compute_and_ping_chunks(self, distributions):\n \"\"\"\n Asynchronously evaluates the availability of miners for the given chunk distributions by pinging them.\n Rerolls the distribution to replace failed miners, ensuring exactly k successful miners are selected.\n\n Parameters:\n distributions (list of dicts): A list of chunk distribution dictionaries, each containing\n information about chunk indices and assigned miner UIDs.\n\n Returns:\n list of dicts: The updated list of chunk distributions with exactly k successful miner UIDs.\n\n Note:\n - This function is crucial for ensuring that data chunks are assigned to available and responsive miners.\n - Pings miners based on their UIDs and updates the distributions accordingly.\n - Logs the new set of UIDs and distributions 
for traceability.\n \"\"\"\n max_retries = 3 # Define the maximum number of retries\n target_number_of_uids = len(\n distributions[0][\"uids\"]\n ) # Assuming k is the length of the uids in the first distribution\n\n for dist in distributions:\n retries = 0\n successful_uids = set()\n\n while len(successful_uids) < target_number_of_uids and retries < max_retries:\n # Ping all UIDs\n current_successful_uids, _ = await ping_uids(self, dist[\"uids\"])\n successful_uids.update(current_successful_uids)\n\n # If enough UIDs are successful, select the first k items\n if len(successful_uids) >= target_number_of_uids:\n dist[\"uids\"] = tuple(sorted(successful_uids)[:target_number_of_uids])\n break\n\n # Reroll for k UIDs excluding the successful ones\n new_uids = await get_available_query_miners(\n self, k=target_number_of_uids, exclude=successful_uids\n )\n bt.logging.trace(\"compute_and_ping_chunks() new uids:\", new_uids)\n\n # Update the distribution with new UIDs\n dist[\"uids\"] = tuple(new_uids)\n retries += 1\n\n # Log if the maximum retries are reached without enough successful UIDs\n if len(successful_uids) < target_number_of_uids:\n bt.logging.warning(\n f\"compute_and_ping_chunks(): Insufficient successful UIDs for distribution: {dist}\"\n )\n\n # Continue with your logic using the updated distributions\n bt.logging.trace(\"new distributions:\", distributions)\n return distributions" }, { "identifier": "ping_uids", "path": "storage/validator/network.py", "snippet": "async def ping_uids(self, uids):\n \"\"\"\n Ping a list of UIDs to check their availability.\n Returns a tuple with a list of successful UIDs and a list of failed UIDs.\n \"\"\"\n axons = [self.metagraph.axons[uid] for uid in uids]\n try:\n responses = await self.dendrite(\n axons,\n bt.Synapse(),\n deserialize=False,\n timeout=self.config.api.ping_timeout,\n )\n successful_uids = [\n uid\n for uid, response in zip(uids, responses)\n if response.dendrite.status_code == 200\n ]\n failed_uids = [\n uid\n for uid, response in zip(uids, responses)\n if response.dendrite.status_code != 200\n ]\n except Exception as e:\n bt.logging.error(f\"Dendrite ping failed: {e}\")\n successful_uids = []\n failed_uids = uids\n bt.logging.debug(\"ping() successful uids:\", successful_uids)\n bt.logging.debug(\"ping() failed uids :\", failed_uids)\n return successful_uids, failed_uids" }, { "identifier": "retrieve_encryption_payload", "path": "storage/validator/database.py", "snippet": "async def retrieve_encryption_payload(\n full_hash: str,\n database: aioredis.Redis,\n return_dict: bool = False,\n) -> Optional[Union[bytes, dict]]:\n \"\"\"\n Retrieve the encryption payload for a file.\n\n This function fetches the encryption payload for a file from the Redis database.\n\n Parameters:\n - full_hash (str): The full hash of the file.\n - database (aioredis.Redis): An instance of the Redis database.\n\n Returns:\n - Optional[Union[bytes, dict]]: The encryption payload for the file.\n \"\"\"\n encryption_payload = await database.get(f\"payload:{full_hash}\")\n if encryption_payload:\n if return_dict:\n return encryption_payload\n try:\n return json.loads(encryption_payload)\n except json.JSONDecodeError:\n return encryption_payload\n else:\n return None" }, { "identifier": "decrypt_data_with_private_key", "path": "storage/validator/encryption.py", "snippet": "NACL_SALT = b\"\\x13q\\x83\\xdf\\xf1Z\\t\\xbc\\x9c\\x90\\xb5Q\\x879\\xe9\\xb1\"\ndef encrypt_aes(filename: typing.Union[bytes, str], key: bytes) -> bytes:\ndef decrypt_aes(cipher_text: 
bytes, key: bytes, nonce: bytes, tag: bytes) -> bytes:\ndef encrypt_data_with_wallet(data: bytes, wallet) -> bytes:\ndef decrypt_data_with_coldkey_private_key(\n encrypted_data: bytes, private_key: typing.Union[str, bytes]\n) -> bytes:\ndef decrypt_data_with_wallet(encrypted_data: bytes, wallet) -> bytes:\ndef encrypt_data_with_aes_and_serialize(\n data: bytes, wallet: bt.wallet\n) -> typing.Tuple[bytes, bytes]:\ndef decrypt_data_and_deserialize(\n encrypted_data: bytes, encryption_payload: bytes, wallet: bt.wallet\n) -> bytes:\ndef decrypt_data_and_deserialize_with_coldkey_private_key(\n encrypted_data: bytes,\n encryption_payload: bytes,\n private_key: typing.Union[str, bytes],\n) -> bytes:\ndef serialize_nacl_encrypted_message(encrypted_message: EncryptedMessage) -> str:\ndef deserialize_nacl_encrypted_message(serialized_data: str) -> EncryptedMessage:\ndef setup_encryption_wallet(\n wallet_name=\"encryption\",\n wallet_hotkey=\"encryption\",\n password=\"dummy_password\",\n n_words=12,\n use_encryption=False,\n overwrite=False,\n):" } ]
import os
import sys
import copy
import json
import time
import torch
import base64
import typing
import asyncio
import aioredis
import traceback
import websocket
import bittensor as bt
import threading

from storage import protocol
from storage.shared.ecc import hash_data
from storage.shared.subtensor import get_current_block
from storage.validator.config import config, check_config, add_args
from storage.validator.state import should_checkpoint
from storage.validator.encryption import encrypt_data, setup_encryption_wallet
from storage.validator.store import store_broadband
from storage.validator.retrieve import retrieve_broadband
from storage.validator.network import (
    reroll_distribution,
    compute_and_ping_chunks,
    ping_uids,
)
from storage.validator.database import retrieve_encryption_payload
from storage.validator.encryption import decrypt_data_with_private_key
11,046
# The MIT License (MIT) # Copyright © 2023 Yuma Rao # Copyright © 2023 philanthrope # Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated # documentation files (the “Software”), to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in all copies or substantial portions of # the Software. # THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO # THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. def MockDendrite(): pass class neuron: """ API node for storage network Attributes: subtensor (bt.subtensor): The interface to the Bittensor network's blockchain. wallet (bt.wallet): Cryptographic wallet containing keys for transactions and encryption. metagraph (bt.metagraph): Graph structure storing the state of the network. database (redis.StrictRedis): Database instance for storing metadata and proofs. """ @classmethod def check_config(cls, config: "bt.Config"): check_config(cls, config) @classmethod
# The MIT License (MIT) # Copyright © 2023 Yuma Rao # Copyright © 2023 philanthrope # Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated # documentation files (the “Software”), to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in all copies or substantial portions of # the Software. # THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO # THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. def MockDendrite(): pass class neuron: """ API node for storage network Attributes: subtensor (bt.subtensor): The interface to the Bittensor network's blockchain. wallet (bt.wallet): Cryptographic wallet containing keys for transactions and encryption. metagraph (bt.metagraph): Graph structure storing the state of the network. database (redis.StrictRedis): Database instance for storing metadata and proofs. """ @classmethod def check_config(cls, config: "bt.Config"): check_config(cls, config) @classmethod
def add_args(cls, parser):
5
2023-10-26 18:54:47+00:00
16k
Eclectic-Sheep/sheeprlhf
sheeprlhf/task/train/ppo.py
[ { "identifier": "PPOAgent", "path": "sheeprlhf/agent/ppo.py", "snippet": "class PPOAgent:\n \"\"\"Agent model for PPO training.\"\"\"\n\n _reference: ActorModel\n _reward: RewardModel\n _finetune_mode: FINETUNE_MODE\n _actor: Optional[ActorModel] = None\n _critic: Optional[CriticModel] = None\n _same_actor_critic: bool = False\n _share_actor_critic: bool = False\n _share_critic_reward: bool = False\n\n _sft_checkpoint_path: str\n _sft_model_cfg: ModelConfig\n _rm_checkpoint_path: str\n _rm_model_cfg: ModelConfig\n\n _lora_enabled: bool\n _init_critic_with_reward: bool\n\n def __init__(self, model_cfg: ModelConfig, task_cfg: PPOConfig) -> None:\n self.model_cfg = model_cfg\n self._init_critic_with_reward = task_cfg.init_critic_with_reward\n\n self._sft_model_cfg, self._sft_checkpoint_path = get_model_checkpoint(\n task_cfg.sft_experiment_dir, task_cfg.sft_model_name\n )\n sft_model_name = self._sft_model_cfg.repo_name\n\n self._rm_model_cfg, self._rm_checkpoint_path = get_model_checkpoint(\n task_cfg.rm_experiment_dir, task_cfg.sft_model_name\n )\n rm_model_name = self._rm_model_cfg.repo_name\n\n self._reference = ActorModel(model_cfg=self._sft_model_cfg)\n self._reward = RewardModel(model_cfg=self._rm_model_cfg)\n\n self._same_actor_critic = sft_model_name == rm_model_name\n self._finetune_mode = model_cfg.finetune_mode\n self._lora_enabled = self._finetune_mode == FINETUNE_MODE.LORA\n if not self._init_critic_with_reward:\n if not (self._lora_enabled and self._same_actor_critic):\n # Actor and critic cannot be shared, we fallback to the default behavior\n self._actor = ActorModel(model_cfg=self._sft_model_cfg)\n self._critic = CriticModel(model_cfg=self._sft_model_cfg)\n else:\n self._share_actor_critic = True\n\n else:\n if not self._lora_enabled:\n self._actor = ActorModel(model_cfg=self._sft_model_cfg)\n self._critic = CriticModel(model_cfg=self._rm_model_cfg)\n else:\n self._share_critic_reward = True\n\n def load_checkpoint(self, device: torch.device) -> None:\n \"\"\"Load checkpoints for Actor, Critic and Reward models.\"\"\"\n self._reference.load_checkpoint(\n path=self._sft_checkpoint_path, device=device, model_cfg=self._sft_model_cfg, freeze=True\n )\n self._reward.load_checkpoint(\n path=self._rm_checkpoint_path, device=device, model_cfg=self._rm_model_cfg, freeze=True\n )\n if not self._init_critic_with_reward:\n if not (self._lora_enabled and self._same_actor_critic):\n # Actor and critic cannot be shared, we fallback to the default behavior\n self._actor.load_checkpoint(\n path=self._sft_checkpoint_path, device=device, model_cfg=self._sft_model_cfg, freeze=True\n )\n self._critic.load_checkpoint(\n path=self._sft_checkpoint_path, device=device, model_cfg=self._sft_model_cfg, freeze=True\n )\n else:\n if not self._lora_enabled:\n self._critic.load_checkpoint(\n path=self._rm_checkpoint_path, device=device, model_cfg=self._rm_model_cfg, freeze=True\n )\n self._actor.load_checkpoint(\n path=self._sft_checkpoint_path, device=device, model_cfg=self._sft_model_cfg, freeze=True\n )\n\n def setup_finetuning(self, model_cfg: Optional[ModelConfig] = None) -> None:\n \"\"\"Setup finetuning for Actor, Critic and Reward models.\"\"\"\n if model_cfg is None:\n model_cfg = self.model_cfg\n lora_cfg = self.model_cfg.lora_cfg\n if not self._init_critic_with_reward:\n if self._lora_enabled and self._same_actor_critic:\n # here we can share reference model between Actor and Critic\n add_multiple_lora(self._reference, lora_cfg=lora_cfg, num=2)\n else:\n # Actor and critic cannot be shared, we 
fallback to the default behavior\n self._actor.setup_finetuning(model_cfg=model_cfg)\n self._critic.setup_finetuning(model_cfg=model_cfg)\n else:\n if self._lora_enabled:\n add_lora(self._reward, lora_cfg=lora_cfg)\n add_lora(self._reference, lora_cfg=lora_cfg)\n else:\n self._critic.setup_finetuning(model_cfg=model_cfg)\n self._actor.setup_finetuning(model_cfg=model_cfg)\n trainable_parameter_summary(self.actor, show_names=False, tag=\"Actor\")\n trainable_parameter_summary(self.critic, show_names=False, tag=\"Critic\")\n\n @property\n def share_actor_critic(self) -> bool:\n \"\"\"Whether Actor and Critic models are shared.\"\"\"\n return self._share_actor_critic\n\n @property\n def share_critic_reward(self) -> bool:\n \"\"\"Whether Critic and Reward models are shared.\"\"\"\n return self._share_critic_reward\n\n @property\n def lora_enabled(self) -> bool:\n \"\"\"Whether LoRA is enabled.\"\"\"\n return self._lora_enabled\n\n @property\n def actor(self) -> ActorModel: # noqa: D102\n if self._share_actor_critic:\n enable_lora(self._reference)\n return select_lora(self._reference, index=0)\n elif self._lora_enabled and self._init_critic_with_reward:\n enable_lora(self._reference)\n return self._reference\n else:\n return self._actor\n\n @actor.setter\n def actor(self, actor: ActorModel) -> None:\n if self._lora_enabled and (self._share_actor_critic or self._init_critic_with_reward):\n self._reference = actor\n else:\n self._actor = actor\n\n @property\n def critic(self) -> CriticModel: # noqa: D102\n if self._share_actor_critic:\n enable_lora(self._reference)\n return select_lora(self._reference, index=1)\n elif self._share_critic_reward:\n enable_lora(self._reward)\n self._reward.disable_bias_gain()\n return self._reward\n else:\n return self._critic\n\n @critic.setter\n def critic(self, critic: CriticModel) -> None:\n if self._share_actor_critic:\n self._reference = critic\n elif self._share_critic_reward:\n self._reward = critic\n else:\n self._critic = critic\n\n @property\n def reference(self) -> ActorModel: # noqa: D102\n if self._share_actor_critic and self._lora_enabled:\n disable_lora(self._reference)\n\n return self._reference\n\n @reference.setter\n def reference(self, reference: ActorModel) -> None:\n self._reference = reference\n\n @property\n def reward(self) -> RewardModel: # noqa: D102\n if self._share_critic_reward:\n disable_lora(self._reward)\n self._reward.enable_bias_gain()\n return self._reward\n\n @reward.setter\n def reward(self, reward: RewardModel) -> None:\n self._reward = reward" }, { "identifier": "TextDataset", "path": "sheeprlhf/data/base.py", "snippet": "class TextDataset(torch.utils.data.Dataset):\n \"\"\"A simple text dataset for loading data from a pandas dataframe.\"\"\"\n\n def __init__(self, dataframe_path: str):\n self.dataframe = pd.read_pickle(dataframe_path).reset_index(drop=True)\n\n def __getitem__(self, index):\n row = self.dataframe.iloc[index].to_dict()\n return row\n\n def __len__(self):\n return len(self.dataframe)" }, { "identifier": "LeftPadCollate", "path": "sheeprlhf/data/collate.py", "snippet": "class LeftPadCollate:\n \"\"\"Data collator used for training.\n\n It is used when the data is left padded.\n \"\"\"\n\n def __init__(self, dim=1, pad_value=0, ignore_index=-1):\n self.dim = dim\n self.pad_value = pad_value\n self.ignore_index = ignore_index\n\n def __call__(self, batch): # noqa: D102\n input_ids = [list_to_tensor(item[\"chosen_input_ids\"])[: item[\"prompt_len\"]] for item in batch]\n # Use PyTorch's pad_sequence function\n # 
convert into left padding\n reversed_input_ids = [i.flip(dims=[0]) for i in input_ids]\n input_ids = pad_sequence(reversed_input_ids, batch_first=True, padding_value=self.pad_value).flip(dims=[1])\n attention_mask = input_ids.ne(self.pad_value).type(torch.int64)\n\n return {\n \"prompt_input_ids\": input_ids,\n \"prompt_attention_mask\": attention_mask,\n }" }, { "identifier": "policy_loss", "path": "sheeprlhf/loss/ppo.py", "snippet": "def policy_loss(\n log_probs: torch.Tensor,\n old_log_probs: torch.Tensor,\n advantages: torch.Tensor,\n clip_coeff: float,\n action_mask: Optional[torch.Tensor] = None,\n) -> torch.Tensor:\n \"\"\"Compute the policy loss for PPO.\"\"\"\n log_ratio = (log_probs - old_log_probs) * action_mask\n ratio = torch.exp(log_ratio)\n policy_loss_1 = -advantages * ratio\n policy_loss_2 = -advantages * torch.clamp(ratio, 1 - clip_coeff, 1 + clip_coeff)\n policy_loss = torch.max(policy_loss_1, policy_loss_2)\n if action_mask is not None:\n policy_loss = torch.sum(policy_loss * action_mask) / action_mask.sum()\n else:\n policy_loss = policy_loss.mean()\n return policy_loss" }, { "identifier": "value_loss", "path": "sheeprlhf/loss/ppo.py", "snippet": "def value_loss(\n values: torch.Tensor,\n old_values: torch.Tensor,\n returns: torch.Tensor,\n clip_coeff: float,\n action_mask: Optional[torch.Tensor] = None,\n) -> torch.Tensor:\n \"\"\"Compute the value loss for PPO.\"\"\"\n values_clipped = torch.clamp(values, old_values - clip_coeff, old_values + clip_coeff)\n value_loss1 = F.mse_loss(values, returns, reduction=\"none\")\n value_loss2 = F.mse_loss(values_clipped, returns, reduction=\"none\")\n value_loss = torch.max(value_loss1, value_loss2)\n if action_mask is not None:\n value_loss = torch.sum(value_loss * action_mask) / action_mask.sum()\n else:\n value_loss = value_loss.mean()\n return value_loss" }, { "identifier": "ActorModel", "path": "sheeprlhf/model/actor.py", "snippet": "class ActorModel(CasualModel):\n \"\"\"Actor model for PPO and DPO algorithms.\"\"\"\n\n def __init__(self, model_cfg: ModelConfig):\n super().__init__(model_cfg=model_cfg)\n\n def forward(self, **kwargs): # noqa: D102\n input_ids = kwargs[\"input_ids\"]\n if self.training and not self.model_cfg.use_attention_mask:\n kwargs.pop(\"attention_mask\")\n out = self.model(**kwargs)\n # Model predicts next token log probability here.\n actor_log_probs = F.log_softmax(out.logits[:, :-1, :], dim=-1)\n selected_actor_log_probs = actor_log_probs.gather(dim=-1, index=input_ids[:, 1:].unsqueeze(-1))\n return selected_actor_log_probs.squeeze(-1)" }, { "identifier": "DataConfig", "path": "sheeprlhf/structure/data.py", "snippet": "class DataConfig:\n \"\"\"The main class for processing data for the RLHF algorithm.\n\n Args:\n config_name: The name of the data configuration.\n dataset_name: The name of the dataset to load.\n root_dir: The directory where the processed data will be saved.\n tokenizer_name: The name of the tokenizer to use.\n max_length: The maximum length of the input tokens. Defaults to 512.\n max_prompt_length: The maximum length of the prompt tokens. Defaults to 512.\n num_samples: The number of samples to use. Defaults to None.\n ignore_index: The index to use for ignored tokens. Defaults to -1.\n remove_same_responses: Whether to remove samples with the same response. Defaults to True.\n remove_same_inputs: Whether to remove samples with the same input. Defaults to True.\n minimum_response_length: The minimum length of the response tokens. 
Defaults to 2.\n save_skipped_examples: Whether to save skipped examples. Defaults to False.\n validation_split: The validation split. Defaults to 0.1.\n reward_model_split: The reward model split. Defaults to 0.5.\n shuffle: Whether to shuffle the dataset. Defaults to True.\n seed: The random seed. Defaults to 42.\n split_names: The names of the splits. Defaults to (\"train\", \"val\", \"test\").\n \"\"\"\n\n _target_: str = \"sheeprlhf.data.DataProcessor\"\n config_name: str = MISSING\n dataset_name: str = MISSING\n root_dir: str = Path(\"./rlhf_data\")\n tokenizer_name: str = II(\"model.repo_name\")\n max_length: int = 256\n max_prompt_length: int = 128\n num_samples: Optional[int] = None\n ignore_index: int = -1\n remove_same_responses: bool = True\n remove_same_inputs: bool = True\n minimum_response_length: int = 5\n save_skipped_examples: bool = False\n shuffle: bool = True\n seed: int = II(\"seed\")\n validation_split: float = 0.1\n reward_model_split: float = 0.5\n split_names: Tuple[str] = (\"train\", \"test\")\n dry_run: bool = II(\"dry_run\")" }, { "identifier": "GenConfig", "path": "sheeprlhf/structure/generation.py", "snippet": "class GenConfig:\n \"\"\"The default configuration for the generator.\"\"\"\n\n # We cannot call this GenerationConfig because it will\n # conflict with transformers.GenerationConfig\n max_new_tokens: int = 128\n num_beams: int = 1\n do_sample: bool = True\n top_k: int = 50\n top_p: float = 1.0\n temperature: float = 1.0\n num_return_sequences: int = 1" }, { "identifier": "ModelConfig", "path": "sheeprlhf/structure/model.py", "snippet": "class ModelConfig:\n \"\"\"A generic configuration for models.\"\"\"\n\n config_name: str = MISSING\n repo_name: Optional[str] = None\n embedding_dim_name: Optional[str] = None\n transformer_name: Optional[str] = None\n casual: bool = True\n freeze_transformer: bool = False\n disable_dropout: bool = False\n library_cfg: HuggingFaceConfig = HuggingFaceConfig()\n finetune_mode: FINETUNE_MODE = FINETUNE_MODE.ALL\n lora_cfg: Optional[LORAConfig] = None\n use_attention_mask: bool = True\n fabric_empty_init: bool = True\n\n def __post_init__(self):\n if isinstance(self.finetune_mode, str):\n self.finetune_mode = FINETUNE_MODE(self.finetune_mode)" }, { "identifier": "PPOConfig", "path": "sheeprlhf/structure/task.py", "snippet": "class PPOConfig(TrainTaskConfig):\n \"\"\"Configuration class for PPO algorithm.\n\n Args:\n _name_: Name of the algorithm. Default is \"ppo\".\n rollout_size: Rollout size for PPO. For every training iteration this number of samples will\n be sampled from dataset and each will be used for generating response.\n rollout_mini_batch_size: Rollout mini batch size for PPO. This number is useful when the\n GPU memory is not sufficient for running all generation code with single batch.\n ppo_epochs: Number of ppo epochs to training. 
`ppo_step` function will be called `ppo_epochs` times\n normalize_rewards: Whether to whiten rewards\n normalize_advantages: Whether to whiten advantages\n adaptive_kl_coeff: Whether to use adaptively changing KL divergence coefficient\n clip_rewards: Whether to clip rewards\n reward_clip_value: Reward clipping value\n init_kl_coeff: KL divergence coefficient for comparing actor model with reference model.\n Higher value means more trust to reference model.\n target_kl_coeff: Target KL divergence coefficient\n clip_coeff: Clip coefficient for PPO loss\n vf_coeff: Value loss coefficient for PPO loss\n gae_gamma: Discount factor for GAE(Generalized Advantage Estimation)\n gae_lambd: Lambda for GAE(Generalized Advantage Estimation)\n sft_experiment_dir: Path to supervised finetuning experiment directory. Latest checkpoint will be loaded.\n rm_experiment_dir: Path to reward modelling experiment directory. Latest checkpoint will be loaded.\n sft_model_name: Name of the model to load from supervised finetuning experiment directory.\n If not provided, latest checkpoint will be loaded.\n rm_model_name: Name of the model to load from reward modelling experiment directory.\n If not provided, latest checkpoint will be loaded.\n actor_learning_rate: Learning rate for actor optimizer\n critic_learning_rate: Learning rate for critic optimizer\n init_critic_with_reward: Whether to initialize critic with reward model checkpoint or not.\n \"\"\"\n\n config_name: str = \"ppo\"\n rollout_size: int = 128\n rollout_mini_batch_size: int = 32\n ppo_epochs: int = 1\n normalize_rewards: bool = True\n normalize_advantages: bool = True\n adaptive_kl_coeff: bool = False\n clip_rewards: bool = True\n reward_clip_value: float = 5.0\n init_kl_coeff: float = 0.1\n target_kl_coeff: float = 0.1\n clip_coeff: float = 0.2\n vf_coeff: float = 0.1\n gae_gamma: float = 1.0\n gae_lambd: float = 0.95\n sft_experiment_dir: str = II(\"sft_experiment_dir\")\n rm_experiment_dir: str = II(\"rm_experiment_dir\")\n sft_model_name: Optional[str] = None\n rm_model_name: Optional[str] = None\n actor_learning_rate: float = 1e-6\n critic_learning_rate: float = 1e-6\n init_critic_with_reward: bool = True" }, { "identifier": "prepare_generation_config", "path": "sheeprlhf/utils/data.py", "snippet": "def prepare_generation_config(\n tokenizer: PreTrainedTokenizer, model_cfg: ModelConfig, gen_cfg: GenConfig, fabric: lightning.Fabric\n) -> Dict[str, Any]:\n \"\"\"Creates generation config for Hugginface models.\n\n In this function, we try to solve token problems for different models.\n \"\"\"\n gen_cfg_dict = asdict(gen_cfg)\n try:\n generation_config = GenerationConfig.from_pretrained(model_cfg.repo_name, **gen_cfg_dict)\n except EnvironmentError:\n # If the model does not have `generation_config.json` file, we create from scratch\n fabric.print(\"`generation_config.json` not found, creating `GenerationConfig` from scratch\")\n generation_config = GenerationConfig(**gen_cfg_dict)\n generation_config.pad_token_id = tokenizer.pad_token_id\n generation_config.eos_token_id = tokenizer.eos_token_id\n generation_config.bos_token_id = tokenizer.bos_token_id\n return generation_config" }, { "identifier": "validate_dataset", "path": "sheeprlhf/utils/data.py", "snippet": "def validate_dataset(fabric: lightning.Fabric, data_cfg: DataConfig) -> DataProcessor:\n \"\"\"Dataset validator.\n\n Validates the dataset for checking if it is required to re-create\n all preprocessing steps using tokenizers.\n \"\"\"\n 
os.environ.setdefault(\"TOKENIZERS_PARALLELISM\", \"true\")\n data_processor: DataProcessor = instantiate_from_config(data_cfg)\n full_path = data_processor.full_path\n create_dataset: bool = True\n if os.path.isdir(full_path):\n config_path = full_path / \"config.yaml\"\n if not config_path.exists():\n fabric.print(f\"Config file not found at {config_path} for the given dataset {data_cfg.config_name}\")\n fabric.print(\"Dataset will be recreated and previous files will be deleted.\")\n else:\n open_config = OmegaConf.load(config_path)\n loaded_dataset_cfg = DataConfig(**open_config)\n current_tokenizer = prepare_tokenizer(data_cfg.tokenizer_name)\n loaded_tokenizer = prepare_tokenizer(loaded_dataset_cfg.tokenizer_name)\n\n if type(current_tokenizer) != type(loaded_tokenizer):\n fabric.print(\"Tokenizer type changed.\")\n fabric.print(f\"Was {type(loaded_tokenizer)} now {type(current_tokenizer)}\")\n fabric.print(\"New dataset will be recreated and previous files will be deleted.\")\n create_dataset = True\n elif data_cfg != loaded_dataset_cfg:\n diffs = {}\n for k, v in asdict(data_cfg).items():\n if v != getattr(loaded_dataset_cfg, k):\n diffs[k] = (v, getattr(loaded_dataset_cfg, k))\n fabric.print(\"Dataset config changed.\")\n\n fabric.print(\"\\n\".join([f\"{k} was {v[0]} now {v[1]}\" for k, v in diffs.items()]))\n fabric.print(\"New dataset will be recreated and previous files will be deleted.\")\n create_dataset = True\n else:\n fabric.print(\"Dataset already exists. Skipping dataset creation.\")\n create_dataset = False\n if create_dataset:\n shutil.rmtree(full_path)\n # This disables FastTokenizer's parallelism for multiprocessing with dataloaders\n # TODO: check if can be avoided\n os.environ.setdefault(\"TOKENIZERS_PARALLELISM\", \"false\")\n data_processor.tokenizer = prepare_tokenizer(data_cfg.tokenizer_name)\n if create_dataset and fabric.is_global_zero:\n fabric.print(f\"Creating new dataset in {full_path}\")\n data_processor.process()\n OmegaConf.save(data_cfg, full_path / \"config.yaml\")\n fabric.barrier()\n\n return data_processor" }, { "identifier": "create_tensorboard_logger", "path": "sheeprlhf/utils/helper.py", "snippet": "def create_tensorboard_logger(\n fabric: Fabric, cfg: Dict[str, Any], override_log_level: bool = False\n) -> Tuple[Optional[TensorBoardLogger]]:\n \"\"\"Creates tensorboard logger.\n\n Set logger only on rank-0 but share the logger directory: since\n we don't know. 
what is happening during the `fabric.save()` method,\n at least we assure that all ranks save under the same named folder.\n As a plus, rank-0 sets the time uniquely for everyone.\n \"\"\"\n # Set logger only on rank-0 but share the logger directory: since we don't know\n # what is happening during the `fabric.save()` method, at least we assure that all\n # ranks save under the same named folder.\n # As a plus, rank-0 sets the time uniquely for everyone\n logger = None\n if fabric.is_global_zero:\n root_dir = os.path.join(\"logs\", \"runs\", cfg.root_dir)\n if override_log_level or cfg.metric.log_level > 0:\n logger = TensorBoardLogger(root_dir=root_dir, name=cfg.run_name)\n return logger" }, { "identifier": "get_log_dir", "path": "sheeprlhf/utils/helper.py", "snippet": "def get_log_dir(fabric: Fabric, root_dir: str, run_name: str, share: bool = True) -> str:\n \"\"\"Return and, if necessary, create the log directory.\n\n If there are more than one processes, the rank-0 process shares\n the directory to the others\n (if the `share` parameter is set to `True`).\n\n Args:\n fabric: the fabric instance.\n root_dir: the root directory of the experiment.\n run_name: the name of the experiment.\n share: whether or not to share the `log_dir` among processes.\n\n Returns:\n The log directory of the experiment.\n \"\"\"\n world_collective = TorchCollective()\n if fabric.world_size > 1 and share:\n world_collective.setup()\n world_collective.create_group()\n if fabric.is_global_zero:\n # If the logger was instantiated, then take the log_dir from it\n if len(fabric.loggers) > 0:\n log_dir = fabric.logger.log_dir\n else:\n # Otherwise the rank-zero process creates the log_dir\n save_dir = os.path.join(\"logs\", \"runs\", root_dir, run_name)\n fs = get_filesystem(root_dir)\n try:\n listdir_info = fs.listdir(save_dir)\n existing_versions = []\n for listing in listdir_info:\n d = listing[\"name\"]\n bn = os.path.basename(d)\n if _is_dir(fs, d) and bn.startswith(\"version_\"):\n dir_ver = bn.split(\"_\")[1].replace(\"/\", \"\")\n existing_versions.append(int(dir_ver))\n version = 0 if len(existing_versions) == 0 else max(existing_versions) + 1\n log_dir = os.path.join(save_dir, f\"version_{version}\")\n except OSError:\n warnings.warn(\"Missing logger folder: %s\", save_dir, stacklevel=2)\n log_dir = os.path.join(save_dir, f\"version_{0}\")\n\n os.makedirs(log_dir, exist_ok=True)\n if fabric.world_size > 1 and share:\n world_collective.broadcast_object_list([log_dir], src=0)\n else:\n data = [None]\n world_collective.broadcast_object_list(data, src=0)\n log_dir = data[0]\n return log_dir" }, { "identifier": "log_text", "path": "sheeprlhf/utils/helper.py", "snippet": "@rank_zero_only\ndef log_text(fabric: lightning.Fabric, text: str, name: str, step: int):\n \"\"\"Wrapper function to log text to tensorboard.\"\"\"\n if fabric.logger is not None:\n if isinstance(fabric.logger, lightning.fabric.loggers.tensorboard.TensorBoardLogger):\n fabric.logger.experiment.add_text(name, text, step)\n else:\n warnings.warn(f\"Logging text is not supported for {type(fabric.logger)}\", stacklevel=2)" }, { "identifier": "instantiate_from_config", "path": "sheeprlhf/utils/hydra.py", "snippet": "def instantiate_from_config(config: Any, *args, **kwargs):\n \"\"\"Wrapper function to instantiate objects from Hydra config.\"\"\"\n config_copy = deepcopy(config)\n if is_dataclass(config_copy):\n config_copy = asdict(config_copy)\n if isinstance(config_copy, dict) and \"config_name\" in config_copy:\n 
config_copy.pop(\"config_name\")\n return instantiate(config_copy, *args, **kwargs)" }, { "identifier": "PPOMetricManager", "path": "sheeprlhf/utils/metric.py", "snippet": "class PPOMetricManager(MetricManager): # noqa: D101\n train_actor_loss: LastValueMetric\n train_critic_loss: LastValueMetric\n train_reward_mean: LastValueMetric\n train_kl_div_mean: LastValueMetric\n info_lr: LastValueMetric\n info_ppo_time: LastValueMetric\n info_rollout_time: LastValueMetric\n info_kl_coeff: LastValueMetric\n info_actor_grad_norm: LastValueMetric\n info_critic_grad_norm: LastValueMetric\n debug_reward_scores: StatsMetric\n debug_advantages: StatsMetric\n debug_returns: StatsMetric" }, { "identifier": "compute_grad_norm", "path": "sheeprlhf/utils/model.py", "snippet": "def compute_grad_norm(model: torch.nn.Module) -> float: # noqa: D103\n total_norm = 0\n parameters = [p for p in model.parameters() if p.grad is not None and p.requires_grad]\n for p in parameters:\n param_norm = p.grad.detach().cpu().data.norm(2)\n total_norm += param_norm.item() ** 2\n total_norm = total_norm**0.5\n return total_norm" }, { "identifier": "prepare_optimizer_parameters", "path": "sheeprlhf/utils/model.py", "snippet": "def prepare_optimizer_parameters(model: torch.nn.Module, weight_decay: float) -> List[Dict[str, Any]]:\n \"\"\"Taken from https://github.com/karpathy/nanoGPT.\"\"\"\n param_dict = {pn: p for pn, p in model.named_parameters()}\n # filter out those that do not require grad\n param_dict = {pn: p for pn, p in param_dict.items() if p.requires_grad}\n # create optim groups. Any parameters that is 2D will be weight decayed, otherwise no.\n # i.e. all weight tensors in matmuls + embeddings decay, all biases and layernorms don't.\n decay_params = [p for n, p in param_dict.items() if p.dim() >= 2]\n nodecay_params = [p for n, p in param_dict.items() if p.dim() < 2]\n optim_groups = [\n {\"params\": decay_params, \"weight_decay\": weight_decay},\n {\"params\": nodecay_params, \"weight_decay\": 0.0},\n ]\n num_decay_params = sum(p.numel() for p in decay_params)\n num_nodecay_params = sum(p.numel() for p in nodecay_params)\n\n return optim_groups, num_decay_params, num_nodecay_params" }, { "identifier": "AdaptiveKLController", "path": "sheeprlhf/utils/ppo.py", "snippet": "class AdaptiveKLController:\n \"\"\"A class for controlling the KL divergence between the old and new policy in PPO.\n\n Parameters:\n init_kl_coeff : float\n The initial value for the KL coefficient.\n target_kl_coeff : float\n The target value for the KL coefficient.\n kl_horizon : float\n The number of steps over which to adjust the KL coefficient.\n clip_range : float\n The maximum amount by which to clip the proportional error.\n\n Attributes:\n value : float\n The current value of the KL coefficient.\n \"\"\"\n\n def __init__(self, init_kl_coeff: float, target_kl_coeff: float, kl_horizon: float, clip_range: float):\n self.value = init_kl_coeff\n self.target_kl_coeff = target_kl_coeff\n self.kl_horizon = kl_horizon\n self.clip_range = clip_range\n\n def update(self, current: int, n_steps: int):\n \"\"\"Update the value of the PPO object based on the current KL divergence and the number of steps taken.\n\n Args:\n current (float): The current KL divergence.\n n_steps (int): The number of steps taken.\n \"\"\"\n target = self.target_kl_coeff\n proportional_error = torch.clamp(current / target - 1, -self.clip_range, self.clip_range)\n mult = 1 + proportional_error * n_steps / self.kl_horizon\n self.value *= mult" }, { "identifier": 
"FixedKLController", "path": "sheeprlhf/utils/ppo.py", "snippet": "class FixedKLController:\n \"\"\"Dummy KL controller that does not update.\"\"\"\n\n def __init__(self, kl_coeff):\n self.value = kl_coeff\n\n def update(self, current, n_steps): # noqa: D102\n pass" }, { "identifier": "collect_rollout", "path": "sheeprlhf/utils/ppo.py", "snippet": "@torch.no_grad()\ndef collect_rollout(\n batch: Dict[str, torch.Tensor],\n agent: PPOAgent,\n kl_controller: Union[FixedKLController, AdaptiveKLController],\n generation_config: GenerationConfig,\n task_cfg: PPOConfig,\n tokenizer: PreTrainedTokenizer,\n fabric: lightning.Fabric,\n metrics: PPOMetricManager,\n) -> Dict[str, torch.Tensor]:\n \"\"\"Collects rollout data for PPO algorithm.\n\n Args:\n batch: The rollout batch data\n agent: The PPO agent.\n kl_controller: The KL controller for penalty.\n generation_config: The generation configuration.\n task_cfg: The PPO configuration used for training\n tokenizer: The model tokenizer.\n fabric: The fabric object.\n metrics: The metric manager for training.\n\n Returns:\n The rollout data.\n \"\"\"\n # We have the batch as dictionary let's create tensordict\n # so we can create dataloader with Fabric that transfers the data\n # to correct devices.\n batch_tdict = make_tensordict(batch)\n mini_batch_dataloader = DataLoader(\n batch_tdict,\n shuffle=False,\n batch_size=task_cfg.rollout_mini_batch_size,\n collate_fn=lambda x: x,\n num_workers=0,\n drop_last=False,\n )\n mini_batch_dataloader = fabric.setup_dataloaders(mini_batch_dataloader, use_distributed_sampler=False)\n rollout_dict_list = []\n\n # We use first generated token index - 1 to obtain correct logprobs.\n # Here we have batch of data fed into all models we have here is the input looks like:\n # Assuming padding tokens are `O` and input tokens are `I`\n # O O I I I\n # O O O I I (left padded batch)\n # O I I I I\n # After responses are generated we have new data assuming response tokens are `R`\n # O O I I I R R R O O O\n # O O O I I R R R R R O (padded from right side to longest text)\n # O I I I I R R R R R R\n start_token_idx = batch[\"prompt_input_ids\"].size(1) - 1\n for i, mini_batch in enumerate(mini_batch_dataloader):\n prompt_input_ids = mini_batch[\"prompt_input_ids\"]\n prompt_attention_mask = mini_batch[\"prompt_attention_mask\"]\n data = {\"input_ids\": prompt_input_ids, \"attention_mask\": prompt_attention_mask}\n\n input_ids = agent.actor.generate(**data, generation_config=generation_config)\n max_len_diff = generation_config.max_new_tokens - (input_ids.size(1) - prompt_input_ids.size(1))\n if max_len_diff > 0:\n input_ids = torch.nn.functional.pad(input_ids, (0, max_len_diff), value=tokenizer.pad_token_id)\n attention_masks = (input_ids != generation_config.pad_token_id).int()\n\n data = {\"input_ids\": input_ids, \"attention_mask\": attention_masks}\n # for logprobs we already omit the last tokens from computation\n actor_log_probs = agent.actor(**data)[:, start_token_idx:]\n ref_log_probs = agent.reference(**data)[:, start_token_idx:]\n # We need to also do the same for value and reward outputs\n values = agent.critic(**data)[:, start_token_idx:-1]\n reward_outputs = agent.reward(**data)[:, start_token_idx:-1]\n\n mini_batch_rollout = {\n \"input_ids\": input_ids, # (B, T) (B, (prompt + generated))\n \"attention_mask\": attention_masks, # (B, T) (B, (prompt + generated))\n \"actor_log_probs\": actor_log_probs, # (B, num_new_tokens)\n \"ref_log_probs\": ref_log_probs, # (B, num_new_tokens)\n \"values\": values, # (B, 
num_new_tokens)\n \"reward_outputs\": reward_outputs, # (B, num_new_tokens)\n }\n mini_batch_tdict = make_tensordict(mini_batch_rollout).cpu()\n rollout_dict_list.append(mini_batch_tdict)\n if i == 0:\n sample_from_rollout = tokenizer.decode(input_ids[0], skip_special_tokens=True)\n\n rollout = torch.cat(rollout_dict_list, 0)\n action_mask = rollout[\"attention_mask\"][:, start_token_idx:-1].int()\n reward_outputs = rollout.pop(\"reward_outputs\")\n # we already removed the last token from action mask\n # we dont need to remove it from last_token_idx\n last_token_idx = torch.argmax(torch.cumsum(action_mask, dim=1) * action_mask, dim=1, keepdim=True)\n reward_scores = torch.gather(reward_outputs, dim=-1, index=last_token_idx).squeeze(-1)\n kl_div = rollout[\"actor_log_probs\"] - rollout[\"ref_log_probs\"]\n\n mean_kl_div = masked_mean(kl_div, action_mask).mean()\n if task_cfg.clip_rewards:\n torch.clip_(reward_scores, -task_cfg.reward_clip_value, task_cfg.reward_clip_value)\n\n if task_cfg.normalize_rewards:\n # we normalize the reward but do not shift the mean\n # TODO: Does it really important to normalize the rewards?\n reward_scores = normalize(reward_scores, shift_mean=False)\n\n # Rewards are made of two components:\n # 1. Per token kl divergence\n # 2. Last token reward\n # Combination of these two component creates the reward signal\n rewards = kl_div.detach().clone() * -kl_controller.value\n rewards.scatter_add_(dim=1, index=last_token_idx, src=reward_scores.unsqueeze(-1))\n values = rollout[\"values\"]\n\n advantages, returns = compute_advantages_and_returns(\n rewards=rewards * action_mask,\n values=values * action_mask,\n gamma=task_cfg.gae_gamma,\n lambd=task_cfg.gae_lambd,\n )\n rollout[\"advantages\"] = advantages\n rollout[\"returns\"] = returns\n kl_controller.update(mean_kl_div, rollout[\"input_ids\"].size(0))\n metrics.train_kl_div_mean.update(mean_kl_div.item())\n metrics.train_reward_mean.update(reward_scores.mean().item())\n metrics.debug_reward_scores(reward_scores)\n metrics.debug_advantages(advantages)\n metrics.debug_returns(returns)\n\n return rollout, sample_from_rollout" }, { "identifier": "masked_normalize", "path": "sheeprlhf/utils/ppo.py", "snippet": "def masked_normalize( # noqa: D103\n tensor: torch.Tensor, mask: torch.Tensor, shift_mean: bool = True, dim: int = 1, eps: float = 1e-8\n) -> torch.Tensor:\n tensor = tensor * mask\n mean = masked_mean(tensor, mask, dim=dim)\n mean_centered = tensor - mean\n var = masked_mean(mean_centered**2, mask, dim=dim)\n normalized = mean_centered * var.clamp(min=eps).rsqrt()\n if not shift_mean:\n normalized += mean\n return normalized" }, { "identifier": "register_task", "path": "sheeprlhf/utils/registry.py", "snippet": "def register_task():\n \"\"\"Task registery decorator.\"\"\"\n\n def inner_decorator(fn):\n return _register_task(fn)\n\n return inner_decorator" } ]
import copy
import time

import torch
from pathlib import Path
from typing import Dict
from lightning import Fabric
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import GenerationConfig, PreTrainedTokenizer

from sheeprlhf.agent.ppo import PPOAgent
from sheeprlhf.data.base import TextDataset
from sheeprlhf.data.collate import LeftPadCollate
from sheeprlhf.loss.ppo import policy_loss, value_loss
from sheeprlhf.model.actor import ActorModel
from sheeprlhf.structure.data import DataConfig
from sheeprlhf.structure.generation import GenConfig
from sheeprlhf.structure.model import ModelConfig
from sheeprlhf.structure.task import PPOConfig
from sheeprlhf.utils.data import prepare_generation_config, validate_dataset
from sheeprlhf.utils.helper import create_tensorboard_logger, get_log_dir, log_text
from sheeprlhf.utils.hydra import instantiate_from_config
from sheeprlhf.utils.metric import PPOMetricManager
from sheeprlhf.utils.model import compute_grad_norm, prepare_optimizer_parameters
from sheeprlhf.utils.ppo import AdaptiveKLController, FixedKLController, collect_rollout, masked_normalize
from sheeprlhf.utils.registry import register_task
11,751
# Setup batch data batch = next(data_iterator) max_prompt_length = batch["prompt_input_ids"].shape[1] agent.actor.eval() agent.critic.eval() t0 = time.time() rollout, sample_output = collect_rollout( batch=batch, agent=agent, generation_config=generation_config, kl_controller=kl_controller, task_cfg=task_cfg, tokenizer=tokenizer, fabric=fabric, metrics=metrics, ) time_rollout = time.time() - t0 rollout_dataloader = DataLoader( rollout, batch_size=task_cfg.micro_batch_size, shuffle=True, collate_fn=lambda x: x ) rollout_dataloader = fabric.setup_dataloaders(rollout_dataloader, use_distributed_sampler=False) agent.actor.train() agent.critic.train() for _ in range(task_cfg.ppo_epochs): accumulator_counter = 0 for micro_batch in rollout_dataloader: is_accumulating = (accumulator_counter) % task_cfg.gradient_accumulation_steps != 0 generated_data = { "input_ids": micro_batch["input_ids"], "attention_mask": micro_batch["attention_mask"], } old_log_probs = micro_batch["actor_log_probs"] old_values = micro_batch["values"] advantages = micro_batch["advantages"] returns = micro_batch["returns"] start_token_idx = max_prompt_length - 1 action_mask = micro_batch["attention_mask"][:, start_token_idx:-1].int() if task_cfg.normalize_advantages: advantages = masked_normalize(advantages, action_mask) with fabric.no_backward_sync(agent.actor, enabled=is_accumulating): log_probs = agent.actor(**generated_data)[:, start_token_idx:] # (B, num_new_tokens) p_loss = policy_loss( log_probs=log_probs, old_log_probs=old_log_probs, advantages=advantages, clip_coeff=task_cfg.clip_coeff, action_mask=action_mask, ) fabric.backward(p_loss / task_cfg.gradient_accumulation_steps) with fabric.no_backward_sync(agent.critic, enabled=is_accumulating): values = agent.critic(**generated_data)[:, start_token_idx:-1] # (B, num_new_tokens) v_loss = value_loss( values=values, old_values=old_values, returns=returns, clip_coeff=task_cfg.clip_coeff, action_mask=action_mask, ) fabric.backward((v_loss * task_cfg.vf_coeff) / task_cfg.gradient_accumulation_steps) if not is_accumulating: actor_grads = compute_grad_norm(model=agent.actor) fabric.clip_gradients( agent.actor, actor_optimizer, max_norm=task_cfg.gradient_clip_val, error_if_nonfinite=True ) actor_optimizer.step() actor_optimizer.zero_grad(set_to_none=True) critic_grads = compute_grad_norm(model=agent.critic) fabric.clip_gradients( agent.critic, critic_optimizer, max_norm=task_cfg.gradient_clip_val, error_if_nonfinite=True ) critic_optimizer.step() critic_optimizer.zero_grad(set_to_none=True) accumulator_counter += 1 time_ppo = time.time() - t0 - time_rollout with torch.no_grad(): metrics.info_rollout_time.update(time_rollout) metrics.info_ppo_time.update(time_ppo) metrics.train_actor_loss.update(p_loss.item()) metrics.train_critic_loss.update(v_loss.item()) metrics.info_actor_grad_norm.update(actor_grads) metrics.info_critic_grad_norm.update(critic_grads) metrics.info_kl_coeff.update(kl_controller.value) if k > 0 and (k % task_cfg.eval_interval == 0 or last_step): agent.actor.eval() agent.critic.eval() if fabric.is_global_zero: gen_text, score = generate( agent=agent, tokenizer=tokenizer, generation_config=eval_generation_config, example_prompt=example_prompt, device=fabric.device, ) log_text(fabric, sample_output, "info/rollout_sample", step=k) log_text(fabric, gen_text, "info/example_sample", step=k) fabric.log("info/example_last_reward", score, step=k) fabric.barrier() if k % task_cfg.log_interval == 0 or last_step: computed_metrics = metrics.compute_all() 
metrics.log_all(fabric=fabric, step=k, metrics_dict=computed_metrics) if not iterator.disable: description = f"iter {k}, rollout-time: {time_rollout*1000:.2f}ms, ppo-time: {time_ppo*1000:.2f}ms" for metric_name, metric_value in computed_metrics.items(): if metric_name.startswith("info/") or metric_name.startswith("debug/"): continue description += f", {metric_name}: {metric_value:.3f}" iterator.set_description(description) if k > 0 and (k % task_cfg.save_interval == 0 or last_step):
@torch.no_grad()
def generate(  # noqa: D103
    agent: PPOAgent,
    tokenizer: PreTrainedTokenizer,
    generation_config: GenerationConfig,
    example_prompt: Dict[str, torch.Tensor],
    device: torch.device,
):
    generated_input_ids = agent.actor.module.generate(
        input_ids=example_prompt["input_ids"].to(device),
        attention_mask=example_prompt["attention_mask"].to(device),
        generation_config=generation_config,
        use_cache=True,
    )
    prompt_length = example_prompt["input_ids"].shape[1]
    generated_attention_mask = (generated_input_ids != generation_config.pad_token_id).int()
    generated_data = {"input_ids": generated_input_ids, "attention_mask": generated_attention_mask}
    reward = agent.reward(**generated_data)[:, prompt_length:]
    action_mask = (generated_input_ids != generation_config.pad_token_id).int()[:, prompt_length:]
    last_token_idx = torch.argmax(torch.cumsum(action_mask, dim=1) * action_mask, dim=1, keepdim=True)
    reward_score = torch.gather(reward, dim=-1, index=last_token_idx).squeeze(-1)
    return tokenizer.decode(generated_input_ids[0], skip_special_tokens=True), reward_score.item()


@register_task()
def main(fabric: Fabric, cfg: Dict):  # noqa: D103
    task_cfg = PPOConfig(**cfg.task)
    model_cfg = ModelConfig(**cfg.model)
    data_cfg = DataConfig(**cfg.data)
    gen_cfg = GenConfig(**cfg.generation)
    optim_cfg = cfg.optim

    fabric.seed_everything(cfg.seed + fabric.global_rank)

    # Create TensorBoardLogger. This will create the logger only on the
    # rank-0 process
    logger = create_tensorboard_logger(fabric, cfg, override_log_level=True)
    if logger and fabric.is_global_zero:
        fabric._loggers = [logger]
        fabric.logger.log_hyperparams(cfg)
    log_dir = get_log_dir(fabric, cfg.root_dir, cfg.run_name)
    experiment_dir = Path(log_dir).parent

    # Setup Metrics
    metrics = PPOMetricManager(log_interval=task_cfg.log_interval).to(fabric.device)

    # Setup Dataloaders
    data_processor = validate_dataset(fabric, data_cfg)
    dataset_path = Path(data_processor.full_path)
    tokenizer = data_processor.tokenizer
    collator = LeftPadCollate(pad_value=tokenizer.pad_token_id, ignore_index=data_cfg.ignore_index)
    train_dataset = TextDataset(dataframe_path=dataset_path / "finetune_train.pkl")
    train_dataloader = DataLoader(
        train_dataset,
        shuffle=True,
        batch_size=task_cfg.micro_batch_size,
        collate_fn=collator,
        num_workers=task_cfg.num_workers,
    )
    train_dataloader = fabric.setup_dataloaders(train_dataloader)
    example_prompt = torch.load(dataset_path / "example_prompt.pt")

    # Setup Model
    with fabric.init_module(empty_init=model_cfg.fabric_empty_init):
        agent = PPOAgent(model_cfg=model_cfg, task_cfg=task_cfg)
    agent.load_checkpoint(device=fabric.device)
    agent.setup_finetuning()
    agent.actor = fabric.setup_module(agent.actor)
    agent.critic = fabric.setup_module(agent.critic)
    if not agent.share_critic_reward:
        agent.reward = fabric.setup_module(agent.reward)
    if not agent.share_actor_critic and not agent.lora_enabled:
        agent.reference = fabric.setup_module(agent.reference)

    # Setup Generation Configs
    generation_config = prepare_generation_config(
        tokenizer=tokenizer,
        model_cfg=model_cfg,
        gen_cfg=gen_cfg,
        fabric=fabric,
    )
    eval_gen_cfg = copy.deepcopy(gen_cfg)
    eval_gen_cfg.do_sample = False
    eval_generation_config = prepare_generation_config(
        tokenizer=tokenizer,
        model_cfg=model_cfg,
        gen_cfg=eval_gen_cfg,
        fabric=fabric,
    )

    # Setup Optimizer Scheduler fabric models
    actor_trainable_params, _, _ = prepare_optimizer_parameters(agent.actor, weight_decay=optim_cfg.weight_decay)
    actor_optimizer = instantiate_from_config(
        optim_cfg,
        params=actor_trainable_params,
        _convert_="partial",
    )
    actor_optimizer = fabric.setup_optimizers(actor_optimizer)
    critic_trainable_params, _, _ = prepare_optimizer_parameters(agent.critic, weight_decay=optim_cfg.weight_decay)
    critic_optimizer = instantiate_from_config(
        optim_cfg,
        params=critic_trainable_params,
        _convert_="partial",
    )
    critic_optimizer = fabric.setup_optimizers(critic_optimizer)

    if fabric.is_global_zero:
        gen_text, score = generate(
            agent=agent,
            tokenizer=tokenizer,
            generation_config=eval_generation_config,
            example_prompt=example_prompt,
            device=fabric.device,
        )
        log_text(fabric, gen_text, "info/example_sample", step=0)
        fabric.log("info/example_last_reward", score, step=0)

    num_training_steps = 2 if cfg.dry_run else task_cfg.epochs * len(train_dataloader)

    # KL Controller
    if task_cfg.adaptive_kl_coeff:
        kl_controller = AdaptiveKLController(
            init_kl_coef=task_cfg.init_kl_coeff, target=task_cfg.target_kl_coeff, kl_horizon=num_training_steps
        )
    else:
        kl_controller = FixedKLController(kl_coeff=task_cfg.init_kl_coeff)

    fabric.print("Model Checkpoint interval: ", task_cfg.save_interval, "steps")
    fabric.print("Model Evaluation interval: ", task_cfg.eval_interval, "steps")

    iterator = tqdm(range(num_training_steps), disable=not fabric.is_global_zero)
    data_iterator = iter(train_dataloader)
    agent.reward.eval()

    for k in iterator:
        # Setup counters and data
        if k % len(train_dataloader) == 0 or data_iterator is None:
            data_iterator = iter(train_dataloader)
        is_accumulating = (k) % task_cfg.gradient_accumulation_steps != 0
        last_step = k == num_training_steps - 1

        # Setup batch data
        batch = next(data_iterator)
        max_prompt_length = batch["prompt_input_ids"].shape[1]

        agent.actor.eval()
        agent.critic.eval()
        t0 = time.time()
        rollout, sample_output = collect_rollout(
            batch=batch,
            agent=agent,
            generation_config=generation_config,
            kl_controller=kl_controller,
            task_cfg=task_cfg,
            tokenizer=tokenizer,
            fabric=fabric,
            metrics=metrics,
        )
        time_rollout = time.time() - t0
        rollout_dataloader = DataLoader(
            rollout, batch_size=task_cfg.micro_batch_size, shuffle=True, collate_fn=lambda x: x
        )
        rollout_dataloader = fabric.setup_dataloaders(rollout_dataloader, use_distributed_sampler=False)
        agent.actor.train()
        agent.critic.train()
        for _ in range(task_cfg.ppo_epochs):
            accumulator_counter = 0
            for micro_batch in rollout_dataloader:
                is_accumulating = (accumulator_counter) % task_cfg.gradient_accumulation_steps != 0

                generated_data = {
                    "input_ids": micro_batch["input_ids"],
                    "attention_mask": micro_batch["attention_mask"],
                }
                old_log_probs = micro_batch["actor_log_probs"]
                old_values = micro_batch["values"]
                advantages = micro_batch["advantages"]
                returns = micro_batch["returns"]
                start_token_idx = max_prompt_length - 1
                action_mask = micro_batch["attention_mask"][:, start_token_idx:-1].int()
                if task_cfg.normalize_advantages:
                    advantages = masked_normalize(advantages, action_mask)

                with fabric.no_backward_sync(agent.actor, enabled=is_accumulating):
                    log_probs = agent.actor(**generated_data)[:, start_token_idx:]  # (B, num_new_tokens)
                    p_loss = policy_loss(
                        log_probs=log_probs,
                        old_log_probs=old_log_probs,
                        advantages=advantages,
                        clip_coeff=task_cfg.clip_coeff,
                        action_mask=action_mask,
                    )
                    fabric.backward(p_loss / task_cfg.gradient_accumulation_steps)

                with fabric.no_backward_sync(agent.critic, enabled=is_accumulating):
                    values = agent.critic(**generated_data)[:, start_token_idx:-1]  # (B, num_new_tokens)
                    v_loss = value_loss(
                        values=values,
                        old_values=old_values,
                        returns=returns,
                        clip_coeff=task_cfg.clip_coeff,
                        action_mask=action_mask,
                    )
                    fabric.backward((v_loss * task_cfg.vf_coeff) / task_cfg.gradient_accumulation_steps)

                if not is_accumulating:
                    actor_grads = compute_grad_norm(model=agent.actor)
                    fabric.clip_gradients(
                        agent.actor, actor_optimizer, max_norm=task_cfg.gradient_clip_val, error_if_nonfinite=True
                    )
                    actor_optimizer.step()
                    actor_optimizer.zero_grad(set_to_none=True)

                    critic_grads = compute_grad_norm(model=agent.critic)
                    fabric.clip_gradients(
                        agent.critic, critic_optimizer, max_norm=task_cfg.gradient_clip_val, error_if_nonfinite=True
                    )
                    critic_optimizer.step()
                    critic_optimizer.zero_grad(set_to_none=True)
                accumulator_counter += 1

        time_ppo = time.time() - t0 - time_rollout

        with torch.no_grad():
            metrics.info_rollout_time.update(time_rollout)
            metrics.info_ppo_time.update(time_ppo)
            metrics.train_actor_loss.update(p_loss.item())
            metrics.train_critic_loss.update(v_loss.item())
            metrics.info_actor_grad_norm.update(actor_grads)
            metrics.info_critic_grad_norm.update(critic_grads)
            metrics.info_kl_coeff.update(kl_controller.value)

        if k > 0 and (k % task_cfg.eval_interval == 0 or last_step):
            agent.actor.eval()
            agent.critic.eval()
            if fabric.is_global_zero:
                gen_text, score = generate(
                    agent=agent,
                    tokenizer=tokenizer,
                    generation_config=eval_generation_config,
                    example_prompt=example_prompt,
                    device=fabric.device,
                )
                log_text(fabric, sample_output, "info/rollout_sample", step=k)
                log_text(fabric, gen_text, "info/example_sample", step=k)
                fabric.log("info/example_last_reward", score, step=k)
            fabric.barrier()

        if k % task_cfg.log_interval == 0 or last_step:
            computed_metrics = metrics.compute_all()
            metrics.log_all(fabric=fabric, step=k, metrics_dict=computed_metrics)

            if not iterator.disable:
                description = f"iter {k}, rollout-time: {time_rollout*1000:.2f}ms, ppo-time: {time_ppo*1000:.2f}ms"
                for metric_name, metric_value in computed_metrics.items():
                    if metric_name.startswith("info/") or metric_name.startswith("debug/"):
                        continue
                    description += f", {metric_name}: {metric_value:.3f}"
                iterator.set_description(description)

        if k > 0 and (k % task_cfg.save_interval == 0 or last_step):
checkpoint_model: ActorModel = agent.actor.module
5
2023-10-31 12:02:02+00:00
16k
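The PPO fine-tuning loop above leans on a few helpers whose definitions are not included in this excerpt: masked_normalize, policy_loss and value_loss. As a rough sketch of what such helpers typically compute, here is a minimal PyTorch version assuming the standard clipped PPO objectives with a per-token action mask; the names and signatures are taken from the call sites above, but the bodies are illustrative assumptions, not the repository's actual implementation.

import torch


def masked_mean(x: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
    # Average only over positions where mask == 1, i.e. over real generated tokens.
    return (x * mask).sum() / mask.sum().clamp(min=1)


def masked_normalize(x: torch.Tensor, mask: torch.Tensor, eps: float = 1e-8) -> torch.Tensor:
    # Normalize advantages to zero mean / unit variance over the masked positions only.
    mean = masked_mean(x, mask)
    var = masked_mean((x - mean) ** 2, mask)
    return (x - mean) * torch.rsqrt(var + eps)


def policy_loss(log_probs, old_log_probs, advantages, clip_coeff, action_mask):
    # Clipped PPO surrogate objective, averaged over valid action positions.
    ratio = torch.exp(log_probs - old_log_probs)
    unclipped = -advantages * ratio
    clipped = -advantages * torch.clamp(ratio, 1.0 - clip_coeff, 1.0 + clip_coeff)
    return masked_mean(torch.max(unclipped, clipped), action_mask)


def value_loss(values, old_values, returns, clip_coeff, action_mask):
    # Clipped value-function loss, averaged over valid positions; the caller scales it by vf_coeff.
    values_clipped = old_values + torch.clamp(values - old_values, -clip_coeff, clip_coeff)
    squared = torch.max((values - returns) ** 2, (values_clipped - returns) ** 2)
    return 0.5 * masked_mean(squared, action_mask)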
cpacker/MemGPT
memgpt/server/rest_api/server.py
[ { "identifier": "JSON_ENSURE_ASCII", "path": "memgpt/constants.py", "snippet": "JSON_ENSURE_ASCII = False\r" }, { "identifier": "setup_agents_index_router", "path": "memgpt/server/rest_api/agents/index.py", "snippet": "def setup_agents_index_router(server: SyncServer, interface: QueuingInterface):\n @router.get(\"/agents\", tags=[\"agents\"], response_model=ListAgentsResponse)\n def list_agents(user_id: str = Query(..., description=\"Unique identifier of the user.\")):\n \"\"\"\n List all agents associated with a given user.\n\n This endpoint retrieves a list of all agents and their configurations associated with the specified user ID.\n \"\"\"\n request = ListAgentsRequest(user_id=user_id)\n\n # TODO remove once chatui adds user selection / pulls user from config\n request.user_id = None if request.user_id == \"null\" else request.user_id\n\n user_id = uuid.UUID(request.user_id) if request.user_id else None\n\n interface.clear()\n agents_data = server.list_agents(user_id=user_id)\n return ListAgentsResponse(**agents_data)\n\n @router.post(\"/agents\", tags=[\"agents\"], response_model=CreateAgentResponse)\n def create_agent(request: CreateAgentRequest = Body(...)):\n \"\"\"\n Create a new agent with the specified configuration.\n \"\"\"\n interface.clear()\n\n # TODO remove once chatui adds user selection / pulls user from config\n request.user_id = None if request.user_id == \"null\" else request.user_id\n\n try:\n user_id = uuid.UUID(request.user_id) if request.user_id else None\n agent_state = server.create_agent(user_id=user_id, agent_config=request.config)\n return CreateAgentResponse(agent_id=agent_state.id)\n except Exception as e:\n raise HTTPException(status_code=500, detail=str(e))\n\n return router" }, { "identifier": "setup_agents_command_router", "path": "memgpt/server/rest_api/agents/command.py", "snippet": "def setup_agents_command_router(server: SyncServer, interface: QueuingInterface):\n @router.post(\"/agents/command\", tags=[\"agents\"], response_model=CommandResponse)\n def run_command(request: CommandRequest = Body(...)):\n \"\"\"\n Execute a command on a specified agent.\n\n This endpoint receives a command to be executed on an agent. 
It uses the user and agent identifiers to authenticate and route the command appropriately.\n\n Raises an HTTPException for any processing errors.\n \"\"\"\n interface.clear()\n try:\n # TODO remove once chatui adds user selection / pulls user from config\n request.user_id = None if request.user_id == \"null\" else request.user_id\n\n user_id = uuid.UUID(request.user_id) if request.user_id else None\n agent_id = uuid.UUID(request.agent_id) if request.agent_id else None\n response = server.run_command(user_id=user_id, agent_id=agent_id, command=request.command)\n except HTTPException:\n raise\n except Exception as e:\n raise HTTPException(status_code=500, detail=f\"{e}\")\n return CommandResponse(response=response)\n\n return router" }, { "identifier": "setup_agents_config_router", "path": "memgpt/server/rest_api/agents/config.py", "snippet": "def setup_agents_config_router(server: SyncServer, interface: QueuingInterface):\n @router.get(\"/agents/config\", tags=[\"agents\"], response_model=AgentConfigResponse)\n def get_agent_config(\n user_id: str = Query(..., description=\"Unique identifier of the user requesting the config.\"),\n agent_id: str = Query(..., description=\"Identifier of the agent whose config is requested.\"),\n ):\n \"\"\"\n Retrieve the configuration for a specific agent.\n\n This endpoint fetches the configuration details for a given agent, identified by the user and agent IDs.\n \"\"\"\n request = AgentConfigRequest(user_id=user_id, agent_id=agent_id)\n\n # TODO remove once chatui adds user selection / pulls user from config\n request.user_id = None if request.user_id == \"null\" else request.user_id\n\n user_id = uuid.UUID(request.user_id) if request.user_id else None\n agent_id = uuid.UUID(request.agent_id) if request.agent_id else None\n\n interface.clear()\n config = server.get_agent_config(user_id=user_id, agent_id=agent_id)\n return AgentConfigResponse(config=config)\n\n return router" }, { "identifier": "setup_agents_memory_router", "path": "memgpt/server/rest_api/agents/memory.py", "snippet": "def setup_agents_memory_router(server: SyncServer, interface: QueuingInterface):\n @router.get(\"/agents/memory\", tags=[\"agents\"], response_model=GetAgentMemoryResponse)\n def get_agent_memory(\n user_id: str = Query(..., description=\"The unique identifier of the user.\"),\n agent_id: str = Query(..., description=\"The unique identifier of the agent.\"),\n ):\n \"\"\"\n Retrieve the memory state of a specific agent.\n\n This endpoint fetches the current memory state of the agent identified by the user ID and agent ID.\n \"\"\"\n # Validate with the Pydantic model (optional)\n request = GetAgentMemoryRequest(user_id=user_id, agent_id=agent_id)\n\n # TODO remove once chatui adds user selection / pulls user from config\n request.user_id = None if request.user_id == \"null\" else request.user_id\n\n user_id = uuid.UUID(request.user_id) if request.user_id else None\n agent_id = uuid.UUID(request.agent_id) if request.agent_id else None\n\n interface.clear()\n memory = server.get_agent_memory(user_id=user_id, agent_id=agent_id)\n return GetAgentMemoryResponse(**memory)\n\n @router.post(\"/agents/memory\", tags=[\"agents\"], response_model=UpdateAgentMemoryResponse)\n def update_agent_memory(request: UpdateAgentMemoryRequest = Body(...)):\n \"\"\"\n Update the core memory of a specific agent.\n\n This endpoint accepts new memory contents (human and persona) and updates the core memory of the agent identified by the user ID and agent ID.\n \"\"\"\n # TODO remove once chatui 
adds user selection / pulls user from config\n request.user_id = None if request.user_id == \"null\" else request.user_id\n\n user_id = uuid.UUID(request.user_id) if request.user_id else None\n agent_id = uuid.UUID(request.agent_id) if request.agent_id else None\n\n interface.clear()\n\n new_memory_contents = {\"persona\": request.persona, \"human\": request.human}\n response = server.update_agent_core_memory(user_id=user_id, agent_id=agent_id, new_memory_contents=new_memory_contents)\n return UpdateAgentMemoryResponse(**response)\n\n return router" }, { "identifier": "setup_agents_message_router", "path": "memgpt/server/rest_api/agents/message.py", "snippet": "def setup_agents_message_router(server: SyncServer, interface: QueuingInterface):\n @router.get(\"/agents/message\", tags=[\"agents\"], response_model=GetAgentMessagesResponse)\n def get_agent_messages(\n user_id: str = Query(..., description=\"The unique identifier of the user.\"),\n agent_id: str = Query(..., description=\"The unique identifier of the agent.\"),\n start: int = Query(..., description=\"Message index to start on (reverse chronological).\"),\n count: int = Query(..., description=\"How many messages to retrieve.\"),\n ):\n \"\"\"\n Retrieve the in-context messages of a specific agent. Paginated, provide start and count to iterate.\n \"\"\"\n # Validate with the Pydantic model (optional)\n request = GetAgentMessagesRequest(user_id=user_id, agent_id=agent_id, start=start, count=count)\n\n # TODO remove once chatui adds user selection / pulls user from config\n request.user_id = None if request.user_id == \"null\" else request.user_id\n\n user_id = uuid.UUID(request.user_id) if request.user_id else None\n agent_id = uuid.UUID(request.agent_id) if request.agent_id else None\n\n interface.clear()\n messages = server.get_agent_messages(user_id=user_id, agent_id=agent_id, start=request.start, count=request.count)\n return GetAgentMessagesResponse(messages=messages)\n\n @router.post(\"/agents/message\", tags=[\"agents\"], response_model=UserMessageResponse)\n async def send_message(request: UserMessageRequest = Body(...)):\n \"\"\"\n Process a user message and return the agent's response.\n\n This endpoint accepts a message from a user and processes it through the agent.\n It can optionally stream the response if 'stream' is set to True.\n \"\"\"\n # TODO remove once chatui adds user selection / pulls user from config\n request.user_id = None if request.user_id == \"null\" else request.user_id\n\n user_id = uuid.UUID(request.user_id) if request.user_id else None\n agent_id = uuid.UUID(request.agent_id) if request.agent_id else None\n\n if request.role == \"user\" or request.role is None:\n message_func = server.user_message\n elif request.role == \"system\":\n message_func = server.system_message\n else:\n raise HTTPException(status_code=500, detail=f\"Bad role {request.role}\")\n\n if request.stream:\n # For streaming response\n try:\n # Start the generation process (similar to the non-streaming case)\n # This should be a non-blocking call or run in a background task\n # Check if server.user_message is an async function\n if asyncio.iscoroutinefunction(message_func):\n # Start the async task\n await asyncio.create_task(message_func(user_id=user_id, agent_id=agent_id, message=request.message))\n else:\n\n def handle_exception(exception_loop: AbstractEventLoop, context):\n # context[\"message\"] will always be there; but context[\"exception\"] may not\n error = context.get(\"exception\") or context[\"message\"]\n 
print(f\"handling asyncio exception {context}\")\n interface.error(str(error))\n\n # Run the synchronous function in a thread pool\n loop = asyncio.get_event_loop()\n loop.set_exception_handler(handle_exception)\n loop.run_in_executor(None, message_func, user_id, agent_id, request.message)\n\n async def formatted_message_generator():\n async for message in interface.message_generator():\n formatted_message = f\"data: {json.dumps(message)}\\n\\n\"\n yield formatted_message\n await asyncio.sleep(1)\n\n # Return the streaming response using the generator\n return StreamingResponse(formatted_message_generator(), media_type=\"text/event-stream\")\n except HTTPException:\n raise\n except Exception as e:\n raise HTTPException(status_code=500, detail=f\"{e}\")\n\n else:\n interface.clear()\n try:\n message_func(user_id=user_id, agent_id=agent_id, message=request.message)\n except HTTPException:\n raise\n except Exception as e:\n raise HTTPException(status_code=500, detail=str(e))\n return UserMessageResponse(messages=interface.to_list())\n\n return router" }, { "identifier": "setup_config_index_router", "path": "memgpt/server/rest_api/config/index.py", "snippet": "def setup_config_index_router(server: SyncServer, interface: QueuingInterface):\n @router.get(\"/config\", tags=[\"config\"], response_model=ConfigResponse)\n def get_server_config(user_id: str = Query(..., description=\"Unique identifier of the user requesting the config.\")):\n \"\"\"\n Retrieve the base configuration for the server.\n \"\"\"\n request = ConfigRequest(user_id=user_id)\n\n # TODO remove once chatui adds user selection / pulls user from config\n request.user_id = None if request.user_id == \"null\" else request.user_id\n\n user_id = uuid.UUID(request.user_id) if request.user_id else None\n\n interface.clear()\n response = server.get_server_config(user_id=user_id)\n return ConfigResponse(config=response)\n\n return router" }, { "identifier": "SyncServer", "path": "memgpt/server/server.py", "snippet": "class SyncServer(LockingServer):\n \"\"\"Simple single-threaded / blocking server process\"\"\"\n\n def __init__(\n self,\n chaining: bool = True,\n max_chaining_steps: bool = None,\n # default_interface_cls: AgentInterface = CLIInterface,\n default_interface: AgentInterface = CLIInterface(),\n # default_persistence_manager_cls: PersistenceManager = LocalStateManager,\n ):\n \"\"\"Server process holds in-memory agents that are being run\"\"\"\n\n # List of {'user_id': user_id, 'agent_id': agent_id, 'agent': agent_obj} dicts\n self.active_agents = []\n\n # chaining = whether or not to run again if request_heartbeat=true\n self.chaining = chaining\n\n # if chaining == true, what's the max number of times we'll chain before yielding?\n # none = no limit, can go on forever\n self.max_chaining_steps = max_chaining_steps\n\n # The default interface that will get assigned to agents ON LOAD\n # self.default_interface_cls = default_interface_cls\n self.default_interface = default_interface\n\n # The default persistence manager that will get assigned to agents ON CREATION\n # self.default_persistence_manager_cls = default_persistence_manager_cls\n\n # Initialize the connection to the DB\n self.config = MemGPTConfig.load()\n\n # TODO figure out how to handle credentials for the server\n self.credentials = MemGPTCredentials.load()\n\n # Ensure valid database configuration\n # TODO: add back once tests are matched\n # assert (\n # self.config.metadata_storage_type == \"postgres\"\n # ), f\"Invalid metadata_storage_type for server: 
{self.config.metadata_storage_type}\"\n # assert (\n # self.config.archival_storage_type == \"postgres\"\n # ), f\"Invalid archival_storage_type for server: {self.config.archival_storage_type}\"\n # assert self.config.recall_storage_type == \"postgres\", f\"Invalid recall_storage_type for server: {self.config.recall_storage_type}\"\n\n # Generate default LLM/Embedding configs for the server\n # TODO: we may also want to do the same thing with default persona/human/etc.\n self.server_llm_config = LLMConfig(\n model=self.config.default_llm_config.model,\n model_endpoint_type=self.config.default_llm_config.model_endpoint_type,\n model_endpoint=self.config.default_llm_config.model_endpoint,\n model_wrapper=self.config.default_llm_config.model_wrapper,\n context_window=self.config.default_llm_config.context_window,\n # openai_key=self.credentials.openai_key,\n # azure_key=self.credentials.azure_key,\n # azure_endpoint=self.credentials.azure_endpoint,\n # azure_version=self.credentials.azure_version,\n # azure_deployment=self.credentials.azure_deployment,\n )\n self.server_embedding_config = EmbeddingConfig(\n embedding_endpoint_type=self.config.default_embedding_config.embedding_endpoint_type,\n embedding_endpoint=self.config.default_embedding_config.embedding_endpoint,\n embedding_dim=self.config.default_embedding_config.embedding_dim,\n # openai_key=self.credentials.openai_key,\n )\n\n # Initialize the metadata store\n self.ms = MetadataStore(self.config)\n\n # NOTE: removed, since server should be multi-user\n ## Create the default user\n # base_user_id = uuid.UUID(self.config.anon_clientid)\n # if not self.ms.get_user(user_id=base_user_id):\n # base_user = User(id=base_user_id)\n # self.ms.create_user(base_user)\n\n def save_agents(self):\n \"\"\"Saves all the agents that are in the in-memory object store\"\"\"\n for agent_d in self.active_agents:\n try:\n # agent_d[\"agent\"].save()\n save_agent(agent_d[\"agent\"], self.ms)\n logger.info(f\"Saved agent {agent_d['agent_id']}\")\n except Exception as e:\n logger.exception(f\"Error occurred while trying to save agent {agent_d['agent_id']}:\\n{e}\")\n\n def _get_agent(self, user_id: uuid.UUID, agent_id: uuid.UUID) -> Union[Agent, None]:\n \"\"\"Get the agent object from the in-memory object store\"\"\"\n for d in self.active_agents:\n if d[\"user_id\"] == str(user_id) and d[\"agent_id\"] == str(agent_id):\n return d[\"agent\"]\n return None\n\n def _add_agent(self, user_id: uuid.UUID, agent_id: uuid.UUID, agent_obj: Agent) -> None:\n \"\"\"Put an agent object inside the in-memory object store\"\"\"\n # Make sure the agent doesn't already exist\n if self._get_agent(user_id=user_id, agent_id=agent_id) is not None:\n # Can be triggered on concucrent request, so don't throw a full error\n # raise KeyError(f\"Agent (user={user_id}, agent={agent_id}) is already loaded\")\n logger.exception(f\"Agent (user={user_id}, agent={agent_id}) is already loaded\")\n return\n # Add Agent instance to the in-memory list\n self.active_agents.append(\n {\n \"user_id\": str(user_id),\n \"agent_id\": str(agent_id),\n \"agent\": agent_obj,\n }\n )\n\n def _load_agent(self, user_id: uuid.UUID, agent_id: uuid.UUID, interface: Union[AgentInterface, None] = None) -> Agent:\n \"\"\"Loads a saved agent into memory (if it doesn't exist, throw an error)\"\"\"\n assert isinstance(user_id, uuid.UUID), user_id\n assert isinstance(agent_id, uuid.UUID), agent_id\n\n # If an interface isn't specified, use the default\n if interface is None:\n interface = self.default_interface\n\n 
try:\n logger.info(f\"Grabbing agent user_id={user_id} agent_id={agent_id} from database\")\n agent_state = self.ms.get_agent(agent_id=agent_id, user_id=user_id)\n if not agent_state:\n logger.exception(f\"agent_id {agent_id} does not exist\")\n raise ValueError(f\"agent_id {agent_id} does not exist\")\n # print(f\"server._load_agent :: load got agent state {agent_id}, messages = {agent_state.state['messages']}\")\n\n # Instantiate an agent object using the state retrieved\n logger.info(f\"Creating an agent object\")\n memgpt_agent = Agent(agent_state=agent_state, interface=interface)\n\n # Add the agent to the in-memory store and return its reference\n logger.info(f\"Adding agent to the agent cache: user_id={user_id}, agent_id={agent_id}\")\n self._add_agent(user_id=user_id, agent_id=agent_id, agent_obj=memgpt_agent)\n return memgpt_agent\n\n except Exception as e:\n logger.exception(f\"Error occurred while trying to get agent {agent_id}:\\n{e}\")\n raise\n\n def _get_or_load_agent(self, user_id: uuid.UUID, agent_id: uuid.UUID) -> Agent:\n \"\"\"Check if the agent is in-memory, then load\"\"\"\n logger.info(f\"Checking for agent user_id={user_id} agent_id={agent_id}\")\n memgpt_agent = self._get_agent(user_id=user_id, agent_id=agent_id)\n if not memgpt_agent:\n logger.info(f\"Agent not loaded, loading agent user_id={user_id} agent_id={agent_id}\")\n memgpt_agent = self._load_agent(user_id=user_id, agent_id=agent_id)\n return memgpt_agent\n\n def _step(self, user_id: uuid.UUID, agent_id: uuid.UUID, input_message: str) -> None:\n \"\"\"Send the input message through the agent\"\"\"\n\n logger.debug(f\"Got input message: {input_message}\")\n\n # Get the agent object (loaded in memory)\n memgpt_agent = self._get_or_load_agent(user_id=user_id, agent_id=agent_id)\n if memgpt_agent is None:\n raise KeyError(f\"Agent (user={user_id}, agent={agent_id}) is not loaded\")\n\n logger.debug(f\"Starting agent step\")\n no_verify = True\n next_input_message = input_message\n counter = 0\n while True:\n new_messages, heartbeat_request, function_failed, token_warning = memgpt_agent.step(\n next_input_message, first_message=False, skip_verify=no_verify\n )\n counter += 1\n\n # Chain stops\n if not self.chaining:\n logger.debug(\"No chaining, stopping after one step\")\n break\n elif self.max_chaining_steps is not None and counter > self.max_chaining_steps:\n logger.debug(f\"Hit max chaining steps, stopping after {counter} steps\")\n break\n # Chain handlers\n elif token_warning:\n next_input_message = system.get_token_limit_warning()\n continue # always chain\n elif function_failed:\n next_input_message = system.get_heartbeat(constants.FUNC_FAILED_HEARTBEAT_MESSAGE)\n continue # always chain\n elif heartbeat_request:\n next_input_message = system.get_heartbeat(constants.REQ_HEARTBEAT_MESSAGE)\n continue # always chain\n # MemGPT no-op / yield\n else:\n break\n\n memgpt_agent.interface.step_yield()\n logger.debug(f\"Finished agent step\")\n\n def _command(self, user_id: uuid.UUID, agent_id: uuid.UUID, command: str) -> Union[str, None]:\n \"\"\"Process a CLI command\"\"\"\n\n logger.debug(f\"Got command: {command}\")\n\n # Get the agent object (loaded in memory)\n memgpt_agent = self._get_or_load_agent(user_id=user_id, agent_id=agent_id)\n # print(\"AGENT\", memgpt_agent.agent_state.id, memgpt_agent.agent_state.user_id)\n\n if command.lower() == \"exit\":\n # exit not supported on server.py\n raise ValueError(command)\n\n elif command.lower() == \"save\" or command.lower() == \"savechat\":\n 
save_agent(memgpt_agent, self.ms)\n\n elif command.lower() == \"attach\":\n # Different from CLI, we extract the data source name from the command\n command = command.strip().split()\n try:\n data_source = int(command[1])\n except:\n raise ValueError(command)\n\n # TODO: check if agent already has it\n data_source_options = StorageConnector.list_loaded_data()\n if len(data_source_options) == 0:\n raise ValueError('No sources available. You must load a souce with \"memgpt load ...\" before running /attach.')\n elif data_source not in data_source_options:\n raise ValueError(f\"Invalid data source name: {data_source} (options={data_source_options})\")\n else:\n # attach new data\n attach(memgpt_agent.agent_state.name, data_source)\n\n # update agent config\n memgpt_agent.agent_state.attach_data_source(data_source)\n\n # reload agent with new data source\n # TODO: maybe make this less ugly...\n memgpt_agent.persistence_manager.archival_memory.storage = StorageConnector.get_storage_connector(\n agent_config=memgpt_agent.agent_state\n )\n\n elif command.lower() == \"dump\" or command.lower().startswith(\"dump \"):\n # Check if there's an additional argument that's an integer\n command = command.strip().split()\n amount = int(command[1]) if len(command) > 1 and command[1].isdigit() else 0\n if amount == 0:\n memgpt_agent.interface.print_messages(memgpt_agent.messages, dump=True)\n else:\n memgpt_agent.interface.print_messages(memgpt_agent.messages[-min(amount, len(memgpt_agent.messages)) :], dump=True)\n\n elif command.lower() == \"dumpraw\":\n memgpt_agent.interface.print_messages_raw(memgpt_agent.messages)\n\n elif command.lower() == \"memory\":\n ret_str = (\n f\"\\nDumping memory contents:\\n\"\n + f\"\\n{str(memgpt_agent.memory)}\"\n + f\"\\n{str(memgpt_agent.persistence_manager.archival_memory)}\"\n + f\"\\n{str(memgpt_agent.persistence_manager.recall_memory)}\"\n )\n return ret_str\n\n elif command.lower() == \"pop\" or command.lower().startswith(\"pop \"):\n # Check if there's an additional argument that's an integer\n command = command.strip().split()\n pop_amount = int(command[1]) if len(command) > 1 and command[1].isdigit() else 3\n n_messages = len(memgpt_agent.messages)\n MIN_MESSAGES = 2\n if n_messages <= MIN_MESSAGES:\n logger.info(f\"Agent only has {n_messages} messages in stack, none left to pop\")\n elif n_messages - pop_amount < MIN_MESSAGES:\n logger.info(f\"Agent only has {n_messages} messages in stack, cannot pop more than {n_messages - MIN_MESSAGES}\")\n else:\n logger.info(f\"Popping last {pop_amount} messages from stack\")\n for _ in range(min(pop_amount, len(memgpt_agent.messages))):\n memgpt_agent.messages.pop()\n\n elif command.lower() == \"retry\":\n # TODO this needs to also modify the persistence manager\n logger.info(f\"Retrying for another answer\")\n while len(memgpt_agent.messages) > 0:\n if memgpt_agent.messages[-1].get(\"role\") == \"user\":\n # we want to pop up to the last user message and send it again\n user_message = memgpt_agent.messages[-1].get(\"content\")\n memgpt_agent.messages.pop()\n break\n memgpt_agent.messages.pop()\n\n elif command.lower() == \"rethink\" or command.lower().startswith(\"rethink \"):\n # TODO this needs to also modify the persistence manager\n if len(command) < len(\"rethink \"):\n logger.warning(\"Missing text after the command\")\n else:\n for x in range(len(memgpt_agent.messages) - 1, 0, -1):\n if memgpt_agent.messages[x].get(\"role\") == \"assistant\":\n text = command[len(\"rethink \") :].strip()\n 
memgpt_agent.messages[x].update({\"content\": text})\n break\n\n elif command.lower() == \"rewrite\" or command.lower().startswith(\"rewrite \"):\n # TODO this needs to also modify the persistence manager\n if len(command) < len(\"rewrite \"):\n logger.warning(\"Missing text after the command\")\n else:\n for x in range(len(memgpt_agent.messages) - 1, 0, -1):\n if memgpt_agent.messages[x].get(\"role\") == \"assistant\":\n text = command[len(\"rewrite \") :].strip()\n args = json.loads(memgpt_agent.messages[x].get(\"function_call\").get(\"arguments\"))\n args[\"message\"] = text\n memgpt_agent.messages[x].get(\"function_call\").update(\n {\"arguments\": json.dumps(args, ensure_ascii=constants.JSON_ENSURE_ASCII)}\n )\n break\n\n # No skip options\n elif command.lower() == \"wipe\":\n # exit not supported on server.py\n raise ValueError(command)\n\n elif command.lower() == \"heartbeat\":\n input_message = system.get_heartbeat()\n self._step(user_id=user_id, agent_id=agent_id, input_message=input_message)\n\n elif command.lower() == \"memorywarning\":\n input_message = system.get_token_limit_warning()\n self._step(user_id=user_id, agent_id=agent_id, input_message=input_message)\n\n @LockingServer.agent_lock_decorator\n def user_message(self, user_id: uuid.UUID, agent_id: uuid.UUID, message: str) -> None:\n \"\"\"Process an incoming user message and feed it through the MemGPT agent\"\"\"\n if self.ms.get_user(user_id=user_id) is None:\n raise ValueError(f\"User user_id={user_id} does not exist\")\n if self.ms.get_agent(agent_id=agent_id, user_id=user_id) is None:\n raise ValueError(f\"Agent agent_id={agent_id} does not exist\")\n\n # Basic input sanitization\n if not isinstance(message, str) or len(message) == 0:\n raise ValueError(f\"Invalid input: '{message}'\")\n\n # If the input begins with a command prefix, reject\n elif message.startswith(\"/\"):\n raise ValueError(f\"Invalid input: '{message}'\")\n\n # Else, process it as a user message to be fed to the agent\n else:\n # Package the user message first\n packaged_user_message = system.package_user_message(user_message=message)\n # Run the agent state forward\n self._step(user_id=user_id, agent_id=agent_id, input_message=packaged_user_message)\n\n @LockingServer.agent_lock_decorator\n def system_message(self, user_id: uuid.UUID, agent_id: uuid.UUID, message: str) -> None:\n \"\"\"Process an incoming system message and feed it through the MemGPT agent\"\"\"\n if self.ms.get_user(user_id=user_id) is None:\n raise ValueError(f\"User user_id={user_id} does not exist\")\n if self.ms.get_agent(agent_id=agent_id, user_id=user_id) is None:\n raise ValueError(f\"Agent agent_id={agent_id} does not exist\")\n\n # Basic input sanitization\n if not isinstance(message, str) or len(message) == 0:\n raise ValueError(f\"Invalid input: '{message}'\")\n\n # If the input begins with a command prefix, reject\n elif message.startswith(\"/\"):\n raise ValueError(f\"Invalid input: '{message}'\")\n\n # Else, process it as a user message to be fed to the agent\n else:\n # Package the user message first\n packaged_system_message = system.package_system_message(system_message=message)\n # Run the agent state forward\n self._step(user_id=user_id, agent_id=agent_id, input_message=packaged_system_message)\n\n @LockingServer.agent_lock_decorator\n def run_command(self, user_id: uuid.UUID, agent_id: uuid.UUID, command: str) -> Union[str, None]:\n \"\"\"Run a command on the agent\"\"\"\n if self.ms.get_user(user_id=user_id) is None:\n raise ValueError(f\"User 
user_id={user_id} does not exist\")\n if self.ms.get_agent(agent_id=agent_id, user_id=user_id) is None:\n raise ValueError(f\"Agent agent_id={agent_id} does not exist\")\n\n # If the input begins with a command prefix, attempt to process it as a command\n if command.startswith(\"/\"):\n if len(command) > 1:\n command = command[1:] # strip the prefix\n return self._command(user_id=user_id, agent_id=agent_id, command=command)\n\n def create_user(\n self,\n user_config: Optional[Union[dict, User]] = {},\n ):\n \"\"\"Create a new user using a config\"\"\"\n if not isinstance(user_config, dict):\n raise ValueError(f\"user_config must be provided as a dictionary\")\n\n user = User(\n id=user_config[\"id\"] if \"id\" in user_config else None,\n default_preset=user_config[\"default_preset\"] if \"default_preset\" in user_config else \"memgpt_chat\",\n default_persona=user_config[\"default_persona\"] if \"default_persona\" in user_config else constants.DEFAULT_PERSONA,\n default_human=user_config[\"default_human\"] if \"default_human\" in user_config else constants.DEFAULT_HUMAN,\n )\n self.ms.create_user(user)\n logger.info(f\"Created new user from config: {user}\")\n return user\n\n def create_agent(\n self,\n user_id: uuid.UUID,\n agent_config: Union[dict, AgentState],\n interface: Union[AgentInterface, None] = None,\n # persistence_manager: Union[PersistenceManager, None] = None,\n ) -> AgentState:\n \"\"\"Create a new agent using a config\"\"\"\n if self.ms.get_user(user_id=user_id) is None:\n raise ValueError(f\"User user_id={user_id} does not exist\")\n\n # Initialize the agent based on the provided configuration\n if not isinstance(agent_config, dict):\n raise ValueError(f\"agent_config must be provided as a dictionary\")\n\n if interface is None:\n # interface = self.default_interface_cls()\n interface = self.default_interface\n\n # if persistence_manager is None:\n # persistence_manager = self.default_persistence_manager_cls(agent_config=agent_config)\n\n logger.debug(f\"Attempting to find user: {user_id}\")\n user = self.ms.get_user(user_id=user_id)\n if not user:\n raise ValueError(f\"cannot find user with associated client id: {user_id}\")\n\n agent_state = AgentState(\n user_id=user.id,\n name=agent_config[\"name\"] if \"name\" in agent_config else utils.create_random_username(),\n preset=agent_config[\"preset\"] if \"preset\" in agent_config else user.default_preset,\n # TODO we need to allow passing raw persona/human text via the server request\n persona=agent_config[\"persona\"] if \"persona\" in agent_config else user.default_persona,\n human=agent_config[\"human\"] if \"human\" in agent_config else user.default_human,\n llm_config=agent_config[\"llm_config\"] if \"llm_config\" in agent_config else self.server_llm_config,\n embedding_config=agent_config[\"embedding_config\"] if \"embedding_config\" in agent_config else self.server_embedding_config,\n )\n # NOTE: you MUST add to the metadata store before creating the agent, otherwise the storage connectors will error on creation\n # TODO: fix this db dependency and remove\n self.ms.create_agent(agent_state)\n\n logger.debug(f\"Attempting to create agent from agent_state:\\n{agent_state}\")\n try:\n agent = presets.create_agent_from_preset(agent_state=agent_state, interface=interface)\n\n # FIXME: this is a hacky way to get the system prompts injected into agent into the DB\n # self.ms.update_agent(agent.agent_state)\n except Exception as e:\n logger.exception(e)\n self.ms.delete_agent(agent_id=agent_state.id)\n raise\n\n 
save_agent(agent, self.ms)\n\n logger.info(f\"Created new agent from config: {agent}\")\n\n return agent.agent_state\n\n def delete_agent(\n self,\n user_id: uuid.UUID,\n agent_id: uuid.UUID,\n ):\n if self.ms.get_user(user_id=user_id) is None:\n raise ValueError(f\"User user_id={user_id} does not exist\")\n if self.ms.get_agent(agent_id=agent_id, user_id=user_id) is None:\n raise ValueError(f\"Agent agent_id={agent_id} does not exist\")\n\n # TODO: Make sure the user owns the agent\n agent = self.ms.get_agent(agent_id=agent_id, user_id=user_id)\n if agent is not None:\n self.ms.delete_agent(agent_id=agent_id)\n\n def list_agents(self, user_id: uuid.UUID) -> dict:\n \"\"\"List all available agents to a user\"\"\"\n if self.ms.get_user(user_id=user_id) is None:\n raise ValueError(f\"User user_id={user_id} does not exist\")\n\n agents_states = self.ms.list_agents(user_id=user_id)\n logger.info(f\"Retrieved {len(agents_states)} agents for user {user_id}:\\n{[vars(s) for s in agents_states]}\")\n return {\n \"num_agents\": len(agents_states),\n \"agents\": [\n {\n \"id\": state.id,\n \"name\": state.name,\n \"human\": state.human,\n \"persona\": state.persona,\n \"created_at\": state.created_at.isoformat(),\n }\n for state in agents_states\n ],\n }\n\n def get_agent(self, user_id: uuid.UUID, agent_id: uuid.UUID):\n \"\"\"Get the agent state\"\"\"\n return self.ms.get_agent(agent_id=agent_id, user_id=user_id)\n\n def get_user(self, user_id: uuid.UUID) -> User:\n \"\"\"Get the user\"\"\"\n return self.ms.get_user(user_id=user_id)\n\n def get_agent_memory(self, user_id: uuid.UUID, agent_id: uuid.UUID) -> dict:\n \"\"\"Return the memory of an agent (core memory + non-core statistics)\"\"\"\n if self.ms.get_user(user_id=user_id) is None:\n raise ValueError(f\"User user_id={user_id} does not exist\")\n if self.ms.get_agent(agent_id=agent_id, user_id=user_id) is None:\n raise ValueError(f\"Agent agent_id={agent_id} does not exist\")\n\n # Get the agent object (loaded in memory)\n memgpt_agent = self._get_or_load_agent(user_id=user_id, agent_id=agent_id)\n\n core_memory = memgpt_agent.memory\n recall_memory = memgpt_agent.persistence_manager.recall_memory\n archival_memory = memgpt_agent.persistence_manager.archival_memory\n\n memory_obj = {\n \"core_memory\": {\n \"persona\": core_memory.persona,\n \"human\": core_memory.human,\n },\n \"recall_memory\": len(recall_memory) if recall_memory is not None else None,\n \"archival_memory\": len(archival_memory) if archival_memory is not None else None,\n }\n\n return memory_obj\n\n def get_in_context_message_ids(self, user_id: uuid.UUID, agent_id: uuid.UUID) -> List[uuid.UUID]:\n \"\"\"Get the message ids of the in-context messages in the agent's memory\"\"\"\n # Get the agent object (loaded in memory)\n memgpt_agent = self._get_or_load_agent(user_id=user_id, agent_id=agent_id)\n return [m.id for m in memgpt_agent._messages]\n\n def get_agent_messages(self, user_id: uuid.UUID, agent_id: uuid.UUID, start: int, count: int) -> list:\n \"\"\"Paginated query of all messages in agent message queue\"\"\"\n if self.ms.get_user(user_id=user_id) is None:\n raise ValueError(f\"User user_id={user_id} does not exist\")\n if self.ms.get_agent(agent_id=agent_id, user_id=user_id) is None:\n raise ValueError(f\"Agent agent_id={agent_id} does not exist\")\n\n # Get the agent object (loaded in memory)\n memgpt_agent = self._get_or_load_agent(user_id=user_id, agent_id=agent_id)\n\n if start < 0 or count < 0:\n raise ValueError(\"Start and count values should be 
non-negative\")\n\n if start + count < len(memgpt_agent._messages): # messages can be returned from whats in memory\n # Reverse the list to make it in reverse chronological order\n reversed_messages = memgpt_agent._messages[::-1]\n # Check if start is within the range of the list\n if start >= len(reversed_messages):\n raise IndexError(\"Start index is out of range\")\n\n # Calculate the end index, ensuring it does not exceed the list length\n end_index = min(start + count, len(reversed_messages))\n\n # Slice the list for pagination\n messages = reversed_messages[start:end_index]\n\n else:\n # need to access persistence manager for additional messages\n db_iterator = memgpt_agent.persistence_manager.recall_memory.storage.get_all_paginated(page_size=count, offset=start)\n\n # get a single page of messages\n # TODO: handle stop iteration\n page = next(db_iterator, [])\n\n # return messages in reverse chronological order\n messages = sorted(page, key=lambda x: x.created_at, reverse=True)\n\n # convert to json\n json_messages = [vars(record) for record in messages]\n return json_messages\n\n def get_agent_archival(self, user_id: uuid.UUID, agent_id: uuid.UUID, start: int, count: int) -> list:\n \"\"\"Paginated query of all messages in agent archival memory\"\"\"\n if self.ms.get_user(user_id=user_id) is None:\n raise ValueError(f\"User user_id={user_id} does not exist\")\n if self.ms.get_agent(agent_id=agent_id, user_id=user_id) is None:\n raise ValueError(f\"Agent agent_id={agent_id} does not exist\")\n\n # Get the agent object (loaded in memory)\n memgpt_agent = self._get_or_load_agent(user_id=user_id, agent_id=agent_id)\n\n # iterate over records\n db_iterator = memgpt_agent.persistence_manager.archival_memory.storage.get_all_paginated(page_size=count, offset=start)\n\n # get a single page of messages\n page = next(db_iterator, [])\n json_passages = [vars(record) for record in page]\n return json_passages\n\n def get_agent_archival_cursor(\n self,\n user_id: uuid.UUID,\n agent_id: uuid.UUID,\n after: Optional[uuid.UUID] = None,\n before: Optional[uuid.UUID] = None,\n limit: Optional[int] = 100,\n order_by: Optional[str] = \"created_at\",\n reverse: Optional[bool] = False,\n ):\n if self.ms.get_user(user_id=user_id) is None:\n raise ValueError(f\"User user_id={user_id} does not exist\")\n if self.ms.get_agent(agent_id=agent_id, user_id=user_id) is None:\n raise ValueError(f\"Agent agent_id={agent_id} does not exist\")\n\n # Get the agent object (loaded in memory)\n memgpt_agent = self._get_or_load_agent(user_id=user_id, agent_id=agent_id)\n\n # iterate over recorde\n cursor, records = memgpt_agent.persistence_manager.archival_memory.storage.get_all_cursor(\n after=after, before=before, limit=limit, order_by=order_by, reverse=reverse\n )\n json_records = [vars(record) for record in records]\n return cursor, json_records\n\n def get_agent_recall_cursor(\n self,\n user_id: uuid.UUID,\n agent_id: uuid.UUID,\n after: Optional[uuid.UUID] = None,\n before: Optional[uuid.UUID] = None,\n limit: Optional[int] = 100,\n order_by: Optional[str] = \"created_at\",\n reverse: Optional[bool] = False,\n ):\n if self.ms.get_user(user_id=user_id) is None:\n raise ValueError(f\"User user_id={user_id} does not exist\")\n if self.ms.get_agent(agent_id=agent_id, user_id=user_id) is None:\n raise ValueError(f\"Agent agent_id={agent_id} does not exist\")\n\n # Get the agent object (loaded in memory)\n memgpt_agent = self._get_or_load_agent(user_id=user_id, agent_id=agent_id)\n\n # iterate over records\n cursor, 
records = memgpt_agent.persistence_manager.recall_memory.storage.get_all_cursor(\n after=after, before=before, limit=limit, order_by=order_by, reverse=reverse\n )\n json_records = [vars(record) for record in records]\n\n # TODO: mark what is in-context versus not\n return cursor, json_records\n\n def get_agent_config(self, user_id: uuid.UUID, agent_id: uuid.UUID) -> dict:\n \"\"\"Return the config of an agent\"\"\"\n if self.ms.get_user(user_id=user_id) is None:\n raise ValueError(f\"User user_id={user_id} does not exist\")\n if self.ms.get_agent(agent_id=agent_id, user_id=user_id) is None:\n raise ValueError(f\"Agent agent_id={agent_id} does not exist\")\n\n # Get the agent object (loaded in memory)\n memgpt_agent = self._get_or_load_agent(user_id=user_id, agent_id=agent_id)\n agent_config = vars(memgpt_agent.agent_state)\n\n return agent_config\n\n def get_server_config(self) -> dict:\n \"\"\"Return the base config\"\"\"\n # TODO: do we need a seperate server config?\n base_config = vars(self.config)\n\n def clean_keys(config):\n config_copy = config.copy()\n for k, v in config.items():\n if k == \"key\" or \"_key\" in k:\n config_copy[k] = server_utils.shorten_key_middle(v, chars_each_side=5)\n return config_copy\n\n clean_base_config = clean_keys(base_config)\n return clean_base_config\n\n def update_agent_core_memory(self, user_id: uuid.UUID, agent_id: uuid.UUID, new_memory_contents: dict) -> dict:\n \"\"\"Update the agents core memory block, return the new state\"\"\"\n if self.ms.get_user(user_id=user_id) is None:\n raise ValueError(f\"User user_id={user_id} does not exist\")\n if self.ms.get_agent(agent_id=agent_id, user_id=user_id) is None:\n raise ValueError(f\"Agent agent_id={agent_id} does not exist\")\n\n # Get the agent object (loaded in memory)\n memgpt_agent = self._get_or_load_agent(user_id=user_id, agent_id=agent_id)\n\n old_core_memory = self.get_agent_memory(user_id=user_id, agent_id=agent_id)[\"core_memory\"]\n new_core_memory = old_core_memory.copy()\n\n modified = False\n if \"persona\" in new_memory_contents and new_memory_contents[\"persona\"] is not None:\n new_persona = new_memory_contents[\"persona\"]\n if old_core_memory[\"persona\"] != new_persona:\n new_core_memory[\"persona\"] = new_persona\n memgpt_agent.memory.edit_persona(new_persona)\n modified = True\n\n if \"human\" in new_memory_contents and new_memory_contents[\"human\"] is not None:\n new_human = new_memory_contents[\"human\"]\n if old_core_memory[\"human\"] != new_human:\n new_core_memory[\"human\"] = new_human\n memgpt_agent.memory.edit_human(new_human)\n modified = True\n\n # If we modified the memory contents, we need to rebuild the memory block inside the system message\n if modified:\n memgpt_agent.rebuild_memory()\n\n return {\n \"old_core_memory\": old_core_memory,\n \"new_core_memory\": new_core_memory,\n \"modified\": modified,\n }" }, { "identifier": "QueuingInterface", "path": "memgpt/server/rest_api/interface.py", "snippet": "class QueuingInterface(AgentInterface):\n \"\"\"Messages are queued inside an internal buffer and manually flushed\"\"\"\n\n def __init__(self, debug=True):\n self.buffer = queue.Queue()\n self.debug = debug\n\n def to_list(self):\n \"\"\"Convert queue to a list (empties it out at the same time)\"\"\"\n items = []\n while not self.buffer.empty():\n try:\n items.append(self.buffer.get_nowait())\n except queue.Empty:\n break\n if len(items) > 1 and items[-1] == \"STOP\":\n items.pop()\n return items\n\n def clear(self):\n \"\"\"Clear all messages from the 
queue.\"\"\"\n with self.buffer.mutex:\n # Empty the queue\n self.buffer.queue.clear()\n\n async def message_generator(self):\n while True:\n if not self.buffer.empty():\n message = self.buffer.get()\n if message == \"STOP\":\n break\n yield message | {\"date\": datetime.now(tz=pytz.utc).isoformat()}\n else:\n await asyncio.sleep(0.1) # Small sleep to prevent a busy loop\n\n def step_yield(self):\n \"\"\"Enqueue a special stop message\"\"\"\n self.buffer.put(\"STOP\")\n\n def error(self, error: str):\n \"\"\"Enqueue a special stop message\"\"\"\n self.buffer.put({\"internal_error\": error})\n self.buffer.put(\"STOP\")\n\n def user_message(self, msg: str):\n \"\"\"Handle reception of a user message\"\"\"\n pass\n\n def internal_monologue(self, msg: str) -> None:\n \"\"\"Handle the agent's internal monologue\"\"\"\n if self.debug:\n print(msg)\n self.buffer.put({\"internal_monologue\": msg})\n\n def assistant_message(self, msg: str) -> None:\n \"\"\"Handle the agent sending a message\"\"\"\n if self.debug:\n print(msg)\n self.buffer.put({\"assistant_message\": msg})\n\n def function_message(self, msg: str) -> None:\n \"\"\"Handle the agent calling a function\"\"\"\n if self.debug:\n print(msg)\n\n if msg.startswith(\"Running \"):\n msg = msg.replace(\"Running \", \"\")\n self.buffer.put({\"function_call\": msg})\n\n elif msg.startswith(\"Success: \"):\n msg = msg.replace(\"Success: \", \"\")\n self.buffer.put({\"function_return\": msg, \"status\": \"success\"})\n\n elif msg.startswith(\"Error: \"):\n msg = msg.replace(\"Error: \", \"\")\n self.buffer.put({\"function_return\": msg, \"status\": \"error\"})\n\n else:\n # NOTE: generic, should not happen\n self.buffer.put({\"function_message\": msg})" }, { "identifier": "mount_static_files", "path": "memgpt/server/rest_api/static_files.py", "snippet": "def mount_static_files(app: FastAPI):\n app.mount(\n \"/\",\n SPAStaticFiles(\n directory=os.path.join(os.getcwd(), \"memgpt\", \"server\", \"static_files\"),\n html=True,\n ),\n name=\"spa-static-files\",\n )" } ]
import json
from contextlib import asynccontextmanager

from fastapi import FastAPI
from starlette.middleware.cors import CORSMiddleware

from memgpt.constants import JSON_ENSURE_ASCII
from memgpt.server.rest_api.agents.index import setup_agents_index_router
from memgpt.server.rest_api.agents.command import setup_agents_command_router
from memgpt.server.rest_api.agents.config import setup_agents_config_router
from memgpt.server.rest_api.agents.memory import setup_agents_memory_router
from memgpt.server.rest_api.agents.message import setup_agents_message_router
from memgpt.server.rest_api.config.index import setup_config_index_router
from memgpt.server.server import SyncServer
from memgpt.server.rest_api.interface import QueuingInterface
from memgpt.server.rest_api.static_files import mount_static_files
11,922
""" Basic REST API sitting on top of the internal MemGPT python server (SyncServer) Start the server with: cd memgpt/server/rest_api poetry run uvicorn server:app --reload """ interface: QueuingInterface = QueuingInterface() server: SyncServer = SyncServer(default_interface=interface) API_PREFIX = "/api" CORS_ORIGINS = [ "http://localhost:4200", "http://localhost:4201", "http://localhost:8283", "http://127.0.0.1:4200", "http://127.0.0.1:4201", "http://127.0.0.1:8283", ] app = FastAPI() app.add_middleware( CORSMiddleware, allow_origins=CORS_ORIGINS, allow_credentials=True, allow_methods=["*"], allow_headers=["*"], ) # /api/agents endpoints app.include_router(setup_agents_command_router(server, interface), prefix=API_PREFIX) app.include_router(setup_agents_config_router(server, interface), prefix=API_PREFIX) app.include_router(setup_agents_index_router(server, interface), prefix=API_PREFIX) app.include_router(setup_agents_memory_router(server, interface), prefix=API_PREFIX)
""" Basic REST API sitting on top of the internal MemGPT python server (SyncServer) Start the server with: cd memgpt/server/rest_api poetry run uvicorn server:app --reload """ interface: QueuingInterface = QueuingInterface() server: SyncServer = SyncServer(default_interface=interface) API_PREFIX = "/api" CORS_ORIGINS = [ "http://localhost:4200", "http://localhost:4201", "http://localhost:8283", "http://127.0.0.1:4200", "http://127.0.0.1:4201", "http://127.0.0.1:8283", ] app = FastAPI() app.add_middleware( CORSMiddleware, allow_origins=CORS_ORIGINS, allow_credentials=True, allow_methods=["*"], allow_headers=["*"], ) # /api/agents endpoints app.include_router(setup_agents_command_router(server, interface), prefix=API_PREFIX) app.include_router(setup_agents_config_router(server, interface), prefix=API_PREFIX) app.include_router(setup_agents_index_router(server, interface), prefix=API_PREFIX) app.include_router(setup_agents_memory_router(server, interface), prefix=API_PREFIX)
app.include_router(setup_agents_message_router(server, interface), prefix=API_PREFIX)
5
2023-10-11 07:38:37+00:00
16k
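The server module above assembles a FastAPI app from the /api routers and is started with uvicorn as described in its docstring. For orientation, a small client sketch against two of the endpoints documented above follows; it assumes uvicorn's default port 8000, uses a placeholder user UUID, and infers request/response field names from the route handlers rather than from the pydantic models themselves, so treat it as illustrative only.

import requests

BASE_URL = "http://localhost:8000/api"  # assumes the default uvicorn port; adjust if the server is started with --port
USER_ID = "00000000-0000-0000-0000-000000000000"  # placeholder UUID for illustration

# GET /api/agents: list the agents registered for this user.
agents = requests.get(f"{BASE_URL}/agents", params={"user_id": USER_ID}).json()
print(agents["num_agents"], "agents found")

# POST /api/agents/message: send a user message to the first agent and read the buffered replies.
agent_id = agents["agents"][0]["id"]
reply = requests.post(
    f"{BASE_URL}/agents/message",
    json={"user_id": USER_ID, "agent_id": agent_id, "message": "Hello!", "stream": False},
).json()
print(reply["messages"])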
xxlong0/Wonder3D
mvdiffusion/models/unet_mv2d_condition.py
[ { "identifier": "CrossAttnDownBlockMV2D", "path": "mvdiffusion/models/unet_mv2d_blocks.py", "snippet": "class CrossAttnDownBlockMV2D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n transformer_layers_per_block: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n num_attention_heads=1,\n cross_attention_dim=1280,\n output_scale_factor=1.0,\n downsample_padding=1,\n add_downsample=True,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n num_views: int = 1,\n cd_attention_last: bool = False,\n cd_attention_mid: bool = False,\n multiview_attention: bool = True,\n sparse_mv_attention: bool = False,\n mvcd_attention: bool=False\n ):\n super().__init__()\n resnets = []\n attentions = []\n\n self.has_cross_attention = True\n self.num_attention_heads = num_attention_heads\n\n for i in range(num_layers):\n in_channels = in_channels if i == 0 else out_channels\n resnets.append(\n ResnetBlock2D(\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n if not dual_cross_attention:\n attentions.append(\n TransformerMV2DModel(\n num_attention_heads,\n out_channels // num_attention_heads,\n in_channels=out_channels,\n num_layers=transformer_layers_per_block,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n num_views=num_views,\n cd_attention_last=cd_attention_last,\n cd_attention_mid=cd_attention_mid,\n multiview_attention=multiview_attention,\n sparse_mv_attention=sparse_mv_attention,\n mvcd_attention=mvcd_attention\n )\n )\n else:\n raise NotImplementedError\n self.attentions = nn.ModuleList(attentions)\n self.resnets = nn.ModuleList(resnets)\n\n if add_downsample:\n self.downsamplers = nn.ModuleList(\n [\n Downsample2D(\n out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name=\"op\"\n )\n ]\n )\n else:\n self.downsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(\n self,\n hidden_states: torch.FloatTensor,\n temb: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n additional_residuals=None,\n ):\n output_states = ()\n\n blocks = list(zip(self.resnets, self.attentions))\n\n for i, (resnet, attn) in enumerate(blocks):\n if self.training and self.gradient_checkpointing:\n\n def create_custom_forward(module, return_dict=None):\n def custom_forward(*inputs):\n if return_dict is not None:\n return module(*inputs, return_dict=return_dict)\n else:\n return module(*inputs)\n\n return custom_forward\n\n ckpt_kwargs: Dict[str, Any] = {\"use_reentrant\": False} if is_torch_version(\">=\", \"1.11.0\") else {}\n hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(resnet),\n hidden_states,\n temb,\n **ckpt_kwargs,\n )\n 
hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(attn, return_dict=False),\n hidden_states,\n encoder_hidden_states,\n None, # timestep\n None, # class_labels\n cross_attention_kwargs,\n attention_mask,\n encoder_attention_mask,\n **ckpt_kwargs,\n )[0]\n else:\n hidden_states = resnet(hidden_states, temb)\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n attention_mask=attention_mask,\n encoder_attention_mask=encoder_attention_mask,\n return_dict=False,\n )[0]\n\n # apply additional residuals to the output of the last pair of resnet and attention blocks\n if i == len(blocks) - 1 and additional_residuals is not None:\n hidden_states = hidden_states + additional_residuals\n\n output_states = output_states + (hidden_states,)\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states = output_states + (hidden_states,)\n\n return hidden_states, output_states" }, { "identifier": "CrossAttnUpBlockMV2D", "path": "mvdiffusion/models/unet_mv2d_blocks.py", "snippet": "class CrossAttnUpBlockMV2D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n prev_output_channel: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n transformer_layers_per_block: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n num_attention_heads=1,\n cross_attention_dim=1280,\n output_scale_factor=1.0,\n add_upsample=True,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n num_views: int = 1,\n cd_attention_last: bool = False,\n cd_attention_mid: bool = False,\n multiview_attention: bool = True,\n sparse_mv_attention: bool = False,\n mvcd_attention: bool=False\n ):\n super().__init__()\n resnets = []\n attentions = []\n\n self.has_cross_attention = True\n self.num_attention_heads = num_attention_heads\n\n for i in range(num_layers):\n res_skip_channels = in_channels if (i == num_layers - 1) else out_channels\n resnet_in_channels = prev_output_channel if i == 0 else out_channels\n\n resnets.append(\n ResnetBlock2D(\n in_channels=resnet_in_channels + res_skip_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n if not dual_cross_attention:\n attentions.append(\n TransformerMV2DModel(\n num_attention_heads,\n out_channels // num_attention_heads,\n in_channels=out_channels,\n num_layers=transformer_layers_per_block,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n num_views=num_views,\n cd_attention_last=cd_attention_last,\n cd_attention_mid=cd_attention_mid,\n multiview_attention=multiview_attention,\n sparse_mv_attention=sparse_mv_attention,\n mvcd_attention=mvcd_attention\n )\n )\n else:\n raise NotImplementedError\n self.attentions = nn.ModuleList(attentions)\n self.resnets = nn.ModuleList(resnets)\n\n if add_upsample:\n self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, 
out_channels=out_channels)])\n else:\n self.upsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(\n self,\n hidden_states: torch.FloatTensor,\n res_hidden_states_tuple: Tuple[torch.FloatTensor, ...],\n temb: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n upsample_size: Optional[int] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n ):\n for resnet, attn in zip(self.resnets, self.attentions):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n\n if self.training and self.gradient_checkpointing:\n\n def create_custom_forward(module, return_dict=None):\n def custom_forward(*inputs):\n if return_dict is not None:\n return module(*inputs, return_dict=return_dict)\n else:\n return module(*inputs)\n\n return custom_forward\n\n ckpt_kwargs: Dict[str, Any] = {\"use_reentrant\": False} if is_torch_version(\">=\", \"1.11.0\") else {}\n hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(resnet),\n hidden_states,\n temb,\n **ckpt_kwargs,\n )\n hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(attn, return_dict=False),\n hidden_states,\n encoder_hidden_states,\n None, # timestep\n None, # class_labels\n cross_attention_kwargs,\n attention_mask,\n encoder_attention_mask,\n **ckpt_kwargs,\n )[0]\n else:\n hidden_states = resnet(hidden_states, temb)\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n attention_mask=attention_mask,\n encoder_attention_mask=encoder_attention_mask,\n return_dict=False,\n )[0]\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states" }, { "identifier": "UNetMidBlockMV2DCrossAttn", "path": "mvdiffusion/models/unet_mv2d_blocks.py", "snippet": "class UNetMidBlockMV2DCrossAttn(nn.Module):\n def __init__(\n self,\n in_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n transformer_layers_per_block: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n num_attention_heads=1,\n output_scale_factor=1.0,\n cross_attention_dim=1280,\n dual_cross_attention=False,\n use_linear_projection=False,\n upcast_attention=False,\n num_views: int = 1,\n cd_attention_last: bool = False,\n cd_attention_mid: bool = False,\n multiview_attention: bool = True,\n sparse_mv_attention: bool = False,\n mvcd_attention: bool=False\n ):\n super().__init__()\n\n self.has_cross_attention = True\n self.num_attention_heads = num_attention_heads\n resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32)\n\n # there is always at least one resnet\n resnets = [\n ResnetBlock2D(\n in_channels=in_channels,\n out_channels=in_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n ]\n attentions = []\n\n for _ in range(num_layers):\n if not 
dual_cross_attention:\n attentions.append(\n TransformerMV2DModel(\n num_attention_heads,\n in_channels // num_attention_heads,\n in_channels=in_channels,\n num_layers=transformer_layers_per_block,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n num_views=num_views,\n cd_attention_last=cd_attention_last,\n cd_attention_mid=cd_attention_mid,\n multiview_attention=multiview_attention,\n sparse_mv_attention=sparse_mv_attention,\n mvcd_attention=mvcd_attention\n )\n )\n else:\n raise NotImplementedError\n resnets.append(\n ResnetBlock2D(\n in_channels=in_channels,\n out_channels=in_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n\n self.attentions = nn.ModuleList(attentions)\n self.resnets = nn.ModuleList(resnets)\n\n def forward(\n self,\n hidden_states: torch.FloatTensor,\n temb: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n ) -> torch.FloatTensor:\n hidden_states = self.resnets[0](hidden_states, temb)\n for attn, resnet in zip(self.attentions, self.resnets[1:]):\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n attention_mask=attention_mask,\n encoder_attention_mask=encoder_attention_mask,\n return_dict=False,\n )[0]\n hidden_states = resnet(hidden_states, temb)\n\n return hidden_states" }, { "identifier": "get_down_block", "path": "mvdiffusion/models/unet_mv2d_blocks.py", "snippet": "def get_down_block(\n down_block_type,\n num_layers,\n in_channels,\n out_channels,\n temb_channels,\n add_downsample,\n resnet_eps,\n resnet_act_fn,\n transformer_layers_per_block=1,\n num_attention_heads=None,\n resnet_groups=None,\n cross_attention_dim=None,\n downsample_padding=None,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n resnet_time_scale_shift=\"default\",\n resnet_skip_time_act=False,\n resnet_out_scale_factor=1.0,\n cross_attention_norm=None,\n attention_head_dim=None,\n downsample_type=None,\n num_views=1,\n cd_attention_last: bool = False,\n cd_attention_mid: bool = False,\n multiview_attention: bool = True,\n sparse_mv_attention: bool = False,\n mvcd_attention: bool=False\n):\n # If attn head dim is not defined, we default it to the number of heads\n if attention_head_dim is None:\n logger.warn(\n f\"It is recommended to provide `attention_head_dim` when calling `get_down_block`. 
Defaulting `attention_head_dim` to {num_attention_heads}.\"\n )\n attention_head_dim = num_attention_heads\n\n down_block_type = down_block_type[7:] if down_block_type.startswith(\"UNetRes\") else down_block_type\n if down_block_type == \"DownBlock2D\":\n return DownBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n elif down_block_type == \"ResnetDownsampleBlock2D\":\n return ResnetDownsampleBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n resnet_time_scale_shift=resnet_time_scale_shift,\n skip_time_act=resnet_skip_time_act,\n output_scale_factor=resnet_out_scale_factor,\n )\n elif down_block_type == \"AttnDownBlock2D\":\n if add_downsample is False:\n downsample_type = None\n else:\n downsample_type = downsample_type or \"conv\" # default to 'conv'\n return AttnDownBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding,\n attention_head_dim=attention_head_dim,\n resnet_time_scale_shift=resnet_time_scale_shift,\n downsample_type=downsample_type,\n )\n elif down_block_type == \"CrossAttnDownBlock2D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for CrossAttnDownBlock2D\")\n return CrossAttnDownBlock2D(\n num_layers=num_layers,\n transformer_layers_per_block=transformer_layers_per_block,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding,\n cross_attention_dim=cross_attention_dim,\n num_attention_heads=num_attention_heads,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n # custom MV2D attention block\n elif down_block_type == \"CrossAttnDownBlockMV2D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for CrossAttnDownBlockMV2D\")\n return CrossAttnDownBlockMV2D(\n num_layers=num_layers,\n transformer_layers_per_block=transformer_layers_per_block,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding,\n cross_attention_dim=cross_attention_dim,\n num_attention_heads=num_attention_heads,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n num_views=num_views,\n cd_attention_last=cd_attention_last,\n cd_attention_mid=cd_attention_mid,\n multiview_attention=multiview_attention,\n 
sparse_mv_attention=sparse_mv_attention,\n mvcd_attention=mvcd_attention\n )\n elif down_block_type == \"SimpleCrossAttnDownBlock2D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for SimpleCrossAttnDownBlock2D\")\n return SimpleCrossAttnDownBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n cross_attention_dim=cross_attention_dim,\n attention_head_dim=attention_head_dim,\n resnet_time_scale_shift=resnet_time_scale_shift,\n skip_time_act=resnet_skip_time_act,\n output_scale_factor=resnet_out_scale_factor,\n only_cross_attention=only_cross_attention,\n cross_attention_norm=cross_attention_norm,\n )\n elif down_block_type == \"SkipDownBlock2D\":\n return SkipDownBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n downsample_padding=downsample_padding,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n elif down_block_type == \"AttnSkipDownBlock2D\":\n return AttnSkipDownBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n attention_head_dim=attention_head_dim,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n elif down_block_type == \"DownEncoderBlock2D\":\n return DownEncoderBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n elif down_block_type == \"AttnDownEncoderBlock2D\":\n return AttnDownEncoderBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding,\n attention_head_dim=attention_head_dim,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n elif down_block_type == \"KDownBlock2D\":\n return KDownBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n )\n elif down_block_type == \"KCrossAttnDownBlock2D\":\n return KCrossAttnDownBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n cross_attention_dim=cross_attention_dim,\n attention_head_dim=attention_head_dim,\n add_self_attention=True if not add_downsample else False,\n )\n raise ValueError(f\"{down_block_type} does not exist.\")" }, { "identifier": "get_up_block", "path": "mvdiffusion/models/unet_mv2d_blocks.py", "snippet": "def get_up_block(\n up_block_type,\n num_layers,\n in_channels,\n out_channels,\n prev_output_channel,\n temb_channels,\n add_upsample,\n resnet_eps,\n resnet_act_fn,\n transformer_layers_per_block=1,\n num_attention_heads=None,\n resnet_groups=None,\n cross_attention_dim=None,\n 
dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n resnet_time_scale_shift=\"default\",\n resnet_skip_time_act=False,\n resnet_out_scale_factor=1.0,\n cross_attention_norm=None,\n attention_head_dim=None,\n upsample_type=None,\n num_views=1,\n cd_attention_last: bool = False,\n cd_attention_mid: bool = False,\n multiview_attention: bool = True,\n sparse_mv_attention: bool = False,\n mvcd_attention: bool=False\n):\n # If attn head dim is not defined, we default it to the number of heads\n if attention_head_dim is None:\n logger.warn(\n f\"It is recommended to provide `attention_head_dim` when calling `get_up_block`. Defaulting `attention_head_dim` to {num_attention_heads}.\"\n )\n attention_head_dim = num_attention_heads\n\n up_block_type = up_block_type[7:] if up_block_type.startswith(\"UNetRes\") else up_block_type\n if up_block_type == \"UpBlock2D\":\n return UpBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n elif up_block_type == \"ResnetUpsampleBlock2D\":\n return ResnetUpsampleBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n resnet_time_scale_shift=resnet_time_scale_shift,\n skip_time_act=resnet_skip_time_act,\n output_scale_factor=resnet_out_scale_factor,\n )\n elif up_block_type == \"CrossAttnUpBlock2D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for CrossAttnUpBlock2D\")\n return CrossAttnUpBlock2D(\n num_layers=num_layers,\n transformer_layers_per_block=transformer_layers_per_block,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n cross_attention_dim=cross_attention_dim,\n num_attention_heads=num_attention_heads,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n # custom MV2D attention block\n elif up_block_type == \"CrossAttnUpBlockMV2D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for CrossAttnUpBlockMV2D\")\n return CrossAttnUpBlockMV2D(\n num_layers=num_layers,\n transformer_layers_per_block=transformer_layers_per_block,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n cross_attention_dim=cross_attention_dim,\n num_attention_heads=num_attention_heads,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n num_views=num_views,\n cd_attention_last=cd_attention_last,\n 
cd_attention_mid=cd_attention_mid,\n multiview_attention=multiview_attention,\n sparse_mv_attention=sparse_mv_attention,\n mvcd_attention=mvcd_attention\n ) \n elif up_block_type == \"SimpleCrossAttnUpBlock2D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for SimpleCrossAttnUpBlock2D\")\n return SimpleCrossAttnUpBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n cross_attention_dim=cross_attention_dim,\n attention_head_dim=attention_head_dim,\n resnet_time_scale_shift=resnet_time_scale_shift,\n skip_time_act=resnet_skip_time_act,\n output_scale_factor=resnet_out_scale_factor,\n only_cross_attention=only_cross_attention,\n cross_attention_norm=cross_attention_norm,\n )\n elif up_block_type == \"AttnUpBlock2D\":\n if add_upsample is False:\n upsample_type = None\n else:\n upsample_type = upsample_type or \"conv\" # default to 'conv'\n\n return AttnUpBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n attention_head_dim=attention_head_dim,\n resnet_time_scale_shift=resnet_time_scale_shift,\n upsample_type=upsample_type,\n )\n elif up_block_type == \"SkipUpBlock2D\":\n return SkipUpBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n elif up_block_type == \"AttnSkipUpBlock2D\":\n return AttnSkipUpBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n attention_head_dim=attention_head_dim,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n elif up_block_type == \"UpDecoderBlock2D\":\n return UpDecoderBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n resnet_time_scale_shift=resnet_time_scale_shift,\n temb_channels=temb_channels,\n )\n elif up_block_type == \"AttnUpDecoderBlock2D\":\n return AttnUpDecoderBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n attention_head_dim=attention_head_dim,\n resnet_time_scale_shift=resnet_time_scale_shift,\n temb_channels=temb_channels,\n )\n elif up_block_type == \"KUpBlock2D\":\n return KUpBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n )\n elif up_block_type == \"KCrossAttnUpBlock2D\":\n return KCrossAttnUpBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n 
resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n cross_attention_dim=cross_attention_dim,\n attention_head_dim=attention_head_dim,\n )\n\n raise ValueError(f\"{up_block_type} does not exist.\")" } ]
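The get_down_block / get_up_block snippets above are plain string-dispatch factories: the UNet hands them a block-type name together with a shared bag of keyword arguments, and the "...MV2D" branches additionally forward the multiview flags (num_views, multiview_attention, sparse_mv_attention, mvcd_attention) into the custom blocks. Below is a minimal call sketch; every concrete value in it is an illustrative assumption, and only the parameter names and the dispatch behaviour come from the quoted snippet.

# Sketch only: argument values are made-up examples; the parameter names follow
# the get_down_block signature quoted in the context snippet above.
from mvdiffusion.models.unet_mv2d_blocks import get_down_block

down_block = get_down_block(
    "CrossAttnDownBlockMV2D",   # the type string selects the multiview variant
    num_layers=2,
    in_channels=320,
    out_channels=640,
    temb_channels=1280,
    add_downsample=True,
    resnet_eps=1e-5,
    resnet_act_fn="silu",
    resnet_groups=32,
    cross_attention_dim=1280,   # must be set for any CrossAttn* block type
    num_attention_heads=8,
    downsample_padding=1,
    num_views=4,                # multiview-specific flags are passed through
    multiview_attention=True,
    sparse_mv_attention=False,
    mvcd_attention=False,
)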
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple, Union

from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.loaders import UNet2DConditionLoadersMixin
from diffusers.utils import BaseOutput, logging
from diffusers.models.activations import get_activation
from diffusers.models.attention_processor import AttentionProcessor, AttnProcessor
from diffusers.models.embeddings import (
    GaussianFourierProjection,
    ImageHintTimeEmbedding,
    ImageProjection,
    ImageTimeEmbedding,
    TextImageProjection,
    TextImageTimeEmbedding,
    TextTimeEmbedding,
    TimestepEmbedding,
    Timesteps,
)
from diffusers.models.modeling_utils import ModelMixin, load_state_dict, _load_state_dict_into_model
from diffusers.models.unet_2d_blocks import (
    CrossAttnDownBlock2D,
    CrossAttnUpBlock2D,
    DownBlock2D,
    UNetMidBlock2DCrossAttn,
    UNetMidBlock2DSimpleCrossAttn,
    UpBlock2D,
)
from diffusers.utils import (
    CONFIG_NAME,
    DIFFUSERS_CACHE,
    FLAX_WEIGHTS_NAME,
    HF_HUB_OFFLINE,
    SAFETENSORS_WEIGHTS_NAME,
    WEIGHTS_NAME,
    _add_variant,
    _get_model_file,
    deprecate,
    is_accelerate_available,
    is_safetensors_available,
    is_torch_version,
    logging,
)
from diffusers import __version__
from mvdiffusion.models.unet_mv2d_blocks import (
    CrossAttnDownBlockMV2D,
    CrossAttnUpBlockMV2D,
    UNetMidBlockMV2DCrossAttn,
    get_down_block,
    get_up_block,
)
import os
import torch
import torch.nn as nn
import torch.utils.checkpoint
import copy
11651
self.add_embedding = TextTimeEmbedding( text_time_embedding_from_dim, time_embed_dim, num_heads=addition_embed_type_num_heads ) elif addition_embed_type == "text_image": # text_embed_dim and image_embed_dim DON'T have to be `cross_attention_dim`. To not clutter the __init__ too much # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use # case when `addition_embed_type == "text_image"` (Kadinsky 2.1)` self.add_embedding = TextImageTimeEmbedding( text_embed_dim=cross_attention_dim, image_embed_dim=cross_attention_dim, time_embed_dim=time_embed_dim ) elif addition_embed_type == "text_time": self.add_time_proj = Timesteps(addition_time_embed_dim, flip_sin_to_cos, freq_shift) self.add_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim) elif addition_embed_type == "image": # Kandinsky 2.2 self.add_embedding = ImageTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim) elif addition_embed_type == "image_hint": # Kandinsky 2.2 ControlNet self.add_embedding = ImageHintTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim) elif addition_embed_type is not None: raise ValueError(f"addition_embed_type: {addition_embed_type} must be None, 'text' or 'text_image'.") if time_embedding_act_fn is None: self.time_embed_act = None else: self.time_embed_act = get_activation(time_embedding_act_fn) self.down_blocks = nn.ModuleList([]) self.up_blocks = nn.ModuleList([]) if isinstance(only_cross_attention, bool): if mid_block_only_cross_attention is None: mid_block_only_cross_attention = only_cross_attention only_cross_attention = [only_cross_attention] * len(down_block_types) if mid_block_only_cross_attention is None: mid_block_only_cross_attention = False if isinstance(num_attention_heads, int): num_attention_heads = (num_attention_heads,) * len(down_block_types) if isinstance(attention_head_dim, int): attention_head_dim = (attention_head_dim,) * len(down_block_types) if isinstance(cross_attention_dim, int): cross_attention_dim = (cross_attention_dim,) * len(down_block_types) if isinstance(layers_per_block, int): layers_per_block = [layers_per_block] * len(down_block_types) if isinstance(transformer_layers_per_block, int): transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types) if class_embeddings_concat: # The time embeddings are concatenated with the class embeddings. 
The dimension of the # time embeddings passed to the down, middle, and up blocks is twice the dimension of the # regular time embeddings blocks_time_embed_dim = time_embed_dim * 2 else: blocks_time_embed_dim = time_embed_dim # down output_channel = block_out_channels[0] for i, down_block_type in enumerate(down_block_types): input_channel = output_channel output_channel = block_out_channels[i] is_final_block = i == len(block_out_channels) - 1 down_block = get_down_block( down_block_type, num_layers=layers_per_block[i], transformer_layers_per_block=transformer_layers_per_block[i], in_channels=input_channel, out_channels=output_channel, temb_channels=blocks_time_embed_dim, add_downsample=not is_final_block, resnet_eps=norm_eps, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, cross_attention_dim=cross_attention_dim[i], num_attention_heads=num_attention_heads[i], downsample_padding=downsample_padding, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention[i], upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift, resnet_skip_time_act=resnet_skip_time_act, resnet_out_scale_factor=resnet_out_scale_factor, cross_attention_norm=cross_attention_norm, attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel, num_views=num_views, cd_attention_last=cd_attention_last, cd_attention_mid=cd_attention_mid, multiview_attention=multiview_attention, sparse_mv_attention=sparse_mv_attention, mvcd_attention=mvcd_attention ) self.down_blocks.append(down_block) # mid if mid_block_type == "UNetMidBlock2DCrossAttn": self.mid_block = UNetMidBlock2DCrossAttn( transformer_layers_per_block=transformer_layers_per_block[-1], in_channels=block_out_channels[-1], temb_channels=blocks_time_embed_dim, resnet_eps=norm_eps, resnet_act_fn=act_fn, output_scale_factor=mid_block_scale_factor, resnet_time_scale_shift=resnet_time_scale_shift, cross_attention_dim=cross_attention_dim[-1], num_attention_heads=num_attention_heads[-1], resnet_groups=norm_num_groups, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, upcast_attention=upcast_attention, ) # custom MV2D attention block elif mid_block_type == "UNetMidBlockMV2DCrossAttn":
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. logger = logging.get_logger(__name__) # pylint: disable=invalid-name @dataclass class UNetMV2DConditionOutput(BaseOutput): """ The output of [`UNet2DConditionModel`]. Args: sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): The hidden states output conditioned on `encoder_hidden_states` input. Output of last layer of model. """ sample: torch.FloatTensor = None class UNetMV2DConditionModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin): r""" A conditional 2D UNet model that takes a noisy sample, conditional state, and a timestep and returns a sample shaped output. This model inherits from [`ModelMixin`]. Check the superclass documentation for it's generic methods implemented for all models (such as downloading or saving). Parameters: sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`): Height and width of input/output sample. in_channels (`int`, *optional*, defaults to 4): Number of channels in the input sample. out_channels (`int`, *optional*, defaults to 4): Number of channels in the output. center_input_sample (`bool`, *optional*, defaults to `False`): Whether to center the input sample. flip_sin_to_cos (`bool`, *optional*, defaults to `False`): Whether to flip the sin to cos in the time embedding. freq_shift (`int`, *optional*, defaults to 0): The frequency shift to apply to the time embedding. down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`): The tuple of downsample blocks to use. mid_block_type (`str`, *optional*, defaults to `"UNetMidBlock2DCrossAttn"`): Block type for middle of UNet, it can be either `UNetMidBlock2DCrossAttn` or `UNetMidBlock2DSimpleCrossAttn`. If `None`, the mid block layer is skipped. up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")`): The tuple of upsample blocks to use. only_cross_attention(`bool` or `Tuple[bool]`, *optional*, default to `False`): Whether to include self-attention in the basic transformer blocks, see [`~models.attention.BasicTransformerBlock`]. block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`): The tuple of output channels for each block. layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block. downsample_padding (`int`, *optional*, defaults to 1): The padding to use for the downsampling convolution. mid_block_scale_factor (`float`, *optional*, defaults to 1.0): The scale factor to use for the mid block. act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use. norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization. If `None`, normalization and activation layers is skipped in post-processing. norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization. 
cross_attention_dim (`int` or `Tuple[int]`, *optional*, defaults to 1280): The dimension of the cross attention features. transformer_layers_per_block (`int` or `Tuple[int]`, *optional*, defaults to 1): The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`]. Only relevant for [`~models.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unet_2d_blocks.CrossAttnUpBlock2D`], [`~models.unet_2d_blocks.UNetMidBlock2DCrossAttn`]. encoder_hid_dim (`int`, *optional*, defaults to None): If `encoder_hid_dim_type` is defined, `encoder_hidden_states` will be projected from `encoder_hid_dim` dimension to `cross_attention_dim`. encoder_hid_dim_type (`str`, *optional*, defaults to `None`): If given, the `encoder_hidden_states` and potentially other embeddings are down-projected to text embeddings of dimension `cross_attention` according to `encoder_hid_dim_type`. attention_head_dim (`int`, *optional*, defaults to 8): The dimension of the attention heads. num_attention_heads (`int`, *optional*): The number of attention heads. If not defined, defaults to `attention_head_dim` resnet_time_scale_shift (`str`, *optional*, defaults to `"default"`): Time scale shift config for ResNet blocks (see [`~models.resnet.ResnetBlock2D`]). Choose from `default` or `scale_shift`. class_embed_type (`str`, *optional*, defaults to `None`): The type of class embedding to use which is ultimately summed with the time embeddings. Choose from `None`, `"timestep"`, `"identity"`, `"projection"`, or `"simple_projection"`. addition_embed_type (`str`, *optional*, defaults to `None`): Configures an optional embedding which will be summed with the time embeddings. Choose from `None` or "text". "text" will use the `TextTimeEmbedding` layer. addition_time_embed_dim: (`int`, *optional*, defaults to `None`): Dimension for the timestep embeddings. num_class_embeds (`int`, *optional*, defaults to `None`): Input dimension of the learnable embedding matrix to be projected to `time_embed_dim`, when performing class conditioning with `class_embed_type` equal to `None`. time_embedding_type (`str`, *optional*, defaults to `positional`): The type of position embedding to use for timesteps. Choose from `positional` or `fourier`. time_embedding_dim (`int`, *optional*, defaults to `None`): An optional override for the dimension of the projected time embedding. time_embedding_act_fn (`str`, *optional*, defaults to `None`): Optional activation function to use only once on the time embeddings before they are passed to the rest of the UNet. Choose from `silu`, `mish`, `gelu`, and `swish`. timestep_post_act (`str`, *optional*, defaults to `None`): The second activation function to use in timestep embedding. Choose from `silu`, `mish` and `gelu`. time_cond_proj_dim (`int`, *optional*, defaults to `None`): The dimension of `cond_proj` layer in the timestep embedding. conv_in_kernel (`int`, *optional*, default to `3`): The kernel size of `conv_in` layer. conv_out_kernel (`int`, *optional*, default to `3`): The kernel size of `conv_out` layer. projection_class_embeddings_input_dim (`int`, *optional*): The dimension of the `class_labels` input when `class_embed_type="projection"`. Required when `class_embed_type="projection"`. class_embeddings_concat (`bool`, *optional*, defaults to `False`): Whether to concatenate the time embeddings with the class embeddings. mid_block_only_cross_attention (`bool`, *optional*, defaults to `None`): Whether to use cross attention with the mid block when using the `UNetMidBlock2DSimpleCrossAttn`. 
If `only_cross_attention` is given as a single boolean and `mid_block_only_cross_attention` is `None`, the `only_cross_attention` value is used as the value for `mid_block_only_cross_attention`. Default to `False` otherwise. """ _supports_gradient_checkpointing = True @register_to_config def __init__( self, sample_size: Optional[int] = None, in_channels: int = 4, out_channels: int = 4, center_input_sample: bool = False, flip_sin_to_cos: bool = True, freq_shift: int = 0, down_block_types: Tuple[str] = ( "CrossAttnDownBlockMV2D", "CrossAttnDownBlockMV2D", "CrossAttnDownBlockMV2D", "DownBlock2D", ), mid_block_type: Optional[str] = "UNetMidBlockMV2DCrossAttn", up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlockMV2D", "CrossAttnUpBlockMV2D", "CrossAttnUpBlockMV2D"), only_cross_attention: Union[bool, Tuple[bool]] = False, block_out_channels: Tuple[int] = (320, 640, 1280, 1280), layers_per_block: Union[int, Tuple[int]] = 2, downsample_padding: int = 1, mid_block_scale_factor: float = 1, act_fn: str = "silu", norm_num_groups: Optional[int] = 32, norm_eps: float = 1e-5, cross_attention_dim: Union[int, Tuple[int]] = 1280, transformer_layers_per_block: Union[int, Tuple[int]] = 1, encoder_hid_dim: Optional[int] = None, encoder_hid_dim_type: Optional[str] = None, attention_head_dim: Union[int, Tuple[int]] = 8, num_attention_heads: Optional[Union[int, Tuple[int]]] = None, dual_cross_attention: bool = False, use_linear_projection: bool = False, class_embed_type: Optional[str] = None, addition_embed_type: Optional[str] = None, addition_time_embed_dim: Optional[int] = None, num_class_embeds: Optional[int] = None, upcast_attention: bool = False, resnet_time_scale_shift: str = "default", resnet_skip_time_act: bool = False, resnet_out_scale_factor: int = 1.0, time_embedding_type: str = "positional", time_embedding_dim: Optional[int] = None, time_embedding_act_fn: Optional[str] = None, timestep_post_act: Optional[str] = None, time_cond_proj_dim: Optional[int] = None, conv_in_kernel: int = 3, conv_out_kernel: int = 3, projection_class_embeddings_input_dim: Optional[int] = None, class_embeddings_concat: bool = False, mid_block_only_cross_attention: Optional[bool] = None, cross_attention_norm: Optional[str] = None, addition_embed_type_num_heads=64, num_views: int = 1, cd_attention_last: bool = False, cd_attention_mid: bool = False, multiview_attention: bool = True, sparse_mv_attention: bool = False, mvcd_attention: bool = False ): super().__init__() self.sample_size = sample_size if num_attention_heads is not None: raise ValueError( "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19." ) # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. 
num_attention_heads = num_attention_heads or attention_head_dim # Check inputs if len(down_block_types) != len(up_block_types): raise ValueError( f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}." ) if len(block_out_channels) != len(down_block_types): raise ValueError( f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}." ) if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types): raise ValueError( f"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}." ) if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types): raise ValueError( f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}." ) if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types): raise ValueError( f"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}." ) if isinstance(cross_attention_dim, list) and len(cross_attention_dim) != len(down_block_types): raise ValueError( f"Must provide the same number of `cross_attention_dim` as `down_block_types`. `cross_attention_dim`: {cross_attention_dim}. `down_block_types`: {down_block_types}." ) if not isinstance(layers_per_block, int) and len(layers_per_block) != len(down_block_types): raise ValueError( f"Must provide the same number of `layers_per_block` as `down_block_types`. `layers_per_block`: {layers_per_block}. `down_block_types`: {down_block_types}." ) # input conv_in_padding = (conv_in_kernel - 1) // 2 self.conv_in = nn.Conv2d( in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding ) # time if time_embedding_type == "fourier": time_embed_dim = time_embedding_dim or block_out_channels[0] * 2 if time_embed_dim % 2 != 0: raise ValueError(f"`time_embed_dim` should be divisible by 2, but is {time_embed_dim}.") self.time_proj = GaussianFourierProjection( time_embed_dim // 2, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos ) timestep_input_dim = time_embed_dim elif time_embedding_type == "positional": time_embed_dim = time_embedding_dim or block_out_channels[0] * 4 self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift) timestep_input_dim = block_out_channels[0] else: raise ValueError( f"{time_embedding_type} does not exist. Please make sure to use one of `fourier` or `positional`." ) self.time_embedding = TimestepEmbedding( timestep_input_dim, time_embed_dim, act_fn=act_fn, post_act_fn=timestep_post_act, cond_proj_dim=time_cond_proj_dim, ) if encoder_hid_dim_type is None and encoder_hid_dim is not None: encoder_hid_dim_type = "text_proj" self.register_to_config(encoder_hid_dim_type=encoder_hid_dim_type) logger.info("encoder_hid_dim_type defaults to 'text_proj' as `encoder_hid_dim` is defined.") if encoder_hid_dim is None and encoder_hid_dim_type is not None: raise ValueError( f"`encoder_hid_dim` has to be defined when `encoder_hid_dim_type` is set to {encoder_hid_dim_type}." 
) if encoder_hid_dim_type == "text_proj": self.encoder_hid_proj = nn.Linear(encoder_hid_dim, cross_attention_dim) elif encoder_hid_dim_type == "text_image_proj": # image_embed_dim DOESN'T have to be `cross_attention_dim`. To not clutter the __init__ too much # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use # case when `addition_embed_type == "text_image_proj"` (Kadinsky 2.1)` self.encoder_hid_proj = TextImageProjection( text_embed_dim=encoder_hid_dim, image_embed_dim=cross_attention_dim, cross_attention_dim=cross_attention_dim, ) elif encoder_hid_dim_type == "image_proj": # Kandinsky 2.2 self.encoder_hid_proj = ImageProjection( image_embed_dim=encoder_hid_dim, cross_attention_dim=cross_attention_dim, ) elif encoder_hid_dim_type is not None: raise ValueError( f"encoder_hid_dim_type: {encoder_hid_dim_type} must be None, 'text_proj' or 'text_image_proj'." ) else: self.encoder_hid_proj = None # class embedding if class_embed_type is None and num_class_embeds is not None: self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim) elif class_embed_type == "timestep": self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim, act_fn=act_fn) elif class_embed_type == "identity": self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim) elif class_embed_type == "projection": if projection_class_embeddings_input_dim is None: raise ValueError( "`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set" ) # The projection `class_embed_type` is the same as the timestep `class_embed_type` except # 1. the `class_labels` inputs are not first converted to sinusoidal embeddings # 2. it projects from an arbitrary input dimension. # # Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations. # When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings. # As a result, `TimestepEmbedding` can be passed arbitrary vectors. self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim) elif class_embed_type == "simple_projection": if projection_class_embeddings_input_dim is None: raise ValueError( "`class_embed_type`: 'simple_projection' requires `projection_class_embeddings_input_dim` be set" ) self.class_embedding = nn.Linear(projection_class_embeddings_input_dim, time_embed_dim) else: self.class_embedding = None if addition_embed_type == "text": if encoder_hid_dim is not None: text_time_embedding_from_dim = encoder_hid_dim else: text_time_embedding_from_dim = cross_attention_dim self.add_embedding = TextTimeEmbedding( text_time_embedding_from_dim, time_embed_dim, num_heads=addition_embed_type_num_heads ) elif addition_embed_type == "text_image": # text_embed_dim and image_embed_dim DON'T have to be `cross_attention_dim`. 
To not clutter the __init__ too much # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use # case when `addition_embed_type == "text_image"` (Kadinsky 2.1)` self.add_embedding = TextImageTimeEmbedding( text_embed_dim=cross_attention_dim, image_embed_dim=cross_attention_dim, time_embed_dim=time_embed_dim ) elif addition_embed_type == "text_time": self.add_time_proj = Timesteps(addition_time_embed_dim, flip_sin_to_cos, freq_shift) self.add_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim) elif addition_embed_type == "image": # Kandinsky 2.2 self.add_embedding = ImageTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim) elif addition_embed_type == "image_hint": # Kandinsky 2.2 ControlNet self.add_embedding = ImageHintTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim) elif addition_embed_type is not None: raise ValueError(f"addition_embed_type: {addition_embed_type} must be None, 'text' or 'text_image'.") if time_embedding_act_fn is None: self.time_embed_act = None else: self.time_embed_act = get_activation(time_embedding_act_fn) self.down_blocks = nn.ModuleList([]) self.up_blocks = nn.ModuleList([]) if isinstance(only_cross_attention, bool): if mid_block_only_cross_attention is None: mid_block_only_cross_attention = only_cross_attention only_cross_attention = [only_cross_attention] * len(down_block_types) if mid_block_only_cross_attention is None: mid_block_only_cross_attention = False if isinstance(num_attention_heads, int): num_attention_heads = (num_attention_heads,) * len(down_block_types) if isinstance(attention_head_dim, int): attention_head_dim = (attention_head_dim,) * len(down_block_types) if isinstance(cross_attention_dim, int): cross_attention_dim = (cross_attention_dim,) * len(down_block_types) if isinstance(layers_per_block, int): layers_per_block = [layers_per_block] * len(down_block_types) if isinstance(transformer_layers_per_block, int): transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types) if class_embeddings_concat: # The time embeddings are concatenated with the class embeddings. 
The dimension of the # time embeddings passed to the down, middle, and up blocks is twice the dimension of the # regular time embeddings blocks_time_embed_dim = time_embed_dim * 2 else: blocks_time_embed_dim = time_embed_dim # down output_channel = block_out_channels[0] for i, down_block_type in enumerate(down_block_types): input_channel = output_channel output_channel = block_out_channels[i] is_final_block = i == len(block_out_channels) - 1 down_block = get_down_block( down_block_type, num_layers=layers_per_block[i], transformer_layers_per_block=transformer_layers_per_block[i], in_channels=input_channel, out_channels=output_channel, temb_channels=blocks_time_embed_dim, add_downsample=not is_final_block, resnet_eps=norm_eps, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, cross_attention_dim=cross_attention_dim[i], num_attention_heads=num_attention_heads[i], downsample_padding=downsample_padding, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention[i], upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift, resnet_skip_time_act=resnet_skip_time_act, resnet_out_scale_factor=resnet_out_scale_factor, cross_attention_norm=cross_attention_norm, attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel, num_views=num_views, cd_attention_last=cd_attention_last, cd_attention_mid=cd_attention_mid, multiview_attention=multiview_attention, sparse_mv_attention=sparse_mv_attention, mvcd_attention=mvcd_attention ) self.down_blocks.append(down_block) # mid if mid_block_type == "UNetMidBlock2DCrossAttn": self.mid_block = UNetMidBlock2DCrossAttn( transformer_layers_per_block=transformer_layers_per_block[-1], in_channels=block_out_channels[-1], temb_channels=blocks_time_embed_dim, resnet_eps=norm_eps, resnet_act_fn=act_fn, output_scale_factor=mid_block_scale_factor, resnet_time_scale_shift=resnet_time_scale_shift, cross_attention_dim=cross_attention_dim[-1], num_attention_heads=num_attention_heads[-1], resnet_groups=norm_num_groups, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, upcast_attention=upcast_attention, ) # custom MV2D attention block elif mid_block_type == "UNetMidBlockMV2DCrossAttn":
self.mid_block = UNetMidBlockMV2DCrossAttn(
2
2023-10-14 12:18:38+00:00
16k
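Read together, the fields of this row appear to describe one next-line completion example: cropped_code stops right after the elif mid_block_type == "UNetMidBlockMV2DCrossAttn": branch, next_line is the statement that follows at that point in all_code, and gold_snippet_index (2 here) points at the UNetMidBlockMV2DCrossAttn entry of context, i.e. the retrieved definition of the symbol being completed. The sketch below shows one plausible way to assemble such a row into a prompt and score a prediction by exact match; the dataset identifier, the loading call, and the stub completion function are assumptions for illustration, not documented usage.

# Sketch only: assumes these rows are loadable as a Hugging Face dataset with the
# column names shown in this dump; the repo id and the completion stub are hypothetical.
from datasets import load_dataset

def complete_one_line(prompt: str) -> str:
    """Placeholder for a real code-completion model call; returns an empty guess."""
    return ""

ds = load_dataset("path/to/this-dataset", split="train")   # hypothetical identifier
row = ds[0]

# Put the retrieved gold definition in front of the truncated file as context.
gold = row["context"][row["gold_snippet_index"]]
prompt = (
    f"# {gold['path']}\n{gold['snippet']}\n\n"
    f"# {row['file_path']}\n{row['import_statement']}\n{row['cropped_code']}\n"
)

prediction = complete_one_line(prompt)
exact_match = prediction.strip() == row["next_line"].strip()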
PixArt-alpha/PixArt-alpha
train_scripts/train_pixart_lcm_lora.py
[ { "identifier": "IDDPM", "path": "diffusion/iddpm.py", "snippet": "def IDDPM(\n timestep_respacing,\n noise_schedule=\"linear\",\n use_kl=False,\n sigma_small=False,\n predict_xstart=False,\n learn_sigma=True,\n pred_sigma=True,\n rescale_learned_sigmas=False,\n diffusion_steps=1000,\n snr=False,\n return_startx=False,\n):\n betas = gd.get_named_beta_schedule(noise_schedule, diffusion_steps)\n if use_kl:\n loss_type = gd.LossType.RESCALED_KL\n elif rescale_learned_sigmas:\n loss_type = gd.LossType.RESCALED_MSE\n else:\n loss_type = gd.LossType.MSE\n if timestep_respacing is None or timestep_respacing == \"\":\n timestep_respacing = [diffusion_steps]\n return SpacedDiffusion(\n use_timesteps=space_timesteps(diffusion_steps, timestep_respacing),\n betas=betas,\n model_mean_type=(\n gd.ModelMeanType.EPSILON if not predict_xstart else gd.ModelMeanType.START_X\n ),\n model_var_type=(\n ((\n gd.ModelVarType.FIXED_LARGE\n if not sigma_small\n else gd.ModelVarType.FIXED_SMALL\n )\n if not learn_sigma\n else gd.ModelVarType.LEARNED_RANGE\n )\n if pred_sigma\n else None\n ),\n loss_type=loss_type,\n snr=snr,\n return_startx=return_startx,\n # rescale_timesteps=rescale_timesteps,\n )" }, { "identifier": "get_world_size", "path": "diffusion/utils/dist_utils.py", "snippet": "def get_world_size():\n if not dist.is_available():\n return 1\n if not dist.is_initialized():\n return 1\n return dist.get_world_size()" }, { "identifier": "clip_grad_norm_", "path": "diffusion/utils/dist_utils.py", "snippet": "@torch.no_grad()\ndef clip_grad_norm_(\n self, max_norm: Union[float, int], norm_type: Union[float, int] = 2.0\n) -> None:\n self._lazy_init()\n self._wait_for_previous_optim_step()\n assert self._is_root, \"clip_grad_norm should only be called on the root (parent) instance\"\n self._assert_state(TrainingState_.IDLE)\n\n max_norm = float(max_norm)\n norm_type = float(norm_type)\n # Computes the max norm for this shard's gradients and sync's across workers\n local_norm = _calc_grad_norm(self.params_with_grad, norm_type).cuda() # type: ignore[arg-type]\n if norm_type == math.inf:\n total_norm = local_norm\n dist.all_reduce(total_norm, op=torch.distributed.ReduceOp.MAX, group=self.process_group)\n else:\n total_norm = local_norm ** norm_type\n dist.all_reduce(total_norm, group=self.process_group)\n total_norm = total_norm ** (1.0 / norm_type)\n\n clip_coef = torch.tensor(max_norm, dtype=total_norm.dtype, device=total_norm.device) / (total_norm + 1e-6)\n if clip_coef < 1:\n # multiply by clip_coef, aka, (max_norm/total_norm).\n for p in self.params_with_grad:\n assert p.grad is not None\n p.grad.detach().mul_(clip_coef.to(p.grad.device))\n return total_norm" }, { "identifier": "build_dataset", "path": "diffusion/data/builder.py", "snippet": "def build_dataset(cfg, resolution=224, **kwargs):\n logger = get_root_logger()\n\n dataset_type = cfg.get('type')\n logger.info(f\"Constructing dataset {dataset_type}...\")\n t = time.time()\n transform = cfg.pop('transform', 'default_train')\n transform = get_transform(transform, resolution)\n dataset = build_from_cfg(cfg, DATASETS, default_args=dict(transform=transform, resolution=resolution, **kwargs))\n logger.info(f\"Dataset {dataset_type} constructed. 
time: {(time.time() - t):.2f} s, length (use/ori): {len(dataset)}/{dataset.ori_imgs_nums}\")\n return dataset" }, { "identifier": "build_dataloader", "path": "diffusion/data/builder.py", "snippet": "def build_dataloader(dataset, batch_size=256, num_workers=4, shuffle=True, **kwargs):\n if 'batch_sampler' in kwargs:\n dataloader = DataLoader(dataset, batch_sampler=kwargs['batch_sampler'], num_workers=num_workers, pin_memory=True)\n else:\n dataloader = DataLoader(dataset,\n batch_size=batch_size,\n shuffle=shuffle,\n num_workers=num_workers,\n pin_memory=True,\n **kwargs)\n return dataloader" }, { "identifier": "set_data_root", "path": "diffusion/data/builder.py", "snippet": "def set_data_root(data_root):\n global DATA_ROOT\n DATA_ROOT = data_root" }, { "identifier": "get_root_logger", "path": "diffusion/utils/logger.py", "snippet": "def get_root_logger(log_file=None, log_level=logging.INFO, name='PixArt'):\n \"\"\"Get root logger.\n\n Args:\n log_file (str, optional): File path of log. Defaults to None.\n log_level (int, optional): The level of logger.\n Defaults to logging.INFO.\n name (str): logger name\n Returns:\n :obj:`logging.Logger`: The obtained logger\n \"\"\"\n if log_file is None:\n log_file = '/dev/null'\n logger = get_logger(name=name, log_file=log_file, log_level=log_level)\n return logger" }, { "identifier": "set_random_seed", "path": "diffusion/utils/misc.py", "snippet": "def set_random_seed(seed, deterministic=False):\n \"\"\"Set random seed.\n\n Args:\n seed (int): Seed to be used.\n deterministic (bool): Whether to set the deterministic option for\n CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`\n to True and `torch.backends.cudnn.benchmark` to False.\n Default: False.\n \"\"\"\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n if deterministic:\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False" }, { "identifier": "read_config", "path": "diffusion/utils/misc.py", "snippet": "def read_config(file):\n # solve config loading conflict when multi-processes\n import time\n while True:\n config = Config.fromfile(file)\n if len(config) == 0:\n time.sleep(0.1)\n continue\n break\n return config" }, { "identifier": "init_random_seed", "path": "diffusion/utils/misc.py", "snippet": "def init_random_seed(seed=None, device='cuda'):\n \"\"\"Initialize random seed.\n\n If the seed is not set, the seed will be automatically randomized,\n and then broadcast to all processes to prevent some potential bugs.\n\n Args:\n seed (int, Optional): The seed. Default to None.\n device (str): The device where the seed will be put on.\n Default to 'cuda'.\n\n Returns:\n int: Seed to be used.\n \"\"\"\n if seed is not None:\n return seed\n\n # Make sure all ranks share the same random seed to prevent\n # some potential bugs. 
Please refer to\n # https://github.com/open-mmlab/mmdetection/issues/6339\n rank, world_size = get_dist_info()\n seed = np.random.randint(2 ** 31)\n if world_size == 1:\n return seed\n\n if rank == 0:\n random_num = torch.tensor(seed, dtype=torch.int32, device=device)\n else:\n random_num = torch.tensor(0, dtype=torch.int32, device=device)\n dist.broadcast(random_num, src=0)\n return random_num.item()" }, { "identifier": "DebugUnderflowOverflow", "path": "diffusion/utils/misc.py", "snippet": "class DebugUnderflowOverflow:\n \"\"\"\n This debug class helps detect and understand where the model starts getting very large or very small, and more\n importantly `nan` or `inf` weight and activation elements.\n There are 2 working modes:\n 1. Underflow/overflow detection (default)\n 2. Specific batch absolute min/max tracing without detection\n Mode 1: Underflow/overflow detection\n To activate the underflow/overflow detection, initialize the object with the model :\n ```python\n debug_overflow = DebugUnderflowOverflow(model)\n ```\n then run the training as normal and if `nan` or `inf` gets detected in at least one of the weight, input or\n output elements this module will throw an exception and will print `max_frames_to_save` frames that lead to this\n event, each frame reporting\n 1. the fully qualified module name plus the class name whose `forward` was run\n 2. the absolute min and max value of all elements for each module weights, and the inputs and output\n For example, here is the header and the last few frames in detection report for `google/mt5-small` run in fp16 mixed precision :\n ```\n Detected inf/nan during batch_number=0\n Last 21 forward frames:\n abs min abs max metadata\n [...]\n encoder.block.2.layer.1.DenseReluDense.wi_0 Linear\n 2.17e-07 4.50e+00 weight\n 1.79e-06 4.65e+00 input[0]\n 2.68e-06 3.70e+01 output\n encoder.block.2.layer.1.DenseReluDense.wi_1 Linear\n 8.08e-07 2.66e+01 weight\n 1.79e-06 4.65e+00 input[0]\n 1.27e-04 2.37e+02 output\n encoder.block.2.layer.1.DenseReluDense.wo Linear\n 1.01e-06 6.44e+00 weight\n 0.00e+00 9.74e+03 input[0]\n 3.18e-04 6.27e+04 output\n encoder.block.2.layer.1.DenseReluDense T5DenseGatedGeluDense\n 1.79e-06 4.65e+00 input[0]\n 3.18e-04 6.27e+04 output\n encoder.block.2.layer.1.dropout Dropout\n 3.18e-04 6.27e+04 input[0]\n 0.00e+00 inf output\n ```\n You can see here, that `T5DenseGatedGeluDense.forward` resulted in output activations, whose absolute max value\n was around 62.7K, which is very close to fp16's top limit of 64K. In the next frame we have `Dropout` which\n renormalizes the weights, after it zeroed some of the elements, which pushes the absolute max value to more than\n 64K, and we get an overlow.\n As you can see it's the previous frames that we need to look into when the numbers start going into very large for\n fp16 numbers.\n The tracking is done in a forward hook, which gets invoked immediately after `forward` has completed.\n By default the last 21 frames are printed. You can change the default to adjust for your needs. For example :\n ```python\n debug_overflow = DebugUnderflowOverflow(model, max_frames_to_save=100)\n ```\n To validate that you have set up this debugging feature correctly, and you intend to use it in a training that may\n take hours to complete, first run it with normal tracing enabled for one of a few batches as explained in the next\n section.\n Mode 2. 
Specific batch absolute min/max tracing without detection\n The second work mode is per-batch tracing with the underflow/overflow detection feature turned off.\n Let's say you want to watch the absolute min and max values for all the ingredients of each `forward` call of a\n given batch, and only do that for batches 1 and 3. Then you instantiate this class as :\n ```python\n debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[1,3])\n ```\n And now full batches 1 and 3 will be traced using the same format as explained above. Batches are 0-indexed.\n This is helpful if you know that the program starts misbehaving after a certain batch number, so you can\n fast-forward right to that area.\n Early stopping:\n You can also specify the batch number after which to stop the training, with :\n ```python\n debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[1,3], abort_after_batch_num=3)\n ```\n This feature is mainly useful in the tracing mode, but you can use it for any mode.\n **Performance**:\n As this module measures absolute `min`/``max` of each weight of the model on every forward it'll slow the\n training down. Therefore remember to turn it off once the debugging needs have been met.\n Args:\n model (`nn.Module`):\n The model to debug.\n max_frames_to_save (`int`, *optional*, defaults to 21):\n How many frames back to record\n trace_batch_nums(`List[int]`, *optional*, defaults to `[]`):\n Which batch numbers to trace (turns detection off)\n abort_after_batch_num (`int``, *optional*):\n Whether to abort after a certain batch number has finished\n \"\"\"\n\n def __init__(self, model, max_frames_to_save=21, trace_batch_nums=[], abort_after_batch_num=None):\n self.model = model\n self.trace_batch_nums = trace_batch_nums\n self.abort_after_batch_num = abort_after_batch_num\n\n # keep a LIFO buffer of frames to dump as soon as inf/nan is encountered to give context to the problem emergence\n self.frames = collections.deque([], max_frames_to_save)\n self.frame = []\n self.batch_number = 0\n self.total_calls = 0\n self.detected_overflow = False\n self.prefix = \" \"\n\n self.analyse_model()\n\n self.register_forward_hook()\n\n def save_frame(self, frame=None):\n if frame is not None:\n self.expand_frame(frame)\n self.frames.append(\"\\n\".join(self.frame))\n self.frame = [] # start a new frame\n\n def expand_frame(self, line):\n self.frame.append(line)\n\n def trace_frames(self):\n print(\"\\n\".join(self.frames))\n self.frames = []\n\n def reset_saved_frames(self):\n self.frames = []\n\n def dump_saved_frames(self):\n print(f\"\\nDetected inf/nan during batch_number={self.batch_number} \"\n f\"Last {len(self.frames)} forward frames:\"\n f\"{'abs min':8} {'abs max':8} metadata\"\n f\"'\\n'.join(self.frames)\"\n f\"\\n\\n\")\n self.frames = []\n\n def analyse_model(self):\n # extract the fully qualified module names, to be able to report at run time. 
e.g.:\n # encoder.block.2.layer.0.SelfAttention.o\n #\n # for shared weights only the first shared module name will be registered\n self.module_names = {m: name for name, m in self.model.named_modules()}\n # self.longest_module_name = max(len(v) for v in self.module_names.values())\n\n def analyse_variable(self, var, ctx):\n if torch.is_tensor(var):\n self.expand_frame(self.get_abs_min_max(var, ctx))\n if self.detect_overflow(var, ctx):\n self.detected_overflow = True\n elif var is None:\n self.expand_frame(f\"{'None':>17} {ctx}\")\n else:\n self.expand_frame(f\"{'not a tensor':>17} {ctx}\")\n\n def batch_start_frame(self):\n self.expand_frame(f\"\\n\\n{self.prefix} *** Starting batch number={self.batch_number} ***\")\n self.expand_frame(f\"{'abs min':8} {'abs max':8} metadata\")\n\n def batch_end_frame(self):\n self.expand_frame(f\"{self.prefix} *** Finished batch number={self.batch_number - 1} ***\\n\\n\")\n\n def create_frame(self, module, input, output):\n self.expand_frame(f\"{self.prefix} {self.module_names[module]} {module.__class__.__name__}\")\n\n # params\n for name, p in module.named_parameters(recurse=False):\n self.analyse_variable(p, name)\n\n # inputs\n if isinstance(input, tuple):\n for i, x in enumerate(input):\n self.analyse_variable(x, f\"input[{i}]\")\n else:\n self.analyse_variable(input, \"input\")\n\n # outputs\n if isinstance(output, tuple):\n for i, x in enumerate(output):\n # possibly a tuple of tuples\n if isinstance(x, tuple):\n for j, y in enumerate(x):\n self.analyse_variable(y, f\"output[{i}][{j}]\")\n else:\n self.analyse_variable(x, f\"output[{i}]\")\n else:\n self.analyse_variable(output, \"output\")\n\n self.save_frame()\n\n def register_forward_hook(self):\n self.model.apply(self._register_forward_hook)\n\n def _register_forward_hook(self, module):\n module.register_forward_hook(self.forward_hook)\n\n def forward_hook(self, module, input, output):\n # - input is a tuple of packed inputs (could be non-Tensors)\n # - output could be a Tensor or a tuple of Tensors and non-Tensors\n\n last_frame_of_batch = False\n\n trace_mode = True if self.batch_number in self.trace_batch_nums else False\n if trace_mode:\n self.reset_saved_frames()\n\n if self.total_calls == 0:\n self.batch_start_frame()\n self.total_calls += 1\n\n # count batch numbers - the very first forward hook of the batch will be called when the\n # batch completes - i.e. it gets called very last - we know this batch has finished\n if module == self.model:\n self.batch_number += 1\n last_frame_of_batch = True\n\n self.create_frame(module, input, output)\n\n # if last_frame_of_batch:\n # self.batch_end_frame()\n\n if trace_mode:\n self.trace_frames()\n\n if last_frame_of_batch:\n self.batch_start_frame()\n\n if self.detected_overflow and not trace_mode:\n self.dump_saved_frames()\n\n # now we can abort, as it's pointless to continue running\n raise ValueError(\n \"DebugUnderflowOverflow: inf/nan detected, aborting as there is no point running further. 
\"\n \"Please scroll up above this traceback to see the activation values prior to this event.\"\n )\n\n # abort after certain batch if requested to do so\n if self.abort_after_batch_num is not None and self.batch_number > self.abort_after_batch_num:\n raise ValueError(\n f\"DebugUnderflowOverflow: aborting after {self.batch_number} batches due to `abort_after_batch_num={self.abort_after_batch_num}` arg\"\n )\n\n @staticmethod\n def get_abs_min_max(var, ctx):\n abs_var = var.abs()\n return f\"{abs_var.min():8.2e} {abs_var.max():8.2e} {ctx}\"\n\n @staticmethod\n def detect_overflow(var, ctx):\n \"\"\"\n Report whether the tensor contains any `nan` or `inf` entries.\n This is useful for detecting overflows/underflows and best to call right after the function that did some math that\n modified the tensor in question.\n This function contains a few other helper features that you can enable and tweak directly if you want to track\n various other things.\n Args:\n var: the tensor variable to check\n ctx: the message to print as a context\n Return:\n `True` if `inf` or `nan` was detected, `False` otherwise\n \"\"\"\n detected = False\n if torch.isnan(var).any().item():\n detected = True\n print(f\"{ctx} has nans\")\n if torch.isinf(var).any().item():\n detected = True\n print(f\"{ctx} has infs\")\n if var.dtype == torch.float32 and torch.ge(var.abs(), 65535).any().item():\n detected = True\n print(f\"{ctx} has overflow values {var.abs().max().item()}.\")\n # if needed to monitor large elements can enable the following\n if 0: # and detected:\n n100 = var[torch.ge(var.abs(), 100)]\n if n100.numel() > 0:\n print(f\"{ctx}: n100={n100.numel()}\")\n n1000 = var[torch.ge(var.abs(), 1000)]\n if n1000.numel() > 0:\n print(f\"{ctx}: n1000={n1000.numel()}\")\n n10000 = var[torch.ge(var.abs(), 10000)]\n if n10000.numel() > 0:\n print(f\"{ctx}: n10000={n10000.numel()}\")\n\n if 0:\n print(f\"min={var.min():9.2e} max={var.max():9.2e}\")\n\n if 0:\n print(f\"min={var.min():9.2e} max={var.max():9.2e} var={var.var():9.2e} mean={var.mean():9.2e} ({ctx})\")\n\n return detected" }, { "identifier": "build_optimizer", "path": "diffusion/utils/optimizer.py", "snippet": "def build_optimizer(model, optimizer_cfg):\n # default parameter-wise config\n logger = get_root_logger()\n\n if hasattr(model, 'module'):\n model = model.module\n # set optimizer constructor\n optimizer_cfg.setdefault('constructor', 'MyOptimizerConstructor')\n # parameter-wise setting: cancel weight decay for some specific modules\n custom_keys = dict()\n for name, module in model.named_modules():\n if hasattr(module, 'zero_weight_decay'):\n custom_keys.update({(name, key): dict(decay_mult=0) for key in module.zero_weight_decay})\n\n paramwise_cfg = Config(dict(cfg=dict(custom_keys=custom_keys)))\n given_cfg = optimizer_cfg.get('paramwise_cfg')\n if given_cfg:\n paramwise_cfg.merge_from_dict(dict(cfg=given_cfg))\n optimizer_cfg['paramwise_cfg'] = paramwise_cfg.cfg\n # build optimizer\n optimizer = mm_build_optimizer(model, optimizer_cfg)\n\n weight_decay_groups = dict()\n lr_groups = dict()\n for group in optimizer.param_groups:\n if not group.get('requires_grad', True): continue\n lr_groups.setdefault(group['lr'], []).append(group)\n weight_decay_groups.setdefault(group['weight_decay'], []).append(group)\n\n learnable_count, fix_count = 0, 0\n for p in model.parameters():\n if p.requires_grad:\n learnable_count += 1\n else:\n fix_count += 1\n fix_info = f\"{learnable_count} are learnable, {fix_count} are fix\"\n lr_info = \"Lr group: \" + \", 
\".join([f'{len(group)} params with lr {lr:.5f}' for lr, group in lr_groups.items()])\n wd_info = \"Weight decay group: \" + \", \".join(\n [f'{len(group)} params with weight decay {wd}' for wd, group in weight_decay_groups.items()])\n opt_info = f\"Optimizer: total {len(optimizer.param_groups)} param groups, {fix_info}. {lr_info}; {wd_info}.\"\n logger.info(opt_info)\n\n return optimizer" }, { "identifier": "auto_scale_lr", "path": "diffusion/utils/optimizer.py", "snippet": "def auto_scale_lr(effective_bs, optimizer_cfg, rule='linear', base_batch_size=256):\n assert rule in ['linear', 'sqrt']\n logger = get_root_logger()\n # scale by world size\n if rule == 'sqrt':\n scale_ratio = math.sqrt(effective_bs / base_batch_size)\n elif rule == 'linear':\n scale_ratio = effective_bs / base_batch_size\n optimizer_cfg['lr'] *= scale_ratio\n logger.info(f'Automatically adapt lr to {optimizer_cfg[\"lr\"]:.7f} (using {rule} scaling rule).')\n return scale_ratio" }, { "identifier": "build_lr_scheduler", "path": "diffusion/utils/lr_scheduler.py", "snippet": "def build_lr_scheduler(config, optimizer, train_dataloader, lr_scale_ratio):\n if not config.get('lr_schedule_args', None):\n config.lr_schedule_args = dict()\n if config.get('lr_warmup_steps', None):\n config['num_warmup_steps'] = config.get('lr_warmup_steps') # for compatibility with old version\n\n logger = get_root_logger()\n logger.info(\n f'Lr schedule: {config.lr_schedule}, ' + \",\".join(\n [f\"{key}:{value}\" for key, value in config.lr_schedule_args.items()]) + '.')\n if config.lr_schedule == 'cosine':\n lr_scheduler = get_cosine_schedule_with_warmup(\n optimizer=optimizer,\n **config.lr_schedule_args,\n num_training_steps=(len(train_dataloader) * config.num_epochs),\n )\n elif config.lr_schedule == 'constant':\n lr_scheduler = get_constant_schedule_with_warmup(\n optimizer=optimizer,\n **config.lr_schedule_args,\n )\n elif config.lr_schedule == 'cosine_decay_to_constant':\n assert lr_scale_ratio >= 1\n lr_scheduler = get_cosine_decay_to_constant_with_warmup(\n optimizer=optimizer,\n **config.lr_schedule_args,\n final_lr=1 / lr_scale_ratio,\n num_training_steps=(len(train_dataloader) * config.num_epochs),\n )\n else:\n raise RuntimeError(f'Unrecognized lr schedule {config.lr_schedule}.')\n return lr_scheduler" }, { "identifier": "AspectRatioBatchSampler", "path": "diffusion/utils/data_sampler.py", "snippet": "class AspectRatioBatchSampler(BatchSampler):\n \"\"\"A sampler wrapper for grouping images with similar aspect ratio into a same batch.\n\n Args:\n sampler (Sampler): Base sampler.\n dataset (Dataset): Dataset providing data information.\n batch_size (int): Size of mini-batch.\n drop_last (bool): If ``True``, the sampler will drop the last batch if\n its size would be less than ``batch_size``.\n aspect_ratios (dict): The predefined aspect ratios.\n \"\"\"\n\n def __init__(self,\n sampler: Sampler,\n dataset: Dataset,\n batch_size: int,\n aspect_ratios: dict,\n drop_last: bool = False,\n config=None,\n valid_num=0, # take as valid aspect-ratio when sample number >= valid_num\n **kwargs) -> None:\n if not isinstance(sampler, Sampler):\n raise TypeError('sampler should be an instance of ``Sampler``, '\n f'but got {sampler}')\n if not isinstance(batch_size, int) or batch_size <= 0:\n raise ValueError('batch_size should be a positive integer value, '\n f'but got batch_size={batch_size}')\n self.sampler = sampler\n self.dataset = dataset\n self.batch_size = batch_size\n self.aspect_ratios = aspect_ratios\n self.drop_last = drop_last\n 
self.ratio_nums_gt = kwargs.get('ratio_nums', None)\n self.config = config\n assert self.ratio_nums_gt\n # buckets for each aspect ratio\n self._aspect_ratio_buckets = {ratio: [] for ratio in aspect_ratios.keys()}\n self.current_available_bucket_keys = [str(k) for k, v in self.ratio_nums_gt.items() if v >= valid_num]\n logger = get_root_logger() if config is None else get_root_logger(os.path.join(config.work_dir, 'train_log.log'))\n logger.warning(f\"Using valid_num={valid_num} in config file. Available {len(self.current_available_bucket_keys)} aspect_ratios: {self.current_available_bucket_keys}\")\n\n def __iter__(self) -> Sequence[int]:\n for idx in self.sampler:\n data_info = self.dataset.get_data_info(idx)\n height, width = data_info['height'], data_info['width']\n ratio = height / width\n # find the closest aspect ratio\n closest_ratio = min(self.aspect_ratios.keys(), key=lambda r: abs(float(r) - ratio))\n if closest_ratio not in self.current_available_bucket_keys:\n continue\n bucket = self._aspect_ratio_buckets[closest_ratio]\n bucket.append(idx)\n # yield a batch of indices in the same aspect ratio group\n if len(bucket) == self.batch_size:\n yield bucket[:]\n del bucket[:]\n\n # yield the rest data and reset the buckets\n for bucket in self._aspect_ratio_buckets.values():\n while len(bucket) > 0:\n if len(bucket) <= self.batch_size:\n if not self.drop_last:\n yield bucket[:]\n bucket = []\n else:\n yield bucket[:self.batch_size]\n bucket = bucket[self.batch_size:]" }, { "identifier": "BalancedAspectRatioBatchSampler", "path": "diffusion/utils/data_sampler.py", "snippet": "class BalancedAspectRatioBatchSampler(AspectRatioBatchSampler):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n # Assign samples to each bucket\n self.ratio_nums_gt = kwargs.get('ratio_nums', None)\n assert self.ratio_nums_gt\n self._aspect_ratio_buckets = {float(ratio): [] for ratio in self.aspect_ratios.keys()}\n self.original_buckets = {}\n self.current_available_bucket_keys = [k for k, v in self.ratio_nums_gt.items() if v >= 3000]\n self.all_available_keys = deepcopy(self.current_available_bucket_keys)\n self.exhausted_bucket_keys = []\n self.total_batches = len(self.sampler) // self.batch_size\n self._aspect_ratio_count = {}\n for k in self.all_available_keys:\n self._aspect_ratio_count[float(k)] = 0\n self.original_buckets[float(k)] = []\n logger = get_root_logger(os.path.join(self.config.work_dir, 'train_log.log'))\n logger.warning(f\"Available {len(self.current_available_bucket_keys)} aspect_ratios: {self.current_available_bucket_keys}\")\n\n def __iter__(self) -> Sequence[int]:\n i = 0\n for idx in self.sampler:\n data_info = self.dataset.get_data_info(idx)\n height, width = data_info['height'], data_info['width']\n ratio = height / width\n closest_ratio = float(min(self.aspect_ratios.keys(), key=lambda r: abs(float(r) - ratio)))\n if closest_ratio not in self.all_available_keys:\n continue\n if self._aspect_ratio_count[closest_ratio] < self.ratio_nums_gt[closest_ratio]:\n self._aspect_ratio_count[closest_ratio] += 1\n self._aspect_ratio_buckets[closest_ratio].append(idx)\n self.original_buckets[closest_ratio].append(idx) # Save the original samples for each bucket\n if not self.current_available_bucket_keys:\n self.current_available_bucket_keys, self.exhausted_bucket_keys = self.exhausted_bucket_keys, []\n\n if closest_ratio not in self.current_available_bucket_keys:\n continue\n key = closest_ratio\n bucket = self._aspect_ratio_buckets[key]\n if len(bucket) == 
self.batch_size:\n yield bucket[:self.batch_size]\n del bucket[:self.batch_size]\n i += 1\n self.exhausted_bucket_keys.append(key)\n self.current_available_bucket_keys.remove(key)\n\n for _ in range(self.total_batches - i):\n key = choice(self.all_available_keys)\n bucket = self._aspect_ratio_buckets[key]\n if len(bucket) >= self.batch_size:\n yield bucket[:self.batch_size]\n del bucket[:self.batch_size]\n\n # If a bucket is exhausted\n if not bucket:\n self._aspect_ratio_buckets[key] = deepcopy(self.original_buckets[key][:])\n shuffle(self._aspect_ratio_buckets[key])\n else:\n self._aspect_ratio_buckets[key] = deepcopy(self.original_buckets[key][:])\n shuffle(self._aspect_ratio_buckets[key])" } ]
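The `AspectRatioBatchSampler` snippet that closes the context list above groups sample indices into buckets keyed by the closest predefined aspect ratio and only yields a batch once a bucket is full. A minimal standalone sketch of that bucketing idea (the `sizes` data and ratio keys below are toy values for illustration; the real class additionally wraps a torch `Sampler`, filters rare ratios, and logs its configuration):

```python
from collections import defaultdict

def bucket_batches(sizes, aspect_ratios, batch_size, drop_last=False):
    """Yield batches of indices whose samples share the closest aspect-ratio bucket."""
    buckets = defaultdict(list)
    for idx, (height, width) in enumerate(sizes):
        ratio = height / width
        closest = min(aspect_ratios, key=lambda r: abs(float(r) - ratio))
        buckets[closest].append(idx)
        if len(buckets[closest]) == batch_size:
            yield buckets[closest][:]      # emit a full same-ratio batch
            buckets[closest].clear()
    if not drop_last:                      # flush whatever is left over
        for bucket in buckets.values():
            if bucket:
                yield bucket[:]

# Toy usage: (height, width) pairs and three candidate ratios.
sizes = [(512, 512), (640, 360), (512, 512), (360, 640), (512, 512), (512, 512)]
ratios = [1.0, 16 / 9, 9 / 16]
print(list(bucket_batches(sizes, ratios, batch_size=2)))
# -> [[0, 2], [4, 5], [1], [3]]
```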
import os import sys import types import argparse import datetime import time import warnings import torch import torch.nn.functional as F import numpy as np import re import accelerate from pathlib import Path from accelerate import Accelerator, InitProcessGroupKwargs from accelerate.utils import DistributedType from torch.utils.data import RandomSampler from mmcv.runner import LogBuffer from packaging import version from diffusion import IDDPM from diffusion.utils.dist_utils import get_world_size, clip_grad_norm_ from diffusion.data.builder import build_dataset, build_dataloader, set_data_root from diffusion.utils.logger import get_root_logger from diffusion.utils.misc import set_random_seed, read_config, init_random_seed, DebugUnderflowOverflow from diffusion.utils.optimizer import build_optimizer, auto_scale_lr from diffusion.utils.lr_scheduler import build_lr_scheduler from diffusion.utils.data_sampler import AspectRatioBatchSampler, BalancedAspectRatioBatchSampler from peft import LoraConfig, get_peft_model, get_peft_model_state_dict from diffusers import AutoencoderKL, Transformer2DModel, StableDiffusionPipeline, PixArtAlphaPipeline from accelerate import FullyShardedDataParallelPlugin from torch.distributed.fsdp.fully_sharded_data_parallel import FullStateDictConfig
10916
# w = (config.w_max - config.w_min) * torch.rand((bsz,)) + config.w_min w = config.cfg_scale * torch.ones((bsz,)) w = w.reshape(bsz, 1, 1, 1) w = w.to(device=latents.device, dtype=latents.dtype) # Get online LCM prediction on z_{t_{n + k}}, w, c, t_{n + k} _, pred_x_0, noisy_model_input = train_diffusion.training_losses_diffusers( model, latents, start_timesteps, model_kwargs=dict(encoder_hidden_states=y, encoder_attention_mask=y_mask, added_cond_kwargs=data_info), noise=noise ) model_pred = c_skip_start * noisy_model_input + c_out_start * pred_x_0 with torch.no_grad(): with torch.autocast("cuda"): cond_teacher_output, cond_pred_x0, _ = train_diffusion.training_losses_diffusers( model_teacher, latents, start_timesteps, model_kwargs=dict(encoder_hidden_states=y, encoder_attention_mask=y_mask, added_cond_kwargs=data_info), noise=noise ) # Get teacher model prediction on noisy_latents and unconditional embedding uncond_teacher_output, uncond_pred_x0, _ = train_diffusion.training_losses_diffusers( model_teacher, latents, start_timesteps, model_kwargs=dict(encoder_hidden_states=uncond_prompt_embeds, encoder_attention_mask=y_mask, added_cond_kwargs=data_info), noise=noise ) # Perform "CFG" to get x_prev estimate (using the LCM paper's CFG formulation) pred_x0 = cond_pred_x0 + w * (cond_pred_x0 - uncond_pred_x0) pred_noise = cond_teacher_output + w * (cond_teacher_output - uncond_teacher_output) x_prev = solver.ddim_step(pred_x0, pred_noise, index) # Get target LCM prediction on x_prev, w, c, t_n with torch.no_grad(): with torch.autocast("cuda", enabled=True): _, pred_x_0, _ = train_diffusion.training_losses_diffusers( model, x_prev.float(), timesteps, model_kwargs=dict(encoder_hidden_states=y, encoder_attention_mask=y_mask, added_cond_kwargs=data_info), skip_noise=True ) target = c_skip * x_prev + c_out * pred_x_0 # Calculate loss if config.loss_type == "l2": loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") elif config.loss_type == "huber": loss = torch.mean(torch.sqrt((model_pred.float() - target.float()) ** 2 + config.huber_c**2) - config.huber_c) accelerator.backward(loss) if accelerator.sync_gradients: grad_norm = accelerator.clip_grad_norm_(model.parameters(), config.gradient_clip) optimizer.step() lr_scheduler.step() optimizer.zero_grad(set_to_none=True) lr = lr_scheduler.get_last_lr()[0] logs = {"loss": accelerator.gather(loss).mean().item()} if grad_norm is not None: logs.update(grad_norm=accelerator.gather(grad_norm).mean().item()) log_buffer.update(logs) if (step + 1) % config.log_interval == 0 or (step + 1) == 1: t = (time.time() - last_tic) / config.log_interval t_d = data_time_all / config.log_interval avg_time = (time.time() - time_start) / (global_step + 1) eta = str(datetime.timedelta(seconds=int(avg_time * (total_steps - start_step - global_step - 1)))) eta_epoch = str(datetime.timedelta(seconds=int(avg_time * (len(train_dataloader) - step - 1)))) # avg_loss = sum(loss_buffer) / len(loss_buffer) log_buffer.average() info = f"Step/Epoch [{(epoch-1)*len(train_dataloader)+step+1}/{epoch}][{step + 1}/{len(train_dataloader)}]:total_eta: {eta}, " \ f"epoch_eta:{eta_epoch}, time_all:{t:.3f}, time_data:{t_d:.3f}, lr:{lr:.3e}, s:({data_info['resolution'][0][0].item()}, {data_info['resolution'][0][1].item()}), " info += ', '.join([f"{k}:{v:.4f}" for k, v in log_buffer.output.items()]) logger.info(info) last_tic = time.time() log_buffer.clear() data_time_all = 0 logs.update(lr=lr) accelerator.log(logs, step=global_step + start_step) global_step += 1 data_time_start= 
time.time() accelerator.wait_for_everyone() if accelerator.is_main_process: if ((epoch - 1) * len(train_dataloader) + step + 1) % config.save_model_steps == 0: save_path = os.path.join(os.path.join(config.work_dir, 'checkpoints'), f"checkpoint-{(epoch - 1) * len(train_dataloader) + step + 1}") os.umask(0o000) logger.info(f"Start to save state to {save_path}") accelerator.save_state(save_path) logger.info(f"Saved state to {save_path}") accelerator.wait_for_everyone() if epoch % config.save_model_epochs == 0 or epoch == config.num_epochs: os.umask(0o000) save_path = os.path.join(os.path.join(config.work_dir, 'checkpoints'), f"checkpoint-{(epoch - 1) * len(train_dataloader) + step + 1}") logger.info(f"Start to save state to {save_path}") model = accelerator.unwrap_model(model) model.save_pretrained(save_path) lora_state_dict = get_peft_model_state_dict(model, adapter_name="default") StableDiffusionPipeline.save_lora_weights(os.path.join(save_path, "transformer_lora"), lora_state_dict) logger.info(f"Saved state to {save_path}") def parse_args(): parser = argparse.ArgumentParser(description="Process some integers.") parser.add_argument("config", type=str, help="config") parser.add_argument("--cloud", action='store_true', default=False, help="cloud or local machine") parser.add_argument("--work-dir", default='output', help='the dir to save logs and models') parser.add_argument("--resume-from", help='the dir to save logs and models') parser.add_argument("--local-rank", type=int, default=-1) parser.add_argument("--local_rank", type=int, default=-1) parser.add_argument("--debug", action='store_true') parser.add_argument("--lora_rank", type=int, default=64, help="The rank of the LoRA projection matrix.", ) args = parser.parse_args() return args if __name__ == '__main__': args = parse_args()
current_file_path = Path(__file__).resolve() sys.path.insert(0, str(current_file_path.parent.parent)) warnings.filterwarnings("ignore") # ignore warning def set_fsdp_env(): os.environ["ACCELERATE_USE_FSDP"] = 'true' os.environ["FSDP_AUTO_WRAP_POLICY"] = 'TRANSFORMER_BASED_WRAP' os.environ["FSDP_BACKWARD_PREFETCH"] = 'BACKWARD_PRE' os.environ["FSDP_TRANSFORMER_CLS_TO_WRAP"] = 'PixArtBlock' def filter_keys(key_set): def _f(dictionary): return {k: v for k, v in dictionary.items() if k in key_set} return _f def append_dims(x, target_dims): """Appends dimensions to the end of a tensor until it has target_dims dimensions.""" dims_to_append = target_dims - x.ndim if dims_to_append < 0: raise ValueError(f"input has {x.ndim} dims but target_dims is {target_dims}, which is less") return x[(...,) + (None,) * dims_to_append] # From LCMScheduler.get_scalings_for_boundary_condition_discrete def scalings_for_boundary_conditions(timestep, sigma_data=0.5, timestep_scaling=10.0): c_skip = sigma_data**2 / ((timestep / 0.1) ** 2 + sigma_data**2) c_out = (timestep / 0.1) / ((timestep / 0.1) ** 2 + sigma_data**2) ** 0.5 return c_skip, c_out # Compare LCMScheduler.step, Step 4 def predicted_origin(model_output, timesteps, sample, prediction_type, alphas, sigmas): if prediction_type == "epsilon": sigmas = extract_into_tensor(sigmas, timesteps, sample.shape) alphas = extract_into_tensor(alphas, timesteps, sample.shape) pred_x_0 = (sample - sigmas * model_output) / alphas elif prediction_type == "v_prediction": sigmas = extract_into_tensor(sigmas, timesteps, sample.shape) alphas = extract_into_tensor(alphas, timesteps, sample.shape) pred_x_0 = alphas * sample - sigmas * model_output else: raise ValueError(f"Prediction type {prediction_type} currently not supported.") return pred_x_0 def extract_into_tensor(a, t, x_shape): b, *_ = t.shape out = a.gather(-1, t) return out.reshape(b, *((1,) * (len(x_shape) - 1))) class DDIMSolver: def __init__(self, alpha_cumprods, timesteps=1000, ddim_timesteps=50): # DDIM sampling parameters step_ratio = timesteps // ddim_timesteps self.ddim_timesteps = (np.arange(1, ddim_timesteps + 1) * step_ratio).round().astype(np.int64) - 1 self.ddim_alpha_cumprods = alpha_cumprods[self.ddim_timesteps] self.ddim_alpha_cumprods_prev = np.asarray( [alpha_cumprods[0]] + alpha_cumprods[self.ddim_timesteps[:-1]].tolist() ) # convert to torch tensors self.ddim_timesteps = torch.from_numpy(self.ddim_timesteps).long() self.ddim_alpha_cumprods = torch.from_numpy(self.ddim_alpha_cumprods) self.ddim_alpha_cumprods_prev = torch.from_numpy(self.ddim_alpha_cumprods_prev) def to(self, device): self.ddim_timesteps = self.ddim_timesteps.to(device) self.ddim_alpha_cumprods = self.ddim_alpha_cumprods.to(device) self.ddim_alpha_cumprods_prev = self.ddim_alpha_cumprods_prev.to(device) return self def ddim_step(self, pred_x0, pred_noise, timestep_index): alpha_cumprod_prev = extract_into_tensor(self.ddim_alpha_cumprods_prev, timestep_index, pred_x0.shape) dir_xt = (1.0 - alpha_cumprod_prev).sqrt() * pred_noise x_prev = alpha_cumprod_prev.sqrt() * pred_x0 + dir_xt return x_prev def train(model): if config.get('debug_nan', False): DebugUnderflowOverflow(model) logger.info('NaN debugger registered. 
Start to detect overflow during training.') time_start, last_tic = time.time(), time.time() log_buffer = LogBuffer() global_step = start_step load_vae_feat = getattr(train_dataloader.dataset, 'load_vae_feat', False) # Create uncond embeds for classifier free guidance uncond_prompt_embeds = torch.load('output/pretrained_models/null_embed.pth', map_location='cpu').to(accelerator.device).repeat(config.train_batch_size, 1, 1, 1) # Now you train the model for epoch in range(start_epoch + 1, config.num_epochs + 1): data_time_start= time.time() data_time_all = 0 for step, batch in enumerate(train_dataloader): data_time_all += time.time() - data_time_start if load_vae_feat: z = batch[0] else: with torch.no_grad(): with torch.cuda.amp.autocast(enabled=config.mixed_precision == 'fp16'): posterior = vae.encode(batch[0]).latent_dist if config.sample_posterior: z = posterior.sample() else: z = posterior.mode() latents = (z * config.scale_factor).to(weight_dtype) y = batch[1].squeeze(1).to(weight_dtype) y_mask = batch[2].squeeze(1).squeeze(1).to(weight_dtype) data_info = {'resolution': batch[3]['img_hw'].to(weight_dtype), 'aspect_ratio': batch[3]['aspect_ratio'].to(weight_dtype),} # Sample a random timestep for each image grad_norm = None with accelerator.accumulate(model): # Predict the noise residual optimizer.zero_grad() # Sample noise that we'll add to the latents noise = torch.randn_like(latents) bsz = latents.shape[0] # Sample a random timestep for each image t_n ~ U[0, N - k - 1] without bias. topk = config.train_sampling_steps // config.num_ddim_timesteps index = torch.randint(0, config.num_ddim_timesteps, (bsz,), device=latents.device).long() start_timesteps = solver.ddim_timesteps[index] timesteps = start_timesteps - topk timesteps = torch.where(timesteps < 0, torch.zeros_like(timesteps), timesteps) # Get boundary scalings for start_timesteps and (end) timesteps. 
c_skip_start, c_out_start = scalings_for_boundary_conditions(start_timesteps) c_skip_start, c_out_start = [append_dims(x, latents.ndim) for x in [c_skip_start, c_out_start]] c_skip, c_out = scalings_for_boundary_conditions(timesteps) c_skip, c_out = [append_dims(x, latents.ndim) for x in [c_skip, c_out]] # Sample a random guidance scale w from U[w_min, w_max] and embed it # w = (config.w_max - config.w_min) * torch.rand((bsz,)) + config.w_min w = config.cfg_scale * torch.ones((bsz,)) w = w.reshape(bsz, 1, 1, 1) w = w.to(device=latents.device, dtype=latents.dtype) # Get online LCM prediction on z_{t_{n + k}}, w, c, t_{n + k} _, pred_x_0, noisy_model_input = train_diffusion.training_losses_diffusers( model, latents, start_timesteps, model_kwargs=dict(encoder_hidden_states=y, encoder_attention_mask=y_mask, added_cond_kwargs=data_info), noise=noise ) model_pred = c_skip_start * noisy_model_input + c_out_start * pred_x_0 with torch.no_grad(): with torch.autocast("cuda"): cond_teacher_output, cond_pred_x0, _ = train_diffusion.training_losses_diffusers( model_teacher, latents, start_timesteps, model_kwargs=dict(encoder_hidden_states=y, encoder_attention_mask=y_mask, added_cond_kwargs=data_info), noise=noise ) # Get teacher model prediction on noisy_latents and unconditional embedding uncond_teacher_output, uncond_pred_x0, _ = train_diffusion.training_losses_diffusers( model_teacher, latents, start_timesteps, model_kwargs=dict(encoder_hidden_states=uncond_prompt_embeds, encoder_attention_mask=y_mask, added_cond_kwargs=data_info), noise=noise ) # Perform "CFG" to get x_prev estimate (using the LCM paper's CFG formulation) pred_x0 = cond_pred_x0 + w * (cond_pred_x0 - uncond_pred_x0) pred_noise = cond_teacher_output + w * (cond_teacher_output - uncond_teacher_output) x_prev = solver.ddim_step(pred_x0, pred_noise, index) # Get target LCM prediction on x_prev, w, c, t_n with torch.no_grad(): with torch.autocast("cuda", enabled=True): _, pred_x_0, _ = train_diffusion.training_losses_diffusers( model, x_prev.float(), timesteps, model_kwargs=dict(encoder_hidden_states=y, encoder_attention_mask=y_mask, added_cond_kwargs=data_info), skip_noise=True ) target = c_skip * x_prev + c_out * pred_x_0 # Calculate loss if config.loss_type == "l2": loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") elif config.loss_type == "huber": loss = torch.mean(torch.sqrt((model_pred.float() - target.float()) ** 2 + config.huber_c**2) - config.huber_c) accelerator.backward(loss) if accelerator.sync_gradients: grad_norm = accelerator.clip_grad_norm_(model.parameters(), config.gradient_clip) optimizer.step() lr_scheduler.step() optimizer.zero_grad(set_to_none=True) lr = lr_scheduler.get_last_lr()[0] logs = {"loss": accelerator.gather(loss).mean().item()} if grad_norm is not None: logs.update(grad_norm=accelerator.gather(grad_norm).mean().item()) log_buffer.update(logs) if (step + 1) % config.log_interval == 0 or (step + 1) == 1: t = (time.time() - last_tic) / config.log_interval t_d = data_time_all / config.log_interval avg_time = (time.time() - time_start) / (global_step + 1) eta = str(datetime.timedelta(seconds=int(avg_time * (total_steps - start_step - global_step - 1)))) eta_epoch = str(datetime.timedelta(seconds=int(avg_time * (len(train_dataloader) - step - 1)))) # avg_loss = sum(loss_buffer) / len(loss_buffer) log_buffer.average() info = f"Step/Epoch [{(epoch-1)*len(train_dataloader)+step+1}/{epoch}][{step + 1}/{len(train_dataloader)}]:total_eta: {eta}, " \ f"epoch_eta:{eta_epoch}, time_all:{t:.3f}, 
time_data:{t_d:.3f}, lr:{lr:.3e}, s:({data_info['resolution'][0][0].item()}, {data_info['resolution'][0][1].item()}), " info += ', '.join([f"{k}:{v:.4f}" for k, v in log_buffer.output.items()]) logger.info(info) last_tic = time.time() log_buffer.clear() data_time_all = 0 logs.update(lr=lr) accelerator.log(logs, step=global_step + start_step) global_step += 1 data_time_start= time.time() accelerator.wait_for_everyone() if accelerator.is_main_process: if ((epoch - 1) * len(train_dataloader) + step + 1) % config.save_model_steps == 0: save_path = os.path.join(os.path.join(config.work_dir, 'checkpoints'), f"checkpoint-{(epoch - 1) * len(train_dataloader) + step + 1}") os.umask(0o000) logger.info(f"Start to save state to {save_path}") accelerator.save_state(save_path) logger.info(f"Saved state to {save_path}") accelerator.wait_for_everyone() if epoch % config.save_model_epochs == 0 or epoch == config.num_epochs: os.umask(0o000) save_path = os.path.join(os.path.join(config.work_dir, 'checkpoints'), f"checkpoint-{(epoch - 1) * len(train_dataloader) + step + 1}") logger.info(f"Start to save state to {save_path}") model = accelerator.unwrap_model(model) model.save_pretrained(save_path) lora_state_dict = get_peft_model_state_dict(model, adapter_name="default") StableDiffusionPipeline.save_lora_weights(os.path.join(save_path, "transformer_lora"), lora_state_dict) logger.info(f"Saved state to {save_path}") def parse_args(): parser = argparse.ArgumentParser(description="Process some integers.") parser.add_argument("config", type=str, help="config") parser.add_argument("--cloud", action='store_true', default=False, help="cloud or local machine") parser.add_argument("--work-dir", default='output', help='the dir to save logs and models') parser.add_argument("--resume-from", help='the dir to save logs and models') parser.add_argument("--local-rank", type=int, default=-1) parser.add_argument("--local_rank", type=int, default=-1) parser.add_argument("--debug", action='store_true') parser.add_argument("--lora_rank", type=int, default=64, help="The rank of the LoRA projection matrix.", ) args = parser.parse_args() return args if __name__ == '__main__': args = parse_args()
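An illustrative aside on the teacher step in the code above: it first extrapolates the conditional and unconditional predictions with a classifier-free-guidance weight `w`, then applies the deterministic DDIM update from `DDIMSolver.ddim_step`. A minimal sketch with random latents (the shapes and the `w`/alpha values here are toy choices, not the repo's configuration):

```python
import torch

def cfg_ddim_step(cond_pred_x0, uncond_pred_x0, cond_noise, uncond_noise,
                  alpha_cumprod_prev, w):
    """Guidance-extrapolated DDIM update to the previous timestep (sketch)."""
    # CFG-style extrapolation on both the x0 estimate and the noise estimate.
    pred_x0 = cond_pred_x0 + w * (cond_pred_x0 - uncond_pred_x0)
    pred_noise = cond_noise + w * (cond_noise - uncond_noise)
    # Deterministic DDIM step: re-noise the guided x0 estimate with the guided noise.
    return alpha_cumprod_prev.sqrt() * pred_x0 + (1.0 - alpha_cumprod_prev).sqrt() * pred_noise

# Toy check on 4x8x8 latents with batch size 2; alpha_cumprod_prev broadcast as a scalar.
rand = lambda: torch.randn(2, 4, 8, 8)
x_prev = cfg_ddim_step(rand(), rand(), rand(), rand(),
                       alpha_cumprod_prev=torch.tensor(0.9), w=4.5)
print(x_prev.shape)  # torch.Size([2, 4, 8, 8])
```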
config = read_config(args.config)
8
2023-10-12 14:16:33+00:00
16k
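Before the next record (showlab/MotionDirector), a short numerical aside on the `scalings_for_boundary_conditions` helper from the PixArt LCM-LoRA snippet above: evaluating it at a few timesteps shows how the skip/output weights enforce the consistency boundary condition, i.e. the distilled model reduces to the identity map at `t = 0`. The timesteps below are arbitrary sample points.

```python
import torch

def scalings_for_boundary_conditions(timestep, sigma_data=0.5, timestep_scaling=10.0):
    # Same formula as in the training script above: the timestep is scaled by 10
    # (divided by 0.1) before being weighed against sigma_data.
    c_skip = sigma_data**2 / ((timestep / 0.1) ** 2 + sigma_data**2)
    c_out = (timestep / 0.1) / ((timestep / 0.1) ** 2 + sigma_data**2) ** 0.5
    return c_skip, c_out

for t in torch.tensor([0.0, 1.0, 10.0, 100.0, 999.0]):
    c_skip, c_out = scalings_for_boundary_conditions(t)
    print(f"t={t.item():6.1f}  c_skip={c_skip.item():.6f}  c_out={c_out.item():.6f}")

# At t=0: c_skip=1 and c_out=0, so
#   model_pred = c_skip * noisy_model_input + c_out * pred_x_0 == noisy_model_input,
# while for large t the prediction is dominated by pred_x_0 (c_skip -> 0, c_out -> 1).
```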
showlab/MotionDirector
MotionDirector_train.py
[ { "identifier": "UNet3DConditionModel", "path": "models/unet_3d_condition.py", "snippet": "class UNet3DConditionModel(ModelMixin, ConfigMixin):\n r\"\"\"\n UNet3DConditionModel is a conditional 2D UNet model that takes in a noisy sample, conditional state, and a timestep\n and returns sample shaped output.\n\n This model inherits from [`ModelMixin`]. Check the superclass documentation for the generic methods the library\n implements for all the models (such as downloading or saving, etc.)\n\n Parameters:\n sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`):\n Height and width of input/output sample.\n in_channels (`int`, *optional*, defaults to 4): The number of channels in the input sample.\n out_channels (`int`, *optional*, defaults to 4): The number of channels in the output.\n down_block_types (`Tuple[str]`, *optional*, defaults to `(\"CrossAttnDownBlock2D\", \"CrossAttnDownBlock2D\", \"CrossAttnDownBlock2D\", \"DownBlock2D\")`):\n The tuple of downsample blocks to use.\n up_block_types (`Tuple[str]`, *optional*, defaults to `(\"UpBlock2D\", \"CrossAttnUpBlock2D\", \"CrossAttnUpBlock2D\", \"CrossAttnUpBlock2D\",)`):\n The tuple of upsample blocks to use.\n block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`):\n The tuple of output channels for each block.\n layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block.\n downsample_padding (`int`, *optional*, defaults to 1): The padding to use for the downsampling convolution.\n mid_block_scale_factor (`float`, *optional*, defaults to 1.0): The scale factor to use for the mid block.\n act_fn (`str`, *optional*, defaults to `\"silu\"`): The activation function to use.\n norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization.\n If `None`, it will skip the normalization and activation layers in post-processing\n norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization.\n cross_attention_dim (`int`, *optional*, defaults to 1280): The dimension of the cross attention features.\n attention_head_dim (`int`, *optional*, defaults to 8): The dimension of the attention heads.\n \"\"\"\n\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optional[int] = None,\n in_channels: int = 4,\n out_channels: int = 4,\n down_block_types: Tuple[str] = (\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"DownBlock3D\",\n ),\n up_block_types: Tuple[str] = (\"UpBlock3D\", \"CrossAttnUpBlock3D\", \"CrossAttnUpBlock3D\", \"CrossAttnUpBlock3D\"),\n block_out_channels: Tuple[int] = (320, 640, 1280, 1280),\n layers_per_block: int = 2,\n downsample_padding: int = 1,\n mid_block_scale_factor: float = 1,\n act_fn: str = \"silu\",\n norm_num_groups: Optional[int] = 32,\n norm_eps: float = 1e-5,\n cross_attention_dim: int = 1024,\n attention_head_dim: Union[int, Tuple[int]] = 64,\n ):\n super().__init__()\n\n self.sample_size = sample_size\n self.gradient_checkpointing = False\n # Check inputs\n if len(down_block_types) != len(up_block_types):\n raise ValueError(\n f\"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}.\"\n )\n\n if len(block_out_channels) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. 
`down_block_types`: {down_block_types}.\"\n )\n\n if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}.\"\n )\n\n # input\n conv_in_kernel = 3\n conv_out_kernel = 3\n conv_in_padding = (conv_in_kernel - 1) // 2\n self.conv_in = nn.Conv2d(\n in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding\n )\n\n # time\n time_embed_dim = block_out_channels[0] * 4\n self.time_proj = Timesteps(block_out_channels[0], True, 0)\n timestep_input_dim = block_out_channels[0]\n\n self.time_embedding = TimestepEmbedding(\n timestep_input_dim,\n time_embed_dim,\n act_fn=act_fn,\n )\n\n self.transformer_in = TransformerTemporalModel(\n num_attention_heads=8,\n attention_head_dim=attention_head_dim,\n in_channels=block_out_channels[0],\n num_layers=1,\n )\n\n # class embedding\n self.down_blocks = nn.ModuleList([])\n self.up_blocks = nn.ModuleList([])\n\n if isinstance(attention_head_dim, int):\n attention_head_dim = (attention_head_dim,) * len(down_block_types)\n\n # down\n output_channel = block_out_channels[0]\n for i, down_block_type in enumerate(down_block_types):\n input_channel = output_channel\n output_channel = block_out_channels[i]\n is_final_block = i == len(block_out_channels) - 1\n\n down_block = get_down_block(\n down_block_type,\n num_layers=layers_per_block,\n in_channels=input_channel,\n out_channels=output_channel,\n temb_channels=time_embed_dim,\n add_downsample=not is_final_block,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[i],\n downsample_padding=downsample_padding,\n dual_cross_attention=False,\n )\n self.down_blocks.append(down_block)\n\n # mid\n self.mid_block = UNetMidBlock3DCrossAttn(\n in_channels=block_out_channels[-1],\n temb_channels=time_embed_dim,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n output_scale_factor=mid_block_scale_factor,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[-1],\n resnet_groups=norm_num_groups,\n dual_cross_attention=False,\n )\n\n # count how many layers upsample the images\n self.num_upsamplers = 0\n\n # up\n reversed_block_out_channels = list(reversed(block_out_channels))\n reversed_attention_head_dim = list(reversed(attention_head_dim))\n\n output_channel = reversed_block_out_channels[0]\n for i, up_block_type in enumerate(up_block_types):\n is_final_block = i == len(block_out_channels) - 1\n\n prev_output_channel = output_channel\n output_channel = reversed_block_out_channels[i]\n input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]\n\n # add upsample block for all BUT final layer\n if not is_final_block:\n add_upsample = True\n self.num_upsamplers += 1\n else:\n add_upsample = False\n\n up_block = get_up_block(\n up_block_type,\n num_layers=layers_per_block + 1,\n in_channels=input_channel,\n out_channels=output_channel,\n prev_output_channel=prev_output_channel,\n temb_channels=time_embed_dim,\n add_upsample=add_upsample,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=reversed_attention_head_dim[i],\n dual_cross_attention=False,\n )\n self.up_blocks.append(up_block)\n prev_output_channel = 
output_channel\n\n # out\n if norm_num_groups is not None:\n self.conv_norm_out = nn.GroupNorm(\n num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps\n )\n self.conv_act = nn.SiLU()\n else:\n self.conv_norm_out = None\n self.conv_act = None\n\n conv_out_padding = (conv_out_kernel - 1) // 2\n self.conv_out = nn.Conv2d(\n block_out_channels[0], out_channels, kernel_size=conv_out_kernel, padding=conv_out_padding\n )\n\n def set_attention_slice(self, slice_size):\n r\"\"\"\n Enable sliced attention computation.\n\n When this option is enabled, the attention module will split the input tensor in slices, to compute attention\n in several steps. This is useful to save some memory in exchange for a small speed decrease.\n\n Args:\n slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `\"auto\"`):\n When `\"auto\"`, halves the input to the attention heads, so attention will be computed in two steps. If\n `\"max\"`, maxium amount of memory will be saved by running only one slice at a time. If a number is\n provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`\n must be a multiple of `slice_size`.\n \"\"\"\n sliceable_head_dims = []\n\n def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):\n if hasattr(module, \"set_attention_slice\"):\n sliceable_head_dims.append(module.sliceable_head_dim)\n\n for child in module.children():\n fn_recursive_retrieve_slicable_dims(child)\n\n # retrieve number of attention layers\n for module in self.children():\n fn_recursive_retrieve_slicable_dims(module)\n\n num_slicable_layers = len(sliceable_head_dims)\n\n if slice_size == \"auto\":\n # half the attention head size is usually a good trade-off between\n # speed and memory\n slice_size = [dim // 2 for dim in sliceable_head_dims]\n elif slice_size == \"max\":\n # make smallest slice possible\n slice_size = num_slicable_layers * [1]\n\n slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size\n\n if len(slice_size) != len(sliceable_head_dims):\n raise ValueError(\n f\"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different\"\n f\" attention layers. 
Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.\"\n )\n\n for i in range(len(slice_size)):\n size = slice_size[i]\n dim = sliceable_head_dims[i]\n if size is not None and size > dim:\n raise ValueError(f\"size {size} has to be smaller or equal to {dim}.\")\n\n # Recursively walk through all the children.\n # Any children which exposes the set_attention_slice method\n # gets the message\n def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):\n if hasattr(module, \"set_attention_slice\"):\n module.set_attention_slice(slice_size.pop())\n\n for child in module.children():\n fn_recursive_set_attention_slice(child, slice_size)\n\n reversed_slice_size = list(reversed(slice_size))\n for module in self.children():\n fn_recursive_set_attention_slice(module, reversed_slice_size)\n\n def _set_gradient_checkpointing(self, value=False):\n self.gradient_checkpointing = value\n self.mid_block.gradient_checkpointing = value\n for module in self.down_blocks + self.up_blocks:\n if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D, CrossAttnUpBlock3D, UpBlock3D)):\n module.gradient_checkpointing = value\n\n def forward(\n self,\n sample: torch.FloatTensor,\n timestep: Union[torch.Tensor, float, int],\n encoder_hidden_states: torch.Tensor,\n class_labels: Optional[torch.Tensor] = None,\n timestep_cond: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,\n mid_block_additional_residual: Optional[torch.Tensor] = None,\n return_dict: bool = True,\n ) -> Union[UNet3DConditionOutput, Tuple]:\n r\"\"\"\n Args:\n sample (`torch.FloatTensor`): (batch, num_frames, channel, height, width) noisy inputs tensor\n timestep (`torch.FloatTensor` or `float` or `int`): (batch) timesteps\n encoder_hidden_states (`torch.FloatTensor`): (batch, sequence_length, feature_dim) encoder hidden states\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`models.unet_2d_condition.UNet3DConditionOutput`] instead of a plain tuple.\n cross_attention_kwargs (`dict`, *optional*):\n A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under\n `self.processor` in\n [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).\n\n Returns:\n [`~models.unet_2d_condition.UNet3DConditionOutput`] or `tuple`:\n [`~models.unet_2d_condition.UNet3DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. 
When\n returning a tuple, the first element is the sample tensor.\n \"\"\"\n # By default samples have to be AT least a multiple of the overall upsampling factor.\n # The overall upsampling factor is equal to 2 ** (# num of upsampling layears).\n # However, the upsampling interpolation output size can be forced to fit any upsampling size\n # on the fly if necessary.\n default_overall_up_factor = 2**self.num_upsamplers\n\n # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`\n forward_upsample_size = False\n upsample_size = None\n\n if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):\n logger.info(\"Forward upsample size to force interpolation output size.\")\n forward_upsample_size = True\n\n # prepare attention_mask\n if attention_mask is not None:\n attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0\n attention_mask = attention_mask.unsqueeze(1)\n\n # 1. time\n timesteps = timestep\n if not torch.is_tensor(timesteps):\n # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can\n # This would be a good case for the `match` statement (Python 3.10+)\n is_mps = sample.device.type == \"mps\"\n if isinstance(timestep, float):\n dtype = torch.float32 if is_mps else torch.float64\n else:\n dtype = torch.int32 if is_mps else torch.int64\n timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)\n elif len(timesteps.shape) == 0:\n timesteps = timesteps[None].to(sample.device)\n\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\n num_frames = sample.shape[2]\n timesteps = timesteps.expand(sample.shape[0])\n\n t_emb = self.time_proj(timesteps)\n\n # timesteps does not contain any weights and will always return f32 tensors\n # but time_embedding might actually be running in fp16. so we need to cast here.\n # there might be better ways to encapsulate this.\n t_emb = t_emb.to(dtype=self.dtype)\n\n emb = self.time_embedding(t_emb, timestep_cond)\n emb = emb.repeat_interleave(repeats=num_frames, dim=0)\n encoder_hidden_states = encoder_hidden_states.repeat_interleave(repeats=num_frames, dim=0)\n\n # 2. pre-process\n sample = sample.permute(0, 2, 1, 3, 4).reshape((sample.shape[0] * num_frames, -1) + sample.shape[3:])\n sample = self.conv_in(sample)\n \n if num_frames > 1:\n if self.gradient_checkpointing:\n sample = transformer_g_c(self.transformer_in, sample, num_frames)\n else:\n sample = self.transformer_in(sample, num_frames=num_frames).sample\n\n # 3. down\n down_block_res_samples = (sample,)\n for downsample_block in self.down_blocks:\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\n sample, res_samples = downsample_block(\n hidden_states=sample,\n temb=emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n num_frames=num_frames,\n cross_attention_kwargs=cross_attention_kwargs,\n )\n else:\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb, num_frames=num_frames)\n\n down_block_res_samples += res_samples\n\n if down_block_additional_residuals is not None:\n new_down_block_res_samples = ()\n\n for down_block_res_sample, down_block_additional_residual in zip(\n down_block_res_samples, down_block_additional_residuals\n ):\n down_block_res_sample = down_block_res_sample + down_block_additional_residual\n new_down_block_res_samples += (down_block_res_sample,)\n\n down_block_res_samples = new_down_block_res_samples\n\n # 4. 
mid\n if self.mid_block is not None:\n sample = self.mid_block(\n sample,\n emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n num_frames=num_frames,\n cross_attention_kwargs=cross_attention_kwargs,\n )\n\n if mid_block_additional_residual is not None:\n sample = sample + mid_block_additional_residual\n\n # 5. up\n for i, upsample_block in enumerate(self.up_blocks):\n is_final_block = i == len(self.up_blocks) - 1\n\n res_samples = down_block_res_samples[-len(upsample_block.resnets) :]\n down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]\n\n # if we have not reached the final block and need to forward the\n # upsample size, we do it here\n if not is_final_block and forward_upsample_size:\n upsample_size = down_block_res_samples[-1].shape[2:]\n\n if hasattr(upsample_block, \"has_cross_attention\") and upsample_block.has_cross_attention:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n encoder_hidden_states=encoder_hidden_states,\n upsample_size=upsample_size,\n attention_mask=attention_mask,\n num_frames=num_frames,\n cross_attention_kwargs=cross_attention_kwargs,\n )\n else:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n upsample_size=upsample_size,\n num_frames=num_frames,\n )\n\n # 6. post-process\n if self.conv_norm_out:\n sample = self.conv_norm_out(sample)\n sample = self.conv_act(sample)\n\n sample = self.conv_out(sample)\n\n # reshape to (batch, channel, framerate, width, height)\n sample = sample[None, :].reshape((-1, num_frames) + sample.shape[1:]).permute(0, 2, 1, 3, 4)\n\n if not return_dict:\n return (sample,)\n\n return UNet3DConditionOutput(sample=sample)" }, { "identifier": "VideoJsonDataset", "path": "utils/dataset.py", "snippet": "class VideoJsonDataset(Dataset):\n def __init__(\n self,\n tokenizer = None,\n width: int = 256,\n height: int = 256,\n n_sample_frames: int = 4,\n sample_start_idx: int = 1,\n frame_step: int = 1,\n json_path: str =\"\",\n json_data = None,\n vid_data_key: str = \"video_path\",\n preprocessed: bool = False,\n use_bucketing: bool = False,\n **kwargs\n ):\n self.vid_types = (\".mp4\", \".avi\", \".mov\", \".webm\", \".flv\", \".mjpeg\")\n self.use_bucketing = use_bucketing\n self.tokenizer = tokenizer\n self.preprocessed = preprocessed\n \n self.vid_data_key = vid_data_key\n self.train_data = self.load_from_json(json_path, json_data)\n\n self.width = width\n self.height = height\n\n self.n_sample_frames = n_sample_frames\n self.sample_start_idx = sample_start_idx\n self.frame_step = frame_step\n\n def build_json(self, json_data):\n extended_data = []\n for data in json_data['data']:\n for nested_data in data['data']:\n self.build_json_dict(\n data, \n nested_data, \n extended_data\n )\n json_data = extended_data\n return json_data\n\n def build_json_dict(self, data, nested_data, extended_data):\n clip_path = nested_data['clip_path'] if 'clip_path' in nested_data else None\n \n extended_data.append({\n self.vid_data_key: data[self.vid_data_key],\n 'frame_index': nested_data['frame_index'],\n 'prompt': nested_data['prompt'],\n 'clip_path': clip_path\n })\n \n def load_from_json(self, path, json_data):\n try:\n with open(path) as jpath:\n print(f\"Loading JSON from {path}\")\n json_data = json.load(jpath)\n\n return self.build_json(json_data)\n\n except:\n self.train_data = []\n print(\"Non-existant JSON path. 
Skipping.\")\n \n def validate_json(self, base_path, path):\n return os.path.exists(f\"{base_path}/{path}\")\n\n def get_frame_range(self, vr):\n return get_video_frames(\n vr, \n self.sample_start_idx, \n self.frame_step, \n self.n_sample_frames\n )\n \n def get_vid_idx(self, vr, vid_data=None):\n frames = self.n_sample_frames\n\n if vid_data is not None:\n idx = vid_data['frame_index']\n else:\n idx = self.sample_start_idx\n\n return idx\n\n def get_frame_buckets(self, vr):\n _, h, w = vr[0].shape \n width, height = sensible_buckets(self.width, self.height, h, w)\n # width, height = self.width, self.height\n resize = T.transforms.Resize((height, width), antialias=True)\n\n return resize\n\n def get_frame_batch(self, vr, resize=None):\n frame_range = self.get_frame_range(vr)\n frames = vr.get_batch(frame_range)\n video = rearrange(frames, \"f h w c -> f c h w\")\n\n if resize is not None: video = resize(video)\n return video\n\n def process_video_wrapper(self, vid_path):\n video, vr = process_video(\n vid_path,\n self.use_bucketing,\n self.width, \n self.height, \n self.get_frame_buckets, \n self.get_frame_batch\n )\n \n return video, vr \n\n def train_data_batch(self, index):\n\n # If we are training on individual clips.\n if 'clip_path' in self.train_data[index] and \\\n self.train_data[index]['clip_path'] is not None:\n\n vid_data = self.train_data[index]\n\n clip_path = vid_data['clip_path']\n \n # Get video prompt\n prompt = vid_data['prompt']\n\n video, _ = self.process_video_wrapper(clip_path)\n\n prompt_ids = get_prompt_ids(prompt, self.tokenizer)\n\n return video, prompt, prompt_ids\n\n # Assign train data\n train_data = self.train_data[index]\n \n # Get the frame of the current index.\n self.sample_start_idx = train_data['frame_index']\n \n # Initialize resize\n resize = None\n\n video, vr = self.process_video_wrapper(train_data[self.vid_data_key])\n\n # Get video prompt\n prompt = train_data['prompt']\n vr.seek(0)\n\n prompt_ids = get_prompt_ids(prompt, self.tokenizer)\n\n return video, prompt, prompt_ids\n\n @staticmethod\n def __getname__(): return 'json'\n\n def __len__(self):\n if self.train_data is not None:\n return len(self.train_data)\n else: \n return 0\n\n def __getitem__(self, index):\n \n # Initialize variables\n video = None\n prompt = None\n prompt_ids = None\n\n # Use default JSON training\n if self.train_data is not None:\n video, prompt, prompt_ids = self.train_data_batch(index)\n\n example = {\n \"pixel_values\": (video / 127.5 - 1.0),\n \"prompt_ids\": prompt_ids[0],\n \"text_prompt\": prompt,\n 'dataset': self.__getname__()\n }\n\n return example" }, { "identifier": "SingleVideoDataset", "path": "utils/dataset.py", "snippet": "class SingleVideoDataset(Dataset):\n def __init__(\n self,\n tokenizer = None,\n width: int = 256,\n height: int = 256,\n n_sample_frames: int = 4,\n frame_step: int = 1,\n single_video_path: str = \"\",\n single_video_prompt: str = \"\",\n use_caption: bool = False,\n use_bucketing: bool = False,\n **kwargs\n ):\n self.tokenizer = tokenizer\n self.use_bucketing = use_bucketing\n self.frames = []\n self.index = 1\n\n self.vid_types = (\".mp4\", \".avi\", \".mov\", \".webm\", \".flv\", \".mjpeg\")\n self.n_sample_frames = n_sample_frames\n self.frame_step = frame_step\n\n self.single_video_path = single_video_path\n self.single_video_prompt = single_video_prompt\n\n self.width = width\n self.height = height\n def create_video_chunks(self):\n vr = decord.VideoReader(self.single_video_path)\n vr_range = range(0, len(vr), 
self.frame_step)\n\n self.frames = list(self.chunk(vr_range, self.n_sample_frames))\n return self.frames\n\n def chunk(self, it, size):\n it = iter(it)\n return iter(lambda: tuple(islice(it, size)), ())\n\n def get_frame_batch(self, vr, resize=None):\n index = self.index\n frames = vr.get_batch(self.frames[self.index])\n video = rearrange(frames, \"f h w c -> f c h w\")\n\n if resize is not None: video = resize(video)\n return video\n\n def get_frame_buckets(self, vr):\n h, w, c = vr[0].shape\n width, height = sensible_buckets(self.width, self.height, w, h)\n resize = T.transforms.Resize((height, width), antialias=True)\n\n return resize\n \n def process_video_wrapper(self, vid_path):\n video, vr = process_video(\n vid_path,\n self.use_bucketing,\n self.width, \n self.height, \n self.get_frame_buckets, \n self.get_frame_batch\n )\n \n return video, vr \n\n def single_video_batch(self, index):\n train_data = self.single_video_path\n self.index = index\n\n if train_data.endswith(self.vid_types):\n video, _ = self.process_video_wrapper(train_data)\n\n prompt = self.single_video_prompt\n prompt_ids = get_prompt_ids(prompt, self.tokenizer)\n\n return video, prompt, prompt_ids\n else:\n raise ValueError(f\"Single video is not a video type. Types: {self.vid_types}\")\n \n @staticmethod\n def __getname__(): return 'single_video'\n\n def __len__(self):\n \n return len(self.create_video_chunks())\n\n def __getitem__(self, index):\n\n video, prompt, prompt_ids = self.single_video_batch(index)\n\n example = {\n \"pixel_values\": (video / 127.5 - 1.0),\n \"prompt_ids\": prompt_ids[0],\n \"text_prompt\": prompt,\n 'dataset': self.__getname__()\n }\n\n return example" }, { "identifier": "ImageDataset", "path": "utils/dataset.py", "snippet": "class ImageDataset(Dataset):\n \n def __init__(\n self,\n tokenizer = None,\n width: int = 256,\n height: int = 256,\n base_width: int = 256,\n base_height: int = 256,\n use_caption: bool = False,\n image_dir: str = '',\n single_img_prompt: str = '',\n use_bucketing: bool = False,\n fallback_prompt: str = '',\n **kwargs\n ):\n self.tokenizer = tokenizer\n self.img_types = (\".png\", \".jpg\", \".jpeg\", '.bmp')\n self.use_bucketing = use_bucketing\n\n self.image_dir = self.get_images_list(image_dir)\n self.fallback_prompt = fallback_prompt\n\n self.use_caption = use_caption\n self.single_img_prompt = single_img_prompt\n\n self.width = width\n self.height = height\n\n def get_images_list(self, image_dir):\n if os.path.exists(image_dir):\n imgs = [x for x in os.listdir(image_dir) if x.endswith(self.img_types)]\n full_img_dir = []\n\n for img in imgs: \n full_img_dir.append(f\"{image_dir}/{img}\")\n\n return sorted(full_img_dir)\n\n return ['']\n\n def image_batch(self, index):\n train_data = self.image_dir[index]\n img = train_data\n\n try:\n img = torchvision.io.read_image(img, mode=torchvision.io.ImageReadMode.RGB)\n except:\n img = T.transforms.PILToTensor()(Image.open(img).convert(\"RGB\"))\n\n width = self.width\n height = self.height\n\n if self.use_bucketing:\n _, h, w = img.shape\n width, height = sensible_buckets(width, height, w, h)\n \n resize = T.transforms.Resize((height, width), antialias=True)\n\n img = resize(img) \n img = repeat(img, 'c h w -> f c h w', f=16)\n\n prompt = get_text_prompt(\n file_path=train_data,\n text_prompt=self.single_img_prompt,\n fallback_prompt=self.fallback_prompt,\n ext_types=self.img_types, \n use_caption=True\n )\n prompt_ids = get_prompt_ids(prompt, self.tokenizer)\n\n return img, prompt, prompt_ids\n\n @staticmethod\n def 
__getname__(): return 'image'\n \n def __len__(self):\n # Image directory\n if os.path.exists(self.image_dir[0]):\n return len(self.image_dir)\n else:\n return 0\n\n def __getitem__(self, index):\n img, prompt, prompt_ids = self.image_batch(index)\n example = {\n \"pixel_values\": (img / 127.5 - 1.0),\n \"prompt_ids\": prompt_ids[0],\n \"text_prompt\": prompt, \n 'dataset': self.__getname__()\n }\n\n return example" }, { "identifier": "VideoFolderDataset", "path": "utils/dataset.py", "snippet": "class VideoFolderDataset(Dataset):\n def __init__(\n self,\n tokenizer=None,\n width: int = 256,\n height: int = 256,\n n_sample_frames: int = 16,\n fps: int = 8,\n path: str = \"./data\",\n fallback_prompt: str = \"\",\n use_bucketing: bool = False,\n **kwargs\n ):\n self.tokenizer = tokenizer\n self.use_bucketing = use_bucketing\n\n self.fallback_prompt = fallback_prompt\n\n self.video_files = glob(f\"{path}/*.mp4\")\n\n self.width = width\n self.height = height\n\n self.n_sample_frames = n_sample_frames\n self.fps = fps\n\n def get_frame_buckets(self, vr):\n h, w, c = vr[0].shape\n width, height = sensible_buckets(self.width, self.height, w, h)\n resize = T.transforms.Resize((height, width), antialias=True)\n\n return resize\n\n def get_frame_batch(self, vr, resize=None):\n n_sample_frames = self.n_sample_frames\n native_fps = vr.get_avg_fps()\n \n every_nth_frame = max(1, round(native_fps / self.fps))\n every_nth_frame = min(len(vr), every_nth_frame)\n \n effective_length = len(vr) // every_nth_frame\n if effective_length < n_sample_frames:\n n_sample_frames = effective_length\n\n effective_idx = random.randint(0, (effective_length - n_sample_frames))\n idxs = every_nth_frame * np.arange(effective_idx, effective_idx + n_sample_frames)\n\n video = vr.get_batch(idxs)\n video = rearrange(video, \"f h w c -> f c h w\")\n\n if resize is not None: video = resize(video)\n return video, vr\n \n def process_video_wrapper(self, vid_path):\n video, vr = process_video(\n vid_path,\n self.use_bucketing,\n self.width, \n self.height, \n self.get_frame_buckets, \n self.get_frame_batch\n )\n return video, vr\n \n def get_prompt_ids(self, prompt):\n return self.tokenizer(\n prompt,\n truncation=True,\n padding=\"max_length\",\n max_length=self.tokenizer.model_max_length,\n return_tensors=\"pt\",\n ).input_ids\n\n @staticmethod\n def __getname__(): return 'folder'\n\n def __len__(self):\n return len(self.video_files)\n\n def __getitem__(self, index):\n\n video, _ = self.process_video_wrapper(self.video_files[index])\n\n prompt = self.fallback_prompt\n\n prompt_ids = self.get_prompt_ids(prompt)\n\n return {\"pixel_values\": (video[0] / 127.5 - 1.0), \"prompt_ids\": prompt_ids[0], \"text_prompt\": prompt, 'dataset': self.__getname__()}" }, { "identifier": "CachedDataset", "path": "utils/dataset.py", "snippet": "class CachedDataset(Dataset):\n def __init__(self,cache_dir: str = ''):\n self.cache_dir = cache_dir\n self.cached_data_list = self.get_files_list()\n\n def get_files_list(self):\n tensors_list = [f\"{self.cache_dir}/{x}\" for x in os.listdir(self.cache_dir) if x.endswith('.pt')]\n return sorted(tensors_list)\n\n def __len__(self):\n return len(self.cached_data_list)\n\n def __getitem__(self, index):\n cached_latent = torch.load(self.cached_data_list[index], map_location='cuda:0')\n return cached_latent" }, { "identifier": "LoraHandler", "path": "utils/lora_handler.py", "snippet": "class LoraHandler(object):\n def __init__(\n self, \n version: LORA_VERSIONS = LoraVersions.cloneofsimo, \n use_unet_lora: bool 
= False,\n use_text_lora: bool = False,\n save_for_webui: bool = False,\n only_for_webui: bool = False,\n lora_bias: str = 'none',\n unet_replace_modules: list = None,\n text_encoder_replace_modules: list = None\n ):\n self.version = version\n self.lora_loader = self.get_lora_func(func_type=LoraFuncTypes.loader)\n self.lora_injector = self.get_lora_func(func_type=LoraFuncTypes.injector)\n self.lora_bias = lora_bias\n self.use_unet_lora = use_unet_lora\n self.use_text_lora = use_text_lora\n self.save_for_webui = save_for_webui\n self.only_for_webui = only_for_webui\n self.unet_replace_modules = unet_replace_modules\n self.text_encoder_replace_modules = text_encoder_replace_modules\n self.use_lora = any([use_text_lora, use_unet_lora])\n\n def is_cloneofsimo_lora(self):\n return self.version == LoraVersions.cloneofsimo\n\n\n def get_lora_func(self, func_type: LORA_FUNC_TYPES = LoraFuncTypes.loader):\n\n if self.is_cloneofsimo_lora():\n\n if func_type == LoraFuncTypes.loader:\n return monkeypatch_or_replace_lora_extended\n\n if func_type == LoraFuncTypes.injector:\n return inject_trainable_lora_extended\n \n assert \"LoRA Version does not exist.\"\n\n def check_lora_ext(self, lora_file: str):\n return lora_file.endswith(tuple(LORA_FILE_TYPES))\n\n def get_lora_file_path(\n self, \n lora_path: str, \n model: Union[UNet3DConditionModel, CLIPTextModel]\n ):\n if os.path.exists(lora_path):\n lora_filenames = [fns for fns in os.listdir(lora_path)]\n is_lora = self.check_lora_ext(lora_path)\n\n is_unet = isinstance(model, UNet3DConditionModel)\n is_text = isinstance(model, CLIPTextModel)\n idx = 0 if is_unet else 1\n\n base_name = FILE_BASENAMES[idx]\n \n for lora_filename in lora_filenames:\n is_lora = self.check_lora_ext(lora_filename)\n if not is_lora:\n continue\n \n if base_name in lora_filename:\n return os.path.join(lora_path, lora_filename)\n\n return None\n\n def handle_lora_load(self, file_name:str, lora_loader_args: dict = None):\n self.lora_loader(**lora_loader_args)\n print(f\"Successfully loaded LoRA from: {file_name}\")\n \n def load_lora(self, model, lora_path: str = '', lora_loader_args: dict = None,):\n try:\n lora_file = self.get_lora_file_path(lora_path, model)\n\n if lora_file is not None:\n lora_loader_args.update({\"lora_path\": lora_file})\n self.handle_lora_load(lora_file, lora_loader_args)\n\n else:\n print(f\"Could not load LoRAs for {model.__class__.__name__}. 
Injecting new ones instead...\")\n\n except Exception as e:\n print(f\"An error occurred while loading a LoRA file: {e}\")\n \n def get_lora_func_args(self, lora_path, use_lora, model, replace_modules, r, dropout, lora_bias, scale):\n return_dict = lora_args.copy()\n \n if self.is_cloneofsimo_lora():\n return_dict = filter_dict(return_dict, keys=CLONE_OF_SIMO_KEYS)\n return_dict.update({\n \"model\": model,\n \"loras\": self.get_lora_file_path(lora_path, model),\n \"target_replace_module\": replace_modules,\n \"r\": r,\n \"scale\": scale,\n \"dropout_p\": dropout,\n })\n\n return return_dict\n\n def do_lora_injection(\n self, \n model, \n replace_modules, \n bias='none',\n dropout=0,\n r=4,\n lora_loader_args=None,\n ): \n REPLACE_MODULES = replace_modules\n\n params = None\n negation = None\n is_injection_hybrid = False\n \n if self.is_cloneofsimo_lora():\n is_injection_hybrid = True\n injector_args = lora_loader_args\n\n params, negation = self.lora_injector(**injector_args) # inject_trainable_lora_extended\n for _up, _down in extract_lora_ups_down(\n model, \n target_replace_module=REPLACE_MODULES):\n\n if all(x is not None for x in [_up, _down]):\n print(f\"Lora successfully injected into {model.__class__.__name__}.\")\n\n break\n\n return params, negation, is_injection_hybrid\n\n return params, negation, is_injection_hybrid\n\n def add_lora_to_model(self, use_lora, model, replace_modules, dropout=0.0, lora_path='', r=16, scale=1.0):\n\n params = None\n negation = None\n\n lora_loader_args = self.get_lora_func_args(\n lora_path,\n use_lora,\n model,\n replace_modules,\n r,\n dropout,\n self.lora_bias,\n scale\n )\n\n if use_lora:\n params, negation, is_injection_hybrid = self.do_lora_injection(\n model, \n replace_modules, \n bias=self.lora_bias,\n lora_loader_args=lora_loader_args,\n dropout=dropout,\n r=r\n )\n\n if not is_injection_hybrid:\n self.load_lora(model, lora_path=lora_path, lora_loader_args=lora_loader_args)\n \n params = model if params is None else params\n return params, negation\n\n def save_cloneofsimo_lora(self, model, save_path, step, flag):\n \n def save_lora(model, name, condition, replace_modules, step, save_path, flag=None):\n if condition and replace_modules is not None:\n save_path = f\"{save_path}/{step}_{name}.pt\"\n save_lora_weight(model, save_path, replace_modules, flag)\n\n save_lora(\n model.unet, \n FILE_BASENAMES[0], \n self.use_unet_lora, \n self.unet_replace_modules, \n step,\n save_path,\n flag\n )\n save_lora(\n model.text_encoder, \n FILE_BASENAMES[1], \n self.use_text_lora, \n self.text_encoder_replace_modules, \n step, \n save_path,\n flag\n )\n\n # train_patch_pipe(model, self.use_unet_lora, self.use_text_lora)\n\n def save_lora_weights(self, model: None, save_path: str ='',step: str = '', flag=None):\n save_path = f\"{save_path}/lora\"\n os.makedirs(save_path, exist_ok=True)\n\n if self.is_cloneofsimo_lora():\n if any([self.save_for_webui, self.only_for_webui]):\n warnings.warn(\n \"\"\"\n You have 'save_for_webui' enabled, but are using cloneofsimo's LoRA implemention.\n Only 'stable_lora' is supported for saving to a compatible webui file.\n \"\"\"\n )\n self.save_cloneofsimo_lora(model, save_path, step, flag)" }, { "identifier": "extract_lora_child_module", "path": "utils/lora.py", "snippet": "def extract_lora_child_module(model, target_replace_module=DEFAULT_TARGET_REPLACE):\n\n loras = []\n\n for target_replace_module_i in target_replace_module:\n\n for _m, _n, _child_module in _find_modules(\n model,\n [target_replace_module_i],\n 
search_class=[LoraInjectedLinear, LoraInjectedConv2d, LoraInjectedConv3d],\n ):\n loras.append(_child_module)\n\n return loras" }, { "identifier": "ddim_inversion", "path": "utils/ddim_utils.py", "snippet": "@torch.no_grad()\ndef ddim_inversion(pipeline, ddim_scheduler, video_latent, num_inv_steps, prompt=\"\"):\n ddim_latents = ddim_loop(pipeline, ddim_scheduler, video_latent, num_inv_steps, prompt)\n return ddim_latents" } ]
import argparse import datetime import logging import inspect import math import os import random import gc import copy import torch import torch.nn.functional as F import torch.utils.checkpoint import diffusers import transformers import imageio import numpy as np import itertools import bitsandbytes as bnb from typing import Dict, Optional, Tuple from omegaconf import OmegaConf from torchvision import transforms from tqdm.auto import tqdm from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from models.unet_3d_condition import UNet3DConditionModel from diffusers.models import AutoencoderKL from diffusers import DDIMScheduler, TextToVideoSDPipeline from diffusers.optimization import get_scheduler from diffusers.utils.import_utils import is_xformers_available from diffusers.models.attention_processor import AttnProcessor2_0, Attention from diffusers.models.attention import BasicTransformerBlock from transformers import CLIPTextModel, CLIPTokenizer from transformers.models.clip.modeling_clip import CLIPEncoder from utils.dataset import VideoJsonDataset, SingleVideoDataset, \ ImageDataset, VideoFolderDataset, CachedDataset from einops import rearrange, repeat from utils.lora_handler import LoraHandler from utils.lora import extract_lora_child_module from utils.ddim_utils import ddim_inversion from xformers.ops import MemoryEfficientAttentionFlashAttentionOp
11326
already_printed_trainables = False logger = get_logger(__name__, log_level="INFO") def create_logging(logging, logger, accelerator): logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) def accelerate_set_verbose(accelerator): if accelerator.is_local_main_process: transformers.utils.logging.set_verbosity_warning() diffusers.utils.logging.set_verbosity_info() else: transformers.utils.logging.set_verbosity_error() diffusers.utils.logging.set_verbosity_error() def get_train_dataset(dataset_types, train_data, tokenizer): train_datasets = [] # Loop through all available datasets, get the name, then add to list of data to process.
already_printed_trainables = False logger = get_logger(__name__, log_level="INFO") def create_logging(logging, logger, accelerator): logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) def accelerate_set_verbose(accelerator): if accelerator.is_local_main_process: transformers.utils.logging.set_verbosity_warning() diffusers.utils.logging.set_verbosity_info() else: transformers.utils.logging.set_verbosity_error() diffusers.utils.logging.set_verbosity_error() def get_train_dataset(dataset_types, train_data, tokenizer): train_datasets = [] # Loop through all available datasets, get the name, then add to list of data to process.
for DataSet in [VideoJsonDataset, SingleVideoDataset, ImageDataset, VideoFolderDataset]:
4
2023-10-12 12:06:55+00:00
16k
NVlabs/EmerNeRF
builders.py
[ { "identifier": "SceneDataset", "path": "datasets/base/scene_dataset.py", "snippet": "class SceneDataset(abc.ABC):\n \"\"\"\n Base class for scene dataset.\n \"\"\"\n\n data_cfg: OmegaConf = None\n pixel_source: ScenePixelSource = None\n lidar_source: SceneLidarSource = None\n # training and testing indices are indices into the full dataset\n # train_indices are img indices, so the length is num_cams * num_timesteps\n train_indices: List[int] = None\n test_indices: List[int] = None\n # train_timesteps are timesteps, so the length is num_timesteps (len(unique_timesteps))\n train_timesteps: Tensor = None\n test_timesteps: Tensor = None\n\n # dataset wrappers\n # full: includes all data\n full_pixel_set: SplitWrapper = None\n full_lidar_set: SplitWrapper = None\n # train: includes only training data\n train_pixel_set: SplitWrapper = None\n train_lidar_set: SplitWrapper = None\n # test: includes only testing data\n test_pixel_set: SplitWrapper = None\n test_lidar_set: SplitWrapper = None\n\n def __init__(\n self,\n data_config: OmegaConf,\n ):\n super().__init__()\n self.data_cfg = data_config\n\n @abc.abstractmethod\n def build_data_source(self):\n \"\"\"\n Create the data source for the dataset.\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def build_split_wrapper(self):\n \"\"\"\n Makes each data source as a Pytorch Dataset.\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def split_train_test(self):\n raise NotImplementedError\n\n def get_aabb(self) -> Tensor:\n if self.lidar_source is not None:\n aabb = self.lidar_source.get_aabb()\n else:\n aabb = self.pixel_source.get_aabb()\n return aabb\n\n @property\n def num_cams(self) -> int:\n return self.pixel_source.num_cams\n\n @property\n def scene_idx(self) -> int:\n return self.data_cfg.scene_idx\n\n @property\n def num_img_timesteps(self) -> int:\n return self.pixel_source.num_timesteps\n\n @property\n def num_lidar_timesteps(self) -> int:\n if self.lidar_source is None:\n logger.warning(\"No lidar source, returning num_img_timesteps\")\n return self.num_img_timesteps\n return self.lidar_source.num_timesteps\n\n @property\n def num_train_timesteps(self) -> int:\n return len(self.train_timesteps)\n\n @property\n def num_test_timesteps(self) -> int:\n return len(self.test_timesteps)\n\n @property\n def unique_normalized_training_timestamps(self) -> Tensor:\n return self.pixel_source.unique_normalized_timestamps[self.train_timesteps]\n\n @property\n def device(self):\n return self.data_cfg.preload_device" }, { "identifier": "DensityField", "path": "radiance_fields/radiance_field.py", "snippet": "class DensityField(nn.Module):\n def __init__(\n self,\n xyz_encoder: HashEncoder,\n aabb: Union[Tensor, List[float]] = [[-1.0, -1.0, -1.0, 1.0, 1.0, 1.0]],\n num_dims: int = 3,\n density_activation: Callable = lambda x: trunc_exp(x - 1),\n unbounded: bool = False,\n base_mlp_layer_width: int = 64,\n ) -> None:\n super().__init__()\n if not isinstance(aabb, Tensor):\n aabb = torch.tensor(aabb, dtype=torch.float32)\n self.register_buffer(\"aabb\", aabb)\n self.num_dims = num_dims\n self.density_activation = density_activation\n self.unbounded = unbounded\n self.xyz_encoder = xyz_encoder\n\n # density head\n self.base_mlp = nn.Sequential(\n nn.Linear(self.xyz_encoder.n_output_dims, base_mlp_layer_width),\n nn.ReLU(),\n nn.Linear(base_mlp_layer_width, 1),\n )\n\n @property\n def device(self) -> torch.device:\n return self.aabb.device\n\n def set_aabb(self, aabb: Union[Tensor, List[float]]) -> None:\n if not 
isinstance(aabb, Tensor):\n aabb = torch.tensor(aabb, dtype=torch.float32)\n logger.info(f\"Set propnet aabb from {self.aabb} to {aabb}\")\n self.aabb.copy_(aabb)\n self.aabb = self.aabb.to(self.device)\n\n def forward(\n self, positions: Tensor, data_dict: Dict[str, Tensor] = None\n ) -> Dict[str, Tensor]:\n if self.unbounded:\n # use infinte norm to contract the positions for cuboid aabb\n positions = contract(positions, self.aabb, ord=float(\"inf\"))\n else:\n aabb_min, aabb_max = torch.split(self.aabb, 3, dim=-1)\n positions = (positions - aabb_min) / (aabb_max - aabb_min)\n selector = ((positions > 0.0) & (positions < 1.0)).all(dim=-1).to(positions)\n positions = positions * selector.unsqueeze(-1)\n xyz_encoding = self.xyz_encoder(positions.view(-1, self.num_dims))\n density_before_activation = self.base_mlp(xyz_encoding).view(\n list(positions.shape[:-1]) + [-1]\n )\n density = self.density_activation(density_before_activation)\n return {\"density\": density}" }, { "identifier": "RadianceField", "path": "radiance_fields/radiance_field.py", "snippet": "class RadianceField(nn.Module):\n def __init__(\n self,\n xyz_encoder: HashEncoder,\n dynamic_xyz_encoder: Optional[HashEncoder] = None,\n flow_xyz_encoder: Optional[HashEncoder] = None,\n aabb: Union[Tensor, List[float]] = [-1, -1, -1, 1, 1, 1],\n num_dims: int = 3,\n density_activation: Callable = lambda x: trunc_exp(x - 1),\n unbounded: bool = True,\n geometry_feature_dim: int = 15,\n base_mlp_layer_width: int = 64,\n head_mlp_layer_width: int = 64,\n enable_cam_embedding: bool = False,\n enable_img_embedding: bool = False,\n num_cams: int = 3,\n appearance_embedding_dim: int = 16,\n semantic_feature_dim: int = 64,\n feature_mlp_layer_width: int = 256,\n feature_embedding_dim: int = 768,\n enable_sky_head: bool = False,\n enable_shadow_head: bool = False,\n enable_feature_head: bool = False,\n num_train_timesteps: int = 0,\n interpolate_xyz_encoding: bool = False,\n enable_learnable_pe: bool = True,\n enable_temporal_interpolation: bool = False,\n ) -> None:\n super().__init__()\n # scene properties\n if not isinstance(aabb, Tensor):\n aabb = torch.tensor(aabb, dtype=torch.float32)\n self.register_buffer(\"aabb\", aabb)\n self.unbounded = unbounded\n self.num_cams = num_cams\n self.num_dims = num_dims\n self.density_activation = density_activation\n\n # appearance embedding\n self.enable_cam_embedding = enable_cam_embedding\n self.enable_img_embedding = enable_img_embedding\n self.appearance_embedding_dim = appearance_embedding_dim\n\n self.geometry_feature_dim = geometry_feature_dim\n # add semantic feature dim if feature head is enabled\n if not enable_feature_head:\n semantic_feature_dim = 0\n self.semantic_feature_dim = semantic_feature_dim\n\n # note: we use very conservative default values for mlps\n # usually you want to use larger ones\n\n # ======== Static Field ======== #\n self.xyz_encoder = xyz_encoder\n self.base_mlp = nn.Sequential(\n nn.Linear(self.xyz_encoder.n_output_dims, base_mlp_layer_width),\n nn.ReLU(),\n nn.Linear(\n base_mlp_layer_width, geometry_feature_dim + semantic_feature_dim\n ),\n )\n\n # ======== Dynamic Field ======== #\n self.interpolate_xyz_encoding = interpolate_xyz_encoding\n self.dynamic_xyz_encoder = dynamic_xyz_encoder\n self.enable_temporal_interpolation = enable_temporal_interpolation\n if self.dynamic_xyz_encoder is not None:\n # for temporal interpolation\n self.register_buffer(\"training_timesteps\", torch.zeros(num_train_timesteps))\n self.dynamic_base_mlp = nn.Sequential(\n 
nn.Linear(self.dynamic_xyz_encoder.n_output_dims, base_mlp_layer_width),\n nn.ReLU(),\n nn.Linear(\n base_mlp_layer_width,\n geometry_feature_dim + semantic_feature_dim,\n ),\n )\n\n # ======== Flow Field ======== #\n self.flow_xyz_encoder = flow_xyz_encoder\n if self.flow_xyz_encoder is not None:\n self.flow_mlp = nn.Sequential(\n nn.Linear(\n self.flow_xyz_encoder.n_output_dims,\n base_mlp_layer_width,\n ),\n nn.ReLU(),\n nn.Linear(base_mlp_layer_width, base_mlp_layer_width),\n nn.ReLU(),\n nn.Linear(base_mlp_layer_width, 6), # 3 for forward, 3 for backward\n # no activation function for flow\n )\n\n # appearance embedding\n if self.enable_cam_embedding:\n # per-camera embedding\n self.appearance_embedding = nn.Embedding(num_cams, appearance_embedding_dim)\n elif self.enable_img_embedding:\n # per-image embedding\n self.appearance_embedding = nn.Embedding(\n num_train_timesteps * num_cams, appearance_embedding_dim\n )\n else:\n self.appearance_embedding = None\n\n # direction encoding\n self.direction_encoding = SinusoidalEncoder(\n n_input_dims=3, min_deg=0, max_deg=4\n )\n\n # ======== Color Head ======== #\n self.rgb_head = MLP(\n in_dims=geometry_feature_dim\n + self.direction_encoding.n_output_dims\n + (\n appearance_embedding_dim\n if self.enable_cam_embedding or self.enable_img_embedding\n else 0 # 2 or 0?\n ),\n out_dims=3,\n num_layers=3,\n hidden_dims=head_mlp_layer_width,\n skip_connections=[1],\n )\n\n # ======== Shadow Head ======== #\n self.enable_shadow_head = enable_shadow_head\n if self.enable_shadow_head:\n self.shadow_head = nn.Sequential(\n nn.Linear(geometry_feature_dim, base_mlp_layer_width),\n nn.ReLU(),\n nn.Linear(base_mlp_layer_width, 1),\n nn.Sigmoid(),\n )\n\n # ======== Sky Head ======== #\n self.enable_sky_head = enable_sky_head\n if self.enable_sky_head:\n self.sky_head = MLP(\n in_dims=self.direction_encoding.n_output_dims\n + (\n appearance_embedding_dim\n if self.enable_cam_embedding or self.enable_img_embedding\n else 0\n ),\n out_dims=3,\n num_layers=3,\n hidden_dims=head_mlp_layer_width,\n skip_connections=[1],\n )\n if enable_feature_head:\n # feature sky head\n self.dino_sky_head = nn.Sequential(\n # TODO: remove appearance embedding from dino sky head\n nn.Linear(\n self.direction_encoding.n_output_dims\n + (\n appearance_embedding_dim\n if self.enable_cam_embedding or self.enable_img_embedding\n else 0\n ),\n feature_mlp_layer_width,\n ),\n nn.ReLU(),\n nn.Linear(feature_mlp_layer_width, feature_mlp_layer_width),\n nn.ReLU(),\n nn.Linear(feature_mlp_layer_width, feature_embedding_dim),\n )\n\n # ======== Feature Head ======== #\n self.enable_feature_head = enable_feature_head\n if self.enable_feature_head:\n self.dino_head = nn.Sequential(\n nn.Linear(semantic_feature_dim, feature_mlp_layer_width),\n nn.ReLU(),\n nn.Linear(feature_mlp_layer_width, feature_mlp_layer_width),\n nn.ReLU(),\n nn.Linear(feature_mlp_layer_width, feature_embedding_dim),\n )\n # placeholders for visualization, will be registered when available\n self.register_buffer(\n \"feats_reduction_mat\", torch.zeros(feature_embedding_dim, 3)\n )\n self.register_buffer(\"feat_color_min\", torch.zeros(3, dtype=torch.float32))\n self.register_buffer(\"feat_color_max\", torch.ones(3, dtype=torch.float32))\n\n # positional embedding (PE) decomposition\n self.enable_learnable_pe = enable_learnable_pe\n if self.enable_learnable_pe:\n # globally-shared low-resolution learnable PE map\n self.learnable_pe_map = nn.Parameter(\n 0.05 * torch.randn(1, feature_embedding_dim // 2, 80, 120),\n 
requires_grad=True,\n )\n # a PE head to decode PE features\n self.pe_head = nn.Sequential(\n nn.Linear(feature_embedding_dim // 2, feature_embedding_dim),\n )\n\n def register_normalized_training_timesteps(\n self, normalized_timesteps: Tensor, time_diff: float = None\n ) -> None:\n \"\"\"\n register normalized timesteps for temporal interpolation\n\n Args:\n normalized_timesteps (Tensor): normalized timesteps in [0, 1]\n time_diff (float, optional): time difference between two consecutive timesteps. Defaults to None.\n \"\"\"\n if self.dynamic_xyz_encoder is not None:\n # register timesteps for temporal interpolation\n self.training_timesteps.copy_(normalized_timesteps)\n self.training_timesteps = self.training_timesteps.to(self.device)\n if time_diff is not None:\n # use the provided time difference if available\n self.time_diff = time_diff\n else:\n if len(self.training_timesteps) > 1:\n # otherwise, compute the time difference from the provided timesteps\n # it's important to make sure the provided timesteps are consecutive\n self.time_diff = (\n self.training_timesteps[1] - self.training_timesteps[0]\n )\n else:\n self.time_diff = 0\n\n def set_aabb(self, aabb: Union[Tensor, List[float]]) -> None:\n \"\"\"\n register aabb for scene space\n \"\"\"\n if not isinstance(aabb, Tensor):\n aabb = torch.tensor(aabb, dtype=torch.float32)\n logger.info(f\"Set aabb from {self.aabb} to {aabb}\")\n self.aabb.copy_(aabb)\n self.aabb = self.aabb.to(self.device)\n\n def register_feats_reduction_mat(\n self,\n feats_reduction_mat: Tensor,\n feat_color_min: Tensor,\n feat_color_max: Tensor,\n ) -> None:\n \"\"\"\n A placeholder for registering the PCA reduction matrix and min/max values for visualization.\n You may not want to compute PCA reduction matrix every time from the dataset.\n \"\"\"\n # for visualization\n self.feats_reduction_mat.copy_(feats_reduction_mat)\n self.feat_color_min.copy_(feat_color_min)\n self.feat_color_max.copy_(feat_color_max)\n self.feats_reduction_mat = self.feats_reduction_mat.to(self.device)\n self.feat_color_min = self.feat_color_min.to(self.device)\n self.feat_color_max = self.feat_color_max.to(self.device)\n\n @property\n def device(self) -> torch.device:\n return self.aabb.device\n\n def contract_points(\n self,\n positions: Tensor,\n ) -> Tensor:\n \"\"\"\n contract [-inf, inf] points to the range [0, 1] for hash encoding\n\n Returns:\n normed_positions: [..., 3] in [0, 1]\n \"\"\"\n if self.unbounded:\n # use infinte norm to contract the positions for cuboid aabb\n normed_positions = contract(positions, self.aabb, ord=float(\"inf\"))\n else:\n aabb_min, aabb_max = torch.split(self.aabb, 3, dim=-1)\n normed_positions = (positions - aabb_min) / (aabb_max - aabb_min)\n selector = (\n ((normed_positions > 0.0) & (normed_positions < 1.0))\n .all(dim=-1)\n .to(positions)\n )\n normed_positions = normed_positions * selector.unsqueeze(-1)\n return normed_positions\n\n def forward_static_hash(\n self,\n positions: Tensor,\n ) -> Tensor:\n \"\"\"\n forward pass for static hash encoding\n\n Returns:\n encoded_features: [..., geometry_feature_dim + (semantic_feature_dim)]\n normed_positions: [..., 3] in [0, 1]\n \"\"\"\n normed_positions = self.contract_points(positions)\n xyz_encoding = self.xyz_encoder(normed_positions.view(-1, self.num_dims))\n encoded_features = self.base_mlp(xyz_encoding).view(\n list(normed_positions.shape[:-1]) + [-1]\n )\n return encoded_features, normed_positions\n\n def forward_dynamic_hash(\n self,\n normed_positions: Tensor,\n normed_timestamps: 
Tensor,\n return_hash_encodings: bool = False,\n ) -> Union[Tuple[Tensor, Tensor], Tensor]:\n \"\"\"\n forward pass for dynamic hash encoding\n\n Returns:\n encoded_dynamic_feats: [..., geometry_feature_dim + (semantic_feature_dim)]\n dynamic_xyz_encoding: [..., n_output_dims] (optional)\n \"\"\"\n if normed_timestamps.shape[-1] != 1:\n normed_timestamps = normed_timestamps.unsqueeze(-1)\n # To be fixed.\n # if self.training or not self.enable_temporal_interpolation:\n if True:\n temporal_positions = torch.cat(\n [normed_positions, normed_timestamps], dim=-1\n )\n dynamic_xyz_encoding = self.dynamic_xyz_encoder(\n temporal_positions.view(-1, self.num_dims + 1)\n ).view(list(temporal_positions.shape[:-1]) + [-1])\n encoded_dynamic_feats = self.dynamic_base_mlp(dynamic_xyz_encoding)\n else:\n encoded_dynamic_feats = temporal_interpolation(\n normed_timestamps,\n self.training_timesteps,\n normed_positions,\n self.dynamic_xyz_encoder,\n self.dynamic_base_mlp,\n interpolate_xyz_encoding=self.interpolate_xyz_encoding,\n )\n if return_hash_encodings:\n return encoded_dynamic_feats, dynamic_xyz_encoding\n else:\n return encoded_dynamic_feats\n\n def forward_flow_hash(\n self,\n normed_positions: Tensor,\n normed_timestamps: Tensor,\n ) -> Tuple[Tensor, Tensor]:\n \"\"\"\n forward pass for flow hash encoding\n\n Returns:\n flow: [..., 6] (forward_flow, backward_flow)\n \"\"\"\n if normed_timestamps.shape[-1] != 1:\n normed_timestamps = normed_timestamps.unsqueeze(-1)\n if self.training or not self.enable_temporal_interpolation:\n temporal_positions = torch.cat(\n [normed_positions, normed_timestamps], dim=-1\n )\n flow_xyz_encoding = self.flow_xyz_encoder(\n temporal_positions.view(-1, self.num_dims + 1)\n ).view(list(temporal_positions.shape[:-1]) + [-1])\n flow = self.flow_mlp(flow_xyz_encoding)\n else:\n flow = temporal_interpolation(\n normed_timestamps,\n self.training_timesteps,\n normed_positions,\n self.flow_xyz_encoder,\n self.flow_mlp,\n interpolate_xyz_encoding=True,\n )\n return flow\n\n def forward(\n self,\n positions: Tensor,\n directions: Tensor = None,\n data_dict: Dict[str, Tensor] = {},\n return_density_only: bool = False,\n combine_static_dynamic: bool = False,\n query_feature_head: bool = True,\n query_pe_head: bool = True,\n ) -> Dict[str, Tensor]:\n \"\"\"\n Args:\n positions: [..., 3]\n directions: [..., 3]\n data_dict: a dictionary containing additional data\n return_density_only: if True, only return density without querying other heads\n combine_static_dynamic: if True, combine static and dynamic predictions based on static and dynamic density\n in addition to returning separate results for static and dynamic fields\n query_feature_head: if True, query feature head\n query_pe_head: if True, query PE head. 
Disable this if we want to directly query 3D features.\n Returns:\n results_dict: a dictionary containing everything\n \"\"\"\n results_dict = {}\n # forward static branch\n encoded_features, normed_positions = self.forward_static_hash(positions)\n geo_feats, semantic_feats = torch.split(\n encoded_features,\n [self.geometry_feature_dim, self.semantic_feature_dim],\n dim=-1,\n )\n static_density = self.density_activation(geo_feats[..., 0])\n\n has_timestamps = (\n \"normed_timestamps\" in data_dict or \"lidar_normed_timestamps\" in data_dict\n )\n if self.dynamic_xyz_encoder is not None and has_timestamps:\n # forward dynamic branch\n if \"normed_timestamps\" in data_dict:\n normed_timestamps = data_dict[\"normed_timestamps\"]\n elif \"lidar_normed_timestamps\" in data_dict:\n # we use `lidar_` prefix as an identifier to skip querying other heads\n normed_timestamps = data_dict[\"lidar_normed_timestamps\"]\n dynamic_feats, dynamic_hash_encodings = self.forward_dynamic_hash(\n normed_positions, normed_timestamps, return_hash_encodings=True\n )\n if self.flow_xyz_encoder is not None:\n flow = self.forward_flow_hash(normed_positions, normed_timestamps)\n forward_flow, backward_flow = flow[..., :3], flow[..., 3:]\n results_dict[\"forward_flow\"] = forward_flow\n results_dict[\"backward_flow\"] = backward_flow\n temporal_aggregation_results = self.temporal_aggregation(\n positions,\n normed_timestamps,\n forward_flow,\n backward_flow,\n dynamic_feats,\n )\n # overwrite dynamic feats using temporal aggregation results\n dynamic_feats = temporal_aggregation_results[\"dynamic_feats\"]\n # to be studied\n temporal_aggregation_results[\n \"current_dynamic_hash_encodings\"\n ] = dynamic_hash_encodings\n results_dict.update(temporal_aggregation_results)\n (dynamic_geo_feats, dynamic_semantic_feats,) = torch.split(\n dynamic_feats,\n [self.geometry_feature_dim, self.semantic_feature_dim],\n dim=-1,\n )\n dynamic_density = self.density_activation(dynamic_geo_feats[..., 0])\n # blend static and dynamic density to get the final density\n density = static_density + dynamic_density\n results_dict.update(\n {\n \"density\": density,\n \"static_density\": static_density,\n \"dynamic_density\": dynamic_density,\n }\n )\n if return_density_only:\n # skip querying other heads\n return results_dict\n\n if directions is not None:\n rgb_results = self.query_rgb(\n directions, geo_feats, dynamic_geo_feats, data_dict=data_dict\n )\n results_dict[\"dynamic_rgb\"] = rgb_results[\"dynamic_rgb\"]\n results_dict[\"static_rgb\"] = rgb_results[\"rgb\"]\n if combine_static_dynamic:\n static_ratio = static_density / (density + 1e-6)\n dynamic_ratio = dynamic_density / (density + 1e-6)\n results_dict[\"rgb\"] = (\n static_ratio[..., None] * results_dict[\"static_rgb\"]\n + dynamic_ratio[..., None] * results_dict[\"dynamic_rgb\"]\n )\n if self.enable_shadow_head:\n shadow_ratio = self.shadow_head(dynamic_geo_feats)\n results_dict[\"shadow_ratio\"] = shadow_ratio\n if combine_static_dynamic and \"rgb\" in results_dict:\n results_dict[\"rgb\"] = (\n static_ratio[..., None]\n * results_dict[\"rgb\"]\n * (1 - shadow_ratio)\n + dynamic_ratio[..., None] * results_dict[\"dynamic_rgb\"]\n )\n else:\n # if no dynamic branch, use static density\n results_dict[\"density\"] = static_density\n if return_density_only:\n # skip querying other heads\n return results_dict\n if directions is not None:\n rgb_results = self.query_rgb(directions, geo_feats, data_dict=data_dict)\n results_dict[\"rgb\"] = rgb_results[\"rgb\"]\n\n if 
self.enable_feature_head and query_feature_head:\n if self.enable_learnable_pe and query_pe_head:\n learnable_pe_map = (\n F.grid_sample(\n self.learnable_pe_map,\n # assume pixel coords have been normalize to [-1, 1]\n data_dict[\"pixel_coords\"].reshape(1, 1, -1, 2) * 2 - 1,\n align_corners=False, # didn't test with True\n mode=\"bilinear\", # didn't test with other modes\n )\n .squeeze(2)\n .squeeze(0)\n .permute(1, 0)\n )\n dino_pe = self.pe_head(learnable_pe_map)\n results_dict[\"dino_pe\"] = dino_pe\n dino_feats = self.dino_head(semantic_feats)\n\n if self.dynamic_xyz_encoder is not None and has_timestamps:\n dynamic_dino_feats = self.dino_head(dynamic_semantic_feats)\n results_dict[\"static_dino_feat\"] = dino_feats\n results_dict[\"dynamic_dino_feat\"] = dynamic_dino_feats\n if combine_static_dynamic:\n static_ratio = static_density / (density + 1e-6)\n dynamic_ratio = dynamic_density / (density + 1e-6)\n results_dict[\"dino_feat\"] = (\n static_ratio[..., None] * dino_feats\n + dynamic_ratio[..., None] * dynamic_dino_feats\n )\n else:\n results_dict[\"dino_feat\"] = dino_feats\n\n # query sky if not in lidar mode\n if (\n self.enable_sky_head\n and \"lidar_origin\" not in data_dict\n and directions is not None\n ):\n directions = directions[:, 0]\n reduced_data_dict = {k: v[:, 0] for k, v in data_dict.items()}\n sky_results = self.query_sky(directions, data_dict=reduced_data_dict)\n results_dict.update(sky_results)\n\n return results_dict\n\n def temporal_aggregation(\n self,\n positions: Tensor, # current world coordinates\n normed_timestamps: Tensor, # current normalized timestamps\n forward_flow: Tensor,\n backward_flow: Tensor,\n dynamic_feats: Tensor,\n ) -> Tensor:\n \"\"\"\n temporal aggregation for dynamic features\n Eq. (8) in the emernerf paper\n \"\"\"\n if normed_timestamps.shape[-1] != 1:\n normed_timestamps = normed_timestamps.unsqueeze(-1)\n if self.training:\n noise = torch.rand_like(forward_flow)[..., 0:1]\n else:\n noise = torch.ones_like(forward_flow)[..., 0:1]\n # forward and backward warped positions\n forward_warped_positions = self.contract_points(\n positions + forward_flow * noise\n )\n backward_warped_positions = self.contract_points(\n positions + backward_flow * noise\n )\n # forward and backward warped timestamps\n forward_warped_time = torch.clamp(\n normed_timestamps + self.time_diff * noise, 0, 1.0\n )\n backward_warped_time = torch.clamp(\n normed_timestamps - self.time_diff * noise, 0, 1.0\n )\n (\n forward_dynamic_feats,\n forward_dynamic_hash_encodings,\n ) = self.forward_dynamic_hash(\n forward_warped_positions,\n forward_warped_time,\n return_hash_encodings=True,\n )\n (\n backward_dynamic_feats,\n backward_dynamic_hash_encodings,\n ) = self.forward_dynamic_hash(\n backward_warped_positions,\n backward_warped_time,\n return_hash_encodings=True,\n )\n forward_pred_flow = self.forward_flow_hash(\n forward_warped_positions,\n forward_warped_time,\n )\n backward_pred_flow = self.forward_flow_hash(\n backward_warped_positions,\n backward_warped_time,\n )\n # simple weighted sum\n aggregated_dynamic_feats = (\n dynamic_feats + 0.5 * forward_dynamic_feats + 0.5 * backward_dynamic_feats\n ) / 2.0\n return {\n \"dynamic_feats\": aggregated_dynamic_feats,\n \"forward_pred_backward_flow\": forward_pred_flow[..., 3:],\n \"backward_pred_forward_flow\": backward_pred_flow[..., :3],\n # to be studied\n \"forward_dynamic_hash_encodings\": forward_dynamic_hash_encodings,\n \"backward_dynamic_hash_encodings\": backward_dynamic_hash_encodings,\n }\n\n def 
query_rgb(\n self,\n directions: Tensor,\n geo_feats: Tensor,\n dynamic_geo_feats: Tensor = None,\n data_dict: Dict[str, Tensor] = None,\n ) -> Tensor:\n directions = (directions + 1.0) / 2.0 # do we need this?\n h = self.direction_encoding(directions.reshape(-1, directions.shape[-1])).view(\n *directions.shape[:-1], -1\n )\n if self.enable_cam_embedding or self.enable_img_embedding:\n if \"cam_idx\" in data_dict and self.enable_cam_embedding:\n appearance_embedding = self.appearance_embedding(data_dict[\"cam_idx\"])\n elif \"img_idx\" in data_dict and self.enable_img_embedding:\n appearance_embedding = self.appearance_embedding(data_dict[\"img_idx\"])\n else:\n # use mean appearance embedding\n # print(\"using mean appearance embedding\")\n appearance_embedding = torch.ones(\n (*directions.shape[:-1], self.appearance_embedding_dim),\n device=directions.device,\n ) * self.appearance_embedding.weight.mean(dim=0)\n h = torch.cat([h, appearance_embedding], dim=-1)\n\n rgb = self.rgb_head(torch.cat([h, geo_feats], dim=-1))\n rgb = F.sigmoid(rgb)\n results = {\"rgb\": rgb}\n\n if self.dynamic_xyz_encoder is not None:\n assert (\n dynamic_geo_feats is not None\n ), \"Dynamic geometry features are not provided.\"\n dynamic_rgb = self.rgb_head(torch.cat([h, dynamic_geo_feats], dim=-1))\n dynamic_rgb = F.sigmoid(dynamic_rgb)\n results[\"dynamic_rgb\"] = dynamic_rgb\n return results\n\n def query_sky(\n self, directions: Tensor, data_dict: Dict[str, Tensor] = None\n ) -> Dict[str, Tensor]:\n if len(directions.shape) == 2:\n dd = self.direction_encoding(directions).to(directions)\n else:\n dd = self.direction_encoding(directions[:, 0]).to(directions)\n if self.enable_cam_embedding or self.enable_img_embedding:\n # optionally add appearance embedding\n if \"cam_idx\" in data_dict and self.enable_cam_embedding:\n appearance_embedding = self.appearance_embedding(data_dict[\"cam_idx\"])\n elif \"img_idx\" in data_dict and self.enable_img_embedding:\n appearance_embedding = self.appearance_embedding(data_dict[\"img_idx\"])\n else:\n # use mean appearance embedding\n appearance_embedding = torch.ones(\n (*directions.shape[:-1], self.appearance_embedding_dim),\n device=directions.device,\n ) * self.appearance_embedding.weight.mean(dim=0)\n dd = torch.cat([dd, appearance_embedding], dim=-1)\n rgb_sky = self.sky_head(dd).to(directions)\n rgb_sky = F.sigmoid(rgb_sky)\n results = {\"rgb_sky\": rgb_sky}\n if self.enable_feature_head:\n self.dino_sky_head(dd).to(directions)\n results[\"dino_sky_feat\"] = self.dino_sky_head(dd).to(directions)\n return results\n\n def query_flow(\n self, positions: Tensor, normed_timestamps: Tensor, query_density: bool = True\n ) -> Dict[str, Tensor]:\n \"\"\"\n query flow field\n \"\"\"\n normed_positions = self.contract_points(positions)\n flow = self.forward_flow_hash(normed_positions, normed_timestamps)\n results = {\n \"forward_flow\": flow[..., :3],\n \"backward_flow\": flow[..., 3:],\n }\n if query_density:\n # it's important to filter valid flows based on a dynamic density threshold.\n # flows are valid only if they are on dynamic points.\n dynamic_feats = self.forward_dynamic_hash(\n normed_positions, normed_timestamps\n )\n (dynamic_geo_feats, _,) = torch.split(\n dynamic_feats,\n [self.geometry_feature_dim, self.semantic_feature_dim],\n dim=-1,\n )\n dynamic_density = self.density_activation(dynamic_geo_feats[..., 0])\n results[\"dynamic_density\"] = dynamic_density\n return results\n\n def query_attributes(\n self,\n positions: Tensor,\n normed_timestamps: Tensor = 
None,\n query_feature_head: bool = True,\n ):\n \"\"\"\n query attributes (density, dino features, etc.)\n \"\"\"\n results_dict = {}\n encoded_features, normed_positions = self.forward_static_hash(positions)\n geo_feats, semantic_feats = torch.split(\n encoded_features,\n [self.geometry_feature_dim, self.semantic_feature_dim],\n dim=-1,\n )\n static_density = self.density_activation(geo_feats[..., 0])\n if self.dynamic_xyz_encoder is not None and normed_timestamps is not None:\n dynamic_feats, dynamic_hash_encodings = self.forward_dynamic_hash(\n normed_positions, normed_timestamps, return_hash_encodings=True\n )\n if self.flow_xyz_encoder is not None:\n flow = self.forward_flow_hash(normed_positions, normed_timestamps)\n forward_flow = flow[..., :3]\n backward_flow = flow[..., 3:]\n results_dict[\"forward_flow\"] = forward_flow\n results_dict[\"backward_flow\"] = backward_flow\n temporal_aggregation_results = self.temporal_aggregation(\n positions,\n normed_timestamps,\n forward_flow,\n backward_flow,\n dynamic_feats,\n )\n dynamic_feats = temporal_aggregation_results[\"dynamic_feats\"]\n temporal_aggregation_results[\n \"current_dynamic_hash_encodings\"\n ] = dynamic_hash_encodings\n results_dict.update(temporal_aggregation_results)\n\n (dynamic_geo_feats, dynamic_semantic_feats,) = torch.split(\n dynamic_feats,\n [self.geometry_feature_dim, self.semantic_feature_dim],\n dim=-1,\n )\n dynamic_density = self.density_activation(dynamic_geo_feats[..., 0])\n density = static_density + dynamic_density\n results_dict.update(\n {\n \"density\": density,\n \"static_density\": static_density,\n \"dynamic_density\": dynamic_density,\n # \"occupancy\": occupancy,\n }\n )\n else:\n results_dict[\"density\"] = static_density\n if self.enable_feature_head and query_feature_head:\n # query on demand\n dino_feats = self.dino_head(semantic_feats)\n if self.dynamic_xyz_encoder is not None and normed_timestamps is not None:\n dynamic_dino_feats = self.dino_head(dynamic_semantic_feats)\n results_dict[\"static_dino_feat\"] = dino_feats\n results_dict[\"dynamic_dino_feat\"] = dynamic_dino_feats\n results_dict[\"dino_feat\"] = (\n static_density.unsqueeze(-1) * dino_feats\n + dynamic_density.unsqueeze(-1) * dynamic_dino_feats\n ) / (density.unsqueeze(-1) + 1e-6)\n else:\n results_dict[\"dino_feat\"] = dino_feats\n return results_dict" }, { "identifier": "build_density_field", "path": "radiance_fields/radiance_field.py", "snippet": "def build_density_field(\n aabb: Union[Tensor, List[float]] = [[-1.0, -1.0, -1.0, 1.0, 1.0, 1.0]],\n type: Literal[\"HashEncoder\"] = \"HashEncoder\",\n n_input_dims: int = 3,\n n_levels: int = 5,\n base_resolution: int = 16,\n max_resolution: int = 128,\n log2_hashmap_size: int = 20,\n n_features_per_level: int = 2,\n unbounded: bool = True,\n) -> DensityField:\n if type == \"HashEncoder\":\n xyz_encoder = HashEncoder(\n n_input_dims=n_input_dims,\n n_levels=n_levels,\n base_resolution=base_resolution,\n max_resolution=max_resolution,\n log2_hashmap_size=log2_hashmap_size,\n n_features_per_level=n_features_per_level,\n )\n else:\n raise NotImplementedError(f\"Unknown (xyz_encoder) type: {type}\")\n return DensityField(\n xyz_encoder=xyz_encoder,\n aabb=aabb,\n unbounded=unbounded,\n )" }, { "identifier": "build_radiance_field_from_cfg", "path": "radiance_fields/radiance_field.py", "snippet": "def build_radiance_field_from_cfg(cfg, verbose=True) -> RadianceField:\n xyz_encoder = build_xyz_encoder_from_cfg(cfg.xyz_encoder, verbose=verbose)\n dynamic_xyz_encoder = None\n 
flow_xyz_encoder = None\n if cfg.head.enable_dynamic_branch:\n dynamic_xyz_encoder = build_xyz_encoder_from_cfg(\n cfg.dynamic_xyz_encoder, verbose=verbose\n )\n if cfg.head.enable_flow_branch:\n flow_xyz_encoder = HashEncoder(\n n_input_dims=4,\n n_levels=10,\n base_resolution=16,\n max_resolution=4096,\n log2_hashmap_size=18,\n n_features_per_level=4,\n )\n return RadianceField(\n xyz_encoder=xyz_encoder,\n dynamic_xyz_encoder=dynamic_xyz_encoder,\n flow_xyz_encoder=flow_xyz_encoder,\n unbounded=cfg.unbounded,\n num_cams=cfg.num_cams,\n geometry_feature_dim=cfg.neck.geometry_feature_dim,\n base_mlp_layer_width=cfg.neck.base_mlp_layer_width,\n head_mlp_layer_width=cfg.head.head_mlp_layer_width,\n enable_cam_embedding=cfg.head.enable_cam_embedding,\n enable_img_embedding=cfg.head.enable_img_embedding,\n appearance_embedding_dim=cfg.head.appearance_embedding_dim,\n enable_sky_head=cfg.head.enable_sky_head,\n enable_feature_head=cfg.head.enable_feature_head,\n semantic_feature_dim=cfg.neck.semantic_feature_dim,\n feature_mlp_layer_width=cfg.head.feature_mlp_layer_width,\n feature_embedding_dim=cfg.head.feature_embedding_dim,\n enable_shadow_head=cfg.head.enable_shadow_head,\n num_train_timesteps=cfg.num_train_timesteps, # placeholder\n interpolate_xyz_encoding=cfg.head.interpolate_xyz_encoding,\n enable_learnable_pe=cfg.head.enable_learnable_pe,\n enable_temporal_interpolation=cfg.head.enable_temporal_interpolation,\n )" }, { "identifier": "PropNetEstimator", "path": "third_party/nerfacc_prop_net.py", "snippet": "class PropNetEstimator(AbstractEstimator):\n \"\"\"Proposal network transmittance estimator.\n\n References: \"Mip-NeRF 360: Unbounded Anti-Aliased Neural Radiance Fields.\"\n\n Args:\n optimizer: The optimizer to use for the proposal networks.\n scheduler: The learning rate scheduler to use for the proposal networks.\n \"\"\"\n\n def __init__(\n self,\n optimizer: Optional[torch.optim.Optimizer] = None,\n scheduler: Optional[torch.optim.lr_scheduler._LRScheduler] = None,\n enable_anti_aliasing_loss: Optional[bool] = True,\n anti_aliasing_pulse_width: Optional[List[float]] = [0.03, 0.003],\n ) -> None:\n super().__init__()\n self.optimizer = optimizer\n self.scheduler = scheduler\n self.prop_cache: List = []\n self.enable_anti_aliasing_loss = enable_anti_aliasing_loss\n self.pulse_width = anti_aliasing_pulse_width\n if self.enable_anti_aliasing_loss:\n logger.info(\"Enable anti-aliasing loss, pulse width: %s\", self.pulse_width)\n\n @torch.no_grad()\n def sampling(\n self,\n prop_sigma_fns: List[Callable],\n prop_samples: List[int],\n num_samples: int,\n # rendering options\n n_rays: int,\n near_plane: float,\n far_plane: float,\n sampling_type: Literal[\n \"uniform\", \"lindisp\", \"sqrt\", \"log\", \"uniform_lindisp\"\n ] = \"uniform_lindisp\",\n # training options\n stratified: bool = False,\n requires_grad: bool = False,\n ) -> Tuple[Tensor, Tensor]:\n \"\"\"Sampling with CDFs from proposal networks.\n\n Note:\n When `requires_grad` is `True`, the gradients are allowed to flow\n through the proposal networks, and the outputs of the proposal\n networks are cached to update them later when calling `update_every_n_steps()`\n\n Args:\n prop_sigma_fns: Proposal network evaluate functions. It should be a list\n of functions that take in samples {t_starts (n_rays, n_samples),\n t_ends (n_rays, n_samples)} and returns the post-activation densities\n (n_rays, n_samples).\n prop_samples: Number of samples to draw from each proposal network. 
Should\n be the same length as `prop_sigma_fns`.\n num_samples: Number of samples to draw in the end.\n n_rays: Number of rays.\n near_plane: Near plane.\n far_plane: Far plane.\n sampling_type: Sampling type. Either \"uniform\" or \"lindisp\". Default to\n \"lindisp\".\n stratified: Whether to use stratified sampling. Default to `False`.\n requires_grad: Whether to allow gradients to flow through the proposal\n networks. Default to `False`.\n\n Returns:\n A tuple of {Tensor, Tensor}:\n\n - **t_starts**: The starts of the samples. Shape (n_rays, num_samples).\n - **t_ends**: The ends of the samples. Shape (n_rays, num_samples).\n\n \"\"\"\n assert len(prop_sigma_fns) == len(prop_samples), (\n \"The number of proposal networks and the number of samples \"\n \"should be the same.\"\n )\n cdfs = torch.cat(\n [\n torch.zeros((n_rays, 1), device=self.device),\n torch.ones((n_rays, 1), device=self.device),\n ],\n dim=-1,\n )\n intervals = RayIntervals(vals=cdfs)\n\n for i, (level_fn, level_samples) in enumerate(\n zip(prop_sigma_fns, prop_samples)\n ):\n intervals, _ = importance_sampling(\n intervals, cdfs, level_samples, stratified\n )\n t_vals = _transform_stot(\n sampling_type, intervals.vals, near_plane, far_plane\n )\n t_starts = t_vals[..., :-1]\n t_ends = t_vals[..., 1:]\n\n with torch.set_grad_enabled(requires_grad):\n sigmas = level_fn(t_starts, t_ends)[\"density\"].squeeze(-1)\n assert sigmas.shape == t_starts.shape\n trans, _ = render_transmittance_from_density(t_starts, t_ends, sigmas)\n cdfs = 1.0 - torch.cat(\n [trans, torch.zeros_like(trans[..., :1])], dim=-1\n )\n if requires_grad:\n self.prop_cache.append((intervals, cdfs, i))\n\n intervals, _ = importance_sampling(intervals, cdfs, num_samples, stratified)\n t_vals = _transform_stot(sampling_type, intervals.vals, near_plane, far_plane)\n t_starts = t_vals[..., :-1]\n t_ends = t_vals[..., 1:]\n if requires_grad:\n self.prop_cache.append((intervals, None, None))\n\n return t_starts, t_ends\n\n @torch.enable_grad()\n def compute_loss(self, trans: Tensor, loss_scaler: float = 1.0) -> Tensor:\n \"\"\"Compute the loss for the proposal networks.\n\n Args:\n trans: The transmittance of all samples. Shape (n_rays, num_samples).\n loss_scaler: The loss scaler. 
Default to 1.0.\n\n Returns:\n The loss for the proposal networks.\n \"\"\"\n if len(self.prop_cache) == 0:\n return torch.zeros((), device=self.device)\n\n intervals, _, _ = self.prop_cache.pop()\n # get cdfs at all edges of intervals\n cdfs = 1.0 - torch.cat([trans, torch.zeros_like(trans[..., :1])], dim=-1)\n cdfs = cdfs.detach()\n loss = 0.0\n\n if self.enable_anti_aliasing_loss:\n w_normalize = (cdfs[..., 1:] - cdfs[..., :-1]) / (\n intervals.vals[..., 1:] - intervals.vals[..., :-1]\n )\n c1, w1 = blur_stepfun(intervals.vals, w_normalize, self.pulse_width[0])\n c2, w2 = blur_stepfun(intervals.vals, w_normalize, self.pulse_width[1])\n area1 = 0.5 * (w1[..., 1:] + w1[..., :-1]) * (c1[..., 1:] - c1[..., :-1])\n area2 = 0.5 * (w2[..., 1:] + w2[..., :-1]) * (c2[..., 1:] - c2[..., :-1])\n cdfs1 = torch.cat(\n [\n torch.zeros_like(area1[..., :1]),\n torch.cumsum(area1, dim=-1),\n ],\n dim=-1,\n )\n cdfs2 = torch.cat(\n [\n torch.zeros_like(area2[..., :1]),\n torch.cumsum(area2, dim=-1),\n ],\n dim=-1,\n )\n cs = [c1, c2]\n ws = [w1, w2]\n _cdfs = [cdfs1, cdfs2]\n while self.prop_cache:\n prop_intervals, prop_cdfs, prop_id = self.prop_cache.pop()\n wp = prop_cdfs[..., 1:] - prop_cdfs[..., :-1]\n cdf_interp = sorted_interp_quad(\n prop_intervals.vals, cs[prop_id], ws[prop_id], _cdfs[prop_id]\n )\n w_s = torch.diff(cdf_interp, dim=-1)\n loss += ((w_s - wp).clamp_min(0) ** 2 / (wp + 1e-5)).mean()\n else:\n while self.prop_cache:\n prop_intervals, prop_cdfs, _ = self.prop_cache.pop()\n loss += _pdf_loss(intervals, cdfs, prop_intervals, prop_cdfs).mean()\n return loss * loss_scaler\n\n @torch.enable_grad()\n def update_every_n_steps(\n self,\n trans: Tensor,\n requires_grad: bool = False,\n loss_scaler: float = 1.0,\n ) -> float:\n \"\"\"Update the estimator every n steps during training.\n\n Args:\n trans: The transmittance of all samples. Shape (n_rays, num_samples).\n requires_grad: Whether to allow gradients to flow through the proposal\n networks. Default to `False`.\n loss_scaler: The loss scaler to use. Default to 1.0.\n\n Returns:\n The loss of the proposal networks for logging (a float scalar).\n \"\"\"\n if requires_grad:\n return self._update(trans=trans, loss_scaler=loss_scaler)\n else:\n if self.scheduler is not None:\n self.scheduler.step()\n return 0.0\n\n @torch.enable_grad()\n def _update(self, trans: Tensor, loss_scaler: float = 1.0) -> float:\n assert len(self.prop_cache) > 0\n assert self.optimizer is not None, \"No optimizer is provided.\"\n\n loss = self.compute_loss(trans, loss_scaler)\n\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n if self.scheduler is not None:\n self.scheduler.step()\n return loss.item()" } ]
import itertools import logging import torch from typing import List, Tuple from omegaconf import OmegaConf from datasets.base import SceneDataset from radiance_fields import ( DensityField, RadianceField, build_density_field, build_radiance_field_from_cfg, ) from third_party.nerfacc_prop_net import PropNetEstimator
12181
logger = logging.getLogger() def build_model_from_cfg( cfg: OmegaConf, dataset: SceneDataset, device: torch.device = torch.device("cpu"), ) -> RadianceField: cfg.num_train_timesteps = dataset.num_train_timesteps if dataset.test_pixel_set is not None: if cfg.head.enable_img_embedding: cfg.head.enable_cam_embedding = True cfg.head.enable_img_embedding = False logger.info( "Overriding enable_img_embedding to False because we have a test set." ) model = build_radiance_field_from_cfg(cfg) model.register_normalized_training_timesteps( dataset.unique_normalized_training_timestamps, time_diff=1 / dataset.num_img_timesteps, ) if dataset.aabb is not None and cfg.resume_from is None: model.set_aabb(dataset.aabb) if dataset.pixel_source.features is not None and cfg.head.enable_feature_head: # we cache the PCA reduction matrix and min/max values for visualization model.register_feats_reduction_mat( dataset.pixel_source.feat_dimension_reduction_mat, dataset.pixel_source.feat_color_min, dataset.pixel_source.feat_color_max, ) return model.to(device) def build_optimizer_from_cfg( cfg: OmegaConf, model: RadianceField ) -> torch.optim.Optimizer: # a very simple optimizer for now optimizer = torch.optim.Adam( model.parameters(), lr=cfg.lr, eps=1e-15, weight_decay=cfg.weight_decay, betas=(0.9, 0.99), ) return optimizer def build_scheduler_from_cfg( cfg: OmegaConf, optimizer: torch.optim.Optimizer ) -> torch.optim.Optimizer: # ------ build scheduler -------- # scheduler_milestones = [ cfg.num_iters // 2, cfg.num_iters * 3 // 4, cfg.num_iters * 9 // 10, ] if cfg.num_iters >= 10000: scheduler_milestones.insert(0, cfg.num_iters // 4) scheduler = torch.optim.lr_scheduler.ChainedScheduler( [ # warmup torch.optim.lr_scheduler.LinearLR( optimizer, start_factor=0.01, total_iters=cfg.num_iters // 10 ), # Linear decay torch.optim.lr_scheduler.MultiStepLR( optimizer, milestones=scheduler_milestones, gamma=0.33, ), ] ) return scheduler def build_estimator_and_propnet_from_cfg( nerf_cfg: OmegaConf, optim_cfg: OmegaConf, dataset: SceneDataset, device: torch.device = torch.device("cpu"),
logger = logging.getLogger() def build_model_from_cfg( cfg: OmegaConf, dataset: SceneDataset, device: torch.device = torch.device("cpu"), ) -> RadianceField: cfg.num_train_timesteps = dataset.num_train_timesteps if dataset.test_pixel_set is not None: if cfg.head.enable_img_embedding: cfg.head.enable_cam_embedding = True cfg.head.enable_img_embedding = False logger.info( "Overriding enable_img_embedding to False because we have a test set." ) model = build_radiance_field_from_cfg(cfg) model.register_normalized_training_timesteps( dataset.unique_normalized_training_timestamps, time_diff=1 / dataset.num_img_timesteps, ) if dataset.aabb is not None and cfg.resume_from is None: model.set_aabb(dataset.aabb) if dataset.pixel_source.features is not None and cfg.head.enable_feature_head: # we cache the PCA reduction matrix and min/max values for visualization model.register_feats_reduction_mat( dataset.pixel_source.feat_dimension_reduction_mat, dataset.pixel_source.feat_color_min, dataset.pixel_source.feat_color_max, ) return model.to(device) def build_optimizer_from_cfg( cfg: OmegaConf, model: RadianceField ) -> torch.optim.Optimizer: # a very simple optimizer for now optimizer = torch.optim.Adam( model.parameters(), lr=cfg.lr, eps=1e-15, weight_decay=cfg.weight_decay, betas=(0.9, 0.99), ) return optimizer def build_scheduler_from_cfg( cfg: OmegaConf, optimizer: torch.optim.Optimizer ) -> torch.optim.Optimizer: # ------ build scheduler -------- # scheduler_milestones = [ cfg.num_iters // 2, cfg.num_iters * 3 // 4, cfg.num_iters * 9 // 10, ] if cfg.num_iters >= 10000: scheduler_milestones.insert(0, cfg.num_iters // 4) scheduler = torch.optim.lr_scheduler.ChainedScheduler( [ # warmup torch.optim.lr_scheduler.LinearLR( optimizer, start_factor=0.01, total_iters=cfg.num_iters // 10 ), # Linear decay torch.optim.lr_scheduler.MultiStepLR( optimizer, milestones=scheduler_milestones, gamma=0.33, ), ] ) return scheduler def build_estimator_and_propnet_from_cfg( nerf_cfg: OmegaConf, optim_cfg: OmegaConf, dataset: SceneDataset, device: torch.device = torch.device("cpu"),
) -> Tuple[PropNetEstimator, List[DensityField]]:
1
2023-10-11 20:56:27+00:00
16k
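The scheduler built in the record above chains a linear warmup with milestone decay. Below is a minimal, self-contained sketch of that same pattern; the toy Linear model and the num_iters value are assumptions for illustration only and are not part of the original repository.

import torch

model = torch.nn.Linear(4, 4)   # toy stand-in for the RadianceField
num_iters = 20000               # assumed iteration budget for illustration
optimizer = torch.optim.Adam(model.parameters(), lr=1e-2, eps=1e-15, betas=(0.9, 0.99))

# Milestones at 25% (long runs only), 50%, 75% and 90% of training, as in the record above.
milestones = [num_iters // 2, num_iters * 3 // 4, num_iters * 9 // 10]
if num_iters >= 10000:
    milestones.insert(0, num_iters // 4)

scheduler = torch.optim.lr_scheduler.ChainedScheduler(
    [
        # warmup: lr ramps linearly from 1% to 100% over the first 10% of iterations
        torch.optim.lr_scheduler.LinearLR(optimizer, start_factor=0.01, total_iters=num_iters // 10),
        # decay: lr is multiplied by 0.33 each time a milestone is reached
        torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=milestones, gamma=0.33),
    ]
)

for step in range(num_iters):
    optimizer.step()    # forward/backward omitted in this sketch
    scheduler.step()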
alibaba-damo-academy/FunCodec
funcodec/train/gan_trainer.py
[ { "identifier": "AbsBatchStepScheduler", "path": "funcodec/schedulers/abs_scheduler.py", "snippet": "class AbsBatchStepScheduler(AbsScheduler):\n @abstractmethod\n def step(self, epoch: int = None):\n pass\n\n @abstractmethod\n def state_dict(self):\n pass\n\n @abstractmethod\n def load_state_dict(self, state):\n pass" }, { "identifier": "AbsScheduler", "path": "funcodec/schedulers/abs_scheduler.py", "snippet": "class AbsScheduler(ABC):\n @abstractmethod\n def step(self, epoch: int = None):\n pass\n\n @abstractmethod\n def state_dict(self):\n pass\n\n @abstractmethod\n def load_state_dict(self, state):\n pass" }, { "identifier": "to_device", "path": "funcodec/torch_utils/device_funcs.py", "snippet": "def to_device(data, device=None, dtype=None, non_blocking=False, copy=False):\n \"\"\"Change the device of object recursively\"\"\"\n if isinstance(data, dict):\n return {\n k: to_device(v, device, dtype, non_blocking, copy) for k, v in data.items()\n }\n elif dataclasses.is_dataclass(data) and not isinstance(data, type):\n return type(data)(\n *[\n to_device(v, device, dtype, non_blocking, copy)\n for v in dataclasses.astuple(data)\n ]\n )\n # maybe namedtuple. I don't know the correct way to judge namedtuple.\n elif isinstance(data, tuple) and type(data) is not tuple:\n return type(data)(\n *[to_device(o, device, dtype, non_blocking, copy) for o in data]\n )\n elif isinstance(data, (list, tuple)):\n return type(data)(to_device(v, device, dtype, non_blocking, copy) for v in data)\n elif isinstance(data, np.ndarray):\n return to_device(torch.from_numpy(data), device, dtype, non_blocking, copy)\n elif isinstance(data, torch.Tensor):\n return data.to(device, dtype, non_blocking, copy)\n else:\n return data" }, { "identifier": "recursive_average", "path": "funcodec/torch_utils/recursive_op.py", "snippet": "def recursive_average(obj, weight: torch.Tensor, distributed: bool = False):\n obj = recursive_sum(obj, weight, distributed)\n weight = weight.sum()\n if distributed:\n torch.distributed.all_reduce(weight, op=ReduceOp.SUM)\n # Normalize weight to be sum-to-1\n obj = recursive_divide(obj, weight)\n return obj, weight" }, { "identifier": "DistributedOption", "path": "funcodec/train/distributed_utils.py", "snippet": "class DistributedOption:\n # Enable distributed Training\n distributed: bool = False\n # torch.distributed.Backend: \"nccl\", \"mpi\", \"gloo\", or \"tcp\"\n dist_backend: str = \"nccl\"\n # if init_method=\"env://\",\n # env values of \"MASTER_PORT\", \"MASTER_ADDR\", \"WORLD_SIZE\", and \"RANK\" are referred.\n dist_init_method: str = \"env://\"\n dist_world_size: Optional[int] = None\n dist_rank: Optional[int] = None\n local_rank: Optional[int] = None\n ngpu: int = 0\n dist_master_addr: Optional[str] = None\n dist_master_port: Optional[int] = None\n dist_launcher: Optional[str] = None\n multiprocessing_distributed: bool = True\n\n def init_options(self):\n if self.distributed:\n if self.dist_init_method == \"env://\":\n if get_master_addr(self.dist_master_addr, self.dist_launcher) is None:\n raise RuntimeError(\n \"--dist_master_addr or MASTER_ADDR must be set \"\n \"if --dist_init_method == 'env://'\"\n )\n if get_master_port(self.dist_master_port) is None:\n raise RuntimeError(\n \"--dist_master_port or MASTER_PORT must be set \"\n \"if --dist_init_port == 'env://'\"\n )\n\n def init_torch_distributed(self, args):\n if self.distributed:\n # See:\n # https://docs.nvidia.com/deeplearning/sdk/nccl-developer-guide/docs/env.html\n os.environ.setdefault(\"NCCL_DEBUG\", \"INFO\")\n\n 
# See:\n # https://pytorch.org/docs/stable/distributed.html#torch.distributed.init_process_group\n os.environ.setdefault(\"NCCL_BLOCKING_WAIT\", \"1\")\n\n if args.dist_rank is not None and args.dist_world_size is not None:\n torch.distributed.init_process_group(backend='nccl',\n init_method=self.dist_init_method,\n world_size=args.dist_world_size,\n rank=args.dist_rank)\n else:\n torch.distributed.init_process_group(backend='nccl', init_method='env://')\n self.dist_rank = torch.distributed.get_rank()\n self.dist_world_size = torch.distributed.get_world_size()\n self.local_rank = args.local_rank\n\n def init_options_pai(self):\n if self.distributed:\n if self.dist_init_method == \"env://\":\n if get_master_addr(self.dist_master_addr, self.dist_launcher) is None:\n raise RuntimeError(\n \"--dist_master_addr or MASTER_ADDR must be set \"\n \"if --dist_init_method == 'env://'\"\n )\n if get_master_port(self.dist_master_port) is None:\n raise RuntimeError(\n \"--dist_master_port or MASTER_PORT must be set \"\n \"if --dist_init_port == 'env://'\"\n )\n\n self.dist_rank = get_rank(self.dist_rank, self.dist_launcher)\n self.dist_world_size = get_world_size(\n self.dist_world_size, self.dist_launcher\n )\n self.local_rank = get_local_rank(self.local_rank, self.dist_launcher)\n\n if (\n self.dist_rank is not None\n and self.dist_world_size is not None\n and self.dist_rank >= self.dist_world_size\n ):\n raise RuntimeError(\n f\"RANK >= WORLD_SIZE: {self.dist_rank} >= {self.dist_world_size}\"\n )\n\n if self.dist_init_method == \"env://\":\n self.dist_master_addr = get_master_addr(\n self.dist_master_addr, self.dist_launcher\n )\n self.dist_master_port = get_master_port(self.dist_master_port)\n if (\n self.dist_master_addr is not None\n and self.dist_master_port is not None\n ):\n self.dist_init_method = (\n f\"tcp://{self.dist_master_addr}:{self.dist_master_port}\"\n )\n\n def init_torch_distributed_pai(self, args):\n if self.distributed:\n # See:\n # https://docs.nvidia.com/deeplearning/sdk/nccl-developer-guide/docs/env.html\n os.environ.setdefault(\"NCCL_DEBUG\", \"INFO\")\n\n # See:\n # https://pytorch.org/docs/stable/distributed.html#torch.distributed.init_process_group\n os.environ.setdefault(\"NCCL_BLOCKING_WAIT\", \"1\")\n\n torch.distributed.init_process_group(backend='nccl', init_method='env://')\n torch.distributed.barrier()\n self.dist_rank = torch.distributed.get_rank()\n self.dist_world_size = torch.distributed.get_world_size()\n self.local_rank = args.local_rank" }, { "identifier": "SubReporter", "path": "funcodec/train/reporter.py", "snippet": "class SubReporter:\n \"\"\"This class is used in Reporter.\n\n See the docstring of Reporter for the usage.\n \"\"\"\n\n def __init__(self, key: str, epoch: int, total_count: int):\n assert check_argument_types()\n self.key = key\n self.epoch = epoch\n self.start_time = time.perf_counter()\n self.stats = defaultdict(list)\n self._finished = False\n self.total_count = total_count\n self.count = 0\n self._seen_keys_in_the_step = set()\n\n def get_total_count(self) -> int:\n \"\"\"Returns the number of iterations over all epochs.\"\"\"\n return self.total_count\n\n def get_epoch(self) -> int:\n return self.epoch\n\n def next(self):\n \"\"\"Close up this step and reset state for the next step\"\"\"\n for key, stats_list in self.stats.items():\n if key not in self._seen_keys_in_the_step:\n # Fill nan value if the key is not registered in this step\n if isinstance(stats_list[0], WeightedAverage):\n stats_list.append(to_reported_value(np.nan, 0))\n 
elif isinstance(stats_list[0], Average):\n stats_list.append(to_reported_value(np.nan))\n else:\n raise NotImplementedError(f\"type={type(stats_list[0])}\")\n\n assert len(stats_list) == self.count, (len(stats_list), self.count)\n\n self._seen_keys_in_the_step = set()\n\n def register(\n self,\n stats: Dict[str, Optional[Union[Num, Dict[str, Num]]]],\n weight: Num = None,\n ) -> None:\n assert check_argument_types()\n if self._finished:\n raise RuntimeError(\"Already finished\")\n if len(self._seen_keys_in_the_step) == 0:\n # Increment count as the first register in this step\n self.total_count += 1\n self.count += 1\n\n for key2, v in stats.items():\n if key2 in _reserved:\n raise RuntimeError(f\"{key2} is reserved.\")\n if key2 in self._seen_keys_in_the_step:\n raise RuntimeError(f\"{key2} is registered twice.\")\n if v is None:\n v = np.nan\n r = to_reported_value(v, weight)\n\n if key2 not in self.stats:\n # If it's the first time to register the key,\n # append nan values in front of the the value\n # to make it same length to the other stats\n # e.g.\n # stat A: [0.4, 0.3, 0.5]\n # stat B: [nan, nan, 0.2]\n nan = to_reported_value(np.nan, None if weight is None else 0)\n self.stats[key2].extend(\n r if i == self.count - 1 else nan for i in range(self.count)\n )\n else:\n self.stats[key2].append(r)\n self._seen_keys_in_the_step.add(key2)\n\n def log_message(self, start: int = None, end: int = None, num_updates: int = None) -> str:\n if self._finished:\n raise RuntimeError(\"Already finished\")\n if start is None:\n start = 0\n if start < 0:\n start = self.count + start\n if end is None:\n end = self.count\n\n if self.count == 0 or start == end:\n return \"\"\n\n message = f\"{self.epoch}epoch:{self.key}:\" f\"{start + 1}-{end}batch:\"\n if num_updates is not None:\n message += f\"{num_updates}num_updates: \"\n\n for idx, (key2, stats_list) in enumerate(self.stats.items()):\n assert len(stats_list) == self.count, (len(stats_list), self.count)\n # values: List[ReportValue]\n values = stats_list[start:end]\n if idx != 0 and idx != len(stats_list):\n message += \", \"\n\n v = aggregate(values)\n if abs(v) > 1.0e3:\n message += f\"{key2}={v:.3e}\"\n elif abs(v) > 1.0e-3:\n message += f\"{key2}={v:.3f}\"\n else:\n message += f\"{key2}={v:.3e}\"\n return message\n\n def tensorboard_add_scalar(self, summary_writer, start: int = None):\n if start is None:\n start = 0\n if start < 0:\n start = self.count + start\n\n for key2, stats_list in self.stats.items():\n assert len(stats_list) == self.count, (len(stats_list), self.count)\n # values: List[ReportValue]\n values = stats_list[start:]\n v = aggregate(values)\n summary_writer.add_scalar(f\"{key2}\", v, self.total_count)\n\n def wandb_log(self, start: int = None):\n import wandb\n\n if start is None:\n start = 0\n if start < 0:\n start = self.count + start\n\n d = {}\n for key2, stats_list in self.stats.items():\n assert len(stats_list) == self.count, (len(stats_list), self.count)\n # values: List[ReportValue]\n values = stats_list[start:]\n v = aggregate(values)\n d[wandb_get_prefix(key2) + key2] = v\n d[\"iteration\"] = self.total_count\n wandb.log(d)\n\n def finished(self) -> None:\n self._finished = True\n\n @contextmanager\n def measure_time(self, name: str):\n start = time.perf_counter()\n yield start\n t = time.perf_counter() - start\n self.register({name: t})\n\n def measure_iter_time(self, iterable, name: str):\n iterator = iter(iterable)\n while True:\n try:\n start = time.perf_counter()\n retval = next(iterator)\n t = 
time.perf_counter() - start\n self.register({name: t})\n yield retval\n except StopIteration:\n break" }, { "identifier": "Trainer", "path": "funcodec/train/trainer.py", "snippet": "class Trainer:\n \"\"\"Trainer having an optimizer.\n\n If you'd like to use multiple optimizers, then inherit this class\n and override the methods if necessary - at least \"train_one_epoch()\"\n\n >>> class TwoOptimizerTrainer(Trainer):\n ... @classmethod\n ... def add_arguments(cls, parser):\n ... ...\n ...\n ... @classmethod\n ... def train_one_epoch(cls, model, optimizers, ...):\n ... loss1 = model.model1(...)\n ... loss1.backward()\n ... optimizers[0].step()\n ...\n ... loss2 = model.model2(...)\n ... loss2.backward()\n ... optimizers[1].step()\n\n \"\"\"\n\n def __init__(self):\n raise RuntimeError(\"This class can't be instantiated.\")\n\n @classmethod\n def build_options(cls, args: argparse.Namespace) -> TrainerOptions:\n \"\"\"Build options consumed by train(), eval()\"\"\"\n assert check_argument_types()\n return build_dataclass(TrainerOptions, args)\n\n @classmethod\n def add_arguments(cls, parser: argparse.ArgumentParser):\n \"\"\"Reserved for future development of another Trainer\"\"\"\n pass\n\n @staticmethod\n def resume(\n checkpoint: Union[str, Path],\n model: torch.nn.Module,\n reporter: Reporter,\n optimizers: Sequence[torch.optim.Optimizer],\n schedulers: Sequence[Optional[AbsScheduler]],\n scaler: Optional[GradScaler],\n ngpu: int = 0,\n oss_bucket=None,\n ):\n logging.info(f\"Try to resume from {checkpoint}.\")\n if oss_bucket is None:\n if os.path.exists(checkpoint):\n states = torch.load(\n checkpoint,\n map_location=f\"cuda:{torch.cuda.current_device()}\" if ngpu > 0 else \"cpu\",\n )\n else:\n return 0\n else:\n if oss_bucket.object_exists(checkpoint):\n buffer = BytesIO(oss_bucket.get_object(checkpoint).read())\n states = torch.load(buffer, map_location=f\"cuda:{torch.cuda.current_device()}\" if ngpu > 0 else \"cpu\",)\n else:\n return 0\n from funcodec.torch_utils.load_pretrained_model import filter_state_dict\n dst_state = model.state_dict()\n src_state = states[\"model\"]\n src_state = filter_state_dict(dst_state, src_state)\n # logging.info(\"Resumed src_state keys: {}\".format(src_state.keys()))\n dst_state.update(src_state)\n model.load_state_dict(dst_state)\n # model.load_state_dict(states[\"model\"])\n reporter.load_state_dict(states[\"reporter\"])\n for optimizer, state in zip(optimizers, states[\"optimizers\"]):\n optimizer.load_state_dict(state)\n for scheduler, state in zip(schedulers, states[\"schedulers\"]):\n if scheduler is not None:\n scheduler.load_state_dict(state)\n if scaler is not None:\n if states[\"scaler\"] is None:\n logging.warning(\"scaler state is not found\")\n else:\n scaler.load_state_dict(states[\"scaler\"])\n\n logging.info(f\"The training was resumed using {checkpoint}\")\n\n @classmethod\n def run(\n cls,\n model: AbsESPnetModel,\n optimizers: Sequence[torch.optim.Optimizer],\n schedulers: Sequence[Optional[AbsScheduler]],\n train_iter_factory: AbsIterFactory,\n valid_iter_factory: AbsIterFactory,\n trainer_options,\n distributed_option: DistributedOption,\n ) -> None:\n \"\"\"Perform training. 
This method performs the main process of training.\"\"\"\n assert check_argument_types()\n # NOTE(kamo): Don't check the type more strictly as far trainer_options\n assert is_dataclass(trainer_options), type(trainer_options)\n assert len(optimizers) == len(schedulers), (len(optimizers), len(schedulers))\n\n if isinstance(trainer_options.keep_nbest_models, int):\n keep_nbest_models = [trainer_options.keep_nbest_models]\n else:\n if len(trainer_options.keep_nbest_models) == 0:\n logging.warning(\"No keep_nbest_models is given. Change to [1]\")\n trainer_options.keep_nbest_models = [1]\n keep_nbest_models = trainer_options.keep_nbest_models\n\n output_dir = Path(trainer_options.output_dir)\n reporter = Reporter()\n if trainer_options.use_amp:\n if LooseVersion(torch.__version__) < LooseVersion(\"1.6.0\"):\n raise RuntimeError(\n \"Require torch>=1.6.0 for Automatic Mixed Precision\"\n )\n if trainer_options.sharded_ddp:\n if fairscale is None:\n raise RuntimeError(\n \"Requiring fairscale. Do 'pip install fairscale'\"\n )\n scaler = fairscale.optim.grad_scaler.ShardedGradScaler()\n else:\n scaler = GradScaler()\n else:\n scaler = None\n\n if trainer_options.resume:\n cls.resume(\n checkpoint=os.path.join(trainer_options.output_dir, \"checkpoint.pth\"),\n model=model,\n optimizers=optimizers,\n schedulers=schedulers,\n reporter=reporter,\n scaler=scaler,\n ngpu=trainer_options.ngpu,\n oss_bucket=trainer_options.oss_bucket,\n )\n\n start_epoch = reporter.get_epoch() + 1\n if start_epoch == trainer_options.max_epoch + 1:\n logging.warning(\n f\"The training has already reached at max_epoch: {start_epoch}\"\n )\n\n if distributed_option.distributed:\n if trainer_options.sharded_ddp:\n dp_model = fairscale.nn.data_parallel.ShardedDataParallel(\n module=model,\n sharded_optimizer=optimizers,\n )\n else:\n dp_model = torch.nn.parallel.DistributedDataParallel(\n model, find_unused_parameters=trainer_options.unused_parameters)\n elif distributed_option.ngpu > 1:\n dp_model = torch.nn.parallel.DataParallel(\n model,\n device_ids=list(range(distributed_option.ngpu)),\n )\n else:\n # NOTE(kamo): DataParallel also should work with ngpu=1,\n # but for debuggability it's better to keep this block.\n dp_model = model\n\n if trainer_options.use_tensorboard and (\n not distributed_option.distributed or distributed_option.dist_rank == 0\n ):\n from torch.utils.tensorboard import SummaryWriter\n if trainer_options.use_pai:\n train_summary_writer = SummaryWriter(\n os.path.join(trainer_options.output_dir, \"tensorboard/train\")\n )\n valid_summary_writer = SummaryWriter(\n os.path.join(trainer_options.output_dir, \"tensorboard/valid\")\n )\n else:\n train_summary_writer = SummaryWriter(\n str(output_dir / \"tensorboard\" / \"train\")\n )\n valid_summary_writer = SummaryWriter(\n str(output_dir / \"tensorboard\" / \"valid\")\n )\n else:\n train_summary_writer = None\n\n start_time = time.perf_counter()\n for iepoch in range(start_epoch, trainer_options.max_epoch + 1):\n if iepoch != start_epoch:\n logging.info(\n \"{}/{}epoch started. Estimated time to finish: {}\".format(\n iepoch,\n trainer_options.max_epoch,\n humanfriendly.format_timespan(\n (time.perf_counter() - start_time)\n / (iepoch - start_epoch)\n * (trainer_options.max_epoch - iepoch + 1)\n ),\n )\n )\n else:\n logging.info(f\"{iepoch}/{trainer_options.max_epoch}epoch started\")\n set_all_random_seed(trainer_options.seed + iepoch)\n\n reporter.set_epoch(iepoch)\n # 1. 
Train and validation for one-epoch\n with reporter.observe(\"train\") as sub_reporter:\n all_steps_are_invalid, max_update_stop = cls.train_one_epoch(\n model=dp_model,\n optimizers=optimizers,\n schedulers=schedulers,\n iterator=train_iter_factory.build_iter(iepoch),\n reporter=sub_reporter,\n scaler=scaler,\n summary_writer=train_summary_writer,\n options=trainer_options,\n distributed_option=distributed_option,\n par_reporter=reporter,\n )\n\n with reporter.observe(\"valid\") as sub_reporter:\n cls.validate_one_epoch(\n model=dp_model,\n iterator=valid_iter_factory.build_iter(iepoch),\n reporter=sub_reporter,\n options=trainer_options,\n distributed_option=distributed_option,\n )\n\n # 2. LR Scheduler step\n for scheduler in schedulers:\n if isinstance(scheduler, AbsValEpochStepScheduler):\n scheduler.step(\n reporter.get_value(*trainer_options.val_scheduler_criterion)\n )\n elif isinstance(scheduler, AbsEpochStepScheduler):\n scheduler.step()\n if trainer_options.sharded_ddp:\n for optimizer in optimizers:\n if isinstance(optimizer, fairscale.optim.oss.OSS):\n optimizer.consolidate_state_dict()\n\n if not distributed_option.distributed or distributed_option.dist_rank == 0:\n # 3. Report the results\n logging.info(reporter.log_message())\n if train_summary_writer is not None:\n reporter.tensorboard_add_scalar(train_summary_writer, key1=\"train\")\n reporter.tensorboard_add_scalar(valid_summary_writer, key1=\"valid\")\n if trainer_options.use_wandb:\n reporter.wandb_log()\n\n # save tensorboard on oss\n if trainer_options.use_pai and train_summary_writer is not None:\n def write_tensorboard_summary(summary_writer_path, oss_bucket):\n file_list = []\n for root, dirs, files in os.walk(summary_writer_path, topdown=False):\n for name in files:\n file_full_path = os.path.join(root, name)\n file_list.append(file_full_path)\n\n for file_full_path in file_list:\n with open(file_full_path, \"rb\") as f:\n oss_bucket.put_object(file_full_path, f)\n\n write_tensorboard_summary(os.path.join(trainer_options.output_dir, \"tensorboard/train\"), trainer_options.oss_bucket)\n write_tensorboard_summary(os.path.join(trainer_options.output_dir, \"tensorboard/valid\"), trainer_options.oss_bucket)\n\n\n # 4. Save/Update the checkpoint\n if trainer_options.use_pai:\n buffer = BytesIO()\n torch.save(\n {\n \"model\": model.state_dict(),\n \"reporter\": reporter.state_dict(),\n \"optimizers\": [o.state_dict() for o in optimizers],\n \"schedulers\": [\n s.state_dict() if s is not None else None\n for s in schedulers\n ],\n \"scaler\": scaler.state_dict() if scaler is not None else None,\n \"ema_model\": model.encoder.ema.model.state_dict()\n if hasattr(model.encoder, \"ema\") and model.encoder.ema is not None else None,\n },\n buffer,\n )\n trainer_options.oss_bucket.put_object(os.path.join(trainer_options.output_dir, \"checkpoint.pth\"), buffer.getvalue())\n else:\n torch.save(\n {\n \"model\": model.state_dict(),\n \"reporter\": reporter.state_dict(),\n \"optimizers\": [o.state_dict() for o in optimizers],\n \"schedulers\": [\n s.state_dict() if s is not None else None\n for s in schedulers\n ],\n \"scaler\": scaler.state_dict() if scaler is not None else None,\n },\n output_dir / \"checkpoint.pth\",\n )\n\n # 5. 
Save and log the model and update the link to the best model\n if trainer_options.use_pai:\n buffer = BytesIO()\n torch.save(model.state_dict(), buffer)\n trainer_options.oss_bucket.put_object(os.path.join(trainer_options.output_dir,\n f\"{iepoch}epoch.pth\"),buffer.getvalue())\n else:\n torch.save(model.state_dict(), output_dir / f\"{iepoch}epoch.pth\")\n\n # Creates a sym link latest.pth -> {iepoch}epoch.pth\n if trainer_options.use_pai:\n p = os.path.join(trainer_options.output_dir, \"latest.pth\")\n if trainer_options.oss_bucket.object_exists(p):\n trainer_options.oss_bucket.delete_object(p)\n trainer_options.oss_bucket.copy_object(trainer_options.oss_bucket.bucket_name,\n os.path.join(trainer_options.output_dir, f\"{iepoch}epoch.pth\"), p)\n else:\n p = output_dir / \"latest.pth\"\n if p.is_symlink() or p.exists():\n p.unlink()\n p.symlink_to(f\"{iepoch}epoch.pth\")\n\n _improved = []\n for _phase, k, _mode in trainer_options.best_model_criterion:\n # e.g. _phase, k, _mode = \"train\", \"loss\", \"min\"\n if reporter.has(_phase, k):\n best_epoch = reporter.get_best_epoch(_phase, k, _mode)\n # Creates sym links if it's the best result\n if best_epoch == iepoch:\n if trainer_options.use_pai:\n p = os.path.join(trainer_options.output_dir, f\"{_phase}.{k}.best.pth\")\n if trainer_options.oss_bucket.object_exists(p):\n trainer_options.oss_bucket.delete_object(p)\n trainer_options.oss_bucket.copy_object(trainer_options.oss_bucket.bucket_name,\n os.path.join(trainer_options.output_dir, f\"{iepoch}epoch.pth\"),p)\n else:\n p = output_dir / f\"{_phase}.{k}.best.pth\"\n if p.is_symlink() or p.exists():\n p.unlink()\n p.symlink_to(f\"{iepoch}epoch.pth\")\n _improved.append(f\"{_phase}.{k}\")\n if len(_improved) == 0:\n logging.info(\"There are no improvements in this epoch\")\n else:\n logging.info(\n \"The best model has been updated: \" + \", \".join(_improved)\n )\n\n log_model = (\n trainer_options.wandb_model_log_interval > 0\n and iepoch % trainer_options.wandb_model_log_interval == 0\n )\n if log_model and trainer_options.use_wandb:\n import wandb\n\n logging.info(\"Logging Model on this epoch :::::\")\n artifact = wandb.Artifact(\n name=f\"model_{wandb.run.id}\",\n type=\"model\",\n metadata={\"improved\": _improved},\n )\n artifact.add_file(str(output_dir / f\"{iepoch}epoch.pth\"))\n aliases = [\n f\"epoch-{iepoch}\",\n \"best\" if best_epoch == iepoch else \"\",\n ]\n wandb.log_artifact(artifact, aliases=aliases)\n\n # 6. 
Remove the model files excluding n-best epoch and latest epoch\n _removed = []\n # Get the union set of the n-best among multiple criterion\n nbests = set().union(\n *[\n set(reporter.sort_epochs(ph, k, m)[: max(keep_nbest_models)])\n for ph, k, m in trainer_options.best_model_criterion\n if reporter.has(ph, k)\n ]\n )\n\n # Generated n-best averaged model\n if (\n trainer_options.nbest_averaging_interval > 0\n and iepoch % trainer_options.nbest_averaging_interval == 0\n ):\n average_nbest_models(\n reporter=reporter,\n output_dir=output_dir,\n best_model_criterion=trainer_options.best_model_criterion,\n nbest=keep_nbest_models,\n suffix=f\"till{iepoch}epoch\",\n oss_bucket=trainer_options.oss_bucket,\n pai_output_dir=trainer_options.output_dir,\n )\n\n for e in range(1, iepoch):\n if trainer_options.use_pai:\n p = os.path.join(trainer_options.output_dir, f\"{e}epoch.pth\")\n if trainer_options.oss_bucket.object_exists(p) and e not in nbests:\n trainer_options.oss_bucket.delete_object(p)\n _removed.append(str(p))\n else:\n p = output_dir / f\"{e}epoch.pth\"\n if p.exists() and e not in nbests:\n p.unlink()\n _removed.append(str(p))\n if len(_removed) != 0:\n logging.info(\"The model files were removed: \" + \", \".join(_removed))\n\n # 7. If any updating haven't happened, stops the training\n if all_steps_are_invalid:\n logging.warning(\n f\"The gradients at all steps are invalid in this epoch. \"\n f\"Something seems wrong. This training was stopped at {iepoch}epoch\"\n )\n break\n\n if max_update_stop:\n logging.info(\n f\"Stopping training due to \"\n f\"num_updates: {trainer_options.num_updates} >= max_update: {trainer_options.max_update}\"\n )\n break\n\n # 8. Check early stopping\n if trainer_options.patience is not None:\n if reporter.check_early_stopping(\n trainer_options.patience, *trainer_options.early_stopping_criterion\n ):\n break\n\n gc.collect()\n\n else:\n logging.info(\n f\"The training was finished at {trainer_options.max_epoch} epochs \"\n )\n\n # Generated n-best averaged model\n if not distributed_option.distributed or distributed_option.dist_rank == 0:\n average_nbest_models(\n reporter=reporter,\n output_dir=output_dir,\n best_model_criterion=trainer_options.best_model_criterion,\n nbest=keep_nbest_models,\n oss_bucket=trainer_options.oss_bucket,\n pai_output_dir=trainer_options.output_dir,\n )\n\n @classmethod\n def train_one_epoch(\n cls,\n model: torch.nn.Module,\n iterator: Iterable[Tuple[List[str], Dict[str, torch.Tensor]]],\n optimizers: Sequence[torch.optim.Optimizer],\n schedulers: Sequence[Optional[AbsScheduler]],\n scaler: Optional[GradScaler],\n reporter: SubReporter,\n summary_writer,\n options: TrainerOptions,\n distributed_option: DistributedOption,\n **kwargs\n ) -> Tuple[bool, bool]:\n assert check_argument_types()\n\n grad_noise = options.grad_noise\n accum_grad = options.accum_grad\n grad_clip = options.grad_clip\n grad_clip_type = options.grad_clip_type\n log_interval = options.log_interval\n no_forward_run = options.no_forward_run\n ngpu = options.ngpu\n use_wandb = options.use_wandb\n distributed = distributed_option.distributed\n\n if log_interval is None:\n try:\n log_interval = max(len(iterator) // 20, 10)\n except TypeError:\n log_interval = 100\n\n model.train()\n all_steps_are_invalid = True\n max_update_stop = False\n # [For distributed] Because iteration counts are not always equals between\n # processes, send stop-flag to the other processes if iterator is finished\n iterator_stop = torch.tensor(0).to(\"cuda\" if ngpu > 0 else 
\"cpu\")\n\n start_time = time.perf_counter()\n for iiter, (_, batch) in enumerate(\n reporter.measure_iter_time(iterator, \"iter_time\"), 1\n ):\n assert isinstance(batch, dict), type(batch)\n\n if distributed:\n torch.distributed.all_reduce(iterator_stop, ReduceOp.SUM)\n if iterator_stop > 0:\n break\n\n batch = to_device(batch, \"cuda\" if ngpu > 0 else \"cpu\")\n if no_forward_run:\n all_steps_are_invalid = False\n continue\n\n with autocast(scaler is not None):\n with reporter.measure_time(\"forward_time\"):\n retval = model(**batch)\n\n # Note(kamo):\n # Supporting two patterns for the returned value from the model\n # a. dict type\n if isinstance(retval, dict):\n loss = retval[\"loss\"]\n stats = retval[\"stats\"]\n weight = retval[\"weight\"]\n optim_idx = retval.get(\"optim_idx\")\n if optim_idx is not None and not isinstance(optim_idx, int):\n if not isinstance(optim_idx, torch.Tensor):\n raise RuntimeError(\n \"optim_idx must be int or 1dim torch.Tensor, \"\n f\"but got {type(optim_idx)}\"\n )\n if optim_idx.dim() >= 2:\n raise RuntimeError(\n \"optim_idx must be int or 1dim torch.Tensor, \"\n f\"but got {optim_idx.dim()}dim tensor\"\n )\n if optim_idx.dim() == 1:\n for v in optim_idx:\n if v != optim_idx[0]:\n raise RuntimeError(\n \"optim_idx must be 1dim tensor \"\n \"having same values for all entries\"\n )\n optim_idx = optim_idx[0].item()\n else:\n optim_idx = optim_idx.item()\n\n # b. tuple or list type\n else:\n loss, stats, weight = retval\n optim_idx = None\n\n stats = {k: v for k, v in stats.items() if v is not None}\n if ngpu > 1 or distributed:\n # Apply weighted averaging for loss and stats\n loss = (loss * weight.type(loss.dtype)).sum()\n\n # if distributed, this method can also apply all_reduce()\n stats, weight = recursive_average(stats, weight, distributed)\n\n # Now weight is summation over all workers\n loss /= weight\n if distributed:\n # NOTE(kamo): Multiply world_size because DistributedDataParallel\n # automatically normalizes the gradient by world_size.\n loss *= torch.distributed.get_world_size()\n\n loss /= accum_grad\n\n reporter.register(stats, weight)\n\n with reporter.measure_time(\"backward_time\"):\n if scaler is not None:\n # Scales loss. Calls backward() on scaled loss\n # to create scaled gradients.\n # Backward passes under autocast are not recommended.\n # Backward ops run in the same dtype autocast chose\n # for corresponding forward ops.\n scaler.scale(loss).backward()\n else:\n loss.backward()\n\n if iiter % accum_grad == 0:\n if scaler is not None:\n # Unscales the gradients of optimizer's assigned params in-place\n for iopt, optimizer in enumerate(optimizers):\n if optim_idx is not None and iopt != optim_idx:\n continue\n scaler.unscale_(optimizer)\n\n # gradient noise injection\n if grad_noise:\n add_gradient_noise(\n model,\n reporter.get_total_count(),\n duration=100,\n eta=1.0,\n scale_factor=0.55,\n )\n\n # compute the gradient norm to check if it is normal or not\n grad_norm = torch.nn.utils.clip_grad_norm_(\n model.parameters(),\n max_norm=grad_clip,\n norm_type=grad_clip_type,\n )\n # PyTorch<=1.4, clip_grad_norm_ returns float value\n if not isinstance(grad_norm, torch.Tensor):\n grad_norm = torch.tensor(grad_norm)\n\n if not torch.isfinite(grad_norm):\n logging.warning(\n f\"The grad norm is {grad_norm}. 
Skipping updating the model.\"\n )\n\n # Must invoke scaler.update() if unscale_() is used in the iteration\n # to avoid the following error:\n # RuntimeError: unscale_() has already been called\n # on this optimizer since the last update().\n # Note that if the gradient has inf/nan values,\n # scaler.step skips optimizer.step().\n if scaler is not None:\n for iopt, optimizer in enumerate(optimizers):\n if optim_idx is not None and iopt != optim_idx:\n continue\n scaler.step(optimizer)\n scaler.update()\n\n else:\n all_steps_are_invalid = False\n with reporter.measure_time(\"optim_step_time\"):\n for iopt, (optimizer, scheduler) in enumerate(\n zip(optimizers, schedulers)\n ):\n if optim_idx is not None and iopt != optim_idx:\n continue\n if scaler is not None:\n # scaler.step() first unscales the gradients of\n # the optimizer's assigned params.\n scaler.step(optimizer)\n # Updates the scale for next iteration.\n scaler.update()\n else:\n optimizer.step()\n if isinstance(scheduler, AbsBatchStepScheduler):\n scheduler.step()\n for iopt, optimizer in enumerate(optimizers):\n if optim_idx is not None and iopt != optim_idx:\n continue\n optimizer.zero_grad()\n\n # Register lr and train/load time[sec/step],\n # where step refers to accum_grad * mini-batch\n reporter.register(\n dict(\n {\n f\"optim{i}_lr{j}\": pg[\"lr\"]\n for i, optimizer in enumerate(optimizers)\n for j, pg in enumerate(optimizer.param_groups)\n if \"lr\" in pg\n },\n train_time=time.perf_counter() - start_time,\n ),\n )\n start_time = time.perf_counter()\n\n # update num_updates\n if distributed:\n if hasattr(model.module, \"num_updates\"):\n model.module.set_num_updates(model.module.get_num_updates() + 1)\n options.num_updates = model.module.get_num_updates()\n if model.module.get_num_updates() >= options.max_update:\n max_update_stop = True\n else:\n if hasattr(model, \"num_updates\"):\n model.set_num_updates(model.get_num_updates() + 1)\n options.num_updates = model.get_num_updates()\n if model.get_num_updates() >= options.max_update:\n max_update_stop = True\n\n to_save_model = model.module if distributed else model\n if (hasattr(options, \"num_updates\") and\n options.save_ckpt_every_steps > 0 and\n options.num_updates % options.save_ckpt_every_steps == 0):\n if options.use_pai:\n buffer = BytesIO()\n torch.save(\n {\n \"model\": to_save_model.state_dict(),\n \"optimizers\": [o.state_dict() for o in optimizers],\n \"reporter\": kwargs[\"par_reporter\"].state_dict(),\n \"schedulers\": [\n s.state_dict() if s is not None else None\n for s in schedulers\n ],\n \"scaler\": scaler.state_dict() if scaler is not None else None,\n \"ema_model\": to_save_model.encoder.ema.model.state_dict()\n if hasattr(to_save_model.encoder, \"ema\") and to_save_model.encoder.ema is not None else None,\n },\n buffer,\n )\n options.oss_bucket.put_object(\n os.path.join(options.output_dir, f\"checkpoint-{options.num_updates}.pth\"), buffer.getvalue())\n else:\n torch.save(\n {\n \"model\": to_save_model.state_dict(),\n \"optimizers\": [o.state_dict() for o in optimizers],\n \"reporter\": kwargs[\"par_reporter\"].state_dict(),\n \"schedulers\": [\n s.state_dict() if s is not None else None\n for s in schedulers\n ],\n \"scaler\": scaler.state_dict() if scaler is not None else None,\n },\n options.output_dir / f\"checkpoint-{options.num_updates}.pth\",\n )\n\n # NOTE(kamo): Call log_message() after next()\n reporter.next()\n if iiter % log_interval == 0:\n num_updates = options.num_updates if hasattr(options, \"num_updates\") else None\n 
logging.info(reporter.log_message(-log_interval, num_updates=num_updates))\n if summary_writer is not None:\n reporter.tensorboard_add_scalar(summary_writer, -log_interval)\n if use_wandb:\n reporter.wandb_log()\n\n if max_update_stop:\n break\n\n else:\n if distributed:\n iterator_stop.fill_(1)\n torch.distributed.all_reduce(iterator_stop, ReduceOp.SUM)\n return all_steps_are_invalid, max_update_stop\n\n @classmethod\n @torch.no_grad()\n def validate_one_epoch(\n cls,\n model: torch.nn.Module,\n iterator: Iterable[Dict[str, torch.Tensor]],\n reporter: SubReporter,\n options: TrainerOptions,\n distributed_option: DistributedOption,\n ) -> None:\n assert check_argument_types()\n ngpu = options.ngpu\n no_forward_run = options.no_forward_run\n distributed = distributed_option.distributed\n\n model.eval()\n\n # [For distributed] Because iteration counts are not always equals between\n # processes, send stop-flag to the other processes if iterator is finished\n iterator_stop = torch.tensor(0).to(\"cuda\" if ngpu > 0 else \"cpu\")\n for (_, batch) in iterator:\n assert isinstance(batch, dict), type(batch)\n if distributed:\n torch.distributed.all_reduce(iterator_stop, ReduceOp.SUM)\n if iterator_stop > 0:\n break\n\n batch = to_device(batch, \"cuda\" if ngpu > 0 else \"cpu\")\n if no_forward_run:\n continue\n\n retval = model(**batch)\n if isinstance(retval, dict):\n stats = retval[\"stats\"]\n weight = retval[\"weight\"]\n else:\n _, stats, weight = retval\n if ngpu > 1 or distributed:\n # Apply weighted averaging for stats.\n # if distributed, this method can also apply all_reduce()\n stats, weight = recursive_average(stats, weight, distributed)\n\n reporter.register(stats, weight)\n reporter.next()\n\n else:\n if distributed:\n iterator_stop.fill_(1)\n torch.distributed.all_reduce(iterator_stop, ReduceOp.SUM)" }, { "identifier": "TrainerOptions", "path": "funcodec/train/trainer.py", "snippet": "class TrainerOptions:\n ngpu: int\n resume: bool\n use_amp: bool\n train_dtype: str\n grad_noise: bool\n accum_grad: int\n grad_clip: float\n grad_clip_type: float\n log_interval: Optional[int]\n no_forward_run: bool\n use_tensorboard: bool\n use_wandb: bool\n output_dir: Union[Path, str]\n max_epoch: int\n max_update: int\n seed: int\n sharded_ddp: bool\n patience: Optional[int]\n keep_nbest_models: Union[int, List[int]]\n nbest_averaging_interval: int\n early_stopping_criterion: Sequence[str]\n best_model_criterion: Sequence[Sequence[str]]\n val_scheduler_criterion: Sequence[str]\n unused_parameters: bool\n wandb_model_log_interval: int\n use_pai: bool\n oss_bucket: Any\n save_ckpt_every_steps: int" }, { "identifier": "build_dataclass", "path": "funcodec/utils/build_dataclass.py", "snippet": "def build_dataclass(dataclass, args: argparse.Namespace):\n \"\"\"Helper function to build dataclass from 'args'.\"\"\"\n kwargs = {}\n for field in dataclasses.fields(dataclass):\n if not hasattr(args, field.name):\n raise ValueError(\n f\"args doesn't have {field.name}. You need to set it to ArgumentsParser\"\n )\n check_type(field.name, getattr(args, field.name), field.type)\n kwargs[field.name] = getattr(args, field.name)\n return dataclass(**kwargs)" }, { "identifier": "str2bool", "path": "funcodec/utils/types.py", "snippet": "def str2bool(value: str) -> bool:\n return bool(strtobool(value))" } ]
import argparse import dataclasses import logging import time import numpy as np import torch import os import soundfile import gc import fairscale from contextlib import contextmanager from distutils.version import LooseVersion from typing import Dict from typing import Iterable from typing import List from typing import Optional from typing import Sequence from typing import Tuple from io import BytesIO from typeguard import check_argument_types from funcodec.schedulers.abs_scheduler import AbsBatchStepScheduler from funcodec.schedulers.abs_scheduler import AbsScheduler from funcodec.torch_utils.device_funcs import to_device from funcodec.torch_utils.recursive_op import recursive_average from funcodec.train.distributed_utils import DistributedOption from funcodec.train.reporter import SubReporter from funcodec.train.trainer import Trainer from funcodec.train.trainer import TrainerOptions from funcodec.utils.build_dataclass import build_dataclass from funcodec.utils.types import str2bool from torch.distributed import ReduceOp from torch.cuda.amp import autocast from torch.cuda.amp import GradScaler
11357
# Copyright 2021 Tomoki Hayashi # Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0) # Adapted by Zhihao Du for GAN-based Codec models. """Trainer module for GAN-based training.""" if torch.distributed.is_available(): if LooseVersion(torch.__version__) >= LooseVersion("1.6.0"): else: # Nothing to do if torch<1.6.0 @contextmanager def autocast(enabled=True): # NOQA yield GradScaler = None try: except ImportError: fairscale = None @dataclasses.dataclass class GANTrainerOptions(TrainerOptions): """Trainer option dataclass for GANTrainer.""" generator_first: bool disc_grad_clip: float disc_grad_clip_type: float gen_train_interval: int disc_train_interval: int sampling_rate: int class GANTrainer(Trainer): """Trainer for GAN-based training. If you'd like to use this trainer, the model must inherit espnet.train.abs_gan_espnet_model.AbsGANESPnetModel. """ @classmethod def build_options(cls, args: argparse.Namespace) -> TrainerOptions: """Build options consumed by train(), eval(), and plot_attention().""" assert check_argument_types() return build_dataclass(GANTrainerOptions, args) @classmethod def add_arguments(cls, parser: argparse.ArgumentParser): """Add additional arguments for GAN-trainer.""" parser.add_argument( "--generator_first", type=str2bool, default=False, help="Whether to update generator first.", ) @classmethod def train_one_epoch( cls, model: torch.nn.Module, iterator: Iterable[Tuple[List[str], Dict[str, torch.Tensor]]], optimizers: Sequence[torch.optim.Optimizer],
# Copyright 2021 Tomoki Hayashi # Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0) # Adapted by Zhihao Du for GAN-based Codec models. """Trainer module for GAN-based training.""" if torch.distributed.is_available(): if LooseVersion(torch.__version__) >= LooseVersion("1.6.0"): else: # Nothing to do if torch<1.6.0 @contextmanager def autocast(enabled=True): # NOQA yield GradScaler = None try: except ImportError: fairscale = None @dataclasses.dataclass class GANTrainerOptions(TrainerOptions): """Trainer option dataclass for GANTrainer.""" generator_first: bool disc_grad_clip: float disc_grad_clip_type: float gen_train_interval: int disc_train_interval: int sampling_rate: int class GANTrainer(Trainer): """Trainer for GAN-based training. If you'd like to use this trainer, the model must inherit espnet.train.abs_gan_espnet_model.AbsGANESPnetModel. """ @classmethod def build_options(cls, args: argparse.Namespace) -> TrainerOptions: """Build options consumed by train(), eval(), and plot_attention().""" assert check_argument_types() return build_dataclass(GANTrainerOptions, args) @classmethod def add_arguments(cls, parser: argparse.ArgumentParser): """Add additional arguments for GAN-trainer.""" parser.add_argument( "--generator_first", type=str2bool, default=False, help="Whether to update generator first.", ) @classmethod def train_one_epoch( cls, model: torch.nn.Module, iterator: Iterable[Tuple[List[str], Dict[str, torch.Tensor]]], optimizers: Sequence[torch.optim.Optimizer],
schedulers: Sequence[Optional[AbsScheduler]],
1
2023-10-07 02:00:40+00:00
16k
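The GANTrainer record above interleaves generator and discriminator updates, with the --generator_first flag deciding which sub-step runs first. The sketch below illustrates that two-optimizer pattern in isolation; the toy modules and loss terms are assumptions and do not reproduce FunCodec's actual codec models or losses.

import torch

gen = torch.nn.Linear(8, 8)     # toy generator stand-in
disc = torch.nn.Linear(8, 1)    # toy discriminator stand-in
opt_g = torch.optim.Adam(gen.parameters(), lr=2e-4)
opt_d = torch.optim.Adam(disc.parameters(), lr=2e-4)
generator_first = False         # mirrors the --generator_first flag (default False above)

def one_step(batch):
    def update_generator():
        opt_g.zero_grad()
        fake = gen(batch)
        loss_g = -disc(fake).mean()   # toy non-saturating generator loss
        loss_g.backward()
        opt_g.step()

    def update_discriminator():
        opt_d.zero_grad()
        with torch.no_grad():
            fake = gen(batch)         # stop gradients into the generator
        loss_d = disc(fake).mean() - disc(batch).mean()   # toy critic-style loss
        loss_d.backward()
        opt_d.step()

    # The trainer runs both sub-steps each iteration; the flag only fixes their order.
    if generator_first:
        update_generator()
        update_discriminator()
    else:
        update_discriminator()
        update_generator()

one_step(torch.randn(4, 8))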
longzw1997/Open-GroundingDino
models/GroundingDINO/groundingdino.py
[ { "identifier": "box_ops", "path": "groundingdino/util/box_ops.py", "snippet": "def box_cxcywh_to_xyxy(x):\ndef box_xyxy_to_cxcywh(x):\ndef box_iou(boxes1, boxes2):\ndef generalized_box_iou(boxes1, boxes2):\ndef box_iou_pairwise(boxes1, boxes2):\ndef generalized_box_iou_pairwise(boxes1, boxes2):\ndef masks_to_boxes(masks):" }, { "identifier": "get_tokenlizer", "path": "groundingdino/util/get_tokenlizer.py", "snippet": "def get_tokenlizer(text_encoder_type):\n if not isinstance(text_encoder_type, str):\n # print(\"text_encoder_type is not a str\")\n if hasattr(text_encoder_type, \"text_encoder_type\"):\n text_encoder_type = text_encoder_type.text_encoder_type\n elif text_encoder_type.get(\"text_encoder_type\", False):\n text_encoder_type = text_encoder_type.get(\"text_encoder_type\")\n elif os.path.isdir(text_encoder_type) and os.path.exists(text_encoder_type):\n pass\n else:\n raise ValueError(\n \"Unknown type of text_encoder_type: {}\".format(type(text_encoder_type))\n )\n print(\"final text_encoder_type: {}\".format(text_encoder_type))\n tokenizer = AutoTokenizer.from_pretrained(text_encoder_type)\n print(\"load tokenizer done.\")\n return tokenizer" }, { "identifier": "NestedTensor", "path": "groundingdino/util/misc.py", "snippet": "class NestedTensor(object):\n def __init__(self, tensors, mask: Optional[Tensor]):\n self.tensors = tensors\n self.mask = mask\n if mask == \"auto\":\n self.mask = torch.zeros_like(tensors).to(tensors.device)\n if self.mask.dim() == 3:\n self.mask = self.mask.sum(0).to(bool)\n elif self.mask.dim() == 4:\n self.mask = self.mask.sum(1).to(bool)\n else:\n raise ValueError(\n \"tensors dim must be 3 or 4 but {}({})\".format(\n self.tensors.dim(), self.tensors.shape\n )\n )\n\n def imgsize(self):\n res = []\n for i in range(self.tensors.shape[0]):\n mask = self.mask[i]\n maxH = (~mask).sum(0).max()\n maxW = (~mask).sum(1).max()\n res.append(torch.Tensor([maxH, maxW]))\n return res\n\n def to(self, device):\n # type: (Device) -> NestedTensor # noqa\n cast_tensor = self.tensors.to(device)\n mask = self.mask\n if mask is not None:\n assert mask is not None\n cast_mask = mask.to(device)\n else:\n cast_mask = None\n return NestedTensor(cast_tensor, cast_mask)\n\n def to_img_list_single(self, tensor, mask):\n assert tensor.dim() == 3, \"dim of tensor should be 3 but {}\".format(tensor.dim())\n maxH = (~mask).sum(0).max()\n maxW = (~mask).sum(1).max()\n img = tensor[:, :maxH, :maxW]\n return img\n\n def to_img_list(self):\n \"\"\"remove the padding and convert to img list\n\n Returns:\n [type]: [description]\n \"\"\"\n if self.tensors.dim() == 3:\n return self.to_img_list_single(self.tensors, self.mask)\n else:\n res = []\n for i in range(self.tensors.shape[0]):\n tensor_i = self.tensors[i]\n mask_i = self.mask[i]\n res.append(self.to_img_list_single(tensor_i, mask_i))\n return res\n\n @property\n def device(self):\n return self.tensors.device\n\n def decompose(self):\n return self.tensors, self.mask\n\n def __repr__(self):\n return str(self.tensors)\n\n @property\n def shape(self):\n return {\"tensors.shape\": self.tensors.shape, \"mask.shape\": self.mask.shape}" }, { "identifier": "accuracy", "path": "groundingdino/util/misc.py", "snippet": "@torch.no_grad()\ndef accuracy(output, target, topk=(1,)):\n \"\"\"Computes the precision@k for the specified values of k\"\"\"\n if target.numel() == 0:\n return [torch.zeros([], device=output.device)]\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = 
pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res" }, { "identifier": "get_world_size", "path": "groundingdino/util/misc.py", "snippet": "def get_world_size():\n if not is_dist_avail_and_initialized():\n return 1\n return dist.get_world_size()" }, { "identifier": "interpolate", "path": "groundingdino/util/misc.py", "snippet": "def interpolate(input, size=None, scale_factor=None, mode=\"nearest\", align_corners=None):\n # type: (Tensor, Optional[List[int]], Optional[float], str, Optional[bool]) -> Tensor\n \"\"\"\n Equivalent to nn.functional.interpolate, but with support for empty batch sizes.\n This will eventually be supported natively by PyTorch, and this\n class can go away.\n \"\"\"\n if __torchvision_need_compat_flag < 0.7:\n if input.numel() > 0:\n return torch.nn.functional.interpolate(input, size, scale_factor, mode, align_corners)\n\n output_shape = _output_size(2, input, size, scale_factor)\n output_shape = list(input.shape[:-2]) + list(output_shape)\n return _new_empty_tensor(input, output_shape)\n else:\n return torchvision.ops.misc.interpolate(input, size, scale_factor, mode, align_corners)" }, { "identifier": "inverse_sigmoid", "path": "groundingdino/util/misc.py", "snippet": "def inverse_sigmoid(x, eps=1e-3):\n x = x.clamp(min=0, max=1)\n x1 = x.clamp(min=eps)\n x2 = (1 - x).clamp(min=eps)\n return torch.log(x1 / x2)" }, { "identifier": "is_dist_avail_and_initialized", "path": "groundingdino/util/misc.py", "snippet": "def is_dist_avail_and_initialized():\n if not dist.is_available():\n return False\n if not dist.is_initialized():\n return False\n return True" }, { "identifier": "nested_tensor_from_tensor_list", "path": "groundingdino/util/misc.py", "snippet": "def nested_tensor_from_tensor_list(tensor_list: List[Tensor]):\n # TODO make this more general\n if tensor_list[0].ndim == 3:\n if torchvision._is_tracing():\n # nested_tensor_from_tensor_list() does not export well to ONNX\n # call _onnx_nested_tensor_from_tensor_list() instead\n return _onnx_nested_tensor_from_tensor_list(tensor_list)\n\n # TODO make it support different-sized images\n max_size = _max_by_axis([list(img.shape) for img in tensor_list])\n # min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list]))\n batch_shape = [len(tensor_list)] + max_size\n b, c, h, w = batch_shape\n dtype = tensor_list[0].dtype\n device = tensor_list[0].device\n tensor = torch.zeros(batch_shape, dtype=dtype, device=device)\n mask = torch.ones((b, h, w), dtype=torch.bool, device=device)\n for img, pad_img, m in zip(tensor_list, tensor, mask):\n pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)\n m[: img.shape[1], : img.shape[2]] = False\n else:\n raise ValueError(\"not supported\")\n return NestedTensor(tensor, mask)" }, { "identifier": "get_phrases_from_posmap", "path": "groundingdino/util/utils.py", "snippet": "def get_phrases_from_posmap(\n posmap: torch.BoolTensor, tokenized: Dict, tokenizer: AutoTokenizer, left_idx: int = 0, right_idx: int = 255\n):\n assert isinstance(posmap, torch.Tensor), \"posmap must be torch.Tensor\"\n if posmap.dim() == 1:\n posmap[0: left_idx + 1] = False\n posmap[right_idx:] = False\n non_zero_idx = posmap.nonzero(as_tuple=True)[0].tolist()\n token_ids = [tokenized[\"input_ids\"][i] for i in non_zero_idx]\n return tokenizer.decode(token_ids)\n else:\n raise NotImplementedError(\"posmap must be 1-dim\")" }, { "identifier": 
"COCOVisualizer", "path": "groundingdino/util/visualizer.py", "snippet": "class COCOVisualizer:\n def __init__(self, coco=None, tokenlizer=None) -> None:\n self.coco = coco\n\n def visualize(self, img, tgt, caption=None, dpi=180, savedir=\"vis\"):\n \"\"\"\n img: tensor(3, H, W)\n tgt: make sure they are all on cpu.\n must have items: 'image_id', 'boxes', 'size'\n \"\"\"\n plt.figure(dpi=dpi)\n plt.rcParams[\"font.size\"] = \"5\"\n ax = plt.gca()\n img = renorm(img).permute(1, 2, 0)\n # if os.environ.get('IPDB_SHILONG_DEBUG', None) == 'INFO':\n # import ipdb; ipdb.set_trace()\n ax.imshow(img)\n\n self.addtgt(tgt)\n\n if tgt is None:\n image_id = 0\n elif \"image_id\" not in tgt:\n image_id = 0\n else:\n image_id = tgt[\"image_id\"]\n\n if caption is None:\n savename = \"{}/{}-{}.png\".format(\n savedir, int(image_id), str(datetime.datetime.now()).replace(\" \", \"-\")\n )\n else:\n savename = \"{}/{}-{}-{}.png\".format(\n savedir, caption, int(image_id), str(datetime.datetime.now()).replace(\" \", \"-\")\n )\n print(\"savename: {}\".format(savename))\n os.makedirs(os.path.dirname(savename), exist_ok=True)\n plt.savefig(savename)\n plt.close()\n\n def addtgt(self, tgt):\n \"\"\" \"\"\"\n if tgt is None or not \"boxes\" in tgt:\n ax = plt.gca()\n\n if \"caption\" in tgt:\n ax.set_title(tgt[\"caption\"], wrap=True)\n\n ax.set_axis_off()\n return\n\n ax = plt.gca()\n H, W = tgt[\"size\"]\n numbox = tgt[\"boxes\"].shape[0]\n\n color = []\n polygons = []\n boxes = []\n for box in tgt[\"boxes\"].cpu():\n unnormbbox = box * torch.Tensor([W, H, W, H])\n unnormbbox[:2] -= unnormbbox[2:] / 2\n [bbox_x, bbox_y, bbox_w, bbox_h] = unnormbbox.tolist()\n boxes.append([bbox_x, bbox_y, bbox_w, bbox_h])\n poly = [\n [bbox_x, bbox_y],\n [bbox_x, bbox_y + bbox_h],\n [bbox_x + bbox_w, bbox_y + bbox_h],\n [bbox_x + bbox_w, bbox_y],\n ]\n np_poly = np.array(poly).reshape((4, 2))\n polygons.append(Polygon(np_poly))\n c = (np.random.random((1, 3)) * 0.6 + 0.4).tolist()[0]\n color.append(c)\n\n p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.1)\n ax.add_collection(p)\n p = PatchCollection(polygons, facecolor=\"none\", edgecolors=color, linewidths=2)\n ax.add_collection(p)\n\n if \"strings_positive\" in tgt and len(tgt[\"strings_positive\"]) > 0:\n assert (\n len(tgt[\"strings_positive\"]) == numbox\n ), f\"{len(tgt['strings_positive'])} = {numbox}, \"\n for idx, strlist in enumerate(tgt[\"strings_positive\"]):\n cate_id = int(tgt[\"labels\"][idx])\n _string = str(cate_id) + \":\" + \" \".join(strlist)\n bbox_x, bbox_y, bbox_w, bbox_h = boxes[idx]\n # ax.text(bbox_x, bbox_y, _string, color='black', bbox={'facecolor': 'yellow', 'alpha': 1.0, 'pad': 1})\n ax.text(\n bbox_x,\n bbox_y,\n _string,\n color=\"black\",\n bbox={\"facecolor\": color[idx], \"alpha\": 0.6, \"pad\": 1},\n )\n\n if \"box_label\" in tgt:\n assert len(tgt[\"box_label\"]) == numbox, f\"{len(tgt['box_label'])} = {numbox}, \"\n for idx, bl in enumerate(tgt[\"box_label\"]):\n _string = str(bl)\n bbox_x, bbox_y, bbox_w, bbox_h = boxes[idx]\n # ax.text(bbox_x, bbox_y, _string, color='black', bbox={'facecolor': 'yellow', 'alpha': 1.0, 'pad': 1})\n ax.text(\n bbox_x,\n bbox_y,\n _string,\n color=\"black\",\n bbox={\"facecolor\": color[idx], \"alpha\": 0.6, \"pad\": 1},\n )\n\n if \"caption\" in tgt:\n ax.set_title(tgt[\"caption\"], wrap=True)\n # plt.figure()\n # rainbow_text(0.0,0.0,\"all unicorns poop rainbows ! ! 
!\".split(),\n # ['red', 'orange', 'brown', 'green', 'blue', 'purple', 'black'])\n\n if \"attn\" in tgt:\n # if os.environ.get('IPDB_SHILONG_DEBUG', None) == 'INFO':\n # import ipdb; ipdb.set_trace()\n if isinstance(tgt[\"attn\"], tuple):\n tgt[\"attn\"] = [tgt[\"attn\"]]\n for item in tgt[\"attn\"]:\n attn_map, basergb = item\n attn_map = (attn_map - attn_map.min()) / (attn_map.max() - attn_map.min() + 1e-3)\n attn_map = (attn_map * 255).astype(np.uint8)\n cm = ColorMap(basergb)\n heatmap = cm(attn_map)\n ax.imshow(heatmap)\n ax.set_axis_off()\n\n def showAnns(self, anns, draw_bbox=False):\n \"\"\"\n Display the specified annotations.\n :param anns (array of object): annotations to display\n :return: None\n \"\"\"\n if len(anns) == 0:\n return 0\n if \"segmentation\" in anns[0] or \"keypoints\" in anns[0]:\n datasetType = \"instances\"\n elif \"caption\" in anns[0]:\n datasetType = \"captions\"\n else:\n raise Exception(\"datasetType not supported\")\n if datasetType == \"instances\":\n ax = plt.gca()\n ax.set_autoscale_on(False)\n polygons = []\n color = []\n for ann in anns:\n c = (np.random.random((1, 3)) * 0.6 + 0.4).tolist()[0]\n if \"segmentation\" in ann:\n if type(ann[\"segmentation\"]) == list:\n # polygon\n for seg in ann[\"segmentation\"]:\n poly = np.array(seg).reshape((int(len(seg) / 2), 2))\n polygons.append(Polygon(poly))\n color.append(c)\n else:\n # mask\n t = self.imgs[ann[\"image_id\"]]\n if type(ann[\"segmentation\"][\"counts\"]) == list:\n rle = maskUtils.frPyObjects(\n [ann[\"segmentation\"]], t[\"height\"], t[\"width\"]\n )\n else:\n rle = [ann[\"segmentation\"]]\n m = maskUtils.decode(rle)\n img = np.ones((m.shape[0], m.shape[1], 3))\n if ann[\"iscrowd\"] == 1:\n color_mask = np.array([2.0, 166.0, 101.0]) / 255\n if ann[\"iscrowd\"] == 0:\n color_mask = np.random.random((1, 3)).tolist()[0]\n for i in range(3):\n img[:, :, i] = color_mask[i]\n ax.imshow(np.dstack((img, m * 0.5)))\n if \"keypoints\" in ann and type(ann[\"keypoints\"]) == list:\n # turn skeleton into zero-based index\n sks = np.array(self.loadCats(ann[\"category_id\"])[0][\"skeleton\"]) - 1\n kp = np.array(ann[\"keypoints\"])\n x = kp[0::3]\n y = kp[1::3]\n v = kp[2::3]\n for sk in sks:\n if np.all(v[sk] > 0):\n plt.plot(x[sk], y[sk], linewidth=3, color=c)\n plt.plot(\n x[v > 0],\n y[v > 0],\n \"o\",\n markersize=8,\n markerfacecolor=c,\n markeredgecolor=\"k\",\n markeredgewidth=2,\n )\n plt.plot(\n x[v > 1],\n y[v > 1],\n \"o\",\n markersize=8,\n markerfacecolor=c,\n markeredgecolor=c,\n markeredgewidth=2,\n )\n\n if draw_bbox:\n [bbox_x, bbox_y, bbox_w, bbox_h] = ann[\"bbox\"]\n poly = [\n [bbox_x, bbox_y],\n [bbox_x, bbox_y + bbox_h],\n [bbox_x + bbox_w, bbox_y + bbox_h],\n [bbox_x + bbox_w, bbox_y],\n ]\n np_poly = np.array(poly).reshape((4, 2))\n polygons.append(Polygon(np_poly))\n color.append(c)\n\n # p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.4)\n # ax.add_collection(p)\n p = PatchCollection(polygons, facecolor=\"none\", edgecolors=color, linewidths=2)\n ax.add_collection(p)\n elif datasetType == \"captions\":\n for ann in anns:\n print(ann[\"caption\"])" }, { "identifier": "create_positive_map_from_span", "path": "groundingdino/util/vl_utils.py", "snippet": "def create_positive_map_from_span(tokenized, token_span, max_text_len=256):\n \"\"\"construct a map such that positive_map[i,j] = True iff box i is associated to token j\n Input:\n - tokenized:\n - input_ids: Tensor[1, ntokens]\n - attention_mask: Tensor[1, ntokens]\n - token_span: list with length num_boxes.\n - 
each item: [start_idx, end_idx]\n \"\"\"\n positive_map = torch.zeros((len(token_span), max_text_len), dtype=torch.float)\n for j, tok_list in enumerate(token_span):\n for (beg, end) in tok_list:\n beg_pos = tokenized.char_to_token(beg)\n end_pos = tokenized.char_to_token(end - 1)\n if beg_pos is None:\n try:\n beg_pos = tokenized.char_to_token(beg + 1)\n if beg_pos is None:\n beg_pos = tokenized.char_to_token(beg + 2)\n except:\n beg_pos = None\n if end_pos is None:\n try:\n end_pos = tokenized.char_to_token(end - 2)\n if end_pos is None:\n end_pos = tokenized.char_to_token(end - 3)\n except:\n end_pos = None\n if beg_pos is None or end_pos is None:\n continue\n\n assert beg_pos is not None and end_pos is not None\n if os.environ.get(\"SHILONG_DEBUG_ONLY_ONE_POS\", None) == \"TRUE\":\n positive_map[j, beg_pos] = 1\n break\n else:\n positive_map[j, beg_pos : end_pos + 1].fill_(1)\n\n return positive_map / (positive_map.sum(-1)[:, None] + 1e-6)" }, { "identifier": "MODULE_BUILD_FUNCS", "path": "models/registry.py", "snippet": "MODULE_BUILD_FUNCS = Registry('model build functions')" }, { "identifier": "build_backbone", "path": "models/GroundingDINO/backbone/backbone.py", "snippet": "def build_backbone(args):\n \"\"\"\n Useful args:\n - backbone: backbone name\n - lr_backbone:\n - dilation\n - return_interm_indices: available: [0,1,2,3], [1,2,3], [3]\n - backbone_freeze_keywords:\n - use_checkpoint: for swin only for now\n\n \"\"\"\n position_embedding = build_position_encoding(args)\n train_backbone = True\n if not train_backbone:\n raise ValueError(\"Please set lr_backbone > 0\")\n return_interm_indices = args.return_interm_indices\n assert return_interm_indices in [[0, 1, 2, 3], [1, 2, 3], [3]]\n args.backbone_freeze_keywords\n use_checkpoint = getattr(args, \"use_checkpoint\", False)\n\n if args.backbone in [\"resnet50\", \"resnet101\"]:\n backbone = Backbone(\n args.backbone,\n train_backbone,\n args.dilation,\n return_interm_indices,\n batch_norm=FrozenBatchNorm2d,\n )\n bb_num_channels = backbone.num_channels\n elif args.backbone in [\n \"swin_T_224_1k\",\n \"swin_B_224_22k\",\n \"swin_B_384_22k\",\n \"swin_L_224_22k\",\n \"swin_L_384_22k\",\n ]:\n pretrain_img_size = int(args.backbone.split(\"_\")[-2])\n backbone = build_swin_transformer(\n args.backbone,\n pretrain_img_size=pretrain_img_size,\n out_indices=tuple(return_interm_indices),\n dilation=False,\n use_checkpoint=use_checkpoint,\n )\n\n bb_num_channels = backbone.num_features[4 - len(return_interm_indices) :]\n else:\n raise NotImplementedError(\"Unknown backbone {}\".format(args.backbone))\n\n assert len(bb_num_channels) == len(\n return_interm_indices\n ), f\"len(bb_num_channels) {len(bb_num_channels)} != len(return_interm_indices) {len(return_interm_indices)}\"\n\n model = Joiner(backbone, position_embedding)\n model.num_channels = bb_num_channels\n assert isinstance(\n bb_num_channels, List\n ), \"bb_num_channels is expected to be a List but {}\".format(type(bb_num_channels))\n # import ipdb; ipdb.set_trace()\n return model" }, { "identifier": "BertModelWarper", "path": "models/GroundingDINO/bertwarper.py", "snippet": "class BertModelWarper(nn.Module):\n def __init__(self, bert_model):\n super().__init__()\n # self.bert = bert_modelc\n\n self.config = bert_model.config\n self.embeddings = bert_model.embeddings\n self.encoder = bert_model.encoder\n self.pooler = bert_model.pooler\n\n self.get_extended_attention_mask = bert_model.get_extended_attention_mask\n self.invert_attention_mask = bert_model.invert_attention_mask\n 
self.get_head_mask = bert_model.get_head_mask\n\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_values=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\n Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if\n the model is configured as a decoder.\n encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in\n the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):\n Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.\n\n If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`\n (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`\n instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.\n use_cache (:obj:`bool`, `optional`):\n If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up\n decoding (see :obj:`past_key_values`).\n \"\"\"\n output_attentions = (\n output_attentions if output_attentions is not None else self.config.output_attentions\n )\n output_hidden_states = (\n output_hidden_states\n if output_hidden_states is not None\n else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if self.config.is_decoder:\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n else:\n use_cache = False\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = input_ids.size()\n batch_size, seq_length = input_shape\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n batch_size, seq_length = input_shape\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n\n # past_key_values_length\n past_key_values_length = (\n past_key_values[0][0].shape[2] if past_key_values is not None else 0\n )\n\n if attention_mask is None:\n attention_mask = torch.ones(\n ((batch_size, seq_length + past_key_values_length)), device=device\n )\n if token_type_ids is None:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)\n\n # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]\n # ourselves in which case we just need to make it broadcastable to all heads.\n extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(\n attention_mask, input_shape, 
device\n )\n\n # If a 2D or 3D attention mask is provided for the cross-attention\n # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]\n if self.config.is_decoder and encoder_hidden_states is not None:\n encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()\n encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)\n if encoder_attention_mask is None:\n encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)\n encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)\n else:\n encoder_extended_attention_mask = None\n # if os.environ.get('IPDB_SHILONG_DEBUG', None) == 'INFO':\n # import ipdb; ipdb.set_trace()\n\n # Prepare head mask if needed\n # 1.0 in head_mask indicate we keep the head\n # attention_probs has shape bsz x n_heads x N x N\n # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]\n # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]\n head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)\n\n embedding_output = self.embeddings(\n input_ids=input_ids,\n position_ids=position_ids,\n token_type_ids=token_type_ids,\n inputs_embeds=inputs_embeds,\n past_key_values_length=past_key_values_length,\n )\n\n encoder_outputs = self.encoder(\n embedding_output,\n attention_mask=extended_attention_mask,\n head_mask=head_mask,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_extended_attention_mask,\n past_key_values=past_key_values,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n sequence_output = encoder_outputs[0]\n pooled_output = self.pooler(sequence_output) if self.pooler is not None else None\n\n if not return_dict:\n return (sequence_output, pooled_output) + encoder_outputs[1:]\n\n return BaseModelOutputWithPoolingAndCrossAttentions(\n last_hidden_state=sequence_output,\n pooler_output=pooled_output,\n past_key_values=encoder_outputs.past_key_values,\n hidden_states=encoder_outputs.hidden_states,\n attentions=encoder_outputs.attentions,\n cross_attentions=encoder_outputs.cross_attentions,\n )" }, { "identifier": "generate_masks_with_special_tokens", "path": "models/GroundingDINO/bertwarper.py", "snippet": "def generate_masks_with_special_tokens(tokenized, special_tokens_list, tokenizer):\n \"\"\"Generate attention mask between each pair of special tokens\n Args:\n input_ids (torch.Tensor): input ids. Shape: [bs, num_token]\n special_tokens_mask (list): special tokens mask.\n Returns:\n torch.Tensor: attention mask between each special tokens.\n \"\"\"\n input_ids = tokenized[\"input_ids\"]\n bs, num_token = input_ids.shape\n # special_tokens_mask: bs, num_token. 1 for special tokens. 
0 for normal tokens\n special_tokens_mask = torch.zeros((bs, num_token), device=input_ids.device).bool()\n for special_token in special_tokens_list:\n special_tokens_mask |= input_ids == special_token\n\n # idxs: each row is a list of indices of special tokens\n idxs = torch.nonzero(special_tokens_mask)\n\n # generate attention mask and positional ids\n attention_mask = (\n torch.eye(num_token, device=input_ids.device).bool().unsqueeze(0).repeat(bs, 1, 1)\n )\n position_ids = torch.zeros((bs, num_token), device=input_ids.device)\n previous_col = 0\n for i in range(idxs.shape[0]):\n row, col = idxs[i]\n if (col == 0) or (col == num_token - 1):\n attention_mask[row, col, col] = True\n position_ids[row, col] = 0\n else:\n attention_mask[row, previous_col + 1 : col + 1, previous_col + 1 : col + 1] = True\n position_ids[row, previous_col + 1 : col + 1] = torch.arange(\n 0, col - previous_col, device=input_ids.device\n )\n\n previous_col = col\n\n # # padding mask\n # padding_mask = tokenized['attention_mask']\n # attention_mask = attention_mask & padding_mask.unsqueeze(1).bool() & padding_mask.unsqueeze(2).bool()\n\n return attention_mask, position_ids.to(torch.long)" }, { "identifier": "generate_masks_with_special_tokens_and_transfer_map", "path": "models/GroundingDINO/bertwarper.py", "snippet": "def generate_masks_with_special_tokens_and_transfer_map(tokenized, special_tokens_list, tokenizer):\n \"\"\"Generate attention mask between each pair of special tokens\n Args:\n input_ids (torch.Tensor): input ids. Shape: [bs, num_token]\n special_tokens_mask (list): special tokens mask.\n Returns:\n torch.Tensor: attention mask between each special tokens.\n \"\"\"\n input_ids = tokenized[\"input_ids\"]\n bs, num_token = input_ids.shape\n # special_tokens_mask: bs, num_token. 1 for special tokens. 
0 for normal tokens\n special_tokens_mask = torch.zeros((bs, num_token), device=input_ids.device).bool()\n for special_token in special_tokens_list:\n special_tokens_mask |= input_ids == special_token\n\n # idxs: each row is a list of indices of special tokens\n idxs = torch.nonzero(special_tokens_mask)\n\n # generate attention mask and positional ids\n attention_mask = (\n torch.eye(num_token, device=input_ids.device).bool().unsqueeze(0).repeat(bs, 1, 1)\n )\n position_ids = torch.zeros((bs, num_token), device=input_ids.device)\n cate_to_token_mask_list = [[] for _ in range(bs)]\n previous_col = 0\n for i in range(idxs.shape[0]):\n row, col = idxs[i]\n if (col == 0) or (col == num_token - 1):\n attention_mask[row, col, col] = True\n position_ids[row, col] = 0\n else:\n attention_mask[row, previous_col + 1 : col + 1, previous_col + 1 : col + 1] = True\n position_ids[row, previous_col + 1 : col + 1] = torch.arange(\n 0, col - previous_col, device=input_ids.device\n )\n c2t_maski = torch.zeros((num_token), device=input_ids.device).bool()\n c2t_maski[previous_col + 1 : col] = True\n cate_to_token_mask_list[row].append(c2t_maski)\n previous_col = col\n\n cate_to_token_mask_list = [\n torch.stack(cate_to_token_mask_listi, dim=0)\n for cate_to_token_mask_listi in cate_to_token_mask_list\n ]\n\n # # padding mask\n # padding_mask = tokenized['attention_mask']\n # attention_mask = attention_mask & padding_mask.unsqueeze(1).bool() & padding_mask.unsqueeze(2).bool()\n\n return attention_mask, position_ids.to(torch.long), cate_to_token_mask_list" }, { "identifier": "build_transformer", "path": "models/GroundingDINO/transformer.py", "snippet": "def build_transformer(args):\n return Transformer(\n d_model=args.hidden_dim,\n dropout=args.dropout,\n nhead=args.nheads,\n num_queries=args.num_queries,\n dim_feedforward=args.dim_feedforward,\n num_encoder_layers=args.enc_layers,\n num_decoder_layers=args.dec_layers,\n normalize_before=args.pre_norm,\n return_intermediate_dec=True,\n query_dim=args.query_dim,\n activation=args.transformer_activation,\n num_patterns=args.num_patterns,\n num_feature_levels=args.num_feature_levels,\n enc_n_points=args.enc_n_points,\n dec_n_points=args.dec_n_points,\n learnable_tgt_init=True,\n # two stage\n two_stage_type=args.two_stage_type, # ['no', 'standard', 'early']\n embed_init_tgt=args.embed_init_tgt,\n use_text_enhancer=args.use_text_enhancer,\n use_fusion_layer=args.use_fusion_layer,\n use_checkpoint=args.use_checkpoint,\n use_transformer_ckpt=args.use_transformer_ckpt,\n use_text_cross_attention=args.use_text_cross_attention,\n text_dropout=args.text_dropout,\n fusion_dropout=args.fusion_dropout,\n fusion_droppath=args.fusion_droppath,\n )" }, { "identifier": "MLP", "path": "models/GroundingDINO/utils.py", "snippet": "class MLP(nn.Module):\n \"\"\"Very simple multi-layer perceptron (also called FFN)\"\"\"\n\n def __init__(self, input_dim, hidden_dim, output_dim, num_layers):\n super().__init__()\n self.num_layers = num_layers\n h = [hidden_dim] * (num_layers - 1)\n self.layers = nn.ModuleList(\n nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])\n )\n\n def forward(self, x):\n for i, layer in enumerate(self.layers):\n x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)\n return x" }, { "identifier": "ContrastiveEmbed", "path": "models/GroundingDINO/utils.py", "snippet": "class ContrastiveEmbed(nn.Module):\n def __init__(self, max_text_len=256):\n \"\"\"\n Args:\n max_text_len: max length of text.\n \"\"\"\n super().__init__()\n 
self.max_text_len = max_text_len\n\n def forward(self, x, text_dict):\n \"\"\"_summary_\n\n Args:\n x (_type_): _description_\n text_dict (_type_): _description_\n {\n 'encoded_text': encoded_text, # bs, 195, d_model\n 'text_token_mask': text_token_mask, # bs, 195\n # True for used tokens. False for padding tokens\n }\n Returns:\n _type_: _description_\n \"\"\"\n assert isinstance(text_dict, dict)\n # print(x) #torch.Size([2, 16320, 256])\n # print(text_dict)\n\n # import pdb;pdb.set_trace()\n y = text_dict[\"encoded_text\"] #torch.Size([2, 195, 256])\n text_token_mask = text_dict[\"text_token_mask\"]\n\n res = x @ y.transpose(-1, -2)\n res.masked_fill_(~text_token_mask[:, None, :], float(\"-inf\"))\n # 接着,对res进行掩码操作,将未使用的文本token(即padding的token)对应的得分置为负无穷float(\"-inf\")。这是为了在计算相似度时,排除padding部分的影响。\n\n\n # padding to max_text_len\n new_res = torch.full((*res.shape[:-1], self.max_text_len), float(\"-inf\"), device=res.device)\n new_res[..., : res.shape[-1]] = res #torch.Size([2, 16320, 195])\n\n return new_res" }, { "identifier": "sigmoid_focal_loss", "path": "models/GroundingDINO/utils.py", "snippet": "def sigmoid_focal_loss(\n inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2, no_reduction=False\n):\n \"\"\"\n Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.\n Args:\n inputs: A float tensor of arbitrary shape.\n The predictions for each example.\n targets: A float tensor with the same shape as inputs. Stores the binary\n classification label for each element in inputs\n (0 for the negative class and 1 for the positive class).\n alpha: (optional) Weighting factor in range (0,1) to balance\n positive vs negative examples. Default = -1 (no weighting).\n gamma: Exponent of the modulating factor (1 - p_t) to\n balance easy vs hard examples.\n Returns:\n Loss tensor\n \"\"\"\n prob = inputs.sigmoid()\n ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction=\"none\")\n p_t = prob * targets + (1 - prob) * (1 - targets)\n loss = ce_loss * ((1 - p_t) ** gamma)\n\n if alpha >= 0:\n alpha_t = alpha * targets + (1 - alpha) * (1 - targets)\n loss = alpha_t * loss\n\n if no_reduction:\n return loss\n\n return loss.mean(1).sum() / num_boxes" }, { "identifier": "build_matcher", "path": "models/GroundingDINO/matcher.py", "snippet": "def build_matcher(args):\n assert args.matcher_type in ['HungarianMatcher', 'SimpleMinsumMatcher'], \"Unknown args.matcher_type: {}\".format(args.matcher_type)\n if args.matcher_type == 'HungarianMatcher':\n return HungarianMatcher(\n cost_class=args.set_cost_class, cost_bbox=args.set_cost_bbox, cost_giou=args.set_cost_giou,\n focal_alpha=args.focal_alpha\n )\n elif args.matcher_type == 'SimpleMinsumMatcher':\n return SimpleMinsumMatcher(\n cost_class=args.set_cost_class, cost_bbox=args.set_cost_bbox, cost_giou=args.set_cost_giou,\n focal_alpha=args.focal_alpha\n ) \n else:\n raise NotImplementedError(\"Unknown args.matcher_type: {}\".format(args.matcher_type))" } ]
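The context list for this record closes here; its last entries describe the focal loss and the matcher builder used by the detection criterion. As a rough, self-contained illustration of the focal-loss formula in the sigmoid_focal_loss snippet above (dummy shapes chosen only for the example, not the actual training pipeline):

import torch
import torch.nn.functional as F

# dummy detector outputs: 2 images, 900 queries, 256 text tokens (illustrative shapes only)
logits = torch.randn(2, 900, 256)
targets = torch.zeros_like(logits)
targets[:, :10, :3] = 1.0                                   # pretend a few query/token pairs are positive
num_boxes, alpha, gamma = 20, 0.25, 2.0

prob = logits.sigmoid()
ce = F.binary_cross_entropy_with_logits(logits, targets, reduction="none")
p_t = prob * targets + (1 - prob) * (1 - targets)           # probability assigned to the true class
loss = ce * ((1 - p_t) ** gamma)                            # down-weight easy examples
alpha_t = alpha * targets + (1 - alpha) * (1 - targets)     # positive/negative balancing
loss = alpha_t * loss
print(loss.mean(1).sum() / num_boxes)                       # reduction used in the snippet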
import copy
import torch
import torch.nn.functional as F
from typing import List
from torch import nn
from torchvision.ops.boxes import nms
from transformers import AutoTokenizer, BertModel, BertTokenizer, RobertaModel, RobertaTokenizerFast
from groundingdino.util import box_ops, get_tokenlizer
from groundingdino.util.misc import (
    NestedTensor,
    accuracy,
    get_world_size,
    interpolate,
    inverse_sigmoid,
    is_dist_avail_and_initialized,
    nested_tensor_from_tensor_list,
)
from groundingdino.util.utils import get_phrases_from_posmap
from groundingdino.util.visualizer import COCOVisualizer
from groundingdino.util.vl_utils import create_positive_map_from_span
from ..registry import MODULE_BUILD_FUNCS
from .backbone import build_backbone
from .bertwarper import (
    BertModelWarper,
    generate_masks_with_special_tokens,
    generate_masks_with_special_tokens_and_transfer_map,
)
from .transformer import build_transformer
from .utils import MLP, ContrastiveEmbed, sigmoid_focal_loss
from .matcher import build_matcher
from pycocotools.coco import COCO
12341
nn.init.constant_(_bbox_embed.layers[-1].bias.data, 0) if dec_pred_bbox_embed_share: box_embed_layerlist = [_bbox_embed for i in range(transformer.num_decoder_layers)] else: box_embed_layerlist = [ copy.deepcopy(_bbox_embed) for i in range(transformer.num_decoder_layers) ] class_embed_layerlist = [_class_embed for i in range(transformer.num_decoder_layers)] self.bbox_embed = nn.ModuleList(box_embed_layerlist) self.class_embed = nn.ModuleList(class_embed_layerlist) self.transformer.decoder.bbox_embed = self.bbox_embed self.transformer.decoder.class_embed = self.class_embed # two stage self.two_stage_type = two_stage_type assert two_stage_type in ["no", "standard"], "unknown param {} of two_stage_type".format( two_stage_type ) if two_stage_type != "no": if two_stage_bbox_embed_share: assert dec_pred_bbox_embed_share self.transformer.enc_out_bbox_embed = _bbox_embed else: self.transformer.enc_out_bbox_embed = copy.deepcopy(_bbox_embed) if two_stage_class_embed_share: assert dec_pred_bbox_embed_share self.transformer.enc_out_class_embed = _class_embed else: self.transformer.enc_out_class_embed = copy.deepcopy(_class_embed) self.refpoint_embed = None self._reset_parameters() def _reset_parameters(self): # init input_proj for proj in self.input_proj: nn.init.xavier_uniform_(proj[0].weight, gain=1) nn.init.constant_(proj[0].bias, 0) def init_ref_points(self, use_num_queries): self.refpoint_embed = nn.Embedding(use_num_queries, self.query_dim) def forward(self, samples: NestedTensor, targets: List = None, **kw): """The forward expects a NestedTensor, which consists of: - samples.tensor: batched images, of shape [batch_size x 3 x H x W] - samples.mask: a binary mask of shape [batch_size x H x W], containing 1 on padded pixels It returns a dict with the following elements: - "pred_logits": the classification logits (including no-object) for all queries. Shape= [batch_size x num_queries x num_classes] - "pred_boxes": The normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These values are normalized in [0, 1], relative to the size of each individual image (disregarding possible padding). See PostProcess for information on how to retrieve the unnormalized bounding box. - "aux_outputs": Optional, only returned when auxilary losses are activated. It is a list of dictionnaries containing the two above keys for each decoder layer. 
""" if targets is None: captions = kw["captions"] else: captions = [t["caption"] for t in targets] # encoder texts tokenized = self.tokenizer(captions, padding="longest", return_tensors="pt").to( samples.device ) one_hot_token = tokenized ( text_self_attention_masks, position_ids, cate_to_token_mask_list, ) = generate_masks_with_special_tokens_and_transfer_map( tokenized, self.specical_tokens, self.tokenizer ) if text_self_attention_masks.shape[1] > self.max_text_len: text_self_attention_masks = text_self_attention_masks[ :, : self.max_text_len, : self.max_text_len ] position_ids = position_ids[:, : self.max_text_len] tokenized["input_ids"] = tokenized["input_ids"][:, : self.max_text_len] tokenized["attention_mask"] = tokenized["attention_mask"][:, : self.max_text_len] tokenized["token_type_ids"] = tokenized["token_type_ids"][:, : self.max_text_len] # extract text embeddings if self.sub_sentence_present: tokenized_for_encoder = {k: v for k, v in tokenized.items() if k != "attention_mask"} tokenized_for_encoder["attention_mask"] = text_self_attention_masks tokenized_for_encoder["position_ids"] = position_ids else: tokenized_for_encoder = tokenized bert_output = self.bert(**tokenized_for_encoder) # bs, 195, 768 encoded_text = self.feat_map(bert_output["last_hidden_state"]) # bs, 195, d_model text_token_mask = tokenized.attention_mask.bool() # bs, 195 # text_token_mask: True for nomask, False for mask # text_self_attention_masks: True for nomask, False for mask if encoded_text.shape[1] > self.max_text_len: encoded_text = encoded_text[:, : self.max_text_len, :] text_token_mask = text_token_mask[:, : self.max_text_len] position_ids = position_ids[:, : self.max_text_len] text_self_attention_masks = text_self_attention_masks[ :, : self.max_text_len, : self.max_text_len ] text_dict = { "encoded_text": encoded_text, # bs, 195, d_model "text_token_mask": text_token_mask, # bs, 195 "position_ids": position_ids, # bs, 195 "text_self_attention_masks": text_self_attention_masks, # bs, 195,195 } if isinstance(samples, (list, torch.Tensor)):
# ------------------------------------------------------------------------ # Grounding DINO # url: https://github.com/IDEA-Research/GroundingDINO # Copyright (c) 2023 IDEA. All Rights Reserved. # Licensed under the Apache License, Version 2.0 [see LICENSE for details] # ------------------------------------------------------------------------ # Conditional DETR model and criterion classes. # Copyright (c) 2021 Microsoft. All Rights Reserved. # Licensed under the Apache License, Version 2.0 [see LICENSE for details] # ------------------------------------------------------------------------ # Modified from DETR (https://github.com/facebookresearch/detr) # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. # ------------------------------------------------------------------------ # Modified from Deformable DETR (https://github.com/fundamentalvision/Deformable-DETR) # Copyright (c) 2020 SenseTime. All Rights Reserved. # ------------------------------------------------------------------------ class GroundingDINO(nn.Module): """This is the Cross-Attention Detector module that performs object detection""" def __init__( self, backbone, transformer, num_queries, aux_loss=False, iter_update=False, query_dim=2, num_feature_levels=1, nheads=8, # two stage two_stage_type="no", # ['no', 'standard'] dec_pred_bbox_embed_share=True, two_stage_class_embed_share=True, two_stage_bbox_embed_share=True, num_patterns=0, dn_number=100, dn_box_noise_scale=0.4, dn_label_noise_ratio=0.5, dn_labelbook_size=100, text_encoder_type="bert-base-uncased", sub_sentence_present=True, max_text_len=256, ): """Initializes the model. Parameters: backbone: torch module of the backbone to be used. See backbone.py transformer: torch module of the transformer architecture. See transformer.py num_queries: number of object queries, ie detection slot. This is the maximal number of objects Conditional DETR can detect in a single image. For COCO, we recommend 100 queries. aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used. 
""" super().__init__() self.num_queries = num_queries self.transformer = transformer self.hidden_dim = hidden_dim = transformer.d_model self.num_feature_levels = num_feature_levels self.nheads = nheads self.max_text_len = 256 self.sub_sentence_present = sub_sentence_present # setting query dim self.query_dim = query_dim assert query_dim == 4 # for dn training self.num_patterns = num_patterns self.dn_number = dn_number self.dn_box_noise_scale = dn_box_noise_scale self.dn_label_noise_ratio = dn_label_noise_ratio self.dn_labelbook_size = dn_labelbook_size # bert self.tokenizer = get_tokenlizer.get_tokenlizer(text_encoder_type) self.bert = get_tokenlizer.get_pretrained_language_model(text_encoder_type) self.bert.pooler.dense.weight.requires_grad_(False) self.bert.pooler.dense.bias.requires_grad_(False) self.bert = BertModelWarper(bert_model=self.bert) self.feat_map = nn.Linear(self.bert.config.hidden_size, self.hidden_dim, bias=True) nn.init.constant_(self.feat_map.bias.data, 0) nn.init.xavier_uniform_(self.feat_map.weight.data) # freeze # special tokens self.specical_tokens = self.tokenizer.convert_tokens_to_ids(["[CLS]", "[SEP]", ".", "?"]) # prepare input projection layers if num_feature_levels > 1: num_backbone_outs = len(backbone.num_channels) input_proj_list = [] for _ in range(num_backbone_outs): in_channels = backbone.num_channels[_] input_proj_list.append( nn.Sequential( nn.Conv2d(in_channels, hidden_dim, kernel_size=1), nn.GroupNorm(32, hidden_dim), ) ) for _ in range(num_feature_levels - num_backbone_outs): input_proj_list.append( nn.Sequential( nn.Conv2d(in_channels, hidden_dim, kernel_size=3, stride=2, padding=1), nn.GroupNorm(32, hidden_dim), ) ) in_channels = hidden_dim self.input_proj = nn.ModuleList(input_proj_list) else: assert two_stage_type == "no", "two_stage_type should be no if num_feature_levels=1 !!!" self.input_proj = nn.ModuleList( [ nn.Sequential( nn.Conv2d(backbone.num_channels[-1], hidden_dim, kernel_size=1), nn.GroupNorm(32, hidden_dim), ) ] ) self.backbone = backbone self.aux_loss = aux_loss self.box_pred_damping = box_pred_damping = None self.iter_update = iter_update assert iter_update, "Why not iter_update?" 
# prepare pred layers self.dec_pred_bbox_embed_share = dec_pred_bbox_embed_share # prepare class & box embed _class_embed = ContrastiveEmbed() _bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3) nn.init.constant_(_bbox_embed.layers[-1].weight.data, 0) nn.init.constant_(_bbox_embed.layers[-1].bias.data, 0) if dec_pred_bbox_embed_share: box_embed_layerlist = [_bbox_embed for i in range(transformer.num_decoder_layers)] else: box_embed_layerlist = [ copy.deepcopy(_bbox_embed) for i in range(transformer.num_decoder_layers) ] class_embed_layerlist = [_class_embed for i in range(transformer.num_decoder_layers)] self.bbox_embed = nn.ModuleList(box_embed_layerlist) self.class_embed = nn.ModuleList(class_embed_layerlist) self.transformer.decoder.bbox_embed = self.bbox_embed self.transformer.decoder.class_embed = self.class_embed # two stage self.two_stage_type = two_stage_type assert two_stage_type in ["no", "standard"], "unknown param {} of two_stage_type".format( two_stage_type ) if two_stage_type != "no": if two_stage_bbox_embed_share: assert dec_pred_bbox_embed_share self.transformer.enc_out_bbox_embed = _bbox_embed else: self.transformer.enc_out_bbox_embed = copy.deepcopy(_bbox_embed) if two_stage_class_embed_share: assert dec_pred_bbox_embed_share self.transformer.enc_out_class_embed = _class_embed else: self.transformer.enc_out_class_embed = copy.deepcopy(_class_embed) self.refpoint_embed = None self._reset_parameters() def _reset_parameters(self): # init input_proj for proj in self.input_proj: nn.init.xavier_uniform_(proj[0].weight, gain=1) nn.init.constant_(proj[0].bias, 0) def init_ref_points(self, use_num_queries): self.refpoint_embed = nn.Embedding(use_num_queries, self.query_dim) def forward(self, samples: NestedTensor, targets: List = None, **kw): """The forward expects a NestedTensor, which consists of: - samples.tensor: batched images, of shape [batch_size x 3 x H x W] - samples.mask: a binary mask of shape [batch_size x H x W], containing 1 on padded pixels It returns a dict with the following elements: - "pred_logits": the classification logits (including no-object) for all queries. Shape= [batch_size x num_queries x num_classes] - "pred_boxes": The normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These values are normalized in [0, 1], relative to the size of each individual image (disregarding possible padding). See PostProcess for information on how to retrieve the unnormalized bounding box. - "aux_outputs": Optional, only returned when auxilary losses are activated. It is a list of dictionnaries containing the two above keys for each decoder layer. 
""" if targets is None: captions = kw["captions"] else: captions = [t["caption"] for t in targets] # encoder texts tokenized = self.tokenizer(captions, padding="longest", return_tensors="pt").to( samples.device ) one_hot_token = tokenized ( text_self_attention_masks, position_ids, cate_to_token_mask_list, ) = generate_masks_with_special_tokens_and_transfer_map( tokenized, self.specical_tokens, self.tokenizer ) if text_self_attention_masks.shape[1] > self.max_text_len: text_self_attention_masks = text_self_attention_masks[ :, : self.max_text_len, : self.max_text_len ] position_ids = position_ids[:, : self.max_text_len] tokenized["input_ids"] = tokenized["input_ids"][:, : self.max_text_len] tokenized["attention_mask"] = tokenized["attention_mask"][:, : self.max_text_len] tokenized["token_type_ids"] = tokenized["token_type_ids"][:, : self.max_text_len] # extract text embeddings if self.sub_sentence_present: tokenized_for_encoder = {k: v for k, v in tokenized.items() if k != "attention_mask"} tokenized_for_encoder["attention_mask"] = text_self_attention_masks tokenized_for_encoder["position_ids"] = position_ids else: tokenized_for_encoder = tokenized bert_output = self.bert(**tokenized_for_encoder) # bs, 195, 768 encoded_text = self.feat_map(bert_output["last_hidden_state"]) # bs, 195, d_model text_token_mask = tokenized.attention_mask.bool() # bs, 195 # text_token_mask: True for nomask, False for mask # text_self_attention_masks: True for nomask, False for mask if encoded_text.shape[1] > self.max_text_len: encoded_text = encoded_text[:, : self.max_text_len, :] text_token_mask = text_token_mask[:, : self.max_text_len] position_ids = position_ids[:, : self.max_text_len] text_self_attention_masks = text_self_attention_masks[ :, : self.max_text_len, : self.max_text_len ] text_dict = { "encoded_text": encoded_text, # bs, 195, d_model "text_token_mask": text_token_mask, # bs, 195 "position_ids": position_ids, # bs, 195 "text_self_attention_masks": text_self_attention_masks, # bs, 195,195 } if isinstance(samples, (list, torch.Tensor)):
samples = nested_tensor_from_tensor_list(samples)
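This target line completes the type check that the cropped prefix stops on. A small hedged sketch of the pattern it restores, assuming groundingdino is installed so that the util already listed in this record's imports is available (the ensure_nested helper name is made up for the illustration):

import torch
from groundingdino.util.misc import nested_tensor_from_tensor_list

def ensure_nested(samples):
    # mirrors the prediction point: raw tensors or lists of tensors are wrapped into a NestedTensor
    if isinstance(samples, (list, torch.Tensor)):
        samples = nested_tensor_from_tensor_list(samples)   # the ground-truth next line
    return samples

batch = [torch.rand(3, 480, 640), torch.rand(3, 512, 512)]
padded = ensure_nested(batch)   # NestedTensor holding a shared padded tensor and its mask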
8
2023-10-14 02:20:31+00:00
16k
Beckschen/3D-TransUNet
nn_transunet/networks/transunet3d_model.py
[ { "identifier": "SegmentationNetwork", "path": "nn_transunet/networks/neural_network.py", "snippet": "class SegmentationNetwork(NeuralNetwork):\n def __init__(self):\n super(NeuralNetwork, self).__init__()\n\n # if we have 5 pooling then our patch size must be divisible by 2**5\n # for example in a 2d network that does 5 pool in x and 6 pool\n self.input_shape_must_be_divisible_by = None\n # in y this would be (32, 64)\n\n # we need to know this because we need to know if we are a 2d or a 3d netowrk\n self.conv_op = None # nn.Conv2d or nn.Conv3d\n\n # this tells us how many channely we have in the output. Important for preallocation in inference\n self.num_classes = None # number of channels in the output\n\n # depending on the loss, we do not hard code a nonlinearity into the architecture. To aggregate predictions\n # during inference, we need to apply the nonlinearity, however. So it is important to let the newtork know what\n # to apply in inference. For the most part this will be softmax\n self.inference_apply_nonlin = lambda x: x # softmax_helper\n\n # This is for saving a gaussian importance map for inference. It weights voxels higher that are closer to the\n # center. Prediction at the borders are often less accurate and are thus downweighted. Creating these Gaussians\n # can be expensive, so it makes sense to save and reuse them.\n self._gaussian_3d = self._patch_size_for_gaussian_3d = None\n self._gaussian_2d = self._patch_size_for_gaussian_2d = None\n\n def predict_3D(self, x: np.ndarray, do_mirroring: bool, mirror_axes: Tuple[int, ...] = (0, 1, 2),\n use_sliding_window: bool = False,\n step_size: float = 0.5, patch_size: Tuple[int, ...] = None, regions_class_order: Tuple[int, ...] = None,\n use_gaussian: bool = False, pad_border_mode: str = \"constant\",\n pad_kwargs: dict = None, all_in_gpu: bool = False,\n verbose: bool = True, mixed_precision: bool = True) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Use this function to predict a 3D image. It does not matter whether the network is a 2D or 3D U-Net, it will\n detect that automatically and run the appropriate code.\n When running predictions, you need to specify whether you want to run fully convolutional of sliding window\n based inference. We very strongly recommend you use sliding window with the default settings.\n It is the responsibility of the user to make sure the network is in the proper mode (eval for inference!). If\n the network is not in eval mode it will print a warning.\n :param x: Your input data. Must be a nd.ndarray of shape (c, x, y, z).\n :param do_mirroring: If True, use test time data augmentation in the form of mirroring\n :param mirror_axes: Determines which axes to use for mirroing. Per default, mirroring is done along all three\n axes\n :param use_sliding_window: if True, run sliding window prediction. Heavily recommended! This is also the default\n :param step_size: When running sliding window prediction, the step size determines the distance between adjacent\n predictions. The smaller the step size, the denser the predictions (and the longer it takes!). Step size is given\n as a fraction of the patch_size. 0.5 is the default and means that wen advance by patch_size * 0.5 between\n predictions. step_size cannot be larger than 1!\n :param patch_size: The patch size that was used for training the network. 
Do not use different patch sizes here,\n this will either crash or give potentially less accurate segmentations\n :param regions_class_order: Fabian only\n :param use_gaussian: (Only applies to sliding window prediction) If True, uses a Gaussian importance weighting\n to weigh predictions closer to the center of the current patch higher than those at the borders. The reason\n behind this is that the segmentation accuracy decreases towards the borders. Default (and recommended): True\n :param pad_border_mode: leave this alone\n :param pad_kwargs: leave this alone\n :param all_in_gpu: experimental. You probably want to leave this as is it\n :param verbose: Do you want a wall of text? If yes then set this to True\n :param mixed_precision: if True, will run inference in mixed precision with autocast()\n :return:\n \"\"\"\n torch.cuda.empty_cache()\n\n assert step_size <= 1, 'step_size must be smaller than 1. Otherwise there will be a gap between consecutive ' \\\n 'predictions'\n\n if verbose:\n print(\"debug: mirroring\", do_mirroring, \"mirror_axes\", mirror_axes)\n\n assert self.get_device() != \"cpu\", \"CPU not implemented\"\n\n if pad_kwargs is None:\n pad_kwargs = {'constant_values': 0}\n\n # A very long time ago the mirror axes were (2, 3, 4) for a 3d network. This is just to intercept any old\n # code that uses this convention\n if len(mirror_axes):\n if self.conv_op == nn.Conv2d:\n if max(mirror_axes) > 1:\n raise ValueError(\"mirror axes. duh\")\n if self.conv_op == nn.Conv3d:\n if max(mirror_axes) > 2:\n raise ValueError(\"mirror axes. duh\")\n\n if self.training:\n print(\n 'WARNING! Network is in train mode during inference. This may be intended, or not...')\n\n assert len(x.shape) == 4, \"data must have shape (c,x,y,z)\"\n\n if mixed_precision:\n context = autocast\n else:\n context = no_op\n\n with context():\n with torch.no_grad():\n if self.conv_op == nn.Conv3d:\n if use_sliding_window:\n res = self._internal_predict_3D_3Dconv_tiled(x, step_size, do_mirroring, mirror_axes, patch_size,\n regions_class_order, use_gaussian, pad_border_mode,\n pad_kwargs=pad_kwargs, all_in_gpu=all_in_gpu,\n verbose=verbose)\n else:\n res = self._internal_predict_3D_3Dconv(x, patch_size, do_mirroring, mirror_axes, regions_class_order,\n pad_border_mode, pad_kwargs=pad_kwargs, verbose=verbose)\n elif self.conv_op == nn.Conv2d:\n if use_sliding_window:\n res = self._internal_predict_3D_2Dconv_tiled(x, patch_size, do_mirroring, mirror_axes, step_size,\n regions_class_order, use_gaussian, pad_border_mode,\n pad_kwargs, all_in_gpu, False)\n else:\n res = self._internal_predict_3D_2Dconv(x, patch_size, do_mirroring, mirror_axes, regions_class_order,\n pad_border_mode, pad_kwargs, all_in_gpu, False)\n else:\n raise RuntimeError(\n \"Invalid conv op, cannot determine what dimensionality (2d/3d) the network is\")\n\n return res\n\n def predict_2D(self, x, do_mirroring: bool, mirror_axes: tuple = (0, 1, 2), use_sliding_window: bool = False,\n step_size: float = 0.5, patch_size: tuple = None, regions_class_order: tuple = None,\n use_gaussian: bool = False, pad_border_mode: str = \"constant\",\n pad_kwargs: dict = None, all_in_gpu: bool = False,\n verbose: bool = True, mixed_precision: bool = True) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Use this function to predict a 2D image. If this is a 3D U-Net it will crash because you cannot predict a 2D\n image with that (you dummy).\n When running predictions, you need to specify whether you want to run fully convolutional of sliding window\n based inference. 
We very strongly recommend you use sliding window with the default settings.\n It is the responsibility of the user to make sure the network is in the proper mode (eval for inference!). If\n the network is not in eval mode it will print a warning.\n :param x: Your input data. Must be a nd.ndarray of shape (c, x, y).\n :param do_mirroring: If True, use test time data augmentation in the form of mirroring\n :param mirror_axes: Determines which axes to use for mirroing. Per default, mirroring is done along all three\n axes\n :param use_sliding_window: if True, run sliding window prediction. Heavily recommended! This is also the default\n :param step_size: When running sliding window prediction, the step size determines the distance between adjacent\n predictions. The smaller the step size, the denser the predictions (and the longer it takes!). Step size is given\n as a fraction of the patch_size. 0.5 is the default and means that wen advance by patch_size * 0.5 between\n predictions. step_size cannot be larger than 1!\n :param patch_size: The patch size that was used for training the network. Do not use different patch sizes here,\n this will either crash or give potentially less accurate segmentations\n :param regions_class_order: Fabian only\n :param use_gaussian: (Only applies to sliding window prediction) If True, uses a Gaussian importance weighting\n to weigh predictions closer to the center of the current patch higher than those at the borders. The reason\n behind this is that the segmentation accuracy decreases towards the borders. Default (and recommended): True\n :param pad_border_mode: leave this alone\n :param pad_kwargs: leave this alone\n :param all_in_gpu: experimental. You probably want to leave this as is it\n :param verbose: Do you want a wall of text? If yes then set this to True\n :return:\n \"\"\"\n torch.cuda.empty_cache()\n\n assert step_size <= 1, 'step_size must be smaler than 1. Otherwise there will be a gap between consecutive ' \\\n 'predictions'\n\n if self.conv_op == nn.Conv3d:\n raise RuntimeError(\n \"Cannot predict 2d if the network is 3d. Dummy.\")\n\n if verbose:\n print(\"debug: mirroring\", do_mirroring, \"mirror_axes\", mirror_axes)\n\n assert self.get_device() != \"cpu\", \"CPU not implemented\"\n\n if pad_kwargs is None:\n pad_kwargs = {'constant_values': 0}\n\n # A very long time ago the mirror axes were (2, 3) for a 2d network. This is just to intercept any old\n # code that uses this convention\n if len(mirror_axes):\n if max(mirror_axes) > 1:\n raise ValueError(\"mirror axes. duh\")\n\n if self.training:\n print(\n 'WARNING! Network is in train mode during inference. This may be intended, or not...')\n\n assert len(x.shape) == 3, \"data must have shape (c,x,y)\"\n\n if mixed_precision:\n context = autocast\n else:\n context = no_op\n\n with context():\n with torch.no_grad():\n if self.conv_op == nn.Conv2d:\n if use_sliding_window:\n res = self._internal_predict_2D_2Dconv_tiled(x, step_size, do_mirroring, mirror_axes, patch_size,\n regions_class_order, use_gaussian, pad_border_mode,\n pad_kwargs, all_in_gpu, verbose)\n else:\n res = self._internal_predict_2D_2Dconv(x, patch_size, do_mirroring, mirror_axes, regions_class_order,\n pad_border_mode, pad_kwargs, verbose)\n else:\n raise RuntimeError(\n \"Invalid conv op, cannot determine what dimensionality (2d/3d) the network is\")\n\n return res\n\n @staticmethod\n def _get_gaussian(patch_size, sigma_scale=1. 
/ 8) -> np.ndarray:\n tmp = np.zeros(patch_size)\n center_coords = [i // 2 for i in patch_size]\n sigmas = [i * sigma_scale for i in patch_size]\n tmp[tuple(center_coords)] = 1\n gaussian_importance_map = gaussian_filter(\n tmp, sigmas, 0, mode='constant', cval=0)\n gaussian_importance_map = gaussian_importance_map / \\\n np.max(gaussian_importance_map) * 1\n gaussian_importance_map = gaussian_importance_map.astype(np.float32)\n\n # gaussian_importance_map cannot be 0, otherwise we may end up with nans!\n gaussian_importance_map[gaussian_importance_map == 0] = np.min(\n gaussian_importance_map[gaussian_importance_map != 0])\n\n return gaussian_importance_map\n\n @staticmethod\n def _compute_steps_for_sliding_window(patch_size: Tuple[int, ...], image_size: Tuple[int, ...], step_size: float) -> List[List[int]]:\n assert [i >= j for i, j in zip(\n image_size, patch_size)], \"image size must be as large or larger than patch_size\"\n assert 0 < step_size <= 1, 'step_size must be larger than 0 and smaller or equal to 1'\n\n # our step width is patch_size*step_size at most, but can be narrower. For example if we have image size of\n # 110, patch size of 64 and step_size of 0.5, then we want to make 3 steps starting at coordinate 0, 23, 46\n target_step_sizes_in_voxels = [i * step_size for i in patch_size]\n\n num_steps = [int(np.ceil((i - k) / j)) + 1 for i, j,\n k in zip(image_size, target_step_sizes_in_voxels, patch_size)]\n\n steps = []\n for dim in range(len(patch_size)):\n # the highest step value for this dimension is\n max_step_value = image_size[dim] - patch_size[dim]\n if num_steps[dim] > 1:\n actual_step_size = max_step_value / (num_steps[dim] - 1)\n else:\n # does not matter because there is only one step at 0\n actual_step_size = 99999999999\n\n steps_here = [int(np.round(actual_step_size * i))\n for i in range(num_steps[dim])]\n\n steps.append(steps_here)\n\n return steps\n\n def _internal_predict_3D_3Dconv_tiled(self, x: np.ndarray, step_size: float, do_mirroring: bool, mirror_axes: tuple,\n patch_size: tuple, regions_class_order: tuple, use_gaussian: bool,\n pad_border_mode: str, pad_kwargs: dict, all_in_gpu: bool,\n verbose: bool) -> Tuple[np.ndarray, np.ndarray]:\n # better safe than sorry\n assert len(x.shape) == 4, \"x must be (c, x, y, z)\"\n assert self.get_device() != \"cpu\"\n if verbose:\n print(\"step_size:\", step_size)\n if verbose:\n print(\"do mirror:\", do_mirroring)\n\n assert patch_size is not None, \"patch_size cannot be None for tiled prediction\"\n\n # for sliding window inference the image must at least be as large as the patch size. It does not matter\n # whether the shape is divisible by 2**num_pool as long as the patch size is\n data, slicer = pad_nd_image(\n x, patch_size, pad_border_mode, pad_kwargs, True, None)\n data_shape = data.shape # still c, x, y, z\n\n # compute the steps for sliding window\n steps = self._compute_steps_for_sliding_window(\n patch_size, data_shape[1:], step_size)\n num_tiles = len(steps[0]) * len(steps[1]) * len(steps[2])\n\n if verbose:\n print(\"data shape:\", data_shape)\n print(\"patch size:\", patch_size)\n print(\"steps (x, y, and z):\", steps)\n print(\"number of tiles:\", num_tiles)\n\n # we only need to compute that once. 
It can take a while to compute this due to the large sigma in\n # gaussian_filter\n if use_gaussian and num_tiles > 1:\n if self._gaussian_3d is None or not all(\n [i == j for i, j in zip(patch_size, self._patch_size_for_gaussian_3d)]):\n if verbose:\n print('computing Gaussian')\n gaussian_importance_map = self._get_gaussian(\n patch_size, sigma_scale=1. / 8)\n\n self._gaussian_3d = gaussian_importance_map\n self._patch_size_for_gaussian_3d = patch_size\n else:\n if verbose:\n print(\"using precomputed Gaussian\")\n gaussian_importance_map = self._gaussian_3d\n\n gaussian_importance_map = torch.from_numpy(gaussian_importance_map).cuda(self.get_device(),\n non_blocking=True)\n\n else:\n gaussian_importance_map = None\n\n if all_in_gpu:\n # If we run the inference in GPU only (meaning all tensors are allocated on the GPU, this reduces\n # CPU-GPU communication but required more GPU memory) we need to preallocate a few things on GPU\n\n if use_gaussian and num_tiles > 1:\n # half precision for the outputs should be good enough. If the outputs here are half, the\n # gaussian_importance_map should be as well\n gaussian_importance_map = gaussian_importance_map.half()\n\n # make sure we did not round anything to 0\n gaussian_importance_map[gaussian_importance_map == 0] = gaussian_importance_map[\n gaussian_importance_map != 0].min()\n\n add_for_nb_of_preds = gaussian_importance_map\n else:\n add_for_nb_of_preds = torch.ones(\n data.shape[1:], device=self.get_device())\n\n if verbose:\n print(\"initializing result array (on GPU)\")\n aggregated_results = torch.zeros([self.num_classes] + list(data.shape[1:]), dtype=torch.half,\n device=self.get_device())\n\n if verbose:\n print(\"moving data to GPU\")\n data = torch.from_numpy(data).cuda(\n self.get_device(), non_blocking=True)\n\n if verbose:\n print(\"initializing result_numsamples (on GPU)\")\n aggregated_nb_of_predictions = torch.zeros([self.num_classes] + list(data.shape[1:]), dtype=torch.half,\n device=self.get_device())\n else:\n if use_gaussian and num_tiles > 1:\n add_for_nb_of_preds = self._gaussian_3d\n else:\n add_for_nb_of_preds = np.ones(data.shape[1:], dtype=np.float32)\n aggregated_results = np.zeros(\n [self.num_classes] + list(data.shape[1:]), dtype=np.float32)\n aggregated_nb_of_predictions = np.zeros(\n [self.num_classes] + list(data.shape[1:]), dtype=np.float32)\n\n for x in steps[0]:\n lb_x = x\n ub_x = x + patch_size[0]\n for y in steps[1]:\n lb_y = y\n ub_y = y + patch_size[1]\n for z in steps[2]:\n lb_z = z\n ub_z = z + patch_size[2]\n\n predicted_patch = self._internal_maybe_mirror_and_pred_3D(\n data[None, :, lb_x:ub_x, lb_y:ub_y,\n lb_z:ub_z], mirror_axes, do_mirroring,\n gaussian_importance_map)[0]\n\n if all_in_gpu:\n predicted_patch = predicted_patch.half()\n else:\n predicted_patch = predicted_patch.cpu().numpy()\n\n aggregated_results[:, lb_x:ub_x,\n lb_y:ub_y, lb_z:ub_z] += predicted_patch\n aggregated_nb_of_predictions[:, lb_x:ub_x,\n lb_y:ub_y, lb_z:ub_z] += add_for_nb_of_preds\n\n # we reverse the padding here (remeber that we padded the input to be at least as large as the patch size\n slicer = tuple(\n [slice(0, aggregated_results.shape[i]) for i in\n range(len(aggregated_results.shape) - (len(slicer) - 1))] + slicer[1:])\n aggregated_results = aggregated_results[slicer]\n aggregated_nb_of_predictions = aggregated_nb_of_predictions[slicer]\n\n # computing the class_probabilities by dividing the aggregated result with result_numsamples\n class_probabilities = aggregated_results / 
aggregated_nb_of_predictions\n\n if regions_class_order is None:\n predicted_segmentation = class_probabilities.argmax(0)\n else:\n if all_in_gpu:\n class_probabilities_here = class_probabilities.detach().cpu().numpy()\n else:\n class_probabilities_here = class_probabilities\n predicted_segmentation = np.zeros(\n class_probabilities_here.shape[1:], dtype=np.float32)\n for i, c in enumerate(regions_class_order):\n predicted_segmentation[class_probabilities_here[i] > 0.5] = c\n\n if all_in_gpu:\n if verbose:\n print(\"copying results to CPU\")\n\n if regions_class_order is None:\n predicted_segmentation = predicted_segmentation.detach().cpu().numpy()\n\n class_probabilities = class_probabilities.detach().cpu().numpy()\n\n if verbose:\n print(\"prediction done\")\n return predicted_segmentation, class_probabilities\n\n def _internal_predict_2D_2Dconv(self, x: np.ndarray, min_size: Tuple[int, int], do_mirroring: bool,\n mirror_axes: tuple = (0, 1, 2), regions_class_order: tuple = None,\n pad_border_mode: str = \"constant\", pad_kwargs: dict = None,\n verbose: bool = True) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n This one does fully convolutional inference. No sliding window\n \"\"\"\n assert len(x.shape) == 3, \"x must be (c, x, y)\"\n assert self.get_device() != \"cpu\"\n assert self.input_shape_must_be_divisible_by is not None, 'input_shape_must_be_divisible_by must be set to ' \\\n 'run _internal_predict_2D_2Dconv'\n if verbose:\n print(\"do mirror:\", do_mirroring)\n\n data, slicer = pad_nd_image(x, min_size, pad_border_mode, pad_kwargs, True,\n self.input_shape_must_be_divisible_by)\n\n predicted_probabilities = self._internal_maybe_mirror_and_pred_2D(data[None], mirror_axes, do_mirroring,\n None)[0]\n\n slicer = tuple(\n [slice(0, predicted_probabilities.shape[i]) for i in range(len(predicted_probabilities.shape) -\n (len(slicer) - 1))] + slicer[1:])\n predicted_probabilities = predicted_probabilities[slicer]\n\n if regions_class_order is None:\n predicted_segmentation = predicted_probabilities.argmax(0)\n predicted_segmentation = predicted_segmentation.detach().cpu().numpy()\n predicted_probabilities = predicted_probabilities.detach().cpu().numpy()\n else:\n predicted_probabilities = predicted_probabilities.detach().cpu().numpy()\n predicted_segmentation = np.zeros(\n predicted_probabilities.shape[1:], dtype=np.float32)\n for i, c in enumerate(regions_class_order):\n predicted_segmentation[predicted_probabilities[i] > 0.5] = c\n\n return predicted_segmentation, predicted_probabilities\n\n def _internal_predict_3D_3Dconv(self, x: np.ndarray, min_size: Tuple[int, ...], do_mirroring: bool,\n mirror_axes: tuple = (0, 1, 2), regions_class_order: tuple = None,\n pad_border_mode: str = \"constant\", pad_kwargs: dict = None,\n verbose: bool = True) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n This one does fully convolutional inference. 
No sliding window\n \"\"\"\n assert len(x.shape) == 4, \"x must be (c, x, y, z)\"\n assert self.get_device() != \"cpu\"\n assert self.input_shape_must_be_divisible_by is not None, 'input_shape_must_be_divisible_by must be set to ' \\\n 'run _internal_predict_3D_3Dconv'\n if verbose:\n print(\"do mirror:\", do_mirroring)\n\n data, slicer = pad_nd_image(x, min_size, pad_border_mode, pad_kwargs, True,\n self.input_shape_must_be_divisible_by)\n\n predicted_probabilities = self._internal_maybe_mirror_and_pred_3D(data[None], mirror_axes, do_mirroring,\n None)[0]\n\n slicer = tuple(\n [slice(0, predicted_probabilities.shape[i]) for i in range(len(predicted_probabilities.shape) -\n (len(slicer) - 1))] + slicer[1:])\n predicted_probabilities = predicted_probabilities[slicer]\n\n if regions_class_order is None:\n predicted_segmentation = predicted_probabilities.argmax(0)\n predicted_segmentation = predicted_segmentation.detach().cpu().numpy()\n predicted_probabilities = predicted_probabilities.detach().cpu().numpy()\n else:\n predicted_probabilities = predicted_probabilities.detach().cpu().numpy()\n predicted_segmentation = np.zeros(\n predicted_probabilities.shape[1:], dtype=np.float32)\n for i, c in enumerate(regions_class_order):\n predicted_segmentation[predicted_probabilities[i] > 0.5] = c\n\n return predicted_segmentation, predicted_probabilities\n\n def _internal_maybe_mirror_and_pred_3D(self, x: Union[np.ndarray, torch.tensor], mirror_axes: tuple,\n do_mirroring: bool = True,\n mult: np.ndarray or torch.tensor = None) -> torch.tensor:\n assert len(x.shape) == 5, 'x must be (b, c, x, y, z)'\n # everything in here takes place on the GPU. If x and mult are not yet on GPU this will be taken care of here\n # we now return a cuda tensor! Not numpy array!\n\n x = to_cuda(maybe_to_torch(x), gpu_id=self.get_device())\n result_torch = torch.zeros([1, self.num_classes] + list(x.shape[2:]),\n dtype=torch.float).cuda(self.get_device(), non_blocking=True)\n\n if mult is not None:\n mult = to_cuda(maybe_to_torch(mult), gpu_id=self.get_device())\n\n if do_mirroring:\n mirror_idx = 8\n num_results = 2 ** len(mirror_axes)\n else:\n mirror_idx = 1\n num_results = 1\n\n for m in range(mirror_idx):\n if m == 0:\n pred = self.inference_apply_nonlin(self(x)) # self(x) - forward\n result_torch += 1 / num_results * pred\n\n if m == 1 and (2 in mirror_axes):\n pred = self.inference_apply_nonlin(self(torch.flip(x, (4, ))))\n result_torch += 1 / num_results * torch.flip(pred, (4,))\n\n if m == 2 and (1 in mirror_axes):\n pred = self.inference_apply_nonlin(self(torch.flip(x, (3, ))))\n result_torch += 1 / num_results * torch.flip(pred, (3,))\n\n if m == 3 and (2 in mirror_axes) and (1 in mirror_axes):\n pred = self.inference_apply_nonlin(self(torch.flip(x, (4, 3))))\n result_torch += 1 / num_results * torch.flip(pred, (4, 3))\n\n if m == 4 and (0 in mirror_axes):\n pred = self.inference_apply_nonlin(self(torch.flip(x, (2, ))))\n result_torch += 1 / num_results * torch.flip(pred, (2,))\n\n if m == 5 and (0 in mirror_axes) and (2 in mirror_axes):\n pred = self.inference_apply_nonlin(self(torch.flip(x, (4, 2))))\n result_torch += 1 / num_results * torch.flip(pred, (4, 2))\n\n if m == 6 and (0 in mirror_axes) and (1 in mirror_axes):\n pred = self.inference_apply_nonlin(self(torch.flip(x, (3, 2))))\n result_torch += 1 / num_results * torch.flip(pred, (3, 2))\n\n if m == 7 and (0 in mirror_axes) and (1 in mirror_axes) and (2 in mirror_axes):\n pred = self.inference_apply_nonlin(\n self(torch.flip(x, (4, 3, 2))))\n 
result_torch += 1 / num_results * torch.flip(pred, (4, 3, 2))\n\n if mult is not None:\n result_torch[:, :] *= mult\n\n return result_torch\n\n def _internal_maybe_mirror_and_pred_2D(self, x: Union[np.ndarray, torch.tensor], mirror_axes: tuple,\n do_mirroring: bool = True,\n mult: np.ndarray or torch.tensor = None) -> torch.tensor:\n # everything in here takes place on the GPU. If x and mult are not yet on GPU this will be taken care of here\n # we now return a cuda tensor! Not numpy array!\n assert len(x.shape) == 4, 'x must be (b, c, x, y)'\n\n x = to_cuda(maybe_to_torch(x), gpu_id=self.get_device())\n result_torch = torch.zeros([x.shape[0], self.num_classes] + list(x.shape[2:]),\n dtype=torch.float).cuda(self.get_device(), non_blocking=True)\n\n if mult is not None:\n mult = to_cuda(maybe_to_torch(mult), gpu_id=self.get_device())\n\n if do_mirroring:\n mirror_idx = 4\n num_results = 2 ** len(mirror_axes)\n else:\n mirror_idx = 1\n num_results = 1\n\n for m in range(mirror_idx):\n if m == 0:\n pred = self.inference_apply_nonlin(self(x))\n result_torch += 1 / num_results * pred\n\n if m == 1 and (1 in mirror_axes):\n pred = self.inference_apply_nonlin(self(torch.flip(x, (3, ))))\n result_torch += 1 / num_results * torch.flip(pred, (3, ))\n\n if m == 2 and (0 in mirror_axes):\n pred = self.inference_apply_nonlin(self(torch.flip(x, (2, ))))\n result_torch += 1 / num_results * torch.flip(pred, (2, ))\n\n if m == 3 and (0 in mirror_axes) and (1 in mirror_axes):\n pred = self.inference_apply_nonlin(self(torch.flip(x, (3, 2))))\n result_torch += 1 / num_results * torch.flip(pred, (3, 2))\n\n if mult is not None:\n result_torch[:, :] *= mult\n\n return result_torch\n\n def _internal_predict_2D_2Dconv_tiled(self, x: np.ndarray, step_size: float, do_mirroring: bool, mirror_axes: tuple,\n patch_size: tuple, regions_class_order: tuple, use_gaussian: bool,\n pad_border_mode: str, pad_kwargs: dict, all_in_gpu: bool,\n verbose: bool) -> Tuple[np.ndarray, np.ndarray]:\n # better safe than sorry\n assert len(x.shape) == 3, \"x must be (c, x, y)\"\n assert self.get_device() != \"cpu\"\n if verbose:\n print(\"step_size:\", step_size)\n if verbose:\n print(\"do mirror:\", do_mirroring)\n\n assert patch_size is not None, \"patch_size cannot be None for tiled prediction\"\n\n # for sliding window inference the image must at least be as large as the patch size. It does not matter\n # whether the shape is divisible by 2**num_pool as long as the patch size is\n data, slicer = pad_nd_image(\n x, patch_size, pad_border_mode, pad_kwargs, True, None)\n data_shape = data.shape # still c, x, y\n\n # compute the steps for sliding window\n steps = self._compute_steps_for_sliding_window(\n patch_size, data_shape[1:], step_size)\n num_tiles = len(steps[0]) * len(steps[1])\n\n if verbose:\n print(\"data shape:\", data_shape)\n print(\"patch size:\", patch_size)\n print(\"steps (x, y, and z):\", steps)\n print(\"number of tiles:\", num_tiles)\n\n # we only need to compute that once. It can take a while to compute this due to the large sigma in\n # gaussian_filter\n if use_gaussian and num_tiles > 1:\n if self._gaussian_2d is None or not all(\n [i == j for i, j in zip(patch_size, self._patch_size_for_gaussian_2d)]):\n if verbose:\n print('computing Gaussian')\n gaussian_importance_map = self._get_gaussian(\n patch_size, sigma_scale=1. 
/ 8)\n\n self._gaussian_2d = gaussian_importance_map\n self._patch_size_for_gaussian_2d = patch_size\n else:\n if verbose:\n print(\"using precomputed Gaussian\")\n gaussian_importance_map = self._gaussian_2d\n\n gaussian_importance_map = torch.from_numpy(gaussian_importance_map).cuda(self.get_device(),\n non_blocking=True)\n else:\n gaussian_importance_map = None\n\n if all_in_gpu:\n # If we run the inference in GPU only (meaning all tensors are allocated on the GPU, this reduces\n # CPU-GPU communication but required more GPU memory) we need to preallocate a few things on GPU\n\n if use_gaussian and num_tiles > 1:\n # half precision for the outputs should be good enough. If the outputs here are half, the\n # gaussian_importance_map should be as well\n gaussian_importance_map = gaussian_importance_map.half()\n\n # make sure we did not round anything to 0\n gaussian_importance_map[gaussian_importance_map == 0] = gaussian_importance_map[\n gaussian_importance_map != 0].min()\n\n add_for_nb_of_preds = gaussian_importance_map\n else:\n add_for_nb_of_preds = torch.ones(\n data.shape[1:], device=self.get_device())\n\n if verbose:\n print(\"initializing result array (on GPU)\")\n aggregated_results = torch.zeros([self.num_classes] + list(data.shape[1:]), dtype=torch.half,\n device=self.get_device())\n\n if verbose:\n print(\"moving data to GPU\")\n data = torch.from_numpy(data).cuda(\n self.get_device(), non_blocking=True)\n\n if verbose:\n print(\"initializing result_numsamples (on GPU)\")\n aggregated_nb_of_predictions = torch.zeros([self.num_classes] + list(data.shape[1:]), dtype=torch.half,\n device=self.get_device())\n else:\n if use_gaussian and num_tiles > 1:\n add_for_nb_of_preds = self._gaussian_2d\n else:\n add_for_nb_of_preds = np.ones(data.shape[1:], dtype=np.float32)\n aggregated_results = np.zeros(\n [self.num_classes] + list(data.shape[1:]), dtype=np.float32)\n aggregated_nb_of_predictions = np.zeros(\n [self.num_classes] + list(data.shape[1:]), dtype=np.float32)\n\n for x in steps[0]:\n lb_x = x\n ub_x = x + patch_size[0]\n for y in steps[1]:\n lb_y = y\n ub_y = y + patch_size[1]\n\n predicted_patch = self._internal_maybe_mirror_and_pred_2D(\n data[None, :, lb_x:ub_x, lb_y:ub_y], mirror_axes, do_mirroring,\n gaussian_importance_map)[0]\n\n if all_in_gpu:\n predicted_patch = predicted_patch.half()\n else:\n predicted_patch = predicted_patch.cpu().numpy()\n\n aggregated_results[:, lb_x:ub_x, lb_y:ub_y] += predicted_patch\n aggregated_nb_of_predictions[:, lb_x:ub_x,\n lb_y:ub_y] += add_for_nb_of_preds\n\n # we reverse the padding here (remeber that we padded the input to be at least as large as the patch size\n slicer = tuple(\n [slice(0, aggregated_results.shape[i]) for i in\n range(len(aggregated_results.shape) - (len(slicer) - 1))] + slicer[1:])\n aggregated_results = aggregated_results[slicer]\n aggregated_nb_of_predictions = aggregated_nb_of_predictions[slicer]\n\n # computing the class_probabilities by dividing the aggregated result with result_numsamples\n class_probabilities = aggregated_results / aggregated_nb_of_predictions\n\n if regions_class_order is None:\n predicted_segmentation = class_probabilities.argmax(0)\n else:\n if all_in_gpu:\n class_probabilities_here = class_probabilities.detach().cpu().numpy()\n else:\n class_probabilities_here = class_probabilities\n predicted_segmentation = np.zeros(\n class_probabilities_here.shape[1:], dtype=np.float32)\n for i, c in enumerate(regions_class_order):\n predicted_segmentation[class_probabilities_here[i] > 0.5] = c\n\n if 
all_in_gpu:\n if verbose:\n print(\"copying results to CPU\")\n\n if regions_class_order is None:\n predicted_segmentation = predicted_segmentation.detach().cpu().numpy()\n\n class_probabilities = class_probabilities.detach().cpu().numpy()\n\n if verbose:\n print(\"prediction done\")\n return predicted_segmentation, class_probabilities\n\n def _internal_predict_3D_2Dconv(self, x: np.ndarray, min_size: Tuple[int, int], do_mirroring: bool,\n mirror_axes: tuple = (0, 1), regions_class_order: tuple = None,\n pad_border_mode: str = \"constant\", pad_kwargs: dict = None,\n all_in_gpu: bool = False, verbose: bool = True) -> Tuple[np.ndarray, np.ndarray]:\n if all_in_gpu:\n raise NotImplementedError\n assert len(x.shape) == 4, \"data must be c, x, y, z\"\n predicted_segmentation = []\n softmax_pred = []\n for s in range(x.shape[1]):\n pred_seg, softmax_pres = self._internal_predict_2D_2Dconv(\n x[:, s], min_size, do_mirroring, mirror_axes, regions_class_order, pad_border_mode, pad_kwargs, verbose)\n predicted_segmentation.append(pred_seg[None])\n softmax_pred.append(softmax_pres[None])\n predicted_segmentation = np.vstack(predicted_segmentation)\n softmax_pred = np.vstack(softmax_pred).transpose((1, 0, 2, 3))\n return predicted_segmentation, softmax_pred\n\n def predict_3D_pseudo3D_2Dconv(self, x: np.ndarray, min_size: Tuple[int, int], do_mirroring: bool,\n mirror_axes: tuple = (0, 1), regions_class_order: tuple = None,\n pseudo3D_slices: int = 5, all_in_gpu: bool = False,\n pad_border_mode: str = \"constant\", pad_kwargs: dict = None,\n verbose: bool = True) -> Tuple[np.ndarray, np.ndarray]:\n if all_in_gpu:\n raise NotImplementedError\n assert len(x.shape) == 4, \"data must be c, x, y, z\"\n assert pseudo3D_slices % 2 == 1, \"pseudo3D_slices must be odd\"\n extra_slices = (pseudo3D_slices - 1) // 2\n\n shp_for_pad = np.array(x.shape)\n shp_for_pad[1] = extra_slices\n\n pad = np.zeros(shp_for_pad, dtype=np.float32)\n data = np.concatenate((pad, x, pad), 1)\n\n predicted_segmentation = []\n softmax_pred = []\n for s in range(extra_slices, data.shape[1] - extra_slices):\n d = data[:, (s - extra_slices):(s + extra_slices + 1)]\n d = d.reshape((-1, d.shape[-2], d.shape[-1]))\n pred_seg, softmax_pres = \\\n self._internal_predict_2D_2Dconv(d, min_size, do_mirroring, mirror_axes,\n regions_class_order, pad_border_mode, pad_kwargs, verbose)\n predicted_segmentation.append(pred_seg[None])\n softmax_pred.append(softmax_pres[None])\n predicted_segmentation = np.vstack(predicted_segmentation)\n softmax_pred = np.vstack(softmax_pred).transpose((1, 0, 2, 3))\n\n return predicted_segmentation, softmax_pred\n\n def _internal_predict_3D_2Dconv_tiled(self, x: np.ndarray, patch_size: Tuple[int, int], do_mirroring: bool,\n mirror_axes: tuple = (0, 1), step_size: float = 0.5,\n regions_class_order: tuple = None, use_gaussian: bool = False,\n pad_border_mode: str = \"edge\", pad_kwargs: dict = None,\n all_in_gpu: bool = False,\n verbose: bool = True) -> Tuple[np.ndarray, np.ndarray]:\n if all_in_gpu:\n raise NotImplementedError\n\n assert len(x.shape) == 4, \"data must be c, x, y, z\"\n\n predicted_segmentation = []\n softmax_pred = []\n\n for s in range(x.shape[1]):\n pred_seg, softmax_pres = self._internal_predict_2D_2Dconv_tiled(\n x[:, s], step_size, do_mirroring, mirror_axes, patch_size, regions_class_order, use_gaussian,\n pad_border_mode, pad_kwargs, all_in_gpu, verbose)\n\n predicted_segmentation.append(pred_seg[None])\n softmax_pred.append(softmax_pres[None])\n\n predicted_segmentation = 
np.vstack(predicted_segmentation)\n softmax_pred = np.vstack(softmax_pred).transpose((1, 0, 2, 3))\n\n return predicted_segmentation, softmax_pred" }, { "identifier": "Transformer", "path": "nn_transunet/networks/vit_modeling.py", "snippet": "class Transformer(nn.Module):\n def __init__(self, config, feat_size, vis, feat_channels, use_layer_scale):\n super(Transformer, self).__init__()\n self.embeddings = Embeddings(config, feat_size=feat_size, in_channels=feat_channels)\n self.encoder = Encoder(config, vis, use_layer_scale)\n\n def forward(self, input_ids):\n embedding_output, features = self.embeddings(input_ids)\n encoded, attn_weights = self.encoder(embedding_output) # (B, n_patch, hidden)\n\n B, n_patch, hidden = encoded.size() # reshape from (B, n_patch, hidden) to (B, h, w, hidden)\n h, w, d = input_ids.shape[2:]\n x = encoded.permute(0, 2, 1)\n encoded = x.contiguous().view(B, hidden, h, w, d)\n # encoded = self.conv_more(x)\n return encoded, attn_weights\n\n def load_from(self, weights):\n with torch.no_grad():\n\n # self.transformer.embeddings.patch_embeddings.weight.copy_(np2th(weights[\"embedding/kernel\"], conv=True))\n # self.transformer.embeddings.patch_embeddings.bias.copy_(np2th(weights[\"embedding/bias\"]))\n\n self.encoder.encoder_norm.weight.copy_(np2th(weights[\"Transformer/encoder_norm/scale\"]))\n self.encoder.encoder_norm.bias.copy_(np2th(weights[\"Transformer/encoder_norm/bias\"]))\n\n posemb = np2th(weights[\"Transformer/posembed_input/pos_embedding\"])\n\n posemb_new = self.embeddings.position_embeddings\n if posemb.size() == posemb_new.size():\n self.embeddings.position_embeddings.copy_(posemb)\n elif posemb.size()[1]-1 == posemb_new.size()[1]:\n posemb = posemb[:, 1:]\n self.embeddings.position_embeddings.copy_(posemb)\n else:\n print(\"load_pretrained: resized variant: %s to %s\" % (posemb.size(), posemb_new.size()))\n ntok_new = posemb_new.size(1)\n print(\"ntok_new\", ntok_new)\n _, posemb_grid = posemb[:, :1], posemb[0, 1:] # if 'seg'\n posemb_up = nn.Upsample(size=ntok_new)(posemb_grid.permute(1,0).unsqueeze(0))\n posemb_up = posemb_up.permute(0,2,1) # (1, ntok_new, d)\n self.embeddings.position_embeddings.copy_(posemb_up)\n\n # Encoder whole\n for bname, block in self.encoder.named_children():\n for uname, unit in block.named_children():\n unit.load_from(weights, n_block=uname)" }, { "identifier": "CONFIGS", "path": "nn_transunet/networks/vit_modeling.py", "snippet": "CONFIGS = {\n 'ViT-B_16': configs.get_b16_config(),\n 'ViT-B_32': configs.get_b32_config(),\n 'ViT-L_16': configs.get_l16_config(),\n 'ViT-L_32': configs.get_l32_config(),\n 'ViT-H_14': configs.get_h14_config(),\n 'R50-ViT-B_16': configs.get_r50_b16_config(),\n 'R50-ViT-L_16': configs.get_r50_l16_config(),\n 'testing': configs.get_testing(),\n}" } ]
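The `_internal_predict_2D_2Dconv_tiled` snippet in this context accumulates overlapping tile predictions and then normalises them by the accumulated per-voxel weights (`aggregated_results / aggregated_nb_of_predictions`), using a Gaussian importance map peaked at the patch centre; the map itself is handed to `_internal_maybe_mirror_and_pred_2D`, which presumably applies it to each patch prediction. The helper that builds that map is not part of this excerpt (only the trailing `/ 8)` of its sigma computation is visible), so the sketch below is a reconstruction of the usual recipe plus a toy weighted aggregation; the name `get_gaussian_importance_map` is illustrative, not taken from the repository.

import numpy as np
from scipy.ndimage import gaussian_filter

def get_gaussian_importance_map(patch_size, sigma_scale=1. / 8):
    # blur a centred delta image, then rescale so the centre weight is 1
    tmp = np.zeros(patch_size, dtype=np.float32)
    tmp[tuple(i // 2 for i in patch_size)] = 1
    sigmas = [i * sigma_scale for i in patch_size]
    gaussian = gaussian_filter(tmp, sigmas, mode='constant', cval=0)
    gaussian = gaussian / gaussian.max()
    # keep every weight strictly positive so the later division is safe,
    # mirroring the "make sure we did not round anything to 0" step in the snippet
    gaussian[gaussian == 0] = gaussian[gaussian != 0].min()
    return gaussian

# toy example: two horizontally overlapping 4x4 tiles on a 2-class 6x4 canvas
w = get_gaussian_importance_map((4, 4))
acc = np.zeros((2, 6, 4), dtype=np.float32)   # aggregated, Gaussian-weighted logits
nb = np.zeros_like(acc)                       # aggregated weights per voxel
for x0, pred in [(0, np.ones((2, 4, 4))), (2, 2 * np.ones((2, 4, 4)))]:
    acc[:, x0:x0 + 4] += pred * w
    nb[:, x0:x0 + 4] += w
class_probabilities = acc / nb                # same normalisation as in the snippet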
import torch import numpy as np import torch.nn.functional import torch.nn.functional as F from copy import deepcopy from torch import nn from torch.cuda.amp import autocast from scipy.optimize import linear_sum_assignment from ..networks.neural_network import SegmentationNetwork from .vit_modeling import Transformer from .vit_modeling import CONFIGS as CONFIGS_ViT from .mask2former_modeling.transformer_decoder.mask2former_transformer_decoder3d import MultiScaleMaskedTransformerDecoder3d from .mask2former_modeling.transformer_decoder.maskformer_transformer_decoder3d import StandardTransformerDecoder
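Among these imports, `linear_sum_assignment` is the piece that, together with the Mask2Former-style transformer decoders, points at DETR-style Hungarian matching between predicted queries and ground-truth instances; the matcher itself sits outside this excerpt. A minimal, self-contained illustration of the solver on a toy cost matrix (rows = queries, columns = targets):

import numpy as np
from scipy.optimize import linear_sum_assignment

cost = np.array([
    [0.9, 0.1, 0.5],
    [0.4, 0.8, 0.2],
    [0.3, 0.6, 0.7],
])
row_ind, col_ind = linear_sum_assignment(cost)  # minimises the total matching cost
# optimal pairing here: query 0 -> target 1, query 1 -> target 2, query 2 -> target 0
print(list(zip(row_ind, col_ind)))
print(cost[row_ind, col_ind].sum())             # ~0.6, the minimum achievable total cost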
12,846
self.instnorm = self.norm_op(output_channels, **self.norm_op_kwargs) self.lrelu = self.nonlin(**self.nonlin_kwargs) def forward(self, x): x = self.conv(x) if self.dropout is not None: x = self.dropout(x) return self.lrelu(self.instnorm(x)) class ConvDropoutNonlinNorm(ConvDropoutNormNonlin): def forward(self, x): x = self.conv(x) if self.dropout is not None: x = self.dropout(x) return self.instnorm(self.lrelu(x)) class StackedConvLayers(nn.Module): def __init__(self, input_feature_channels, output_feature_channels, num_convs, conv_op=nn.Conv2d, conv_kwargs=None, norm_op=nn.BatchNorm2d, norm_op_kwargs=None, dropout_op=nn.Dropout2d, dropout_op_kwargs=None, nonlin=nn.LeakyReLU, nonlin_kwargs=None, first_stride=None, basic_block=ConvDropoutNormNonlin): ''' stacks ConvDropoutNormLReLU layers. initial_stride will only be applied to first layer in the stack. The other parameters affect all layers :param input_feature_channels: :param output_feature_channels: :param num_convs: :param dilation: :param kernel_size: :param padding: :param dropout: :param initial_stride: :param conv_op: :param norm_op: :param dropout_op: :param inplace: :param neg_slope: :param norm_affine: :param conv_bias: ''' self.input_channels = input_feature_channels self.output_channels = output_feature_channels if nonlin_kwargs is None: nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True} if dropout_op_kwargs is None: dropout_op_kwargs = {'p': 0.5, 'inplace': True} if norm_op_kwargs is None: norm_op_kwargs = {'eps': 1e-5, 'affine': True, 'momentum': 0.1} if conv_kwargs is None: conv_kwargs = {'kernel_size': 3, 'stride': 1, 'padding': 1, 'dilation': 1, 'bias': True} self.nonlin_kwargs = nonlin_kwargs self.nonlin = nonlin self.dropout_op = dropout_op self.dropout_op_kwargs = dropout_op_kwargs self.norm_op_kwargs = norm_op_kwargs self.conv_kwargs = conv_kwargs self.conv_op = conv_op self.norm_op = norm_op if first_stride is not None: self.conv_kwargs_first_conv = deepcopy(conv_kwargs) self.conv_kwargs_first_conv['stride'] = first_stride else: self.conv_kwargs_first_conv = conv_kwargs super(StackedConvLayers, self).__init__() self.blocks = nn.Sequential( *([basic_block(input_feature_channels, output_feature_channels, self.conv_op, self.conv_kwargs_first_conv, self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs, self.nonlin, self.nonlin_kwargs)] + [basic_block(output_feature_channels, output_feature_channels, self.conv_op, self.conv_kwargs, self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs, self.nonlin, self.nonlin_kwargs) for _ in range(num_convs - 1)])) def forward(self, x): return self.blocks(x) def print_module_training_status(module): if isinstance(module, nn.Conv2d) or isinstance(module, nn.Conv3d) or isinstance(module, nn.Dropout3d) or \ isinstance(module, nn.Dropout2d) or isinstance(module, nn.Dropout) or isinstance(module, nn.InstanceNorm3d) \ or isinstance(module, nn.InstanceNorm2d) or isinstance(module, nn.InstanceNorm1d) \ or isinstance(module, nn.BatchNorm2d) or isinstance(module, nn.BatchNorm3d) or isinstance(module, nn.BatchNorm1d): print(str(module), module.training) class Upsample(nn.Module): def __init__(self, size=None, scale_factor=None, mode='nearest', align_corners=False): super(Upsample, self).__init__() self.align_corners = align_corners self.mode = mode self.scale_factor = scale_factor self.size = size def forward(self, x): return nn.functional.interpolate(x, size=self.size, scale_factor=self.scale_factor, mode=self.mode, align_corners=self.align_corners) 
def c2_xavier_fill(module: nn.Module) -> None: """ Initialize `module.weight` using the "XavierFill" implemented in Caffe2. Also initializes `module.bias` to 0. Args: module (torch.nn.Module): module to initialize. """ # Caffe2 implementation of XavierFill in fact # corresponds to kaiming_uniform_ in PyTorch nn.init.kaiming_uniform_(module.weight, a=1) if module.bias is not None: # pyre-fixme[6]: Expected `Tensor` for 1st param but got `Union[nn.Module, # torch.Tensor]`. nn.init.constant_(module.bias, 0)
# 3D version of TransUNet; Copyright Johns Hopkins University # Modified from nnUNet softmax_helper = lambda x: F.softmax(x, 1) class InitWeights_He(object): def __init__(self, neg_slope=1e-2): self.neg_slope = neg_slope def __call__(self, module): if isinstance(module, nn.Conv3d) or isinstance(module, nn.Conv2d) or isinstance(module, nn.ConvTranspose2d) or isinstance(module, nn.ConvTranspose3d): module.weight = nn.init.kaiming_normal_(module.weight, a=self.neg_slope) if module.bias is not None: module.bias = nn.init.constant_(module.bias, 0) class ConvDropoutNormNonlin(nn.Module): """ fixes a bug in ConvDropoutNormNonlin where lrelu was used regardless of nonlin. Bad. """ def __init__(self, input_channels, output_channels, conv_op=nn.Conv2d, conv_kwargs=None, norm_op=nn.BatchNorm2d, norm_op_kwargs=None, dropout_op=nn.Dropout2d, dropout_op_kwargs=None, nonlin=nn.LeakyReLU, nonlin_kwargs=None): super(ConvDropoutNormNonlin, self).__init__() if nonlin_kwargs is None: nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True} if dropout_op_kwargs is None: dropout_op_kwargs = {'p': 0.5, 'inplace': True} if norm_op_kwargs is None: norm_op_kwargs = {'eps': 1e-5, 'affine': True, 'momentum': 0.1} if conv_kwargs is None: conv_kwargs = {'kernel_size': 3, 'stride': 1, 'padding': 1, 'dilation': 1, 'bias': True} self.nonlin_kwargs = nonlin_kwargs self.nonlin = nonlin self.dropout_op = dropout_op self.dropout_op_kwargs = dropout_op_kwargs self.norm_op_kwargs = norm_op_kwargs self.conv_kwargs = conv_kwargs self.conv_op = conv_op self.norm_op = norm_op self.conv = self.conv_op(input_channels, output_channels, **self.conv_kwargs) if self.dropout_op is not None and self.dropout_op_kwargs['p'] is not None and self.dropout_op_kwargs[ 'p'] > 0: self.dropout = self.dropout_op(**self.dropout_op_kwargs) else: self.dropout = None self.instnorm = self.norm_op(output_channels, **self.norm_op_kwargs) self.lrelu = self.nonlin(**self.nonlin_kwargs) def forward(self, x): x = self.conv(x) if self.dropout is not None: x = self.dropout(x) return self.lrelu(self.instnorm(x)) class ConvDropoutNonlinNorm(ConvDropoutNormNonlin): def forward(self, x): x = self.conv(x) if self.dropout is not None: x = self.dropout(x) return self.instnorm(self.lrelu(x)) class StackedConvLayers(nn.Module): def __init__(self, input_feature_channels, output_feature_channels, num_convs, conv_op=nn.Conv2d, conv_kwargs=None, norm_op=nn.BatchNorm2d, norm_op_kwargs=None, dropout_op=nn.Dropout2d, dropout_op_kwargs=None, nonlin=nn.LeakyReLU, nonlin_kwargs=None, first_stride=None, basic_block=ConvDropoutNormNonlin): ''' stacks ConvDropoutNormLReLU layers. initial_stride will only be applied to first layer in the stack. 
The other parameters affect all layers :param input_feature_channels: :param output_feature_channels: :param num_convs: :param dilation: :param kernel_size: :param padding: :param dropout: :param initial_stride: :param conv_op: :param norm_op: :param dropout_op: :param inplace: :param neg_slope: :param norm_affine: :param conv_bias: ''' self.input_channels = input_feature_channels self.output_channels = output_feature_channels if nonlin_kwargs is None: nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True} if dropout_op_kwargs is None: dropout_op_kwargs = {'p': 0.5, 'inplace': True} if norm_op_kwargs is None: norm_op_kwargs = {'eps': 1e-5, 'affine': True, 'momentum': 0.1} if conv_kwargs is None: conv_kwargs = {'kernel_size': 3, 'stride': 1, 'padding': 1, 'dilation': 1, 'bias': True} self.nonlin_kwargs = nonlin_kwargs self.nonlin = nonlin self.dropout_op = dropout_op self.dropout_op_kwargs = dropout_op_kwargs self.norm_op_kwargs = norm_op_kwargs self.conv_kwargs = conv_kwargs self.conv_op = conv_op self.norm_op = norm_op if first_stride is not None: self.conv_kwargs_first_conv = deepcopy(conv_kwargs) self.conv_kwargs_first_conv['stride'] = first_stride else: self.conv_kwargs_first_conv = conv_kwargs super(StackedConvLayers, self).__init__() self.blocks = nn.Sequential( *([basic_block(input_feature_channels, output_feature_channels, self.conv_op, self.conv_kwargs_first_conv, self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs, self.nonlin, self.nonlin_kwargs)] + [basic_block(output_feature_channels, output_feature_channels, self.conv_op, self.conv_kwargs, self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs, self.nonlin, self.nonlin_kwargs) for _ in range(num_convs - 1)])) def forward(self, x): return self.blocks(x) def print_module_training_status(module): if isinstance(module, nn.Conv2d) or isinstance(module, nn.Conv3d) or isinstance(module, nn.Dropout3d) or \ isinstance(module, nn.Dropout2d) or isinstance(module, nn.Dropout) or isinstance(module, nn.InstanceNorm3d) \ or isinstance(module, nn.InstanceNorm2d) or isinstance(module, nn.InstanceNorm1d) \ or isinstance(module, nn.BatchNorm2d) or isinstance(module, nn.BatchNorm3d) or isinstance(module, nn.BatchNorm1d): print(str(module), module.training) class Upsample(nn.Module): def __init__(self, size=None, scale_factor=None, mode='nearest', align_corners=False): super(Upsample, self).__init__() self.align_corners = align_corners self.mode = mode self.scale_factor = scale_factor self.size = size def forward(self, x): return nn.functional.interpolate(x, size=self.size, scale_factor=self.scale_factor, mode=self.mode, align_corners=self.align_corners) def c2_xavier_fill(module: nn.Module) -> None: """ Initialize `module.weight` using the "XavierFill" implemented in Caffe2. Also initializes `module.bias` to 0. Args: module (torch.nn.Module): module to initialize. """ # Caffe2 implementation of XavierFill in fact # corresponds to kaiming_uniform_ in PyTorch nn.init.kaiming_uniform_(module.weight, a=1) if module.bias is not None: # pyre-fixme[6]: Expected `Tensor` for 1st param but got `Union[nn.Module, # torch.Tensor]`. nn.init.constant_(module.bias, 0)
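As a quick check that the nnUNet-style blocks defined above compose as expected, the sketch below instantiates `StackedConvLayers` with a 3D configuration (Conv3d + InstanceNorm3d + LeakyReLU, stride-2 first convolution). The channel counts and kwargs here are purely illustrative; the values actually used by `Generic_TransUNet_max_ppbp` appear later in the file and are not shown in this record.

import torch
import torch.nn as nn

conv_kwargs = {'kernel_size': 3, 'stride': 1, 'padding': 1, 'dilation': 1, 'bias': True}
norm_kwargs = {'eps': 1e-5, 'affine': True}
nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}

stage = StackedConvLayers(
    input_feature_channels=32,
    output_feature_channels=64,
    num_convs=2,
    conv_op=nn.Conv3d,
    conv_kwargs=conv_kwargs,
    norm_op=nn.InstanceNorm3d,
    norm_op_kwargs=norm_kwargs,
    dropout_op=None,        # ConvDropoutNormNonlin skips dropout when dropout_op is None
    nonlin=nn.LeakyReLU,
    nonlin_kwargs=nonlin_kwargs,
    first_stride=2,         # only the first conv in the stack downsamples
)

x = torch.randn(1, 32, 16, 32, 32)   # (B, C, D, H, W)
print(stage(x).shape)                # torch.Size([1, 64, 8, 16, 16])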
class Generic_TransUNet_max_ppbp(SegmentationNetwork):
0
2023-10-11 05:19:25+00:00
16k
AMAAI-Lab/Video2Music
train.py
[ { "identifier": "compute_vevo_accuracy", "path": "dataset/vevo_dataset.py", "snippet": "def compute_vevo_accuracy(out, tgt):\n softmax = nn.Softmax(dim=-1)\n out = torch.argmax(softmax(out), dim=-1)\n\n out = out.flatten()\n tgt = tgt.flatten()\n\n mask = (tgt != CHORD_PAD)\n\n out = out[mask]\n tgt = tgt[mask]\n\n if(len(tgt) == 0):\n return 1.0\n\n num_right = (out == tgt)\n num_right = torch.sum(num_right).type(TORCH_FLOAT)\n\n acc = num_right / len(tgt)\n\n return acc" }, { "identifier": "create_vevo_datasets", "path": "dataset/vevo_dataset.py", "snippet": "def create_vevo_datasets(dataset_root = \"./dataset\", max_seq_chord=300, max_seq_video=300, vis_models=\"2d/clip_l14p\", emo_model=\"6c_l14p\", split_ver=\"v1\", random_seq=True, is_video=True):\n\n train_dataset = VevoDataset(\n dataset_root = dataset_root, split=\"train\", split_ver=split_ver, \n vis_models=vis_models, emo_model =emo_model, max_seq_chord=max_seq_chord, max_seq_video=max_seq_video, \n random_seq=random_seq, is_video = is_video )\n \n val_dataset = VevoDataset(\n dataset_root = dataset_root, split=\"val\", split_ver=split_ver, \n vis_models=vis_models, emo_model =emo_model, max_seq_chord=max_seq_chord, max_seq_video=max_seq_video, \n random_seq=random_seq, is_video = is_video )\n \n test_dataset = VevoDataset(\n dataset_root = dataset_root, split=\"test\", split_ver=split_ver, \n vis_models=vis_models, emo_model =emo_model, max_seq_chord=max_seq_chord, max_seq_video=max_seq_video, \n random_seq=random_seq, is_video = is_video )\n \n return train_dataset, val_dataset, test_dataset" }, { "identifier": "MusicTransformer", "path": "model/music_transformer.py", "snippet": "class MusicTransformer(nn.Module):\n def __init__(self, n_layers=6, num_heads=8, d_model=512, dim_feedforward=1024,\n dropout=0.1, max_sequence_midi=2048, max_sequence_chord=300, rpr=False):\n super(MusicTransformer, self).__init__()\n\n self.dummy = DummyDecoder()\n self.nlayers = n_layers\n self.nhead = num_heads\n self.d_model = d_model\n self.d_ff = dim_feedforward\n self.dropout = dropout\n self.max_seq_midi = max_sequence_midi\n self.max_seq_chord = max_sequence_chord\n self.rpr = rpr\n\n # Input embedding for video and music features\n self.embedding = nn.Embedding(CHORD_SIZE, self.d_model)\n\n # self.embedding_key = nn.Embedding(1, self.d_model)\n self.embedding_root = nn.Embedding(CHORD_ROOT_SIZE, self.d_model)\n self.embedding_attr = nn.Embedding(CHORD_ATTR_SIZE, self.d_model)\n\n self.positional_encoding = PositionalEncoding(self.d_model, self.dropout, self.max_seq_chord)\n self.Linear_chord = nn.Linear(self.d_model+1, self.d_model)\n\n # Base transformer\n if(not self.rpr):\n self.transformer = nn.Transformer(\n d_model=self.d_model, nhead=self.nhead, num_encoder_layers=self.nlayers,\n num_decoder_layers=0, dropout=self.dropout, # activation=self.ff_activ,\n dim_feedforward=self.d_ff, custom_decoder=self.dummy\n )\n # RPR Transformer\n else:\n encoder_norm = LayerNorm(self.d_model)\n encoder_layer = TransformerEncoderLayerRPR(self.d_model, self.nhead, self.d_ff, self.dropout, er_len=self.max_seq_chord)\n\n encoder = TransformerEncoderRPR(encoder_layer, self.nlayers, encoder_norm)\n self.transformer = nn.Transformer(\n d_model=self.d_model, nhead=self.nhead, num_encoder_layers=self.nlayers,\n num_decoder_layers=0, dropout=self.dropout, # activation=self.ff_activ,\n dim_feedforward=self.d_ff, custom_decoder=self.dummy, custom_encoder=encoder\n )\n # Final output is a softmaxed linear layer\n self.Wout = nn.Linear(self.d_model, 
CHORD_SIZE)\n self.Wout_root = nn.Linear(self.d_model, CHORD_ROOT_SIZE)\n self.Wout_attr = nn.Linear(self.d_model, CHORD_ATTR_SIZE)\n self.softmax = nn.Softmax(dim=-1)\n\n # forward\n def forward(self, x, x_root, x_attr, feature_key, mask=True):\n if(mask is True):\n mask = self.transformer.generate_square_subsequent_mask(x.shape[1]).to(get_device())\n else:\n mask = None\n\n ### Chord + Key (DECODER) ###\n # x = self.embedding(x)\n \n x_root = self.embedding_root(x_root)\n x_attr = self.embedding_attr(x_attr)\n x = x_root + x_attr\n\n feature_key_padded = torch.full((x.shape[0], x.shape[1], 1), feature_key.item())\n feature_key_padded = feature_key_padded.to(get_device())\n x = torch.cat([x, feature_key_padded], dim=-1)\n xf = self.Linear_chord(x)\n\n ### POSITIONAL ENCODING ###\n xf = xf.permute(1,0,2) # -> (max_seq-1, batch_size, d_model)\n xf = self.positional_encoding(xf)\n \n ### TRANSFORMER ###\n x_out = self.transformer(src=xf, tgt=xf, tgt_mask=mask)\n x_out = x_out.permute(1,0,2)\n \n if IS_SEPERATED:\n y_root = self.Wout_root(x_out)\n y_attr = self.Wout_attr(x_out)\n del mask\n return y_root, y_attr\n else:\n y = self.Wout(x_out)\n del mask\n return y\n\n # generate\n def generate(self, feature_key=None, primer=None, primer_root=None, primer_attr=None, target_seq_length=300, beam=0, beam_chance=1.0):\n assert (not self.training), \"Cannot generate while in training mode\"\n\n with open('dataset/vevo_meta/chord_inv.json') as json_file:\n chordInvDic = json.load(json_file)\n with open('dataset/vevo_meta/chord_root.json') as json_file:\n chordRootDic = json.load(json_file)\n with open('dataset/vevo_meta/chord_attr.json') as json_file:\n chordAttrDic = json.load(json_file)\n\n print(\"Generating sequence of max length:\", target_seq_length)\n gen_seq = torch.full((1,target_seq_length), CHORD_PAD, dtype=TORCH_LABEL_TYPE, device=get_device())\n gen_seq_root = torch.full((1,target_seq_length), CHORD_ROOT_PAD, dtype=TORCH_LABEL_TYPE, device=get_device())\n gen_seq_attr = torch.full((1,target_seq_length), CHORD_ATTR_PAD, dtype=TORCH_LABEL_TYPE, device=get_device())\n \n num_primer = len(primer)\n\n gen_seq[..., :num_primer] = primer.type(TORCH_LABEL_TYPE).to(get_device())\n gen_seq_root[..., :num_primer] = primer_root.type(TORCH_LABEL_TYPE).to(get_device())\n \n gen_seq_attr[..., :num_primer] = primer_attr.type(TORCH_LABEL_TYPE).to(get_device())\n\n cur_i = num_primer\n while(cur_i < target_seq_length):\n # gen_seq_batch = gen_seq.clone()\n # y = self.softmax(self.forward(gen_seq[..., :cur_i]))[..., :CHORD_END]\n y = self.softmax( self.forward( gen_seq[..., :cur_i], gen_seq_root[..., :cur_i], gen_seq_attr[..., :cur_i], feature_key) )[..., :CHORD_END]\n \n token_probs = y[:, cur_i-1, :]\n if(beam == 0):\n beam_ran = 2.0\n else:\n beam_ran = random.uniform(0,1)\n if(beam_ran <= beam_chance):\n token_probs = token_probs.flatten()\n top_res, top_i = torch.topk(token_probs, beam)\n beam_rows = top_i // CHORD_SIZE\n beam_cols = top_i % CHORD_SIZE\n gen_seq = gen_seq[beam_rows, :]\n gen_seq[..., cur_i] = beam_cols\n else:\n distrib = torch.distributions.categorical.Categorical(probs=token_probs)\n next_token = distrib.sample()\n #print(\"next token:\",next_token)\n gen_seq[:, cur_i] = next_token\n gen_chord = chordInvDic[ str( next_token.item() ) ]\n \n chord_arr = gen_chord.split(\":\")\n if len(chord_arr) == 1:\n chordRootID = chordRootDic[chord_arr[0]]\n chordAttrID = 1\n chordRootID = torch.tensor([chordRootID]).to(get_device())\n chordAttrID = torch.tensor([chordAttrID]).to(get_device())\n 
gen_seq_root[:, cur_i] = chordRootID\n gen_seq_attr[:, cur_i] = chordAttrID\n elif len(chord_arr) == 2:\n chordRootID = chordRootDic[chord_arr[0]]\n chordAttrID = chordAttrDic[chord_arr[1]]\n chordRootID = torch.tensor([chordRootID]).to(get_device())\n chordAttrID = torch.tensor([chordAttrID]).to(get_device())\n gen_seq_root[:, cur_i] = chordRootID\n gen_seq_attr[:, cur_i] = chordAttrID\n \n # Let the transformer decide to end if it wants to\n if(next_token == CHORD_END):\n print(\"Model called end of sequence at:\", cur_i, \"/\", target_seq_length)\n break\n \n cur_i += 1\n if(cur_i % 50 == 0):\n print(cur_i, \"/\", target_seq_length)\n return gen_seq[:, :cur_i]" }, { "identifier": "VideoMusicTransformer", "path": "model/video_music_transformer.py", "snippet": "class VideoMusicTransformer(nn.Module):\n def __init__(self, n_layers=6, num_heads=8, d_model=512, dim_feedforward=1024,\n dropout=0.1, max_sequence_midi =2048, max_sequence_video=300, max_sequence_chord=300, total_vf_dim = 0, rpr=False):\n super(VideoMusicTransformer, self).__init__()\n self.nlayers = n_layers\n self.nhead = num_heads\n self.d_model = d_model\n self.d_ff = dim_feedforward\n self.dropout = dropout\n self.max_seq_midi = max_sequence_midi\n self.max_seq_video = max_sequence_video\n self.max_seq_chord = max_sequence_chord\n self.rpr = rpr\n\n # Input embedding for video and music features\n self.embedding = nn.Embedding(CHORD_SIZE, self.d_model)\n self.embedding_root = nn.Embedding(CHORD_ROOT_SIZE, self.d_model)\n self.embedding_attr = nn.Embedding(CHORD_ATTR_SIZE, self.d_model)\n \n self.total_vf_dim = total_vf_dim\n self.Linear_vis = nn.Linear(self.total_vf_dim, self.d_model)\n self.Linear_chord = nn.Linear(self.d_model+1, self.d_model)\n \n # Positional encoding\n self.positional_encoding = PositionalEncoding(self.d_model, self.dropout, self.max_seq_chord)\n self.positional_encoding_video = PositionalEncoding(self.d_model, self.dropout, self.max_seq_video)\n\n # Add condition (minor or major)\n self.condition_linear = nn.Linear(1, self.d_model)\n \n # Base transformer\n if(not self.rpr):\n self.transformer = nn.Transformer(\n d_model=self.d_model, nhead=self.nhead, num_encoder_layers=self.nlayers,\n num_decoder_layers=self.nlayers, dropout=self.dropout, # activation=self.ff_activ,\n dim_feedforward=self.d_ff\n )\n # RPR Transformer\n else:\n decoder_norm = LayerNorm(self.d_model)\n decoder_layer = TransformerDecoderLayerRPR(self.d_model, self.nhead, self.d_ff, self.dropout, er_len=self.max_seq_chord)\n decoder = TransformerDecoderRPR(decoder_layer, self.nlayers, decoder_norm)\n self.transformer = nn.Transformer(\n d_model=self.d_model, nhead=self.nhead, num_encoder_layers=self.nlayers,\n num_decoder_layers=self.nlayers, dropout=self.dropout, # activation=self.ff_activ,\n dim_feedforward=self.d_ff, custom_decoder=decoder\n ) \n \n self.Wout = nn.Linear(self.d_model, CHORD_SIZE)\n self.Wout_root = nn.Linear(self.d_model, CHORD_ROOT_SIZE)\n self.Wout_attr = nn.Linear(self.d_model, CHORD_ATTR_SIZE)\n self.softmax = nn.Softmax(dim=-1)\n \n def forward(self, x, x_root, x_attr, feature_semantic_list, feature_key, feature_scene_offset, feature_motion, feature_emotion, mask=True):\n if(mask is True):\n mask = self.transformer.generate_square_subsequent_mask(x.shape[1]).to(get_device())\n else:\n mask = None\n \n x_root = self.embedding_root(x_root)\n x_attr = self.embedding_attr(x_attr)\n x = x_root + x_attr\n\n feature_key_padded = torch.full((x.shape[0], x.shape[1], 1), feature_key.item())\n feature_key_padded = 
feature_key_padded.to(get_device())\n x = torch.cat([x, feature_key_padded], dim=-1)\n\n xf = self.Linear_chord(x)\n\n ### Video (SemanticList + SceneOffset + Motion + Emotion) (ENCODER) ###\n vf_concat = feature_semantic_list[0].float()\n\n for i in range(1, len(feature_semantic_list)):\n vf_concat = torch.cat( (vf_concat, feature_semantic_list[i].float()), dim=2) \n \n vf_concat = torch.cat([vf_concat, feature_scene_offset.unsqueeze(-1).float()], dim=-1) # -> (max_seq_video, batch_size, d_model+1)\n vf_concat = torch.cat([vf_concat, feature_motion.unsqueeze(-1).float()], dim=-1) # -> (max_seq_video, batch_size, d_model+1)\n vf_concat = torch.cat([vf_concat, feature_emotion.float()], dim=-1) # -> (max_seq_video, batch_size, d_model+1)\n vf = self.Linear_vis(vf_concat)\n \n ### POSITIONAL ENCODING ###\n xf = xf.permute(1,0,2) # -> (max_seq-1, batch_size, d_model)\n vf = vf.permute(1,0,2) # -> (max_seq_video, batch_size, d_model)\n xf = self.positional_encoding(xf)\n vf = self.positional_encoding_video(vf)\n\n ### TRANSFORMER ###\n x_out = self.transformer(src=vf, tgt=xf, tgt_mask=mask)\n x_out = x_out.permute(1,0,2)\n\n if IS_SEPERATED:\n y_root = self.Wout_root(x_out)\n y_attr = self.Wout_attr(x_out)\n del mask\n return y_root, y_attr\n else:\n y = self.Wout(x_out)\n del mask\n return y\n \n def generate(self, feature_semantic_list = [], feature_key=None, feature_scene_offset=None, feature_motion=None, feature_emotion=None,\n primer=None, primer_root=None, primer_attr=None, target_seq_length=300, beam=0, beam_chance=1.0, max_conseq_N = 0, max_conseq_chord = 2):\n \n assert (not self.training), \"Cannot generate while in training mode\"\n print(\"Generating sequence of max length:\", target_seq_length)\n\n with open('dataset/vevo_meta/chord_inv.json') as json_file:\n chordInvDic = json.load(json_file)\n with open('dataset/vevo_meta/chord_root.json') as json_file:\n chordRootDic = json.load(json_file)\n with open('dataset/vevo_meta/chord_attr.json') as json_file:\n chordAttrDic = json.load(json_file)\n\n gen_seq = torch.full((1,target_seq_length), CHORD_PAD, dtype=TORCH_LABEL_TYPE, device=get_device())\n gen_seq_root = torch.full((1,target_seq_length), CHORD_ROOT_PAD, dtype=TORCH_LABEL_TYPE, device=get_device())\n gen_seq_attr = torch.full((1,target_seq_length), CHORD_ATTR_PAD, dtype=TORCH_LABEL_TYPE, device=get_device())\n \n num_primer = len(primer)\n gen_seq[..., :num_primer] = primer.type(TORCH_LABEL_TYPE).to(get_device())\n gen_seq_root[..., :num_primer] = primer_root.type(TORCH_LABEL_TYPE).to(get_device())\n gen_seq_attr[..., :num_primer] = primer_attr.type(TORCH_LABEL_TYPE).to(get_device())\n\n cur_i = num_primer\n while(cur_i < target_seq_length):\n y = self.softmax( self.forward( gen_seq[..., :cur_i], gen_seq_root[..., :cur_i], gen_seq_attr[..., :cur_i], \n feature_semantic_list, feature_key, feature_scene_offset, feature_motion, feature_emotion) )[..., :CHORD_END]\n \n token_probs = y[:, cur_i-1, :]\n if(beam == 0):\n beam_ran = 2.0\n else:\n beam_ran = random.uniform(0,1)\n if(beam_ran <= beam_chance):\n token_probs = token_probs.flatten()\n top_res, top_i = torch.topk(token_probs, beam)\n beam_rows = top_i // CHORD_SIZE\n beam_cols = top_i % CHORD_SIZE\n gen_seq = gen_seq[beam_rows, :]\n gen_seq[..., cur_i] = beam_cols\n else:\n # token_probs.shape : [1, 157] \n # 0: N, 1: C, ... 
, 156: B:maj7\n # 157 chordEnd 158 padding\n if max_conseq_N == 0:\n token_probs[0][0] = 0.0\n isMaxChord = True\n if cur_i >= max_conseq_chord :\n preChord = gen_seq[0][cur_i-1].item() \n for k in range (1, max_conseq_chord):\n if preChord != gen_seq[0][cur_i-1-k].item():\n isMaxChord = False\n else:\n isMaxChord = False\n \n if isMaxChord:\n preChord = gen_seq[0][cur_i-1].item()\n token_probs[0][preChord] = 0.0\n \n distrib = torch.distributions.categorical.Categorical(probs=token_probs)\n next_token = distrib.sample()\n gen_seq[:, cur_i] = next_token\n gen_chord = chordInvDic[ str( next_token.item() ) ]\n \n chord_arr = gen_chord.split(\":\")\n if len(chord_arr) == 1:\n chordRootID = chordRootDic[chord_arr[0]]\n chordAttrID = 1\n chordRootID = torch.tensor([chordRootID]).to(get_device())\n chordAttrID = torch.tensor([chordAttrID]).to(get_device())\n gen_seq_root[:, cur_i] = chordRootID\n gen_seq_attr[:, cur_i] = chordAttrID\n elif len(chord_arr) == 2:\n chordRootID = chordRootDic[chord_arr[0]]\n chordAttrID = chordAttrDic[chord_arr[1]]\n chordRootID = torch.tensor([chordRootID]).to(get_device())\n chordAttrID = torch.tensor([chordAttrID]).to(get_device())\n gen_seq_root[:, cur_i] = chordRootID\n gen_seq_attr[:, cur_i] = chordAttrID\n \n # Let the transformer decide to end if it wants to\n if(next_token == CHORD_END):\n print(\"Model called end of sequence at:\", cur_i, \"/\", target_seq_length)\n break\n cur_i += 1\n if(cur_i % 50 == 0):\n print(cur_i, \"/\", target_seq_length)\n return gen_seq[:, :cur_i]" }, { "identifier": "SmoothCrossEntropyLoss", "path": "model/loss.py", "snippet": "class SmoothCrossEntropyLoss(_Loss):\n \"\"\"\n https://arxiv.org/abs/1512.00567\n \"\"\"\n __constants__ = ['label_smoothing', 'vocab_size', 'ignore_index', 'reduction']\n\n def __init__(self, label_smoothing, vocab_size, ignore_index=-100, reduction='mean', is_logits=True):\n assert 0.0 <= label_smoothing <= 1.0\n super().__init__(reduction=reduction)\n\n self.label_smoothing = label_smoothing\n self.vocab_size = vocab_size\n self.ignore_index = ignore_index\n self.input_is_logits = is_logits\n\n def forward(self, input, target):\n \"\"\"\n Args:\n input: [B * T, V]\n target: [B * T]\n Returns:\n cross entropy: [1]\n \"\"\"\n mask = (target == self.ignore_index).unsqueeze(-1)\n q = F.one_hot(target.long(), self.vocab_size).type(torch.float32)\n u = 1.0 / self.vocab_size\n q_prime = (1.0 - self.label_smoothing) * q + self.label_smoothing * u\n q_prime = q_prime.masked_fill(mask, 0)\n\n ce = self.cross_entropy_with_logits(q_prime, input)\n if self.reduction == 'mean':\n lengths = torch.sum(target != self.ignore_index)\n return ce.sum() / lengths\n elif self.reduction == 'sum':\n return ce.sum()\n else:\n raise NotImplementedError\n\n def cross_entropy_with_logits(self, p, q):\n return -torch.sum(p * (q - q.logsumexp(dim=-1, keepdim=True)), dim=-1)" }, { "identifier": "get_device", "path": "utilities/device.py", "snippet": "def get_device():\n \"\"\"\n ----------\n Author: Damon Gwinn\n ----------\n Grabs the default device. 
Default device is CUDA if available and use_cuda is not False, CPU otherwise.\n ----------\n \"\"\"\n\n if((not USE_CUDA) or (TORCH_CUDA_DEVICE is None)):\n return TORCH_CPU_DEVICE\n else:\n return TORCH_CUDA_DEVICE" }, { "identifier": "use_cuda", "path": "utilities/device.py", "snippet": "def use_cuda(cuda_bool):\n \"\"\"\n ----------\n Author: Damon Gwinn\n ----------\n Sets whether to use CUDA (if available), or use the CPU (not recommended)\n ----------\n \"\"\"\n\n global USE_CUDA\n USE_CUDA = cuda_bool" }, { "identifier": "LrStepTracker", "path": "utilities/lr_scheduling.py", "snippet": "class LrStepTracker:\n \"\"\"\n ----------\n Author: Ryan Marshall\n Modified: Damon Gwinn\n ----------\n Class for custom learn rate scheduler (to be used by torch.optim.lr_scheduler.LambdaLR).\n\n Learn rate for each step (batch) given the warmup steps is:\n lr = [ 1/sqrt(d_model) ] * min[ 1/sqrt(step) , step * (warmup_steps)^-1.5 ]\n\n This is from Attention is All you Need (https://arxiv.org/abs/1706.03762)\n ----------\n \"\"\"\n\n def __init__(self, model_dim=512, warmup_steps=4000, init_steps=0):\n # Store Values\n self.warmup_steps = warmup_steps\n self.model_dim = model_dim\n self.init_steps = init_steps\n\n # Begin Calculations\n self.invsqrt_dim = (1 / math.sqrt(model_dim))\n self.invsqrt_warmup = (1 / (warmup_steps * math.sqrt(warmup_steps)))\n\n # step\n def step(self, step):\n \"\"\"\n ----------\n Author: Ryan Marshall\n Modified: Damon Gwinn\n ----------\n Method to pass to LambdaLR. Increments the step and computes the new learn rate.\n ----------\n \"\"\"\n\n step += self.init_steps\n if(step <= self.warmup_steps):\n return self.invsqrt_dim * self.invsqrt_warmup * step\n else:\n invsqrt_step = (1 / math.sqrt(step))\n return self.invsqrt_dim * invsqrt_step" }, { "identifier": "get_lr", "path": "utilities/lr_scheduling.py", "snippet": "def get_lr(optimizer):\n \"\"\"\n ----------\n Author: Damon Gwinn\n ----------\n Hack to get the current learn rate of the model\n ----------\n \"\"\"\n\n for param_group in optimizer.param_groups:\n return param_group['lr']" }, { "identifier": "parse_train_args", "path": "utilities/argument_funcs.py", "snippet": "def parse_train_args():\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"-dataset_dir\", type=str, default=\"./dataset/\", help=\"Folder of VEVO dataset\")\n \n parser.add_argument(\"-input_dir_music\", type=str, default=\"./dataset/vevo_chord/\" + MUSIC_TYPE, help=\"Folder of video CNN feature files\")\n parser.add_argument(\"-input_dir_video\", type=str, default=\"./dataset/vevo_vis\", help=\"Folder of video CNN feature files\")\n\n parser.add_argument(\"-output_dir\", type=str, default=\"./saved_models\", help=\"Folder to save model weights. 
Saves one every epoch\")\n \n parser.add_argument(\"-weight_modulus\", type=int, default=1, help=\"How often to save epoch weights (ex: value of 10 means save every 10 epochs)\")\n parser.add_argument(\"-print_modulus\", type=int, default=1, help=\"How often to print train results for a batch (batch loss, learn rate, etc.)\")\n parser.add_argument(\"-n_workers\", type=int, default=1, help=\"Number of threads for the dataloader\")\n parser.add_argument(\"--force_cpu\", action=\"store_true\", help=\"Forces model to run on a cpu even when gpu is available\")\n parser.add_argument(\"--no_tensorboard\", action=\"store_true\", help=\"Turns off tensorboard result reporting\")\n parser.add_argument(\"-continue_weights\", type=str, default=None, help=\"Model weights to continue training based on\")\n parser.add_argument(\"-continue_epoch\", type=int, default=None, help=\"Epoch the continue_weights model was at\")\n parser.add_argument(\"-lr\", type=float, default=None, help=\"Constant learn rate. Leave as None for a custom scheduler.\")\n parser.add_argument(\"-ce_smoothing\", type=float, default=None, help=\"Smoothing parameter for smoothed cross entropy loss (defaults to no smoothing)\")\n parser.add_argument(\"-batch_size\", type=int, default=1, help=\"Batch size to use\")\n parser.add_argument(\"-epochs\", type=int, default=5, help=\"Number of epochs to use\")\n\n parser.add_argument(\"-max_sequence_midi\", type=int, default=2048, help=\"Maximum midi sequence to consider\")\n parser.add_argument(\"-max_sequence_video\", type=int, default=300, help=\"Maximum video sequence to consider\")\n parser.add_argument(\"-max_sequence_chord\", type=int, default=300, help=\"Maximum video sequence to consider\")\n\n parser.add_argument(\"-n_layers\", type=int, default=6, help=\"Number of decoder layers to use\")\n parser.add_argument(\"-num_heads\", type=int, default=8, help=\"Number of heads to use for multi-head attention\")\n parser.add_argument(\"-d_model\", type=int, default=512, help=\"Dimension of the model (output dim of embedding layers, etc.)\")\n parser.add_argument(\"-dim_feedforward\", type=int, default=1024, help=\"Dimension of the feedforward layer\")\n parser.add_argument(\"-dropout\", type=float, default=0.1, help=\"Dropout rate\")\n\n parser.add_argument(\"-is_video\", type=bool, default=IS_VIDEO, help=\"MusicTransformer or VideoMusicTransformer\")\n\n if IS_VIDEO:\n parser.add_argument(\"-vis_models\", type=str, default=VIS_MODELS_SORTED, help=\"...\")\n else:\n parser.add_argument(\"-vis_models\", type=str, default=\"\", help=\"...\")\n\n parser.add_argument(\"-emo_model\", type=str, default=\"6c_l14p\", help=\"...\")\n parser.add_argument(\"-rpr\", type=bool, default=RPR, help=\"...\")\n return parser.parse_args()" }, { "identifier": "print_train_args", "path": "utilities/argument_funcs.py", "snippet": "def print_train_args(args):\n print(SEPERATOR)\n \n print(\"dataset_dir:\", args.dataset_dir )\n \n print(\"input_dir_music:\", args.input_dir_music)\n print(\"input_dir_video:\", args.input_dir_video)\n\n print(\"output_dir:\", args.output_dir)\n\n print(\"weight_modulus:\", args.weight_modulus)\n print(\"print_modulus:\", args.print_modulus)\n print(\"\")\n print(\"n_workers:\", args.n_workers)\n print(\"force_cpu:\", args.force_cpu)\n print(\"tensorboard:\", not args.no_tensorboard)\n print(\"\")\n print(\"continue_weights:\", args.continue_weights)\n print(\"continue_epoch:\", args.continue_epoch)\n print(\"\")\n print(\"lr:\", args.lr)\n print(\"ce_smoothing:\", args.ce_smoothing)\n 
print(\"batch_size:\", args.batch_size)\n print(\"epochs:\", args.epochs)\n print(\"\")\n print(\"rpr:\", args.rpr)\n\n print(\"max_sequence_midi:\", args.max_sequence_midi)\n print(\"max_sequence_video:\", args.max_sequence_video)\n print(\"max_sequence_chord:\", args.max_sequence_chord)\n \n print(\"n_layers:\", args.n_layers)\n print(\"num_heads:\", args.num_heads)\n print(\"d_model:\", args.d_model)\n print(\"\")\n print(\"dim_feedforward:\", args.dim_feedforward)\n print(\"dropout:\", args.dropout)\n print(\"is_video:\", args.is_video)\n\n print(SEPERATOR)\n print(\"\")" }, { "identifier": "write_model_params", "path": "utilities/argument_funcs.py", "snippet": "def write_model_params(args, output_file):\n o_stream = open(output_file, \"w\")\n\n o_stream.write(\"rpr: \" + str(args.rpr) + \"\\n\")\n o_stream.write(\"lr: \" + str(args.lr) + \"\\n\")\n o_stream.write(\"ce_smoothing: \" + str(args.ce_smoothing) + \"\\n\")\n o_stream.write(\"batch_size: \" + str(args.batch_size) + \"\\n\")\n\n o_stream.write(\"max_sequence_midi: \" + str(args.max_sequence_midi) + \"\\n\")\n o_stream.write(\"max_sequence_video: \" + str(args.max_sequence_video) + \"\\n\")\n o_stream.write(\"max_sequence_chord: \" + str(args.max_sequence_chord) + \"\\n\")\n \n o_stream.write(\"n_layers: \" + str(args.n_layers) + \"\\n\")\n o_stream.write(\"num_heads: \" + str(args.num_heads) + \"\\n\")\n o_stream.write(\"d_model: \" + str(args.d_model) + \"\\n\")\n o_stream.write(\"dim_feedforward: \" + str(args.dim_feedforward) + \"\\n\")\n o_stream.write(\"dropout: \" + str(args.dropout) + \"\\n\")\n\n o_stream.write(\"is_video: \" + str(args.is_video) + \"\\n\")\n o_stream.write(\"vis_models: \" + str(args.vis_models) + \"\\n\")\n o_stream.write(\"input_dir_music: \" + str(args.input_dir_music) + \"\\n\")\n o_stream.write(\"input_dir_video: \" + str(args.input_dir_video) + \"\\n\")\n\n o_stream.close()" }, { "identifier": "train_epoch", "path": "utilities/run_model_vevo.py", "snippet": "def train_epoch(cur_epoch, model, dataloader, \n train_loss_func, train_loss_emotion_func,\n opt, lr_scheduler=None, print_modulus=1, isVideo=True):\n \n loss_chord = -1\n loss_emotion = -1\n model.train()\n for batch_num, batch in enumerate(dataloader):\n time_before = time.time()\n opt.zero_grad()\n\n x = batch[\"x\"].to(get_device())\n tgt = batch[\"tgt\"].to(get_device())\n x_root = batch[\"x_root\"].to(get_device())\n tgt_root = batch[\"tgt_root\"].to(get_device())\n x_attr = batch[\"x_attr\"].to(get_device())\n tgt_attr = batch[\"tgt_attr\"].to(get_device())\n tgt_emotion = batch[\"tgt_emotion\"].to(get_device())\n tgt_emotion_prob = batch[\"tgt_emotion_prob\"].to(get_device())\n \n feature_semantic_list = [] \n for feature_semantic in batch[\"semanticList\"]:\n feature_semantic_list.append( feature_semantic.to(get_device()) )\n\n feature_key = batch[\"key\"].to(get_device())\n feature_scene_offset = batch[\"scene_offset\"].to(get_device())\n feature_motion = batch[\"motion\"].to(get_device())\n feature_emotion = batch[\"emotion\"].to(get_device())\n\n if isVideo:\n # use VideoMusicTransformer\n if IS_SEPERATED:\n y_root, y_attr = model(x,\n x_root,\n x_attr,\n feature_semantic_list, \n feature_key, \n feature_scene_offset,\n feature_motion,\n feature_emotion)\n \n y_root = y_root.reshape(y_root.shape[0] * y_root.shape[1], -1)\n y_attr = y_attr.reshape(y_attr.shape[0] * y_attr.shape[1], -1)\n \n tgt_root = tgt_root.flatten()\n tgt_attr = tgt_attr.flatten()\n\n tgt_emotion = tgt_emotion.squeeze()\n\n loss_chord_root = 
train_loss_func.forward(y_root, tgt_root)\n loss_chord_attr = train_loss_func.forward(y_attr, tgt_attr)\n loss_chord = loss_chord_root + loss_chord_attr\n\n first_14 = tgt_emotion[:, :14]\n last_2 = tgt_emotion[:, -2:]\n tgt_emotion_attr = torch.cat((first_14, last_2), dim=1)\n\n loss_emotion = train_loss_emotion_func.forward(y_attr, tgt_emotion_attr)\n\n total_loss = LOSS_LAMBDA * loss_chord + (1-LOSS_LAMBDA) * loss_emotion\n total_loss.backward()\n opt.step()\n if(lr_scheduler is not None):\n lr_scheduler.step()\n \n else:\n #videomusic tran nosep\n y = model(x,\n x_root,\n x_attr,\n feature_semantic_list, \n feature_key, \n feature_scene_offset,\n feature_motion,\n feature_emotion)\n \n y = y.reshape(y.shape[0] * y.shape[1], -1)\n tgt = tgt.flatten()\n tgt_emotion = tgt_emotion.squeeze()\n loss_chord = train_loss_func.forward(y, tgt)\n loss_emotion = train_loss_emotion_func.forward(y, tgt_emotion)\n total_loss = LOSS_LAMBDA * loss_chord + (1-LOSS_LAMBDA) * loss_emotion\n total_loss.backward()\n opt.step()\n if(lr_scheduler is not None):\n lr_scheduler.step()\n\n else:\n # music transformer\n if IS_SEPERATED:\n y_root, y_attr = model(x,\n x_root,\n x_attr,\n feature_key)\n \n y_root = y_root.reshape(y_root.shape[0] * y_root.shape[1], -1)\n y_attr = y_attr.reshape(y_attr.shape[0] * y_attr.shape[1], -1)\n \n tgt_root = tgt_root.flatten()\n tgt_attr = tgt_attr.flatten()\n\n tgt_emotion = tgt_emotion.squeeze()\n\n loss_chord_root = train_loss_func.forward(y_root, tgt_root)\n loss_chord_attr = train_loss_func.forward(y_attr, tgt_attr)\n\n loss_chord = loss_chord_root + loss_chord_attr\n loss_emotion = -1\n \n total_loss = loss_chord\n total_loss.backward()\n opt.step()\n if(lr_scheduler is not None):\n lr_scheduler.step()\n else:\n # use MusicTransformer (no sep)\n y = model(x,\n x_root,\n x_attr,\n feature_key)\n \n y = y.reshape(y.shape[0] * y.shape[1], -1)\n tgt = tgt.flatten()\n\n loss_chord = train_loss_func.forward(y, tgt)\n loss_emotion = -1\n\n total_loss = loss_chord\n total_loss.backward()\n\n opt.step()\n\n if(lr_scheduler is not None):\n lr_scheduler.step()\n\n time_after = time.time()\n time_took = time_after - time_before\n \n if((batch_num+1) % print_modulus == 0):\n print(SEPERATOR)\n print(\"Epoch\", cur_epoch, \" Batch\", batch_num+1, \"/\", len(dataloader))\n print(\"LR:\", get_lr(opt))\n print(\"Train loss (total):\", float(total_loss))\n print(\"Train loss (chord):\", float(loss_chord))\n print(\"Train loss (emotion):\", float(loss_emotion))\n print(\"\")\n print(\"Time (s):\", time_took)\n print(SEPERATOR)\n print(\"\")\n return" }, { "identifier": "eval_model", "path": "utilities/run_model_vevo.py", "snippet": "def eval_model(model, dataloader, \n eval_loss_func, eval_loss_emotion_func,\n isVideo = True, isGenConfusionMatrix=False):\n model.eval()\n avg_acc = -1\n avg_cor = -1\n avg_acc_cor = -1\n\n avg_h1 = -1\n avg_h3 = -1\n avg_h5 = -1\n \n avg_loss_chord = -1\n avg_loss_emotion = -1\n avg_total_loss = -1\n\n true_labels = []\n true_root_labels = []\n true_attr_labels = []\n \n pred_labels = []\n pred_root_labels = []\n pred_attr_labels = []\n \n with torch.set_grad_enabled(False):\n n_test = len(dataloader)\n n_test_cor = 0 \n\n sum_loss_chord = 0.0\n sum_loss_emotion = 0.0\n sum_total_loss = 0.0\n\n sum_acc = 0.0\n sum_cor = 0.0\n\n sum_h1 = 0.0\n sum_h3 = 0.0\n sum_h5 = 0.0\n \n for batch in dataloader:\n x = batch[\"x\"].to(get_device())\n tgt = batch[\"tgt\"].to(get_device())\n x_root = batch[\"x_root\"].to(get_device())\n tgt_root = 
batch[\"tgt_root\"].to(get_device())\n x_attr = batch[\"x_attr\"].to(get_device())\n tgt_attr = batch[\"tgt_attr\"].to(get_device())\n tgt_emotion = batch[\"tgt_emotion\"].to(get_device())\n tgt_emotion_prob = batch[\"tgt_emotion_prob\"].to(get_device())\n \n feature_semantic_list = [] \n for feature_semantic in batch[\"semanticList\"]:\n feature_semantic_list.append( feature_semantic.to(get_device()) )\n \n feature_key = batch[\"key\"].to(get_device())\n feature_scene_offset = batch[\"scene_offset\"].to(get_device())\n feature_motion = batch[\"motion\"].to(get_device())\n feature_emotion = batch[\"emotion\"].to(get_device())\n\n if isVideo:\n if IS_SEPERATED:\n y_root, y_attr = model(x,\n x_root,\n x_attr,\n feature_semantic_list, \n feature_key, \n feature_scene_offset,\n feature_motion,\n feature_emotion)\n\n sum_acc += float(compute_vevo_accuracy_root_attr(y_root, y_attr, tgt))\n cor = float(compute_vevo_correspondence_root_attr(y_root, y_attr, tgt, tgt_emotion, tgt_emotion_prob, EMOTION_THRESHOLD))\n if cor >= 0 :\n n_test_cor +=1\n sum_cor += cor\n\n sum_h1 += float(compute_hits_k_root_attr(y_root, y_attr, tgt,1))\n sum_h3 += float(compute_hits_k_root_attr(y_root, y_attr, tgt,3))\n sum_h5 += float(compute_hits_k_root_attr(y_root, y_attr, tgt,5))\n \n y_root = y_root.reshape(y_root.shape[0] * y_root.shape[1], -1)\n y_attr = y_attr.reshape(y_attr.shape[0] * y_attr.shape[1], -1)\n \n tgt_root = tgt_root.flatten()\n tgt_attr = tgt_attr.flatten()\n tgt_emotion = tgt_emotion.squeeze()\n\n loss_chord_root = eval_loss_func.forward(y_root, tgt_root)\n loss_chord_attr = eval_loss_func.forward(y_attr, tgt_attr)\n loss_chord = loss_chord_root + loss_chord_attr\n\n first_14 = tgt_emotion[:, :14]\n last_2 = tgt_emotion[:, -2:]\n tgt_emotion_attr = torch.cat((first_14, last_2), dim=1)\n\n loss_emotion = eval_loss_emotion_func.forward(y_attr, tgt_emotion_attr)\n total_loss = LOSS_LAMBDA * loss_chord + (1-LOSS_LAMBDA) * loss_emotion\n\n sum_loss_chord += float(loss_chord)\n sum_loss_emotion += float(loss_emotion)\n sum_total_loss += float(total_loss)\n else:\n y= model(x,\n x_root,\n x_attr,\n feature_semantic_list, \n feature_key, \n feature_scene_offset,\n feature_motion,\n feature_emotion)\n \n sum_acc += float(compute_vevo_accuracy(y, tgt ))\n cor = float(compute_vevo_correspondence(y, tgt, tgt_emotion, tgt_emotion_prob, EMOTION_THRESHOLD))\n if cor >= 0 :\n n_test_cor +=1\n sum_cor += cor\n\n sum_h1 += float(compute_hits_k(y, tgt,1))\n sum_h3 += float(compute_hits_k(y, tgt,3))\n sum_h5 += float(compute_hits_k(y, tgt,5))\n \n y = y.reshape(y.shape[0] * y.shape[1], -1)\n\n tgt = tgt.flatten()\n tgt_root = tgt_root.flatten()\n tgt_attr = tgt_attr.flatten()\n \n tgt_emotion = tgt_emotion.squeeze()\n\n loss_chord = eval_loss_func.forward(y, tgt)\n loss_emotion = eval_loss_emotion_func.forward(y, tgt_emotion)\n total_loss = LOSS_LAMBDA * loss_chord + (1-LOSS_LAMBDA) * loss_emotion\n\n sum_loss_chord += float(loss_chord)\n sum_loss_emotion += float(loss_emotion)\n sum_total_loss += float(total_loss)\n\n if isGenConfusionMatrix:\n pred = y.argmax(dim=1).detach().cpu().numpy()\n pred_root = []\n pred_attr = []\n\n for i in pred:\n if i == 0:\n pred_root.append(0)\n pred_attr.append(0)\n elif i == 157:\n pred_root.append(CHORD_ROOT_END)\n pred_attr.append(CHORD_ATTR_END)\n elif i == 158:\n pred_root.append(CHORD_ROOT_PAD)\n pred_attr.append(CHORD_ATTR_PAD)\n else:\n rootindex = int( (i-1)/13 ) + 1\n attrindex = (i-1)%13 + 1\n pred_root.append(rootindex)\n pred_attr.append(attrindex)\n \n pred_root = 
np.array(pred_root)\n pred_attr = np.array(pred_attr)\n\n true = tgt.detach().cpu().numpy()\n true_root = tgt_root.detach().cpu().numpy()\n true_attr = tgt_attr.detach().cpu().numpy()\n \n pred_labels.extend(pred)\n pred_root_labels.extend(pred_root)\n pred_attr_labels.extend(pred_attr)\n \n true_labels.extend(true)\n true_root_labels.extend(true_root)\n true_attr_labels.extend(true_attr)\n else:\n if IS_SEPERATED:\n y_root, y_attr = model(x,\n x_root,\n x_attr,\n feature_key)\n\n sum_acc += float(compute_vevo_accuracy_root_attr(y_root, y_attr, tgt))\n cor = float(compute_vevo_correspondence_root_attr(y_root, y_attr, tgt, tgt_emotion, tgt_emotion_prob, EMOTION_THRESHOLD))\n if cor >= 0 :\n n_test_cor +=1\n sum_cor += cor\n\n sum_h1 += float(compute_hits_k_root_attr(y_root, y_attr, tgt,1))\n sum_h3 += float(compute_hits_k_root_attr(y_root, y_attr, tgt,3))\n sum_h5 += float(compute_hits_k_root_attr(y_root, y_attr, tgt,5))\n \n y_root = y_root.reshape(y_root.shape[0] * y_root.shape[1], -1)\n y_attr = y_attr.reshape(y_attr.shape[0] * y_attr.shape[1], -1)\n \n tgt_root = tgt_root.flatten()\n tgt_attr = tgt_attr.flatten()\n tgt_emotion = tgt_emotion.squeeze()\n\n loss_chord_root = eval_loss_func.forward(y_root, tgt_root)\n loss_chord_attr = eval_loss_func.forward(y_attr, tgt_attr)\n loss_chord = loss_chord_root + loss_chord_attr\n\n first_14 = tgt_emotion[:, :14]\n last_2 = tgt_emotion[:, -2:]\n tgt_emotion_attr = torch.cat((first_14, last_2), dim=1)\n loss_emotion = eval_loss_emotion_func.forward(y_attr, tgt_emotion_attr)\n \n total_loss = LOSS_LAMBDA * loss_chord + (1-LOSS_LAMBDA) * loss_emotion\n\n sum_loss_chord += float(loss_chord)\n sum_loss_emotion += float(loss_emotion)\n sum_total_loss += float(total_loss)\n else:\n # use MusicTransformer no sep\n y = model(x,\n x_root,\n x_attr,\n feature_key)\n \n sum_acc += float(compute_vevo_accuracy(y, tgt ))\n cor = float(compute_vevo_correspondence(y, tgt, tgt_emotion, tgt_emotion_prob, EMOTION_THRESHOLD))\n \n if cor >= 0 :\n n_test_cor +=1\n sum_cor += cor\n\n sum_h1 += float(compute_hits_k(y, tgt,1))\n sum_h3 += float(compute_hits_k(y, tgt,3))\n sum_h5 += float(compute_hits_k(y, tgt,5))\n\n tgt_emotion = tgt_emotion.squeeze()\n \n y = y.reshape(y.shape[0] * y.shape[1], -1)\n tgt = tgt.flatten()\n loss_chord = eval_loss_func.forward(y, tgt)\n loss_emotion = eval_loss_emotion_func.forward(y, tgt_emotion)\n total_loss = loss_chord\n\n sum_loss_chord += float(loss_chord)\n sum_loss_emotion += float(loss_emotion)\n sum_total_loss += float(total_loss)\n\n avg_loss_chord = sum_loss_chord / n_test\n avg_loss_emotion = sum_loss_emotion / n_test\n avg_total_loss = sum_total_loss / n_test\n\n avg_acc = sum_acc / n_test\n avg_cor = sum_cor / n_test_cor\n \n avg_h1 = sum_h1 / n_test\n avg_h3 = sum_h3 / n_test\n avg_h5 = sum_h5 / n_test\n \n avg_acc_cor = (avg_acc + avg_cor)/ 2.0\n\n if isGenConfusionMatrix:\n chordInvDicPath = \"./dataset/vevo_meta/chord_inv.json\"\n chordRootInvDicPath = \"./dataset/vevo_meta/chord_root_inv.json\"\n chordAttrInvDicPath = \"./dataset/vevo_meta/chord_attr_inv.json\"\n \n with open(chordInvDicPath) as json_file:\n chordInvDic = json.load(json_file)\n with open(chordRootInvDicPath) as json_file:\n chordRootInvDic = json.load(json_file)\n with open(chordAttrInvDicPath) as json_file:\n chordAttrInvDic = json.load(json_file)\n\n # Confusion matrix (CHORD)\n topChordList = []\n with open(\"./dataset/vevo_meta/top_chord.txt\", encoding = 'utf-8') as f:\n for line in f:\n line = line.strip()\n line_arr = line.split(\" \")\n if 
len(line_arr) == 3 :\n chordID = line_arr[1]\n topChordList.append( int(chordID) )\n topChordList = np.array(topChordList)\n topChordList = topChordList[:10]\n mask = np.isin(true_labels, topChordList)\n true_labels = np.array(true_labels)[mask]\n pred_labels = np.array(pred_labels)[mask]\n\n conf_matrix = confusion_matrix(true_labels, pred_labels, labels=topChordList)\n label_names = [ chordInvDic[str(label_id)] for label_id in topChordList ]\n \n plt.figure(figsize=(8, 6))\n plt.imshow(conf_matrix, cmap=plt.cm.Blues)\n plt.title(\"Confusion Matrix\")\n plt.colorbar()\n tick_marks = np.arange(len(topChordList))\n plt.xticks(tick_marks, label_names, rotation=45)\n plt.yticks(tick_marks, label_names)\n thresh = conf_matrix.max() / 2.0\n for i in range(conf_matrix.shape[0]):\n for j in range(conf_matrix.shape[1]):\n plt.text(j, i, format(conf_matrix[i, j], 'd'),\n ha=\"center\", va=\"center\",\n color=\"white\" if conf_matrix[i, j] > thresh else \"black\")\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()\n plt.savefig(\"confusion_matrix.png\")\n plt.show()\n\n # Confusion matrix (CHORD ROOT) \n chordRootList = np.arange(1, 13)\n conf_matrix = confusion_matrix(true_root_labels, pred_root_labels, labels= chordRootList )\n \n label_names = [ chordRootInvDic[str(label_id)] for label_id in chordRootList ]\n \n plt.figure(figsize=(8, 6))\n plt.imshow(conf_matrix, cmap=plt.cm.Blues)\n plt.title(\"Confusion Matrix (Chord root)\")\n plt.colorbar()\n tick_marks = np.arange(len(chordRootList))\n plt.xticks(tick_marks, label_names, rotation=45)\n plt.yticks(tick_marks, label_names)\n thresh = conf_matrix.max() / 2.0\n for i in range(conf_matrix.shape[0]):\n for j in range(conf_matrix.shape[1]):\n plt.text(j, i, format(conf_matrix[i, j], 'd'),\n ha=\"center\", va=\"center\",\n color=\"white\" if conf_matrix[i, j] > thresh else \"black\")\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()\n plt.savefig(\"confusion_matrix_root.png\")\n plt.show()\n\n # Confusion matrix (CHORD ATTR)\n chordAttrList = np.arange(1, 14)\n conf_matrix = confusion_matrix(true_attr_labels, pred_attr_labels, labels= chordAttrList )\n \n label_names = [ chordAttrInvDic[str(label_id)] for label_id in chordAttrList ]\n \n plt.figure(figsize=(8, 6))\n plt.imshow(conf_matrix, cmap=plt.cm.Blues)\n plt.title(\"Confusion Matrix (Chord quality)\")\n plt.colorbar()\n tick_marks = np.arange(len(chordAttrList))\n plt.xticks(tick_marks, label_names, rotation=45)\n plt.yticks(tick_marks, label_names)\n thresh = conf_matrix.max() / 2.0\n for i in range(conf_matrix.shape[0]):\n for j in range(conf_matrix.shape[1]):\n plt.text(j, i, format(conf_matrix[i, j], 'd'),\n ha=\"center\", va=\"center\",\n color=\"white\" if conf_matrix[i, j] > thresh else \"black\")\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()\n plt.savefig(\"confusion_matrix_quality.png\")\n plt.show()\n\n return { \"avg_total_loss\" : avg_total_loss, \n \"avg_loss_chord\" : avg_loss_chord, \n \"avg_loss_emotion\": avg_loss_emotion, \n \"avg_acc\" : avg_acc, \n \"avg_cor\" : avg_cor, \n \"avg_acc_cor\" : avg_acc_cor, \n \"avg_h1\" : avg_h1, \n \"avg_h3\" : avg_h3,\n \"avg_h5\" : avg_h5 }" } ]
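The `LrStepTracker` snippet in this context defines the inverse-square-root warm-up schedule lr(step) = (1/sqrt(d_model)) * min(1/sqrt(step), step * warmup_steps**-1.5) and is meant to be handed to `torch.optim.lr_scheduler.LambdaLR`. A minimal wiring sketch using the class and `get_lr` as defined above; the base learning rate of 1.0, d_model=512 and 4000 warm-up steps are assumptions for illustration, not values read from train.py:

import torch
from torch.optim import Adam
from torch.optim.lr_scheduler import LambdaLR

model = torch.nn.Linear(512, 512)    # stand-in for the transformer
tracker = LrStepTracker(model_dim=512, warmup_steps=4000, init_steps=0)

# LambdaLR multiplies the optimizer's base lr by tracker.step(step),
# so the base lr is set to 1.0 and the tracker supplies the actual value
opt = Adam(model.parameters(), lr=1.0)
lr_scheduler = LambdaLR(opt, tracker.step)

for step in range(1, 6):
    opt.step()                       # gradients would be computed before this
    lr_scheduler.step()
    print(step, get_lr(opt))         # the learn rate rises linearly during warm-up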
import os import csv import shutil import torch import torch.nn as nn from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from torch.optim import Adam from dataset.vevo_dataset import compute_vevo_accuracy, create_vevo_datasets from model.music_transformer import MusicTransformer from model.video_music_transformer import VideoMusicTransformer from model.loss import SmoothCrossEntropyLoss from utilities.constants import * from utilities.device import get_device, use_cuda from utilities.lr_scheduling import LrStepTracker, get_lr from utilities.argument_funcs import parse_train_args, print_train_args, write_model_params from utilities.run_model_vevo import train_epoch, eval_model from torch.utils.tensorboard import SummaryWriter
13,056
CSV_HEADER = ["Epoch", "Learn rate", "Avg Train loss (total)", "Avg Train loss (chord)", "Avg Train loss (emotion)", "Avg Eval loss (total)", "Avg Eval loss (chord)", "Avg Eval loss (emotion)"] BASELINE_EPOCH = -1 version = VERSION split_ver = SPLIT_VER split_path = "split_" + split_ver VIS_MODELS_ARR = [ "2d/clip_l14p" ] # main def main( vm = "" , isPrintArgs = True ): args = parse_train_args() if isPrintArgs: print_train_args(args) if vm != "": args.vis_models = vm if args.is_video: vis_arr = args.vis_models.split(" ") vis_arr.sort() vis_abbr_path = "" for v in vis_arr: vis_abbr_path = vis_abbr_path + "_" + VIS_ABBR_DIC[v] vis_abbr_path = vis_abbr_path[1:] else: vis_abbr_path = "no_video" if(args.force_cpu):
CSV_HEADER = ["Epoch", "Learn rate", "Avg Train loss (total)", "Avg Train loss (chord)", "Avg Train loss (emotion)", "Avg Eval loss (total)", "Avg Eval loss (chord)", "Avg Eval loss (emotion)"] BASELINE_EPOCH = -1 version = VERSION split_ver = SPLIT_VER split_path = "split_" + split_ver VIS_MODELS_ARR = [ "2d/clip_l14p" ] # main def main( vm = "" , isPrintArgs = True ): args = parse_train_args() if isPrintArgs: print_train_args(args) if vm != "": args.vis_models = vm if args.is_video: vis_arr = args.vis_models.split(" ") vis_arr.sort() vis_abbr_path = "" for v in vis_arr: vis_abbr_path = vis_abbr_path + "_" + VIS_ABBR_DIC[v] vis_abbr_path = vis_abbr_path[1:] else: vis_abbr_path = "no_video" if(args.force_cpu):
use_cuda(False)
6
2023-10-13 09:06:24+00:00
16k
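The separated-chord branch of the evaluation above blends the two losses as LOSS_LAMBDA * loss_chord + (1 - LOSS_LAMBDA) * loss_emotion. A minimal sketch of that convex combination follows; the loss modules and the 0.5 value are stand-in assumptions, since the record only shows that SmoothCrossEntropyLoss is imported and LOSS_LAMBDA comes from utilities.constants.

import torch.nn as nn

chord_loss_fn = nn.CrossEntropyLoss()     # stand-in; the record imports SmoothCrossEntropyLoss instead
emotion_loss_fn = nn.BCEWithLogitsLoss()  # hypothetical stand-in for the emotion loss
LOSS_LAMBDA = 0.5                         # assumed value; the real one lives in utilities.constants

def blended_eval_loss(chord_logits, chord_targets, emotion_logits, emotion_targets):
    # Convex combination of the chord and emotion terms, as in the eval loop above.
    loss_chord = chord_loss_fn(chord_logits, chord_targets)
    loss_emotion = emotion_loss_fn(emotion_logits, emotion_targets)
    return LOSS_LAMBDA * loss_chord + (1.0 - LOSS_LAMBDA) * loss_emotion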
LeapLabTHU/Rank-DETR
projects/h_deformable_detr/configs/models/h_deformable_detr_r50.py
[ { "identifier": "HungarianMatcher", "path": "detrex/modeling/matcher/matcher.py", "snippet": "class HungarianMatcher(nn.Module):\n \"\"\"HungarianMatcher which computes an assignment between targets and predictions.\n\n For efficiency reasons, the targets don't include the no_object. Because of this, in general,\n there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions,\n while the others are un-matched (and thus treated as non-objects).\n\n Args:\n cost_class (float): The relative weight of the classification error\n in the matching cost. Default: 1.\n cost_bbox (float): The relative weight of the L1 error of the bounding box\n coordinates in the matching cost. Default: 1.\n cost_giou (float): This is the relative weight of the giou loss of\n the bounding box in the matching cost. Default: 1.\n cost_class_type (str): How the classification error is calculated.\n Choose from ``[\"ce_cost\", \"focal_loss_cost\"]``. Default: \"focal_loss_cost\".\n alpha (float): Weighting factor in range (0, 1) to balance positive vs\n negative examples in focal loss. Default: 0.25.\n gamma (float): Exponent of modulating factor (1 - p_t) to balance easy vs\n hard examples in focal loss. Default: 2.\n \"\"\"\n\n def __init__(\n self,\n cost_class: float = 1,\n cost_bbox: float = 1,\n cost_giou: float = 1,\n cost_class_type: str = \"focal_loss_cost\",\n alpha: float = 0.25,\n gamma: float = 2.0,\n ):\n super().__init__()\n self.cost_class = cost_class\n self.cost_bbox = cost_bbox\n self.cost_giou = cost_giou\n self.cost_class_type = cost_class_type\n self.alpha = alpha\n self.gamma = gamma\n assert cost_class != 0 or cost_bbox != 0 or cost_giou != 0, \"all costs cant be 0\"\n assert cost_class_type in {\n \"ce_cost\",\n \"focal_loss_cost\",\n }, \"only support ce loss or focal loss for computing class cost\"\n\n @torch.no_grad()\n def forward(self, outputs, targets):\n \"\"\"Forward function for `HungarianMatcher` which performs the matching.\n\n Args:\n outputs (Dict[str, torch.Tensor]): This is a dict that contains at least these entries:\n\n - ``\"pred_logits\"``: Tensor of shape (bs, num_queries, num_classes) with the classification logits.\n - ``\"pred_boxes\"``: Tensor of shape (bs, num_queries, 4) with the predicted box coordinates.\n\n targets (List[Dict[str, torch.Tensor]]): This is a list of targets (len(targets) = batch_size),\n where each target is a dict containing:\n\n - ``\"labels\"``: Tensor of shape (num_target_boxes, ) (where num_target_boxes is the number of ground-truth objects in the target) containing the class labels. 
# noqa\n - ``\"boxes\"``: Tensor of shape (num_target_boxes, 4) containing the target box coordinates.\n\n Returns:\n list[torch.Tensor]: A list of size batch_size, containing tuples of `(index_i, index_j)` where:\n\n - ``index_i`` is the indices of the selected predictions (in order)\n - ``index_j`` is the indices of the corresponding selected targets (in order)\n\n For each batch element, it holds: `len(index_i) = len(index_j) = min(num_queries, num_target_boxes)`\n \"\"\"\n bs, num_queries = outputs[\"pred_logits\"].shape[:2]\n\n # We flatten to compute the cost matrices in a batch\n if self.cost_class_type == \"ce_cost\":\n out_prob = (\n outputs[\"pred_logits\"].flatten(0, 1).softmax(-1)\n ) # [batch_size * num_queries, num_classes]\n elif self.cost_class_type == \"focal_loss_cost\":\n out_prob = (\n outputs[\"pred_logits\"].flatten(0, 1).sigmoid()\n ) # [batch_size * num_queries, num_classes]\n\n out_bbox = outputs[\"pred_boxes\"].flatten(0, 1) # [batch_size * num_queries, 4]\n\n # Also concat the target labels and boxes\n tgt_ids = torch.cat([v[\"labels\"] for v in targets])\n tgt_bbox = torch.cat([v[\"boxes\"] for v in targets])\n\n # Compute the classification cost.\n if self.cost_class_type == \"ce_cost\":\n # Compute the classification cost. Contrary to the loss, we don't use the NLL,\n # but approximate it in 1 - proba[target class].\n # The 1 is a constant that doesn't change the matching, it can be ommitted.\n cost_class = -out_prob[:, tgt_ids]\n elif self.cost_class_type == \"focal_loss_cost\":\n alpha = self.alpha\n gamma = self.gamma\n neg_cost_class = (1 - alpha) * (out_prob**gamma) * (-(1 - out_prob + 1e-8).log())\n pos_cost_class = alpha * ((1 - out_prob) ** gamma) * (-(out_prob + 1e-8).log())\n cost_class = pos_cost_class[:, tgt_ids] - neg_cost_class[:, tgt_ids]\n\n # Compute the L1 cost between boxes\n cost_bbox = torch.cdist(out_bbox, tgt_bbox, p=1)\n\n # Compute the giou cost betwen boxes\n cost_giou = -generalized_box_iou(box_cxcywh_to_xyxy(out_bbox), box_cxcywh_to_xyxy(tgt_bbox))\n\n # Final cost matrix\n C = self.cost_bbox * cost_bbox + self.cost_class * cost_class + self.cost_giou * cost_giou\n C = C.view(bs, num_queries, -1).cpu()\n\n sizes = [len(v[\"boxes\"]) for v in targets]\n indices = [linear_sum_assignment(c[i]) for i, c in enumerate(C.split(sizes, -1))]\n return [\n (torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64))\n for i, j in indices\n ]\n\n def __repr__(self, _repr_indent=4):\n head = \"Matcher \" + self.__class__.__name__\n body = [\n \"cost_class: {}\".format(self.cost_class),\n \"cost_bbox: {}\".format(self.cost_bbox),\n \"cost_giou: {}\".format(self.cost_giou),\n \"cost_class_type: {}\".format(self.cost_class_type),\n \"focal cost alpha: {}\".format(self.alpha),\n \"focal cost gamma: {}\".format(self.gamma),\n ]\n lines = [head] + [\" \" * _repr_indent + line for line in body]\n return \"\\n\".join(lines)" }, { "identifier": "ChannelMapper", "path": "detrex/modeling/neck/channel_mapper.py", "snippet": "class ChannelMapper(nn.Module):\n \"\"\"Channel Mapper for reduce/increase channels of backbone features. Modified\n from `mmdet <https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/necks/channel_mapper.py>`_.\n\n This is used to reduce/increase the channels of backbone features.\n\n Args:\n input_shape (Dict[str, ShapeSpec]): A dict which contains the backbone features meta infomation,\n e.g. 
``input_shape = {\"res5\": ShapeSpec(channels=2048)}``.\n in_features (List[str]): A list contains the keys which maps the features output from the backbone,\n e.g. ``in_features = [\"res\"]``.\n out_channels (int): Number of output channels for each scale.\n kernel_size (int, optional): Size of the convolving kernel for each scale.\n Default: 3.\n stride (int, optional): Stride of convolution for each scale. Default: 1.\n bias (bool, optional): If True, adds a learnable bias to the output of each scale.\n Default: True.\n groups (int, optional): Number of blocked connections from input channels to\n output channels for each scale. Default: 1.\n dilation (int, optional): Spacing between kernel elements for each scale.\n Default: 1.\n norm_layer (nn.Module, optional): The norm layer used for each scale. Default: None.\n activation (nn.Module, optional): The activation layer used for each scale. Default: None.\n num_outs (int, optional): Number of output feature maps. There will be ``extra_convs`` when\n ``num_outs`` is larger than the length of ``in_features``. Default: None.\n\n Examples:\n >>> import torch\n >>> import torch.nn as nn\n >>> from detrex.modeling import ChannelMapper\n >>> from detectron2.modeling import ShapeSpec\n >>> input_features = {\n ... \"p0\": torch.randn(1, 128, 128, 128),\n ... \"p1\": torch.randn(1, 256, 64, 64),\n ... \"p2\": torch.randn(1, 512, 32, 32),\n ... \"p3\": torch.randn(1, 1024, 16, 16),\n ... }\n >>> input_shapes = {\n ... \"p0\": ShapeSpec(channels=128),\n ... \"p1\": ShapeSpec(channels=256),\n ... \"p2\": ShapeSpec(channels=512),\n ... \"p3\": ShapeSpec(channels=1024),\n ... }\n >>> in_features = [\"p0\", \"p1\", \"p2\", \"p3\"]\n >>> neck = ChannelMapper(\n ... input_shapes=input_shapes,\n ... in_features=in_features,\n ... out_channels=256,\n ... norm_layer=nn.GroupNorm(num_groups=32, num_channels=256)\n >>> outputs = neck(input_features)\n >>> for i in range(len(outputs)):\n ... 
print(f\"output[{i}].shape = {outputs[i].shape}\")\n output[0].shape = torch.Size([1, 256, 128, 128])\n output[1].shape = torch.Size([1, 256, 64, 64])\n output[2].shape = torch.Size([1, 256, 32, 32])\n output[3].shape = torch.Size([1, 256, 16, 16])\n \"\"\"\n\n def __init__(\n self,\n input_shapes: Dict[str, ShapeSpec],\n in_features: List[str],\n out_channels: int,\n kernel_size: int = 3,\n stride: int = 1,\n bias: bool = True,\n groups: int = 1,\n dilation: int = 1,\n norm_layer: nn.Module = None,\n activation: nn.Module = None,\n num_outs: int = None,\n **kwargs,\n ):\n super(ChannelMapper, self).__init__()\n self.extra_convs = None\n\n in_channels_per_feature = [input_shapes[f].channels for f in in_features]\n\n if num_outs is None:\n num_outs = len(input_shapes)\n\n self.convs = nn.ModuleList()\n for in_channel in in_channels_per_feature:\n self.convs.append(\n ConvNormAct(\n in_channels=in_channel,\n out_channels=out_channels,\n kernel_size=kernel_size,\n stride=stride,\n padding=(kernel_size - 1) // 2,\n bias=bias,\n groups=groups,\n dilation=dilation,\n norm_layer=copy.deepcopy(norm_layer),\n activation=copy.deepcopy(activation),\n )\n )\n\n if num_outs > len(in_channels_per_feature):\n self.extra_convs = nn.ModuleList()\n for i in range(len(in_channels_per_feature), num_outs):\n if i == len(in_channels_per_feature):\n in_channel = in_channels_per_feature[-1]\n else:\n in_channel = out_channels\n self.extra_convs.append(\n ConvNormAct(\n in_channels=in_channel,\n out_channels=out_channels,\n kernel_size=3,\n stride=2,\n padding=1,\n bias=bias,\n groups=groups,\n dilation=dilation,\n norm_layer=copy.deepcopy(norm_layer),\n activation=copy.deepcopy(activation),\n )\n )\n\n self.input_shapes = input_shapes\n self.in_features = in_features\n self.out_channels = out_channels\n\n def forward(self, inputs):\n \"\"\"Forward function for ChannelMapper\n\n Args:\n inputs (Dict[str, torch.Tensor]): The backbone feature maps.\n\n Return:\n tuple(torch.Tensor): A tuple of the processed features.\n \"\"\"\n assert len(inputs) == len(self.convs)\n outs = [self.convs[i](inputs[self.in_features[i]]) for i in range(len(inputs))]\n if self.extra_convs:\n for i in range(len(self.extra_convs)):\n if i == 0:\n outs.append(self.extra_convs[0](inputs[self.in_features[-1]]))\n else:\n outs.append(self.extra_convs[i](outs[-1]))\n return tuple(outs)" }, { "identifier": "PositionEmbeddingSine", "path": "detrex/layers/position_embedding.py", "snippet": "class PositionEmbeddingSine(nn.Module):\n \"\"\"Sinusoidal position embedding used in DETR model.\n\n Please see `End-to-End Object Detection with Transformers\n <https://arxiv.org/pdf/2005.12872>`_ for more details.\n\n Args:\n num_pos_feats (int): The feature dimension for each position along\n x-axis or y-axis. The final returned dimension for each position\n is 2 times of the input value.\n temperature (int, optional): The temperature used for scaling\n the position embedding. Default: 10000.\n scale (float, optional): A scale factor that scales the position\n embedding. The scale will be used only when `normalize` is True.\n Default: 2*pi.\n eps (float, optional): A value added to the denominator for numerical\n stability. 
Default: 1e-6.\n offset (float): An offset added to embed when doing normalization.\n normalize (bool, optional): Whether to normalize the position embedding.\n Default: False.\n \"\"\"\n\n def __init__(\n self,\n num_pos_feats: int = 64,\n temperature: int = 10000,\n scale: float = 2 * math.pi,\n eps: float = 1e-6,\n offset: float = 0.0,\n normalize: bool = False,\n ):\n super().__init__()\n if normalize:\n assert isinstance(scale, (float, int)), (\n \"when normalize is set,\"\n \"scale should be provided and in float or int type, \"\n f\"found {type(scale)}\"\n )\n self.num_pos_feats = num_pos_feats\n self.temperature = temperature\n self.normalize = normalize\n self.scale = scale\n self.eps = eps\n self.offset = offset\n\n def forward(self, mask: torch.Tensor, **kwargs) -> torch.Tensor:\n \"\"\"Forward function for `PositionEmbeddingSine`.\n\n Args:\n mask (torch.Tensor): ByteTensor mask. Non-zero values representing\n ignored positions, while zero values means valid positions\n for the input tensor. Shape as `(bs, h, w)`.\n\n Returns:\n torch.Tensor: Returned position embedding with\n shape `(bs, num_pos_feats * 2, h, w)`\n \"\"\"\n assert mask is not None\n not_mask = ~mask\n y_embed = not_mask.cumsum(1, dtype=torch.float32)\n x_embed = not_mask.cumsum(2, dtype=torch.float32)\n if self.normalize:\n y_embed = (y_embed + self.offset) / (y_embed[:, -1:, :] + self.eps) * self.scale\n x_embed = (x_embed + self.offset) / (x_embed[:, :, -1:] + self.eps) * self.scale\n dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=mask.device)\n dim_t = self.temperature ** (\n 2 * torch.div(dim_t, 2, rounding_mode=\"floor\") / self.num_pos_feats\n )\n pos_x = x_embed[:, :, :, None] / dim_t\n pos_y = y_embed[:, :, :, None] / dim_t\n\n # use view as mmdet instead of flatten for dynamically exporting to ONNX\n B, H, W = mask.size()\n pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).view(\n B, H, W, -1\n )\n pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).view(\n B, H, W, -1\n )\n pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)\n return pos" }, { "identifier": "HDeformableDetrTransformerEncoder", "path": "projects/h_deformable_detr/modeling/h_deformable_transformer.py", "snippet": "class HDeformableDetrTransformerEncoder(TransformerLayerSequence):\n def __init__(\n self,\n embed_dim: int = 256,\n num_heads: int = 8,\n feedforward_dim: int = 1024,\n attn_dropout: float = 0.1,\n ffn_dropout: float = 0.1,\n num_layers: int = 6,\n post_norm: bool = False,\n num_feature_levels: int = 4,\n ):\n super(HDeformableDetrTransformerEncoder, self).__init__(\n transformer_layers=BaseTransformerLayer(\n attn=MultiScaleDeformableAttention(\n embed_dim=embed_dim,\n num_heads=num_heads,\n dropout=attn_dropout,\n batch_first=True,\n num_levels=num_feature_levels,\n ),\n ffn=FFN(\n embed_dim=embed_dim,\n feedforward_dim=feedforward_dim,\n output_dim=embed_dim,\n num_fcs=2,\n ffn_drop=ffn_dropout,\n ),\n norm=nn.LayerNorm(embed_dim),\n operation_order=(\"self_attn\", \"norm\", \"ffn\", \"norm\"),\n ),\n num_layers=num_layers,\n )\n self.embed_dim = self.layers[0].embed_dim\n self.pre_norm = self.layers[0].pre_norm\n\n if post_norm:\n self.post_norm_layer = nn.LayerNorm(self.embed_dim)\n else:\n self.post_norm_layer = None\n\n def forward(\n self,\n query,\n key,\n value,\n query_pos=None,\n key_pos=None,\n attn_masks=None,\n query_key_padding_mask=None,\n key_padding_mask=None,\n **kwargs,\n ):\n\n for layer in self.layers:\n 
query = layer(\n query,\n key,\n value,\n query_pos=query_pos,\n attn_masks=attn_masks,\n query_key_padding_mask=query_key_padding_mask,\n key_padding_mask=key_padding_mask,\n **kwargs,\n )\n\n if self.post_norm_layer is not None:\n query = self.post_norm_layer(query)\n return query" }, { "identifier": "HDeformableDetrTransformerDecoder", "path": "projects/h_deformable_detr/modeling/h_deformable_transformer.py", "snippet": "class HDeformableDetrTransformerDecoder(TransformerLayerSequence):\n def __init__(\n self,\n embed_dim: int = 256,\n num_heads: int = 8,\n feedforward_dim: int = 1024,\n attn_dropout: float = 0.1,\n ffn_dropout: float = 0.1,\n num_layers: int = 6,\n return_intermediate: bool = True,\n num_feature_levels: int = 4,\n look_forward_twice=True,\n ):\n super(HDeformableDetrTransformerDecoder, self).__init__(\n transformer_layers=BaseTransformerLayer(\n attn=[\n MultiheadAttention(\n embed_dim=embed_dim,\n num_heads=num_heads,\n attn_drop=attn_dropout,\n batch_first=True,\n ),\n MultiScaleDeformableAttention(\n embed_dim=embed_dim,\n num_heads=num_heads,\n dropout=attn_dropout,\n batch_first=True,\n num_levels=num_feature_levels,\n ),\n ],\n ffn=FFN(\n embed_dim=embed_dim,\n feedforward_dim=feedforward_dim,\n output_dim=embed_dim,\n ffn_drop=ffn_dropout,\n ),\n norm=nn.LayerNorm(embed_dim),\n operation_order=(\n \"self_attn\",\n \"norm\",\n \"cross_attn\",\n \"norm\",\n \"ffn\",\n \"norm\",\n ),\n ),\n num_layers=num_layers,\n )\n self.return_intermediate = return_intermediate\n\n self.bbox_embed = None\n self.class_embed = None\n self.look_forward_twice = look_forward_twice\n\n def forward(\n self,\n query,\n key,\n value,\n query_pos=None,\n key_pos=None,\n attn_masks=None,\n query_key_padding_mask=None,\n key_padding_mask=None,\n reference_points=None,\n valid_ratios=None,\n **kwargs,\n ):\n output = query\n\n intermediate = []\n intermediate_reference_points = []\n for layer_idx, layer in enumerate(self.layers):\n if reference_points.shape[-1] == 4:\n reference_points_input = (\n reference_points[:, :, None]\n * torch.cat([valid_ratios, valid_ratios], -1)[:, None]\n )\n else:\n assert reference_points.shape[-1] == 2\n reference_points_input = reference_points[:, :, None] * valid_ratios[:, None]\n\n output = layer(\n output,\n key,\n value,\n query_pos=query_pos,\n key_pos=key_pos,\n attn_masks=attn_masks,\n query_key_padding_mask=query_key_padding_mask,\n key_padding_mask=key_padding_mask,\n reference_points=reference_points_input,\n **kwargs,\n )\n\n if self.bbox_embed is not None:\n tmp = self.bbox_embed[layer_idx](output)\n if reference_points.shape[-1] == 4:\n new_reference_points = tmp + inverse_sigmoid(reference_points)\n new_reference_points = new_reference_points.sigmoid()\n else:\n assert reference_points.shape[-1] == 2\n new_reference_points = tmp\n new_reference_points[..., :2] = tmp[..., :2] + inverse_sigmoid(reference_points)\n new_reference_points = new_reference_points.sigmoid()\n reference_points = new_reference_points.detach()\n\n if self.return_intermediate:\n intermediate.append(output)\n intermediate_reference_points.append(\n new_reference_points if self.look_forward_twice else reference_points\n )\n\n if self.return_intermediate:\n return torch.stack(intermediate), torch.stack(intermediate_reference_points)\n\n return output, reference_points" }, { "identifier": "HDeformableDetrTransformer", "path": "projects/h_deformable_detr/modeling/h_deformable_transformer.py", "snippet": "class HDeformableDetrTransformer(nn.Module):\n \"\"\"Transformer module for 
Deformable DETR\n\n Args:\n encoder (nn.Module): encoder module.\n decoder (nn.Module): decoder module.\n as_two_stage (bool): whether to use two-stage transformer. Default False.\n num_feature_levels (int): number of feature levels. Default 4.\n two_stage_num_proposals (int): number of proposals in two-stage transformer. Default 300.\n Only used when as_two_stage is True.\n \"\"\"\n\n def __init__(\n self,\n encoder=None,\n decoder=None,\n num_feature_levels=4,\n as_two_stage=False,\n two_stage_num_proposals=300,\n mixed_selection=True,\n ):\n super(HDeformableDetrTransformer, self).__init__()\n self.encoder = encoder\n self.decoder = decoder\n self.num_feature_levels = num_feature_levels\n self.as_two_stage = as_two_stage\n self.two_stage_num_proposals = two_stage_num_proposals\n\n self.embed_dim = self.encoder.embed_dim\n\n self.level_embeds = nn.Parameter(torch.Tensor(self.num_feature_levels, self.embed_dim))\n\n if self.as_two_stage:\n self.enc_output = nn.Linear(self.embed_dim, self.embed_dim)\n self.enc_output_norm = nn.LayerNorm(self.embed_dim)\n self.pos_trans = nn.Linear(self.embed_dim * 2, self.embed_dim * 2)\n self.pos_trans_norm = nn.LayerNorm(self.embed_dim * 2)\n else:\n self.reference_points = nn.Linear(self.embed_dim, 2)\n\n self.mixed_selection = mixed_selection\n\n self.init_weights()\n\n def init_weights(self):\n for p in self.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)\n for m in self.modules():\n if isinstance(m, MultiScaleDeformableAttention):\n m.init_weights()\n if not self.as_two_stage:\n nn.init.xavier_normal_(self.reference_points.weight.data, gain=1.0)\n nn.init.constant_(self.reference_points.bias.data, 0.0)\n nn.init.normal_(self.level_embeds)\n\n def gen_encoder_output_proposals(self, memory, memory_padding_mask, spatial_shapes):\n N, S, C = memory.shape\n proposals = []\n _cur = 0\n for lvl, (H, W) in enumerate(spatial_shapes):\n mask_flatten_ = memory_padding_mask[:, _cur : (_cur + H * W)].view(N, H, W, 1)\n valid_H = torch.sum(~mask_flatten_[:, :, 0, 0], 1)\n valid_W = torch.sum(~mask_flatten_[:, 0, :, 0], 1)\n\n grid_y, grid_x = torch.meshgrid(\n torch.linspace(0, H - 1, H, dtype=torch.float32, device=memory.device),\n torch.linspace(0, W - 1, W, dtype=torch.float32, device=memory.device),\n )\n grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1)\n\n scale = torch.cat([valid_W.unsqueeze(-1), valid_H.unsqueeze(-1)], 1).view(N, 1, 1, 2)\n grid = (grid.unsqueeze(0).expand(N, -1, -1, -1) + 0.5) / scale\n wh = torch.ones_like(grid) * 0.05 * (2.0**lvl)\n proposal = torch.cat((grid, wh), -1).view(N, -1, 4)\n proposals.append(proposal)\n _cur += H * W\n\n output_proposals = torch.cat(proposals, 1)\n output_proposals_valid = ((output_proposals > 0.01) & (output_proposals < 0.99)).all(\n -1, keepdim=True\n )\n output_proposals = torch.log(output_proposals / (1 - output_proposals))\n output_proposals = output_proposals.masked_fill(\n memory_padding_mask.unsqueeze(-1), float(\"inf\")\n )\n output_proposals = output_proposals.masked_fill(~output_proposals_valid, float(\"inf\"))\n\n output_memory = memory\n output_memory = output_memory.masked_fill(memory_padding_mask.unsqueeze(-1), float(0))\n output_memory = output_memory.masked_fill(~output_proposals_valid, float(0))\n output_memory = self.enc_output_norm(self.enc_output(output_memory))\n return output_memory, output_proposals\n\n @staticmethod\n def get_reference_points(spatial_shapes, valid_ratios, device):\n \"\"\"Get the reference points used in decoder.\n\n Args:\n 
spatial_shapes (Tensor): The shape of all\n feature maps, has shape (num_level, 2).\n valid_ratios (Tensor): The radios of valid\n points on the feature map, has shape\n (bs, num_levels, 2)\n device (obj:`device`): The device where\n reference_points should be.\n\n Returns:\n Tensor: reference points used in decoder, has \\\n shape (bs, num_keys, num_levels, 2).\n \"\"\"\n reference_points_list = []\n for lvl, (H, W) in enumerate(spatial_shapes):\n # TODO check this 0.5\n ref_y, ref_x = torch.meshgrid(\n torch.linspace(0.5, H - 0.5, H, dtype=torch.float32, device=device),\n torch.linspace(0.5, W - 0.5, W, dtype=torch.float32, device=device),\n )\n ref_y = ref_y.reshape(-1)[None] / (valid_ratios[:, None, lvl, 1] * H)\n ref_x = ref_x.reshape(-1)[None] / (valid_ratios[:, None, lvl, 0] * W)\n ref = torch.stack((ref_x, ref_y), -1)\n reference_points_list.append(ref)\n reference_points = torch.cat(reference_points_list, 1)\n reference_points = reference_points[:, :, None] * valid_ratios[:, None]\n return reference_points\n\n def get_valid_ratio(self, mask):\n \"\"\"Get the valid radios of feature maps of all level.\"\"\"\n _, H, W = mask.shape\n valid_H = torch.sum(~mask[:, :, 0], 1)\n valid_W = torch.sum(~mask[:, 0, :], 1)\n valid_ratio_h = valid_H.float() / H\n valid_ratio_w = valid_W.float() / W\n valid_ratio = torch.stack([valid_ratio_w, valid_ratio_h], -1)\n return valid_ratio\n\n def get_proposal_pos_embed(self, proposals, num_pos_feats=128, temperature=10000):\n \"\"\"Get the position embedding of proposal.\"\"\"\n scale = 2 * math.pi\n dim_t = torch.arange(num_pos_feats, dtype=torch.float32, device=proposals.device)\n dim_t = temperature ** (2 * torch.div(dim_t, 2, rounding_mode=\"floor\") / num_pos_feats)\n # N, L, 4\n proposals = proposals.sigmoid() * scale\n # N, L, 4, 128\n pos = proposals[:, :, :, None] / dim_t\n # N, L, 4, 64, 2\n pos = torch.stack((pos[:, :, :, 0::2].sin(), pos[:, :, :, 1::2].cos()), dim=4).flatten(2)\n return pos\n\n def forward(\n self,\n multi_level_feats,\n multi_level_masks,\n multi_level_pos_embeds,\n query_embed,\n self_attn_mask,\n **kwargs,\n ):\n assert self.as_two_stage or query_embed is not None\n\n feat_flatten = []\n mask_flatten = []\n lvl_pos_embed_flatten = []\n spatial_shapes = []\n for lvl, (feat, mask, pos_embed) in enumerate(\n zip(multi_level_feats, multi_level_masks, multi_level_pos_embeds)\n ):\n bs, c, h, w = feat.shape\n spatial_shape = (h, w)\n spatial_shapes.append(spatial_shape)\n\n feat = feat.flatten(2).transpose(1, 2) # bs, hw, c\n mask = mask.flatten(1)\n pos_embed = pos_embed.flatten(2).transpose(1, 2) # bs, hw, c\n lvl_pos_embed = pos_embed + self.level_embeds[lvl].view(1, 1, -1)\n lvl_pos_embed_flatten.append(lvl_pos_embed)\n feat_flatten.append(feat)\n mask_flatten.append(mask)\n feat_flatten = torch.cat(feat_flatten, 1)\n mask_flatten = torch.cat(mask_flatten, 1)\n lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1)\n spatial_shapes = torch.as_tensor(\n spatial_shapes, dtype=torch.long, device=feat_flatten.device\n )\n level_start_index = torch.cat(\n (spatial_shapes.new_zeros((1,)), spatial_shapes.prod(1).cumsum(0)[:-1])\n )\n valid_ratios = torch.stack([self.get_valid_ratio(m) for m in multi_level_masks], 1)\n\n reference_points = self.get_reference_points(\n spatial_shapes, valid_ratios, device=feat.device\n )\n\n memory = self.encoder(\n query=feat_flatten,\n key=None,\n value=None,\n query_pos=lvl_pos_embed_flatten,\n query_key_padding_mask=mask_flatten,\n spatial_shapes=spatial_shapes,\n 
reference_points=reference_points,\n level_start_index=level_start_index,\n valid_ratios=valid_ratios,\n **kwargs,\n )\n\n bs, _, c = memory.shape\n if self.as_two_stage:\n output_memory, output_proposals = self.gen_encoder_output_proposals(\n memory, mask_flatten, spatial_shapes\n )\n\n enc_outputs_class = self.decoder.class_embed[self.decoder.num_layers](output_memory)\n enc_outputs_coord_unact = (\n self.decoder.bbox_embed[self.decoder.num_layers](output_memory) + output_proposals\n )\n\n topk = self.two_stage_num_proposals\n topk_proposals = torch.topk(enc_outputs_class[..., 0], topk, dim=1)[1]\n topk_coords_unact = torch.gather(\n enc_outputs_coord_unact, 1, topk_proposals.unsqueeze(-1).repeat(1, 1, 4)\n )\n topk_coords_unact = topk_coords_unact.detach()\n reference_points = topk_coords_unact.sigmoid()\n init_reference_out = reference_points\n pos_trans_out = self.pos_trans_norm(\n self.pos_trans(self.get_proposal_pos_embed(topk_coords_unact))\n )\n if not self.mixed_selection:\n query_pos, query = torch.split(pos_trans_out, c, dim=2)\n else:\n # query_pos here is the content embed for deformable DETR\n query = query_embed.unsqueeze(0).expand(bs, -1, -1)\n query_pos, _ = torch.split(pos_trans_out, c, dim=2)\n else:\n query_pos, query = torch.split(query_embed, c, dim=1)\n query_pos = query_pos.unsqueeze(0).expand(bs, -1, -1)\n query = query.unsqueeze(0).expand(bs, -1, -1)\n reference_points = self.reference_points(query_pos).sigmoid()\n init_reference_out = reference_points\n\n # decoder\n inter_states, inter_references = self.decoder(\n query=query, # bs, num_queries, embed_dims\n key=None, # bs, num_tokens, embed_dims\n value=memory, # bs, num_tokens, embed_dims\n query_pos=query_pos,\n key_padding_mask=mask_flatten, # bs, num_tokens\n reference_points=reference_points, # num_queries, 4\n spatial_shapes=spatial_shapes, # nlvl, 2\n level_start_index=level_start_index, # nlvl\n valid_ratios=valid_ratios, # bs, nlvl, 2\n attn_masks=[self_attn_mask, None],\n **kwargs,\n )\n\n inter_references_out = inter_references\n if self.as_two_stage:\n return (\n inter_states,\n init_reference_out,\n inter_references_out,\n enc_outputs_class,\n enc_outputs_coord_unact,\n )\n return inter_states, init_reference_out, inter_references_out, None, None" }, { "identifier": "HDeformableDETR", "path": "projects/h_deformable_detr/modeling/h_deformable_detr.py", "snippet": "class HDeformableDETR(nn.Module):\n \"\"\"Implements the Deformable DETR model.\n\n Code is modified from the `official github repo\n <https://github.com/fundamentalvision/Deformable-DETR>`_.\n\n More details can be found in the `paper\n <https://arxiv.org/abs/2010.04159>`_ .\n\n Args:\n backbone (nn.Module): the backbone module.\n position_embedding (nn.Module): the position embedding module.\n neck (nn.Module): the neck module.\n transformer (nn.Module): the transformer module.\n embed_dim (int): the dimension of the embedding.\n num_classes (int): Number of total categories.\n num_queries (int): Number of proposal dynamic anchor boxes in Transformer\n criterion (nn.Module): Criterion for calculating the total losses.\n pixel_mean (List[float]): Pixel mean value for image normalization.\n Default: [123.675, 116.280, 103.530].\n pixel_std (List[float]): Pixel std value for image normalization.\n Default: [58.395, 57.120, 57.375].\n aux_loss (bool): whether to use auxiliary loss. Default: True.\n with_box_refine (bool): whether to use box refinement. Default: False.\n as_two_stage (bool): whether to use two-stage. 
Default: False.\n select_box_nums_for_evaluation (int): the number of topk candidates\n slected at postprocess for evaluation. Default: 100.\n\n \"\"\"\n\n def __init__(\n self,\n backbone,\n position_embedding,\n neck,\n transformer,\n embed_dim,\n num_classes,\n num_queries_one2one,\n num_queries_one2many,\n criterion,\n pixel_mean,\n pixel_std,\n aux_loss=True,\n with_box_refine=False,\n as_two_stage=False,\n select_box_nums_for_evaluation=100,\n device=\"cuda\",\n mixed_selection=True,\n k_one2many=6,\n lambda_one2many=1.0,\n ):\n super().__init__()\n num_queries = num_queries_one2one + num_queries_one2many\n # define backbone and position embedding module\n self.backbone = backbone\n self.position_embedding = position_embedding\n\n # define neck module\n self.neck = neck\n\n # define learnable query embedding\n self.num_queries = num_queries\n if not as_two_stage:\n self.query_embedding = nn.Embedding(num_queries, embed_dim * 2)\n elif mixed_selection:\n self.query_embedding = nn.Embedding(num_queries, embed_dim)\n\n # define transformer module\n self.transformer = transformer\n\n # define classification head and box head\n self.num_classes = num_classes\n self.class_embed = nn.Linear(embed_dim, num_classes)\n self.bbox_embed = MLP(embed_dim, embed_dim, 4, 3)\n\n # where to calculate auxiliary loss in criterion\n self.aux_loss = aux_loss\n self.criterion = criterion\n\n # define contoller for box refinement and two-stage variants\n self.with_box_refine = with_box_refine\n self.as_two_stage = as_two_stage\n\n # init parameters for heads\n prior_prob = 0.01\n bias_value = -math.log((1 - prior_prob) / prior_prob)\n self.class_embed.bias.data = torch.ones(num_classes) * bias_value\n nn.init.constant_(self.bbox_embed.layers[-1].weight.data, 0)\n nn.init.constant_(self.bbox_embed.layers[-1].bias.data, 0)\n for _, neck_layer in self.neck.named_modules():\n if isinstance(neck_layer, nn.Conv2d):\n nn.init.xavier_uniform_(neck_layer.weight, gain=1)\n nn.init.constant_(neck_layer.bias, 0)\n\n # If two-stage, the last class_embed and bbox_embed is for region proposal generation\n # Decoder layers share the same heads without box refinement, while use the different\n # heads when box refinement is used.\n num_pred = (\n (transformer.decoder.num_layers + 1) if as_two_stage else transformer.decoder.num_layers\n )\n if with_box_refine:\n self.class_embed = nn.ModuleList(\n [copy.deepcopy(self.class_embed) for i in range(num_pred)]\n )\n self.bbox_embed = nn.ModuleList(\n [copy.deepcopy(self.bbox_embed) for i in range(num_pred)]\n )\n nn.init.constant_(self.bbox_embed[0].layers[-1].bias.data[2:], -2.0)\n self.transformer.decoder.bbox_embed = self.bbox_embed\n else:\n nn.init.constant_(self.bbox_embed.layers[-1].bias.data[2:], -2.0)\n self.class_embed = nn.ModuleList([self.class_embed for _ in range(num_pred)])\n self.bbox_embed = nn.ModuleList([self.bbox_embed for _ in range(num_pred)])\n self.transformer.decoder.bbox_embed = None\n\n # hack implementation for two-stage. 
The last class_embed and bbox_embed is for region proposal generation\n if as_two_stage:\n self.transformer.decoder.class_embed = self.class_embed\n for box_embed in self.bbox_embed:\n nn.init.constant_(box_embed.layers[-1].bias.data[2:], 0.0)\n\n # set topk boxes selected for inference\n self.select_box_nums_for_evaluation = select_box_nums_for_evaluation\n\n # normalizer for input raw images\n self.device = device\n pixel_mean = torch.Tensor(pixel_mean).to(self.device).view(3, 1, 1)\n pixel_std = torch.Tensor(pixel_std).to(self.device).view(3, 1, 1)\n self.normalizer = lambda x: (x - pixel_mean) / pixel_std\n self.num_queries_one2one = num_queries_one2one\n self.mixed_selection = mixed_selection\n self.k_one2many = k_one2many\n self.lambda_one2many = lambda_one2many\n\n def forward(self, batched_inputs):\n images = self.preprocess_image(batched_inputs)\n\n if self.training:\n batch_size, _, H, W = images.tensor.shape\n img_masks = images.tensor.new_ones(batch_size, H, W)\n for img_id in range(batch_size):\n # mask padding regions in batched images\n img_h, img_w = batched_inputs[img_id][\"instances\"].image_size\n img_masks[img_id, :img_h, :img_w] = 0\n else:\n batch_size, _, H, W = images.tensor.shape\n img_masks = images.tensor.new_zeros(batch_size, H, W)\n # disable the one-to-many branch queries\n # save them frist\n save_num_queries = self.num_queries\n save_two_stage_num_proposals = self.transformer.two_stage_num_proposals\n self.num_queries = self.num_queries_one2one\n self.transformer.two_stage_num_proposals = self.num_queries\n\n # original features\n features = self.backbone(images.tensor) # output feature dict\n\n # project backbone features to the reuired dimension of transformer\n # we use multi-scale features in deformable DETR\n multi_level_feats = self.neck(features)\n multi_level_masks = []\n multi_level_position_embeddings = []\n for feat in multi_level_feats:\n multi_level_masks.append(\n F.interpolate(img_masks[None], size=feat.shape[-2:]).to(torch.bool).squeeze(0)\n )\n multi_level_position_embeddings.append(self.position_embedding(multi_level_masks[-1]))\n\n # initialize object query embeddings\n query_embeds = None\n if not self.as_two_stage or self.mixed_selection:\n query_embeds = self.query_embedding.weight[0 : self.num_queries, :]\n\n # make attn mask\n \"\"\" attention mask to prevent information leakage\n \"\"\"\n self_attn_mask = (\n torch.zeros(\n [\n self.num_queries,\n self.num_queries,\n ]\n )\n .bool()\n .to(feat.device)\n )\n self_attn_mask[\n self.num_queries_one2one :,\n 0 : self.num_queries_one2one,\n ] = True\n self_attn_mask[\n 0 : self.num_queries_one2one,\n self.num_queries_one2one :,\n ] = True\n\n (\n inter_states,\n init_reference,\n inter_references,\n enc_outputs_class,\n enc_outputs_coord_unact,\n ) = self.transformer(\n multi_level_feats,\n multi_level_masks,\n multi_level_position_embeddings,\n query_embeds,\n self_attn_mask,\n )\n\n # Calculate output coordinates and classes.\n outputs_classes_one2one = []\n outputs_coords_one2one = []\n outputs_classes_one2many = []\n outputs_coords_one2many = []\n for lvl in range(inter_states.shape[0]):\n if lvl == 0:\n reference = init_reference\n else:\n reference = inter_references[lvl - 1]\n reference = inverse_sigmoid(reference)\n outputs_class = self.class_embed[lvl](inter_states[lvl])\n tmp = self.bbox_embed[lvl](inter_states[lvl])\n if reference.shape[-1] == 4:\n tmp += reference\n else:\n assert reference.shape[-1] == 2\n tmp[..., :2] += reference\n outputs_coord = tmp.sigmoid()\n 
outputs_classes_one2one.append(outputs_class[:, 0 : self.num_queries_one2one])\n outputs_classes_one2many.append(outputs_class[:, self.num_queries_one2one :])\n outputs_coords_one2one.append(outputs_coord[:, 0 : self.num_queries_one2one])\n outputs_coords_one2many.append(outputs_coord[:, self.num_queries_one2one :])\n outputs_classes_one2one = torch.stack(outputs_classes_one2one)\n # tensor shape: [num_decoder_layers, bs, num_queries_one2one, num_classes]\n outputs_coords_one2one = torch.stack(outputs_coords_one2one)\n # tensor shape: [num_decoder_layers, bs, num_queries_one2one, 4]\n outputs_classes_one2many = torch.stack(outputs_classes_one2many)\n # tensor shape: [num_decoder_layers, bs, num_queries_one2many, num_classes]\n outputs_coords_one2many = torch.stack(outputs_coords_one2many)\n # tensor shape: [num_decoder_layers, bs, num_queries_one2many, 4]\n\n # prepare for loss computation\n output = {\n \"pred_logits\": outputs_classes_one2one[-1],\n \"pred_boxes\": outputs_coords_one2one[-1],\n \"pred_logits_one2many\": outputs_classes_one2many[-1],\n \"pred_boxes_one2many\": outputs_coords_one2many[-1],\n }\n if self.aux_loss:\n output[\"aux_outputs\"] = self._set_aux_loss(\n outputs_classes_one2one, outputs_coords_one2one\n )\n output[\"aux_outputs_one2many\"] = self._set_aux_loss(\n outputs_classes_one2many, outputs_coords_one2many\n )\n\n if self.as_two_stage:\n enc_outputs_coord = enc_outputs_coord_unact.sigmoid()\n output[\"enc_outputs\"] = {\n \"pred_logits\": enc_outputs_class,\n \"pred_boxes\": enc_outputs_coord,\n }\n\n if self.training:\n gt_instances = [x[\"instances\"].to(self.device) for x in batched_inputs]\n targets = self.prepare_targets(gt_instances)\n if self.k_one2many > 0:\n loss_dict = self.train_hybrid(\n output,\n targets,\n self.k_one2many,\n self.criterion,\n self.lambda_one2many,\n )\n else:\n loss_dict = self.criterion(output, targets)\n weight_dict = self.criterion.weight_dict\n new_dict = dict()\n for key, value in weight_dict.items():\n new_dict[key] = value\n new_dict[key + \"_one2many\"] = value\n weight_dict = new_dict\n for k in loss_dict.keys():\n if k in weight_dict:\n loss_dict[k] *= weight_dict[k]\n return loss_dict\n else:\n box_cls = output[\"pred_logits\"]\n box_pred = output[\"pred_boxes\"]\n results = self.inference(box_cls, box_pred, images.image_sizes)\n processed_results = []\n for results_per_image, input_per_image, image_size in zip(\n results, batched_inputs, images.image_sizes\n ):\n height = input_per_image.get(\"height\", image_size[0])\n width = input_per_image.get(\"width\", image_size[1])\n r = detector_postprocess(results_per_image, height, width)\n processed_results.append({\"instances\": r})\n # recover the model parameters for next training epoch\n self.num_queries = save_num_queries\n self.transformer.two_stage_num_proposals = save_two_stage_num_proposals\n return processed_results\n\n def train_hybrid(self, outputs, targets, k_one2many, criterion, lambda_one2many):\n # one-to-one-loss\n loss_dict = criterion(outputs, targets)\n multi_targets = copy.deepcopy(targets)\n # repeat the targets\n for target in multi_targets:\n target[\"boxes\"] = target[\"boxes\"].repeat(k_one2many, 1)\n target[\"labels\"] = target[\"labels\"].repeat(k_one2many)\n\n outputs_one2many = dict()\n outputs_one2many[\"pred_logits\"] = outputs[\"pred_logits_one2many\"]\n outputs_one2many[\"pred_boxes\"] = outputs[\"pred_boxes_one2many\"]\n outputs_one2many[\"aux_outputs\"] = outputs[\"aux_outputs_one2many\"]\n\n # one-to-many loss\n loss_dict_one2many = 
criterion(outputs_one2many, multi_targets)\n for key, value in loss_dict_one2many.items():\n if key + \"_one2many\" in loss_dict.keys():\n loss_dict[key + \"_one2many\"] += value * lambda_one2many\n else:\n loss_dict[key + \"_one2many\"] = value * lambda_one2many\n return loss_dict\n\n @torch.jit.unused\n def _set_aux_loss(self, outputs_class, outputs_coord):\n # this is a workaround to make torchscript happy, as torchscript\n # doesn't support dictionary with non-homogeneous values, such\n # as a dict having both a Tensor and a list.\n return [\n {\"pred_logits\": a, \"pred_boxes\": b}\n for a, b in zip(outputs_class[:-1], outputs_coord[:-1])\n ]\n\n def inference(self, box_cls, box_pred, image_sizes):\n \"\"\"\n Arguments:\n box_cls (Tensor): tensor of shape (batch_size, num_queries, K).\n The tensor predicts the classification probability for each query.\n box_pred (Tensor): tensors of shape (batch_size, num_queries, 4).\n The tensor predicts 4-vector (x,y,w,h) box\n regression values for every queryx\n image_sizes (List[torch.Size]): the input image sizes\n\n Returns:\n results (List[Instances]): a list of #images elements.\n \"\"\"\n assert len(box_cls) == len(image_sizes)\n results = []\n\n # Select top-k confidence boxes for inference\n prob = box_cls.sigmoid()\n topk_values, topk_indexes = torch.topk(\n prob.view(box_cls.shape[0], -1), self.select_box_nums_for_evaluation, dim=1\n )\n scores = topk_values\n topk_boxes = torch.div(topk_indexes, box_cls.shape[2], rounding_mode=\"floor\")\n labels = topk_indexes % box_cls.shape[2]\n\n boxes = torch.gather(box_pred, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4))\n\n for (\n i,\n (scores_per_image, labels_per_image, box_pred_per_image, image_size),\n ) in enumerate(zip(scores, labels, boxes, image_sizes)):\n result = Instances(image_size)\n result.pred_boxes = Boxes(box_cxcywh_to_xyxy(box_pred_per_image))\n result.pred_boxes.scale(scale_x=image_size[1], scale_y=image_size[0])\n result.scores = scores_per_image\n result.pred_classes = labels_per_image\n results.append(result)\n return results\n\n def prepare_targets(self, targets):\n new_targets = []\n for targets_per_image in targets:\n h, w = targets_per_image.image_size\n image_size_xyxy = torch.as_tensor([w, h, w, h], dtype=torch.float, device=self.device)\n gt_classes = targets_per_image.gt_classes\n gt_boxes = targets_per_image.gt_boxes.tensor / image_size_xyxy\n gt_boxes = box_xyxy_to_cxcywh(gt_boxes)\n new_targets.append({\"labels\": gt_classes, \"boxes\": gt_boxes})\n return new_targets\n\n def preprocess_image(self, batched_inputs):\n images = [self.normalizer(x[\"image\"].to(self.device)) for x in batched_inputs]\n images = ImageList.from_tensors(images)\n return images" }, { "identifier": "DeformableCriterion", "path": "projects/h_deformable_detr/modeling/deformable_criterion.py", "snippet": "class DeformableCriterion(SetCriterion):\n \"\"\"This class computes the loss for Deformable-DETR\n and two-stage Deformable-DETR\n \"\"\"\n\n def __init__(\n self,\n num_classes,\n matcher,\n weight_dict,\n losses: List[str] = [\"class\", \"boxes\"],\n eos_coef: float = 0.1,\n loss_class_type: str = \"focal_loss\",\n alpha: float = 0.25,\n gamma: float = 2.0,\n ):\n super(DeformableCriterion, self).__init__(\n num_classes=num_classes,\n matcher=matcher,\n weight_dict=weight_dict,\n losses=losses,\n eos_coef=eos_coef,\n loss_class_type=loss_class_type,\n alpha=alpha,\n gamma=gamma,\n )\n\n def forward(self, outputs, targets):\n outputs_without_aux = {\n k: v for k, v in outputs.items() if k 
!= \"aux_outputs\" and k != \"enc_outputs\"\n }\n\n # Retrieve the matching between the outputs of the last layer and the targets\n indices = self.matcher(outputs_without_aux, targets)\n\n # Compute the average number of target boxes accross all nodes, for normalization purposes\n num_boxes = sum(len(t[\"labels\"]) for t in targets)\n num_boxes = torch.as_tensor(\n [num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device\n )\n if is_dist_avail_and_initialized():\n torch.distributed.all_reduce(num_boxes)\n num_boxes = torch.clamp(num_boxes / get_world_size(), min=1).item()\n\n # Compute all the requested losses\n losses = {}\n for loss in self.losses:\n kwargs = {}\n losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes, **kwargs))\n\n # In case of auxiliary losses, we repeat this process with the output of each intermediate layer.\n if \"aux_outputs\" in outputs:\n for i, aux_outputs in enumerate(outputs[\"aux_outputs\"]):\n indices = self.matcher(aux_outputs, targets)\n for loss in self.losses:\n l_dict = self.get_loss(loss, aux_outputs, targets, indices, num_boxes, **kwargs)\n l_dict = {k + f\"_{i}\": v for k, v in l_dict.items()}\n losses.update(l_dict)\n\n # Compute losses for two-stage deformable-detr\n if \"enc_outputs\" in outputs:\n enc_outputs = outputs[\"enc_outputs\"]\n bin_targets = copy.deepcopy(targets)\n for bt in bin_targets:\n bt[\"labels\"] = torch.zeros_like(bt[\"labels\"])\n indices = self.matcher(enc_outputs, bin_targets)\n for loss in self.losses:\n l_dict = self.get_loss(loss, enc_outputs, bin_targets, indices, num_boxes, **kwargs)\n l_dict = {k + \"_enc\": v for k, v in l_dict.items()}\n losses.update(l_dict)\n\n return losses" } ]
import torch.nn as nn from detectron2.modeling.backbone import ResNet, BasicStem from detectron2.layers import ShapeSpec from detectron2.config import LazyCall as L from detrex.modeling.matcher import HungarianMatcher from detrex.modeling.neck import ChannelMapper from detrex.layers import PositionEmbeddingSine from projects.h_deformable_detr.modeling import ( HDeformableDETR, HDeformableDetrTransformerEncoder, HDeformableDetrTransformerDecoder, HDeformableDetrTransformer, DeformableCriterion, )
13,751
model = L(HDeformableDETR)( backbone=L(ResNet)( stem=L(BasicStem)(in_channels=3, out_channels=64, norm="FrozenBN"), stages=L(ResNet.make_default_stages)( depth=50, stride_in_1x1=False, norm="FrozenBN", ), out_features=["res3", "res4", "res5"], freeze_at=1, ), position_embedding=L(PositionEmbeddingSine)( num_pos_feats=128, temperature=10000, normalize=True, offset=-0.5, ), neck=L(ChannelMapper)( input_shapes={ "res3": ShapeSpec(channels=512), "res4": ShapeSpec(channels=1024), "res5": ShapeSpec(channels=2048), }, in_features=["res3", "res4", "res5"], out_channels=256, num_outs=4, kernel_size=1, norm_layer=L(nn.GroupNorm)(num_groups=32, num_channels=256), ), transformer=L(HDeformableDetrTransformer)(
model = L(HDeformableDETR)( backbone=L(ResNet)( stem=L(BasicStem)(in_channels=3, out_channels=64, norm="FrozenBN"), stages=L(ResNet.make_default_stages)( depth=50, stride_in_1x1=False, norm="FrozenBN", ), out_features=["res3", "res4", "res5"], freeze_at=1, ), position_embedding=L(PositionEmbeddingSine)( num_pos_feats=128, temperature=10000, normalize=True, offset=-0.5, ), neck=L(ChannelMapper)( input_shapes={ "res3": ShapeSpec(channels=512), "res4": ShapeSpec(channels=1024), "res5": ShapeSpec(channels=2048), }, in_features=["res3", "res4", "res5"], out_channels=256, num_outs=4, kernel_size=1, norm_layer=L(nn.GroupNorm)(num_groups=32, num_channels=256), ), transformer=L(HDeformableDetrTransformer)(
encoder=L(HDeformableDetrTransformerEncoder)(
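The config above wires a PositionEmbeddingSine with num_pos_feats=128, temperature=10000, normalize=True, offset=-0.5; conceptually it turns a padding mask into sine/cosine encodings of normalized x/y coordinates. Below is a simplified standalone sketch of that computation (the real module in this record also applies the offset and uses view instead of flatten for ONNX export).

import math
import torch

def sine_pos_embed(mask: torch.Tensor, num_pos_feats: int = 128,
                   temperature: int = 10000, eps: float = 1e-6) -> torch.Tensor:
    # mask: (bs, h, w) bool, True = padded pixel. Returns (bs, 2*num_pos_feats, h, w).
    not_mask = ~mask
    y = not_mask.cumsum(1, dtype=torch.float32)
    x = not_mask.cumsum(2, dtype=torch.float32)
    # normalize coordinates of the valid region to [0, 2*pi]
    y = y / (y[:, -1:, :] + eps) * 2 * math.pi
    x = x / (x[:, :, -1:] + eps) * 2 * math.pi
    dim_t = torch.arange(num_pos_feats, dtype=torch.float32)
    dim_t = temperature ** (2 * torch.div(dim_t, 2, rounding_mode="floor") / num_pos_feats)
    pos_x = x[..., None] / dim_t
    pos_y = y[..., None] / dim_t
    pos_x = torch.stack((pos_x[..., 0::2].sin(), pos_x[..., 1::2].cos()), dim=4).flatten(3)
    pos_y = torch.stack((pos_y[..., 0::2].sin(), pos_y[..., 1::2].cos()), dim=4).flatten(3)
    return torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)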
3
2023-10-12 03:02:25+00:00
16k
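Configs in this record use detectron2's lazy-call pattern: L(Cls)(...) records the target class and keyword arguments without constructing anything, and nested L(...) nodes are built recursively when the tree is materialized. A small sketch of that pattern, assuming detectron2's instantiate helper and using toy modules as placeholders:

from detectron2.config import LazyCall as L, instantiate
import torch.nn as nn

# Declare modules lazily; nothing is constructed until instantiate() is called.
norm_cfg = L(nn.GroupNorm)(num_groups=32, num_channels=256)  # same pattern as the neck's norm_layer above
head_cfg = L(nn.Linear)(in_features=256, out_features=80)

# Fields can still be overridden before materialization, e.g. head_cfg.out_features = 91
norm = instantiate(norm_cfg)
head = instantiate(head_cfg)
print(type(norm).__name__, type(head).__name__)  # GroupNorm Linear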
ByungKwanLee/Full-Segment-Anything
mask_generator.py
[ { "identifier": "Sam", "path": "modeling/sam.py", "snippet": "class Sam(nn.Module):\n mask_threshold: float = 0.0\n image_format: str = \"RGB\"\n\n def __init__(\n self,\n image_encoder: ImageEncoderViT,\n prompt_encoder: PromptEncoder,\n mask_decoder: MaskDecoder,\n pixel_mean: List[float] = [123.675, 116.28, 103.53],\n pixel_std: List[float] = [58.395, 57.12, 57.375],\n ) -> None:\n \"\"\"\n SAM predicts object masks from an image and input prompts.\n\n Arguments:\n image_encoder (ImageEncoderViT): The backbone used to encode the\n image into image embeddings that allow for efficient mask prediction.\n prompt_encoder (PromptEncoder): Encodes various types of input prompts.\n mask_decoder (MaskDecoder): Predicts masks from the image embeddings\n and encoded prompts.\n pixel_mean (list(float)): Mean values for normalizing pixels in the input image.\n pixel_std (list(float)): Std values for normalizing pixels in the input image.\n \"\"\"\n super().__init__()\n self.image_encoder = image_encoder\n self.prompt_encoder = prompt_encoder\n self.mask_decoder = mask_decoder\n self.register_buffer(\"pixel_mean\", torch.Tensor(pixel_mean).view(-1, 1, 1), False)\n self.register_buffer(\"pixel_std\", torch.Tensor(pixel_std).view(-1, 1, 1), False)\n\n @property\n def device(self) -> Any:\n return self.pixel_mean.device\n\n @torch.no_grad()\n def forward(\n self,\n batched_input: List[Dict[str, Any]],\n multimask_output: bool,\n ) -> List[Dict[str, torch.Tensor]]:\n \"\"\"\n Predicts masks end-to-end from provided images and prompts.\n If prompts are not known in advance, using SamPredictor is\n recommended over calling the model directly.\n\n Arguments:\n batched_input (list(dict)): A list over input images, each a\n dictionary with the following keys. A prompt key can be\n excluded if it is not present.\n 'image': The image as a torch tensor in 3xHxW format,\n already transformed for input to the model.\n 'original_size': (tuple(int, int)) The original size of\n the image before transformation, as (H, W).\n 'point_coords': (torch.Tensor) Batched point prompts for\n this image, with shape BxNx2. Already transformed to the\n input frame of the model.\n 'point_labels': (torch.Tensor) Batched labels for point prompts,\n with shape BxN.\n 'boxes': (torch.Tensor) Batched box inputs, with shape Bx4.\n Already transformed to the input frame of the model.\n 'mask_inputs': (torch.Tensor) Batched mask inputs to the model,\n in the form Bx1xHxW.\n multimask_output (bool): Whether the model should predict multiple\n disambiguating masks, or return a single mask.\n\n Returns:\n (list(dict)): A list over input images, where each element is\n as dictionary with the following keys.\n 'masks': (torch.Tensor) Batched binary mask predictions,\n with shape BxCxHxW, where B is the number of input prompts,\n C is determined by multimask_output, and (H, W) is the\n original size of the image.\n 'iou_predictions': (torch.Tensor) The model's predictions\n of mask quality, in shape BxC.\n 'low_res_logits': (torch.Tensor) Low resolution logits with\n shape BxCxHxW, where H=W=256. 
Can be passed as mask input\n to subsequent iterations of prediction.\n \"\"\"\n input_images = torch.stack([self.preprocess(x[\"image\"]) for x in batched_input], dim=0)\n image_embeddings = self.image_encoder(input_images)\n\n outputs = []\n for image_record, curr_embedding in zip(batched_input, image_embeddings):\n if \"point_coords\" in image_record:\n points = (image_record[\"point_coords\"], image_record[\"point_labels\"])\n else:\n points = None\n sparse_embeddings, dense_embeddings = self.prompt_encoder(\n points=points,\n boxes=image_record.get(\"boxes\", None),\n masks=image_record.get(\"mask_inputs\", None),\n )\n low_res_masks, iou_predictions = self.mask_decoder(\n image_embeddings=curr_embedding.unsqueeze(0),\n image_pe=self.prompt_encoder.get_dense_pe(),\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n multimask_output=multimask_output,\n )\n masks = self.postprocess_masks(\n low_res_masks,\n input_size=image_record[\"image\"].shape[-2:],\n original_size=image_record[\"original_size\"],\n )\n masks = masks > self.mask_threshold\n outputs.append(\n {\n \"masks\": masks,\n \"iou_predictions\": iou_predictions,\n \"low_res_logits\": low_res_masks,\n }\n )\n return outputs\n \n\n # Batch Individual Mask Generation by LBK\n @torch.no_grad()\n def individual_forward(\n self,\n batched_input: List[Dict[str, Any]],\n multimask_output: bool,\n is_low_resol: bool = False,\n ) -> List[Dict[str, torch.Tensor]]:\n \n input_images = torch.stack([self.lbk_preprocess(x[\"image\"]) for x in batched_input], dim=0)\n image_embeddings = self.image_encoder(input_images)\n\n refined_mask_outputs = []\n for image_record, curr_embedding in zip(batched_input, image_embeddings):\n if \"point_coords\" in image_record:\n points = (image_record[\"point_coords\"], image_record[\"point_labels\"])\n else:\n points = None\n sparse_embeddings, dense_embeddings = self.prompt_encoder(\n points=points,\n boxes=image_record.get(\"boxes\", None),\n masks=image_record.get(\"mask_inputs\", None),\n )\n low_res_masks, iou_predictions = self.mask_decoder(\n image_embeddings=curr_embedding.unsqueeze(0),\n image_pe=self.prompt_encoder.get_dense_pe(),\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n multimask_output=multimask_output,\n )\n\n # Progressing Intergraion.. 
by LBK\n refined_masks = self.postprocess_small_regions(low_res_masks, iou_predictions, *input_images.shape[2:], is_low_resol)\n if not is_low_resol:\n refined_masks = F.interpolate(\n refined_masks.unsqueeze(1).float(),\n input_images.shape[2:],\n mode=\"bilinear\",\n align_corners=False,\n ).squeeze(1).bool()\n refined_mask_outputs.append(refined_masks)\n \n return refined_mask_outputs\n \n # PostProcess by LBK EDIT\n def postprocess_small_regions(self, masks, iou_predictions, orig_h, orig_w, is_low_resol):\n\n\n \"\"\"\n Configuration\n \"\"\"\n # pred_iou_thresh = 0.85\n # stability_score_offset = 1.0\n # stability_score_thresh = 0.85\n # box_nms_thresh = 0.7\n\n\n pred_iou_thresh = 0.7\n stability_score_offset = 1.0\n stability_score_thresh = 0.7\n box_nms_thresh = 0.7\n\n # Interpolation\n if not is_low_resol:\n masks = F.interpolate(\n masks,\n (orig_h, orig_w),\n mode=\"bilinear\",\n align_corners=False,\n )\n else:\n orig_h, orig_w = masks.shape[2:]\n\n # Serialize predictions and store in MaskData\n data = MaskData(\n masks=masks.flatten(0, 1),\n iou_preds=iou_predictions.flatten(0, 1), \n )\n\n # Filter by predicted IoU\n if pred_iou_thresh > 0.0:\n keep_mask = data[\"iou_preds\"] > pred_iou_thresh\n data.filter(keep_mask)\n\n # Calculate stability score\n data[\"stability_score\"] = calculate_stability_score(\n data[\"masks\"], self.mask_threshold, stability_score_offset\n )\n if stability_score_thresh > 0.0:\n keep_mask = data[\"stability_score\"] >= stability_score_thresh\n data.filter(keep_mask)\n\n # Threshold masks and calculate boxes\n data[\"masks\"] = data[\"masks\"] > self.mask_threshold\n data[\"boxes\"] = batched_mask_to_box(data[\"masks\"])\n\n # Filter boxes that touch crop boundaries\n keep_mask = ~is_box_near_crop_edge(data[\"boxes\"], [0, 0, orig_w, orig_h], [0, 0, orig_w, orig_h])\n if not torch.all(keep_mask):\n data.filter(keep_mask)\n data['masks'] = uncrop_masks(data[\"masks\"], [0, 0, orig_w, orig_h], orig_h, orig_w)\n\n # Remove duplicates within this crop.\n keep_by_nms = batched_nms(\n data[\"boxes\"].float(),\n data[\"iou_preds\"],\n torch.zeros_like(data[\"boxes\"][:, 0]), # categories\n iou_threshold=box_nms_thresh,\n )\n data.filter(keep_by_nms)\n\n # making masks\n return data['masks']\n\n def postprocess_masks(\n self,\n masks: torch.Tensor,\n input_size: Tuple[int, ...],\n original_size: Tuple[int, ...],\n ) -> torch.Tensor:\n \"\"\"\n Remove padding and upscale masks to the original image size.\n\n Arguments:\n masks (torch.Tensor): Batched masks from the mask_decoder,\n in BxCxHxW format.\n input_size (tuple(int, int)): The size of the image input to the\n model, in (H, W) format. 
Used to remove padding.\n original_size (tuple(int, int)): The original size of the image\n before resizing for input to the model, in (H, W) format.\n\n Returns:\n (torch.Tensor): Batched masks in BxCxHxW format, where (H, W)\n is given by original_size.\n \"\"\"\n\n masks = F.interpolate(\n masks,\n (self.image_encoder.img_size, self.image_encoder.img_size),\n mode=\"bilinear\",\n align_corners=False,\n )\n masks = masks[..., : input_size[0], : input_size[1]]\n masks = F.interpolate(masks, original_size, mode=\"bilinear\", align_corners=False)\n return masks\n\n def preprocess(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Normalize pixel values and pad to a square input.\"\"\"\n # Normalize colors\n x = (x - self.pixel_mean) / self.pixel_std\n\n # Pad\n h, w = x.shape[-2:]\n padh = self.image_encoder.img_size - h\n padw = self.image_encoder.img_size - w\n x = F.pad(x, (0, padw, 0, padh))\n return x\n \n # by lbk edit\n def lbk_preprocess(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Normalize pixel values and pad to a square input.\"\"\"\n # Normalize colors\n x = (x - self.pixel_mean) / self.pixel_std\n return x" }, { "identifier": "SamPredictor", "path": "predictor.py", "snippet": "class SamPredictor:\n def __init__(\n self,\n sam_model: Sam,\n ) -> None:\n \"\"\"\n Uses SAM to calculate the image embedding for an image, and then\n allow repeated, efficient mask prediction given prompts.\n\n Arguments:\n sam_model (Sam): The model to use for mask prediction.\n \"\"\"\n super().__init__()\n self.model = sam_model\n self.transform = ResizeLongestSide(sam_model.image_encoder.img_size)\n self.reset_image()\n\n def set_image(\n self,\n image: np.ndarray,\n image_format: str = \"RGB\",\n ) -> None:\n \"\"\"\n Calculates the image embeddings for the provided image, allowing\n masks to be predicted with the 'predict' method.\n\n Arguments:\n image (np.ndarray): The image for calculating masks. Expects an\n image in HWC uint8 format, with pixel values in [0, 255].\n image_format (str): The color format of the image, in ['RGB', 'BGR'].\n \"\"\"\n assert image_format in [\n \"RGB\",\n \"BGR\",\n ], f\"image_format must be in ['RGB', 'BGR'], is {image_format}.\"\n if image_format != self.model.image_format:\n image = image[..., ::-1]\n\n # Transform the image to the form expected by the model\n input_image = self.transform.apply_image(image)\n input_image_torch = torch.as_tensor(input_image, device=self.device)\n input_image_torch = input_image_torch.permute(2, 0, 1).contiguous()[None, :, :, :]\n\n self.set_torch_image(input_image_torch, image.shape[:2])\n\n @torch.no_grad()\n def set_torch_image(\n self,\n transformed_image: torch.Tensor,\n original_image_size: Tuple[int, ...],\n ) -> None:\n \"\"\"\n Calculates the image embeddings for the provided image, allowing\n masks to be predicted with the 'predict' method. 
Expects the input\n image to be already transformed to the format expected by the model.\n\n Arguments:\n transformed_image (torch.Tensor): The input image, with shape\n 1x3xHxW, which has been transformed with ResizeLongestSide.\n original_image_size (tuple(int, int)): The size of the image\n before transformation, in (H, W) format.\n \"\"\"\n assert (\n len(transformed_image.shape) == 4\n and transformed_image.shape[1] == 3\n and max(*transformed_image.shape[2:]) == self.model.image_encoder.img_size\n ), f\"set_torch_image input must be BCHW with long side {self.model.image_encoder.img_size}.\"\n self.reset_image()\n\n self.original_size = original_image_size\n self.input_size = tuple(transformed_image.shape[-2:])\n input_image = self.model.preprocess(transformed_image)\n self.features = self.model.image_encoder(input_image)\n self.is_image_set = True\n\n def predict(\n self,\n point_coords: Optional[np.ndarray] = None,\n point_labels: Optional[np.ndarray] = None,\n box: Optional[np.ndarray] = None,\n mask_input: Optional[np.ndarray] = None,\n multimask_output: bool = True,\n return_logits: bool = False,\n ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n \"\"\"\n Predict masks for the given input prompts, using the currently set image.\n\n Arguments:\n point_coords (np.ndarray or None): A Nx2 array of point prompts to the\n model. Each point is in (X,Y) in pixels.\n point_labels (np.ndarray or None): A length N array of labels for the\n point prompts. 1 indicates a foreground point and 0 indicates a\n background point.\n box (np.ndarray or None): A length 4 array given a box prompt to the\n model, in XYXY format.\n mask_input (np.ndarray): A low resolution mask input to the model, typically\n coming from a previous prediction iteration. Has form 1xHxW, where\n for SAM, H=W=256.\n multimask_output (bool): If true, the model will return three masks.\n For ambiguous input prompts (such as a single click), this will often\n produce better masks than a single prediction. If only a single\n mask is needed, the model's predicted quality score can be used\n to select the best mask. For non-ambiguous prompts, such as multiple\n input prompts, multimask_output=False can give better results.\n return_logits (bool): If true, returns un-thresholded masks logits\n instead of a binary mask.\n\n Returns:\n (np.ndarray): The output masks in CxHxW format, where C is the\n number of masks, and (H, W) is the original image size.\n (np.ndarray): An array of length C containing the model's\n predictions for the quality of each mask.\n (np.ndarray): An array of shape CxHxW, where C is the number\n of masks and H=W=256. These low resolution logits can be passed to\n a subsequent iteration as mask input.\n \"\"\"\n if not self.is_image_set:\n raise RuntimeError(\"An image must be set with .set_image(...) 
before mask prediction.\")\n\n # Transform input prompts\n coords_torch, labels_torch, box_torch, mask_input_torch = None, None, None, None\n if point_coords is not None:\n assert (\n point_labels is not None\n ), \"point_labels must be supplied if point_coords is supplied.\"\n point_coords = self.transform.apply_coords(point_coords, self.original_size)\n coords_torch = torch.as_tensor(point_coords, dtype=torch.float, device=self.device)\n labels_torch = torch.as_tensor(point_labels, dtype=torch.int, device=self.device)\n coords_torch, labels_torch = coords_torch[None, :, :], labels_torch[None, :]\n if box is not None:\n box = self.transform.apply_boxes(box, self.original_size)\n box_torch = torch.as_tensor(box, dtype=torch.float, device=self.device)\n box_torch = box_torch[None, :]\n if mask_input is not None:\n mask_input_torch = torch.as_tensor(mask_input, dtype=torch.float, device=self.device)\n mask_input_torch = mask_input_torch[None, :, :, :]\n\n masks, iou_predictions, low_res_masks = self.predict_torch(\n coords_torch,\n labels_torch,\n box_torch,\n mask_input_torch,\n multimask_output,\n return_logits=return_logits,\n )\n\n masks_np = masks[0].detach().cpu().numpy()\n iou_predictions_np = iou_predictions[0].detach().cpu().numpy()\n low_res_masks_np = low_res_masks[0].detach().cpu().numpy()\n return masks_np, iou_predictions_np, low_res_masks_np\n\n @torch.no_grad()\n def predict_torch(\n self,\n point_coords: Optional[torch.Tensor],\n point_labels: Optional[torch.Tensor],\n boxes: Optional[torch.Tensor] = None,\n mask_input: Optional[torch.Tensor] = None,\n multimask_output: bool = True,\n return_logits: bool = False,\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n \"\"\"\n Predict masks for the given input prompts, using the currently set image.\n Input prompts are batched torch tensors and are expected to already be\n transformed to the input frame using ResizeLongestSide.\n\n Arguments:\n point_coords (torch.Tensor or None): A BxNx2 array of point prompts to the\n model. Each point is in (X,Y) in pixels.\n point_labels (torch.Tensor or None): A BxN array of labels for the\n point prompts. 1 indicates a foreground point and 0 indicates a\n background point.\n boxes (np.ndarray or None): A Bx4 array given a box prompt to the\n model, in XYXY format.\n mask_input (np.ndarray): A low resolution mask input to the model, typically\n coming from a previous prediction iteration. Has form Bx1xHxW, where\n for SAM, H=W=256. Masks returned by a previous iteration of the\n predict method do not need further transformation.\n multimask_output (bool): If true, the model will return three masks.\n For ambiguous input prompts (such as a single click), this will often\n produce better masks than a single prediction. If only a single\n mask is needed, the model's predicted quality score can be used\n to select the best mask. For non-ambiguous prompts, such as multiple\n input prompts, multimask_output=False can give better results.\n return_logits (bool): If true, returns un-thresholded masks logits\n instead of a binary mask.\n\n Returns:\n (torch.Tensor): The output masks in BxCxHxW format, where C is the\n number of masks, and (H, W) is the original image size.\n (torch.Tensor): An array of shape BxC containing the model's\n predictions for the quality of each mask.\n (torch.Tensor): An array of shape BxCxHxW, where C is the number\n of masks and H=W=256. 
These low res logits can be passed to\n a subsequent iteration as mask input.\n \"\"\"\n if not self.is_image_set:\n raise RuntimeError(\"An image must be set with .set_image(...) before mask prediction.\")\n\n if point_coords is not None:\n points = (point_coords, point_labels)\n else:\n points = None\n\n # Embed prompts\n sparse_embeddings, dense_embeddings = self.model.prompt_encoder(\n points=points,\n boxes=boxes,\n masks=mask_input,\n )\n\n # Predict masks\n low_res_masks, iou_predictions = self.model.mask_decoder(\n image_embeddings=self.features,\n image_pe=self.model.prompt_encoder.get_dense_pe(),\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n multimask_output=multimask_output,\n )\n\n # Upscale the masks to the original image resolution\n masks = self.model.postprocess_masks(low_res_masks, self.input_size, self.original_size)\n\n if not return_logits:\n masks = masks > self.model.mask_threshold\n\n return masks, iou_predictions, low_res_masks\n\n def get_image_embedding(self) -> torch.Tensor:\n \"\"\"\n Returns the image embeddings for the currently set image, with\n shape 1xCxHxW, where C is the embedding dimension and (H,W) are\n the embedding spatial dimension of SAM (typically C=256, H=W=64).\n \"\"\"\n if not self.is_image_set:\n raise RuntimeError(\n \"An image must be set with .set_image(...) to generate an embedding.\"\n )\n assert self.features is not None, \"Features must exist if an image has been set.\"\n return self.features\n\n @property\n def device(self) -> torch.device:\n return self.model.device\n\n def reset_image(self) -> None:\n \"\"\"Resets the currently set image.\"\"\"\n self.is_image_set = False\n self.features = None\n self.orig_h = None\n self.orig_w = None\n self.input_h = None\n self.input_w = None" }, { "identifier": "MaskData", "path": "utils/amg.py", "snippet": "class MaskData:\n \"\"\"\n A structure for storing masks and their related data in batched format.\n Implements basic filtering and concatenation.\n \"\"\"\n\n def __init__(self, **kwargs) -> None:\n for v in kwargs.values():\n assert isinstance(\n v, (list, np.ndarray, torch.Tensor)\n ), \"MaskData only supports list, numpy arrays, and torch tensors.\"\n self._stats = dict(**kwargs)\n\n def __setitem__(self, key: str, item: Any) -> None:\n assert isinstance(\n item, (list, np.ndarray, torch.Tensor)\n ), \"MaskData only supports list, numpy arrays, and torch tensors.\"\n self._stats[key] = item\n\n def __delitem__(self, key: str) -> None:\n del self._stats[key]\n\n def __getitem__(self, key: str) -> Any:\n return self._stats[key]\n\n def items(self) -> ItemsView[str, Any]:\n return self._stats.items()\n\n def filter(self, keep: torch.Tensor) -> None:\n for k, v in self._stats.items():\n if v is None:\n self._stats[k] = None\n elif isinstance(v, torch.Tensor):\n self._stats[k] = v[torch.as_tensor(keep, device=v.device)]\n elif isinstance(v, np.ndarray):\n self._stats[k] = v[keep.detach().cpu().numpy()]\n elif isinstance(v, list) and keep.dtype == torch.bool:\n self._stats[k] = [a for i, a in enumerate(v) if keep[i]]\n elif isinstance(v, list):\n self._stats[k] = [v[i] for i in keep]\n else:\n raise TypeError(f\"MaskData key {k} has an unsupported type {type(v)}.\")\n\n def cat(self, new_stats: \"MaskData\") -> None:\n for k, v in new_stats.items():\n if k not in self._stats or self._stats[k] is None:\n self._stats[k] = deepcopy(v)\n elif isinstance(v, torch.Tensor):\n self._stats[k] = torch.cat([self._stats[k], v], dim=0)\n elif isinstance(v, 
np.ndarray):\n self._stats[k] = np.concatenate([self._stats[k], v], axis=0)\n elif isinstance(v, list):\n self._stats[k] = self._stats[k] + deepcopy(v)\n else:\n raise TypeError(f\"MaskData key {k} has an unsupported type {type(v)}.\")\n\n def to_numpy(self) -> None:\n for k, v in self._stats.items():\n if isinstance(v, torch.Tensor):\n self._stats[k] = v.detach().cpu().numpy()" }, { "identifier": "area_from_rle", "path": "utils/amg.py", "snippet": "def area_from_rle(rle: Dict[str, Any]) -> int:\n return sum(rle[\"counts\"][1::2])" }, { "identifier": "batch_iterator", "path": "utils/amg.py", "snippet": "def batch_iterator(batch_size: int, *args) -> Generator[List[Any], None, None]:\n assert len(args) > 0 and all(\n len(a) == len(args[0]) for a in args\n ), \"Batched iteration must have inputs of all the same size.\"\n n_batches = len(args[0]) // batch_size + int(len(args[0]) % batch_size != 0)\n for b in range(n_batches):\n yield [arg[b * batch_size : (b + 1) * batch_size] for arg in args]" }, { "identifier": "batched_mask_to_box", "path": "utils/amg.py", "snippet": "def batched_mask_to_box(masks: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Calculates boxes in XYXY format around masks. Return [0,0,0,0] for\n an empty mask. For input shape C1xC2x...xHxW, the output shape is C1xC2x...x4.\n \"\"\"\n # torch.max below raises an error on empty inputs, just skip in this case\n if torch.numel(masks) == 0:\n return torch.zeros(*masks.shape[:-2], 4, device=masks.device)\n\n # Normalize shape to CxHxW\n shape = masks.shape\n h, w = shape[-2:]\n if len(shape) > 2:\n masks = masks.flatten(0, -3)\n else:\n masks = masks.unsqueeze(0)\n\n # Get top and bottom edges\n in_height, _ = torch.max(masks, dim=-1)\n in_height_coords = in_height * torch.arange(h, device=in_height.device)[None, :]\n bottom_edges, _ = torch.max(in_height_coords, dim=-1)\n in_height_coords = in_height_coords + h * (~in_height)\n top_edges, _ = torch.min(in_height_coords, dim=-1)\n\n # Get left and right edges\n in_width, _ = torch.max(masks, dim=-2)\n in_width_coords = in_width * torch.arange(w, device=in_width.device)[None, :]\n right_edges, _ = torch.max(in_width_coords, dim=-1)\n in_width_coords = in_width_coords + w * (~in_width)\n left_edges, _ = torch.min(in_width_coords, dim=-1)\n\n # If the mask is empty the right edge will be to the left of the left edge.\n # Replace these boxes with [0, 0, 0, 0]\n empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges)\n out = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1)\n out = out * (~empty_filter).unsqueeze(-1)\n\n # Return to original shape\n if len(shape) > 2:\n out = out.reshape(*shape[:-2], 4)\n else:\n out = out[0]\n\n return out" }, { "identifier": "box_xyxy_to_xywh", "path": "utils/amg.py", "snippet": "def box_xyxy_to_xywh(box_xyxy: torch.Tensor) -> torch.Tensor:\n box_xywh = deepcopy(box_xyxy)\n box_xywh[2] = box_xywh[2] - box_xywh[0]\n box_xywh[3] = box_xywh[3] - box_xywh[1]\n return box_xywh" }, { "identifier": "build_all_layer_point_grids", "path": "utils/amg.py", "snippet": "def build_all_layer_point_grids(\n n_per_side: int, n_layers: int, scale_per_layer: int\n) -> List[np.ndarray]:\n \"\"\"Generates point grids for all crop layers.\"\"\"\n points_by_layer = []\n for i in range(n_layers + 1):\n n_points = int(n_per_side / (scale_per_layer**i))\n points_by_layer.append(build_point_grid(n_points))\n return points_by_layer" }, { "identifier": "calculate_stability_score", "path": "utils/amg.py", "snippet": "def 
calculate_stability_score(\n masks: torch.Tensor, mask_threshold: float, threshold_offset: float\n) -> torch.Tensor:\n \"\"\"\n Computes the stability score for a batch of masks. The stability\n score is the IoU between the binary masks obtained by thresholding\n the predicted mask logits at high and low values.\n \"\"\"\n # One mask is always contained inside the other.\n # Save memory by preventing unnecessary cast to torch.int64\n intersections = (\n (masks > (mask_threshold + threshold_offset))\n .sum(-1, dtype=torch.int16)\n .sum(-1, dtype=torch.int32)\n )\n unions = (\n (masks > (mask_threshold - threshold_offset))\n .sum(-1, dtype=torch.int16)\n .sum(-1, dtype=torch.int32)\n )\n return intersections / unions" }, { "identifier": "coco_encode_rle", "path": "utils/amg.py", "snippet": "def coco_encode_rle(uncompressed_rle: Dict[str, Any]) -> Dict[str, Any]:\n from pycocotools import mask as mask_utils # type: ignore\n\n h, w = uncompressed_rle[\"size\"]\n rle = mask_utils.frPyObjects(uncompressed_rle, h, w)\n rle[\"counts\"] = rle[\"counts\"].decode(\"utf-8\") # Necessary to serialize with json\n return rle" }, { "identifier": "generate_crop_boxes", "path": "utils/amg.py", "snippet": "def generate_crop_boxes(\n im_size: Tuple[int, ...], n_layers: int, overlap_ratio: float\n) -> Tuple[List[List[int]], List[int]]:\n \"\"\"\n Generates a list of crop boxes of different sizes. Each layer\n has (2**i)**2 boxes for the ith layer.\n \"\"\"\n crop_boxes, layer_idxs = [], []\n im_h, im_w = im_size\n short_side = min(im_h, im_w)\n\n # Original image\n crop_boxes.append([0, 0, im_w, im_h])\n layer_idxs.append(0)\n\n def crop_len(orig_len, n_crops, overlap):\n return int(math.ceil((overlap * (n_crops - 1) + orig_len) / n_crops))\n\n for i_layer in range(n_layers):\n n_crops_per_side = 2 ** (i_layer + 1)\n overlap = int(overlap_ratio * short_side * (2 / n_crops_per_side))\n\n crop_w = crop_len(im_w, n_crops_per_side, overlap)\n crop_h = crop_len(im_h, n_crops_per_side, overlap)\n\n crop_box_x0 = [int((crop_w - overlap) * i) for i in range(n_crops_per_side)]\n crop_box_y0 = [int((crop_h - overlap) * i) for i in range(n_crops_per_side)]\n\n # Crops in XYWH format\n for x0, y0 in product(crop_box_x0, crop_box_y0):\n box = [x0, y0, min(x0 + crop_w, im_w), min(y0 + crop_h, im_h)]\n crop_boxes.append(box)\n layer_idxs.append(i_layer + 1)\n\n return crop_boxes, layer_idxs" }, { "identifier": "is_box_near_crop_edge", "path": "utils/amg.py", "snippet": "def is_box_near_crop_edge(\n boxes: torch.Tensor, crop_box: List[int], orig_box: List[int], atol: float = 20.0\n) -> torch.Tensor:\n \"\"\"Filter masks at the edge of a crop, but not at the edge of the original image.\"\"\"\n crop_box_torch = torch.as_tensor(crop_box, dtype=torch.float, device=boxes.device)\n orig_box_torch = torch.as_tensor(orig_box, dtype=torch.float, device=boxes.device)\n boxes = uncrop_boxes_xyxy(boxes, crop_box).float()\n near_crop_edge = torch.isclose(boxes, crop_box_torch[None, :], atol=atol, rtol=0)\n near_image_edge = torch.isclose(boxes, orig_box_torch[None, :], atol=atol, rtol=0)\n near_crop_edge = torch.logical_and(near_crop_edge, ~near_image_edge)\n return torch.any(near_crop_edge, dim=1)" }, { "identifier": "mask_to_rle_pytorch", "path": "utils/amg.py", "snippet": "def mask_to_rle_pytorch(tensor: torch.Tensor) -> List[Dict[str, Any]]:\n \"\"\"\n Encodes masks to an uncompressed RLE, in the format expected by\n pycoco tools.\n \"\"\"\n # Put in fortran order and flatten h,w\n b, h, w = tensor.shape\n tensor = tensor.permute(0, 
2, 1).flatten(1)\n\n # Compute change indices\n diff = tensor[:, 1:] ^ tensor[:, :-1]\n change_indices = diff.nonzero()\n\n # Encode run length\n out = []\n for i in range(b):\n cur_idxs = change_indices[change_indices[:, 0] == i, 1]\n cur_idxs = torch.cat(\n [\n torch.tensor([0], dtype=cur_idxs.dtype, device=cur_idxs.device),\n cur_idxs + 1,\n torch.tensor([h * w], dtype=cur_idxs.dtype, device=cur_idxs.device),\n ]\n )\n btw_idxs = cur_idxs[1:] - cur_idxs[:-1]\n counts = [] if tensor[i, 0] == 0 else [0]\n counts.extend(btw_idxs.detach().cpu().tolist())\n out.append({\"size\": [h, w], \"counts\": counts})\n return out" }, { "identifier": "remove_small_regions", "path": "utils/amg.py", "snippet": "def remove_small_regions(\n mask: np.ndarray, area_thresh: float, mode: str\n) -> Tuple[np.ndarray, bool]:\n \"\"\"\n Removes small disconnected regions and holes in a mask. Returns the\n mask and an indicator of if the mask has been modified.\n \"\"\"\n import cv2 # type: ignore\n\n assert mode in [\"holes\", \"islands\"]\n correct_holes = mode == \"holes\"\n working_mask = (correct_holes ^ mask).astype(np.uint8)\n n_labels, regions, stats, _ = cv2.connectedComponentsWithStats(working_mask, 8)\n sizes = stats[:, -1][1:] # Row 0 is background label\n small_regions = [i + 1 for i, s in enumerate(sizes) if s < area_thresh]\n if len(small_regions) == 0:\n return mask, False\n fill_labels = [0] + small_regions\n if not correct_holes:\n fill_labels = [i for i in range(n_labels) if i not in fill_labels]\n # If every region is below threshold, keep largest\n if len(fill_labels) == 0:\n fill_labels = [int(np.argmax(sizes)) + 1]\n mask = np.isin(regions, fill_labels)\n return mask, True" }, { "identifier": "rle_to_mask", "path": "utils/amg.py", "snippet": "def rle_to_mask(rle: Dict[str, Any]) -> np.ndarray:\n \"\"\"Compute a binary mask from an uncompressed RLE.\"\"\"\n h, w = rle[\"size\"]\n mask = np.empty(h * w, dtype=bool)\n idx = 0\n parity = False\n for count in rle[\"counts\"]:\n mask[idx : idx + count] = parity\n idx += count\n parity ^= True\n mask = mask.reshape(w, h)\n return mask.transpose() # Put in C order" }, { "identifier": "uncrop_boxes_xyxy", "path": "utils/amg.py", "snippet": "def uncrop_boxes_xyxy(boxes: torch.Tensor, crop_box: List[int]) -> torch.Tensor:\n x0, y0, _, _ = crop_box\n offset = torch.tensor([[x0, y0, x0, y0]], device=boxes.device)\n # Check if boxes has a channel dimension\n if len(boxes.shape) == 3:\n offset = offset.unsqueeze(1)\n return boxes + offset" }, { "identifier": "uncrop_masks", "path": "utils/amg.py", "snippet": "def uncrop_masks(\n masks: torch.Tensor, crop_box: List[int], orig_h: int, orig_w: int\n) -> torch.Tensor:\n x0, y0, x1, y1 = crop_box\n if x0 == 0 and y0 == 0 and x1 == orig_w and y1 == orig_h:\n return masks\n # Coordinate transform masks\n pad_x, pad_y = orig_w - (x1 - x0), orig_h - (y1 - y0)\n pad = (x0, pad_x - x0, y0, pad_y - y0)\n return torch.nn.functional.pad(masks, pad, value=0)" }, { "identifier": "uncrop_points", "path": "utils/amg.py", "snippet": "def uncrop_points(points: torch.Tensor, crop_box: List[int]) -> torch.Tensor:\n x0, y0, _, _ = crop_box\n offset = torch.tensor([[x0, y0]], device=points.device)\n # Check if points has a channel dimension\n if len(points.shape) == 3:\n offset = offset.unsqueeze(1)\n return points + offset" } ]
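The context list above includes calculate_stability_score, which rates a predicted mask by the IoU between its binarizations at a high and a low cutoff. A minimal standalone sketch of that same idea, assuming plain torch logit tensors (the toy values below are illustrative, not taken from the dataset):

import torch

def stability_score(mask_logits: torch.Tensor, mask_threshold: float, offset: float) -> torch.Tensor:
    # IoU between the mask binarized at a high cutoff and at a low cutoff.
    # The high-threshold mask is always contained in the low-threshold one,
    # so the intersection is the high count and the union is the low count.
    high = (mask_logits > (mask_threshold + offset)).sum(dim=(-2, -1)).float()
    low = (mask_logits > (mask_threshold - offset)).sum(dim=(-2, -1)).float()
    return high / low.clamp(min=1)

logits = torch.randn(2, 64, 64)           # two toy mask logit maps
print(stability_score(logits, 0.0, 1.0))  # values in [0, 1]; closer to 1 means more stable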
import numpy as np import torch import cv2 # type: ignore # noqa: F401 from torchvision.ops.boxes import batched_nms, box_area # type: ignore from typing import Any, Dict, List, Optional, Tuple from modeling import Sam from predictor import SamPredictor from utils.amg import ( MaskData, area_from_rle, batch_iterator, batched_mask_to_box, box_xyxy_to_xywh, build_all_layer_point_grids, calculate_stability_score, coco_encode_rle, generate_crop_boxes, is_box_near_crop_edge, mask_to_rle_pytorch, remove_small_regions, rle_to_mask, uncrop_boxes_xyxy, uncrop_masks, uncrop_points, ) from pycocotools import mask as mask_utils # type: ignore # noqa: F401
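The import block pulls in batched_nms from torchvision, which postprocess_small_regions above calls with a single dummy category to drop duplicate masks by box IoU. A small hedged example of that call pattern with made-up boxes and scores:

import torch
from torchvision.ops.boxes import batched_nms

boxes = torch.tensor([[ 0.,  0., 10., 10.],
                      [ 1.,  1., 10., 10.],   # heavily overlaps the first box
                      [50., 50., 60., 60.]])
scores = torch.tensor([0.90, 0.80, 0.95])
categories = torch.zeros(3, dtype=torch.long)  # one class, as in the mask generator
keep = batched_nms(boxes, scores, categories, iou_threshold=0.7)
print(keep)  # indices kept, sorted by score; box 1 is suppressed here, leaving tensor([2, 0])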
11,115
layer has 2**i_layer number of image crops. crop_nms_thresh (float): The box IoU cutoff used by non-maximal suppression to filter duplicate masks between different crops. crop_overlap_ratio (float): Sets the degree to which crops overlap. In the first crop layer, crops will overlap by this fraction of the image length. Later layers with more crops scale down this overlap. crop_n_points_downscale_factor (int): The number of points-per-side sampled in layer n is scaled down by crop_n_points_downscale_factor**n. point_grids (list(np.ndarray) or None): A list over explicit grids of points used for sampling, normalized to [0,1]. The nth grid in the list is used in the nth crop layer. Exclusive with points_per_side. min_mask_region_area (int): If >0, postprocessing will be applied to remove disconnected regions and holes in masks with area smaller than min_mask_region_area. Requires opencv. output_mode (str): The form masks are returned in. Can be 'binary_mask', 'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools. For large resolutions, 'binary_mask' may consume large amounts of memory. """ assert (points_per_side is None) != ( point_grids is None ), "Exactly one of points_per_side or point_grid must be provided." if points_per_side is not None: self.point_grids = build_all_layer_point_grids( points_per_side, crop_n_layers, crop_n_points_downscale_factor, ) elif point_grids is not None: self.point_grids = point_grids else: raise ValueError("Can't have both points_per_side and point_grid be None.") assert output_mode in [ "binary_mask", "uncompressed_rle", "coco_rle", ], f"Unknown output_mode {output_mode}." if output_mode == "coco_rle": if min_mask_region_area > 0: self.predictor = SamPredictor(model) self.points_per_batch = points_per_batch self.pred_iou_thresh = pred_iou_thresh self.stability_score_thresh = stability_score_thresh self.stability_score_offset = stability_score_offset self.box_nms_thresh = box_nms_thresh self.crop_n_layers = crop_n_layers self.crop_nms_thresh = crop_nms_thresh self.crop_overlap_ratio = crop_overlap_ratio self.crop_n_points_downscale_factor = crop_n_points_downscale_factor self.min_mask_region_area = min_mask_region_area self.output_mode = output_mode @torch.no_grad() def generate(self, image: np.ndarray) -> List[Dict[str, Any]]: """ Generates masks for the given image. Arguments: image (np.ndarray): The image to generate masks for, in HWC uint8 format. Returns: list(dict(str, any)): A list over records for masks. Each record is a dict containing the following keys: segmentation (dict(str, any) or np.ndarray): The mask. If output_mode='binary_mask', is an array of shape HW. Otherwise, is a dictionary containing the RLE. bbox (list(float)): The box around the mask, in XYWH format. area (int): The area in pixels of the mask. predicted_iou (float): The model's own prediction of the mask's quality. This is filtered by the pred_iou_thresh parameter. point_coords (list(list(float))): The point coordinates input to the model to generate this mask. stability_score (float): A measure of the mask's quality. This is filtered on using the stability_score_thresh parameter. crop_box (list(float)): The crop of the image used to generate the mask, given in XYWH format. 
""" # Generate masks mask_data = self._generate_masks(image) # Filter small disconnected regions and holes in masks if self.min_mask_region_area > 0: mask_data = self.postprocess_small_regions( mask_data, self.min_mask_region_area, max(self.box_nms_thresh, self.crop_nms_thresh), ) # Encode masks if self.output_mode == "coco_rle": mask_data["segmentations"] = [coco_encode_rle(rle) for rle in mask_data["rles"]] elif self.output_mode == "binary_mask": mask_data["segmentations"] = [rle_to_mask(rle) for rle in mask_data["rles"]] else: mask_data["segmentations"] = mask_data["rles"] # Write mask records curr_anns = [] for idx in range(len(mask_data["segmentations"])): ann = { "segmentation": mask_data["segmentations"][idx], "area": area_from_rle(mask_data["rles"][idx]), "bbox": box_xyxy_to_xywh(mask_data["boxes"][idx]).tolist(), "predicted_iou": mask_data["iou_preds"][idx].item(), "point_coords": [mask_data["points"][idx].tolist()], "stability_score": mask_data["stability_score"][idx].item(), "crop_box": box_xyxy_to_xywh(mask_data["crop_boxes"][idx]).tolist(), } curr_anns.append(ann) return curr_anns def _generate_masks(self, image: np.ndarray) -> MaskData: orig_size = image.shape[:2]
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. class SamMaskGenerator: def __init__( self, model: Sam, points_per_side: Optional[int] = 32, points_per_batch: int = 64, pred_iou_thresh: float = 0.88, stability_score_thresh: float = 0.95, stability_score_offset: float = 1.0, box_nms_thresh: float = 0.7, crop_n_layers: int = 0, crop_nms_thresh: float = 0.7, crop_overlap_ratio: float = 512 / 1500, crop_n_points_downscale_factor: int = 1, point_grids: Optional[List[np.ndarray]] = None, min_mask_region_area: int = 0, output_mode: str = "binary_mask", ) -> None: """ Using a SAM model, generates masks for the entire image. Generates a grid of point prompts over the image, then filters low quality and duplicate masks. The default settings are chosen for SAM with a ViT-H backbone. Arguments: model (Sam): The SAM model to use for mask prediction. points_per_side (int or None): The number of points to be sampled along one side of the image. The total number of points is points_per_side**2. If None, 'point_grids' must provide explicit point sampling. points_per_batch (int): Sets the number of points run simultaneously by the model. Higher numbers may be faster but use more GPU memory. pred_iou_thresh (float): A filtering threshold in [0,1], using the model's predicted mask quality. stability_score_thresh (float): A filtering threshold in [0,1], using the stability of the mask under changes to the cutoff used to binarize the model's mask predictions. stability_score_offset (float): The amount to shift the cutoff when calculated the stability score. box_nms_thresh (float): The box IoU cutoff used by non-maximal suppression to filter duplicate masks. crop_n_layers (int): If >0, mask prediction will be run again on crops of the image. Sets the number of layers to run, where each layer has 2**i_layer number of image crops. crop_nms_thresh (float): The box IoU cutoff used by non-maximal suppression to filter duplicate masks between different crops. crop_overlap_ratio (float): Sets the degree to which crops overlap. In the first crop layer, crops will overlap by this fraction of the image length. Later layers with more crops scale down this overlap. crop_n_points_downscale_factor (int): The number of points-per-side sampled in layer n is scaled down by crop_n_points_downscale_factor**n. point_grids (list(np.ndarray) or None): A list over explicit grids of points used for sampling, normalized to [0,1]. The nth grid in the list is used in the nth crop layer. Exclusive with points_per_side. min_mask_region_area (int): If >0, postprocessing will be applied to remove disconnected regions and holes in masks with area smaller than min_mask_region_area. Requires opencv. output_mode (str): The form masks are returned in. Can be 'binary_mask', 'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools. For large resolutions, 'binary_mask' may consume large amounts of memory. """ assert (points_per_side is None) != ( point_grids is None ), "Exactly one of points_per_side or point_grid must be provided." 
if points_per_side is not None: self.point_grids = build_all_layer_point_grids( points_per_side, crop_n_layers, crop_n_points_downscale_factor, ) elif point_grids is not None: self.point_grids = point_grids else: raise ValueError("Can't have both points_per_side and point_grid be None.") assert output_mode in [ "binary_mask", "uncompressed_rle", "coco_rle", ], f"Unknown output_mode {output_mode}." if output_mode == "coco_rle": if min_mask_region_area > 0: self.predictor = SamPredictor(model) self.points_per_batch = points_per_batch self.pred_iou_thresh = pred_iou_thresh self.stability_score_thresh = stability_score_thresh self.stability_score_offset = stability_score_offset self.box_nms_thresh = box_nms_thresh self.crop_n_layers = crop_n_layers self.crop_nms_thresh = crop_nms_thresh self.crop_overlap_ratio = crop_overlap_ratio self.crop_n_points_downscale_factor = crop_n_points_downscale_factor self.min_mask_region_area = min_mask_region_area self.output_mode = output_mode @torch.no_grad() def generate(self, image: np.ndarray) -> List[Dict[str, Any]]: """ Generates masks for the given image. Arguments: image (np.ndarray): The image to generate masks for, in HWC uint8 format. Returns: list(dict(str, any)): A list over records for masks. Each record is a dict containing the following keys: segmentation (dict(str, any) or np.ndarray): The mask. If output_mode='binary_mask', is an array of shape HW. Otherwise, is a dictionary containing the RLE. bbox (list(float)): The box around the mask, in XYWH format. area (int): The area in pixels of the mask. predicted_iou (float): The model's own prediction of the mask's quality. This is filtered by the pred_iou_thresh parameter. point_coords (list(list(float))): The point coordinates input to the model to generate this mask. stability_score (float): A measure of the mask's quality. This is filtered on using the stability_score_thresh parameter. crop_box (list(float)): The crop of the image used to generate the mask, given in XYWH format. """ # Generate masks mask_data = self._generate_masks(image) # Filter small disconnected regions and holes in masks if self.min_mask_region_area > 0: mask_data = self.postprocess_small_regions( mask_data, self.min_mask_region_area, max(self.box_nms_thresh, self.crop_nms_thresh), ) # Encode masks if self.output_mode == "coco_rle": mask_data["segmentations"] = [coco_encode_rle(rle) for rle in mask_data["rles"]] elif self.output_mode == "binary_mask": mask_data["segmentations"] = [rle_to_mask(rle) for rle in mask_data["rles"]] else: mask_data["segmentations"] = mask_data["rles"] # Write mask records curr_anns = [] for idx in range(len(mask_data["segmentations"])): ann = { "segmentation": mask_data["segmentations"][idx], "area": area_from_rle(mask_data["rles"][idx]), "bbox": box_xyxy_to_xywh(mask_data["boxes"][idx]).tolist(), "predicted_iou": mask_data["iou_preds"][idx].item(), "point_coords": [mask_data["points"][idx].tolist()], "stability_score": mask_data["stability_score"][idx].item(), "crop_box": box_xyxy_to_xywh(mask_data["crop_boxes"][idx]).tolist(), } curr_anns.append(ann) return curr_anns def _generate_masks(self, image: np.ndarray) -> MaskData: orig_size = image.shape[:2]
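For output_mode='uncompressed_rle', each segmentation is stored as {"size": [h, w], "counts": [...]} with alternating background/foreground run lengths over the mask flattened in Fortran order, and rle_to_mask in the context decodes it. A self-contained decode sketch that mirrors that helper; the toy RLE is made up:

import numpy as np

def decode_uncompressed_rle(rle):
    # Mirrors the rle_to_mask helper in the context: counts alternate
    # background/foreground runs over the mask flattened in Fortran order.
    h, w = rle["size"]
    mask = np.empty(h * w, dtype=bool)
    idx, parity = 0, False
    for count in rle["counts"]:
        mask[idx:idx + count] = parity
        idx += count
        parity = not parity
    return mask.reshape(w, h).T  # back to (h, w), C order

toy = {"size": [2, 3], "counts": [2, 3, 1]}  # 2 background px, 3 foreground, 1 background
print(decode_uncompressed_rle(toy).astype(int))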
crop_boxes, layer_idxs = generate_crop_boxes(
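The completion target above calls generate_crop_boxes, whose full definition appears in the context list: layer 0 is the whole image and layer i contributes (2**i)**2 overlapping corner-coordinate crops. A standalone sketch of that layout logic, not the library call itself; the image size is a toy value and the overlap ratio is the class default shown earlier:

import math
from itertools import product

def layered_crop_boxes(im_h, im_w, n_layers, overlap_ratio):
    # Layer 0: the full image. Layer i (i >= 1): (2**i)**2 overlapping [x0, y0, x1, y1] crops.
    boxes = [[0, 0, im_w, im_h]]
    short_side = min(im_h, im_w)
    for i in range(n_layers):
        n_per_side = 2 ** (i + 1)
        overlap = int(overlap_ratio * short_side * (2 / n_per_side))
        crop_w = int(math.ceil((overlap * (n_per_side - 1) + im_w) / n_per_side))
        crop_h = int(math.ceil((overlap * (n_per_side - 1) + im_h) / n_per_side))
        xs = [int((crop_w - overlap) * k) for k in range(n_per_side)]
        ys = [int((crop_h - overlap) * k) for k in range(n_per_side)]
        boxes += [[x0, y0, min(x0 + crop_w, im_w), min(y0 + crop_h, im_h)]
                  for x0, y0 in product(xs, ys)]
    return boxes

print(len(layered_crop_boxes(1500, 2250, 2, 512 / 1500)))  # 1 + 4 + 16 = 21 crops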
10
2023-10-13 20:07:42+00:00
16k
sakemin/cog-musicgen-remixer
audiocraft/modules/conditioners.py
[ { "identifier": "ChromaExtractor", "path": "audiocraft/modules/chroma.py", "snippet": "class ChromaExtractor(nn.Module):\n \"\"\"Chroma extraction and quantization.\n\n Args:\n sample_rate (int): Sample rate for the chroma extraction.\n n_chroma (int): Number of chroma bins for the chroma extraction.\n radix2_exp (int): Size of stft window for the chroma extraction (power of 2, e.g. 12 -> 2^12).\n nfft (int, optional): Number of FFT.\n winlen (int, optional): Window length.\n winhop (int, optional): Window hop size.\n argmax (bool, optional): Whether to use argmax. Defaults to False.\n norm (float, optional): Norm for chroma normalization. Defaults to inf.\n \"\"\"\n def __init__(self, sample_rate: int, n_chroma: int = 12, radix2_exp: int = 12, nfft: tp.Optional[int] = None,\n winlen: tp.Optional[int] = None, winhop: tp.Optional[int] = None, argmax: bool = False,\n norm: float = torch.inf):\n super().__init__()\n self.winlen = winlen or 2 ** radix2_exp\n self.nfft = nfft or self.winlen\n self.winhop = winhop or (self.winlen // 4)\n self.sample_rate = sample_rate\n self.n_chroma = n_chroma\n self.norm = norm\n self.argmax = argmax\n self.register_buffer('fbanks', torch.from_numpy(filters.chroma(sr=sample_rate, n_fft=self.nfft, tuning=0,\n n_chroma=self.n_chroma)), persistent=False)\n self.spec = torchaudio.transforms.Spectrogram(n_fft=self.nfft, win_length=self.winlen,\n hop_length=self.winhop, power=2, center=True,\n pad=0, normalized=True)\n\n def forward(self, wav: torch.Tensor) -> torch.Tensor:\n T = wav.shape[-1]\n # in case we are getting a wav that was dropped out (nullified)\n # from the conditioner, make sure wav length is no less that nfft\n if T < self.nfft:\n pad = self.nfft - T\n r = 0 if pad % 2 == 0 else 1\n wav = F.pad(wav, (pad // 2, pad // 2 + r), 'constant', 0)\n assert wav.shape[-1] == self.nfft, f\"expected len {self.nfft} but got {wav.shape[-1]}\"\n\n spec = self.spec(wav).squeeze(1)\n raw_chroma = torch.einsum('cf,...ft->...ct', self.fbanks, spec)\n norm_chroma = torch.nn.functional.normalize(raw_chroma, p=self.norm, dim=-2, eps=1e-6)\n norm_chroma = rearrange(norm_chroma, 'b d t -> b t d')\n\n if self.argmax:\n idx = norm_chroma.argmax(-1, keepdim=True)\n norm_chroma[:] = 0\n norm_chroma.scatter_(dim=-1, index=idx, value=1)\n\n return norm_chroma" }, { "identifier": "ChordExtractor", "path": "audiocraft/modules/chord_chroma.py", "snippet": "class ChordExtractor(nn.Module):\n\n def __init__(self, device, sample_rate, max_duration, chroma_len, n_chroma, winhop):\n super().__init__()\n self.config = HParams.load(\"/src/audiocraft/modules/btc/run_config.yaml\") #gotta specify the path for run_config.yaml of btc\n\n # self.config.feature['large_voca'] = False\n # self.config.model['num_chords'] = 25\n\n self.model_file = '/src/audiocraft/modules/btc/test/btc_model_large_voca.pt'\n # self.model_file = 'audiocraft/modules/btc/test/btc_model.pt'\n self.idx_to_chord = idx2voca_chord()\n self.sr = sample_rate\n\n self.n_chroma = n_chroma\n self.max_duration = max_duration\n self.chroma_len = chroma_len\n self.to_timebin = self.max_duration/self.chroma_len\n self.timebin = winhop\n\n self.chords = chords.Chords()\n self.device = device\n\n self.denoise_window_size = 7\n self.denoise_threshold = 0.5\n \n self.model = BTC_model(config=self.config.model).to(device)\n if os.path.isfile(self.model_file):\n checkpoint = torch.load(self.model_file)\n self.mean = checkpoint['mean']\n self.std = checkpoint['std']\n self.model.load_state_dict(checkpoint['model'])\n\n def forward(self, 
wavs:torch.Tensor) -> torch.Tensor:\n sr = self.config.mp3['song_hz']\n chromas = []\n for wav in wavs:\n original_wav = librosa.resample(wav.cpu().numpy(), orig_sr=self.sr, target_sr=sr)\n original_wav = original_wav.squeeze(0)\n # print(original_wav.shape)\n T = original_wav.shape[-1]\n # in case we are getting a wav that was dropped out (nullified)\n # from the conditioner, make sure wav length is no less that nfft\n if T < self.timebin//4:\n pad = self.timebin//4 - T\n r = 0 if pad % 2 == 0 else 1\n original_wav = F.pad(torch.Tensor(original_wav), (pad // 2, pad // 2 + r), 'constant', 0)\n original_wav = original_wav.numpy()\n assert original_wav.shape[-1] == self.timebin//4, f\"expected len {self.timebin//4} but got {original_wav.shape[-1]}\"\n # print(original_wav.shape)\n #preprocess\n currunt_sec_hz = 0\n\n while len(original_wav) > currunt_sec_hz + self.config.mp3['song_hz'] * self.config.mp3['inst_len']:\n start_idx = int(currunt_sec_hz)\n end_idx = int(currunt_sec_hz + self.config.mp3['song_hz'] * self.config.mp3['inst_len'])\n tmp = librosa.cqt(original_wav[start_idx:end_idx], sr=sr, n_bins=self.config.feature['n_bins'], bins_per_octave=self.config.feature['bins_per_octave'], hop_length=self.config.feature['hop_length'])\n if start_idx == 0:\n feature = tmp\n else:\n feature = np.concatenate((feature, tmp), axis=1)\n currunt_sec_hz = end_idx\n \n if currunt_sec_hz == 0:\n feature = librosa.cqt(original_wav[currunt_sec_hz:], sr=sr, n_bins=self.config.feature['n_bins'], bins_per_octave=self.config.feature['bins_per_octave'], hop_length=self.config.feature['hop_length'])\n else:\n tmp = librosa.cqt(original_wav[currunt_sec_hz:], sr=sr, n_bins=self.config.feature['n_bins'], bins_per_octave=self.config.feature['bins_per_octave'], hop_length=self.config.feature['hop_length'])\n feature = np.concatenate((feature, tmp), axis=1)\n # print(feature.shape)\n feature = np.log(np.abs(feature) + 1e-6)\n # print(feature)\n feature_per_second = self.config.mp3['inst_len'] / self.config.model['timestep']\n song_length_second = len(original_wav)/self.config.mp3['song_hz']\n\n feature = feature.T\n feature = (feature - self.mean)/self.std\n\n time_unit = feature_per_second\n n_timestep = self.config.model['timestep']\n\n num_pad = n_timestep - (feature.shape[0] % n_timestep)\n feature = np.pad(feature, ((0, num_pad), (0, 0)), mode=\"constant\", constant_values=0)\n num_instance = feature.shape[0] // n_timestep\n\n #inference\n start_time = 0.0\n lines = []\n with torch.no_grad():\n self.model.eval()\n feature = torch.tensor(feature, dtype=torch.float32).unsqueeze(0).to(self.device)\n for t in range(num_instance):\n self_attn_output, _ = self.model.self_attn_layers(feature[:, n_timestep * t:n_timestep * (t + 1), :])\n prediction, _ = self.model.output_layer(self_attn_output)\n prediction = prediction.squeeze()\n for i in range(n_timestep):\n if t == 0 and i == 0:\n prev_chord = prediction[i].item()\n continue\n if prediction[i].item() != prev_chord:\n lines.append(\n '%.3f %.3f %s\\n' % (start_time, time_unit * (n_timestep * t + i), self.idx_to_chord[prev_chord]))\n start_time = time_unit * (n_timestep * t + i)\n prev_chord = prediction[i].item()\n if t == num_instance - 1 and i + num_pad == n_timestep:\n if start_time != time_unit * (n_timestep * t + i):\n lines.append('%.3f %.3f %s\\n' % (start_time, time_unit * (n_timestep * t + i), self.idx_to_chord[prev_chord]))\n break\n\n strlines = ''.join(lines)\n\n chroma = []\n\n count = 0\n for line in lines:\n if count >= self.chroma_len: \n break\n 
splits = line.split()\n if len(splits) == 3:\n s = splits[0]\n e = splits[1]\n l = splits[2]\n\n crd = self.chords.chord(l)\n \n if crd[0] == -1:\n multihot = torch.Tensor(crd[2])\n else:\n multihot = torch.concat([torch.Tensor(crd[2])[-crd[0]:],torch.Tensor(crd[2])[:-crd[0]]])\n start_bin = round(float(s)/self.to_timebin)\n end_bin = round(float(e)/self.to_timebin)\n for j in range(start_bin,end_bin):\n if count >= self.chroma_len: \n break\n chroma.append(multihot)\n count += 1\n \n chroma = torch.stack(chroma, dim=0)\n\n # Denoising chroma\n kernel = torch.ones(self.denoise_window_size)/self.denoise_window_size\n\n filtered_signals = []\n for i in range(chroma.shape[-1]):\n filtered_signals.append(torch.nn.functional.conv1d(chroma[...,i].unsqueeze(0),\n kernel.unsqueeze(0).unsqueeze(0).to(chroma.device), \n padding=(self.denoise_window_size - 1) // 2))\n filtered_signals = torch.stack(filtered_signals, dim=-1)\n filtered_signals = filtered_signals > self.denoise_threshold\n\n chromas.append(filtered_signals.squeeze(0))\n \n return torch.stack(chromas, dim=0).to(self.device)" }, { "identifier": "StreamingModule", "path": "audiocraft/modules/streaming.py", "snippet": "class StreamingModule(nn.Module):\n \"\"\"Common API for streaming components.\n\n Each streaming component has a streaming state, which is just a dict[str, Tensor].\n By convention, the first dim of each tensor must be the batch size.\n Don't use dots in the key names, as this would clash with submodules\n (like in state_dict).\n\n If `self._is_streaming` is True, the component should use and remember\n the proper state inside `self._streaming_state`.\n\n To set a streaming component in streaming state, use\n\n with module.streaming():\n ...\n\n This will automatically reset the streaming state when exiting the context manager.\n This also automatically propagates to all streaming children module.\n\n Some module might also implement the `StreamingModule.flush` method, although\n this one is trickier, as all parents module must be StreamingModule and implement\n it as well for it to work properly. See `StreamingSequential` after.\n \"\"\"\n def __init__(self) -> None:\n super().__init__()\n self._streaming_state: State = {}\n self._is_streaming = False\n\n def _apply_named_streaming(self, fn: tp.Any):\n for name, module in self.named_modules():\n if isinstance(module, StreamingModule):\n fn(name, module)\n\n def _set_streaming(self, streaming: bool):\n def _set_streaming(name, module):\n module._is_streaming = streaming\n self._apply_named_streaming(_set_streaming)\n\n @contextmanager\n def streaming(self):\n \"\"\"Context manager to enter streaming mode. 
Reset streaming state on exit.\"\"\"\n self._set_streaming(True)\n try:\n yield\n finally:\n self._set_streaming(False)\n self.reset_streaming()\n\n def reset_streaming(self):\n \"\"\"Reset the streaming state.\"\"\"\n def _reset(name: str, module: StreamingModule):\n module._streaming_state.clear()\n\n self._apply_named_streaming(_reset)\n\n def get_streaming_state(self) -> State:\n \"\"\"Return the streaming state, including that of sub-modules.\"\"\"\n state: State = {}\n\n def _add(name: str, module: StreamingModule):\n if name:\n name += \".\"\n for key, value in module._streaming_state.items():\n state[name + key] = value\n\n self._apply_named_streaming(_add)\n return state\n\n def set_streaming_state(self, state: State):\n \"\"\"Set the streaming state, including that of sub-modules.\"\"\"\n state = dict(state)\n\n def _set(name: str, module: StreamingModule):\n if name:\n name += \".\"\n module._streaming_state.clear()\n for key, value in list(state.items()):\n # complexity is not ideal here, but probably fine.\n if key.startswith(name):\n local_key = key[len(name):]\n if '.' not in local_key:\n module._streaming_state[local_key] = value\n del state[key]\n\n self._apply_named_streaming(_set)\n assert len(state) == 0, list(state.keys())\n\n def flush(self, x: tp.Optional[torch.Tensor] = None):\n \"\"\"Flush any remaining outputs that were waiting for completion.\n Typically, for convolutions, this will add the final padding\n and process the last buffer.\n\n This should take an optional argument `x`, which will be provided\n if a module before this one in the streaming pipeline has already\n spitted out a flushed out buffer.\n \"\"\"\n if x is None:\n return None\n else:\n return self(x)" }, { "identifier": "create_sin_embedding", "path": "audiocraft/modules/transformer.py", "snippet": "def create_sin_embedding(positions: torch.Tensor, dim: int, max_period: float = 10000,\n dtype: torch.dtype = torch.float32) -> torch.Tensor:\n \"\"\"Create sinusoidal positional embedding, with shape `[B, T, C]`.\n\n Args:\n positions (torch.Tensor): LongTensor of positions.\n dim (int): Dimension of the embedding.\n max_period (float): Maximum period of the cosine/sine functions.\n dtype (torch.dtype or str): dtype to use to generate the embedding.\n Returns:\n torch.Tensor: Sinusoidal positional embedding.\n \"\"\"\n # We aim for BTC format\n assert dim % 2 == 0\n half_dim = dim // 2\n positions = positions.to(dtype)\n adim = torch.arange(half_dim, device=positions.device, dtype=dtype).view(1, 1, -1)\n max_period_tensor = torch.full([], max_period, device=positions.device, dtype=dtype) # avoid sync point\n phase = positions / (max_period_tensor ** (adim / (half_dim - 1)))\n return torch.cat([torch.cos(phase), torch.sin(phase)], dim=-1)" }, { "identifier": "audio_read", "path": "audiocraft/data/audio.py", "snippet": "def audio_read(filepath: tp.Union[str, Path], seek_time: float = 0.,\n duration: float = -1., pad: bool = False) -> tp.Tuple[torch.Tensor, int]:\n \"\"\"Read audio by picking the most appropriate backend tool based on the audio format.\n\n Args:\n filepath (str or Path): Path to audio file to read.\n seek_time (float): Time at which to start reading in the file.\n duration (float): Duration to read from the file. 
If set to -1, the whole file is read.\n pad (bool): Pad output audio if not reaching expected duration.\n Returns:\n tuple of torch.Tensor, int: Tuple containing audio data and sample rate.\n \"\"\"\n fp = Path(filepath)\n if fp.suffix in ['.flac', '.ogg']: # TODO: check if we can safely use av_read for .ogg\n # There is some bug with ffmpeg and reading flac\n info = _soundfile_info(filepath)\n frames = -1 if duration <= 0 else int(duration * info.sample_rate)\n frame_offset = int(seek_time * info.sample_rate)\n wav, sr = soundfile.read(filepath, start=frame_offset, frames=frames, dtype=np.float32)\n assert info.sample_rate == sr, f\"Mismatch of sample rates {info.sample_rate} {sr}\"\n wav = torch.from_numpy(wav).t().contiguous()\n if len(wav.shape) == 1:\n wav = torch.unsqueeze(wav, 0)\n else:\n wav, sr = _av_read(filepath, seek_time, duration)\n if pad and duration > 0:\n expected_frames = int(duration * sr)\n wav = F.pad(wav, (0, expected_frames - wav.shape[-1]))\n return wav, sr" }, { "identifier": "SegmentInfo", "path": "audiocraft/data/audio_dataset.py", "snippet": "class SegmentInfo(BaseInfo):\n meta: AudioMeta\n seek_time: float\n # The following values are given once the audio is processed, e.g.\n # at the target sample rate and target number of channels.\n n_frames: int # actual number of frames without padding\n total_frames: int # total number of frames, padding included\n sample_rate: int # actual sample rate\n channels: int # number of audio channels." }, { "identifier": "convert_audio", "path": "audiocraft/data/audio_utils.py", "snippet": "def convert_audio(wav: torch.Tensor, from_rate: float,\n to_rate: float, to_channels: int) -> torch.Tensor:\n \"\"\"Convert audio to new sample rate and number of audio channels.\"\"\"\n wav = julius.resample_frac(wav, int(from_rate), int(to_rate))\n wav = convert_audio_channels(wav, to_channels)\n return wav" }, { "identifier": "AudioCraftEnvironment", "path": "audiocraft/environment.py", "snippet": "class AudioCraftEnvironment:\n \"\"\"Environment configuration for teams and clusters.\n\n AudioCraftEnvironment picks compute cluster settings (slurm, dora) from the current running environment\n or declared variable and the loaded team configuration. Additionally, the AudioCraftEnvironment\n provides pointers to a reference folder resolved automatically across clusters that is shared across team members,\n allowing to share sigs or other files to run jobs. Finally, it provides dataset mappers to automatically\n map dataset file paths to new locations across clusters, allowing to use the same manifest of files across cluters.\n\n The cluster type is identified automatically and base configuration file is read from config/teams.yaml.\n Use the following environment variables to specify the cluster, team or configuration:\n\n AUDIOCRAFT_CLUSTER (optional): Cluster type to enforce. Useful if the cluster type\n cannot be inferred automatically.\n AUDIOCRAFT_CONFIG (optional): Path to yaml config holding the teams configuration.\n If not set, configuration is read from config/teams.yaml.\n AUDIOCRAFT_TEAM (optional): Name of the team. 
Recommended to set to your own team.\n Cluster configuration are shared across teams to match compute allocation,\n specify your cluster configuration in the configuration file under a key mapping\n your team name.\n \"\"\"\n _instance = None\n DEFAULT_TEAM = \"default\"\n\n def __init__(self) -> None:\n \"\"\"Loads configuration.\"\"\"\n self.team: str = os.getenv(\"AUDIOCRAFT_TEAM\", self.DEFAULT_TEAM)\n cluster_type = _guess_cluster_type()\n cluster = os.getenv(\n \"AUDIOCRAFT_CLUSTER\", cluster_type.value\n )\n logger.info(\"Detecting cluster type %s\", cluster_type)\n\n self.cluster: str = cluster\n\n config_path = os.getenv(\n \"AUDIOCRAFT_CONFIG\",\n Path(__file__)\n .parent.parent.joinpath(\"config/teams\", self.team)\n .with_suffix(\".yaml\"),\n )\n self.config = omegaconf.OmegaConf.load(config_path)\n self._dataset_mappers = []\n cluster_config = self._get_cluster_config()\n if \"dataset_mappers\" in cluster_config:\n for pattern, repl in cluster_config[\"dataset_mappers\"].items():\n regex = re.compile(pattern)\n self._dataset_mappers.append((regex, repl))\n\n def _get_cluster_config(self) -> omegaconf.DictConfig:\n assert isinstance(self.config, omegaconf.DictConfig)\n return self.config[self.cluster]\n\n @classmethod\n def instance(cls):\n if cls._instance is None:\n cls._instance = cls()\n return cls._instance\n\n @classmethod\n def reset(cls):\n \"\"\"Clears the environment and forces a reload on next invocation.\"\"\"\n cls._instance = None\n\n @classmethod\n def get_team(cls) -> str:\n \"\"\"Gets the selected team as dictated by the AUDIOCRAFT_TEAM env var.\n If not defined, defaults to \"labs\".\n \"\"\"\n return cls.instance().team\n\n @classmethod\n def get_cluster(cls) -> str:\n \"\"\"Gets the detected cluster.\n This value can be overridden by the AUDIOCRAFT_CLUSTER env var.\n \"\"\"\n return cls.instance().cluster\n\n @classmethod\n def get_dora_dir(cls) -> Path:\n \"\"\"Gets the path to the dora directory for the current team and cluster.\n Value is overridden by the AUDIOCRAFT_DORA_DIR env var.\n \"\"\"\n cluster_config = cls.instance()._get_cluster_config()\n dora_dir = os.getenv(\"AUDIOCRAFT_DORA_DIR\", cluster_config[\"dora_dir\"])\n logger.warning(f\"Dora directory: {dora_dir}\")\n return Path(dora_dir)\n\n @classmethod\n def get_reference_dir(cls) -> Path:\n \"\"\"Gets the path to the reference directory for the current team and cluster.\n Value is overridden by the AUDIOCRAFT_REFERENCE_DIR env var.\n \"\"\"\n cluster_config = cls.instance()._get_cluster_config()\n return Path(os.getenv(\"AUDIOCRAFT_REFERENCE_DIR\", cluster_config[\"reference_dir\"]))\n\n @classmethod\n def get_slurm_exclude(cls) -> tp.Optional[str]:\n \"\"\"Get the list of nodes to exclude for that cluster.\"\"\"\n cluster_config = cls.instance()._get_cluster_config()\n return cluster_config.get(\"slurm_exclude\")\n\n @classmethod\n def get_slurm_partitions(cls, partition_types: tp.Optional[tp.List[str]] = None) -> str:\n \"\"\"Gets the requested partitions for the current team and cluster as a comma-separated string.\n\n Args:\n partition_types (list[str], optional): partition types to retrieve. Values must be\n from ['global', 'team']. 
If not provided, the global partition is returned.\n \"\"\"\n if not partition_types:\n partition_types = [\"global\"]\n\n cluster_config = cls.instance()._get_cluster_config()\n partitions = [\n cluster_config[\"partitions\"][partition_type]\n for partition_type in partition_types\n ]\n return \",\".join(partitions)\n\n @classmethod\n def resolve_reference_path(cls, path: tp.Union[str, Path]) -> Path:\n \"\"\"Converts reference placeholder in path with configured reference dir to resolve paths.\n\n Args:\n path (str or Path): Path to resolve.\n Returns:\n Path: Resolved path.\n \"\"\"\n path = str(path)\n\n if path.startswith(\"//reference\"):\n reference_dir = cls.get_reference_dir()\n logger.warn(f\"Reference directory: {reference_dir}\")\n assert (\n reference_dir.exists() and reference_dir.is_dir()\n ), f\"Reference directory does not exist: {reference_dir}.\"\n path = re.sub(\"^//reference\", str(reference_dir), path)\n\n return Path(path)\n\n @classmethod\n def apply_dataset_mappers(cls, path: str) -> str:\n \"\"\"Applies dataset mapping regex rules as defined in the configuration.\n If no rules are defined, the path is returned as-is.\n \"\"\"\n instance = cls.instance()\n\n for pattern, repl in instance._dataset_mappers:\n path = pattern.sub(repl, path)\n\n return path" }, { "identifier": "ResidualVectorQuantizer", "path": "audiocraft/quantization/vq.py", "snippet": "class ResidualVectorQuantizer(BaseQuantizer):\n \"\"\"Residual Vector Quantizer.\n\n Args:\n dimension (int): Dimension of the codebooks.\n n_q (int): Number of residual vector quantizers used.\n q_dropout (bool): Random quantizer drop out at train time.\n bins (int): Codebook size.\n decay (float): Decay for exponential moving average over the codebooks.\n kmeans_init (bool): Whether to use kmeans to initialize the codebooks.\n kmeans_iters (int): Number of iterations used for kmeans initialization.\n threshold_ema_dead_code (int): Threshold for dead code expiration. 
Replace any codes\n that have an exponential moving average cluster size less than the specified threshold with\n randomly selected vector from the current batch.\n orthogonal_reg_weight (float): Orthogonal regularization weights.\n orthogonal_reg_active_codes_only (bool): Apply orthogonal regularization only on active codes.\n orthogonal_reg_max_codes (optional int): Maximum number of codes to consider.\n for orthogonal regularization.\n \"\"\"\n def __init__(\n self,\n dimension: int = 256,\n n_q: int = 8,\n q_dropout: bool = False,\n bins: int = 1024,\n decay: float = 0.99,\n kmeans_init: bool = True,\n kmeans_iters: int = 10,\n threshold_ema_dead_code: int = 2,\n orthogonal_reg_weight: float = 0.0,\n orthogonal_reg_active_codes_only: bool = False,\n orthogonal_reg_max_codes: tp.Optional[int] = None,\n ):\n super().__init__()\n self.max_n_q = n_q\n self.n_q = n_q\n self.q_dropout = q_dropout\n self.dimension = dimension\n self.bins = bins\n self.decay = decay\n self.kmeans_init = kmeans_init\n self.kmeans_iters = kmeans_iters\n self.threshold_ema_dead_code = threshold_ema_dead_code\n self.orthogonal_reg_weight = orthogonal_reg_weight\n self.orthogonal_reg_active_codes_only = orthogonal_reg_active_codes_only\n self.orthogonal_reg_max_codes = orthogonal_reg_max_codes\n self.vq = ResidualVectorQuantization(\n dim=self.dimension,\n codebook_size=self.bins,\n num_quantizers=self.n_q,\n decay=self.decay,\n kmeans_init=self.kmeans_init,\n kmeans_iters=self.kmeans_iters,\n threshold_ema_dead_code=self.threshold_ema_dead_code,\n orthogonal_reg_weight=self.orthogonal_reg_weight,\n orthogonal_reg_active_codes_only=self.orthogonal_reg_active_codes_only,\n orthogonal_reg_max_codes=self.orthogonal_reg_max_codes,\n channels_last=False\n )\n\n def forward(self, x: torch.Tensor, frame_rate: int):\n n_q = self.n_q\n if self.training and self.q_dropout:\n n_q = int(torch.randint(1, self.n_q + 1, (1,)).item())\n bw_per_q = math.log2(self.bins) * frame_rate / 1000\n quantized, codes, commit_loss = self.vq(x, n_q=n_q)\n codes = codes.transpose(0, 1)\n # codes is [B, K, T], with T frames, K nb of codebooks.\n bw = torch.tensor(n_q * bw_per_q).to(x)\n return QuantizedResult(quantized, codes, bw, penalty=torch.mean(commit_loss))\n\n def encode(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Encode a given input tensor with the specified frame rate at the given bandwidth.\n The RVQ encode method sets the appropriate number of quantizer to use\n and returns indices for each quantizer.\n \"\"\"\n n_q = self.n_q\n codes = self.vq.encode(x, n_q=n_q)\n codes = codes.transpose(0, 1)\n # codes is [B, K, T], with T frames, K nb of codebooks.\n return codes\n\n def decode(self, codes: torch.Tensor) -> torch.Tensor:\n \"\"\"Decode the given codes to the quantized representation.\"\"\"\n # codes is [B, K, T], with T frames, K nb of codebooks, vq.decode expects [K, B, T].\n codes = codes.transpose(0, 1)\n quantized = self.vq.decode(codes)\n return quantized\n\n @property\n def total_codebooks(self):\n return self.max_n_q\n\n @property\n def num_codebooks(self):\n return self.n_q\n\n def set_num_codebooks(self, n: int):\n assert n > 0 and n <= self.max_n_q\n self.n_q = n" }, { "identifier": "TorchAutocast", "path": "audiocraft/utils/autocast.py", "snippet": "class TorchAutocast:\n \"\"\"TorchAutocast utility class.\n Allows you to enable and disable autocast. 
This is specially useful\n when dealing with different architectures and clusters with different\n levels of support.\n\n Args:\n enabled (bool): Whether to enable torch.autocast or not.\n args: Additional args for torch.autocast.\n kwargs: Additional kwargs for torch.autocast\n \"\"\"\n def __init__(self, enabled: bool, *args, **kwargs):\n self.autocast = torch.autocast(*args, **kwargs) if enabled else None\n\n def __enter__(self):\n if self.autocast is None:\n return\n try:\n self.autocast.__enter__()\n except RuntimeError:\n device = self.autocast.device\n dtype = self.autocast.fast_dtype\n raise RuntimeError(\n f\"There was an error autocasting with dtype={dtype} device={device}\\n\"\n \"If you are on the FAIR Cluster, you might need to use autocast_dtype=float16\"\n )\n\n def __exit__(self, *args, **kwargs):\n if self.autocast is None:\n return\n self.autocast.__exit__(*args, **kwargs)" }, { "identifier": "EmbeddingCache", "path": "audiocraft/utils/cache.py", "snippet": "class EmbeddingCache:\n \"\"\"Cache around embeddings computation for faster execution.\n The EmbeddingCache is storing pre-computed embeddings on disk and provides a simple API\n to retrieve the pre-computed embeddings on full inputs and extract only a given chunk\n using a user-provided function. When the cache is warm (all embeddings are pre-computed),\n the EmbeddingCache allows for faster training as it removes the need of computing the embeddings.\n Additionally, it provides in-memory cache around the loaded embeddings to limit IO footprint\n and synchronization points in the forward calls.\n\n Args:\n cache_path (Path): Path to folder where all pre-computed embeddings are saved on disk.\n device (str or torch.device): Device on which the embedding is returned.\n compute_embed_fn (callable[[Path, any, int], torch.Tensor], optional): Function to compute\n the embedding from a given object and path. This user provided function can compute the\n embedding from the provided object or using the provided path as entry point. The last parameter\n specify the index corresponding to the current embedding in the object that can represent batch metadata.\n extract_embed_fn (callable[[torch.Tensor, any, int], torch.Tensor], optional): Function to extract\n the desired embedding chunk from the full embedding loaded from the cache. 
The last parameter\n specify the index corresponding to the current embedding in the object that can represent batch metadata.\n If not specified, will return the full embedding unmodified.\n \"\"\"\n def __init__(self, cache_path: tp.Union[str, Path], device: tp.Union[str, torch.device],\n compute_embed_fn: tp.Callable[[Path, tp.Any, int], torch.Tensor],\n extract_embed_fn: tp.Optional[tp.Callable[[torch.Tensor, tp.Any, int], torch.Tensor]] = None):\n self.cache_path = Path(cache_path)\n self.device = device\n self._compute_embed_fn = compute_embed_fn\n self._extract_embed_fn: tp.Callable[[torch.Tensor, tp.Any, int], torch.Tensor]\n if extract_embed_fn is not None:\n self._extract_embed_fn = extract_embed_fn\n else:\n self._extract_embed_fn = partial(get_full_embed, device=device)\n if self.cache_path is not None:\n self.cache_path.mkdir(exist_ok=True, parents=True)\n logger.info(f\"Cache instantiated at: {self.cache_path}\")\n self.pool = ThreadPoolExecutor(8)\n self.pool.__enter__()\n self._current_batch_cache: dict = {}\n self._memory_cache: dict = {}\n\n def _get_cache_path(self, path: tp.Union[Path, str]):\n \"\"\"Get cache path for the given file path.\"\"\"\n sig = sha1(str(path).encode()).hexdigest()\n return self.cache_path / sig\n\n @staticmethod\n def _get_full_embed_from_cache(cache: Path):\n \"\"\"Loads full pre-computed embedding from the cache.\"\"\"\n try:\n embed = torch.load(cache, 'cpu')\n except Exception as exc:\n logger.error(\"Error loading %s: %r\", cache, exc)\n embed = None\n return embed\n\n def get_embed_from_cache(self, paths: tp.List[Path], x: tp.Any) -> torch.Tensor:\n \"\"\"Get embedding from cache, computing and storing it to cache if not already cached.\n The EmbeddingCache first tries to load the embedding from the in-memory cache\n containing the pre-computed chunks populated through `populate_embed_cache`.\n If not found, the full embedding is computed and stored on disk to be later accessed\n to populate the in-memory cache, and the desired embedding chunk is extracted and returned.\n\n Args:\n paths (list[Path or str]): List of paths from where the embeddings can be loaded.\n x (any): Object from which the embedding is extracted.\n \"\"\"\n embeds = []\n for idx, path in enumerate(paths):\n cache = self._get_cache_path(path)\n if cache in self._current_batch_cache:\n embed = self._current_batch_cache[cache]\n else:\n full_embed = self._compute_embed_fn(path, x, idx)\n try:\n with flashy.utils.write_and_rename(cache, pid=True) as f:\n torch.save(full_embed.cpu(), f)\n except Exception as exc:\n logger.error('Error saving embed %s (%s): %r', cache, full_embed.shape, exc)\n else:\n logger.info('New embed cache saved: %s (%s)', cache, full_embed.shape)\n embed = self._extract_embed_fn(full_embed, x, idx)\n embeds.append(embed)\n embed = torch.stack(embeds, dim=0)\n return embed\n\n def populate_embed_cache(self, paths: tp.List[Path], x: tp.Any) -> None:\n \"\"\"Populate in-memory caches for embeddings reading from the embeddings stored on disk.\n The in-memory caches consist in a cache for the full embedding and another cache for the\n final embedding chunk. 
Such caches are used to limit the IO access when computing the actual embeddings\n and reduce the IO footprint and synchronization points during forward passes.\n\n Args:\n paths (list[Path]): List of paths from where the embeddings can be loaded.\n x (any): Object from which the embedding is extracted.\n \"\"\"\n self._current_batch_cache.clear()\n if self.cache_path is not None:\n futures: list = []\n for path in paths:\n assert path is not None, \"Path is required for computation from cache\"\n cache = self._get_cache_path(path)\n if cache in self._memory_cache or not cache.exists():\n futures.append(None)\n else:\n futures.append(self.pool.submit(EmbeddingCache._get_full_embed_from_cache, cache))\n for idx, (path, future) in enumerate(zip(paths, futures)):\n assert path is not None\n cache = self._get_cache_path(path)\n full_embed = None\n if future is None:\n if cache in self._memory_cache:\n full_embed = self._memory_cache[cache]\n else:\n full_embed = future.result()\n if full_embed is not None:\n self._memory_cache[cache] = full_embed\n full_embed = full_embed.to(self.device)\n if full_embed is not None:\n embed = self._extract_embed_fn(full_embed, x, idx)\n self._current_batch_cache[cache] = embed" }, { "identifier": "collate", "path": "audiocraft/utils/utils.py", "snippet": "def collate(tensors: tp.List[torch.Tensor], dim: int = 0) -> tp.Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Get a list of tensors and collate them to a single tensor. according to the following logic:\n - `dim` specifies the time dimension which will be stacked and padded.\n - The output will contain 1 new dimension (dimension index 0) which will be the size of\n of the original list.\n\n Args:\n tensors (tp.List[torch.Tensor]): List of tensors to collate.\n dim (int): Dimension which will be stacked and padded.\n Returns:\n tp.Tuple[torch.Tensor, torch.Tensor]:\n torch.Tensor: Stacked and padded tensor. The output will contain 1 new dimension\n (dimension index 0) which will be the size of the original list.\n torch.Tensor: Tensor containing length of original tensor sizes (without padding).\n \"\"\"\n tensors = [x.transpose(0, dim) for x in tensors]\n lens = torch.LongTensor([len(x) for x in tensors])\n padded_tensors = pad_sequence(tensors)\n padded_tensors = padded_tensors.transpose(0, 1)\n padded_tensors = padded_tensors.transpose(1, dim + 1)\n return padded_tensors, lens" }, { "identifier": "hash_trick", "path": "audiocraft/utils/utils.py", "snippet": "def hash_trick(word: str, vocab_size: int) -> int:\n \"\"\"Hash trick to pair each word with an index\n\n Args:\n word (str): word we wish to convert to an index\n vocab_size (int): size of the vocabulary\n Returns:\n int: index of the word in the embedding LUT\n \"\"\"\n hash = int(hashlib.sha256(word.encode(\"utf-8\")).hexdigest(), 16)\n return hash % vocab_size" }, { "identifier": "length_to_mask", "path": "audiocraft/utils/utils.py", "snippet": "def length_to_mask(lengths: torch.Tensor, max_len: tp.Optional[int] = None) -> torch.Tensor:\n \"\"\"Utility function to convert a tensor of sequence lengths to a mask (useful when working on padded sequences).\n For example: [3, 5] => [[1, 1, 1, 0, 0], [1, 1, 1, 1, 1]]\n\n Args:\n lengths (torch.Tensor): tensor with lengths\n max_len (int): can set the max length manually. 
Defaults to None.\n Returns:\n torch.Tensor: mask with 0s where there is pad tokens else 1s\n \"\"\"\n assert len(lengths.shape) == 1, \"Length shape should be 1 dimensional.\"\n final_length = lengths.max().item() if not max_len else max_len\n final_length = max(final_length, 1) # if all seqs are of len zero we don't want a zero-size tensor\n return torch.arange(final_length, device=lengths.device)[None, :] < lengths[:, None]" }, { "identifier": "load_clap_state_dict", "path": "audiocraft/utils/utils.py", "snippet": "def load_clap_state_dict(clap_model, path: tp.Union[str, Path]):\n \"\"\"Wrapper around state dict loading of CLAP model\n addressing compatibility issues between CLAP and AudioCraft\n HuggingFace transformer version.\n See: https://github.com/LAION-AI/CLAP/issues/118\n \"\"\"\n from clap_module.factory import load_state_dict # type: ignore\n pkg = load_state_dict(path)\n pkg.pop('text_branch.embeddings.position_ids', None)\n clap_model.model.load_state_dict(pkg)" }, { "identifier": "warn_once", "path": "audiocraft/utils/utils.py", "snippet": "@lru_cache(None)\ndef warn_once(logger, msg):\n \"\"\"Warn about a given message only once.\"\"\"\n logger.warning(msg)" }, { "identifier": "chords", "path": "audiocraft/modules/btc/utils/chords.py", "snippet": "def chords(self, labels):\n\n \"\"\"\n Transform a list of chord labels into an array of internal numeric\n representations.\n\n Parameters\n ----------\n labels : list\n List of chord labels (str).\n\n Returns\n -------\n chords : numpy.array\n Structured array with columns 'root', 'bass', and 'intervals',\n containing a numeric representation of chords.\n\n \"\"\"\n crds = np.zeros(len(labels), dtype=CHORD_DTYPE)\n cache = {}\n for i, lbl in enumerate(labels):\n cv = cache.get(lbl, None)\n if cv is None:\n cv = self.chord(lbl)\n cache[lbl] = cv\n crds[i] = cv\n\n return crds" } ]
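The `hash_trick` and `length_to_mask` helpers quoted in the context above are used throughout the conditioner code further below (word-to-index hashing for the LUT tokenizers, padding masks for batched conditions). A minimal usage sketch, assuming the audiocraft package is importable; the vocabulary size and lengths are illustrative values only:

import torch
from audiocraft.utils.utils import hash_trick, length_to_mask

# hash_trick deterministically maps a word to an index inside a bounded vocabulary (LUT size).
idx = hash_trick("guitar", vocab_size=128)
assert 0 <= idx < 128

# length_to_mask turns per-item lengths into a padding mask,
# e.g. [3, 5] -> [[1, 1, 1, 0, 0], [1, 1, 1, 1, 1]].
mask = length_to_mask(torch.tensor([3, 5])).int()
print(mask)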
from collections import defaultdict
from copy import deepcopy
from dataclasses import dataclass, field
from itertools import chain
from pathlib import Path
from num2words import num2words
from transformers import RobertaTokenizer, T5EncoderModel, T5Tokenizer  # type: ignore
from torch import nn
from torch.nn.utils.rnn import pad_sequence
from .chroma import ChromaExtractor
from .chord_chroma import ChordExtractor
from .streaming import StreamingModule
from .transformer import create_sin_embedding
from ..data.audio import audio_read
from ..data.audio_dataset import SegmentInfo
from ..data.audio_utils import convert_audio
from ..environment import AudioCraftEnvironment
from ..quantization import ResidualVectorQuantizer
from ..utils.autocast import TorchAutocast
from ..utils.cache import EmbeddingCache
from ..utils.utils import collate, hash_trick, length_to_mask, load_clap_state_dict, warn_once
from .btc.utils import chords
from demucs import pretrained
from audiocraft.data.audio_dataset import AudioDataset
from demucs.apply import apply_model
from demucs.audio import convert_audio
import logging
import math
import random
import re
import typing as tp
import warnings
import einops
import spacy
import torch
import torch.nn.functional as F
import numpy as np
import laion_clap  # type: ignore
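Among the imports above, `collate` (documented in the context snippet) stacks a list of variable-length tensors into a single padded batch tensor and also returns the original lengths; this is how the wav conditions get batched in the conditioning provider below. A minimal sketch, assuming audiocraft is importable; the tensor lengths are illustrative:

import torch
from audiocraft.utils.utils import collate

# Three mono "waveforms" of different lengths, with time on dim=0.
tensors = [torch.randn(4), torch.randn(7), torch.randn(5)]
padded, lens = collate(tensors, dim=0)
print(padded.shape)  # torch.Size([3, 7]): new batch dimension, padded to the longest item
print(lens)          # tensor([4, 7, 5]): original lengths before padding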
13,135
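The conditioners in the source below guard their heavier forward passes with the `TorchAutocast` wrapper shown in the context: a thin switch that either enters `torch.autocast` or does nothing. A minimal sketch of the construction pattern they use (enabled off-CPU), assuming audiocraft is importable; the module, shapes and float16 dtype are illustrative choices:

import torch
from audiocraft.utils.autocast import TorchAutocast

device = 'cuda' if torch.cuda.is_available() else 'cpu'
# No-op context manager on CPU; autocast to float16 elsewhere.
autocast = TorchAutocast(enabled=device != 'cpu', device_type=device, dtype=torch.float16)

proj = torch.nn.Linear(8, 4).to(device)
x = torch.randn(2, 8, device=device)
with autocast:
    y = proj(x)  # runs under torch.autocast only when enabled
print(y.dtype)   # torch.float16 under CUDA autocast, torch.float32 on CPU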
def tokenize(self, inputs: tp.List[ConditioningAttributes]) -> tp.Dict[str, tp.Any]: """Match attributes/wavs with existing conditioners in self, and compute tokenize them accordingly. This should be called before starting any real GPU work to avoid synchronization points. This will return a dict matching conditioner names to their arbitrary tokenized representations. Args: inputs (list[ConditioningAttributes]): List of ConditioningAttributes objects containing text and wav conditions. """ assert all([isinstance(x, ConditioningAttributes) for x in inputs]), ( "Got unexpected types input for conditioner! should be tp.List[ConditioningAttributes]", f" but types were {set([type(x) for x in inputs])}" ) output = {} text = self._collate_text(inputs) wavs = self._collate_wavs(inputs) joint_embeds = self._collate_joint_embeds(inputs) assert set(text.keys() | wavs.keys() | joint_embeds.keys()).issubset(set(self.conditioners.keys())), ( f"Got an unexpected attribute! Expected {self.conditioners.keys()}, ", f"got {text.keys(), wavs.keys(), joint_embeds.keys()}" ) for attribute, batch in chain(text.items(), wavs.items(), joint_embeds.items()): output[attribute] = self.conditioners[attribute].tokenize(batch) return output def forward(self, tokenized: tp.Dict[str, tp.Any]) -> tp.Dict[str, ConditionType]: """Compute pairs of `(embedding, mask)` using the configured conditioners and the tokenized representations. The output is for example: { "genre": (torch.Tensor([B, 1, D_genre]), torch.Tensor([B, 1])), "description": (torch.Tensor([B, T_desc, D_desc]), torch.Tensor([B, T_desc])), ... } Args: tokenized (dict): Dict of tokenized representations as returned by `tokenize()`. """ output = {} for attribute, inputs in tokenized.items(): condition, mask = self.conditioners[attribute](inputs) output[attribute] = (condition, mask) return output def _collate_text(self, samples: tp.List[ConditioningAttributes]) -> tp.Dict[str, tp.List[tp.Optional[str]]]: """Given a list of ConditioningAttributes objects, compile a dictionary where the keys are the attributes and the values are the aggregated input per attribute. For example: Input: [ ConditioningAttributes(text={"genre": "Rock", "description": "A rock song with a guitar solo"}, wav=...), ConditioningAttributes(text={"genre": "Hip-hop", "description": "A hip-hop verse"}, wav=...), ] Output: { "genre": ["Rock", "Hip-hop"], "description": ["A rock song with a guitar solo", "A hip-hop verse"] } Args: samples (list of ConditioningAttributes): List of ConditioningAttributes samples. Returns: dict[str, list[str, optional]]: A dictionary mapping an attribute name to text batch. """ out: tp.Dict[str, tp.List[tp.Optional[str]]] = defaultdict(list) texts = [x.text for x in samples] for text in texts: for condition in self.text_conditions: out[condition].append(text[condition]) return out def _collate_wavs(self, samples: tp.List[ConditioningAttributes]) -> tp.Dict[str, tp.Union[WavCondition, WavChordTextCondition]]: """Generate a dict where the keys are attributes by which we fetch similar wavs, and the values are Tensors of wavs according to said attributes. *Note*: by the time the samples reach this function, each sample should have some waveform inside the "wav" attribute. It should be either: 1. A real waveform 2. A null waveform due to the sample having no similar waveforms (nullified by the dataset) 3. A null waveform due to it being dropped in a dropout module (nullified by dropout) Args: samples (list of ConditioningAttributes): List of ConditioningAttributes samples. 
Returns: dict[str, WavCondition]: A dictionary mapping an attribute name to wavs. """ wavs = defaultdict(list) lengths = defaultdict(list) sample_rates = defaultdict(list) paths = defaultdict(list) seek_times = defaultdict(list) bpms = defaultdict(list) meters = defaultdict(list) out: tp.Dict[str, WavCondition] = {} for sample in samples: for attribute in self.wav_conditions: if isinstance(sample.wav[attribute], WavCondition): wav, length, sample_rate, path, seek_time = sample.wav[attribute] assert wav.dim() == 3, f"Got wav with dim={wav.dim()}, but expected 3 [1, C, T]" assert wav.size(0) == 1, f"Got wav [B, C, T] with shape={wav.shape}, but expected B == 1" # mono-channel conditioning wav = wav.mean(1, keepdim=True) # [1, 1, T] wavs[attribute].append(wav.flatten()) # [T] else: wav, length, sample_rate, path, seek_time, bpm, meter = sample.wav[attribute] wavs[attribute].append(wav[0]) bpms[attribute].append(bpm[0]) meters[attribute].append(meter[0]) lengths[attribute].append(length) sample_rates[attribute].extend(sample_rate) paths[attribute].extend(path) seek_times[attribute].extend(seek_time) # stack all wavs to a single tensor for attribute in self.wav_conditions: if isinstance(wavs[attribute][0], torch.Tensor):
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. logger = logging.getLogger(__name__) TextCondition = tp.Optional[str] # a text condition can be a string or None (if doesn't exist) ConditionType = tp.Tuple[torch.Tensor, torch.Tensor] # condition, mask class WavCondition(tp.NamedTuple): wav: torch.Tensor length: torch.Tensor sample_rate: tp.List[int] path: tp.List[tp.Optional[str]] = [] seek_time: tp.List[tp.Optional[float]] = [] class WavChordTextCondition(tp.NamedTuple): wav: tp.Union[torch.Tensor,str,tp.List[str]] length: torch.Tensor sample_rate: tp.List[int] path: tp.List[tp.Optional[str]] = [] seek_time: tp.List[tp.Optional[float]] = [] bpm : tp.List[tp.Optional[tp.Union[int, float]]] = [] meter : tp.List[tp.Optional[int]] = [] class JointEmbedCondition(tp.NamedTuple): wav: torch.Tensor text: tp.List[tp.Optional[str]] length: torch.Tensor sample_rate: tp.List[int] path: tp.List[tp.Optional[str]] = [] seek_time: tp.List[tp.Optional[float]] = [] @dataclass class ConditioningAttributes: text: tp.Dict[str, tp.Optional[str]] = field(default_factory=dict) wav: tp.Dict[str, tp.Union[WavCondition,WavChordTextCondition]] = field(default_factory=dict) joint_embed: tp.Dict[str, JointEmbedCondition] = field(default_factory=dict) def __getitem__(self, item): return getattr(self, item) @property def text_attributes(self): return self.text.keys() @property def wav_attributes(self): return self.wav.keys() @property def joint_embed_attributes(self): return self.joint_embed.keys() @property def attributes(self): return { "text": self.text_attributes, "wav": self.wav_attributes, "joint_embed": self.joint_embed_attributes, } def to_flat_dict(self): return { **{f"text.{k}": v for k, v in self.text.items()}, **{f"wav.{k}": v for k, v in self.wav.items()}, **{f"joint_embed.{k}": v for k, v in self.joint_embed.items()} } @classmethod def from_flat_dict(cls, x): out = cls() for k, v in x.items(): kind, att = k.split(".") out[kind][att] = v return out class SegmentWithAttributes(SegmentInfo): """Base class for all dataclasses that are used for conditioning. All child classes should implement `to_condition_attributes` that converts the existing attributes to a dataclass of type ConditioningAttributes. """ def to_condition_attributes(self) -> ConditioningAttributes: raise NotImplementedError() def nullify_condition(condition: ConditionType, dim: int = 1): """Transform an input condition to a null condition. The way it is done by converting it to a single zero vector similarly to how it is done inside WhiteSpaceTokenizer and NoopTokenizer. Args: condition (ConditionType): A tuple of condition and mask (tuple[torch.Tensor, torch.Tensor]) dim (int): The dimension that will be truncated (should be the time dimension) WARNING!: dim should not be the batch dimension! Returns: ConditionType: A tuple of null condition and mask """ assert dim != 0, "dim cannot be the batch dimension!" assert isinstance(condition, tuple) and \ isinstance(condition[0], torch.Tensor) and \ isinstance(condition[1], torch.Tensor), "'nullify_condition' got an unexpected input type!" cond, mask = condition B = cond.shape[0] last_dim = cond.dim() - 1 out = cond.transpose(dim, last_dim) out = 0. 
* out[..., :1] out = out.transpose(dim, last_dim) mask = torch.zeros((B, 1), device=out.device).int() assert cond.dim() == out.dim() return out, mask def nullify_wav(cond: tp.Union[WavCondition,WavChordTextCondition]) -> tp.Union[WavCondition,WavChordTextCondition]: """Transform a WavCondition to a nullified WavCondition. It replaces the wav by a null tensor, forces its length to 0, and replaces metadata by dummy attributes. Args: cond (WavCondition): Wav condition with wav, tensor of shape [B, T]. Returns: WavCondition: Nullified wav condition. """ if not isinstance(cond, WavChordTextCondition): null_wav, _ = nullify_condition((cond.wav, torch.zeros_like(cond.wav)), dim=cond.wav.dim() - 1) return WavCondition( wav=null_wav, length=torch.tensor([0] * cond.wav.shape[0], device=cond.wav.device), sample_rate=cond.sample_rate, path=[None] * cond.wav.shape[0], seek_time=[None] * cond.wav.shape[0], ) else: return WavChordTextCondition( wav=['N']* len(cond.wav), length=torch.tensor([0] * len(cond.wav), device=cond.length.device), sample_rate=cond.sample_rate, path=[None], seek_time=[None], bpm = cond.bpm, meter = cond.meter ) def nullify_joint_embed(embed: JointEmbedCondition) -> JointEmbedCondition: """Nullify the joint embedding condition by replacing it by a null tensor, forcing its length to 0, and replacing metadata by dummy attributes. Args: cond (JointEmbedCondition): Joint embedding condition with wav and text, wav tensor of shape [B, C, T]. """ null_wav, _ = nullify_condition((embed.wav, torch.zeros_like(embed.wav)), dim=embed.wav.dim() - 1) return JointEmbedCondition( wav=null_wav, text=[None] * len(embed.text), length=torch.LongTensor([0]).to(embed.wav.device), sample_rate=embed.sample_rate, path=[None] * embed.wav.shape[0], seek_time=[0] * embed.wav.shape[0], ) class Tokenizer: """Base tokenizer implementation (in case we want to introduce more advances tokenizers in the future). """ def __call__(self, texts: tp.List[tp.Optional[str]]) -> tp.Tuple[torch.Tensor, torch.Tensor]: raise NotImplementedError() class WhiteSpaceTokenizer(Tokenizer): """This tokenizer should be used for natural language descriptions. For example: ["he didn't, know he's going home.", 'shorter sentence'] => [[78, 62, 31, 4, 78, 25, 19, 34], [59, 77, 0, 0, 0, 0, 0, 0]] """ PUNCTUATION = "?:!.,;" def __init__(self, n_bins: int, pad_idx: int = 0, language: str = "en_core_web_sm", lemma: bool = True, stopwords: bool = True) -> None: self.n_bins = n_bins self.pad_idx = pad_idx self.lemma = lemma self.stopwords = stopwords try: self.nlp = spacy.load(language) except IOError: spacy.cli.download(language) # type: ignore self.nlp = spacy.load(language) @tp.no_type_check def __call__(self, texts: tp.List[tp.Optional[str]], return_text: bool = False) -> tp.Tuple[torch.Tensor, torch.Tensor]: """Take a list of strings and convert them to a tensor of indices. Args: texts (list[str]): List of strings. return_text (bool, optional): Whether to return text as additional tuple item. Defaults to False. Returns: tuple[torch.Tensor, torch.Tensor]: - Indices of words in the LUT. 
- And a mask indicating where the padding tokens are """ output, lengths = [], [] texts = deepcopy(texts) for i, text in enumerate(texts): # if current sample doesn't have a certain attribute, replace with pad token if text is None: output.append(torch.Tensor([self.pad_idx])) lengths.append(0) continue # convert numbers to words text = re.sub(r"(\d+)", lambda x: num2words(int(x.group(0))), text) # type: ignore # normalize text text = self.nlp(text) # type: ignore # remove stopwords if self.stopwords: text = [w for w in text if not w.is_stop] # type: ignore # remove punctuation text = [w for w in text if w.text not in self.PUNCTUATION] # type: ignore # lemmatize if needed text = [getattr(t, "lemma_" if self.lemma else "text") for t in text] # type: ignore texts[i] = " ".join(text) lengths.append(len(text)) # convert to tensor tokens = torch.Tensor([hash_trick(w, self.n_bins) for w in text]) output.append(tokens) mask = length_to_mask(torch.IntTensor(lengths)).int() padded_output = pad_sequence(output, padding_value=self.pad_idx).int().t() if return_text: return padded_output, mask, texts # type: ignore return padded_output, mask class NoopTokenizer(Tokenizer): """This tokenizer should be used for global conditioners such as: artist, genre, key, etc. The difference between this and WhiteSpaceTokenizer is that NoopTokenizer does not split strings, so "Jeff Buckley" will get it's own index. Whereas WhiteSpaceTokenizer will split it to ["Jeff", "Buckley"] and return an index per word. For example: ["Queen", "ABBA", "Jeff Buckley"] => [43, 55, 101] ["Metal", "Rock", "Classical"] => [0, 223, 51] """ def __init__(self, n_bins: int, pad_idx: int = 0): self.n_bins = n_bins self.pad_idx = pad_idx def __call__(self, texts: tp.List[tp.Optional[str]]) -> tp.Tuple[torch.Tensor, torch.Tensor]: output, lengths = [], [] for text in texts: # if current sample doesn't have a certain attribute, replace with pad token if text is None: output.append(self.pad_idx) lengths.append(0) else: output.append(hash_trick(text, self.n_bins)) lengths.append(1) tokens = torch.LongTensor(output).unsqueeze(1) mask = length_to_mask(torch.IntTensor(lengths)).int() return tokens, mask class BaseConditioner(nn.Module): """Base model for all conditioner modules. We allow the output dim to be different than the hidden dim for two reasons: 1) keep our LUTs small when the vocab is large; 2) make all condition dims consistent. Args: dim (int): Hidden dim of the model. output_dim (int): Output dim of the conditioner. """ def __init__(self, dim: int, output_dim: int): super().__init__() self.dim = dim self.output_dim = output_dim self.output_proj = nn.Linear(dim, output_dim) def tokenize(self, *args, **kwargs) -> tp.Any: """Should be any part of the processing that will lead to a synchronization point, e.g. BPE tokenization with transfer to the GPU. The returned value will be saved and return later when calling forward(). """ raise NotImplementedError() def forward(self, inputs: tp.Any) -> ConditionType: """Gets input that should be used as conditioning (e.g, genre, description or a waveform). Outputs a ConditionType, after the input data was embedded as a dense vector. Returns: ConditionType: - A tensor of size [B, T, D] where B is the batch size, T is the length of the output embedding and D is the dimension of the embedding. - And a mask indicating where the padding tokens. """ raise NotImplementedError() class TextConditioner(BaseConditioner): ... class LUTConditioner(TextConditioner): """Lookup table TextConditioner. 
Args: n_bins (int): Number of bins. dim (int): Hidden dim of the model (text-encoder/LUT). output_dim (int): Output dim of the conditioner. tokenizer (str): Name of the tokenizer. pad_idx (int, optional): Index for padding token. Defaults to 0. """ def __init__(self, n_bins: int, dim: int, output_dim: int, tokenizer: str, pad_idx: int = 0): super().__init__(dim, output_dim) self.embed = nn.Embedding(n_bins, dim) self.tokenizer: Tokenizer if tokenizer == 'whitespace': self.tokenizer = WhiteSpaceTokenizer(n_bins, pad_idx=pad_idx) elif tokenizer == 'noop': self.tokenizer = NoopTokenizer(n_bins, pad_idx=pad_idx) else: raise ValueError(f"unrecognized tokenizer `{tokenizer}`.") def tokenize(self, x: tp.List[tp.Optional[str]]) -> tp.Tuple[torch.Tensor, torch.Tensor]: device = self.embed.weight.device tokens, mask = self.tokenizer(x) tokens, mask = tokens.to(device), mask.to(device) return tokens, mask def forward(self, inputs: tp.Tuple[torch.Tensor, torch.Tensor]) -> ConditionType: tokens, mask = inputs embeds = self.embed(tokens) embeds = self.output_proj(embeds) embeds = (embeds * mask.unsqueeze(-1)) return embeds, mask class T5Conditioner(TextConditioner): """T5-based TextConditioner. Args: name (str): Name of the T5 model. output_dim (int): Output dim of the conditioner. finetune (bool): Whether to fine-tune T5 at train time. device (str): Device for T5 Conditioner. autocast_dtype (tp.Optional[str], optional): Autocast dtype. word_dropout (float, optional): Word dropout probability. normalize_text (bool, optional): Whether to apply text normalization. """ MODELS = ["t5-small", "t5-base", "t5-large", "t5-3b", "t5-11b", "google/flan-t5-small", "google/flan-t5-base", "google/flan-t5-large", "google/flan-t5-xl", "google/flan-t5-xxl"] MODELS_DIMS = { "t5-small": 512, "t5-base": 768, "t5-large": 1024, "t5-3b": 1024, "t5-11b": 1024, "google/flan-t5-small": 512, "google/flan-t5-base": 768, "google/flan-t5-large": 1024, "google/flan-t5-3b": 1024, "google/flan-t5-11b": 1024, } def __init__(self, name: str, output_dim: int, finetune: bool, device: str, autocast_dtype: tp.Optional[str] = 'float32', word_dropout: float = 0., normalize_text: bool = False): assert name in self.MODELS, f"Unrecognized t5 model name (should in {self.MODELS})" super().__init__(self.MODELS_DIMS[name], output_dim) self.device = device self.name = name self.finetune = finetune self.word_dropout = word_dropout if autocast_dtype is None or self.device == 'cpu': self.autocast = TorchAutocast(enabled=False) if self.device != 'cpu': logger.warning("T5 has no autocast, this might lead to NaN") else: dtype = getattr(torch, autocast_dtype) assert isinstance(dtype, torch.dtype) logger.info(f"T5 will be evaluated with autocast as {autocast_dtype}") self.autocast = TorchAutocast(enabled=True, device_type=self.device, dtype=dtype) # Let's disable logging temporarily because T5 will vomit some errors otherwise. 
# thanks https://gist.github.com/simon-weber/7853144 previous_level = logging.root.manager.disable logging.disable(logging.ERROR) with warnings.catch_warnings(): warnings.simplefilter("ignore") try: self.t5_tokenizer = T5Tokenizer.from_pretrained(name) t5 = T5EncoderModel.from_pretrained(name).train(mode=finetune) finally: logging.disable(previous_level) if finetune: self.t5 = t5 else: # this makes sure that the t5 models is not part # of the saved checkpoint self.__dict__['t5'] = t5.to(device) self.normalize_text = normalize_text if normalize_text: self.text_normalizer = WhiteSpaceTokenizer(1, lemma=True, stopwords=True) def tokenize(self, x: tp.List[tp.Optional[str]]) -> tp.Dict[str, torch.Tensor]: # if current sample doesn't have a certain attribute, replace with empty string entries: tp.List[str] = [xi if xi is not None else "" for xi in x] if self.normalize_text: _, _, entries = self.text_normalizer(entries, return_text=True) if self.word_dropout > 0. and self.training: new_entries = [] for entry in entries: words = [word for word in entry.split(" ") if random.random() >= self.word_dropout] new_entries.append(" ".join(words)) entries = new_entries empty_idx = torch.LongTensor([i for i, xi in enumerate(entries) if xi == ""]) inputs = self.t5_tokenizer(entries, return_tensors='pt', padding=True).to(self.device) mask = inputs['attention_mask'] mask[empty_idx, :] = 0 # zero-out index where the input is non-existant return inputs def forward(self, inputs: tp.Dict[str, torch.Tensor]) -> ConditionType: mask = inputs['attention_mask'] with torch.set_grad_enabled(self.finetune), self.autocast: embeds = self.t5(**inputs).last_hidden_state embeds = self.output_proj(embeds.to(self.output_proj.weight)) embeds = (embeds * mask.unsqueeze(-1)) return embeds, mask class WaveformConditioner(BaseConditioner): """Base class for all conditioners that take a waveform as input. Classes that inherit must implement `_get_wav_embedding` that outputs a continuous tensor, and `_downsampling_factor` that returns the down-sampling factor of the embedding model. Args: dim (int): The internal representation dimension. output_dim (int): Output dimension. device (tp.Union[torch.device, str]): Device. """ def __init__(self, dim: int, output_dim: int, device: tp.Union[torch.device, str]): super().__init__(dim, output_dim) self.device = device # if False no masking is done, used in ChromaStemConditioner when completing by periodicity a sample. self._use_masking = True def tokenize(self, x: WavCondition) -> WavCondition: wav, length, sample_rate, path, seek_time = x assert length is not None return WavCondition(wav.to(self.device), length.to(self.device), sample_rate, path, seek_time) def _get_wav_embedding(self, x: WavCondition) -> torch.Tensor: """Gets as input a WavCondition and returns a dense embedding.""" raise NotImplementedError() def _downsampling_factor(self): """Returns the downsampling factor of the embedding model.""" raise NotImplementedError() def forward(self, x: WavCondition) -> ConditionType: """Extract condition embedding and mask from a waveform and its metadata. Args: x (WavCondition): Waveform condition containing raw waveform and metadata. 
Returns: ConditionType: a dense vector representing the conditioning along with its mask """ wav, lengths, *_ = x with torch.no_grad(): embeds = self._get_wav_embedding(x) embeds = embeds.to(self.output_proj.weight) embeds = self.output_proj(embeds) if lengths is not None and self._use_masking: lengths = lengths / self._downsampling_factor() mask = length_to_mask(lengths, max_len=embeds.shape[1]).int() # type: ignore else: mask = torch.ones_like(embeds[..., 0]) embeds = (embeds * mask.unsqueeze(-1)) return embeds, mask class ChromaStemConditioner(WaveformConditioner): """Chroma conditioner based on stems. The ChromaStemConditioner uses DEMUCS to first filter out drums and bass, as the drums and bass often dominate the chroma leading to the chroma features not containing information about the melody. Args: output_dim (int): Output dimension for the conditioner. sample_rate (int): Sample rate for the chroma extractor. n_chroma (int): Number of chroma bins for the chroma extractor. radix2_exp (int): Size of stft window for the chroma extractor (power of 2, e.g. 12 -> 2^12). duration (int): duration used during training. This is later used for correct padding in case we are using chroma as prefix. match_len_on_eval (bool, optional): if True then all chromas are padded to the training duration. Defaults to False. eval_wavs (str, optional): path to a dataset manifest with waveform, this waveforms are used as conditions during eval (for cases where we don't want to leak test conditions like MusicCaps). Defaults to None. n_eval_wavs (int, optional): limits the number of waveforms used for conditioning. Defaults to 0. device (tp.Union[torch.device, str], optional): Device for the conditioner. **kwargs: Additional parameters for the chroma extractor. """ def __init__(self, output_dim: int, sample_rate: int, n_chroma: int, radix2_exp: int, duration: float, match_len_on_eval: bool = True, eval_wavs: tp.Optional[str] = None, n_eval_wavs: int = 0, cache_path: tp.Optional[tp.Union[str, Path]] = None, device: tp.Union[torch.device, str] = 'cpu', **kwargs): super().__init__(dim=n_chroma, output_dim=output_dim, device=device) self.autocast = TorchAutocast(enabled=device != 'cpu', device_type=self.device, dtype=torch.float32) self.sample_rate = sample_rate self.match_len_on_eval = match_len_on_eval if match_len_on_eval: self._use_masking = False self.duration = duration self.__dict__['demucs'] = pretrained.get_model('htdemucs').to(device) stem_sources: list = self.demucs.sources # type: ignore self.stem_indices = torch.LongTensor([stem_sources.index('vocals'), stem_sources.index('other')]).to(device) self.chroma = ChromaExtractor(sample_rate=sample_rate, n_chroma=n_chroma, radix2_exp=radix2_exp, **kwargs).to(device) self.chroma_len = self._get_chroma_len() self.eval_wavs: tp.Optional[torch.Tensor] = self._load_eval_wavs(eval_wavs, n_eval_wavs) self.cache = None if cache_path is not None: self.cache = EmbeddingCache(Path(cache_path) / 'wav', self.device, compute_embed_fn=self._get_full_chroma_for_cache, extract_embed_fn=self._extract_chroma_chunk) def _downsampling_factor(self) -> int: return self.chroma.winhop def _load_eval_wavs(self, path: tp.Optional[str], num_samples: int) -> tp.Optional[torch.Tensor]: """Load pre-defined waveforms from a json. These waveforms will be used for chroma extraction during evaluation. This is done to make the evaluation on MusicCaps fair (we shouldn't see the chromas of MusicCaps). 
""" if path is None: return None logger.info(f"Loading evaluation wavs from {path}") dataset: AudioDataset = AudioDataset.from_meta( path, segment_duration=self.duration, min_audio_duration=self.duration, sample_rate=self.sample_rate, channels=1) if len(dataset) > 0: eval_wavs = dataset.collater([dataset[i] for i in range(num_samples)]).to(self.device) logger.info(f"Using {len(eval_wavs)} evaluation wavs for chroma-stem conditioner") return eval_wavs else: raise ValueError("Could not find evaluation wavs, check lengths of wavs") def reset_eval_wavs(self, eval_wavs: tp.Optional[torch.Tensor]) -> None: self.eval_wavs = eval_wavs def has_eval_wavs(self) -> bool: return self.eval_wavs is not None def _sample_eval_wavs(self, num_samples: int) -> torch.Tensor: """Sample wavs from a predefined list.""" assert self.eval_wavs is not None, "Cannot sample eval wavs as no eval wavs provided." total_eval_wavs = len(self.eval_wavs) out = self.eval_wavs if num_samples > total_eval_wavs: out = self.eval_wavs.repeat(num_samples // total_eval_wavs + 1, 1, 1) return out[torch.randperm(len(out))][:num_samples] def _get_chroma_len(self) -> int: """Get length of chroma during training.""" dummy_wav = torch.zeros((1, int(self.sample_rate * self.duration)), device=self.device) dummy_chr = self.chroma(dummy_wav) return dummy_chr.shape[1] @torch.no_grad() def _get_stemmed_wav(self, wav: torch.Tensor, sample_rate: int) -> torch.Tensor: """Get parts of the wav that holds the melody, extracting the main stems from the wav.""" with self.autocast: wav = convert_audio( wav, sample_rate, self.demucs.samplerate, self.demucs.audio_channels) # type: ignore stems = apply_model(self.demucs, wav, device=self.device) stems = stems[:, self.stem_indices] # extract relevant stems for melody conditioning mix_wav = stems.sum(1) # merge extracted stems to single waveform mix_wav = convert_audio(mix_wav, self.demucs.samplerate, self.sample_rate, 1) # type: ignore return mix_wav @torch.no_grad() def _extract_chroma(self, wav: torch.Tensor) -> torch.Tensor: """Extract chroma features from the waveform.""" with self.autocast: return self.chroma(wav) @torch.no_grad() def _compute_wav_embedding(self, wav: torch.Tensor, sample_rate: int) -> torch.Tensor: """Compute wav embedding, applying stem and chroma extraction.""" # avoid 0-size tensors when we are working with null conds if wav.shape[-1] == 1: return self._extract_chroma(wav) stems = self._get_stemmed_wav(wav, sample_rate) chroma = self._extract_chroma(stems) return chroma @torch.no_grad() def _get_full_chroma_for_cache(self, path: tp.Union[str, Path], x: WavCondition, idx: int) -> torch.Tensor: """Extract chroma from the whole audio waveform at the given path.""" wav, sr = audio_read(path) wav = wav[None].to(self.device) wav = convert_audio(wav, sr, self.sample_rate, to_channels=1) chroma = self._compute_wav_embedding(wav, self.sample_rate)[0] return chroma def _extract_chroma_chunk(self, full_chroma: torch.Tensor, x: WavCondition, idx: int) -> torch.Tensor: """Extract a chunk of chroma from the full chroma derived from the full waveform.""" wav_length = x.wav.shape[-1] seek_time = x.seek_time[idx] assert seek_time is not None, ( "WavCondition seek_time is required " "when extracting chroma chunks from pre-computed chroma.") full_chroma = full_chroma.float() frame_rate = self.sample_rate / self._downsampling_factor() target_length = int(frame_rate * wav_length / self.sample_rate) index = int(frame_rate * seek_time) out = full_chroma[index: index + target_length] out = F.pad(out[None], 
(0, 0, 0, target_length - out.shape[0]))[0] return out.to(self.device) @torch.no_grad() def _get_wav_embedding(self, x: WavCondition) -> torch.Tensor: """Get the wav embedding from the WavCondition. The conditioner will either extract the embedding on-the-fly computing it from the condition wav directly or will rely on the embedding cache to load the pre-computed embedding if relevant. """ sampled_wav: tp.Optional[torch.Tensor] = None if not self.training and self.eval_wavs is not None: warn_once(logger, "Using precomputed evaluation wavs!") sampled_wav = self._sample_eval_wavs(len(x.wav)) no_undefined_paths = all(p is not None for p in x.path) no_nullified_cond = x.wav.shape[-1] > 1 if sampled_wav is not None: chroma = self._compute_wav_embedding(sampled_wav, self.sample_rate) elif self.cache is not None and no_undefined_paths and no_nullified_cond: paths = [Path(p) for p in x.path if p is not None] chroma = self.cache.get_embed_from_cache(paths, x) else: assert all(sr == x.sample_rate[0] for sr in x.sample_rate), "All sample rates in batch should be equal." chroma = self._compute_wav_embedding(x.wav, x.sample_rate[0]) if self.match_len_on_eval: B, T, C = chroma.shape if T > self.chroma_len: chroma = chroma[:, :self.chroma_len] logger.debug(f"Chroma was truncated to match length! ({T} -> {chroma.shape[1]})") elif T < self.chroma_len: n_repeat = int(math.ceil(self.chroma_len / T)) chroma = chroma.repeat(1, n_repeat, 1) chroma = chroma[:, :self.chroma_len] logger.debug(f"Chroma was repeated to match length! ({T} -> {chroma.shape[1]})") return chroma def tokenize(self, x: WavCondition) -> WavCondition: """Apply WavConditioner tokenization and populate cache if needed.""" x = super().tokenize(x) no_undefined_paths = all(p is not None for p in x.path) if self.cache is not None and no_undefined_paths: paths = [Path(p) for p in x.path if p is not None] self.cache.populate_embed_cache(paths, x) return x class ChromaChordConditioner(ChromaStemConditioner): """Chord Chroma conditioner based on stems. The ChromaChordConditioner uses DEMUCS to first filter out drums and bass, as the drums and bass often dominate the chroma leading to the chroma features not containing information about the melody. Args: output_dim (int): Output dimension for the conditioner. sample_rate (int): Sample rate for the chroma extractor. n_chroma (int): Number of chroma bins for the chroma extractor. radix2_exp (int): Size of stft window for the chroma extractor (power of 2, e.g. 12 -> 2^12). duration (int): duration used during training. This is later used for correct padding in case we are using chroma as prefix. match_len_on_eval (bool, optional): if True then all chromas are padded to the training duration. Defaults to False. eval_wavs (str, optional): path to a dataset manifest with waveform, this waveforms are used as conditions during eval (for cases where we don't want to leak test conditions like MusicCaps). Defaults to None. n_eval_wavs (int, optional): limits the number of waveforms used for conditioning. Defaults to 0. device (tp.Union[torch.device, str], optional): Device for the conditioner. **kwargs: Additional parameters for the chroma extractor. 
""" def __init__(self, output_dim: int, sample_rate: int, n_chroma: int, radix2_exp: int, duration: float, match_len_on_eval: bool = True, eval_wavs: tp.Optional[str] = None, n_eval_wavs: int = 0, cache_path: tp.Optional[tp.Union[str, Path]] = None, device: tp.Union[torch.device, str] = 'cpu', **kwargs): super().__init__(output_dim = output_dim, sample_rate = sample_rate, n_chroma = n_chroma, radix2_exp = radix2_exp, duration = duration, match_len_on_eval = match_len_on_eval, eval_wavs = eval_wavs, n_eval_wavs = n_eval_wavs, cache_path = cache_path, device = device) self.winhop = self.chroma.winhop self.__dict__['demucs'] = pretrained.get_model('htdemucs').to(device) stem_sources: list = self.demucs.sources self.stem_indices = torch.LongTensor([stem_sources.index('vocals'), stem_sources.index('bass'), stem_sources.index('other')]).to(device) self.chroma_len = self._get_chroma_len() self.bar2chromabin = self.sample_rate / self.winhop self.chroma = ChordExtractor(device = device, sample_rate=sample_rate, n_chroma=n_chroma, max_duration = duration, chroma_len = self.chroma_len, winhop = self.winhop).to(device) self.chords = chords.Chords() self.chroma_coefficient = 1 self.continuation_count = 0 # for infinite generation with text chroma #3 Layered MLP projection override ''' self.output_proj = nn.Sequential( nn.Linear(n_chroma, 128), nn.ReLU(), nn.Linear(128, 256), nn.ReLU(), nn.Linear(256, output_dim) ) ''' def _downsampling_factor(self) -> int: return self.winhop def _load_eval_wavs(self, path: tp.Optional[str], num_samples: int) -> tp.Optional[torch.Tensor]: """Load pre-defined waveforms from a json. These waveforms will be used for chroma extraction during evaluation. This is done to make the evaluation on MusicCaps fair (we shouldn't see the chromas of MusicCaps). """ if path is None: return None logger.info(f"Loading evaluation wavs from {path}") dataset: AudioDataset = AudioDataset.from_meta( path, segment_duration=self.duration, min_audio_duration=self.duration, sample_rate=self.sample_rate, channels=1) if len(dataset) > 0: eval_wavs = dataset.collater([dataset[i] for i in range(num_samples)]).to(self.device) logger.info(f"Using {len(eval_wavs)} evaluation wavs for chroma-stem conditioner") return eval_wavs else: raise ValueError("Could not find evaluation wavs, check lengths of wavs") def reset_eval_wavs(self, eval_wavs: tp.Optional[torch.Tensor]) -> None: self.eval_wavs = eval_wavs def has_eval_wavs(self) -> bool: return self.eval_wavs is not None def _sample_eval_wavs(self, num_samples: int) -> torch.Tensor: """Sample wavs from a predefined list.""" assert self.eval_wavs is not None, "Cannot sample eval wavs as no eval wavs provided." 
total_eval_wavs = len(self.eval_wavs) out = self.eval_wavs if num_samples > total_eval_wavs: out = self.eval_wavs.repeat(num_samples // total_eval_wavs + 1, 1, 1) return out[torch.randperm(len(out))][:num_samples] def _get_chroma_len(self) -> int: """Get length of chroma during training.""" dummy_wav = torch.zeros((1, int(self.sample_rate * self.duration)), device=self.device) dummy_chr = self.chroma(dummy_wav) return dummy_chr.shape[1] @torch.no_grad() def _get_stemmed_wav(self, wav: torch.Tensor, sample_rate: int) -> torch.Tensor: """Get parts of the wav that holds the melody, extracting the main stems from the wav.""" with self.autocast: wav = convert_audio( wav, sample_rate, self.demucs.samplerate, self.demucs.audio_channels) # type: ignore stems = apply_model(self.demucs, wav, device=self.device) stems = stems[:, self.stem_indices] # extract relevant stems for melody conditioning mix_wav = stems.sum(1) # merge extracted stems to single waveform mix_wav = convert_audio(mix_wav, self.demucs.samplerate, self.sample_rate, 1) # type: ignore return mix_wav @torch.no_grad() def _extract_chroma(self, wav: torch.Tensor) -> torch.Tensor: """Extract chroma features from the waveform.""" with self.autocast: return self.chroma(wav) @torch.no_grad() def _compute_wav_embedding(self, wav: torch.Tensor, sample_rate: int) -> torch.Tensor: """Compute wav embedding, applying stem and chroma extraction.""" # avoid 0-size tensors when we are working with null conds if wav.shape[-1] == 1: # print("1515151") return self._extract_chroma(wav) stems = self._get_stemmed_wav(wav, sample_rate) chroma = self._extract_chroma(stems) # print("2727272") return chroma @torch.no_grad() def _get_full_chroma_for_cache(self, path: tp.Union[str, Path], x: WavCondition, idx: int) -> torch.Tensor: """Extract chroma from the whole audio waveform at the given path.""" wav, sr = audio_read(path) wav = wav[None].to(self.device) wav = convert_audio(wav, sr, self.sample_rate, to_channels=1) chroma = self._compute_wav_embedding(wav, self.sample_rate)[0] return chroma def _extract_chroma_chunk(self, full_chroma: torch.Tensor, x: WavCondition, idx: int) -> torch.Tensor: """Extract a chunk of chroma from the full chroma derived from the full waveform.""" wav_length = x.wav.shape[-1] seek_time = x.seek_time[idx] assert seek_time is not None, ( "WavCondition seek_time is required " "when extracting chroma chunks from pre-computed chroma.") full_chroma = full_chroma.float() frame_rate = self.sample_rate / self._downsampling_factor() target_length = int(frame_rate * wav_length / self.sample_rate) index = int(frame_rate * seek_time) out = full_chroma[index: index + target_length] out = F.pad(out[None], (0, 0, 0, target_length - out.shape[0]))[0] return out.to(self.device) def set_continuation_count(self, sub_duration_ratio, current_iter): self.continuation_count = int(self.chroma_len * sub_duration_ratio * current_iter) @torch.no_grad() def _get_wav_embedding(self, x: tp.Union[WavCondition, WavChordTextCondition]) -> torch.Tensor: """Get the wav embedding from the WavCondition. The conditioner will either extract the embedding on-the-fly computing it from the condition wav directly or will rely on the embedding cache to load the pre-computed embedding if relevant. 
""" if isinstance(x, WavCondition): sampled_wav: tp.Optional[torch.Tensor] = None if not self.training and self.eval_wavs is not None: warn_once(logger, "Using precomputed evaluation wavs!") sampled_wav = self._sample_eval_wavs(len(x.wav)) no_undefined_paths = all(p is not None for p in x.path) no_nullified_cond = x.wav.shape[-1] > 1 if sampled_wav is not None: chroma = self._compute_wav_embedding(sampled_wav, self.sample_rate) # print("111111") elif self.cache is not None and no_undefined_paths and no_nullified_cond: paths = [Path(p) for p in x.path if p is not None] chroma = self.cache.get_embed_from_cache(paths, x) # print("222222") #Works here else: assert all(sr == x.sample_rate[0] for sr in x.sample_rate), "All sample rates in batch should be equal." chroma = self._compute_wav_embedding(x.wav, x.sample_rate[0]) # print("333333") #and here in training else: chromas = [] for wav, bpm, meter in zip(x.wav, x.bpm, x.meter): chroma = torch.zeros([self.chroma_len, self.dim]) count = 0 offset = 0 stext = wav.split(" ") barsec = 60/(bpm/meter) timebin = barsec * self.bar2chromabin while count < self.chroma_len: for tokens in stext: if count >= self.chroma_len: break stoken = tokens.split(',') for token in stoken: off_timebin = timebin + offset rounded_timebin = round(off_timebin) offset = off_timebin - rounded_timebin offset = offset/len(stoken) add_step = rounded_timebin//len(stoken) mhot = self.chords.chord(token) rolled = np.roll(mhot[2], mhot[0]) for i in range(count, count + add_step): if self.continuation_count > 0: self.continuation_count -= 1 continue if count >= self.chroma_len: break chroma[i] = torch.Tensor(rolled) count += 1 chromas.append(chroma) chroma = torch.stack(chromas)*self.chroma_coefficient if self.match_len_on_eval: B, T, C = chroma.shape if T > self.chroma_len: chroma = chroma[:, :self.chroma_len] logger.debug(f"Chroma was truncated to match length! ({T} -> {chroma.shape[1]})") elif T < self.chroma_len: n_repeat = int(math.ceil(self.chroma_len / T)) chroma = chroma.repeat(1, n_repeat, 1) chroma = chroma[:, :self.chroma_len] logger.debug(f"Chroma was repeated to match length! ({T} -> {chroma.shape[1]})") return chroma def tokenize(self, x: tp.Union[WavCondition, WavChordTextCondition]) -> tp.Union[WavCondition, WavChordTextCondition]: if isinstance(x, WavCondition): wav, length, sample_rate, path, seek_time = x assert length is not None return WavCondition(wav.to(self.device), length.to(self.device), sample_rate, path, seek_time) else: wav, length, sample_rate, path, seek_time, bpm, meter = x return WavChordTextCondition(wav, length.to(self.device), sample_rate, path, seek_time, bpm, meter) def forward(self, x: WavCondition) -> ConditionType: """Extract condition embedding and mask from a waveform and its metadata. Args: x (WavCondition): Waveform condition containing raw waveform and metadata. 
Returns: ConditionType: a dense vector representing the conditioning along with its mask """ wav, lengths, *_ = x with torch.no_grad(): embeds = self._get_wav_embedding(x) #chroma embeds = embeds.to(self.output_proj.weight) # embeds = embeds * (torch.rand(embeds.shape).to(self.device) * 0.3) embeds = self.output_proj(embeds) if self.match_len_on_eval: if lengths is not None: for i in range(len(lengths)): if lengths[i] > 0 and lengths[i] < self.duration * self.sample_rate: lengths[i] = torch.Tensor([(self.duration+1) * self.sample_rate]) lengths = lengths / self._downsampling_factor() mask = length_to_mask(lengths, max_len=embeds.shape[1]).int() # type: ignore else: mask = torch.ones_like(embeds) else: if lengths is not None: lengths = lengths / self._downsampling_factor() mask = length_to_mask(lengths, max_len=embeds.shape[1]).int() # type: ignore else: mask = torch.ones_like(embeds) embeds = (embeds.to(self.device) * mask.unsqueeze(2).to(self.device)) return embeds.to(self.device), mask.to(self.device) class JointEmbeddingConditioner(BaseConditioner): """Joint embedding conditioning supporting both audio or text conditioning. Args: dim (int): Dimension. output_dim (int): Output dimension. device (str): Device. attribute (str): Attribute used by the conditioner. autocast_dtype (str): Autocast for the conditioner. quantize (bool): Whether to quantize the CLAP embedding. n_q (int): Number of residual quantizers (used if quantize is true). bins (int): Quantizers' codebooks size (used if quantize is true). kwargs: Additional parameters for residual vector quantizer. """ def __init__(self, dim: int, output_dim: int, device: str, attribute: str, autocast_dtype: tp.Optional[str] = 'float32', quantize: bool = True, n_q: int = 12, bins: int = 1024, **kwargs): super().__init__(dim=dim, output_dim=output_dim) self.device = device self.attribute = attribute if autocast_dtype is None or device == 'cpu': self.autocast = TorchAutocast(enabled=False) logger.warning("JointEmbeddingConditioner has no autocast, this might lead to NaN.") else: dtype = getattr(torch, autocast_dtype) assert isinstance(dtype, torch.dtype) logger.info(f"JointEmbeddingConditioner will be evaluated with autocast as {autocast_dtype}.") self.autocast = TorchAutocast(enabled=True, device_type=self.device, dtype=dtype) # residual vector quantizer to discretize the conditioned embedding self.quantizer: tp.Optional[ResidualVectorQuantizer] = None if quantize: self.quantizer = ResidualVectorQuantizer(dim, n_q=n_q, bins=bins, **kwargs) def _get_embed(self, x: JointEmbedCondition) -> tp.Tuple[torch.Tensor, torch.Tensor]: """Get joint embedding in latent space from the inputs. Returns: tuple[torch.Tensor, torch.Tensor]: Tensor for the latent embedding and corresponding empty indexes. 
""" raise NotImplementedError() def forward(self, x: JointEmbedCondition) -> ConditionType: with self.autocast: embed, empty_idx = self._get_embed(x) if self.quantizer is not None: embed = embed.view(-1, self.dim, 1) q_res = self.quantizer(embed, frame_rate=1) out_embed = q_res.x.view(-1, self.dim) else: out_embed = embed out_embed = self.output_proj(out_embed).view(-1, 1, self.output_dim) mask = torch.ones(*out_embed.shape[:2], device=out_embed.device) mask[empty_idx, :] = 0 # zero-out index where the input is non-existant out_embed = (out_embed * mask.unsqueeze(-1)) return out_embed, mask def tokenize(self, x: JointEmbedCondition) -> JointEmbedCondition: return x class CLAPEmbeddingConditioner(JointEmbeddingConditioner): """Joint Embedding conditioner based on pre-trained CLAP model. This CLAP-based conditioner supports a caching mechanism over the computed embeddings for faster training. Args: dim (int): Dimension. output_dim (int): Output dimension. device (str): Device. attribute (str): Attribute used by the conditioner. quantize (bool): Whether to quantize the CLAP embedding. n_q (int): Number of residual quantizers (used if quantize is true). bins (int): Quantizers' codebooks size (used if quantize is true). checkpoint (str): Path to CLAP checkpoint. model_arch (str): CLAP model architecture. enable_fusion (bool): Enable fusion for CLAP model. sample_rate (int): Sample rate used by CLAP model. max_audio_length (float): Maximum audio length for CLAP model. audio_stride (float): Stride to use for getting a CLAP embedding on the full sequence. normalize (bool): Whether to normalize the CLAP embedding. text_p (float): Probability of using text representation instead of audio at train time. batch_size (Optional[int]): Batch size for CLAP embedding computation. autocast_dtype (str): Autocast for the conditioner. cache_path (Optional[str]): Path for pre-computed embeddings caching. kwargs: Additional parameters for residual vector quantizer. """ def __init__(self, dim: int, output_dim: int, device: str, attribute: str, quantize: bool, n_q: int, bins: int, checkpoint: tp.Union[str, Path], model_arch: str, enable_fusion: bool, sample_rate: int, max_audio_length: int, audio_stride: int, normalize: bool, text_p: bool, batch_size: tp.Optional[int] = None, autocast_dtype: tp.Optional[str] = 'float32', cache_path: tp.Optional[str] = None, **kwargs): try: except ImportError: raise ImportError("Please install CLAP to use the CLAPEmbeddingConditioner: 'pip install laion_clap'") warnings.warn("Sample rate for CLAP conditioner was fixed in version v1.1.0, (from 44.1 to 48 kHz). 
" "Please retrain all models.") checkpoint = AudioCraftEnvironment.resolve_reference_path(checkpoint) clap_tokenize = RobertaTokenizer.from_pretrained('roberta-base') clap_model = laion_clap.CLAP_Module(enable_fusion=enable_fusion, amodel=model_arch) load_clap_state_dict(clap_model, checkpoint) clap_model.eval() clap_model.to(device) super().__init__(dim=dim, output_dim=output_dim, device=device, attribute=attribute, autocast_dtype=autocast_dtype, quantize=quantize, n_q=n_q, bins=bins, **kwargs) self.checkpoint = checkpoint self.enable_fusion = enable_fusion self.model_arch = model_arch self.clap: laion_clap.CLAP_Module self.clap_tokenize: RobertaTokenizer self.clap_sample_rate = sample_rate self.clap_max_frames = int(self.clap_sample_rate * max_audio_length) self.clap_stride = int(self.clap_sample_rate * audio_stride) self.batch_size = batch_size or 1 self.normalize = normalize self.text_p = text_p self.__dict__['clap_tokenize'] = clap_tokenize self.__dict__['clap'] = clap_model self.wav_cache, self.text_cache = None, None if cache_path is not None: self.wav_cache = EmbeddingCache(Path(cache_path) / 'wav', self.device, compute_embed_fn=self._get_wav_embedding_for_cache, extract_embed_fn=self._extract_wav_embedding_chunk) self.text_cache = EmbeddingCache(Path(cache_path) / 'text', self.device, compute_embed_fn=self._get_text_embedding_for_cache) def _tokenizer(self, texts: tp.Union[str, tp.List[str]]) -> dict: # we use the default params from CLAP module here as well return self.clap_tokenize(texts, padding="max_length", truncation=True, max_length=77, return_tensors="pt") def _compute_text_embedding(self, text: tp.List[str]) -> torch.Tensor: """Compute text embedding from CLAP model on a given a batch of text. Args: text (list[str]): List of text for the batch, with B items. Returns: torch.Tensor: CLAP embedding derived from text, of shape [B, 1, D], with D the CLAP embedding dimension. """ with torch.no_grad(): embed = self.clap.get_text_embedding(text, tokenizer=self._tokenizer, use_tensor=True) return embed.view(embed.size(0), 1, embed.size(-1)) def _get_text_embedding_for_cache(self, path: tp.Union[Path, str], x: JointEmbedCondition, idx: int) -> torch.Tensor: """Get text embedding function for the cache.""" text = x.text[idx] text = text if text is not None else "" return self._compute_text_embedding([text])[0] def _preprocess_wav(self, wav: torch.Tensor, length: torch.Tensor, sample_rates: tp.List[int]) -> torch.Tensor: """Preprocess wav to expected format by CLAP model. Args: wav (torch.Tensor): Audio wav, of shape [B, C, T]. length (torch.Tensor): Actual length of the audio for each item in the batch, of shape [B]. sample_rates (list[int]): Sample rates for each sample in the batch Returns: torch.Tensor: Audio wav of shape [B, T]. """ assert wav.dim() == 3, "Expecting wav to be [B, C, T]" if sample_rates is not None: _wav = [] for i, audio in enumerate(wav): sr = sample_rates[i] audio = convert_audio(audio, from_rate=sr, to_rate=self.clap_sample_rate, to_channels=1) _wav.append(audio) wav = torch.stack(_wav, dim=0) wav = wav.mean(dim=1) return wav def _compute_wav_embedding(self, wav: torch.Tensor, length: torch.Tensor, sample_rates: tp.List[int], reduce_mean: bool = False) -> torch.Tensor: """Compute audio wave embedding from CLAP model. Since CLAP operates on a fixed sequence length audio inputs and we need to process longer audio sequences, we calculate the wav embeddings on `clap_max_frames` windows with `clap_stride`-second stride and average the resulting embeddings. 
Args: wav (torch.Tensor): Audio wav, of shape [B, C, T]. length (torch.Tensor): Actual length of the audio for each item in the batch, of shape [B]. sample_rates (list[int]): Sample rates for each sample in the batch. reduce_mean (bool): Whether to get the average tensor. Returns: torch.Tensor: Audio embedding of shape [B, F, D], F being the number of chunks, D the dimension. """ with torch.no_grad(): wav = self._preprocess_wav(wav, length, sample_rates) B, T = wav.shape if T >= self.clap_max_frames: wav = wav.unfold(-1, self.clap_max_frames, self.clap_stride) # [B, F, T] else: wav = wav.view(-1, 1, T) # [B, F, T] with F=1 wav = einops.rearrange(wav, 'b f t -> (b f) t') embed_list = [] for i in range(0, wav.size(0), self.batch_size): _wav = wav[i:i+self.batch_size, ...] _embed = self.clap.get_audio_embedding_from_data(_wav, use_tensor=True) embed_list.append(_embed) embed = torch.cat(embed_list, dim=0) embed = einops.rearrange(embed, '(b f) d -> b f d', b=B) if reduce_mean: embed = embed.mean(dim=1, keepdim=True) return embed # [B, F, D] with F=1 if reduce_mean is True def _get_wav_embedding_for_cache(self, path: tp.Union[str, Path], x: JointEmbedCondition, idx: int) -> torch.Tensor: """Compute audio wave embedding for the cache. The embedding is computed on a given audio read from file. Args: path (str or Path): Path to the full audio file. Returns: torch.Tensor: Single-item tensor of shape [F, D], F being the number of chunks, D the dimension. """ wav, sr = audio_read(path) # [C, T] wav = wav.unsqueeze(0).to(self.device) # [1, C, T] wav_len = torch.LongTensor([wav.shape[-1]]).to(self.device) embed = self._compute_wav_embedding(wav, wav_len, [sr], reduce_mean=False) # [B, F, D] return embed.squeeze(0) # [F, D] def _extract_wav_embedding_chunk(self, full_embed: torch.Tensor, x: JointEmbedCondition, idx: int) -> torch.Tensor: """Extract the chunk of embedding matching the seek_time and length from the full CLAP audio embedding. Args: full_embed (torch.Tensor): CLAP embedding computed on the full wave, of shape [F, D]. x (JointEmbedCondition): Joint embedding condition for the full batch. idx (int): Index considered for the given embedding to extract. Returns: torch.Tensor: Wav embedding averaged on sliding window, of shape [1, D]. """ sample_rate = x.sample_rate[idx] seek_time = x.seek_time[idx] seek_time = 0. if seek_time is None else seek_time clap_stride = int(self.clap_stride / self.clap_sample_rate) * sample_rate end_seek_time = seek_time + self.clap_max_frames / self.clap_sample_rate start_offset = int(seek_time * sample_rate // clap_stride) end_offset = int(end_seek_time * sample_rate // clap_stride) wav_embed = full_embed[start_offset:end_offset, ...] 
wav_embed = wav_embed.mean(dim=0, keepdim=True) return wav_embed.to(self.device) # [F, D] def _get_text_embedding(self, x: JointEmbedCondition) -> torch.Tensor: """Get CLAP embedding from a batch of text descriptions.""" no_nullified_cond = x.wav.shape[-1] > 1 # we don't want to read from cache when condition dropout if self.text_cache is not None and no_nullified_cond: assert all(p is not None for p in x.path), "Cache requires all JointEmbedCondition paths to be provided" paths = [Path(p) for p in x.path if p is not None] embed = self.text_cache.get_embed_from_cache(paths, x) else: text = [xi if xi is not None else "" for xi in x.text] embed = self._compute_text_embedding(text) if self.normalize: embed = torch.nn.functional.normalize(embed, p=2.0, dim=-1) return embed def _get_wav_embedding(self, x: JointEmbedCondition) -> torch.Tensor: """Get CLAP embedding from a batch of audio tensors (and corresponding sample rates).""" no_undefined_paths = all(p is not None for p in x.path) no_nullified_cond = x.wav.shape[-1] > 1 # we don't want to read from cache when condition dropout if self.wav_cache is not None and no_undefined_paths and no_nullified_cond: paths = [Path(p) for p in x.path if p is not None] embed = self.wav_cache.get_embed_from_cache(paths, x) else: embed = self._compute_wav_embedding(x.wav, x.length, x.sample_rate, reduce_mean=True) if self.normalize: embed = torch.nn.functional.normalize(embed, p=2.0, dim=-1) return embed def tokenize(self, x: JointEmbedCondition) -> JointEmbedCondition: # Trying to limit as much as possible sync points when the cache is warm. no_undefined_paths = all(p is not None for p in x.path) if self.wav_cache is not None and no_undefined_paths: assert all([p is not None for p in x.path]), "Cache requires all JointEmbedCondition paths to be provided" paths = [Path(p) for p in x.path if p is not None] self.wav_cache.populate_embed_cache(paths, x) if self.text_cache is not None and no_undefined_paths: assert all([p is not None for p in x.path]), "Cache requires all JointEmbedCondition paths to be provided" paths = [Path(p) for p in x.path if p is not None] self.text_cache.populate_embed_cache(paths, x) return x def _get_embed(self, x: JointEmbedCondition) -> tp.Tuple[torch.Tensor, torch.Tensor]: """Extract shared latent representation from either the wav or the text using CLAP.""" # decide whether to use text embedding at train time or not use_text_embed = random.random() < self.text_p if self.training and not use_text_embed: embed = self._get_wav_embedding(x) empty_idx = torch.LongTensor([]) # we assume we always have the audio wav else: embed = self._get_text_embedding(x) empty_idx = torch.LongTensor([i for i, xi in enumerate(x.text) if xi is None or xi == ""]) return embed, empty_idx def dropout_condition(sample: ConditioningAttributes, condition_type: str, condition: str) -> ConditioningAttributes: """Utility function for nullifying an attribute inside an ConditioningAttributes object. If the condition is of type "wav", then nullify it using `nullify_condition` function. If the condition is of any other type, set its value to None. Works in-place. """ if condition_type not in ['text', 'wav', 'joint_embed']: raise ValueError( "dropout_condition got an unexpected condition type!" f" expected 'text', 'wav' or 'joint_embed' but got '{condition_type}'" ) if condition not in getattr(sample, condition_type): raise ValueError( "dropout_condition received an unexpected condition!" 
f" expected wav={sample.wav.keys()} and text={sample.text.keys()}" f" but got '{condition}' of type '{condition_type}'!" ) if condition_type == 'wav': wav_cond = sample.wav[condition] sample.wav[condition] = nullify_wav(wav_cond) elif condition_type == 'joint_embed': embed = sample.joint_embed[condition] sample.joint_embed[condition] = nullify_joint_embed(embed) else: sample.text[condition] = None return sample class DropoutModule(nn.Module): """Base module for all dropout modules.""" def __init__(self, seed: int = 1234): super().__init__() self.rng = torch.Generator() self.rng.manual_seed(seed) class AttributeDropout(DropoutModule): """Dropout with a given probability per attribute. This is different from the behavior of ClassifierFreeGuidanceDropout as this allows for attributes to be dropped out separately. For example, "artist" can be dropped while "genre" remains. This is in contrast to ClassifierFreeGuidanceDropout where if "artist" is dropped "genre" must also be dropped. Args: p (tp.Dict[str, float]): A dict mapping between attributes and dropout probability. For example: ... "genre": 0.1, "artist": 0.5, "wav": 0.25, ... active_on_eval (bool, optional): Whether the dropout is active at eval. Default to False. seed (int, optional): Random seed. """ def __init__(self, p: tp.Dict[str, tp.Dict[str, float]], active_on_eval: bool = False, seed: int = 1234): super().__init__(seed=seed) self.active_on_eval = active_on_eval # construct dict that return the values from p otherwise 0 self.p = {} for condition_type, probs in p.items(): self.p[condition_type] = defaultdict(lambda: 0, probs) def forward(self, samples: tp.List[ConditioningAttributes]) -> tp.List[ConditioningAttributes]: """ Args: samples (list[ConditioningAttributes]): List of conditions. Returns: list[ConditioningAttributes]: List of conditions after certain attributes were set to None. """ if not self.training and not self.active_on_eval: return samples samples = deepcopy(samples) for condition_type, ps in self.p.items(): # for condition types [text, wav] for condition, p in ps.items(): # for attributes of each type (e.g., [artist, genre]) if torch.rand(1, generator=self.rng).item() < p: for sample in samples: dropout_condition(sample, condition_type, condition) return samples def __repr__(self): return f"AttributeDropout({dict(self.p)})" class ClassifierFreeGuidanceDropout(DropoutModule): """Classifier Free Guidance dropout. All attributes are dropped with the same probability. Args: p (float): Probability to apply condition dropout during training. seed (int): Random seed. """ def __init__(self, p: float, seed: int = 1234): super().__init__(seed=seed) self.p = p def forward(self, samples: tp.List[ConditioningAttributes]) -> tp.List[ConditioningAttributes]: """ Args: samples (list[ConditioningAttributes]): List of conditions. Returns: list[ConditioningAttributes]: List of conditions after all attributes were set to None. """ if not self.training: return samples # decide on which attributes to drop in a batched fashion drop = torch.rand(1, generator=self.rng).item() < self.p if not drop: return samples # nullify conditions of all attributes samples = deepcopy(samples) for condition_type in ["wav", "text"]: for sample in samples: for condition in sample.attributes[condition_type]: dropout_condition(sample, condition_type, condition) return samples def __repr__(self): return f"ClassifierFreeGuidanceDropout(p={self.p})" class ConditioningProvider(nn.Module): """Prepare and provide conditions given all the supported conditioners. 
Args: conditioners (dict): Dictionary of conditioners. device (torch.device or str, optional): Device for conditioners and output condition types. """ def __init__(self, conditioners: tp.Dict[str, BaseConditioner], device: tp.Union[torch.device, str] = "cpu"): super().__init__() self.device = device self.conditioners = nn.ModuleDict(conditioners) @property def joint_embed_conditions(self): return [m.attribute for m in self.conditioners.values() if isinstance(m, JointEmbeddingConditioner)] @property def has_joint_embed_conditions(self): return len(self.joint_embed_conditions) > 0 @property def text_conditions(self): return [k for k, v in self.conditioners.items() if isinstance(v, TextConditioner)] @property def wav_conditions(self): return [k for k, v in self.conditioners.items() if isinstance(v, WaveformConditioner)] @property def has_wav_condition(self): return len(self.wav_conditions) > 0 def tokenize(self, inputs: tp.List[ConditioningAttributes]) -> tp.Dict[str, tp.Any]: """Match attributes/wavs with existing conditioners in self, and compute tokenize them accordingly. This should be called before starting any real GPU work to avoid synchronization points. This will return a dict matching conditioner names to their arbitrary tokenized representations. Args: inputs (list[ConditioningAttributes]): List of ConditioningAttributes objects containing text and wav conditions. """ assert all([isinstance(x, ConditioningAttributes) for x in inputs]), ( "Got unexpected types input for conditioner! should be tp.List[ConditioningAttributes]", f" but types were {set([type(x) for x in inputs])}" ) output = {} text = self._collate_text(inputs) wavs = self._collate_wavs(inputs) joint_embeds = self._collate_joint_embeds(inputs) assert set(text.keys() | wavs.keys() | joint_embeds.keys()).issubset(set(self.conditioners.keys())), ( f"Got an unexpected attribute! Expected {self.conditioners.keys()}, ", f"got {text.keys(), wavs.keys(), joint_embeds.keys()}" ) for attribute, batch in chain(text.items(), wavs.items(), joint_embeds.items()): output[attribute] = self.conditioners[attribute].tokenize(batch) return output def forward(self, tokenized: tp.Dict[str, tp.Any]) -> tp.Dict[str, ConditionType]: """Compute pairs of `(embedding, mask)` using the configured conditioners and the tokenized representations. The output is for example: { "genre": (torch.Tensor([B, 1, D_genre]), torch.Tensor([B, 1])), "description": (torch.Tensor([B, T_desc, D_desc]), torch.Tensor([B, T_desc])), ... } Args: tokenized (dict): Dict of tokenized representations as returned by `tokenize()`. """ output = {} for attribute, inputs in tokenized.items(): condition, mask = self.conditioners[attribute](inputs) output[attribute] = (condition, mask) return output def _collate_text(self, samples: tp.List[ConditioningAttributes]) -> tp.Dict[str, tp.List[tp.Optional[str]]]: """Given a list of ConditioningAttributes objects, compile a dictionary where the keys are the attributes and the values are the aggregated input per attribute. For example: Input: [ ConditioningAttributes(text={"genre": "Rock", "description": "A rock song with a guitar solo"}, wav=...), ConditioningAttributes(text={"genre": "Hip-hop", "description": "A hip-hop verse"}, wav=...), ] Output: { "genre": ["Rock", "Hip-hop"], "description": ["A rock song with a guitar solo", "A hip-hop verse"] } Args: samples (list of ConditioningAttributes): List of ConditioningAttributes samples. Returns: dict[str, list[str, optional]]: A dictionary mapping an attribute name to text batch. 
""" out: tp.Dict[str, tp.List[tp.Optional[str]]] = defaultdict(list) texts = [x.text for x in samples] for text in texts: for condition in self.text_conditions: out[condition].append(text[condition]) return out def _collate_wavs(self, samples: tp.List[ConditioningAttributes]) -> tp.Dict[str, tp.Union[WavCondition, WavChordTextCondition]]: """Generate a dict where the keys are attributes by which we fetch similar wavs, and the values are Tensors of wavs according to said attributes. *Note*: by the time the samples reach this function, each sample should have some waveform inside the "wav" attribute. It should be either: 1. A real waveform 2. A null waveform due to the sample having no similar waveforms (nullified by the dataset) 3. A null waveform due to it being dropped in a dropout module (nullified by dropout) Args: samples (list of ConditioningAttributes): List of ConditioningAttributes samples. Returns: dict[str, WavCondition]: A dictionary mapping an attribute name to wavs. """ wavs = defaultdict(list) lengths = defaultdict(list) sample_rates = defaultdict(list) paths = defaultdict(list) seek_times = defaultdict(list) bpms = defaultdict(list) meters = defaultdict(list) out: tp.Dict[str, WavCondition] = {} for sample in samples: for attribute in self.wav_conditions: if isinstance(sample.wav[attribute], WavCondition): wav, length, sample_rate, path, seek_time = sample.wav[attribute] assert wav.dim() == 3, f"Got wav with dim={wav.dim()}, but expected 3 [1, C, T]" assert wav.size(0) == 1, f"Got wav [B, C, T] with shape={wav.shape}, but expected B == 1" # mono-channel conditioning wav = wav.mean(1, keepdim=True) # [1, 1, T] wavs[attribute].append(wav.flatten()) # [T] else: wav, length, sample_rate, path, seek_time, bpm, meter = sample.wav[attribute] wavs[attribute].append(wav[0]) bpms[attribute].append(bpm[0]) meters[attribute].append(meter[0]) lengths[attribute].append(length) sample_rates[attribute].extend(sample_rate) paths[attribute].extend(path) seek_times[attribute].extend(seek_time) # stack all wavs to a single tensor for attribute in self.wav_conditions: if isinstance(wavs[attribute][0], torch.Tensor):
stacked_wav, _ = collate(wavs[attribute], dim=0)
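The gold completion above stacks a list of variable-length waveforms with a collate helper whose implementation is not part of this record. Below is a minimal sketch of what such a helper typically does, under the assumption that it right-pads to the longest item and also returns the original lengths; the name pad_and_stack and the return convention are placeholders, not the repository's API.

import typing as tp
import torch
import torch.nn.functional as F

def pad_and_stack(wavs: tp.List[torch.Tensor]) -> tp.Tuple[torch.Tensor, torch.Tensor]:
    # Right-pad each 1D waveform with zeros to the longest length, then stack
    # into a [B, T] batch; keep the original lengths around for masking later.
    lengths = torch.tensor([w.shape[-1] for w in wavs], dtype=torch.long)
    max_len = int(lengths.max())
    padded = [F.pad(w, (0, max_len - w.shape[-1])) for w in wavs]
    return torch.stack(padded, dim=0), lengths

# Three mono waveforms of different lengths collate into a [3, 48000] batch.
stacked, lengths = pad_and_stack([torch.randn(32000), torch.randn(48000), torch.randn(16000)])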
11
2023-10-09 09:55:24+00:00
16k
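The CLAP conditioner in the record above embeds long waveforms by cutting them into fixed-size windows with a stride (torch unfold), embedding each window, and averaging the results. The chunk-and-average pattern is small enough to isolate; this sketch uses a toy embedding function in place of the real CLAP call, and the window and stride sizes are arbitrary examples rather than the record's settings.

import torch

def windowed_embedding(wav, win, stride, embed_fn):
    # wav: [B, T] mono audio. Split into overlapping windows of length win,
    # embed every window independently, then average the per-window embeddings.
    B, T = wav.shape
    chunks = wav.unfold(-1, win, stride) if T >= win else wav.view(B, 1, T)   # [B, F, win]
    flat = chunks.reshape(-1, chunks.shape[-1])                               # [B * F, win]
    embeds = embed_fn(flat)                                                   # [B * F, D]
    return embeds.view(B, -1, embeds.shape[-1]).mean(dim=1)                   # [B, D]

# Toy embedding: per-window mean and std instead of a real CLAP model.
toy_embed = lambda x: torch.stack([x.mean(-1), x.std(-1)], dim=-1)
out = windowed_embedding(torch.randn(2, 480000), win=48000, stride=24000, embed_fn=toy_embed)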
Texaser/MTN
nerf/network_grid.py
[ { "identifier": "trunc_exp", "path": "activation.py", "snippet": "class _trunc_exp(Function):\n def forward(ctx, x):\n def backward(ctx, g):\ndef biased_softplus(x, bias=0):" }, { "identifier": "NeRFRenderer", "path": "nerf/renderer.py", "snippet": "class NeRFRenderer(nn.Module):\n def __init__(self, opt):\n super().__init__()\n\n self.opt = opt\n self.bound = opt.bound\n self.cascade = 1 + math.ceil(math.log2(opt.bound))\n self.grid_size = 128\n self.max_level = None\n self.dmtet = opt.dmtet\n self.cuda_ray = opt.cuda_ray\n self.taichi_ray = opt.taichi_ray\n self.min_near = opt.min_near\n self.density_thresh = opt.density_thresh\n self.train_step = 0\n self.max_train_step = 6000\n # prepare aabb with a 6D tensor (xmin, ymin, zmin, xmax, ymax, zmax)\n # NOTE: aabb (can be rectangular) is only used to generate points, we still rely on bound (always cubic) to calculate density grid and hashing.\n aabb_train = torch.FloatTensor([-opt.bound, -opt.bound, -opt.bound, opt.bound, opt.bound, opt.bound])\n aabb_infer = aabb_train.clone()\n self.register_buffer('aabb_train', aabb_train)\n self.register_buffer('aabb_infer', aabb_infer)\n\n self.glctx = None\n\n # extra state for cuda raymarching\n if self.cuda_ray:\n # density grid\n density_grid = torch.zeros([self.cascade, self.grid_size ** 3]) # [CAS, H * H * H]\n density_bitfield = torch.zeros(self.cascade * self.grid_size ** 3 // 8, dtype=torch.uint8) # [CAS * H * H * H // 8]\n self.register_buffer('density_grid', density_grid)\n self.register_buffer('density_bitfield', density_bitfield)\n self.mean_density = 0\n self.iter_density = 0\n \n if self.opt.dmtet:\n # load dmtet vertices\n tets = np.load('tets/{}_tets.npz'.format(self.opt.tet_grid_size))\n self.verts = - torch.tensor(tets['vertices'], dtype=torch.float32, device='cuda') * 2 # covers [-1, 1]\n self.indices = torch.tensor(tets['indices'], dtype=torch.long, device='cuda')\n self.tet_scale = torch.tensor([1, 1, 1], dtype=torch.float32, device='cuda')\n self.dmtet = DMTet('cuda')\n\n # vert sdf and deform\n sdf = torch.nn.Parameter(torch.zeros_like(self.verts[..., 0]), requires_grad=True)\n self.register_parameter('sdf', sdf)\n deform = torch.nn.Parameter(torch.zeros_like(self.verts), requires_grad=True)\n self.register_parameter('deform', deform)\n\n edges = torch.tensor([0,1, 0,2, 0,3, 1,2, 1,3, 2,3], dtype=torch.long, device=\"cuda\") # six edges for each tetrahedron.\n all_edges = self.indices[:,edges].reshape(-1,2) # [M * 6, 2]\n all_edges_sorted = torch.sort(all_edges, dim=1)[0]\n self.all_edges = torch.unique(all_edges_sorted, dim=0)\n\n if self.opt.h <= 2048 and self.opt.w <= 2048:\n self.glctx = dr.RasterizeCudaContext()\n else:\n self.glctx = dr.RasterizeGLContext()\n \n if self.taichi_ray:\n from einops import rearrange\n from taichi_modules import RayMarcherTaichi\n from taichi_modules import VolumeRendererTaichi\n from taichi_modules import RayAABBIntersector as RayAABBIntersectorTaichi\n from taichi_modules import raymarching_test as raymarching_test_taichi\n from taichi_modules import composite_test as composite_test_fw\n from taichi_modules import packbits as packbits_taichi\n self.rearrange = rearrange\n self.packbits_taichi = packbits_taichi\n self.ray_aabb_intersector = RayAABBIntersectorTaichi\n self.raymarching_test_taichi = raymarching_test_taichi\n self.composite_test_fw = composite_test_fw\n self.ray_marching = RayMarcherTaichi(batch_size=4096) # TODO: hard encoded batch size\n self.volume_render = VolumeRendererTaichi(batch_size=4096) # TODO: hard encoded batch 
size\n # density grid\n density_grid = torch.zeros([self.cascade, self.grid_size ** 3]) # [CAS, H * H * H]\n density_bitfield = torch.zeros(self.cascade * self.grid_size ** 3 // 8, dtype=torch.uint8) # [CAS * H * H * H // 8]\n self.register_buffer('density_grid', density_grid)\n self.register_buffer('density_bitfield', density_bitfield)\n self.mean_density = 0\n self.iter_density = 0\n \n @torch.no_grad()\n def density_blob(self, x):\n # x: [B, N, 3]\n \n d = (x ** 2).sum(-1)\n \n if self.opt.density_activation == 'exp':\n g = self.opt.blob_density * torch.exp(- d / (2 * self.opt.blob_radius ** 2))\n else:\n g = self.opt.blob_density * (1 - torch.sqrt(d) / self.opt.blob_radius)\n\n return g\n \n def forward(self, x, d):\n raise NotImplementedError()\n\n def density(self, x):\n raise NotImplementedError()\n\n def reset_extra_state(self):\n if not (self.cuda_ray or self.taichi_ray):\n return \n # density grid\n self.density_grid.zero_()\n self.mean_density = 0\n self.iter_density = 0\n\n @torch.no_grad()\n def export_mesh(self, path, resolution=None, decimate_target=-1, S=128):\n\n if self.opt.dmtet:\n\n sdf = self.sdf\n deform = torch.tanh(self.deform) / self.opt.tet_grid_size\n\n vertices, triangles = self.dmtet(self.verts + deform, sdf, self.indices)\n\n vertices = vertices.detach().cpu().numpy()\n triangles = triangles.detach().cpu().numpy()\n\n else:\n\n if resolution is None:\n resolution = self.grid_size\n\n if self.cuda_ray:\n density_thresh = min(self.mean_density, self.density_thresh) \\\n if np.greater(self.mean_density, 0) else self.density_thresh\n else:\n density_thresh = self.density_thresh\n \n # TODO: use a larger thresh to extract a surface mesh from the density field, but this value is very empirical...\n if self.opt.density_activation == 'softplus':\n density_thresh = density_thresh * 25\n \n sigmas = np.zeros([resolution, resolution, resolution], dtype=np.float32)\n\n # query\n X = torch.linspace(-1, 1, resolution).split(S)\n Y = torch.linspace(-1, 1, resolution).split(S)\n Z = torch.linspace(-1, 1, resolution).split(S)\n\n for xi, xs in enumerate(X):\n for yi, ys in enumerate(Y):\n for zi, zs in enumerate(Z):\n xx, yy, zz = custom_meshgrid(xs, ys, zs)\n pts = torch.cat([xx.reshape(-1, 1), yy.reshape(-1, 1), zz.reshape(-1, 1)], dim=-1) # [S, 3]\n val = self.density(pts.to(self.aabb_train.device))\n sigmas[xi * S: xi * S + len(xs), yi * S: yi * S + len(ys), zi * S: zi * S + len(zs)] = val['sigma'].reshape(len(xs), len(ys), len(zs)).detach().cpu().numpy() # [S, 1] --> [x, y, z]\n\n print(f'[INFO] marching cubes thresh: {density_thresh} ({sigmas.min()} ~ {sigmas.max()})')\n\n vertices, triangles = mcubes.marching_cubes(sigmas, density_thresh)\n vertices = vertices / (resolution - 1.0) * 2 - 1\n\n # clean\n vertices = vertices.astype(np.float32)\n triangles = triangles.astype(np.int32)\n vertices, triangles = clean_mesh(vertices, triangles, remesh=True, remesh_size=0.01)\n \n # decimation\n if decimate_target > 0 and triangles.shape[0] > decimate_target:\n vertices, triangles = decimate_mesh(vertices, triangles, decimate_target)\n\n v = torch.from_numpy(vertices).contiguous().float().to(self.aabb_train.device)\n f = torch.from_numpy(triangles).contiguous().int().to(self.aabb_train.device)\n\n # mesh = trimesh.Trimesh(vertices, triangles, process=False) # important, process=True leads to seg fault...\n # mesh.export(os.path.join(path, f'mesh.ply'))\n\n def _export(v, f, h0=2048, w0=2048, ssaa=1, name=''):\n # v, f: torch Tensor\n device = v.device\n v_np = v.cpu().numpy() # 
[N, 3]\n f_np = f.cpu().numpy() # [M, 3]\n\n print(f'[INFO] running xatlas to unwrap UVs for mesh: v={v_np.shape} f={f_np.shape}')\n\n # unwrap uvs\n import xatlas\n import nvdiffrast.torch as dr\n from sklearn.neighbors import NearestNeighbors\n from scipy.ndimage import binary_dilation, binary_erosion\n\n atlas = xatlas.Atlas()\n atlas.add_mesh(v_np, f_np)\n chart_options = xatlas.ChartOptions()\n chart_options.max_iterations = 4 # for faster unwrap...\n atlas.generate(chart_options=chart_options)\n vmapping, ft_np, vt_np = atlas[0] # [N], [M, 3], [N, 2]\n\n # vmapping, ft_np, vt_np = xatlas.parametrize(v_np, f_np) # [N], [M, 3], [N, 2]\n\n vt = torch.from_numpy(vt_np.astype(np.float32)).float().to(device)\n ft = torch.from_numpy(ft_np.astype(np.int64)).int().to(device)\n\n # render uv maps\n uv = vt * 2.0 - 1.0 # uvs to range [-1, 1]\n uv = torch.cat((uv, torch.zeros_like(uv[..., :1]), torch.ones_like(uv[..., :1])), dim=-1) # [N, 4]\n\n if ssaa > 1:\n h = int(h0 * ssaa)\n w = int(w0 * ssaa)\n else:\n h, w = h0, w0\n \n if self.glctx is None:\n if h <= 2048 and w <= 2048:\n self.glctx = dr.RasterizeCudaContext()\n else:\n self.glctx = dr.RasterizeGLContext()\n\n rast, _ = dr.rasterize(self.glctx, uv.unsqueeze(0), ft, (h, w)) # [1, h, w, 4]\n xyzs, _ = dr.interpolate(v.unsqueeze(0), rast, f) # [1, h, w, 3]\n mask, _ = dr.interpolate(torch.ones_like(v[:, :1]).unsqueeze(0), rast, f) # [1, h, w, 1]\n\n # masked query \n xyzs = xyzs.view(-1, 3)\n mask = (mask > 0).view(-1)\n \n feats = torch.zeros(h * w, 3, device=device, dtype=torch.float32)\n\n if mask.any():\n xyzs = xyzs[mask] # [M, 3]\n\n # batched inference to avoid OOM\n all_feats = []\n head = 0\n while head < xyzs.shape[0]:\n tail = min(head + 640000, xyzs.shape[0])\n results_ = self.density(xyzs[head:tail])\n all_feats.append(results_['albedo'].float())\n head += 640000\n\n feats[mask] = torch.cat(all_feats, dim=0)\n \n feats = feats.view(h, w, -1)\n mask = mask.view(h, w)\n\n # quantize [0.0, 1.0] to [0, 255]\n feats = feats.cpu().numpy()\n feats = (feats * 255).astype(np.uint8)\n\n ### NN search as an antialiasing ...\n mask = mask.cpu().numpy()\n\n inpaint_region = binary_dilation(mask, iterations=3)\n inpaint_region[mask] = 0\n\n search_region = mask.copy()\n not_search_region = binary_erosion(search_region, iterations=2)\n search_region[not_search_region] = 0\n\n search_coords = np.stack(np.nonzero(search_region), axis=-1)\n inpaint_coords = np.stack(np.nonzero(inpaint_region), axis=-1)\n\n knn = NearestNeighbors(n_neighbors=1, algorithm='kd_tree').fit(search_coords)\n _, indices = knn.kneighbors(inpaint_coords)\n\n feats[tuple(inpaint_coords.T)] = feats[tuple(search_coords[indices[:, 0]].T)]\n\n feats = cv2.cvtColor(feats, cv2.COLOR_RGB2BGR)\n\n # do ssaa after the NN search, in numpy\n if ssaa > 1:\n feats = cv2.resize(feats, (w0, h0), interpolation=cv2.INTER_LINEAR)\n\n cv2.imwrite(os.path.join(path, f'{name}albedo.png'), feats)\n\n # save obj (v, vt, f /)\n obj_file = os.path.join(path, f'{name}mesh.obj')\n mtl_file = os.path.join(path, f'{name}mesh.mtl')\n\n print(f'[INFO] writing obj mesh to {obj_file}')\n with open(obj_file, \"w\") as fp:\n fp.write(f'mtllib {name}mesh.mtl \\n')\n \n print(f'[INFO] writing vertices {v_np.shape}')\n for v in v_np:\n fp.write(f'v {v[0]} {v[1]} {v[2]} \\n')\n \n print(f'[INFO] writing vertices texture coords {vt_np.shape}')\n for v in vt_np:\n fp.write(f'vt {v[0]} {1 - v[1]} \\n') \n\n print(f'[INFO] writing faces {f_np.shape}')\n fp.write(f'usemtl mat0 \\n')\n for i in range(len(f_np)):\n 
fp.write(f\"f {f_np[i, 0] + 1}/{ft_np[i, 0] + 1} {f_np[i, 1] + 1}/{ft_np[i, 1] + 1} {f_np[i, 2] + 1}/{ft_np[i, 2] + 1} \\n\")\n\n with open(mtl_file, \"w\") as fp:\n fp.write(f'newmtl mat0 \\n')\n fp.write(f'Ka 1.000000 1.000000 1.000000 \\n')\n fp.write(f'Kd 1.000000 1.000000 1.000000 \\n')\n fp.write(f'Ks 0.000000 0.000000 0.000000 \\n')\n fp.write(f'Tr 1.000000 \\n')\n fp.write(f'illum 1 \\n')\n fp.write(f'Ns 0.000000 \\n')\n fp.write(f'map_Kd {name}albedo.png \\n')\n\n _export(v, f)\n\n def run(self, rays_o, rays_d, light_d=None, ambient_ratio=1.0, shading='albedo', bg_color=None, perturb=False, **kwargs):\n # rays_o, rays_d: [B, N, 3]\n # bg_color: [BN, 3] in range [0, 1]\n # return: image: [B, N, 3], depth: [B, N]\n\n prefix = rays_o.shape[:-1]\n rays_o = rays_o.contiguous().view(-1, 3)\n rays_d = rays_d.contiguous().view(-1, 3)\n\n N = rays_o.shape[0] # N = B * N, in fact\n device = rays_o.device\n\n results = {}\n\n # choose aabb\n aabb = self.aabb_train if self.training else self.aabb_infer\n\n # sample steps\n # nears, fars = raymarching.near_far_from_aabb(rays_o, rays_d, aabb, self.min_near)\n # nears.unsqueeze_(-1)\n # fars.unsqueeze_(-1)\n nears, fars = near_far_from_bound(rays_o, rays_d, self.bound, type='sphere', min_near=self.min_near)\n\n # random sample light_d if not provided\n if light_d is None:\n # gaussian noise around the ray origin, so the light always face the view dir (avoid dark face)\n light_d = safe_normalize(rays_o + torch.randn(3, device=rays_o.device)) # [N, 3]\n\n #print(f'nears = {nears.min().item()} ~ {nears.max().item()}, fars = {fars.min().item()} ~ {fars.max().item()}')\n\n z_vals = torch.linspace(0.0, 1.0, self.opt.num_steps, device=device).unsqueeze(0) # [1, T]\n z_vals = z_vals.expand((N, self.opt.num_steps)) # [N, T]\n z_vals = nears + (fars - nears) * z_vals # [N, T], in [nears, fars]\n\n # perturb z_vals\n sample_dist = (fars - nears) / self.opt.num_steps\n if perturb:\n z_vals = z_vals + (torch.rand(z_vals.shape, device=device) - 0.5) * sample_dist\n #z_vals = z_vals.clamp(nears, fars) # avoid out of bounds xyzs.\n\n # generate xyzs\n xyzs = rays_o.unsqueeze(-2) + rays_d.unsqueeze(-2) * z_vals.unsqueeze(-1) # [N, 1, 3] * [N, T, 1] -> [N, T, 3]\n xyzs = torch.min(torch.max(xyzs, aabb[:3]), aabb[3:]) # a manual clip.\n\n #plot_pointcloud(xyzs.reshape(-1, 3).detach().cpu().numpy())\n\n # query SDF and RGB\n density_outputs = self.density(xyzs.reshape(-1, 3))\n\n #sigmas = density_outputs['sigma'].view(N, self.opt.num_steps) # [N, T]\n for k, v in density_outputs.items():\n density_outputs[k] = v.view(N, self.opt.num_steps, -1)\n\n # upsample z_vals (nerf-like)\n if self.opt.upsample_steps > 0:\n with torch.no_grad():\n\n deltas = z_vals[..., 1:] - z_vals[..., :-1] # [N, T-1]\n deltas = torch.cat([deltas, sample_dist * torch.ones_like(deltas[..., :1])], dim=-1)\n\n alphas = 1 - torch.exp(-deltas * density_outputs['sigma'].squeeze(-1)) # [N, T]\n alphas_shifted = torch.cat([torch.ones_like(alphas[..., :1]), 1 - alphas + 1e-15], dim=-1) # [N, T+1]\n weights = alphas * torch.cumprod(alphas_shifted, dim=-1)[..., :-1] # [N, T]\n\n # sample new z_vals\n z_vals_mid = (z_vals[..., :-1] + 0.5 * deltas[..., :-1]) # [N, T-1]\n new_z_vals = sample_pdf(z_vals_mid, weights[:, 1:-1], self.opt.upsample_steps, det=not self.training).detach() # [N, t]\n\n new_xyzs = rays_o.unsqueeze(-2) + rays_d.unsqueeze(-2) * new_z_vals.unsqueeze(-1) # [N, 1, 3] * [N, t, 1] -> [N, t, 3]\n new_xyzs = torch.min(torch.max(new_xyzs, aabb[:3]), aabb[3:]) # a manual clip.\n\n # only 
forward new points to save computation\n new_density_outputs = self.density(new_xyzs.reshape(-1, 3))\n #new_sigmas = new_density_outputs['sigma'].view(N, self.opt.upsample_steps) # [N, t]\n for k, v in new_density_outputs.items():\n new_density_outputs[k] = v.view(N, self.opt.upsample_steps, -1)\n\n # re-order\n z_vals = torch.cat([z_vals, new_z_vals], dim=1) # [N, T+t]\n z_vals, z_index = torch.sort(z_vals, dim=1)\n\n xyzs = torch.cat([xyzs, new_xyzs], dim=1) # [N, T+t, 3]\n xyzs = torch.gather(xyzs, dim=1, index=z_index.unsqueeze(-1).expand_as(xyzs))\n\n for k in density_outputs:\n tmp_output = torch.cat([density_outputs[k], new_density_outputs[k]], dim=1)\n density_outputs[k] = torch.gather(tmp_output, dim=1, index=z_index.unsqueeze(-1).expand_as(tmp_output))\n\n deltas = z_vals[..., 1:] - z_vals[..., :-1] # [N, T+t-1]\n deltas = torch.cat([deltas, sample_dist * torch.ones_like(deltas[..., :1])], dim=-1)\n alphas = 1 - torch.exp(-deltas * density_outputs['sigma'].squeeze(-1)) # [N, T+t]\n alphas_shifted = torch.cat([torch.ones_like(alphas[..., :1]), 1 - alphas + 1e-15], dim=-1) # [N, T+t+1]\n weights = alphas * torch.cumprod(alphas_shifted, dim=-1)[..., :-1] # [N, T+t]\n\n dirs = rays_d.view(-1, 1, 3).expand_as(xyzs)\n light_d = light_d.view(-1, 1, 3).expand_as(xyzs)\n for k, v in density_outputs.items():\n density_outputs[k] = v.view(-1, v.shape[-1])\n\n dirs = safe_normalize(dirs)\n sigmas, rgbs, normals = self(xyzs.reshape(-1, 3), dirs.reshape(-1, 3), light_d.reshape(-1, 3), ratio=ambient_ratio, shading=shading)\n rgbs = rgbs.view(N, -1, 3) # [N, T+t, 3]\n if normals is not None:\n normals = normals.view(N, -1, 3)\n\n # calculate weight_sum (mask)\n weights_sum = weights.sum(dim=-1) # [N]\n \n # calculate depth \n depth = torch.sum(weights * z_vals, dim=-1)\n\n # calculate color\n image = torch.sum(weights.unsqueeze(-1) * rgbs, dim=-2) # [N, 3], in [0, 1]\n\n # mix background color\n if bg_color is None:\n if self.opt.bg_radius > 0:\n # use the bg model to calculate bg_color\n bg_color = self.background(rays_d) # [N, 3]\n else:\n bg_color = 1\n \n image = image + (1 - weights_sum).unsqueeze(-1) * bg_color\n\n image = image.view(*prefix, 3)\n depth = depth.view(*prefix)\n weights_sum = weights_sum.reshape(*prefix)\n\n if self.training:\n if self.opt.lambda_orient > 0 and normals is not None:\n # orientation loss\n loss_orient = weights.detach() * (normals * dirs).sum(-1).clamp(min=0) ** 2\n results['loss_orient'] = loss_orient.sum(-1).mean()\n \n if self.opt.lambda_3d_normal_smooth > 0 and normals is not None:\n normals_perturb = self.normal(xyzs + torch.randn_like(xyzs) * 1e-2)\n results['loss_normal_perturb'] = (normals - normals_perturb).abs().mean()\n \n if (self.opt.lambda_2d_normal_smooth > 0 or self.opt.lambda_normal > 0) and normals is not None:\n normal_image = torch.sum(weights.unsqueeze(-1) * (normals + 1) / 2, dim=-2) # [N, 3], in [0, 1]\n results['normal_image'] = normal_image\n \n results['image'] = image\n results['depth'] = depth\n results['weights'] = weights\n results['weights_sum'] = weights_sum\n\n return results\n\n\n def run_cuda(self, rays_o, rays_d, light_d=None, ambient_ratio=1.0, shading='albedo', bg_color=None, perturb=False, T_thresh=1e-4, binarize=False, **kwargs):\n # rays_o, rays_d: [B, N, 3]\n # return: image: [B, N, 3], depth: [B, N]\n\n prefix = rays_o.shape[:-1]\n rays_o = rays_o.contiguous().view(-1, 3)\n rays_d = rays_d.contiguous().view(-1, 3)\n\n N = rays_o.shape[0] # B * N, in fact\n device = rays_o.device\n\n # pre-calculate near far\n nears, 
fars = raymarching.near_far_from_aabb(rays_o, rays_d, self.aabb_train if self.training else self.aabb_infer)\n\n # random sample light_d if not provided\n if light_d is None:\n # gaussian noise around the ray origin, so the light always face the view dir (avoid dark face)\n light_d = safe_normalize(rays_o + torch.randn(3, device=rays_o.device)) # [N, 3]\n\n results = {}\n\n if self.training:\n self.train_step += 1\n # print(self.train_epoch)\n xyzs, dirs, ts, rays = raymarching.march_rays_train(rays_o, rays_d, self.bound, self.density_bitfield, self.cascade, self.grid_size, nears, fars, perturb, self.opt.dt_gamma, self.opt.max_steps)\n dirs = safe_normalize(dirs)\n\n if light_d.shape[0] > 1:\n flatten_rays = raymarching.flatten_rays(rays, xyzs.shape[0]).long()\n light_d = light_d[flatten_rays]\n\n \n sigmas, rgbs, normals = self(xyzs, dirs, light_d, ratio=ambient_ratio, shading=shading)\n weights, weights_sum, depth, image = raymarching.composite_rays_train(sigmas, rgbs, ts, rays, T_thresh, binarize)\n \n # normals related regularizations\n if self.opt.lambda_orient > 0 and normals is not None:\n # orientation loss \n loss_orient = weights.detach() * (normals * dirs).sum(-1).clamp(min=0) ** 2\n results['loss_orient'] = loss_orient.mean()\n \n if self.opt.lambda_3d_normal_smooth > 0 and normals is not None:\n normals_perturb = self.normal(xyzs + torch.randn_like(xyzs) * 1e-2)\n results['loss_normal_perturb'] = (normals - normals_perturb).abs().mean()\n \n if (self.opt.lambda_2d_normal_smooth > 0 or self.opt.lambda_normal > 0) and normals is not None:\n _, _, _, normal_image = raymarching.composite_rays_train(sigmas.detach(), (normals + 1) / 2, ts, rays, T_thresh, binarize)\n results['normal_image'] = normal_image\n \n # weights normalization\n results['weights'] = weights\n\n else:\n \n # allocate outputs \n dtype = torch.float32\n \n weights_sum = torch.zeros(N, dtype=dtype, device=device)\n depth = torch.zeros(N, dtype=dtype, device=device)\n image = torch.zeros(N, 3, dtype=dtype, device=device)\n \n n_alive = N\n rays_alive = torch.arange(n_alive, dtype=torch.int32, device=device) # [N]\n rays_t = nears.clone() # [N]\n\n step = 0\n \n while step < self.opt.max_steps: # hard coded max step\n\n # count alive rays \n n_alive = rays_alive.shape[0]\n\n # exit loop\n if n_alive <= 0:\n break\n\n # decide compact_steps\n n_step = max(min(N // n_alive, 8), 1)\n\n xyzs, dirs, ts = raymarching.march_rays(n_alive, n_step, rays_alive, rays_t, rays_o, rays_d, self.bound, self.density_bitfield, self.cascade, self.grid_size, nears, fars, perturb if step == 0 else False, self.opt.dt_gamma, self.opt.max_steps)\n dirs = safe_normalize(dirs)\n sigmas, rgbs, normals = self(xyzs, dirs, light_d, ratio=ambient_ratio, shading=shading)\n raymarching.composite_rays(n_alive, n_step, rays_alive, rays_t, sigmas, rgbs, ts, weights_sum, depth, image, T_thresh, binarize)\n\n rays_alive = rays_alive[rays_alive >= 0]\n #print(f'step = {step}, n_step = {n_step}, n_alive = {n_alive}, xyzs: {xyzs.shape}')\n\n step += n_step\n\n # mix background color\n if bg_color is None:\n if self.opt.bg_radius > 0:\n # use the bg model to calculate bg_color\n bg_color = self.background(rays_d) # [N, 3]\n else:\n bg_color = 1\n # bg_color = 1\n # bg_color = 1e-3\n if shading == 'normal':\n bg_color = 1\n image = image + (1 - weights_sum).unsqueeze(-1) * bg_color\n\n image = image.view(*prefix, 3)\n depth = depth.view(*prefix)\n\n weights_sum = weights_sum.reshape(*prefix)\n\n results['image'] = image\n results['depth'] = depth\n 
results['weights_sum'] = weights_sum\n \n return results\n\n @torch.no_grad()\n def init_tet(self, mesh=None):\n\n if mesh is not None:\n # normalize mesh\n scale = 0.8 / np.array(mesh.bounds[1] - mesh.bounds[0]).max()\n center = np.array(mesh.bounds[1] + mesh.bounds[0]) / 2\n mesh.vertices = (mesh.vertices - center) * scale\n\n # init scale\n # self.tet_scale = torch.from_numpy(np.abs(mesh.vertices).max(axis=0) + 1e-1).to(self.verts.dtype).cuda()\n self.tet_scale = torch.from_numpy(np.array([np.abs(mesh.vertices).max()]) + 1e-1).to(self.verts.dtype).cuda()\n self.verts = self.verts * self.tet_scale\n\n # init sdf\n import cubvh\n BVH = cubvh.cuBVH(mesh.vertices, mesh.faces)\n sdf, _, _ = BVH.signed_distance(self.verts, return_uvw=False, mode='watertight')\n sdf *= -10 # INNER is POSITIVE, also make it stronger\n self.sdf.data += sdf.to(self.sdf.data.dtype).clamp(-1, 1)\n\n else:\n\n if self.cuda_ray:\n density_thresh = min(self.mean_density, self.density_thresh)\n else:\n density_thresh = self.density_thresh\n \n if self.opt.density_activation == 'softplus':\n density_thresh = density_thresh * 25\n\n # init scale\n sigma = self.density(self.verts)['sigma'] # verts covers [-1, 1] now\n mask = sigma > density_thresh\n valid_verts = self.verts[mask]\n self.tet_scale = valid_verts.abs().amax(dim=0) + 1e-1\n self.verts = self.verts * self.tet_scale\n\n # init sigma\n sigma = self.density(self.verts)['sigma'] # new verts\n self.sdf.data += (sigma - density_thresh).clamp(-1, 1)\n\n print(f'[INFO] init dmtet: scale = {self.tet_scale}')\n\n\n def run_dmtet(self, rays_o, rays_d, mvp, h, w, light_d=None, ambient_ratio=1.0, shading='albedo', bg_color=None, **kwargs):\n # mvp: [B, 4, 4]\n\n device = mvp.device\n campos = rays_o[:, 0, :] # only need one ray per batch\n\n # random sample light_d if not provided\n if light_d is None:\n # gaussian noise around the ray origin, so the light always face the view dir (avoid dark face)\n light_d = safe_normalize(campos + torch.randn_like(campos)).view(-1, 1, 1, 3) # [B, 1, 1, 3]\n\n results = {}\n\n # get mesh\n sdf = self.sdf\n deform = torch.tanh(self.deform) / self.opt.tet_grid_size\n\n verts, faces = self.dmtet(self.verts + deform, sdf, self.indices)\n\n # get normals\n i0, i1, i2 = faces[:, 0], faces[:, 1], faces[:, 2]\n v0, v1, v2 = verts[i0, :], verts[i1, :], verts[i2, :]\n\n faces = faces.int()\n \n face_normals = torch.cross(v1 - v0, v2 - v0)\n face_normals = safe_normalize(face_normals)\n \n vn = torch.zeros_like(verts)\n vn.scatter_add_(0, i0[:, None].repeat(1,3), face_normals)\n vn.scatter_add_(0, i1[:, None].repeat(1,3), face_normals)\n vn.scatter_add_(0, i2[:, None].repeat(1,3), face_normals)\n\n vn = torch.where(torch.sum(vn * vn, -1, keepdim=True) > 1e-20, vn, torch.tensor([0.0, 0.0, 1.0], dtype=torch.float32, device=vn.device))\n\n # rasterization\n verts_clip = torch.bmm(F.pad(verts, pad=(0, 1), mode='constant', value=1.0).unsqueeze(0).repeat(mvp.shape[0], 1, 1), \n mvp.permute(0,2,1)).float() # [B, N, 4]\n rast, rast_db = dr.rasterize(self.glctx, verts_clip, faces, (h, w))\n \n alpha = (rast[..., 3:] > 0).float()\n xyzs, _ = dr.interpolate(verts.unsqueeze(0), rast, faces) # [B, H, W, 3]\n normal, _ = dr.interpolate(vn.unsqueeze(0).contiguous(), rast, faces)\n normal = safe_normalize(normal)\n\n xyzs = xyzs.view(-1, 3)\n mask = (rast[..., 3:] > 0).view(-1).detach()\n\n # do the lighting here since we have normal from mesh now.\n albedo = torch.zeros_like(xyzs, dtype=torch.float32)\n if mask.any():\n masked_albedo = 
self.density(xyzs[mask])['albedo']\n albedo[mask] = masked_albedo.float()\n albedo = albedo.view(-1, h, w, 3)\n\n # these two modes lead to no parameters to optimize if using --lock_geo.\n if self.opt.lock_geo and shading in ['textureless', 'normal']:\n shading = 'lambertian'\n\n if shading == 'albedo':\n color = albedo\n elif shading == 'textureless':\n lambertian = ambient_ratio + (1 - ambient_ratio) * (normal * light_d).sum(-1).float().clamp(min=0)\n color = lambertian.unsqueeze(-1).repeat(1, 1, 1, 3)\n elif shading == 'normal':\n color = (normal + 1) / 2\n else: # 'lambertian'\n lambertian = ambient_ratio + (1 - ambient_ratio) * (normal * light_d).sum(-1).float().clamp(min=0)\n color = albedo * lambertian.unsqueeze(-1)\n\n color = dr.antialias(color, rast, verts_clip, faces).clamp(0, 1) # [B, H, W, 3]\n alpha = dr.antialias(alpha, rast, verts_clip, faces).clamp(0, 1) # [B, H, W, 1]\n\n # mix background color\n if bg_color is None:\n if self.opt.bg_radius > 0:\n # use the bg model to calculate bg_color\n bg_color = self.background(rays_d) # [N, 3]\n else:\n bg_color = 1\n \n if torch.is_tensor(bg_color) and len(bg_color.shape) > 1:\n bg_color = bg_color.view(-1, h, w, 3)\n \n depth = rast[:, :, :, [2]] # [B, H, W]\n color = color + (1 - alpha) * bg_color\n\n results['depth'] = depth \n results['image'] = color\n results['weights_sum'] = alpha.squeeze(-1)\n\n if self.opt.lambda_2d_normal_smooth > 0 or self.opt.lambda_normal > 0:\n normal_image = dr.antialias((normal + 1) / 2, rast, verts_clip, faces).clamp(0, 1) # [B, H, W, 3]\n results['normal_image'] = normal_image\n \n # regularizations\n if self.training:\n if self.opt.lambda_mesh_normal > 0:\n results['normal_loss'] = normal_consistency(face_normals, faces)\n if self.opt.lambda_mesh_laplacian > 0:\n results['lap_loss'] = laplacian_smooth_loss(verts, faces)\n\n return results\n\n def run_taichi(self, rays_o, rays_d, light_d=None, ambient_ratio=1.0, shading='albedo', bg_color=None, perturb=False, T_thresh=1e-4, **kwargs):\n # rays_o, rays_d: [B, N, 3], assumes B == 1\n # return: image: [B, N, 3], depth: [B, N]\n\n prefix = rays_o.shape[:-1]\n rays_o = rays_o.contiguous().view(-1, 3)\n rays_d = rays_d.contiguous().view(-1, 3)\n\n N = rays_o.shape[0] # N = B * N, in fact\n device = rays_o.device\n\n # pre-calculate near far\n exp_step_factor = kwargs.get('exp_step_factor', 0.)\n MAX_SAMPLES = 1024\n NEAR_DISTANCE = 0.01\n center = torch.zeros(1, 3)\n half_size = torch.ones(1, 3)\n _, hits_t, _ = self.ray_aabb_intersector.apply(rays_o, rays_d, center, half_size, 1)\n hits_t[(hits_t[:, 0, 0] >= 0) & (hits_t[:, 0, 0] < NEAR_DISTANCE), 0, 0] = NEAR_DISTANCE\n\n # TODO: should sample different light_d for each batch... 
but taichi end doesn't have a flatten_ray implemented currently...\n # random sample light_d if not provided\n if light_d is None:\n # gaussian noise around the ray origin, so the light always face the view dir (avoid dark face)\n light_d = (rays_o[0] + torch.randn(3, device=device, dtype=torch.float))\n light_d = safe_normalize(light_d)\n\n results = {}\n\n if self.training:\n rays_a, xyzs, dirs, deltas, ts, _ = self.ray_marching(rays_o, rays_d, hits_t[:, 0], self.density_bitfield, self.cascade, self.bound, exp_step_factor, self.grid_size, MAX_SAMPLES)\n dirs = safe_normalize(dirs)\n # plot_pointcloud(xyzs.reshape(-1, 3).detach().cpu().numpy())\n sigmas, rgbs, normals = self(xyzs, dirs, light_d, ratio=ambient_ratio, shading=shading)\n _, weights_sum, depth, image, weights = self.volume_render(sigmas, rgbs, deltas, ts, rays_a, kwargs.get('T_threshold', 1e-4))\n \n # normals related regularizations\n if self.opt.lambda_orient > 0 and normals is not None:\n # orientation loss \n loss_orient = weights.detach() * (normals * dirs).sum(-1).clamp(min=0) ** 2\n results['loss_orient'] = loss_orient.mean()\n \n if self.opt.lambda_3d_normal_smooth > 0 and normals is not None:\n normals_perturb = self.normal(xyzs + torch.randn_like(xyzs) * 1e-2)\n results['loss_normal_perturb'] = (normals - normals_perturb).abs().mean()\n \n if (self.opt.lambda_2d_normal_smooth > 0 or self.opt.lambda_normal > 0) and normals is not None:\n _, _, _, normal_image, _ = self.volume_render(sigmas.detach(), (normals + 1) / 2, deltas, ts, rays_a, kwargs.get('T_threshold', 1e-4))\n results['normal_image'] = normal_image\n \n # weights normalization\n results['weights'] = weights\n\n else:\n \n # allocate outputs \n dtype = torch.float32\n \n weights_sum = torch.zeros(N, dtype=dtype, device=device)\n depth = torch.zeros(N, dtype=dtype, device=device)\n image = torch.zeros(N, 3, dtype=dtype, device=device)\n \n n_alive = N\n rays_alive = torch.arange(n_alive, dtype=torch.int32, device=device) # [N]\n rays_t = hits_t[:, 0, 0]\n step = 0\n \n min_samples = 1 if exp_step_factor == 0 else 4\n\n while step < self.opt.max_steps: # hard coded max step\n\n # count alive rays \n n_alive = rays_alive.shape[0]\n\n # exit loop\n if n_alive <= 0:\n break\n\n # decide compact_steps\n # n_step = max(min(N // n_alive, 8), 1)\n n_step = max(min(N // n_alive, 64), min_samples)\n\n xyzs, dirs, deltas, ts, N_eff_samples = \\\n self.raymarching_test_taichi(rays_o, rays_d, hits_t[:, 0], rays_alive,\n self.density_bitfield, self.cascade,\n self.bound, exp_step_factor,\n self.grid_size, MAX_SAMPLES, n_step)\n\n xyzs = self.rearrange(xyzs, 'n1 n2 c -> (n1 n2) c')\n dirs = self.rearrange(dirs, 'n1 n2 c -> (n1 n2) c')\n dirs = safe_normalize(dirs)\n valid_mask = ~torch.all(dirs == 0, dim=1)\n if valid_mask.sum() == 0:\n break\n\n sigmas = torch.zeros(len(xyzs), device=device)\n rgbs = torch.zeros(len(xyzs), 3, device=device)\n normals = torch.zeros(len(xyzs), 3, device=device)\n\n sigmas[valid_mask], _rgbs, normals = self(xyzs[valid_mask], dirs[valid_mask], light_d, ratio=ambient_ratio, shading=shading)\n rgbs[valid_mask] = _rgbs.float()\n sigmas = self.rearrange(sigmas, '(n1 n2) -> n1 n2', n2=n_step)\n rgbs = self.rearrange(rgbs, '(n1 n2) c -> n1 n2 c', n2=n_step)\n if normals is not None:\n normals = self.rearrange(normals, '(n1 n2) c -> n1 n2 c', n2=n_step)\n\n self.composite_test_fw(sigmas, rgbs, deltas, ts, hits_t[:,0], rays_alive,\n kwargs.get('T_threshold', 1e-4), N_eff_samples,\n weights_sum, depth, image)\n\n rays_alive = rays_alive[rays_alive >= 
0]\n\n step += n_step\n\n # mix background color\n if bg_color is None:\n if self.opt.bg_radius > 0:\n # use the bg model to calculate bg_color\n bg_color = self.background(rays_d) # [N, 3]\n else:\n bg_color = 1\n\n image = image + self.rearrange(1 - weights_sum, 'n -> n 1') * bg_color\n image = image.view(*prefix, 3)\n\n depth = depth.view(*prefix)\n\n weights_sum = weights_sum.reshape(*prefix)\n\n results['image'] = image\n results['depth'] = depth\n results['weights_sum'] = weights_sum\n \n return results\n\n\n @torch.no_grad()\n def update_extra_state(self, decay=0.95, S=128):\n # call before each epoch to update extra states.\n\n if not (self.cuda_ray or self.taichi_ray):\n return \n \n ### update density grid\n tmp_grid = - torch.ones_like(self.density_grid)\n \n X = torch.arange(self.grid_size, dtype=torch.int32, device=self.aabb_train.device).split(S)\n Y = torch.arange(self.grid_size, dtype=torch.int32, device=self.aabb_train.device).split(S)\n Z = torch.arange(self.grid_size, dtype=torch.int32, device=self.aabb_train.device).split(S)\n\n for xs in X:\n for ys in Y:\n for zs in Z:\n \n # construct points\n xx, yy, zz = custom_meshgrid(xs, ys, zs)\n coords = torch.cat([xx.reshape(-1, 1), yy.reshape(-1, 1), zz.reshape(-1, 1)], dim=-1) # [N, 3], in [0, 128)\n indices = raymarching.morton3D(coords).long() # [N]\n xyzs = 2 * coords.float() / (self.grid_size - 1) - 1 # [N, 3] in [-1, 1]\n\n # cascading\n for cas in range(self.cascade):\n bound = min(2 ** cas, self.bound)\n half_grid_size = bound / self.grid_size\n # scale to current cascade's resolution\n cas_xyzs = xyzs * (bound - half_grid_size)\n # add noise in [-hgs, hgs]\n cas_xyzs += (torch.rand_like(cas_xyzs) * 2 - 1) * half_grid_size\n # query density\n sigmas = self.density(cas_xyzs)['sigma'].reshape(-1).detach()\n # assign \n tmp_grid[cas, indices] = sigmas\n # ema update\n valid_mask = self.density_grid >= 0\n self.density_grid[valid_mask] = torch.maximum(self.density_grid[valid_mask] * decay, tmp_grid[valid_mask])\n self.mean_density = torch.mean(self.density_grid[valid_mask]).item()\n self.iter_density += 1\n\n # convert to bitfield\n density_thresh = min(self.mean_density, self.density_thresh)\n if self.cuda_ray:\n self.density_bitfield = raymarching.packbits(self.density_grid, density_thresh, self.density_bitfield)\n elif self.taichi_ray:\n self.packbits_taichi(self.density_grid.reshape(-1).contiguous(), density_thresh, self.density_bitfield)\n\n # print(f'[density grid] min={self.density_grid.min().item():.4f}, max={self.density_grid.max().item():.4f}, mean={self.mean_density:.4f}, occ_rate={(self.density_grid > density_thresh).sum() / (128**3 * self.cascade):.3f}')\n\n\n def render(self, rays_o, rays_d, mvp, h, w, staged=False, max_ray_batch=4096, **kwargs):\n # rays_o, rays_d: [B, N, 3]\n # return: pred_rgb: [B, N, 3]\n B, N = rays_o.shape[:2]\n device = rays_o.device\n\n if self.dmtet:\n results = self.run_dmtet(rays_o, rays_d, mvp, h, w, **kwargs)\n elif self.cuda_ray:\n results = self.run_cuda(rays_o, rays_d, **kwargs)\n elif self.taichi_ray:\n results = self.run_taichi(rays_o, rays_d, **kwargs)\n else:\n if staged:\n depth = torch.empty((B, N), device=device)\n image = torch.empty((B, N, 3), device=device)\n weights_sum = torch.empty((B, N), device=device)\n\n for b in range(B):\n head = 0\n while head < N:\n tail = min(head + max_ray_batch, N)\n results_ = self.run(rays_o[b:b+1, head:tail], rays_d[b:b+1, head:tail], **kwargs)\n depth[b:b+1, head:tail] = results_['depth']\n weights_sum[b:b+1, head:tail] = 
results_['weights_sum']\n image[b:b+1, head:tail] = results_['image']\n head += max_ray_batch\n \n results = {}\n results['depth'] = depth\n results['image'] = image\n results['weights_sum'] = weights_sum\n\n else:\n results = self.run(rays_o, rays_d, **kwargs)\n\n return results" }, { "identifier": "get_encoder", "path": "encoding.py", "snippet": "def get_encoder(encoding, input_dim=3, \n multires=6, \n degree=4,\n num_levels=16, level_dim=2, base_resolution=16, log2_hashmap_size=19, desired_resolution=2048, align_corners=False, interpolation='linear',\n **kwargs):\n\n if encoding == 'None':\n return lambda x, **kwargs: x, input_dim\n \n elif encoding == 'frequency_torch':\n encoder = FreqEncoder_torch(input_dim=input_dim, max_freq_log2=multires-1, N_freqs=multires, log_sampling=True)\n\n elif encoding == 'frequency': # CUDA implementation, faster than torch.\n from freqencoder import FreqEncoder\n encoder = FreqEncoder(input_dim=input_dim, degree=multires)\n\n elif encoding == 'sphere_harmonics':\n from shencoder import SHEncoder\n encoder = SHEncoder(input_dim=input_dim, degree=degree)\n\n elif encoding == 'hashgrid':\n from gridencoder import GridEncoder\n encoder = GridEncoder(input_dim=input_dim, num_levels=num_levels, level_dim=level_dim, base_resolution=base_resolution, log2_hashmap_size=log2_hashmap_size, desired_resolution=desired_resolution, gridtype='hash', align_corners=align_corners, interpolation=interpolation)\n \n elif encoding == 'tiledgrid':\n from gridencoder import GridEncoder\n encoder = GridEncoder(input_dim=input_dim, num_levels=num_levels, level_dim=level_dim, base_resolution=base_resolution, log2_hashmap_size=log2_hashmap_size, desired_resolution=desired_resolution, gridtype='tiled', align_corners=align_corners, interpolation=interpolation)\n \n elif encoding == 'hashgrid_taichi':\n from taichi_modules.hash_encoder import HashEncoderTaichi\n encoder = HashEncoderTaichi(batch_size=4096) #TODO: hard encoded batch size\n\n elif encoding == 'multiscale_triplane':\n from gridencoder import MultiScaleTriplane\n # encoder = MiniTriplane(input_dim=input_dim)\n encoder = MultiScaleTriplane(input_dim=input_dim)\n\n elif encoding == 'multiscale_triplane_pooling':\n from gridencoder import MultiScaleTriplane_Pooling\n encoder = MultiScaleTriplane_Pooling(input_dim=input_dim)\n else:\n raise NotImplementedError('Unknown encoding mode, choose from [None, frequency, sphere_harmonics, hashgrid, tiledgrid]')\n\n return encoder, encoder.output_dim" }, { "identifier": "safe_normalize", "path": "nerf/utils.py", "snippet": "def safe_normalize(x, eps=1e-20):\n return x / torch.sqrt(torch.clamp(torch.sum(x * x, -1, keepdim=True), min=eps))" } ]
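The NeRFRenderer snippet in the context above converts per-sample densities into alpha-compositing weights before blending colour, depth, and the background. That step is a few lines of tensor arithmetic, shown here as a generic sketch rather than the repository's exact routine (the delta value appended for the last sample is an assumption).

import torch

def composite_weights(sigmas, z_vals, last_delta=1e10):
    # sigmas, z_vals: [N, T] densities and sorted sample depths along each ray.
    deltas = z_vals[..., 1:] - z_vals[..., :-1]                                   # [N, T-1]
    deltas = torch.cat([deltas, torch.full_like(deltas[..., :1], last_delta)], dim=-1)
    alphas = 1.0 - torch.exp(-deltas * sigmas)                                    # opacity of each segment
    trans = torch.cumprod(
        torch.cat([torch.ones_like(alphas[..., :1]), 1.0 - alphas + 1e-15], dim=-1), dim=-1
    )[..., :-1]                                                                   # transmittance up to each sample
    return alphas * trans                                                         # [N, T] weights

# Weighted sums along the ray give the rendered depth (and, with rgbs, the colour).
sigmas, z_vals = torch.rand(4, 64), torch.sort(torch.rand(4, 64), dim=-1).values
depth = (composite_weights(sigmas, z_vals) * z_vals).sum(-1)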
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np

from activation import trunc_exp, biased_softplus
from .renderer import NeRFRenderer
from encoding import get_encoder
from .utils import safe_normalize
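The import block above pulls in trunc_exp, whose skeleton appears in the context as a custom autograd Function with a forward and a backward. A common way to implement such a truncated exponential is to return a plain exp in the forward pass and clamp the saved input inside the backward pass so the gradient cannot overflow; the clamp bound below is an assumption, not necessarily the value in activation.py.

import torch
from torch.autograd import Function

class TruncExpSketch(Function):
    @staticmethod
    def forward(ctx, x):
        # Plain exponential in the forward pass.
        ctx.save_for_backward(x)
        return torch.exp(x)

    @staticmethod
    def backward(ctx, g):
        # Clamp the saved input so exp() in the gradient stays finite.
        (x,) = ctx.saved_tensors
        return g * torch.exp(x.clamp(max=15))

trunc_exp_sketch = TruncExpSketch.apply
y = trunc_exp_sketch(torch.linspace(-5.0, 20.0, 8, requires_grad=True))
y.sum().backward()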
14,121
class MLP(nn.Module): def __init__(self, dim_in, dim_out, dim_hidden, num_layers, bias=True): super().__init__() self.dim_in = dim_in self.dim_out = dim_out self.dim_hidden = dim_hidden self.num_layers = num_layers net = [] for l in range(num_layers): net.append(nn.Linear(self.dim_in if l == 0 else self.dim_hidden, self.dim_out if l == num_layers - 1 else self.dim_hidden, bias=bias)) self.net = nn.ModuleList(net) def forward(self, x): for l in range(self.num_layers): x = self.net[l](x) if l != self.num_layers - 1: x = F.relu(x, inplace=True) return x class NeRFNetwork(NeRFRenderer): def __init__(self, opt, num_layers=3, hidden_dim=64, num_layers_bg=2, hidden_dim_bg=32, ): super().__init__(opt) self.num_layers = num_layers self.hidden_dim = hidden_dim # self.encoder, self.in_dim = get_encoder('hashgrid', input_dim=3, log2_hashmap_size=19, desired_resolution=2048 * self.bound, interpolation='smoothstep') self.encoder, self.in_dim = get_encoder('multiscale_triplane_pooling', input_dim=3, iteration=0, is_training=True) self.sigma_net = MLP(self.in_dim, 4, hidden_dim, num_layers, bias=True) # self.normal_net = MLP(self.in_dim, 3, hidden_dim, num_layers, bias=True) self.density_activation = trunc_exp if self.opt.density_activation == 'exp' else biased_softplus # background network if self.opt.bg_radius > 0: self.num_layers_bg = num_layers_bg self.hidden_dim_bg = hidden_dim_bg # use a very simple network to avoid it learning the prompt... self.encoder_bg, self.in_dim_bg = get_encoder('frequency', input_dim=3, multires=6) self.bg_net = MLP(self.in_dim_bg, 3, hidden_dim_bg, num_layers_bg, bias=True) else: self.bg_net = None def common_forward(self, x): # sigma # enc = self.encoder(x, bound=self.bound, max_level=self.max_level) enc = self.encoder(x, bound=self.bound, iteration=self.train_step, is_training=self.training) h = self.sigma_net(enc) # sigma = self.density_activation(h[..., 0] + self.density_blob(x)) # albedo = torch.sigmoid(h[..., 1:]) sigma = self.density_activation(h[:, ..., 0] + self.density_blob(x)) # if self.train_step > self.max_train_step // 3: # albedo = torch.sigmoid(h[:, ..., 1:]) # else: albedo = torch.sigmoid(h[:, ..., 1:]) # albedo = h[:, ..., 1:] # albedo.clamp_(min=0, max=1) # 前期不要sigmoid 后期sigmoid return sigma, albedo # ref: https://github.com/zhaofuq/Instant-NSR/blob/main/nerf/network_sdf.py#L192 def finite_difference_normal(self, x, epsilon=1e-2): # x: [N, 3] dx_pos, _ = self.common_forward((x + torch.tensor([[epsilon, 0.00, 0.00]], device=x.device)).clamp(-self.bound, self.bound)) dx_neg, _ = self.common_forward((x + torch.tensor([[-epsilon, 0.00, 0.00]], device=x.device)).clamp(-self.bound, self.bound)) dy_pos, _ = self.common_forward((x + torch.tensor([[0.00, epsilon, 0.00]], device=x.device)).clamp(-self.bound, self.bound)) dy_neg, _ = self.common_forward((x + torch.tensor([[0.00, -epsilon, 0.00]], device=x.device)).clamp(-self.bound, self.bound)) dz_pos, _ = self.common_forward((x + torch.tensor([[0.00, 0.00, epsilon]], device=x.device)).clamp(-self.bound, self.bound)) dz_neg, _ = self.common_forward((x + torch.tensor([[0.00, 0.00, -epsilon]], device=x.device)).clamp(-self.bound, self.bound)) normal = torch.stack([ 0.5 * (dx_pos - dx_neg) / epsilon, 0.5 * (dy_pos - dy_neg) / epsilon, 0.5 * (dz_pos - dz_neg) / epsilon ], dim=-1) return -normal def normal(self, x): normal = self.finite_difference_normal(x)
class MLP(nn.Module): def __init__(self, dim_in, dim_out, dim_hidden, num_layers, bias=True): super().__init__() self.dim_in = dim_in self.dim_out = dim_out self.dim_hidden = dim_hidden self.num_layers = num_layers net = [] for l in range(num_layers): net.append(nn.Linear(self.dim_in if l == 0 else self.dim_hidden, self.dim_out if l == num_layers - 1 else self.dim_hidden, bias=bias)) self.net = nn.ModuleList(net) def forward(self, x): for l in range(self.num_layers): x = self.net[l](x) if l != self.num_layers - 1: x = F.relu(x, inplace=True) return x class NeRFNetwork(NeRFRenderer): def __init__(self, opt, num_layers=3, hidden_dim=64, num_layers_bg=2, hidden_dim_bg=32, ): super().__init__(opt) self.num_layers = num_layers self.hidden_dim = hidden_dim # self.encoder, self.in_dim = get_encoder('hashgrid', input_dim=3, log2_hashmap_size=19, desired_resolution=2048 * self.bound, interpolation='smoothstep') self.encoder, self.in_dim = get_encoder('multiscale_triplane_pooling', input_dim=3, iteration=0, is_training=True) self.sigma_net = MLP(self.in_dim, 4, hidden_dim, num_layers, bias=True) # self.normal_net = MLP(self.in_dim, 3, hidden_dim, num_layers, bias=True) self.density_activation = trunc_exp if self.opt.density_activation == 'exp' else biased_softplus # background network if self.opt.bg_radius > 0: self.num_layers_bg = num_layers_bg self.hidden_dim_bg = hidden_dim_bg # use a very simple network to avoid it learning the prompt... self.encoder_bg, self.in_dim_bg = get_encoder('frequency', input_dim=3, multires=6) self.bg_net = MLP(self.in_dim_bg, 3, hidden_dim_bg, num_layers_bg, bias=True) else: self.bg_net = None def common_forward(self, x): # sigma # enc = self.encoder(x, bound=self.bound, max_level=self.max_level) enc = self.encoder(x, bound=self.bound, iteration=self.train_step, is_training=self.training) h = self.sigma_net(enc) # sigma = self.density_activation(h[..., 0] + self.density_blob(x)) # albedo = torch.sigmoid(h[..., 1:]) sigma = self.density_activation(h[:, ..., 0] + self.density_blob(x)) # if self.train_step > self.max_train_step // 3: # albedo = torch.sigmoid(h[:, ..., 1:]) # else: albedo = torch.sigmoid(h[:, ..., 1:]) # albedo = h[:, ..., 1:] # albedo.clamp_(min=0, max=1) # 前期不要sigmoid 后期sigmoid return sigma, albedo # ref: https://github.com/zhaofuq/Instant-NSR/blob/main/nerf/network_sdf.py#L192 def finite_difference_normal(self, x, epsilon=1e-2): # x: [N, 3] dx_pos, _ = self.common_forward((x + torch.tensor([[epsilon, 0.00, 0.00]], device=x.device)).clamp(-self.bound, self.bound)) dx_neg, _ = self.common_forward((x + torch.tensor([[-epsilon, 0.00, 0.00]], device=x.device)).clamp(-self.bound, self.bound)) dy_pos, _ = self.common_forward((x + torch.tensor([[0.00, epsilon, 0.00]], device=x.device)).clamp(-self.bound, self.bound)) dy_neg, _ = self.common_forward((x + torch.tensor([[0.00, -epsilon, 0.00]], device=x.device)).clamp(-self.bound, self.bound)) dz_pos, _ = self.common_forward((x + torch.tensor([[0.00, 0.00, epsilon]], device=x.device)).clamp(-self.bound, self.bound)) dz_neg, _ = self.common_forward((x + torch.tensor([[0.00, 0.00, -epsilon]], device=x.device)).clamp(-self.bound, self.bound)) normal = torch.stack([ 0.5 * (dx_pos - dx_neg) / epsilon, 0.5 * (dy_pos - dy_neg) / epsilon, 0.5 * (dz_pos - dz_neg) / epsilon ], dim=-1) return -normal def normal(self, x): normal = self.finite_difference_normal(x)
normal = safe_normalize(normal)
3
2023-10-11 04:06:20+00:00
16k
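Note on the record above: its cropped_code/all_code estimate surface normals by central finite differences of the density field (finite_difference_normal in NeRFNetwork). The following is only a hedged, self-contained sketch of that idea; toy_density, estimate_normal and the demo values are invented for illustration and are not part of the repository's code or this dataset row.

import torch
import torch.nn.functional as F

def toy_density(x: torch.Tensor) -> torch.Tensor:
    # Stand-in for the network's density output: high inside a unit blob, decaying outside.
    return torch.exp(-x.norm(dim=-1) ** 2)

def estimate_normal(density_fn, x: torch.Tensor, epsilon: float = 1e-2) -> torch.Tensor:
    # x: [N, 3]; perturb each axis by +/- epsilon and take the central difference,
    # mirroring the +/-epsilon probes in finite_difference_normal above.
    offsets = epsilon * torch.eye(3, device=x.device, dtype=x.dtype)
    grads = []
    for i in range(3):
        d_pos = density_fn(x + offsets[i])
        d_neg = density_fn(x - offsets[i])
        grads.append(0.5 * (d_pos - d_neg) / epsilon)
    # The surface normal points against the density gradient, hence the minus sign.
    normal = -torch.stack(grads, dim=-1)
    return F.normalize(normal, dim=-1)

if __name__ == "__main__":
    pts = torch.randn(4, 3)
    print(estimate_normal(toy_density, pts))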
oracle/guardian-ai
guardian_ai/privacy_estimation/attack_runner.py
[ { "identifier": "ClassificationDataset", "path": "guardian_ai/privacy_estimation/dataset.py", "snippet": "class ClassificationDataset(Dataset):\n \"\"\"\n Generic classification dataset in a tabular format, read in a somewhat consistent manner\n \"\"\"\n\n def __init__(self, name, df_x=None, df_y=None):\n \"\"\"\n Create a Classification Dataset wrapper.\n\n Parameters\n ----------\n name: str\n Name of the dataset\n df_x: {array-like, sparse matrix} of shape (n_samples, n_feature),\n where ``n_samples`` is the number of samples and ``n_features`` is the number of features.\n df_y: darray of shape (n_samples,)\n Output labels.\n\n \"\"\"\n self.df_x = df_x\n self.df_y = df_y\n self.column_transformer = None\n self.label_encoder = None\n self.target_model_data = None\n self.attack_model_data = None\n super(ClassificationDataset, self).__init__(name)\n\n def load_data_from_df(self, input_features, target):\n \"\"\"\n Load data from another data frame.\n\n Parameters\n ----------\n input_features: pandas.DataFrame\n target: pandas.DataFrame\n\n Returns\n -------\n None\n\n \"\"\"\n self.df_x = input_features\n self.df_y = target\n\n def load_data(\n self,\n source_file,\n contains_header: bool = False,\n target_ix: int = None,\n ignore_ix: List[int] = None,\n ):\n \"\"\"\n Method that specifies how the data should be loaded. Mainly applicable for tabular data.\n\n Parameters\n ----------\n source_file: os.path\n Filename of the source file.\n contains_header: bool\n Whether to contain header.\n target_ix: int\n Index of the target variable.\n ignore_ix: List[int]\n Indices to be ignored.\n\n Returns\n -------\n pandas dataframe of shape (n_samples, n_feature), pandas df of shape (n_samples,)\n Input features and output labels.\n\n \"\"\"\n df = None\n if source_file.endswith(\".csv\"):\n if contains_header:\n df = pd.read_csv(\n source_file, sep=\",\", skiprows=1, header=None, encoding=\"utf-8\"\n ) # ignore the headers, especially when reading lots of datasets.\n else:\n df = pd.read_csv(source_file, sep=\",\", header=None, encoding=\"utf-8\")\n elif source_file.endswith(\".arff\"):\n data = arff.loadarff(source_file)\n df = pd.DataFrame(data[0])\n else:\n raise ValueError\n\n # first, find the y index and remove it to get x\n y_ix = target_ix if target_ix is not None else len(df.columns) - 1\n self.df_y = df.iloc[:, y_ix]\n if isinstance(self.df_y[0], bytes):\n self.df_y = self.df_y.str.decode(\"utf-8\")\n self.df_x = df.drop(df.columns[y_ix], axis=1)\n\n # next remove the ones that need to be ignored.\n if ignore_ix is not None:\n self.df_x = self.df_x.drop(ignore_ix, axis=1)\n\n def get_column_transformer(self):\n \"\"\"\n Transforming categorical and numerical features.\n\n Returns\n -------\n Pipeline\n pipeline of column transformers.\n\n \"\"\"\n if self.column_transformer is None:\n assert self.df_x is not None\n\n # select categorical and numerical features\n cat_ix = self.df_x.select_dtypes(include=[\"object\", \"bool\"]).columns\n num_ix = self.df_x.select_dtypes(include=[\"int64\", \"float64\"]).columns\n\n # get the column indices, since the drops mess up the column names\n cat_new_ix = [self.df_x.columns.get_loc(col) for col in cat_ix]\n num_new_ix = [self.df_x.columns.get_loc(col) for col in num_ix]\n\n # pipeline for categorical data\n cat_preprocessing = make_pipeline(\n SimpleImputer(strategy=\"constant\", fill_value=\"NA\"),\n OneHotEncoder(handle_unknown=\"ignore\"),\n )\n\n # pipeline for numerical data\n num_preprocessing = make_pipeline(\n 
SimpleImputer(strategy=\"mean\"), MinMaxScaler()\n )\n\n # combine both pipeline using a columnTransformer\n self.column_transformer = ColumnTransformer(\n [\n (\"num\", num_preprocessing, num_new_ix),\n (\"cat\", cat_preprocessing, cat_new_ix),\n ]\n )\n\n return self.column_transformer\n\n def get_label_encoder(self):\n \"\"\"\n Encode the labels.\n\n Returns\n -------\n LabelEncoder\n\n \"\"\"\n if self.label_encoder is None:\n self.label_encoder = LabelEncoder()\n return self.label_encoder\n\n def fit_encoders_and_transform(self, df_x, df_y):\n \"\"\"\n Transform the data and encode labels\n :param df_x: {array-like, sparse matrix} of shape (n_samples, n_feature),\n Input features\n :param df_y: Output labels\n :return: Transformed features and encoded labels\n \"\"\"\n df_x = self.column_transformer.fit_transform(df_x)\n df_y = self.label_encoder.fit_transform(df_y)\n return df_x, df_y\n\n def fit_encoders(self, df_x, df_y):\n \"\"\"\n Fit the column transformer and label encoders. This should really be only done\n on the train set to avoid accidentally learning something from the test dataset\n\n Parameters\n ----------\n df_x: {array-like, sparse matrix} of shape (n_samples, n_feature),\n Input features\n df_y: darray of shape (n_samples,)\n Output labels\n\n Returns\n -------\n None\n\n \"\"\"\n self.get_column_transformer() # this will set the column transformer\n self.get_label_encoder() # this will set the label encoder\n\n self.column_transformer.fit(df_x)\n unique_values = list(df_y.unique())\n if df_y.dtypes == \"int64\":\n unique_values.append(-10000)\n else:\n unique_values.append(\"Unseen\")\n self.label_encoder = self.label_encoder.fit(unique_values)\n\n def encode_data(self, df_x, df_y):\n \"\"\"\n Apply the column transformer and label encoder\n\n Parameters\n ----------\n df_x: {array-like, sparse matrix} of shape (n_samples, n_feature),\n Input features\n df_y: darray of shape (n_samples,)\n Output labels\n\n Returns\n -------\n {array-like, sparse matrix} of shape (n_samples, n_feature), darray of shape (n_samples,)\n Encoded data\n\n \"\"\"\n df_x = self.column_transformer.transform(df_x)\n for i in range(len(df_y)):\n label = df_y.array[i]\n if label not in self.label_encoder.classes_:\n if df_y.dtypes == \"int64\":\n df_y = df_y.replace(to_replace=label, value=-10000)\n else:\n df_y = df_y.replace(to_replace=label, value=\"Unseen\")\n df_y = self.label_encoder.transform(df_y)\n return df_x, df_y\n\n def get_num_rows(self):\n \"\"\"\n Get number of rows in the dataset.\n\n Returns\n -------\n int\n number of rows in the dataset.\n\n \"\"\"\n return self.df_y.shape[0]\n\n def prepare_target_and_attack_data(\n self,\n data_split_seed,\n dataset_split_ratios,\n ):\n \"\"\"\n Given the data split ratios, preform the data split, and prepare appropriate datasets\n for training and testing the target and attack models.\n\n Parameters\n ----------\n data_split_seed: int\n Random seed for splitting the data.\n dataset_split_ratios: dict[DataSplit -> float]\n Map of data split names and fractions.\n\n Returns\n -------\n None\n\n \"\"\"\n data_split_names = [e.name for e in dataset_split_ratios.keys()]\n data_split_ratios = list(dataset_split_ratios.values())\n self.split_dataset(data_split_seed, data_split_ratios, data_split_names)\n\n \"\"\"\n Merge appropriate splits to create the train set for the target model. 
Also fit data\n encoders on this training set, and encode the target train and test sets.\n \"\"\"\n X_target_train, y_target_train = self.get_merged_sets(\n (\n DataSplit.ATTACK_TRAIN_IN.name,\n DataSplit.ATTACK_TEST_IN.name,\n DataSplit.TARGET_ADDITIONAL_TRAIN.name,\n )\n )\n X_target_valid, y_target_valid = self.splits[DataSplit.TARGET_VALID.name]\n X_target_test, y_target_test = self.splits[DataSplit.TARGET_TEST.name]\n # encoding the data\n self.fit_encoders(X_target_train, y_target_train)\n X_target_train, y_target_train = self.encode_data(\n X_target_train, y_target_train\n )\n X_target_valid, y_target_valid = self.encode_data(\n X_target_valid, y_target_valid\n )\n X_target_test, y_target_test = self.encode_data(X_target_test, y_target_test)\n\n self.target_model_data = TargetModelData(\n X_target_train,\n y_target_train,\n X_target_valid,\n y_target_valid,\n X_target_test,\n y_target_test,\n )\n \"\"\"\n Prepare attack model train and test sets by merging appropriate splits, and calculating the\n membership ground truth label - i.e., recording whether or not this data point was used as\n part of the training set for the target model. This label is stored in y_membership_train\n and y_membership_test, for the attack train and test sets respectively. Finally, encode the\n attack data points.\n \"\"\"\n\n (\n X_attack_train,\n y_attack_train,\n y_membership_train,\n ) = self.create_attack_set_from_splits(\n DataSplit.ATTACK_TRAIN_IN.name, DataSplit.ATTACK_TRAIN_OUT.name\n )\n\n (\n X_attack_test,\n y_attack_test,\n y_membership_test,\n ) = self.create_attack_set_from_splits(\n DataSplit.ATTACK_TEST_IN.name, DataSplit.ATTACK_TEST_OUT.name\n )\n\n # encode data\n X_attack_train, y_attack_train = self.encode_data(\n X_attack_train, y_attack_train\n )\n X_attack_test, y_attack_test = self.encode_data(X_attack_test, y_attack_test)\n\n self.attack_model_data = AttackModelData(\n X_attack_train,\n y_attack_train,\n y_membership_train,\n X_attack_test,\n y_attack_test,\n y_membership_test,\n )" }, { "identifier": "TargetModelData", "path": "guardian_ai/privacy_estimation/dataset.py", "snippet": "class TargetModelData:\n \"\"\"\n Convenience class to easily pass around the dataset prepared for training and testing\n the target model\n \"\"\"\n\n def __init__(\n self,\n X_target_train,\n y_target_train,\n X_target_valid,\n y_target_valid,\n X_target_test,\n y_target_test,\n ):\n \"\"\"\n Create Target Model Data\n All X variables are {array-like, sparse matrix} of shape (n_samples, n_features),\n where ``n_samples`` is the number of samples and ``n_features`` is the number of features.\n\n Parameters\n ----------\n X_target_train: {array-like, sparse matrix} of shape (n_samples, n_features)\n Input variables used to train the target model.\n y_target_train: ndarray of shape (n_samples,)\n Output labels used to train the target model.\n X_target_valid: {array-like, sparse matrix} of shape (n_samples, n_features)\n Input variables used to tune the target model.\n y_target_valid: ndarray of shape (n_samples,)\n Output variables used to tune the target model.\n X_target_test: {array-like, sparse matrix} of shape (n_samples, n_features)\n Input variables used to test the target model.\n y_target_test: ndarray of shape (n_samples,)\n Output variables used to test the target model.\n\n \"\"\"\n self.X_target_train = X_target_train\n self.y_target_train = y_target_train\n self.X_target_valid = X_target_valid\n self.y_target_valid = y_target_valid\n self.X_target_test = X_target_test\n 
self.y_target_test = y_target_test" }, { "identifier": "AttackModelData", "path": "guardian_ai/privacy_estimation/dataset.py", "snippet": "class AttackModelData:\n \"\"\"\n Convenience class to easily pass around the dataset prepared for training and testing\n the attack model\n \"\"\"\n\n def __init__(\n self,\n X_attack_train,\n y_attack_train,\n y_membership_train,\n X_attack_test,\n y_attack_test,\n y_membership_test,\n ):\n \"\"\"\n Create Attack Model Data\n\n All X variables are {array-like, sparse matrix} of shape (n_samples, n_features),\n where `n_samples` is the number of samples and n_features` is the number of features.\n All y variables are ndarray of shape (n_samples,)\n\n Parameters\n ----------\n X_attack_train: {array-like, sparse matrix} of shape (n_samples, n_features)\n Input variables for the dataset on which we want to train\n the attack model. These are the original features (not attack/membership features)\n y_attack_train: ndarray of shape (n_samples,)\n Output labels for the dataset on which we want to train\n the attack model. These are the original labels (not membership labels)\n y_membership_train: ndarray of shape (n_samples,)\n Membership labels for the dataset on which we want to train\n the attack model. These are binary and indicate whether the data point was included\n X_attack_test: {array-like, sparse matrix} of shape (n_samples, n_features)\n Input variables for the dataset on which to run the attack model.\n These are the original features (not attack/membership features)\n y_attack_test: ndarray of shape (n_samples,)\n Output labels for the dataset on which to run the attack model.\n These are the original labels (not membership labels)\n y_membership_test: ndarray of shape (n_samples,)\n Membership labels for the dataset on which we want to run\n the attack model. These are binary and indicate whether the data point was included\n in the training dataset of the target model, and helps us evaluate the attack model's\n accuracy.\n\n \"\"\"\n self.X_attack_train = X_attack_train\n self.y_attack_train = y_attack_train\n self.y_membership_train = y_membership_train\n self.X_attack_test = X_attack_test\n self.y_attack_test = y_attack_test\n self.y_membership_test = y_membership_test" }, { "identifier": "AttackType", "path": "guardian_ai/privacy_estimation/attack.py", "snippet": "class AttackType(enum.Enum):\n \"\"\"\n All the attack types currently supported by this tool.\n \"\"\"\n\n LossBasedBlackBoxAttack = 0\n ExpectedLossBasedBlackBoxAttack = 1\n ConfidenceBasedBlackBoxAttack = 2\n ExpectedConfidenceBasedBlackBoxAttack = 3\n MerlinAttack = 4\n CombinedBlackBoxAttack = 5\n CombinedWithMerlinBlackBoxAttack = 6\n MorganAttack = 7" }, { "identifier": "LossBasedBlackBoxAttack", "path": "guardian_ai/privacy_estimation/attack.py", "snippet": "class LossBasedBlackBoxAttack(BlackBoxAttack):\n \"\"\"\n One of the simplest, but fairly effective attack - which looks at the loss value of the\n attack point. 
Attacker hypothesis is that lower loss indicates that the target model has\n seen this data point at training time.\n \"\"\"\n\n def __init__(\n self,\n attack_model: BaseEstimator,\n ):\n \"\"\"\n Instantiate the Loss based attack.\n\n Parameters\n -------\n attack_model: sklearn.base.BaseEstimator\n Typically Threshold classifier, but could also\n be a single feature logistic regression.\n\n \"\"\"\n super(LossBasedBlackBoxAttack, self).__init__(\n attack_model, name=AttackType.LossBasedBlackBoxAttack.name\n )\n\n def transform_attack_data(\n self,\n target_model: TargetModel,\n X_attack,\n y_attack,\n split_type: str = None,\n use_cache: bool = False,\n ):\n \"\"\"\n Takes the input attack points, and calculates loss values on them.\n\n Parameters\n ----------\n target_model: guardian_ai.privacy_estimation.model.TargetModel\n Target model being attacked.\n X_attack: {array-like, sparse matrix} of shape (n_samples, n_features)\n Input features of the attack datapoints, where ``n_samples`` is the number of samples and\n ``n_features`` is the number of features.\n y_attack: ndarray of shape (n_samples,)\n Vector containing the output labels of the attack data points (not membership label).\n split_type: str\n Whether this is \"train\" set or \"test\" set, which is used for Morgan\n attack, which uses cached values of loss and merlin ratios for efficiency.\n use_cache: bool\n Using the cache or not.\n\n Returns\n -------\n X_membership: {array-like, sparse matrix} of shape (n_samples, n_features)\n Input loss value features for the attack model, where ``n_samples`` is\n the number of samples and ``n_features`` is the number of features.\n\n \"\"\"\n labels = target_model.model.classes_\n probs = target_model.get_prediction_probs(X_attack)\n X_membership = -log_loss_vector(\n y_attack, probs, labels=labels\n ) # lower is better\n return X_membership" }, { "identifier": "ConfidenceBasedBlackBoxAttack", "path": "guardian_ai/privacy_estimation/attack.py", "snippet": "class ConfidenceBasedBlackBoxAttack(BlackBoxAttack):\n \"\"\"\n One of the simplest, but fairly effective attack - which looks at the confidence of the\n attack point. 
Attacker hypothesis is that higher confidence indicates that the target\n model has seen this data point at training time.\n \"\"\"\n\n def __init__(self, attack_model: BaseEstimator):\n \"\"\"\n Instantiate the Confidence based attack\n Parameters\n ----------\n attack_model: sklearn.base.BaseEstimator\n Typically Threshold classifier, but could also\n be a single feature logistic regression.\n \"\"\"\n super(ConfidenceBasedBlackBoxAttack, self).__init__(\n attack_model, name=AttackType.ConfidenceBasedBlackBoxAttack.name\n )\n\n def transform_attack_data(\n self,\n target_model: TargetModel,\n X_attack,\n y_attack,\n split_type: str = None,\n use_cache: bool = False,\n ):\n \"\"\"\n Takes the input attack points, and calculates confidence values on them.\n\n Parameters\n ----------\n target_model: guardian_ai.privacy_estimation.model.TargetModel\n Target model being attacked.\n X_attack: {array-like, sparse matrix} of shape (n_samples, n_features)\n Input features of the attack datapoints, where ``n_samples`` is the number of samples and\n ``n_features`` is the number of features.\n y_attack: ndarray of shape (n_samples,)\n Vector containing the output labels of the attack data points (not membership label)\n split_type: str\n Whether this is \"train\" set or \"test\" set, which is used for Morgan\n attack, which uses cached values of loss and merlin ratios for efficiency\n use_cache: bool\n Using the cache or not\n\n Returns\n -------\n X_membership: {array-like, sparse matrix} of shape (n_samples, n_features)\n Input confidence value features for the attack model, where ``n_samples`` is\n the number of samples and ``n_features`` is the number of features.\n\n \"\"\"\n probs = target_model.get_prediction_probs(X_attack)\n X_membership = np.max(probs, 1)\n return X_membership" }, { "identifier": "ExpectedLossBasedBlackBoxAttack", "path": "guardian_ai/privacy_estimation/attack.py", "snippet": "class ExpectedLossBasedBlackBoxAttack(BlackBoxAttack):\n \"\"\"\n Same as Loss based attack, but the difference is that we're going to use a logistic\n regression classifier. 
The only reason we need a separate attack for this is because the\n shape of the attack feature needs to be different.\n \"\"\"\n\n def __init__(self, attack_model: BaseEstimator):\n \"\"\"\n Instantiate the Expected Loss based attack.\n\n Parameters\n ----------\n attack_model: sklearn.base.BaseEstimator\n Typically a single feature logistic regression.\n\n \"\"\"\n\n super(ExpectedLossBasedBlackBoxAttack, self).__init__(\n attack_model, name=AttackType.ExpectedLossBasedBlackBoxAttack.name\n )\n\n def transform_attack_data(\n self,\n target_model: TargetModel,\n X_attack,\n y_attack,\n split_type: str = None,\n use_cache: bool = False,\n ):\n \"\"\"\n Takes the input attack points, and calculates loss values on them.\n\n Parameters\n ----------\n target_model: guardian_ai.privacy_estimation.model.TargetModel\n Target model being attacked.\n X_attack: {array-like, sparse matrix} of shape (n_samples, n_features)\n Input features of the attack datapoints, where ``n_samples`` is the number of samples and\n ``n_features`` is the number of features.\n y_attack: ndarray of shape (n_samples,)\n Vector containing the output labels of the attack data points (not membership label).\n split_type: str\n Whether this is \"train\" set or \"test\" set, which is used for Morgan\n attack, which uses cached values of loss and merlin ratios for efficiency\n use_cache: bool\n Using the cache or not.\n\n Returns\n -------\n X_membership: {array-like, sparse matrix} of shape (n_samples, n_features)\n Input loss value features for the attack model, where `n_samples` is\n the number of samples and `n_features` is the number of features.\n\n \"\"\"\n labels = target_model.model.classes_\n probs = target_model.get_prediction_probs(X_attack)\n X_membership = -log_loss_vector(\n y_attack, probs, labels=labels\n ) # lower is better\n # Note that this is the main difference.\n # We're using the right shape to be used with a classifier with a single feature\n return np.column_stack((X_membership, np.zeros((X_membership.shape[0], 1))))" }, { "identifier": "ExpectedConfidenceBasedBlackBoxAttack", "path": "guardian_ai/privacy_estimation/attack.py", "snippet": "class ExpectedConfidenceBasedBlackBoxAttack(BlackBoxAttack):\n \"\"\"\n Classification based version of the Confidence based attack\n \"\"\"\n\n def __init__(self, attack_model: BaseEstimator):\n \"\"\"\n Instantiate the Expected Confidence based attack\n\n Parameters\n ----------\n attack_model: sklearn.base.BaseEstimator\n Typically a single feature logistic regression.\n\n \"\"\"\n super(ExpectedConfidenceBasedBlackBoxAttack, self).__init__(\n attack_model, name=AttackType.ExpectedConfidenceBasedBlackBoxAttack.name\n )\n\n def transform_attack_data(\n self,\n target_model: TargetModel,\n X_attack,\n y_attack,\n split_type: str = None,\n use_cache: bool = False,\n ):\n \"\"\"\n Takes the input attack points, and calculates loss values on them.\n\n Parameters\n ----------\n target_model: guardian_ai.privacy_estimation.model.TargetModel\n Target model being attacked.\n X_attack: {array-like, sparse matrix} of shape (n_samples, n_features)\n Input features of the attack datapoints, where ``n_samples`` is the number of samples and\n ``n_features`` is the number of features.\n y_attack: ndarray of shape (n_samples,)\n Vector containing the output labels of the attack data points (not membership label).\n split_type: str\n Whether this is \"train\" set or \"test\" set, which is used for Morgan\n attack, which uses cached values of loss and merlin ratios for efficiency\n 
use_cache: bool\n Using the cache or not\n\n Returns\n -------\n X_membership: {array-like, sparse matrix} of shape (n_samples, n_features)\n Input confidence value features for the attack model, where ``n_samples`` is\n the number of samples and ``n_features`` is the number of features.\n \"\"\"\n probs = target_model.get_prediction_probs(X_attack)\n X_membership = np.max(probs, 1)\n return np.column_stack((X_membership, np.zeros((X_membership.shape[0], 1))))" }, { "identifier": "ThresholdClassifier", "path": "guardian_ai/privacy_estimation/attack.py", "snippet": "class ThresholdClassifier(BaseEstimator, ClassifierMixin):\n \"\"\"\n Base Classifier for all threshold based attacks. For a given attack point with just\n a single feature, a threshold based classifier predicts if that feature value is over\n a threshold value.\n \"\"\"\n\n def __init__(self, threshold: float = 0.5):\n \"\"\"\n Instantiate the classifier\n\n Parameters\n ----------\n threshold: float, Default value is 0.5.\n This threshold is usually tuned.\n\n \"\"\"\n self.parameters = {}\n self.classes_ = None\n self.parameters[\"threshold\"] = threshold\n\n def fit(self, X, y):\n \"\"\"\n Fit the data to the classifier, but because this is a simple threshold classifier, fit\n doesn't really do much, except record the data and the domain of the class labels.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Input features of the attack model, where ``n_samples`` is the number of samples and\n ``n_features`` is the number of features.\n y: array-like of shape (n_samples,)\n Output label of the attack model (usually 0/1).\n\n Returns\n -------\n ThresholdClassifier\n The trained classifier.\n\n \"\"\"\n self.classes_, y = np.unique(y, return_inverse=True)\n self.X_ = X\n self.y = y\n return self\n\n def predict(self, X):\n \"\"\"\n Make prediction using the decision function of the classifier.\n\n Parameters\n ----------\n X: {array-like, sparse matrix} of shape (n_samples, n_features)\n Input features of the attack datapoints, where ``n_samples`` is the number of samples and\n ``n_features`` is the number of features.\n\n Returns\n -------\n y_pred : ndarray of shape (n_samples,)\n Vector containing the membership labels for each attack point.\n\n \"\"\"\n d = self.decision_function(X)\n return self.classes_[np.argmax(d, axis=1)]\n\n def decision_function(self, X):\n \"\"\"\n For a given attack point with just a single feature, a threshold based classifier\n predicts if that feature value is over a threshold value.\n\n Parameters\n ----------\n X: {array-like, sparse matrix} of shape (n_samples, n_features)\n Input features of the attack datapoints, where ``n_samples`` is the number of samples and\n ``n_features`` is the number of features. 
For ThresholdClassifier, it's usually just\n a single feature, but can be more.\n\n Returns\n -------\n Binary decision ndarray of shape (n_samples,) or (n_samples, n_classes)\n The feature value over a certain threshold.\n\n \"\"\"\n check_is_fitted(self)\n\n threshold = self.parameters[\"threshold\"]\n if hasattr(self, \"threshold\"):\n threshold = self.threshold\n\n d_true = X >= threshold\n\n index_of_true = np.where(self.classes_ == 1)\n if index_of_true == 0:\n d = np.column_stack((d_true, np.zeros((X.shape[0], 1))))\n else:\n d = np.column_stack((np.zeros((X.shape[0], 1)), d_true))\n return d\n\n def get_params(self, deep: bool = True):\n \"\"\"\n Get parameters for this estimator.\n\n Parameters\n ----------\n deep: bool, default is True.\n If True, will return the parameters for this estimator and contained\n subobjects that are estimators.\n\n Returns\n -------\n dict\n Parameter names mapped to their values.\n \"\"\"\n return self.parameters\n\n def set_params(self, **parameters):\n \"\"\"\n Set estimator parametes.\n\n Parameters\n ----------\n parameters: dict\n Estimator parameters.\n\n Returns\n -------\n Estimator instance.\n\n \"\"\"\n for parameter, value in parameters.items():\n setattr(self, parameter, value)\n return self" }, { "identifier": "CombinedBlackBoxAttack", "path": "guardian_ai/privacy_estimation/combined_attacks.py", "snippet": "class CombinedBlackBoxAttack(BlackBoxAttack):\n \"\"\"\n Similar in spirit to the Morgan attack, which combines loss and the merlin ratio.\n In this attack, we combine loss, and confidence values and instead of tuning the\n thresholds, we combine them using a trained classifier, like stacking.\n \"\"\"\n\n def __init__(\n self,\n attack_model: BaseEstimator,\n loss_attack: LossBasedBlackBoxAttack = None,\n confidence_attack: ConfidenceBasedBlackBoxAttack = None,\n ):\n \"\"\"\n Initialize CombinedBlackBoxAttack.\n\n Parameters\n ----------\n attack_model: sklearn.base.BaseEstimator\n loss_attack: guardian_ai.privacy_estimation.attack.LossBasedBlackBoxAttack\n confidence_attack: guardian_ai.privacy_estimation.attack.ConfidenceBasedBlackBoxAttack\n\n \"\"\"\n self.loss_attack = loss_attack\n self.confidence_attack = confidence_attack\n super(CombinedBlackBoxAttack, self).__init__(\n attack_model, name=AttackType.CombinedBlackBoxAttack.name\n )\n\n def transform_attack_data(\n self,\n target_model: TargetModel,\n X_attack,\n y_attack,\n split_type: str = None,\n use_cache=False,\n ):\n \"\"\"\n Overriding the method transform_attack_data from the base class.\n Calculates the per instance loss and confidence.\n\n Parameters\n ----------\n target_model: guardian_ai.privacy_estimation.model.TargetModel\n Target model being attacked.\n X_attack: {array-like, sparse matrix} of shape (n_samples, n_features)\n Input features of the attack datapoints, where ``n_samples`` is the number of samples and\n ``n_features`` is the number of features.\n y_attack: ndarray of shape (n_samples,)\n Vector containing the output labels of the attack data points (not membership label).\n split_type: str\n Use information cached from running the loss based and merlin attacks\n use_cache: bool\n Using the cache or not\n\n Returns\n -------\n X_membership: {array-like, sparse matrix} of shape (n_samples, n_features),\n where ``n_samples`` is the number of samples and ``n_features`` is\n the number of features.\n Input feature for the attack model - in this case,\n per-instance loss and confidence values\n\n \"\"\"\n if use_cache:\n if split_type == \"train\":\n 
my_per_instance_loss = self.loss_attack.X_membership_train\n my_confidence = self.confidence_attack.X_membership_train\n elif split_type == \"test\":\n my_per_instance_loss = self.loss_attack.X_membership_test\n my_confidence = self.confidence_attack.X_membership_test\n else:\n raise Exception(\"split type specified is not cached\")\n else:\n labels = target_model.model.classes_\n probs = target_model.get_prediction_probs(X_attack)\n my_per_instance_loss = -log_loss_vector(y_attack, probs, labels=labels)\n my_confidence = np.max(probs, 1)\n X_membership = np.column_stack((my_per_instance_loss, my_confidence))\n return X_membership" }, { "identifier": "CombinedWithMerlinBlackBoxAttack", "path": "guardian_ai/privacy_estimation/combined_attacks.py", "snippet": "class CombinedWithMerlinBlackBoxAttack(BlackBoxAttack):\n \"\"\"\n Similar in spirit to the Morgan attack, which combines loss and the merlin ratio.\n In this attack, we combine loss, confidence values and merlin ratio,\n and instead of tuning the thresholds, we combine them using\n a trained classifier, like stacking.\n \"\"\"\n\n def __init__(\n self,\n attack_model: BaseEstimator,\n merlin_attack: MerlinAttack, # this must be passed\n loss_attack: LossBasedBlackBoxAttack = None,\n confidence_attack: ConfidenceBasedBlackBoxAttack = None,\n ):\n self.merlin_attack = merlin_attack\n self.loss_attack = loss_attack\n self.confidence_attack = confidence_attack\n super(CombinedWithMerlinBlackBoxAttack, self).__init__(\n attack_model, name=AttackType.CombinedWithMerlinBlackBoxAttack.name\n )\n\n def transform_attack_data(\n self,\n target_model: TargetModel,\n X_attack,\n y_attack,\n split_type: str = None,\n use_cache: bool = False,\n ):\n \"\"\"\n Overriding the method transform_attack_data from the base class.\n Calculates the Merlin ratio, and combines it with per instance loss and confidence\n\n Parameters\n ----------\n target_model: guardian_ai.privacy_estimation.model.TargetModel\n Target model being attacked.\n X_attack: {array-like, sparse matrix} of shape (n_samples, n_features)\n Input features of the attack datapoints, where ``n_samples`` is the number of samples and\n ``n_features`` is the number of features.\n y_attack: ndarray of shape (n_samples,)\n Vector containing the output labels of the attack data points (not membership label).\n split_type: str\n Use information cached from running the loss based and merlin attacks\n use_cache: bool\n Using the cache or not\n\n Returns\n -------\n X_membership: {array-like, sparse matrix} of shape (n_samples, n_features),\n where ``n_samples`` is the number of samples and ``n_features`` is\n the number of features.\n Input feature for the attack model - in this case the Merlin\n ratio, per-instance loss and confidence values.\n\n \"\"\"\n if use_cache:\n if split_type == \"train\":\n my_per_instance_loss = self.loss_attack.X_membership_train\n my_confidence = self.confidence_attack.X_membership_train\n merlin_ratio = self.merlin_attack.X_membership_train\n elif split_type == \"test\":\n my_per_instance_loss = self.loss_attack.X_membership_test\n my_confidence = self.confidence_attack.X_membership_test\n merlin_ratio = self.merlin_attack.X_membership_test\n else:\n raise Exception(\"split type specified is not cached\")\n else:\n labels = target_model.model.classes_\n probs = target_model.get_prediction_probs(X_attack)\n my_per_instance_loss = -log_loss_vector(y_attack, probs, labels=labels)\n my_confidence = np.max(probs, 1)\n merlin_ratio = self.merlin_attack.get_merlin_ratio(\n 
target_model, X_attack, y_attack\n )\n X_membership = np.column_stack(\n (my_per_instance_loss, my_confidence, merlin_ratio)\n )\n return X_membership" }, { "identifier": "MerlinAttack", "path": "guardian_ai/privacy_estimation/merlin_attack.py", "snippet": "class MerlinAttack(BlackBoxAttack):\n \"\"\"\n Implements the Merlin Attack as described in the paper: Revisiting Membership Inference\n Under Realistic Assumptions by Jayaraman et al.\n The main idea is to perturb a data point, and calculate noise on all the data points in\n this neighborhood. If the loss of large fraction of these points is above the target point,\n it might imply that the target point is in a local minima, and therefore the model might\n have fitted around it, implying it might have seen it at training time.\n \"\"\"\n\n def __init__(\n self,\n attack_model: BaseEstimator,\n noise_type: str = \"gaussian\",\n noise_coverage: str = \"full\",\n noise_magnitude: float = 0.01,\n max_t: int = 50,\n ):\n \"\"\"\n These default values are mostly taken from the original implementation of this attack.\n\n Parameters\n ----------\n attack_model: sklearn.base.BaseEstimator\n The type of attack model to be used.\n Typically, it's ThresholdClassifier.\n noise_type: str\n Choose the type of noise to add based on the data.\n Supports uniform and gaussian.\n noise_coverage: str\n Add noise to all attributes (\"full\") or only a subset.\n noise_magnitude: float\n Size of the noise.\n max_t: int\n The number of noisy points to generate to calculate the Merlin Ratio.\n\n \"\"\"\n self.noise_type = noise_type\n self.noise_coverage = noise_coverage\n self.noise_magnitude = noise_magnitude\n self.max_t = max_t\n super(MerlinAttack, self).__init__(\n attack_model, name=AttackType.MerlinAttack.name\n )\n\n def generate_noise(self, shape: np.shape, dtype):\n \"\"\"\n Generate noise to be added to the target data point.\n\n Parameters\n ----------\n shape: : np.shape\n Shape of the target data point\n dtype: np.dtype\n Datatype of the target data point\n\n Returns\n -------\n {array-like}\n Noise generated according to the parameters to match the shape of the target.\n\n \"\"\"\n noise = np.zeros(shape, dtype=dtype)\n if self.noise_coverage == \"full\":\n if self.noise_type == \"uniform\":\n noise = np.array(\n np.random.uniform(0, self.noise_magnitude, size=shape), dtype=dtype\n )\n else:\n noise = np.array(\n np.random.normal(0, self.noise_magnitude, size=shape), dtype=dtype\n )\n else:\n attr = np.random.randint(shape[1])\n if self.noise_type == \"uniform\":\n noise[:, attr] = np.array(\n np.random.uniform(0, self.noise_magnitude, size=shape[0]),\n dtype=dtype,\n )\n else:\n noise[:, attr] = np.array(\n np.random.normal(0, self.noise_magnitude, size=shape[0]),\n dtype=dtype,\n )\n return noise\n\n def get_merlin_ratio(self, target_model: TargetModel, X_attack, y_attack):\n \"\"\"\n Returns the merlin-ratio for the Merlin attack.\n\n Parameters\n ----------\n target_model: guardian_ai.privacy_estimation.model.TargetModel\n Model that is being targeted by the attack.\n X_attack: {array-like, sparse matrix} of shape (n_samples, n_features)\n Input features of the attack datapoints, where ``n_samples`` is the number of samples and\n ``n_features`` is the number of features.\n\n y_attack: ndarray of shape (n_samples,)\n Vector containing the output labels of the attack data points (not membership label).\n\n Returns\n -------\n float\n Merlin Ratio. 
Value between 0 and 1.\n\n \"\"\"\n\n labels = target_model.model.classes_\n pred_y = target_model.get_prediction_probs(X_attack)\n my_per_instance_loss = log_loss_vector(y_attack, pred_y, labels=labels)\n counts = np.zeros((X_attack).shape[0])\n for _t in range(self.max_t):\n noise = self.generate_noise(X_attack.shape, X_attack.dtype)\n if sp.issparse(X_attack):\n noise = sp.csr_matrix(noise)\n noisy_x = X_attack + noise\n predictions = target_model.get_prediction_probs(noisy_x)\n my_noisy_per_instance_loss = log_loss_vector(\n y_attack, predictions, labels=labels\n )\n counts += np.where(my_noisy_per_instance_loss > my_per_instance_loss, 1, 0)\n return counts / self.max_t\n\n def transform_attack_data(\n self,\n target_model: TargetModel,\n X_attack,\n y_attack,\n split_type: str = None,\n use_cache=False,\n ):\n \"\"\"\n Overriding the method transform_attack_data from the base class.\n Calculates the merlin ratio.\n\n Parameters\n ----------\n target_model: guardian_ai.privacy_estimation.model.TargetModel\n Target model being attacked.\n X_attack: {array-like, sparse matrix} of shape (n_samples, n_features)\n Input features of the attack datapoints, where ``n_samples`` is the number of samples and\n ``n_features`` is the number of features.\n y_attack: ndarray of shape (n_samples,)\n Vector containing the output labels of the attack data points (not membership label).\n split_type: str\n Use information cached from running the loss based and merlin attacks.\n use_cache: bool\n Using the cache or not.\n\n Returns\n -------\n X_membership: {array-like, sparse matrix} of shape (n_samples, n_features),\n where ``n_samples`` is the number of samples and ``n_features`` is\n the number of features.\n Input feature for the attack model - in this case, the Merlin\n ratio.\n\n \"\"\"\n X_membership = self.get_merlin_ratio(target_model, X_attack, y_attack)\n return X_membership" }, { "identifier": "MorganAttack", "path": "guardian_ai/privacy_estimation/morgan_attack.py", "snippet": "class MorganAttack(BlackBoxAttack):\n \"\"\"\n Implements the Morgan Attack as described in the paper: Revisiting Membership Inference\n Under Realistic Assumptions by Jayaraman et al.\n The main idea is to combine the merlin ratio and per instance loss using multiple thresholds.\n \"\"\"\n\n def __init__(\n self,\n attack_model: BaseEstimator,\n loss_attack: LossBasedBlackBoxAttack,\n merlin_attack: MerlinAttack,\n ):\n \"\"\"\n Initialize MorganAttack.\n\n Parameters\n ----------\n attack_model: sklearn.base.BaseEstimator\n Base attack model. 
Usually the Morgan Classifier.\n loss_attack: guardian_ai.privacy_estimation.attack.LossBasedBlackBoxAttack\n Loss attack object.\n merlin_attack: guardian_ai.privacy_estimation.merlin_attack.MerlinAttack\n Merlin attack object.\n\n \"\"\"\n self.loss_attack = loss_attack\n self.merlin_attack = merlin_attack\n super(MorganAttack, self).__init__(\n attack_model, name=AttackType.MorganAttack.name\n )\n\n def transform_attack_data(\n self,\n target_model: TargetModel,\n X_attack,\n y_attack,\n split_type: str = None,\n use_cache=False,\n ):\n \"\"\"\n Overriding the method transform_attack_data from the base class.\n Calculates the Merlin ratio, and combines it with per instance loss.\n\n Parameters\n ----------\n target_model: guardian_ai.privacy_estimation.model\n Target model being attacked.\n X_attack: {array-like, sparse matrix} of shape (n_samples, n_features)\n Input features of the attack datapoints, where ``n_samples`` is the number of samples and\n ``n_features`` is the number of features.\n y_attack: ndarray of shape (n_samples,)\n Vector containing the output labels of the attack data points (not membership label).\n split_type: str\n Use information cached from running the loss based and merlin attacks.\n use_cache: bool\n Using the cache or not.\n\n Returns\n -------\n X_membership: {array-like, sparse matrix} of shape (n_samples, n_features),\n where ``n_samples`` is the number of samples and ``n_features`` is\n the number of features.\n Input feature for the attack model - in this case the Merlin ratio\n and per-instance loss.\n\n \"\"\"\n if use_cache:\n if split_type == \"train\":\n my_per_instance_loss = self.loss_attack.X_membership_train\n merlin_ratio = self.merlin_attack.X_membership_train\n elif split_type == \"test\":\n my_per_instance_loss = self.loss_attack.X_membership_test\n merlin_ratio = self.merlin_attack.X_membership_test\n else:\n raise Exception(\"split type specified is not cached\")\n else:\n labels = target_model.model.classes_\n pred_y = target_model.get_prediction_probs(X_attack)\n my_per_instance_loss = -log_loss_vector(y_attack, pred_y, labels=labels)\n merlin_ratio = self.merlin_attack.get_merlin_ratio(\n target_model, X_attack, y_attack\n )\n X_membership = np.column_stack((my_per_instance_loss, merlin_ratio))\n return X_membership" }, { "identifier": "MorganClassifier", "path": "guardian_ai/privacy_estimation/morgan_attack.py", "snippet": "class MorganClassifier(ThresholdClassifier):\n \"\"\"\n Implements the Morgan Attack as described in the paper: Revisiting Membership Inference\n Under Realistic Assumptions by Jayaraman et al.\n The main idea is to combine the merlin ratio and per instance loss using multiple\n thresholds. 
This classifier goes along with the Morgan Attack, which implements a\n custom decision function that combines the three thresholds.\n \"\"\"\n\n def __init__(\n self,\n loss_lower_threshold: float,\n merlin_threshold: float,\n threshold: float = 0.5,\n ):\n \"\"\"\n Morgan attack uses three thresholds, of which, two are given and one is tuned.\n\n Parameters\n ----------\n loss_lower_threshold: float\n Lower threshold on the per instance loss.\n merlin_threshold: float\n Threshold on the merlin ration.\n threshold: float\n Upper threshold on the per instance loss.\n\n \"\"\"\n super(MorganClassifier, self).__init__(threshold)\n self.parameters[\"loss_lower_threshold\"] = loss_lower_threshold\n # I'm doing it this way, since the attack tuner calls a clone object,\n # which messes up this constructor\n self.parameters[\"merlin_threshold\"] = merlin_threshold\n\n def predict(self, X):\n \"\"\"\n Calls the custom decision function that is required for the Morgan attack\n\n Parameters\n ----------\n X: {array-like, sparse matrix} of shape (n_samples, n_features)\n Input features of the attack datapoints, where ``n_samples`` is the number of samples and\n ``n_features`` is the number of features.\n\n Returns\n -------\n y_pred : ndarray of shape (n_samples,)\n Vector containing the membership labels for each attack point.\n \"\"\"\n d = self.decision_function(X)\n return self.classes_[np.argmax(d, axis=1)]\n\n def decision_function(self, X):\n \"\"\"\n Custom decision function that applies the three thresholds of the Morgan attack\n\n Parameters\n ----------\n X: {array-like, sparse matrix} of shape (n_samples, n_features)\n Input features of the attack datapoints, where ``n_samples`` is the number of samples and\n ``n_features`` is the number of features.\n\n Returns\n -------\n Binary decision ndarray of shape (n_samples,) or (n_samples, n_classes)\n The feature value over a certain threshold.\n\n \"\"\"\n check_is_fitted(self)\n\n threshold = self.parameters[\"threshold\"]\n if hasattr(self, \"threshold\"):\n threshold = self.threshold\n assert X.shape[1] == 2\n\n d_true = (\n (self.parameters[\"loss_lower_threshold\"] <= X[:, 0])\n & (X[:, 0] <= threshold)\n & (X[:, 1] >= self.parameters[\"merlin_threshold\"])\n )\n\n # create the decision vector\n index_of_true = np.where(self.classes_ == 1)\n if index_of_true == 0:\n d = np.column_stack((d_true, np.zeros((X.shape[0], 1))))\n else:\n d = np.column_stack((np.zeros((X.shape[0], 1)), d_true))\n return d" }, { "identifier": "TargetModel", "path": "guardian_ai/privacy_estimation/model.py", "snippet": "class TargetModel:\n \"\"\"\n Wrapper for the target model that is being attacked.\n For now, we're only supporting sklearn classifiers that implement .predict_proba\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Create the target model that is being attacked, and check that it's a classifier\n \"\"\"\n self.model = self.get_model()\n assert base.is_classifier(self.model)\n\n @abstractmethod\n def get_model(self):\n \"\"\"\n Create the target model that is being attacked.\n\n Returns\n -------\n Model that is not yet trained.\n \"\"\"\n pass\n\n def train_model(self, x_train, y_train):\n \"\"\"\n Train the model that is being attacked.\n\n Parameters\n ----------\n x_train: {array-like, sparse matrix} of shape (n_samples, n_features),\n where ``n_samples`` is the number of samples and ``n_features`` is the number of features.\n Input variables of the training set for the target model.\n y_train: ndarray of shape (n_samples,)\n Output labels of the 
training set for the target model.\n\n Returns\n -------\n Trained model\n\n \"\"\"\n return self.model.fit(x_train, y_train)\n\n def test_model(self, x_test, y_test):\n \"\"\"\n Test the model that is being attacked.\n\n Parameters\n ----------\n x_test: {array-like, sparse matrix} of shape (n_samples, n_features),\n where ``n_samples`` is the number of samples and ``n_features`` is the number of features.\n Input variables of the test set for the target model.\n y_test: ndarray of shape (n_samples,)\n Output labels of the test set for the target model.\n\n Returns\n -------\n None\n\n \"\"\"\n predictions = self.model.predict(x_test)\n print(classification_report(y_test, predictions))\n\n def get_f1(self, x_test, y_test):\n \"\"\"\n Gets f1 score.\n\n Parameters\n ----------\n x_test: {array-like, sparse matrix} of shape (n_samples, n_features),\n where ``n_samples`` is the number of samples and ``n_features`` is the number of features.\n y_test: ndarray of shape (n_samples,)\n\n \"\"\"\n predictions = self.model.predict(x_test)\n return f1_score(y_test, predictions, average=\"macro\")\n\n def get_predictions(self, X):\n \"\"\"\n Gets model prediction.\n\n Parameters\n ----------\n {array-like, sparse matrix} of shape (n_samples, n_features),\n where ``n_samples`` is the number of samples and ``n_features`` is the number of features.\n\n \"\"\"\n return self.model.predict(X)\n\n def get_prediction_probs(self, X):\n \"\"\"\n Gets model proba.\n\n Parameters\n ----------\n X: {array-like, sparse matrix} of shape (n_samples, n_features),\n where ``n_samples`` is the number of samples and ``n_features`` is the number of features.\n\n \"\"\"\n probs = []\n try:\n probs = self.model.predict_proba(X)\n except NotImplementedError:\n print(\"This classifier doesn't output probabilities\")\n return probs\n\n def save_model(self, filename):\n \"\"\"\n Save model.\n\n Parameters\n ----------\n filename: FileDescriptorOrPath\n\n \"\"\"\n pickle.dump(self.model, open(filename, \"wb\"))\n\n def load_model(self, filename):\n \"\"\"\n Load model.\n\n Parameters\n ----------\n filename: FileDescriptorOrPath\n\n \"\"\"\n self.model = pickle.load(open(filename, \"rb\"))\n\n def get_model_name(self):\n \"\"\"Get default model name.\"\"\"\n return \"default_target_model\"" } ]
from guardian_ai.privacy_estimation.dataset import (
    ClassificationDataset,
    TargetModelData,
    AttackModelData,
)
from guardian_ai.privacy_estimation.attack import (
    AttackType,
    LossBasedBlackBoxAttack,
    ConfidenceBasedBlackBoxAttack,
    ExpectedLossBasedBlackBoxAttack,
    ExpectedConfidenceBasedBlackBoxAttack,
    ThresholdClassifier,
)
from guardian_ai.privacy_estimation.combined_attacks import (
    CombinedBlackBoxAttack,
    CombinedWithMerlinBlackBoxAttack,
)
from guardian_ai.privacy_estimation.merlin_attack import MerlinAttack
from guardian_ai.privacy_estimation.morgan_attack import MorganAttack, MorganClassifier
from guardian_ai.privacy_estimation.model import TargetModel
from typing import List, Dict
from sklearn.linear_model import LogisticRegression
13339
#!/usr/bin/env python # -*- coding: utf-8 -*-- # Copyright (c) 2023 Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at # https://oss.oracle.com/licenses/upl/ class AttackRunner: """ Class that can run the specified attacks against specified target models using the given dataset """ def __init__( self, dataset: ClassificationDataset, target_models: List[TargetModel], attacks: List[AttackType], threshold_grids, ): """ Initialize AttackRunner. Parameters ---------- dataset: ClassificationDataset Dataset that has been split and prepared for running the attacks target_models: List[TargetModel] Target models to run the attacks against attacks: Dict[str:List[float]], List of attacks to run. Use the pattern AttackType.LossBasedBlackBoxAttack.name Returns ------- AttackRunner """ self.dataset = dataset assert self.dataset.target_model_data is not None assert self.dataset.attack_model_data is not None self.target_models = target_models self.attacks = attacks self.threshold_grids = threshold_grids self.target_model_result_strings = {} self.attack_cache = {} def train_target_models(self): for target_model in self.target_models: print("Target Model: " + target_model.get_model_name()) target_model_data: TargetModelData = self.dataset.target_model_data classifier = target_model.train_model( target_model_data.X_target_train, target_model_data.y_target_train ) print("Target Model Train Evaluation: ") target_model.test_model( target_model_data.X_target_train, target_model_data.y_target_train ) train_f1 = target_model.get_f1( target_model_data.X_target_train, target_model_data.y_target_train ) print("Target Model Test Evaluation: ") target_model.test_model( target_model_data.X_target_test, target_model_data.y_target_test ) test_f1 = target_model.get_f1( target_model_data.X_target_test, target_model_data.y_target_test ) result_string = ( target_model.get_model_name() + "\t" + str(train_f1) + "\t" + str(test_f1) ) self.target_model_result_strings[ target_model.get_model_name() ] = result_string def _get_attack_object( self, attack_type: AttackType, target_model: TargetModel, # need this for Morgan Attack use_cache: bool = False, ): """ Instantiate the attack object of the specified attack_type. Some complex attack types may require training simpler attacks first if they have not been cached. Parameters ---------- attack_type: AttackType Type of the attack to instantiate target_model: TargetModel Target model is required to train simpler attacks as needed use_cache: bool Use attacks previously cached Returns ------- Attack Attack object """ attack = None if attack_type == AttackType.LossBasedBlackBoxAttack: attack = LossBasedBlackBoxAttack(ThresholdClassifier()) elif attack_type == AttackType.ExpectedLossBasedBlackBoxAttack: attack = ExpectedLossBasedBlackBoxAttack(LogisticRegression())
#!/usr/bin/env python # -*- coding: utf-8 -*-- # Copyright (c) 2023 Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at # https://oss.oracle.com/licenses/upl/ class AttackRunner: """ Class that can run the specified attacks against specified target models using the given dataset """ def __init__( self, dataset: ClassificationDataset, target_models: List[TargetModel], attacks: List[AttackType], threshold_grids, ): """ Initialize AttackRunner. Parameters ---------- dataset: ClassificationDataset Dataset that has been split and prepared for running the attacks target_models: List[TargetModel] Target models to run the attacks against attacks: Dict[str:List[float]], List of attacks to run. Use the pattern AttackType.LossBasedBlackBoxAttack.name Returns ------- AttackRunner """ self.dataset = dataset assert self.dataset.target_model_data is not None assert self.dataset.attack_model_data is not None self.target_models = target_models self.attacks = attacks self.threshold_grids = threshold_grids self.target_model_result_strings = {} self.attack_cache = {} def train_target_models(self): for target_model in self.target_models: print("Target Model: " + target_model.get_model_name()) target_model_data: TargetModelData = self.dataset.target_model_data classifier = target_model.train_model( target_model_data.X_target_train, target_model_data.y_target_train ) print("Target Model Train Evaluation: ") target_model.test_model( target_model_data.X_target_train, target_model_data.y_target_train ) train_f1 = target_model.get_f1( target_model_data.X_target_train, target_model_data.y_target_train ) print("Target Model Test Evaluation: ") target_model.test_model( target_model_data.X_target_test, target_model_data.y_target_test ) test_f1 = target_model.get_f1( target_model_data.X_target_test, target_model_data.y_target_test ) result_string = ( target_model.get_model_name() + "\t" + str(train_f1) + "\t" + str(test_f1) ) self.target_model_result_strings[ target_model.get_model_name() ] = result_string def _get_attack_object( self, attack_type: AttackType, target_model: TargetModel, # need this for Morgan Attack use_cache: bool = False, ): """ Instantiate the attack object of the specified attack_type. Some complex attack types may require training simpler attacks first if they have not been cached. Parameters ---------- attack_type: AttackType Type of the attack to instantiate target_model: TargetModel Target model is required to train simpler attacks as needed use_cache: bool Use attacks previously cached Returns ------- Attack Attack object """ attack = None if attack_type == AttackType.LossBasedBlackBoxAttack: attack = LossBasedBlackBoxAttack(ThresholdClassifier()) elif attack_type == AttackType.ExpectedLossBasedBlackBoxAttack: attack = ExpectedLossBasedBlackBoxAttack(LogisticRegression())
elif attack_type == AttackType.ConfidenceBasedBlackBoxAttack:
5
2023-10-09 09:48:50+00:00
16k
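Note on the record above: its context documents a family of membership inference attacks, the simplest being LossBasedBlackBoxAttack, which thresholds the target model's per-instance loss (lower loss suggests the point was seen in training). The sketch below is only a hedged illustration of that core idea; per_instance_log_loss and loss_threshold_attack are illustrative names, not guardian-ai APIs (the library itself uses log_loss_vector and ThresholdClassifier).

import numpy as np

def per_instance_log_loss(probs: np.ndarray, y_true: np.ndarray) -> np.ndarray:
    # probs: [n_samples, n_classes] predicted probabilities from the target model;
    # y_true: integer class labels aligned with the probability columns.
    eps = 1e-12
    p_true = np.clip(probs[np.arange(len(y_true)), y_true], eps, 1.0)
    return -np.log(p_true)

def loss_threshold_attack(probs: np.ndarray, y_true: np.ndarray, threshold: float) -> np.ndarray:
    # Predict membership (1) when the per-instance loss falls below the threshold,
    # i.e. the target model fits this point unusually well.
    return (per_instance_log_loss(probs, y_true) < threshold).astype(int)

if __name__ == "__main__":
    probs = np.array([[0.9, 0.1], [0.4, 0.6], [0.2, 0.8]])
    labels = np.array([0, 0, 1])
    print(loss_threshold_attack(probs, labels, threshold=0.5))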