repo_name (stringlengths 7-71) | file_path (stringlengths 5-118) | context (list) | import_statement (stringlengths 45-12.5k) | token_num (int64 641-99.4k) | cropped_code (stringlengths 44-17k) | all_code (stringlengths 43-754k) | next_line (stringlengths 2-330) | gold_snippet_index (int64 0-68) | created_at (stringlengths 25) | level (stringclasses 9 values)
---|---|---|---|---|---|---|---|---|---|---
giulio98/functional-diffusion-processes | src/functional_diffusion_processes/losses/mse_loss.py | [
{
"identifier": "BaseMAML",
"path": "src/functional_diffusion_processes/models/base_maml.py",
"snippet": "class BaseMAML(nn.Module, abc.ABC):\n \"\"\"Abstract model class for implementing Model-Agnostic Meta-Learning (MAML).\n\n The Model-Agnostic Meta-Learning (MAML) algorithm is designed to train models\n in a manner that they can be fine-tuned for new tasks with a small number of examples.\n This implementation is based on the MAML algorithm introduced in the paper\n \"Model-Agnostic Meta-Learning for Fast Adaptation of Deep Networks\"\n (https://arxiv.org/abs/1703.03400).\n\n Attributes:\n model_config (DictConfig): Configuration dictionary for the model.\n optimizer_inner (optax.GradientTransformation): Inner optimizer configuration.\n inner_steps (int): Number of inner optimization steps.\n\n Methods:\n __call__(self, inputs: jnp.ndarray) -> jnp.ndarray: Implement the forward pass of the model.\n initialize_model(self, rng: jax.random.PRNGKey, batch_input: jnp.ndarray) -> FrozenDict[str, Mapping[str, Any]]: Initialize the model with dummy inputs.\n initialize_input(self, shape: Tuple[int, ...]) -> jnp.ndarray: Create input tensor for the model based on the specified shape.\n make_update_params_fn(self) -> Callable[..., Tuple[jax.random.PRNGKey, jnp.ndarray, jnp.ndarray]]: Create a function to update the model parameters.\n make_update_inner_fn(self, optimizer_inner: optax.GradientTransformation, n_steps: int) -> Callable[..., Tuple[jnp.ndarray, jnp.ndarray]]: Create a function to update model parameters for inner optimization.\n make_predict_fn(self) -> Callable[..., jnp.ndarray]: Creates a function for making predictions with the model.\n \"\"\"\n\n model_config: DictConfig\n optimizer_inner: optax.GradientTransformation\n inner_steps: int\n\n @abc.abstractmethod\n @nn.compact\n def __call__(self, inputs: jnp.ndarray) -> jnp.ndarray:\n \"\"\"Implement the forward pass of the model.\n\n Args:\n inputs (jnp.ndarray): Input tensor to the model.\n\n Returns:\n jnp.ndarray: Output tensor from the model.\n\n Raises:\n NotImplementedError: If this method is not overridden by a derived class.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the __call__ method.\")\n\n def initialize_model(self, rng: jax.random.PRNGKey, batch_input: jnp.ndarray) -> FrozenDict[str, Mapping[str, Any]]:\n \"\"\"Initialize the model with dummy inputs.\n\n This method initializes the model parameters by passing a batch of dummy inputs\n through the model. 
This is a common practice to infer the dimensions of the model's\n parameters.\n\n Args:\n rng (jax.random.PRNGKey): A random key for generating initial model parameters.\n batch_input (jnp.ndarray): A batch of dummy inputs for initializing the model.\n\n Returns:\n FrozenDict[str, Mapping[str, Any]]: The initialized model parameters.\n \"\"\"\n self.optimizer_inner = hydra.utils.instantiate(self.optimizer_inner)\n return self.init(rng, batch_input)\n\n def initialize_input(self, shape: Tuple[int, ...]) -> jnp.ndarray:\n \"\"\"Create input tensor for the model based on the specified shape.\n\n Args:\n shape (Tuple[int, ...]): Shape of the input tensor.\n\n Returns:\n jnp.ndarray: Initialized input tensor.\n \"\"\"\n batch_size = shape[0]\n num_channels = shape[-1]\n grid_size = shape[1:-1]\n if not self.model_config.y_input:\n num_channels = None\n coordinates = make_coordinates(batch_size, grid_size, num_channels)\n return coordinates\n\n def make_update_params_fn(self) -> Callable[..., Tuple[jax.random.PRNGKey, jnp.ndarray, jnp.ndarray]]:\n \"\"\"Create a function to update the model parameters.\n\n This method creates a function that performs the forward pass of the model\n and updates the model parameters.\n\n Returns:\n Callable[..., Tuple[jax.random.PRNGKey, jnp.ndarray, jnp.ndarray]]: Function to update model parameters.\n \"\"\"\n update_inner_fn = self.make_update_inner_fn(\n optimizer_inner=self.optimizer_inner,\n n_steps=self.inner_steps,\n )\n\n def apply_forward(\n rng: jax.random.PRNGKey,\n params: Params,\n batch_input: jnp.ndarray,\n batch_corrupted: jnp.ndarray,\n psm: jnp.ndarray,\n ) -> Tuple[jax.random.PRNGKey, jnp.ndarray, jnp.ndarray]:\n \"\"\"Apply the (outer) forward pass and update the model parameters.\n\n Args:\n rng (jax.random.PRNGKey): Random key.\n params (Params): Initial model parameters.\n batch_input (jnp.ndarray): Input tensor to the model.\n batch_corrupted (jnp.ndarray): Corrupted version of the output tensor.\n psm (jnp.ndarray): Power special matrix.\n\n Returns:\n Tuple[jax.random.PRNGKey, jnp.ndarray, jnp.ndarray]: A tuple containing a new random key, the model output, and the inner loss.\n \"\"\"\n params_adapted, loss_inner = update_inner_fn(params, batch_input, batch_corrupted, psm)\n model_output = jax.vmap(self.apply)(params_adapted, batch_input)\n\n return rng, model_output, loss_inner\n\n return apply_forward\n\n def make_update_inner_fn(\n self, optimizer_inner: optax.GradientTransformation, n_steps: int\n ) -> Callable[[Params, jnp.ndarray, jnp.ndarray, jnp.ndarray], Tuple[jnp.ndarray, jnp.ndarray]]:\n \"\"\"Create a function to update model parameters for inner optimization.\n\n This method creates a function that performs the inner optimization updates\n during the meta-training phase, which is a key component of the MAML algorithm.\n\n Args:\n optimizer_inner (optax.GradientTransformation): The optimizer used for inner optimization.\n n_steps (int): The number of optimization steps.\n\n Returns:\n Callable[..., Tuple[jnp.ndarray, jnp.ndarray]]: Function to update model parameters for inner optimization.\n \"\"\"\n\n @partial(jax.vmap, in_axes=0)\n @partial(jax.grad, has_aux=True)\n def loss_inner_fn(params_i: Params, batch_input: T, y_corrupted: T, psm: T) -> T:\n \"\"\"Computes the loss for inner optimization.\n\n This inner method computes the loss for inner optimization by comparing\n the model's output against the corrupted batch using mean square error.\n The method is vectorized using JAX's vmap function for efficiency.\n\n 
Args:\n params_i (Params): Model parameters.\n batch_input (T): Input batch.\n y_corrupted (T): Corrupted batch.\n psm (T): Power special matrix.\n\n Returns:\n T: Loss value.\n \"\"\"\n c = y_corrupted.shape[-1]\n model_output = self.apply(params_i, batch_input)\n if len(psm.shape) == 3:\n model_output_freq = jnp.fft.fft2(model_output.reshape(*psm.shape[:-1], c), norm=\"ortho\", axes=(0, 1))\n y_corrupted_freq = jnp.fft.fft2(y_corrupted.reshape(*psm.shape[:-1], c), norm=\"ortho\", axes=(0, 1))\n else:\n model_output_freq = jnp.fft.fft(model_output.reshape(*psm.shape[:-1], c), norm=\"ortho\", axis=0)\n y_corrupted_freq = jnp.fft.fft(y_corrupted.reshape(*psm.shape[:-1], c), norm=\"ortho\", axis=0)\n mse = mean_square_error(\n y_corrupted_freq.reshape(-1, c),\n model_output_freq.reshape(-1, c),\n psm.reshape(-1, 1),\n )\n loss: jnp.ndarray = jnp.mean(mse)\n\n return loss, loss\n\n def apply_inner_forward(\n params: Params, batch_input: jnp.ndarray, batch_corrupted: jnp.ndarray, psm: jnp.ndarray\n ):\n \"\"\"Applies inner forward pass for updating model parameters.\n\n Args:\n params (Params): Model parameters.\n batch_input (jnp.ndarray): Input batch.\n batch_corrupted (jnp.ndarray): Corrupted batch.\n psm (jnp.ndarray): Power special matrix.\n\n Returns:\n Tuple[jnp.ndarray, jnp.ndarray]: Updated model parameters and inner loss.\n \"\"\"\n\n def inner_opt_loop(\n carry: Tuple[Params, jnp.ndarray, int, Any, jnp.ndarray], _: None\n ) -> Tuple[Tuple[Params, jnp.ndarray, int, Any, jnp.ndarray], None]:\n \"\"\"Inner optimization loop for updating model parameters.\n\n Args:\n carry (Tuple[Params, jnp.ndarray, int, optax.OptState, jnp.ndarray]): Tuple containing model parameters,\n loss vector, iteration index, optimizer state, and corrupted batch.\n _ (None): A throwaway variable as no second argument is used in this function.\n\n Returns:\n Tuple[Params, jnp.ndarray, int, optax.OptState, jnp.ndarray]: Updated tuple with new model parameters,\n updated loss vector, incremented iteration index, updated optimizer state, and corrupted batch.\n \"\"\"\n params_i, loss_inner_vec, it, opt_inner_state_params, batch_corrupted_i = carry\n\n grad_params, (loss) = loss_inner_fn(params_i, batch_input, batch_corrupted_i, psm)\n loss_inner_vec = loss_inner_vec.at[it].set(jnp.mean(loss))\n\n if self.model_config.use_dense_lr:\n # separate learning rates from grad_params\n grad_params_true, _ = separate_learning_rates(unfreeze(grad_params))\n\n # separate learning rates from params_i\n params_i_true, learning_rates = separate_learning_rates(unfreeze(params_i))\n\n # calculate updates using meta-sgd\n updates_params = jax.tree_map(\n lambda g, lr: -jnp.clip(lr, 0, 1) * g,\n grad_params_true,\n learning_rates,\n )\n\n # merge updates_params and learning_rates\n merged_updates = merge_learning_rates(unfreeze(updates_params), unfreeze(learning_rates))\n params_i1 = optax.apply_updates(params_i, merged_updates)\n\n # after update of params clip learning rates to [0, 1]\n params_i1 = clip_learning_rates(params_i1)\n else:\n updates_params, opt_state = optimizer_inner.update(grad_params, opt_inner_state_params, params_i)\n params_i1 = optax.apply_updates(params_i, updates_params)\n return (\n params_i1,\n loss_inner_vec,\n it + 1,\n opt_inner_state_params,\n batch_corrupted,\n ), _\n\n base_params = jax.tree_map(\n lambda base_param: jnp.stack(\n [\n base_param,\n ]\n * batch_input.shape[0],\n axis=0,\n ),\n params,\n )\n loss_inner = jnp.zeros((n_steps,))\n i = 0\n initial_state = (\n base_params,\n loss_inner,\n 
i,\n optimizer_inner.init(base_params),\n batch_corrupted,\n )\n params_adapted, loss_inner, *_ = jax.lax.scan(inner_opt_loop, initial_state, xs=None, length=n_steps)[0]\n return params_adapted, loss_inner\n\n return apply_inner_forward\n\n def make_predict_fn(\n self,\n ) -> Callable[\n [Params, jnp.ndarray, jnp.ndarray, jnp.ndarray, jnp.ndarray, jnp.ndarray, Optional[jnp.ndarray]], jnp.ndarray\n ]:\n \"\"\"Creates a function for making predictions with the model.\n\n This method creates a function that can be used to make predictions with the model.\n\n Returns:\n Callable[..., jnp.ndarray]: A function for making predictions with the model.\n \"\"\"\n update_inner_fn = self.make_update_inner_fn(\n optimizer_inner=self.optimizer_inner,\n n_steps=self.inner_steps,\n )\n\n def predict(\n params: Params,\n batch_corrupted: jnp.ndarray,\n batch_input: jnp.ndarray,\n time: jnp.ndarray,\n psm: jnp.ndarray,\n shape: jnp.ndarray,\n ) -> jnp.ndarray:\n \"\"\"Make predictions using the model.\n\n Args:\n params (Params): Model parameters.\n batch_corrupted (jnp.ndarray): Corrupted version of the output tensor.\n batch_input (jnp.ndarray): Input tensor to the model.\n time (jnp.ndarray): Time tensor.\n psm (jnp.ndarray): Power special matrix.\n shape (jnp.ndarray): Shape of the input tensor.\n\n Returns:\n jnp.ndarray: Reconstructed output tensor.\n \"\"\"\n b, g, c = batch_corrupted.shape\n t_aux = jnp.reshape(time, (b, 1, 1))\n t_aux = jnp.broadcast_to(t_aux, (b, g, 1)) * 2 - 1\n batch_input = batch_input.at[:, :, -1:].set(t_aux)\n if self.model_config.y_input:\n batch_input = batch_input.at[:, :, len(shape) : len(shape) + c].set(batch_corrupted)\n params_adapted, _ = update_inner_fn(params, batch_input, batch_corrupted, psm)\n batch_reconstructed = jax.vmap(self.apply)(params_adapted, batch_input)\n\n return batch_reconstructed\n\n return predict\n\n def make_super_resolution_fn(\n self,\n ) -> Callable[\n [Params, jnp.ndarray, jnp.ndarray, jnp.ndarray, jnp.ndarray, jnp.ndarray, Optional[jnp.ndarray]], jnp.ndarray\n ]:\n \"\"\"Creates a function for making super resolution output with the model.\n\n This method creates a function that can be used to make super resolution task with the model.\n\n Returns:\n Callable[..., jnp.ndarray]: A function for making super resolution output with the model.\n \"\"\"\n update_inner_fn = self.make_update_inner_fn(\n optimizer_inner=self.optimizer_inner,\n n_steps=self.inner_steps,\n )\n\n def super_resolution_fn(\n params: Params,\n batch_corrupted: jnp.ndarray,\n batch_input: jnp.ndarray,\n time: jnp.ndarray,\n psm: jnp.ndarray,\n shape: jnp.ndarray,\n target_shape: Optional[jnp.ndarray] = None,\n ) -> jnp.ndarray:\n \"\"\"Make last prediction for super resolution task using the model.\n\n Args:\n params (Params): Model parameters.\n batch_corrupted (jnp.ndarray): Corrupted version of the output tensor.\n batch_input (jnp.ndarray): Input tensor to the model.\n time (jnp.ndarray): Time tensor.\n psm (jnp.ndarray): Power special matrix.\n shape (jnp.ndarray): Shape of the input tensor.\n target_shape (Optional[jnp.ndarray]): Target shape of the output tensor.\n\n Returns:\n jnp.ndarray: Reconstructed output tensor at super-resolution.\n \"\"\"\n b, g, c = batch_corrupted.shape\n t_aux = jnp.reshape(time, (b, 1, 1))\n t_aux = jnp.broadcast_to(t_aux, (b, g, 1)) * 2 - 1\n batch_input = batch_input.at[:, :, -1:].set(t_aux)\n if self.model_config.y_input:\n batch_input = batch_input.at[:, :, len(shape) : len(shape) + c].set(batch_corrupted)\n params_adapted, _ = 
update_inner_fn(params, batch_input, batch_corrupted, psm)\n if self.model_config.y_input:\n batch_reconstructed = jax.vmap(self.apply)(params_adapted, batch_input)\n batch_input = batch_input.at[:, :, len(shape) : len(shape) + c].set(batch_reconstructed)\n batch_input = batch_input.reshape((b, *shape, -1))\n\n new_h, new_w = target_shape\n\n batch_input_new = jax.image.resize(batch_input, (b, new_h, new_w, batch_input.shape[-1]), method=\"bilinear\")\n batch_input_new = batch_input_new.reshape((b, new_h * new_w, -1))\n batch_reconstructed = jax.vmap(self.apply)(params_adapted, batch_input_new)\n\n return batch_reconstructed\n\n return super_resolution_fn"
},
{
"identifier": "BaseViT",
"path": "src/functional_diffusion_processes/models/base_vit.py",
"snippet": "class BaseViT(nn.Module, abc.ABC):\n \"\"\"Abstract base class for Vision Transformer (ViT) models.\n\n Introduced in the paper \"An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale\" (https://arxiv.org/abs/2010.11929).\n\n Attributes:\n model_config (DictConfig): Configuration dictionary for the model.\n \"\"\"\n\n model_config: DictConfig\n\n @abc.abstractmethod\n @nn.compact\n def __call__(self, inputs: jnp.ndarray, train: bool) -> jnp.ndarray:\n \"\"\"Performs the forward pass of the model.\n\n Args:\n inputs (jnp.ndarray): Input data.\n train (bool): Indicates whether the model is in training mode.\n\n Returns:\n jnp.ndarray: Model's output.\n\n Raises:\n NotImplementedError: If this method is not overridden by a derived class.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the __call__ method.\")\n\n def initialize_model(self, rng: jax.random.PRNGKey, batch_input: jnp.ndarray) -> FrozenDict[str, Mapping[str, Any]]:\n \"\"\"Initializes the model with dummy inputs.\n\n Args:\n rng (jax.random.PRNGKey): The random number generator key.\n batch_input (jnp.ndarray): The input data for batch.\n\n Returns:\n FrozenDict[str, Mapping[str, Any]]: The initialized model.\n \"\"\"\n return self.init(rng, batch_input, train=False)\n\n @staticmethod\n def initialize_input(shape: Tuple[int, ...]) -> jnp.ndarray:\n \"\"\"Creates input for the model based on the specified shape.\n\n Args:\n shape (Tuple[int, ...]): The shape of the input.\n\n Returns:\n jnp.ndarray: The created input.\n \"\"\"\n batch_size = shape[0]\n num_channels = shape[-1]\n grid_size = shape[1:-1]\n coordinates = make_coordinates(batch_size, grid_size, num_channels)\n return coordinates\n\n def make_update_params_fn(self) -> Callable:\n \"\"\"Creates a function to update model parameters.\n\n Returns:\n Callable: The created function to update model parameters.\n \"\"\"\n\n def apply_forward(\n rng: jax.random.PRNGKey, params: Params, batch_input: jnp.ndarray, batch_corrupted: jnp.ndarray, psm: Any\n ) -> Tuple[jax.random.PRNGKey, jnp.ndarray, None]: # noqa\n \"\"\"Updates model parameters in a forward pass.\n\n Args:\n rng (jax.random.PRNGKey): The random number generator key.\n params (Params): The model parameters.\n batch_input (jnp.ndarray): The input data for the batch.\n batch_corrupted (jnp.ndarray): The corrupted version of the output tensor.\n psm (Any): Power special matrix.\n\n Returns:\n Tuple[jax.random.PRNGKey, jnp.ndarray, None]: A tuple containing a new random key,\n the model output, and the inner loss (which is None in this case).\n \"\"\"\n _, new_rng = jax.random.split(rng)\n dropout_rng = jax.random.fold_in(rng, jax.lax.axis_index(\"device\"))\n model_output = self.apply(params, rngs={\"dropout\": dropout_rng}, inputs=batch_input, train=True)\n loss_inner = None\n return new_rng, model_output, loss_inner\n\n return apply_forward\n\n def make_predict_fn(self) -> Callable:\n \"\"\"Creates a function for making predictions with the model.\n\n Returns:\n Callable: The created function for making predictions.\n \"\"\"\n\n def predict(\n params: Params,\n batch_corrupted: jnp.ndarray,\n batch_input: jnp.ndarray,\n time: jnp.ndarray,\n psm: jnp.ndarray,\n shape: Tuple[int, ...],\n ) -> jnp.ndarray: # noqa\n \"\"\"Makes predictions with the model.\n\n Args:\n params (Params): The model parameters.\n batch_corrupted (jnp.ndarray): The corrupted version of the output tensor.\n batch_input (jnp.ndarray): The input data for the batch.\n time 
(jnp.ndarray): The time tensor.\n psm (jnp.ndarray): Power special matrix.\n shape (Tuple[int, ...]): The shape of the input tensor.\n\n Returns:\n jnp.ndarray: The model's output.\n \"\"\"\n b, g, c = batch_corrupted.shape\n t_aux = jnp.reshape(time, (b, 1, 1))\n t_aux = jnp.broadcast_to(t_aux, (b, g, 1))\n batch_input = batch_input.at[:, :, -1:].set(t_aux)\n batch_input = batch_input.at[:, :, len(shape) : len(shape) + c].set(batch_corrupted)\n model_output = self.apply(params, batch_input, train=False)\n return model_output\n\n return predict"
},
{
"identifier": "SDE",
"path": "src/functional_diffusion_processes/sdetools/base_sde.py",
"snippet": "class SDE(abc.ABC):\n \"\"\"Abstract base class for representing Stochastic Differential Equations (SDEs).\n\n This class provides a structured way to define and work with SDEs, including computing\n Fourier transforms, discretizing the equations, and defining the drift and diffusion terms.\n\n Attributes:\n sde_config (DictConfig): Configuration object containing SDE settings.\n T (float): Total time duration.\n N (int): Number of time steps.\n eps (float): Small constant for numerical stability.\n is_unidimensional (bool): Flag indicating if the SDE is unidimensional.\n \"\"\"\n\n def __init__(self, sde_config: DictConfig) -> None:\n \"\"\"Initializes the SDE with the given configuration.\n\n Args:\n sde_config (DictConfig): Configuration object containing SDE settings.\n \"\"\"\n super().__init__()\n self.sde_config = sde_config\n self.T = self.sde_config.T\n self.N = self.sde_config.N\n self.eps = self.sde_config.eps\n self.is_unidimensional = True if len(self.sde_config.shape) == 1 else False\n\n def fourier_transform(self, state: jnp.ndarray) -> jnp.ndarray:\n \"\"\"Computes the Fourier transform of the given state.\n\n This method can handle both vectorized and non-vectorized input states.\n\n Args:\n state (jnp.ndarray): State whose Fourier transform is to be computed.\n\n Returns:\n jnp.ndarray: Fourier transform of the given state.\n \"\"\"\n return (\n jnp.fft.fft(state, norm=\"ortho\", axis=1)\n if self.is_unidimensional\n else jnp.fft.fft2(state, norm=\"ortho\", axes=(1, 2))\n )\n\n def inverse_fourier_transform(self, state: jnp.ndarray) -> jnp.ndarray:\n \"\"\"Computes the inverse Fourier transform of the given state.\n\n This method can handle both vectorized and non-vectorized input states.\n\n Args:\n state (jnp.ndarray): State whose inverse Fourier transform is to be computed.\n\n Returns:\n jnp.ndarray: Inverse Fourier transform of the given state.\n \"\"\"\n return (\n jnp.fft.ifft(state, norm=\"ortho\", axis=1)\n if self.is_unidimensional\n else jnp.fft.ifft2(state, norm=\"ortho\", axes=(1, 2))\n )\n\n @abc.abstractmethod\n def sde(\n self,\n y_corrupted: jnp.ndarray,\n t: jnp.ndarray,\n rng: Optional[PRNGKeyArray] = None,\n y_reconstructed: Optional[jnp.ndarray] = None,\n ) -> Tuple[jnp.ndarray, jnp.ndarray]:\n \"\"\"Abstract method to compute the drift and diffusion terms of the SDE.\n\n Args:\n y_corrupted (jnp.ndarray): Corrupted state of the system.\n t (jnp.ndarray): Current time.\n rng (Optional[PRNGKeyArray], optional): Random number generator. Defaults to None.\n y_reconstructed (Optional[jnp.ndarray], optional): Reconstructed state of the system. Defaults to None.\n\n Returns:\n Tuple[jnp.ndarray, jnp.ndarray]: Tuple containing the drift and diffusion terms of the SDE.\n\n Raises:\n NotImplementedError: If this method is not overridden by a derived class.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the sde method.\")\n\n @abc.abstractmethod\n def marginal_prob(\n self,\n rng: PRNGKeyArray,\n x: jnp.ndarray,\n t: jnp.ndarray,\n t0: Optional[jnp.ndarray] = None,\n ) -> Tuple[Any, jnp.ndarray | Any]:\n \"\"\"Computes the marginal probability density at a given time.\n\n This is an abstract method that should be overridden by subclasses to\n compute the marginal probability density based on the state and time.\n\n Args:\n rng (PRNGKeyArray): Random number generator.\n x (jnp.ndarray): State of the system.\n t (jnp.ndarray): Current time.\n t0 (Optional[jnp.ndarray], optional): Initial time. 
Defaults to None.\n\n Returns:\n Tuple[Any, jnp.ndarray | Any]: Marginal probability density at the given time.\n\n Raises:\n NotImplementedError: If this method is not overridden by a derived class.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the marginal_prob method.\")\n\n @abc.abstractmethod\n def diffuse(\n self, rng: PRNGKeyArray, x: jnp.ndarray, t: jnp.ndarray, t0: Optional[jnp.ndarray] = None\n ) -> Tuple[jnp.ndarray, jnp.ndarray]:\n \"\"\"Performs diffusion of the input from time t0 to time t.\n\n This is an abstract method that should be overridden by subclasses to\n implement the diffusion process based on the state and time.\n\n Args:\n rng (PRNGKeyArray): Random number generator.\n x (jnp.ndarray): Input state.\n t (jnp.ndarray): Current time.\n t0 (Optional[jnp.ndarray], optional): Initial time. Defaults to None.\n\n Returns:\n Tuple[jnp.ndarray, jnp.ndarray]: Mean of the corrupted input and the corrupted input.\n\n Raises:\n NotImplementedError: If this method is not overridden by a derived class.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the diffuse method.\")\n\n @abc.abstractmethod\n def prior_sampling(\n self, rng: PRNGKeyArray, shape: Tuple[int, ...], t0: Optional[jnp.ndarray] = None\n ) -> jnp.ndarray:\n \"\"\"Generates a sample from the prior distribution of the SDE.\n\n This is an abstract method that should be overridden by subclasses to\n implement the prior sampling process based on the shape and initial time.\n\n Args:\n rng (PRNGKeyArray): Random number generator.\n shape (Tuple[int, ...]): Shape of the sample to be generated.\n t0 (Optional[jnp.ndarray], optional): Initial time. Defaults to None.\n\n Returns:\n jnp.ndarray: A sample from the prior distribution of the SDE.\n\n Raises:\n NotImplementedError: If this method is not overridden by a derived class.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the prior_sampling method.\")\n\n @abc.abstractmethod\n def score_fn(\n self, y_corrupted: jnp.ndarray, y_reconstructed: jnp.ndarray, t: jnp.ndarray, rng: Optional[PRNGKeyArray] = None\n ) -> jnp.ndarray:\n \"\"\"Computes the score function based on the corrupted and reconstructed states.\n\n This is an abstract method that should be overridden by subclasses to\n compute the score function based on the state and time.\n\n Args:\n y_corrupted (jnp.ndarray): Corrupted state of the system.\n y_reconstructed (jnp.ndarray): Reconstructed state of the system.\n t (jnp.ndarray): Current time.\n rng (Optional[PRNGKeyArray], optional): Random number generator. 
Defaults to None.\n\n Returns:\n jnp.ndarray: The score function.\n\n Raises:\n NotImplementedError: If this method is not overridden by a derived class.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the score_fn method.\")\n\n @abc.abstractmethod\n def get_psm(self, t: jnp.ndarray) -> jnp.ndarray:\n \"\"\"Computes the Power-Special-Matrix(PSM) used as a weighting factor for the loss.\n\n This is an abstract method that should be overridden by subclasses to\n compute the state-dependent diffusion matrix based on the time.\n\n Args:\n t (jnp.ndarray): Current time.\n\n Returns:\n jnp.ndarray: The state-dependent diffusion matrix.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the get_psm method.\")\n\n @abc.abstractmethod\n def get_reverse_noise(self, rng: PRNGKeyArray, shape: Tuple[int, ...]) -> jnp.ndarray:\n \"\"\"Generates noise for the reverse SDE.\n\n This is an abstract method that should be overridden by subclasses to\n generate reverse noise based on the shape.\n\n Args:\n rng (PRNGKeyArray): Random number generator.\n shape (Tuple[int, ...]): Shape of the noise to be generated.\n\n Returns:\n jnp.ndarray: The reverse noise.\n\n Raises:\n NotImplementedError: If this method is not overridden by a derived class.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the get_reverse_noise method.\")\n\n def discretize(\n self,\n y_corrupted: jnp.ndarray,\n t: jnp.ndarray,\n y_reconstructed: Optional[jnp.ndarray] = None,\n ) -> Tuple[jnp.ndarray, jnp.ndarray]:\n \"\"\"Discretizes the SDE into an iterative update rule.\n\n This method computes the discrete drift and diffusion terms based on the continuous SDE.\n\n Args:\n y_corrupted (jnp.ndarray): Corrupted state of the system.\n t (jnp.ndarray): Current time.\n y_reconstructed (Optional[jnp.ndarray], optional): Reconstructed state of the system. Defaults to None.\n\n Returns:\n Tuple[jnp.ndarray, jnp.ndarray]: Tuple containing the discrete drift and diffusion terms.\n \"\"\"\n dt = (self.T - self.eps) / self.N\n drift, diffusion = self.sde(y_corrupted, t, y_reconstructed)\n f = drift * dt\n g = diffusion * jnp.sqrt(dt)\n return f, g\n\n def reverse(self):\n \"\"\"Creates a reverse-time version of the current SDE.\n\n This method defines a nested class for the reverse-time SDE and returns an instance of it.\n\n Returns:\n ReverseSDE: An instance of the reverse-time SDE subclass.\n \"\"\"\n num_time_steps = self.N\n end_t = self.T\n sde_fn = self.sde\n discretize_fn = self.discretize\n score_fn = self.score_fn\n sde_config = self.sde_config\n\n class ReverseSDE(self.__class__, abc.ABC):\n \"\"\"Reverse Stochastic Differential Equation abstract base class.\"\"\"\n\n def __init__(self) -> None:\n \"\"\"Initialize the ReverseSDE class.\n\n Inherits the properties from the original SDE class and overrides the relevant methods for the\n reverse-time SDE.\n \"\"\"\n super().__init__(sde_config)\n self.N = num_time_steps\n self.T = end_t\n self.score_fn = score_fn\n\n def sde(\n self,\n y_corrupted: jnp.ndarray,\n t: jnp.ndarray,\n rng: Optional[PRNGKeyArray] = None,\n y_reconstructed: Optional[jnp.ndarray] = None,\n ) -> Tuple[jnp.ndarray, jnp.ndarray]:\n \"\"\"Return the drift and diffusion terms for the reverse-time SDE.\n\n Args:\n y_corrupted (jnp.ndarray): Corrupted state of the system.\n t (jnp.ndarray): Current time.\n rng (Optional[PRNGKeyArray], optional): Random number generator. 
Defaults to None.\n y_reconstructed (Optional[jnp.ndarray], optional): Reconstructed state of the system. Defaults to None.\n\n Returns:\n Tuple[jnp.ndarray, jnp.ndarray]: Drift and diffusion terms for the reverse-time SDE.\n \"\"\"\n drift, diffusion = sde_fn(y_corrupted, t, y_reconstructed)\n score = self.score_fn(y_corrupted, y_reconstructed, t, rng=rng)\n drift = -drift + batch_mul(diffusion**2, score * (0.5 if self.sde_config.probability_flow else 1.0))\n # Set the diffusion function to zero for ODEs.\n diffusion = jnp.zeros_like(diffusion) if self.sde_config.probability_flow else diffusion\n return drift, diffusion\n\n def discretize(\n self,\n y_corrupted: jnp.ndarray,\n t: jnp.ndarray,\n rng: Optional[PRNGKeyArray] = None,\n y_reconstructed: Optional[jnp.ndarray] = None,\n ) -> Tuple[jnp.ndarray, jnp.ndarray]:\n \"\"\"Discretizes the reverse-time SDE in the form of an iterative update rule.\n\n Args:\n y_corrupted (jnp.ndarray): Corrupted state of the system.\n t (jnp.ndarray): Current time.\n rng (Optional[PRNGKeyArray], optional): Random number generator. Defaults to None.\n y_reconstructed (Optional[jnp.ndarray], optional): Reconstructed state of the system. Defaults to None.\n\n Returns:\n Tuple[jnp.ndarray, jnp.ndarray]: Drift and diffusion terms for the discretized reverse-time SDE.\n \"\"\"\n f, g = discretize_fn(y_corrupted, t, y_corrupted)\n rev_f = -f + batch_mul(\n g**2,\n self.score_fn(y_corrupted, y_reconstructed, t, rng=rng)\n * (0.5 if self.sde_config.probability_flow else 1.0),\n )\n rev_g = jnp.zeros_like(g) if self.sde_config.probability_flow else g\n return rev_f, rev_g\n\n def semi_analytic(\n self,\n y_corrupted: jnp.ndarray,\n t: jnp.ndarray,\n rng: Optional[PRNGKeyArray] = None,\n y_reconstructed: Optional[jnp.ndarray] = None,\n ) -> Tuple[jnp.ndarray, jnp.ndarray]:\n \"\"\"Computes the semi-analytic drift and diffusion terms for the reverse-time SDE.\n\n Args:\n y_corrupted (jnp.ndarray): Corrupted state of the system.\n t (jnp.ndarray): Current time.\n rng (Optional[PRNGKeyArray], optional): Random number generator. Defaults to None.\n y_reconstructed (Optional[jnp.ndarray], optional): Reconstructed state of the system. Defaults to None.\n\n Returns:\n Tuple[jnp.ndarray, jnp.ndarray]: Drift and diffusion terms for the semi-analytic reverse-time SDE.\n \"\"\"\n _, diffusion = sde_fn(y_corrupted, t, y_reconstructed)\n score = self.score_fn(y_corrupted, y_reconstructed, t, rng=rng)\n drift = batch_mul(diffusion**2, score * (0.5 if self.sde_config.probability_flow else 1.0))\n diffusion = jnp.zeros_like(diffusion) if self.sde_config.probability_flow else diffusion\n return drift, diffusion\n\n return ReverseSDE()"
},
{
"identifier": "batch_mul",
"path": "src/functional_diffusion_processes/utils/common.py",
"snippet": "def batch_mul(a: jnp.ndarray, b: jnp.ndarray) -> jnp.ndarray:\n \"\"\"Perform element-wise multiplication of two arrays.\n\n Args:\n a: First array.\n b: Second array.\n\n Returns:\n The element-wise multiplication of the two arrays.\n \"\"\"\n return jax.vmap(lambda x, y: x * y)(a, b)"
}
] | import abc
import jax
import jax.numpy as jnp
from functools import partial
from typing import Any, Callable, TypeVar, Union
from flax.core import FrozenDict
from jax.random import PRNGKeyArray
from omegaconf import DictConfig
from ..models import BaseMAML, BaseViT
from ..sdetools import SDE
from ..utils.common import batch_mul | 9,087 |
Params = FrozenDict[str, Any]
T = TypeVar("T")
class MSELoss(abc.ABC):
"""Abstract class for computing Mean Squared Error (MSE) Loss.
Provides a structure for constructing a loss function to compute the MSE
loss between model predictions and real data, with potential modifications
for different domains (frequency or normal) and scheduling.
Attributes:
sde (SDE): An instance of stochastic differential equation to be used to calculate the weight factor in loss computation.
loss_config (DictConfig): A configuration object holding parameters for loss computation.
"""
def __init__(self, sde: SDE, loss_config: DictConfig) -> None:
"""Initializes the MSELoss instance with SDE object and loss configuration.
Args:
sde (SDE): An object representing the stochastic differential equation.
loss_config (DictConfig): A configuration object holding parameters for loss computation.
"""
self.sde = sde
self.loss_config = loss_config
|
Params = FrozenDict[str, Any]
T = TypeVar("T")
class MSELoss(abc.ABC):
"""Abstract class for computing Mean Squared Error (MSE) Loss.
Provides a structure for constructing a loss function to compute the MSE
loss between model predictions and real data, with potential modifications
for different domains (frequency or normal) and scheduling.
Attributes:
sde (SDE): An instance of stochastic differential equation to be used to calculate the weight factor in loss computation.
loss_config (DictConfig): A configuration object holding parameters for loss computation.
"""
def __init__(self, sde: SDE, loss_config: DictConfig) -> None:
"""Initializes the MSELoss instance with SDE object and loss configuration.
Args:
sde (SDE): An object representing the stochastic differential equation.
loss_config (DictConfig): A configuration object holding parameters for loss computation.
"""
self.sde = sde
self.loss_config = loss_config
| def construct_loss_fn(self, model: Union[BaseMAML, BaseViT]) -> Callable: | 1 | 2023-10-24 22:01:35+00:00 | 12k |
KosinskiLab/pyTME | tme/tests/test_matching_exhaustive.py | [
{
"identifier": "scan",
"path": "tme/matching_exhaustive.py",
"snippet": "@device_memory_handler\ndef scan(\n matching_data: MatchingData,\n matching_setup: Callable,\n matching_score: Callable,\n n_jobs: int = 4,\n callback_class: CallbackClass = None,\n callback_class_args: Dict = {},\n fftargs: Dict = {},\n pad_fourier: bool = True,\n interpolation_order: int = 3,\n jobs_per_callback_class: int = 8,\n **kwargs,\n) -> Tuple:\n \"\"\"\n Perform template matching between target and template and sample\n different rotations of template.\n\n Parameters\n ----------\n matching_data : MatchingData\n Template matching data.\n matching_setup : Callable\n Function pointer to setup function.\n matching_score : Callable\n Function pointer to scoring function.\n n_jobs : int, optional\n Number of parallel jobs. Default is 4.\n callback_class : type, optional\n Analyzer class pointer to operate on computed scores.\n callback_class_args : dict, optional\n Arguments passed to the callback_class. Default is an empty dictionary.\n fftargs : dict, optional\n Arguments for the FFT operations. Default is an empty dictionary.\n pad_fourier: bool, optional\n Whether to pad target and template to the full convolution shape.\n interpolation_order : int, optional\n Order of spline interpolation for rotations.\n jobs_per_callback_class : int, optional\n How many jobs should be processed by a single callback_class instance,\n if ones is provided.\n **kwargs : various\n Additional arguments.\n\n Returns\n -------\n Tuple\n The merged results from callback_class if provided otherwise None.\n \"\"\"\n matching_data.to_backend()\n fourier_pad = matching_data._templateshape\n fourier_shift = backend.zeros(len(fourier_pad))\n if not pad_fourier:\n fourier_pad = backend.full(shape=fourier_shift.shape, fill_value=1, dtype=int)\n\n convolution_shape, fast_shape, fast_ft_shape = backend.compute_convolution_shapes(\n matching_data._target.shape, fourier_pad\n )\n if not pad_fourier:\n fourier_shift = 1 - backend.astype(\n backend.divide(matching_data._templateshape, 2), int\n )\n fourier_shift -= backend.mod(matching_data._templateshape, 2)\n fourier_shift = backend.flip(fourier_shift, axis=(0,))\n shape_diff = backend.subtract(fast_shape, convolution_shape)\n shape_diff = backend.astype(backend.divide(shape_diff, 2), int)\n backend.add(fourier_shift, shape_diff, out=fourier_shift)\n\n callback_class_args[\"fourier_shift\"] = fourier_shift\n rfftn, irfftn = backend.build_fft(\n fast_shape=fast_shape,\n fast_ft_shape=fast_ft_shape,\n real_dtype=matching_data._default_dtype,\n complex_dtype=matching_data._complex_dtype,\n fftargs=fftargs,\n )\n setup = matching_setup(\n rfftn=rfftn,\n irfftn=irfftn,\n template=matching_data.template,\n template_mask=matching_data.template_mask,\n target=matching_data.target,\n target_mask=matching_data.target_mask,\n fast_shape=fast_shape,\n fast_ft_shape=fast_ft_shape,\n real_dtype=matching_data._default_dtype,\n complex_dtype=matching_data._complex_dtype,\n callback_class=callback_class,\n callback_class_args=callback_class_args,\n **kwargs,\n )\n rfftn, irfftn = None, None\n\n template_filter, preprocessor = None, Preprocessor()\n for method, parameters in matching_data.template_filter.items():\n parameters[\"shape\"] = fast_shape\n parameters[\"omit_negative_frequencies\"] = True\n out = preprocessor.apply_method(method=method, parameters=parameters)\n if template_filter is None:\n template_filter = out\n np.multiply(template_filter, out, out=template_filter)\n\n if template_filter is None:\n template_filter = backend.full(\n shape=(1,), 
fill_value=1, dtype=backend._default_dtype\n )\n else:\n template_filter = backend.to_backend_array(template_filter)\n\n template_filter = backend.astype(template_filter, backend._default_dtype)\n template_filter_buffer = backend.arr_to_sharedarr(\n arr=template_filter,\n shared_memory_handler=kwargs.get(\"shared_memory_handler\", None),\n )\n setup[\"template_filter\"] = (\n template_filter_buffer,\n template_filter.shape,\n template_filter.dtype,\n )\n\n callback_class_args[\"translation_offset\"] = backend.astype(\n matching_data._translation_offset, int\n )\n callback_class_args[\"thread_safe\"] = n_jobs > 1\n callback_class_args[\"gpu_index\"] = kwargs.get(\"gpu_index\", -1)\n\n n_callback_classes = max(n_jobs // jobs_per_callback_class, 1)\n callback_class = setup.pop(\"callback_class\", callback_class)\n callback_class_args = setup.pop(\"callback_class_args\", callback_class_args)\n callback_classes = [callback_class for _ in range(n_callback_classes)]\n if callback_class == MaxScoreOverRotations:\n score_space_shape = backend.subtract(\n matching_data.target.shape,\n matching_data._target_pad,\n )\n callback_classes = [\n class_name(\n score_space_shape=score_space_shape,\n score_space_dtype=matching_data._default_dtype,\n shared_memory_handler=kwargs.get(\"shared_memory_handler\", None),\n rotation_space_dtype=backend._default_dtype_int,\n **callback_class_args,\n )\n for class_name in callback_classes\n ]\n\n matching_data._target, matching_data._template = None, None\n matching_data._target_mask, matching_data._template_mask = None, None\n\n setup[\"fftargs\"] = fftargs.copy()\n convolution_mode = \"same\"\n if backend.sum(matching_data._target_pad) > 0:\n convolution_mode = \"valid\"\n setup[\"convolution_mode\"] = convolution_mode\n setup[\"interpolation_order\"] = interpolation_order\n rotation_list = matching_data._split_rotations_on_jobs(n_jobs)\n\n backend.free_cache()\n\n def _run_scoring(backend_name, backend_args, rotations, **kwargs):\n from tme.backends import backend\n\n backend.change_backend(backend_name, **backend_args)\n return matching_score(rotations=rotations, **kwargs)\n\n callbacks = Parallel(n_jobs=n_jobs)(\n delayed(_run_scoring)(\n backend_name=backend._backend_name,\n backend_args=backend._backend_args,\n rotations=rotation,\n callback_class=callback_classes[index % n_callback_classes],\n callback_class_args=callback_class_args,\n **setup,\n )\n for index, rotation in enumerate(rotation_list)\n )\n\n callbacks = [\n tuple(callback)\n for callback in callbacks[0:n_callback_classes]\n if callback is not None\n ]\n backend.free_cache()\n\n merged_callback = None\n if callback_class is not None:\n merged_callback = callback_class.merge(\n callbacks,\n **callback_class_args,\n score_indices=matching_data.indices,\n inner_merge=True,\n )\n\n return merged_callback"
},
{
"identifier": "scan_subsets",
"path": "tme/matching_exhaustive.py",
"snippet": "def scan_subsets(\n matching_data: MatchingData,\n matching_score: Callable,\n matching_setup: Callable,\n callback_class: CallbackClass = None,\n callback_class_args: Dict = {},\n job_schedule: Tuple[int] = (1, 1),\n target_splits: Dict = {},\n template_splits: Dict = {},\n pad_target_edges: bool = False,\n pad_fourier: bool = True,\n interpolation_order: int = 3,\n jobs_per_callback_class: int = 8,\n **kwargs,\n) -> Tuple:\n \"\"\"\n Wrapper around :py:meth:`scan` that supports template matching on splits\n of template and target.\n\n Parameters\n ----------\n matching_data : MatchingData\n Template matching data.\n matching_func : type\n Function pointer to setup function.\n matching_score : type\n Function pointer to scoring function.\n callback_class : type, optional\n Analyzer class pointer to operate on computed scores.\n callback_class_args : dict, optional\n Arguments passed to the callback_class. Default is an empty dictionary.\n job_schedule : tuple of int, optional\n Schedule of jobs. Default is (1, 1).\n target_splits : dict, optional\n Splits for target. Default is an empty dictionary, i.e. no splits\n template_splits : dict, optional\n Splits for template. Default is an empty dictionary, i.e. no splits.\n pad_target_edges : bool, optional\n Whether to pad the target boundaries by half the template shape\n along each axis.\n pad_fourier: bool, optional\n Whether to pad target and template to the full convolution shape.\n interpolation_order : int, optional\n Order of spline interpolation for rotations.\n jobs_per_callback_class : int, optional\n How many jobs should be processed by a single callback_class instance,\n if ones is provided.\n **kwargs : various\n Additional arguments.\n\n Notes\n -----\n Objects in matching_data might be destroyed during computation.\n\n Returns\n -------\n Tuple\n The merged results from callback_class if provided otherwise None.\n \"\"\"\n target_splits = split_numpy_array_slices(\n matching_data.target.shape, splits=target_splits\n )\n template_splits = split_numpy_array_slices(\n matching_data._templateshape, splits=template_splits\n )\n\n target_pad = np.zeros(len(matching_data.target.shape), dtype=int)\n if pad_target_edges:\n target_pad = np.subtract(\n matching_data._templateshape, np.mod(matching_data._templateshape, 2)\n )\n outer_jobs, inner_jobs = job_schedule\n results = Parallel(n_jobs=outer_jobs)(\n delayed(_run_inner)(\n backend_name=backend._backend_name,\n backend_args=backend._backend_args,\n matching_data=matching_data.subset_by_slice(\n target_slice=target_split,\n target_pad=target_pad,\n template_slice=template_split,\n ),\n matching_score=matching_score,\n matching_setup=matching_setup,\n n_jobs=inner_jobs,\n callback_class=callback_class,\n callback_class_args=callback_class_args,\n interpolation_order=interpolation_order,\n pad_fourier=pad_fourier,\n gpu_index=index % outer_jobs,\n **kwargs,\n )\n for index, (target_split, template_split) in enumerate(\n product(target_splits, template_splits)\n )\n )\n\n matching_data._target, matching_data._template = None, None\n matching_data._target_mask, matching_data._template_mask = None, None\n\n if callback_class is not None:\n candidates = callback_class.merge(\n results, **callback_class_args, inner_merge=False\n )\n return candidates"
},
{
"identifier": "MATCHING_EXHAUSTIVE_REGISTER",
"path": "tme/matching_exhaustive.py",
"snippet": "MATCHING_EXHAUSTIVE_REGISTER = {\n \"CC\": (cc_setup, corr_scoring),\n \"LCC\": (lcc_setup, corr_scoring),\n \"CORR\": (corr_setup, corr_scoring),\n \"CAM\": (cam_setup, corr_scoring),\n \"FLCSphericalMask\": (flcSphericalMask_setup, corr_scoring),\n \"FLC\": (flc_setup, flc_scoring),\n \"MCC\": (mcc_setup, mcc_scoring),\n}"
},
{
"identifier": "register_matching_exhaustive",
"path": "tme/matching_exhaustive.py",
"snippet": "def register_matching_exhaustive(\n matching: str,\n matching_setup: Callable,\n matching_scoring: Callable,\n memory_class: MatchingMemoryUsage,\n) -> None:\n \"\"\"\n Registers a new matching scheme.\n\n Parameters\n ----------\n matching : str\n Name of the matching method.\n matching_setup : Callable\n The setup function associated with the name.\n matching_scoring : Callable\n The scoring function associated with the name.\n memory_class : MatchingMemoryUsage\n The custom memory estimation class extending\n :py:class:`tme.matching_memory.MatchingMemoryUsage`.\n\n Raises\n ------\n ValueError\n If a function with the name ``matching`` already exists in the registry.\n ValueError\n If ``memory_class`` is not a subclass of\n :py:class:`tme.matching_memory.MatchingMemoryUsage`.\n \"\"\"\n\n if matching in MATCHING_EXHAUSTIVE_REGISTER:\n raise ValueError(f\"A method with name '{matching}' is already registered.\")\n if not issubclass(memory_class, MatchingMemoryUsage):\n raise ValueError(f\"{memory_class} is not a subclass of {MatchingMemoryUsage}.\")\n\n MATCHING_EXHAUSTIVE_REGISTER[matching] = (matching_setup, matching_scoring)\n MATCHING_MEMORY_REGISTRY[matching] = memory_class"
},
{
"identifier": "MatchingData",
"path": "tme/matching_data.py",
"snippet": "class MatchingData:\n \"\"\"\n Contains data required for template matching.\n\n Parameters\n ----------\n target : np.ndarray or Density\n Target data array for template matching.\n template : np.ndarray or Density\n Template data array for template matching.\n\n \"\"\"\n\n def __init__(self, target: NDArray, template: NDArray):\n self._default_dtype = np.float32\n self._complex_dtype = np.complex64\n\n self._target = target\n self._target_mask = None\n self._template_mask = None\n self._translation_offset = np.zeros(len(target.shape), dtype=int)\n\n self.template = template\n\n self._target_pad = np.zeros(len(target.shape), dtype=int)\n self._template_pad = np.zeros(len(template.shape), dtype=int)\n\n self.template_filter = {}\n self.target_filter = {}\n\n self._invert_target = False\n\n @staticmethod\n def _shape_to_slice(shape: Tuple[int]):\n return tuple(slice(0, dim) for dim in shape)\n\n @classmethod\n def _slice_to_mesh(cls, slice_variable: (slice,), shape: (int,)):\n if slice_variable is None:\n slice_variable = cls._shape_to_slice(shape)\n ranges = [range(slc.start, slc.stop) for slc in slice_variable]\n indices = np.meshgrid(*ranges, sparse=True, indexing=\"ij\")\n return indices\n\n @staticmethod\n def _load_array(arr: NDArray):\n \"\"\"\n Load ``arr``, If ``arr`` type is memmap, reload from disk.\n\n Parameters\n ----------\n arr : NDArray\n Array to load.\n\n Returns\n -------\n NDArray\n Loaded array.\n \"\"\"\n\n if type(arr) == np.memmap:\n return np.memmap(arr.filename, mode=\"r\", shape=arr.shape, dtype=arr.dtype)\n return arr\n\n def subset_array(\n self, arr: NDArray, arr_slice: Tuple[slice], padding: NDArray\n ) -> NDArray:\n \"\"\"\n Extract a subset of the input array according to the given slice and\n apply padding.\n\n Parameters\n ----------\n arr : NDArray\n The input array from which a subset is extracted.\n arr_slice : tuple of slice\n Defines the region of the input array to be extracted.\n padding : NDArray\n Padding values for each dimension. 
If the padding exceeds the array\n dimensions, the extra regions are filled with the mean of the array\n values, otherwise, the\n values in ``arr`` are used.\n\n Returns\n -------\n NDArray\n Subset of the input array with padding applied.\n \"\"\"\n padding = np.maximum(padding, 0)\n\n slice_start = np.array([x.start for x in arr_slice], dtype=int)\n slice_stop = np.array([x.stop for x in arr_slice], dtype=int)\n slice_shape = np.subtract(slice_stop, slice_start)\n\n padding = np.add(padding, np.mod(padding, 2))\n left_pad = right_pad = np.divide(padding, 2).astype(int)\n\n data_voxels_left = np.minimum(slice_start, left_pad)\n data_voxels_right = np.minimum(\n np.subtract(arr.shape, slice_stop), right_pad\n ).astype(int)\n\n ret_shape = np.add(slice_shape, padding)\n arr_start = np.subtract(slice_start, data_voxels_left)\n arr_stop = np.add(slice_stop, data_voxels_right)\n arr_slice = tuple(slice(*pos) for pos in zip(arr_start, arr_stop))\n arr_mesh = self._slice_to_mesh(arr_slice, arr.shape)\n\n subset_start = np.subtract(left_pad, data_voxels_left)\n subset_stop = np.add(subset_start, np.subtract(arr_stop, arr_start))\n subset_slice = tuple(slice(*prod) for prod in zip(subset_start, subset_stop))\n subset_mesh = self._slice_to_mesh(subset_slice, ret_shape)\n\n if type(arr) == Density:\n if type(arr.data) == np.memmap:\n arr = Density.from_file(arr.data.filename, subset=arr_slice).data\n else:\n arr = np.asarray(arr.data[*arr_mesh])\n else:\n if type(arr) == np.memmap:\n arr = np.memmap(\n arr.filename, mode=\"r\", shape=arr.shape, dtype=arr.dtype\n )\n arr = np.asarray(arr[*arr_mesh])\n ret = np.full(\n shape=np.add(slice_shape, padding), fill_value=arr.mean(), dtype=arr.dtype\n )\n ret[*subset_mesh] = arr\n\n return ret\n\n def subset_by_slice(\n self,\n target_slice: Tuple[slice] = None,\n template_slice: Tuple[slice] = None,\n target_pad: NDArray = None,\n template_pad: NDArray = None,\n invert_target: bool = False,\n ) -> \"MatchingData\":\n \"\"\"\n Slice the instance arrays based on the provided slices.\n\n Parameters\n ----------\n target_slice : tuple of slice, optional\n Slices for the target. If not provided, the full shape is used.\n template_slice : tuple of slice, optional\n Slices for the template. If not provided, the full shape is used.\n target_pad : NDArray, optional\n Padding for target. Defaults to zeros. If padding exceeds target,\n pad with mean.\n template_pad : NDArray, optional\n Padding for template. Defaults to zeros. 
If padding exceeds template,\n pad with mean.\n\n Returns\n -------\n MatchingData\n Newly allocated sliced class instance.\n \"\"\"\n target_shape = self.target.shape\n template_shape = self._template.shape\n\n if target_slice is None:\n target_slice = self._shape_to_slice(target_shape)\n if template_slice is None:\n template_slice = self._shape_to_slice(template_shape)\n\n if target_pad is None:\n target_pad = np.zeros(len(self.target.shape), dtype=int)\n if template_pad is None:\n template_pad = np.zeros(len(self.target.shape), dtype=int)\n\n indices = compute_full_convolution_index(\n outer_shape=self._target.shape,\n inner_shape=self._template.shape,\n outer_split=target_slice,\n inner_split=template_slice,\n )\n\n target_subset = self.subset_array(\n arr=self._target, arr_slice=target_slice, padding=target_pad\n )\n if self._invert_target:\n target_subset *= -1\n target_min, target_max = target_subset.min(), target_subset.max()\n target_subset = (target_subset - target_min) / (target_max - target_min)\n template_subset = self.subset_array(\n arr=self._template,\n arr_slice=template_slice,\n padding=template_pad,\n )\n ret = self.__class__(target=target_subset, template=template_subset)\n\n ret._translation_offset = np.add(\n [x.start for x in target_slice],\n [x.start for x in template_slice],\n )\n ret.template_filter = self.template_filter\n\n ret.rotations, ret.indices = self.rotations, indices\n ret._target_pad, ret._template_pad = target_pad, template_pad\n ret._invert_target = self._invert_target\n\n if self._target_mask is not None:\n ret.target_mask = self.subset_array(\n arr=self._target_mask, arr_slice=target_slice, padding=target_pad\n )\n if self._template_mask is not None:\n ret.template_mask = self.subset_array(\n arr=self._template_mask,\n arr_slice=template_slice,\n padding=template_pad,\n )\n\n return ret\n\n def to_backend(self) -> None:\n \"\"\"\n Transfer the class instance's numpy arrays to the current backend.\n \"\"\"\n for attr_name, attr_value in vars(self).items():\n if isinstance(attr_value, np.ndarray):\n converted_array = backend.to_backend_array(attr_value.copy())\n setattr(self, attr_name, converted_array)\n\n self._default_dtype = backend._default_dtype\n self._complex_dtype = backend._complex_dtype\n\n @property\n def rotations(self):\n \"\"\"Return rotation matrices used for fitting.\"\"\"\n return self._rotations\n\n @rotations.setter\n def rotations(self, rotations: NDArray):\n \"\"\"\n Set and reshape the rotation matrices for fitting.\n\n Parameters\n ----------\n rotations : NDArray\n Rotations in shape (3 x 3), (1 x 3 x 3), or (n x k x k).\n \"\"\"\n if rotations.__class__ != np.ndarray:\n raise ValueError(\"Rotation set has to be of type numpy ndarray.\")\n if rotations.ndim == 2:\n print(\"Reshaping rotations array to rank 3.\")\n rotations = rotations.reshape(1, *rotations.shape)\n elif rotations.ndim == 3:\n pass\n else:\n raise ValueError(\"Rotations have to be a rank 2 or 3 array.\")\n self._rotations = rotations.astype(self._default_dtype)\n\n @property\n def target(self):\n \"\"\"Returns the target NDArray.\"\"\"\n if type(self._target) == Density:\n return self._target.data\n return self._target\n\n @property\n def template(self):\n \"\"\"Returns the reversed template NDArray.\"\"\"\n if type(self._template) == Density:\n return backend.reverse(self._template.data)\n return backend.reverse(self._template)\n\n @template.setter\n def template(self, template: NDArray):\n \"\"\"\n Set the template array.\n\n Parameters\n ----------\n 
template : NDArray\n Array to set as the template.\n \"\"\"\n if type(template) == Density:\n template.data = template.data.astype(self._default_dtype, copy=False)\n self._template = template\n self._templateshape = self._template.shape[::-1]\n return None\n self._template = template.astype(self._default_dtype, copy=False)\n self._templateshape = self._template.shape[::-1]\n\n @property\n def target_mask(self):\n \"\"\"Returns the target mask NDArray.\"\"\"\n if type(self._target_mask) == Density:\n return self._target_mask.data\n return self._target_mask\n\n @target_mask.setter\n def target_mask(self, mask: NDArray):\n \"\"\"Sets the target mask.\"\"\"\n if not np.all(self.target.shape == mask.shape):\n raise ValueError(\"Target and its mask have to have the same shape.\")\n\n if type(mask) == Density:\n mask.data = mask.data.astype(self._default_dtype, copy=False)\n self._target_mask = mask\n self._targetmaskshape = self._target_mask.shape[::-1]\n return None\n self._target_mask = mask.astype(self._default_dtype, copy=False)\n self._targetmaskshape = self._target_mask.shape\n\n @property\n def template_mask(self):\n \"\"\"\n Set the template mask array after reversing it.\n\n Parameters\n ----------\n template : NDArray\n Array to set as the template.\n \"\"\"\n if type(self._template_mask) == Density:\n return backend.reverse(self._template_mask.data)\n return backend.reverse(self._template_mask)\n\n @template_mask.setter\n def template_mask(self, mask: NDArray):\n \"\"\"Returns the reversed template mask NDArray.\"\"\"\n if not np.all(self._template.shape == mask.shape):\n raise ValueError(\"Target and its mask have to have the same shape.\")\n\n if type(mask) == Density:\n mask.data = mask.data.astype(self._default_dtype, copy=False)\n self._template_mask = mask\n self._templatemaskshape = self._template_mask.shape[::-1]\n return None\n\n self._template_mask = mask.astype(self._default_dtype, copy=False)\n self._templatemaskshape = self._template_mask.shape[::-1]\n\n def _split_rotations_on_jobs(self, n_jobs: int) -> List[NDArray]:\n \"\"\"\n Split the rotation matrices into parts based on the number of jobs.\n\n Parameters\n ----------\n n_jobs : int\n Number of jobs for splitting.\n\n Returns\n -------\n list of NDArray\n List of split rotation matrices.\n \"\"\"\n nrot_per_job = self.rotations.shape[0] // n_jobs\n rot_list = []\n for n in range(n_jobs):\n init_rot = n * nrot_per_job\n end_rot = init_rot + nrot_per_job\n if n == n_jobs - 1:\n end_rot = None\n rot_list.append(self.rotations[init_rot:end_rot])\n return rot_list"
},
{
"identifier": "get_rotation_matrices",
"path": "tme/matching_utils.py",
"snippet": "def get_rotation_matrices(\n angular_sampling: float, dim: int = 3, use_optimized_set: bool = True\n) -> NDArray:\n \"\"\"\n Returns rotation matrices in format k x dim x dim, where k is determined\n by ``angular_sampling``.\n\n Parameters\n ----------\n angular_sampling : float\n The angle in degrees used for the generation of rotation matrices.\n dim : int, optional\n Dimension of the rotation matrices.\n use_optimized_set : bool, optional\n Whether to use pre-computed rotational sets with more optimal sampling.\n Currently only available when dim=3.\n\n Notes\n -----\n For the case of dim = 3 optimized rotational sets are used, otherwise\n QR-decomposition.\n\n Returns\n -------\n NDArray\n Array of shape (k, dim, dim) containing k rotation matrices.\n \"\"\"\n if dim == 3 and use_optimized_set:\n quaternions, *_ = load_quaternions_by_angle(angular_sampling)\n ret = quaternion_to_rotation_matrix(quaternions)\n else:\n num_rotations = dim * (dim - 1) // 2\n k = int((360 / angular_sampling) ** num_rotations)\n As = np.random.randn(k, dim, dim)\n ret, _ = np.linalg.qr(As)\n dets = np.linalg.det(ret)\n neg_dets = dets < 0\n ret[neg_dets, :, -1] *= -1\n return ret"
},
{
"identifier": "MATCHING_MEMORY_REGISTRY",
"path": "tme/matching_memory.py",
"snippet": "MATCHING_MEMORY_REGISTRY = {\n \"CC\": CCMemoryUsage,\n \"LCC\": LCCMemoryUsage,\n \"CORR\": CORRMemoryUsage,\n \"CAM\": CAMMemoryUsage,\n \"MCC\": MCCMemoryUsage,\n \"FLCSphericalMask\": FLCSphericalMaskMemoryUsage,\n \"FLC\": FLCMemoryUsage,\n \"MaxScoreOverRotations\": MaxScoreOverRotationsMemoryUsage,\n \"PeakCallerMaximumFilter\": PeakCallerMaximumFilterMemoryUsage,\n \"cupy\": CupyBackendMemoryUsage,\n \"pytorch\": CupyBackendMemoryUsage,\n}"
}
] | import numpy as np
import pytest
from tme.matching_exhaustive import (
scan,
scan_subsets,
MATCHING_EXHAUSTIVE_REGISTER,
register_matching_exhaustive,
)
from tme.matching_data import MatchingData
from tme.matching_utils import get_rotation_matrices
from tme.matching_memory import MATCHING_MEMORY_REGISTRY | 7,763 |
class TestMatchExhaustive:
def setup_method(self):
target = np.zeros((50, 50, 50))
target[20:30, 30:40, 12:17] = 1
self.target = target
template = np.zeros((50, 50, 50))
template[15:25, 20:30, 2:7] = 1
self.template = template
self.rotations = get_rotation_matrices(60)[0:2,]
def teardown_method(self):
self.target = None
self.template = None
self.coordinates = None
self.coordinates_weights = None
self.rotations = None
@pytest.mark.parametrize("score", list(MATCHING_EXHAUSTIVE_REGISTER.keys()))
def test_scan_single_core(self, score):
matching_data = MatchingData(target=self.target, template=self.template)
matching_data.target_mask = self.target
matching_data.template_mask = self.template
matching_data.rotations = self.rotations
setup, process = MATCHING_EXHAUSTIVE_REGISTER[score]
scan(matching_data=matching_data, matching_setup=setup, matching_score=process)
@pytest.mark.parametrize("score", list(MATCHING_EXHAUSTIVE_REGISTER.keys()))
def test_scan_single_multi_core(self, score):
matching_data = MatchingData(target=self.target, template=self.template)
matching_data.target_mask = self.target
matching_data.template_mask = self.template
matching_data.rotations = self.rotations
setup, process = MATCHING_EXHAUSTIVE_REGISTER[score]
scan(
matching_data=matching_data,
matching_setup=setup,
matching_score=process,
n_jobs=2,
)
@pytest.mark.parametrize("score", list(MATCHING_EXHAUSTIVE_REGISTER.keys()))
def test_scan_subsets_single_core(self, score):
matching_data = MatchingData(target=self.target, template=self.template)
matching_data.target_mask = self.target
matching_data.template_mask = self.template
matching_data.rotations = self.rotations
setup, process = MATCHING_EXHAUSTIVE_REGISTER[score]
target_splits = {i: 1 for i in range(self.target.ndim)}
template_splits = {i: 1 for i in range(self.target.ndim)}
target_splits[0], template_splits[1] = 2, 2
scan_subsets(
matching_data=matching_data,
matching_setup=setup,
matching_score=process,
target_splits=target_splits,
template_splits=template_splits,
job_schedule=(2, 1),
)
@pytest.mark.parametrize("score", list(MATCHING_EXHAUSTIVE_REGISTER.keys()))
def test_scan_subsets_single_multi_core(self, score):
matching_data = MatchingData(target=self.target, template=self.template)
matching_data.target_mask = self.target
matching_data.template_mask = self.template
matching_data.rotations = self.rotations
setup, process = MATCHING_EXHAUSTIVE_REGISTER[score]
target_splits = {i: 1 for i in range(self.target.ndim)}
template_splits = {i: 1 for i in range(self.target.ndim)}
target_splits[0], template_splits[1] = 2, 2
scan_subsets(
matching_data=matching_data,
matching_setup=setup,
matching_score=process,
target_splits=target_splits,
template_splits=template_splits,
job_schedule=(2, 1),
)
@pytest.mark.parametrize("score", list(MATCHING_EXHAUSTIVE_REGISTER.keys()))
def test_scan_subsets_single_multi_core_both(self, score):
matching_data = MatchingData(target=self.target, template=self.template)
matching_data.target_mask = self.target
matching_data.template_mask = self.template
matching_data.rotations = self.rotations
setup, process = MATCHING_EXHAUSTIVE_REGISTER[score]
target_splits = {i: 1 for i in range(self.target.ndim)}
template_splits = {i: 1 for i in range(self.target.ndim)}
target_splits[0], template_splits[1] = 2, 2
scan_subsets(
matching_data=matching_data,
matching_setup=setup,
matching_score=process,
target_splits=target_splits,
template_splits=template_splits,
job_schedule=(2, 2),
)
def test_register_matching_exhaustive(self):
setup, matching = MATCHING_EXHAUSTIVE_REGISTER[
list(MATCHING_EXHAUSTIVE_REGISTER.keys())[0]
]
memory_class = MATCHING_MEMORY_REGISTRY[
list(MATCHING_EXHAUSTIVE_REGISTER.keys())[0]
]
|
class TestMatchExhaustive:
def setup_method(self):
target = np.zeros((50, 50, 50))
target[20:30, 30:40, 12:17] = 1
self.target = target
template = np.zeros((50, 50, 50))
template[15:25, 20:30, 2:7] = 1
self.template = template
self.rotations = get_rotation_matrices(60)[0:2,]
def teardown_method(self):
self.target = None
self.template = None
self.coordinates = None
self.coordinates_weights = None
self.rotations = None
@pytest.mark.parametrize("score", list(MATCHING_EXHAUSTIVE_REGISTER.keys()))
def test_scan_single_core(self, score):
matching_data = MatchingData(target=self.target, template=self.template)
matching_data.target_mask = self.target
matching_data.template_mask = self.template
matching_data.rotations = self.rotations
setup, process = MATCHING_EXHAUSTIVE_REGISTER[score]
scan(matching_data=matching_data, matching_setup=setup, matching_score=process)
@pytest.mark.parametrize("score", list(MATCHING_EXHAUSTIVE_REGISTER.keys()))
def test_scan_single_multi_core(self, score):
matching_data = MatchingData(target=self.target, template=self.template)
matching_data.target_mask = self.target
matching_data.template_mask = self.template
matching_data.rotations = self.rotations
setup, process = MATCHING_EXHAUSTIVE_REGISTER[score]
scan(
matching_data=matching_data,
matching_setup=setup,
matching_score=process,
n_jobs=2,
)
@pytest.mark.parametrize("score", list(MATCHING_EXHAUSTIVE_REGISTER.keys()))
def test_scan_subsets_single_core(self, score):
matching_data = MatchingData(target=self.target, template=self.template)
matching_data.target_mask = self.target
matching_data.template_mask = self.template
matching_data.rotations = self.rotations
setup, process = MATCHING_EXHAUSTIVE_REGISTER[score]
target_splits = {i: 1 for i in range(self.target.ndim)}
template_splits = {i: 1 for i in range(self.target.ndim)}
target_splits[0], template_splits[1] = 2, 2
scan_subsets(
matching_data=matching_data,
matching_setup=setup,
matching_score=process,
target_splits=target_splits,
template_splits=template_splits,
job_schedule=(2, 1),
)
@pytest.mark.parametrize("score", list(MATCHING_EXHAUSTIVE_REGISTER.keys()))
def test_scan_subsets_single_multi_core(self, score):
matching_data = MatchingData(target=self.target, template=self.template)
matching_data.target_mask = self.target
matching_data.template_mask = self.template
matching_data.rotations = self.rotations
setup, process = MATCHING_EXHAUSTIVE_REGISTER[score]
target_splits = {i: 1 for i in range(self.target.ndim)}
template_splits = {i: 1 for i in range(self.target.ndim)}
target_splits[0], template_splits[1] = 2, 2
scan_subsets(
matching_data=matching_data,
matching_setup=setup,
matching_score=process,
target_splits=target_splits,
template_splits=template_splits,
job_schedule=(2, 1),
)
@pytest.mark.parametrize("score", list(MATCHING_EXHAUSTIVE_REGISTER.keys()))
def test_scan_subsets_single_multi_core_both(self, score):
matching_data = MatchingData(target=self.target, template=self.template)
matching_data.target_mask = self.target
matching_data.template_mask = self.template
matching_data.rotations = self.rotations
setup, process = MATCHING_EXHAUSTIVE_REGISTER[score]
target_splits = {i: 1 for i in range(self.target.ndim)}
template_splits = {i: 1 for i in range(self.target.ndim)}
target_splits[0], template_splits[1] = 2, 2
scan_subsets(
matching_data=matching_data,
matching_setup=setup,
matching_score=process,
target_splits=target_splits,
template_splits=template_splits,
job_schedule=(2, 2),
)
def test_register_matching_exhaustive(self):
setup, matching = MATCHING_EXHAUSTIVE_REGISTER[
list(MATCHING_EXHAUSTIVE_REGISTER.keys())[0]
]
memory_class = MATCHING_MEMORY_REGISTRY[
list(MATCHING_EXHAUSTIVE_REGISTER.keys())[0]
] | register_matching_exhaustive( | 3 | 2023-10-20 13:46:01+00:00 | 12k |
tonnetonne814/MB-iSTFT-BERT-VITS2-44100-Ja | train_ms.py | [
{
"identifier": "TextAudioSpeakerLoader",
"path": "data_utils.py",
"snippet": "class TextAudioSpeakerLoader(torch.utils.data.Dataset):\n \"\"\"\n 1) loads audio, speaker_id, text pairs\n 2) normalizes text and converts them to sequences of integers\n 3) computes spectrograms from audio files.\n \"\"\"\n\n def __init__(self, audiopaths_sid_text, hparams):\n self.audiopaths_sid_text = load_filepaths_and_text(audiopaths_sid_text)\n self.max_wav_value = hparams.max_wav_value\n self.sampling_rate = hparams.sampling_rate\n self.filter_length = hparams.filter_length\n self.hop_length = hparams.hop_length\n self.win_length = hparams.win_length\n self.sampling_rate = hparams.sampling_rate\n self.spk_map = hparams.spk2id\n self.hparams = hparams\n\n self.use_mel_spec_posterior = getattr(\n hparams, \"use_mel_posterior_encoder\", False\n )\n if self.use_mel_spec_posterior:\n self.n_mel_channels = getattr(hparams, \"n_mel_channels\", 80)\n\n self.cleaned_text = getattr(hparams, \"cleaned_text\", False)\n\n self.add_blank = hparams.add_blank\n self.min_text_len = getattr(hparams, \"min_text_len\", 1)\n self.max_text_len = getattr(hparams, \"max_text_len\", 300)\n\n random.seed(1234)\n random.shuffle(self.audiopaths_sid_text)\n self._filter()\n\n def _filter(self):\n \"\"\"\n Filter text & store spec lengths\n \"\"\"\n # Store spectrogram lengths for Bucketing\n # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2)\n # spec_length = wav_length // hop_length\n\n audiopaths_sid_text_new = []\n lengths = []\n skipped = 0\n logger.info(\"Init dataset...\")\n for _id, spk, language, text, phones, tone, word2ph in tqdm(\n self.audiopaths_sid_text\n ):\n audiopath = f\"{_id}\"\n if self.min_text_len <= len(phones) and len(phones) <= self.max_text_len:\n phones = phones.split(\" \")\n tone = [int(i) for i in tone.split(\" \")]\n word2ph = [int(i) for i in word2ph.split(\" \")]\n audiopaths_sid_text_new.append(\n [audiopath, spk, language, text, phones, tone, word2ph]\n )\n lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length))\n else:\n skipped += 1\n logger.info(\n \"skipped: \"\n + str(skipped)\n + \", total: \"\n + str(len(self.audiopaths_sid_text))\n )\n self.audiopaths_sid_text = audiopaths_sid_text_new\n self.lengths = lengths\n\n def get_audio_text_speaker_pair(self, audiopath_sid_text):\n # separate filename, speaker_id and text\n audiopath, sid, language, text, phones, tone, word2ph = audiopath_sid_text\n\n bert, ja_bert, phones, tone, language = self.get_text(\n text, word2ph, phones, tone, language, audiopath\n )\n\n spec, wav = self.get_audio(audiopath)\n sid = torch.LongTensor([int(self.spk_map[sid])])\n return (phones, spec, wav, sid, tone, language, bert, ja_bert)\n\n def get_audio(self, filename):\n audio, sampling_rate = load_wav_to_torch(filename)\n if sampling_rate != self.sampling_rate:\n raise ValueError(\n \"{} {} SR doesn't match target {} SR\".format(\n filename, sampling_rate, self.sampling_rate\n )\n )\n audio_norm = audio / self.max_wav_value\n audio_norm = audio_norm.unsqueeze(0)\n spec_filename = filename.replace(\".wav\", \".spec.pt\")\n if self.use_mel_spec_posterior:\n spec_filename = spec_filename.replace(\".spec.pt\", \".mel.pt\")\n try:\n spec = torch.load(spec_filename)\n except:\n if self.use_mel_spec_posterior:\n spec = mel_spectrogram_torch(\n audio_norm,\n self.filter_length,\n self.n_mel_channels,\n self.sampling_rate,\n self.hop_length,\n self.win_length,\n self.hparams.mel_fmin,\n self.hparams.mel_fmax,\n center=False,\n )\n else:\n spec = spectrogram_torch(\n audio_norm,\n self.filter_length,\n 
self.sampling_rate,\n self.hop_length,\n self.win_length,\n center=False,\n )\n spec = torch.squeeze(spec, 0)\n torch.save(spec, spec_filename)\n return spec, audio_norm\n\n def get_text(self, text, word2ph, phone, tone, language_str, wav_path):\n phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str)\n if self.add_blank:\n phone = commons.intersperse(phone, 0)\n tone = commons.intersperse(tone, 0)\n language = commons.intersperse(language, 0)\n for i in range(len(word2ph)):\n word2ph[i] = word2ph[i] * 2\n word2ph[0] += 1\n bert_path = wav_path.replace(\".wav\", \".bert.pt\")\n try:\n bert = torch.load(bert_path)\n assert bert.shape[-1] == len(phone)\n except:\n bert = get_bert(text, word2ph, language_str, device=\"cuda\")\n torch.save(bert, bert_path)\n assert bert.shape[-1] == len(phone), phone\n\n if language_str == \"ZH\":\n bert = bert\n ja_bert = torch.zeros(768, len(phone))\n elif language_str == \"JP\":\n ja_bert = bert\n bert = torch.zeros(1024, len(phone))\n else:\n bert = torch.zeros(1024, len(phone))\n ja_bert = torch.zeros(768, len(phone))\n assert bert.shape[-1] == len(phone), (\n bert.shape,\n len(phone),\n sum(word2ph),\n p1,\n p2,\n t1,\n t2,\n pold,\n pold2,\n word2ph,\n text,\n w2pho,\n )\n phone = torch.LongTensor(phone)\n tone = torch.LongTensor(tone)\n language = torch.LongTensor(language)\n return bert, ja_bert, phone, tone, language\n\n def get_sid(self, sid):\n sid = torch.LongTensor([int(sid)])\n return sid\n\n def __getitem__(self, index):\n return self.get_audio_text_speaker_pair(self.audiopaths_sid_text[index])\n\n def __len__(self):\n return len(self.audiopaths_sid_text)"
},
{
"identifier": "TextAudioSpeakerCollate",
"path": "data_utils.py",
"snippet": "class TextAudioSpeakerCollate:\n \"\"\"Zero-pads model inputs and targets\"\"\"\n\n def __init__(self, return_ids=False):\n self.return_ids = return_ids\n\n def __call__(self, batch):\n \"\"\"Collate's training batch from normalized text, audio and speaker identities\n PARAMS\n ------\n batch: [text_normalized, spec_normalized, wav_normalized, sid]\n \"\"\"\n # Right zero-pad all one-hot text sequences to max input length\n _, ids_sorted_decreasing = torch.sort(\n torch.LongTensor([x[1].size(1) for x in batch]), dim=0, descending=True\n )\n\n max_text_len = max([len(x[0]) for x in batch])\n max_spec_len = max([x[1].size(1) for x in batch])\n max_wav_len = max([x[2].size(1) for x in batch])\n\n text_lengths = torch.LongTensor(len(batch))\n spec_lengths = torch.LongTensor(len(batch))\n wav_lengths = torch.LongTensor(len(batch))\n sid = torch.LongTensor(len(batch))\n\n text_padded = torch.LongTensor(len(batch), max_text_len)\n tone_padded = torch.LongTensor(len(batch), max_text_len)\n language_padded = torch.LongTensor(len(batch), max_text_len)\n bert_padded = torch.FloatTensor(len(batch), 1024, max_text_len)\n ja_bert_padded = torch.FloatTensor(len(batch), 768, max_text_len)\n\n spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len)\n wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)\n text_padded.zero_()\n tone_padded.zero_()\n language_padded.zero_()\n spec_padded.zero_()\n wav_padded.zero_()\n bert_padded.zero_()\n ja_bert_padded.zero_()\n for i in range(len(ids_sorted_decreasing)):\n row = batch[ids_sorted_decreasing[i]]\n\n text = row[0]\n text_padded[i, : text.size(0)] = text\n text_lengths[i] = text.size(0)\n\n spec = row[1]\n spec_padded[i, :, : spec.size(1)] = spec\n spec_lengths[i] = spec.size(1)\n\n wav = row[2]\n wav_padded[i, :, : wav.size(1)] = wav\n wav_lengths[i] = wav.size(1)\n\n sid[i] = row[3]\n\n tone = row[4]\n tone_padded[i, : tone.size(0)] = tone\n\n language = row[5]\n language_padded[i, : language.size(0)] = language\n\n bert = row[6]\n bert_padded[i, :, : bert.size(1)] = bert\n\n ja_bert = row[7]\n ja_bert_padded[i, :, : ja_bert.size(1)] = ja_bert\n\n return (\n text_padded,\n text_lengths,\n spec_padded,\n spec_lengths,\n wav_padded,\n wav_lengths,\n sid,\n tone_padded,\n language_padded,\n bert_padded,\n ja_bert_padded,\n )"
},
{
"identifier": "DistributedBucketSampler",
"path": "data_utils.py",
"snippet": "class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler):\n \"\"\"\n Maintain similar input lengths in a batch.\n Length groups are specified by boundaries.\n Ex) boundaries = [b1, b2, b3] -> any batch is included either {x | b1 < length(x) <=b2} or {x | b2 < length(x) <= b3}.\n\n It removes samples which are not included in the boundaries.\n Ex) boundaries = [b1, b2, b3] -> any x s.t. length(x) <= b1 or length(x) > b3 are discarded.\n \"\"\"\n\n def __init__(\n self,\n dataset,\n batch_size,\n boundaries,\n num_replicas=None,\n rank=None,\n shuffle=True,\n ):\n super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)\n self.lengths = dataset.lengths\n self.batch_size = batch_size\n self.boundaries = boundaries\n\n self.buckets, self.num_samples_per_bucket = self._create_buckets()\n self.total_size = sum(self.num_samples_per_bucket)\n self.num_samples = self.total_size // self.num_replicas\n\n def _create_buckets(self):\n buckets = [[] for _ in range(len(self.boundaries) - 1)]\n for i in range(len(self.lengths)):\n length = self.lengths[i]\n idx_bucket = self._bisect(length)\n if idx_bucket != -1:\n buckets[idx_bucket].append(i)\n\n try:\n for i in range(len(buckets) - 1, 0, -1):\n if len(buckets[i]) == 0:\n buckets.pop(i)\n self.boundaries.pop(i + 1)\n assert all(len(bucket) > 0 for bucket in buckets)\n # When one bucket is not traversed\n except Exception as e:\n print(\"Bucket warning \", e)\n for i in range(len(buckets) - 1, -1, -1):\n if len(buckets[i]) == 0:\n buckets.pop(i)\n self.boundaries.pop(i + 1)\n\n num_samples_per_bucket = []\n for i in range(len(buckets)):\n len_bucket = len(buckets[i])\n total_batch_size = self.num_replicas * self.batch_size\n rem = (\n total_batch_size - (len_bucket % total_batch_size)\n ) % total_batch_size\n num_samples_per_bucket.append(len_bucket + rem)\n return buckets, num_samples_per_bucket\n\n def __iter__(self):\n # deterministically shuffle based on epoch\n g = torch.Generator()\n g.manual_seed(self.epoch)\n\n indices = []\n if self.shuffle:\n for bucket in self.buckets:\n indices.append(torch.randperm(len(bucket), generator=g).tolist())\n else:\n for bucket in self.buckets:\n indices.append(list(range(len(bucket))))\n\n batches = []\n for i in range(len(self.buckets)):\n bucket = self.buckets[i]\n len_bucket = len(bucket)\n if len_bucket == 0:\n continue\n ids_bucket = indices[i]\n num_samples_bucket = self.num_samples_per_bucket[i]\n\n # add extra samples to make it evenly divisible\n rem = num_samples_bucket - len_bucket\n ids_bucket = (\n ids_bucket\n + ids_bucket * (rem // len_bucket)\n + ids_bucket[: (rem % len_bucket)]\n )\n\n # subsample\n ids_bucket = ids_bucket[self.rank :: self.num_replicas]\n\n # batching\n for j in range(len(ids_bucket) // self.batch_size):\n batch = [\n bucket[idx]\n for idx in ids_bucket[\n j * self.batch_size : (j + 1) * self.batch_size\n ]\n ]\n batches.append(batch)\n\n if self.shuffle:\n batch_ids = torch.randperm(len(batches), generator=g).tolist()\n batches = [batches[i] for i in batch_ids]\n self.batches = batches\n\n assert len(self.batches) * self.batch_size == self.num_samples\n return iter(self.batches)\n\n def _bisect(self, x, lo=0, hi=None):\n if hi is None:\n hi = len(self.boundaries) - 1\n\n if hi > lo:\n mid = (hi + lo) // 2\n if self.boundaries[mid] < x and x <= self.boundaries[mid + 1]:\n return mid\n elif x <= self.boundaries[mid]:\n return self._bisect(x, lo, mid)\n else:\n return self._bisect(x, mid + 1, hi)\n else:\n return -1\n\n def 
__len__(self):\n return self.num_samples // self.batch_size"
},
{
"identifier": "SynthesizerTrn",
"path": "models.py",
"snippet": "class SynthesizerTrn(nn.Module):\n \"\"\"\n Synthesizer for Training\n \"\"\"\n\n def __init__(\n self,\n n_vocab,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n n_speakers=256,\n gin_channels=256,\n use_sdp=True,\n n_flow_layer=4,\n n_layers_trans_flow=6,\n flow_share_parameter=False,\n use_transformer_flow=True,\n subbands=8, # add\n gen_istft_n_fft=16, # add\n gen_istft_hop_size=4, # add\n **kwargs\n ):\n super().__init__()\n self.n_vocab = n_vocab\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.subbands = subbands\n self.gen_istft_n_fft = gen_istft_n_fft\n self.gen_istft_hop_size = gen_istft_hop_size\n self.n_speakers = n_speakers\n self.gin_channels = gin_channels\n self.n_layers_trans_flow = n_layers_trans_flow\n self.use_spk_conditioned_encoder = kwargs.get(\n \"use_spk_conditioned_encoder\", True\n )\n self.use_sdp = use_sdp\n self.use_noise_scaled_mas = kwargs.get(\"use_noise_scaled_mas\", False)\n self.mas_noise_scale_initial = kwargs.get(\"mas_noise_scale_initial\", 0.01)\n self.noise_scale_delta = kwargs.get(\"noise_scale_delta\", 2e-6)\n self.current_mas_noise_scale = self.mas_noise_scale_initial\n if self.use_spk_conditioned_encoder and gin_channels > 0:\n self.enc_gin_channels = gin_channels\n self.enc_p = TextEncoder(\n n_vocab,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n gin_channels=self.enc_gin_channels,\n )\n # self.dec = Generator(\n # inter_channels,\n # resblock,\n # resblock_kernel_sizes,\n # resblock_dilation_sizes,\n # upsample_rates,\n # upsample_initial_channel,\n # upsample_kernel_sizes,\n # gin_channels=gin_channels,\n # )\n self.dec = Multistream_iSTFT_Generator(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gen_istft_n_fft,\n gen_istft_hop_size, \n subbands,\n gin_channels=gin_channels,\n )\n self.enc_q = PosteriorEncoder(\n spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n if use_transformer_flow:\n self.flow = TransformerCouplingBlock(\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers_trans_flow,\n 5,\n p_dropout,\n n_flow_layer,\n gin_channels=gin_channels,\n share_parameter=flow_share_parameter,\n )\n else:\n self.flow = ResidualCouplingBlock(\n inter_channels,\n hidden_channels,\n 5,\n 1,\n n_flow_layer,\n gin_channels=gin_channels,\n )\n self.sdp = StochasticDurationPredictor(\n hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels\n )\n self.dp = DurationPredictor(\n hidden_channels, 256, 3, 0.5, gin_channels=gin_channels\n )\n\n if n_speakers > 1:\n self.emb_g = nn.Embedding(n_speakers, gin_channels)\n else:\n 
self.ref_enc = ReferenceEncoder(spec_channels, gin_channels)\n\n def forward(self, x, x_lengths, y, y_lengths, sid, tone, language, bert, ja_bert):\n if self.n_speakers > 0:\n g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]\n else:\n g = self.ref_enc(y.transpose(1, 2)).unsqueeze(-1)\n x, m_p, logs_p, x_mask = self.enc_p(\n x, x_lengths, tone, language, bert, ja_bert, g=g\n )\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n\n with torch.no_grad():\n # negative cross-entropy\n s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t]\n neg_cent1 = torch.sum(\n -0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True\n ) # [b, 1, t_s]\n neg_cent2 = torch.matmul(\n -0.5 * (z_p**2).transpose(1, 2), s_p_sq_r\n ) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]\n neg_cent3 = torch.matmul(\n z_p.transpose(1, 2), (m_p * s_p_sq_r)\n ) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]\n neg_cent4 = torch.sum(\n -0.5 * (m_p**2) * s_p_sq_r, [1], keepdim=True\n ) # [b, 1, t_s]\n neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4\n if self.use_noise_scaled_mas:\n epsilon = (\n torch.std(neg_cent)\n * torch.randn_like(neg_cent)\n * self.current_mas_noise_scale\n )\n neg_cent = neg_cent + epsilon\n\n attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)\n attn = (\n monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1))\n .unsqueeze(1)\n .detach()\n )\n\n w = attn.sum(2)\n\n l_length_sdp = self.sdp(x, x_mask, w, g=g)\n l_length_sdp = l_length_sdp / torch.sum(x_mask)\n\n logw_ = torch.log(w + 1e-6) * x_mask\n logw = self.dp(x, x_mask, g=g)\n l_length_dp = torch.sum((logw - logw_) ** 2, [1, 2]) / torch.sum(\n x_mask\n ) # for averaging\n\n l_length = l_length_dp + l_length_sdp\n\n # expand prior\n m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2)\n logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2)\n\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n o = self.dec(z_slice, g=g)\n return (\n o,\n l_length,\n attn,\n ids_slice,\n x_mask,\n y_mask,\n (z, z_p, m_p, logs_p, m_q, logs_q),\n (x, logw, logw_),\n )\n\n def infer(\n self,\n x,\n x_lengths,\n sid,\n tone,\n language,\n bert,\n ja_bert,\n noise_scale=0.667,\n length_scale=1,\n noise_scale_w=0.8,\n max_len=None,\n sdp_ratio=0,\n y=None,\n ):\n # x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, tone, language, bert)\n # g = self.gst(y)\n if self.n_speakers > 0:\n g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]\n else:\n g = self.ref_enc(y.transpose(1, 2)).unsqueeze(-1)\n x, m_p, logs_p, x_mask = self.enc_p(\n x, x_lengths, tone, language, bert, ja_bert, g=g\n )\n logw = self.sdp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w) * (\n sdp_ratio\n ) + self.dp(x, x_mask, g=g) * (1 - sdp_ratio)\n w = torch.exp(logw) * x_mask * length_scale\n w_ceil = torch.ceil(w)\n y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()\n y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(\n x_mask.dtype\n )\n attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)\n attn = commons.generate_path(w_ceil, attn_mask)\n\n m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(\n 1, 2\n ) # [b, t', t], [b, t, d] -> [b, d, t']\n logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(\n 1, 2\n ) # [b, t', t], [b, t, d] -> [b, d, t']\n\n z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale\n z = self.flow(z_p, y_mask, g=g, reverse=True)\n o = self.dec((z * 
y_mask)[:, :, :max_len], g=g)\n return o, attn, y_mask, (z, z_p, m_p, logs_p)"
},
{
"identifier": "MultiPeriodDiscriminator",
"path": "models.py",
"snippet": "class MultiPeriodDiscriminator(torch.nn.Module):\n def __init__(self, use_spectral_norm=False):\n super(MultiPeriodDiscriminator, self).__init__()\n periods = [2, 3, 5, 7, 11]\n\n discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]\n discs = discs + [\n DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods\n ]\n self.discriminators = nn.ModuleList(discs)\n\n def forward(self, y, y_hat):\n y_d_rs = []\n y_d_gs = []\n fmap_rs = []\n fmap_gs = []\n for i, d in enumerate(self.discriminators):\n y_d_r, fmap_r = d(y)\n y_d_g, fmap_g = d(y_hat)\n y_d_rs.append(y_d_r)\n y_d_gs.append(y_d_g)\n fmap_rs.append(fmap_r)\n fmap_gs.append(fmap_g)\n\n return y_d_rs, y_d_gs, fmap_rs, fmap_gs"
},
{
"identifier": "DurationDiscriminator",
"path": "models.py",
"snippet": "class DurationDiscriminator(nn.Module): # vits2\n def __init__(\n self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0\n ):\n super().__init__()\n\n self.in_channels = in_channels\n self.filter_channels = filter_channels\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.gin_channels = gin_channels\n\n self.drop = nn.Dropout(p_dropout)\n self.conv_1 = nn.Conv1d(\n in_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.norm_1 = modules.LayerNorm(filter_channels)\n self.conv_2 = nn.Conv1d(\n filter_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.norm_2 = modules.LayerNorm(filter_channels)\n self.dur_proj = nn.Conv1d(1, filter_channels, 1)\n\n self.pre_out_conv_1 = nn.Conv1d(\n 2 * filter_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.pre_out_norm_1 = modules.LayerNorm(filter_channels)\n self.pre_out_conv_2 = nn.Conv1d(\n filter_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.pre_out_norm_2 = modules.LayerNorm(filter_channels)\n\n if gin_channels != 0:\n self.cond = nn.Conv1d(gin_channels, in_channels, 1)\n\n self.output_layer = nn.Sequential(nn.Linear(filter_channels, 1), nn.Sigmoid())\n\n def forward_probability(self, x, x_mask, dur, g=None):\n dur = self.dur_proj(dur)\n x = torch.cat([x, dur], dim=1)\n x = self.pre_out_conv_1(x * x_mask)\n x = torch.relu(x)\n x = self.pre_out_norm_1(x)\n x = self.drop(x)\n x = self.pre_out_conv_2(x * x_mask)\n x = torch.relu(x)\n x = self.pre_out_norm_2(x)\n x = self.drop(x)\n x = x * x_mask\n x = x.transpose(1, 2)\n output_prob = self.output_layer(x)\n return output_prob\n\n def forward(self, x, x_mask, dur_r, dur_hat, g=None):\n x = torch.detach(x)\n if g is not None:\n g = torch.detach(g)\n x = x + self.cond(g)\n x = self.conv_1(x * x_mask)\n x = torch.relu(x)\n x = self.norm_1(x)\n x = self.drop(x)\n x = self.conv_2(x * x_mask)\n x = torch.relu(x)\n x = self.norm_2(x)\n x = self.drop(x)\n\n output_probs = []\n for dur in [dur_r, dur_hat]:\n output_prob = self.forward_probability(x, x_mask, dur, g)\n output_probs.append(output_prob)\n\n return output_probs"
},
{
"identifier": "generator_loss",
"path": "losses.py",
"snippet": "def generator_loss(disc_outputs):\n loss = 0\n gen_losses = []\n for dg in disc_outputs:\n dg = dg.float()\n l = torch.mean((1 - dg) ** 2)\n gen_losses.append(l)\n loss += l\n\n return loss, gen_losses"
},
{
"identifier": "discriminator_loss",
"path": "losses.py",
"snippet": "def discriminator_loss(disc_real_outputs, disc_generated_outputs):\n loss = 0\n r_losses = []\n g_losses = []\n for dr, dg in zip(disc_real_outputs, disc_generated_outputs):\n dr = dr.float()\n dg = dg.float()\n r_loss = torch.mean((1 - dr) ** 2)\n g_loss = torch.mean(dg**2)\n loss += r_loss + g_loss\n r_losses.append(r_loss.item())\n g_losses.append(g_loss.item())\n\n return loss, r_losses, g_losses"
},
{
"identifier": "feature_loss",
"path": "losses.py",
"snippet": "def feature_loss(fmap_r, fmap_g):\n loss = 0\n for dr, dg in zip(fmap_r, fmap_g):\n for rl, gl in zip(dr, dg):\n rl = rl.float().detach()\n gl = gl.float()\n loss += torch.mean(torch.abs(rl - gl))\n\n return loss * 2"
},
{
"identifier": "kl_loss",
"path": "losses.py",
"snippet": "def kl_loss(z_p, logs_q, m_p, logs_p, z_mask):\n \"\"\"\n z_p, logs_q: [b, h, t_t]\n m_p, logs_p: [b, h, t_t]\n \"\"\"\n z_p = z_p.float()\n logs_q = logs_q.float()\n m_p = m_p.float()\n logs_p = logs_p.float()\n z_mask = z_mask.float()\n\n kl = logs_p - logs_q - 0.5\n kl += 0.5 * ((z_p - m_p) ** 2) * torch.exp(-2.0 * logs_p)\n kl = torch.sum(kl * z_mask)\n l = kl / torch.sum(z_mask)\n return l"
},
{
"identifier": "mel_spectrogram_torch",
"path": "mel_processing.py",
"snippet": "def mel_spectrogram_torch(\n y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False\n):\n if torch.min(y) < -1.0:\n print(\"min value is \", torch.min(y))\n if torch.max(y) > 1.0:\n print(\"max value is \", torch.max(y))\n\n global mel_basis, hann_window\n dtype_device = str(y.dtype) + \"_\" + str(y.device)\n fmax_dtype_device = str(fmax) + \"_\" + dtype_device\n wnsize_dtype_device = str(win_size) + \"_\" + dtype_device\n if fmax_dtype_device not in mel_basis:\n mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)\n mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(\n dtype=y.dtype, device=y.device\n )\n if wnsize_dtype_device not in hann_window:\n hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(\n dtype=y.dtype, device=y.device\n )\n\n y = torch.nn.functional.pad(\n y.unsqueeze(1),\n (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)),\n mode=\"reflect\",\n )\n y = y.squeeze(1)\n\n spec = torch.stft(\n y,\n n_fft,\n hop_length=hop_size,\n win_length=win_size,\n window=hann_window[wnsize_dtype_device],\n center=center,\n pad_mode=\"reflect\",\n normalized=False,\n onesided=True,\n return_complex=False,\n )\n\n spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)\n\n spec = torch.matmul(mel_basis[fmax_dtype_device], spec)\n spec = spectral_normalize_torch(spec)\n\n return spec"
},
{
"identifier": "spec_to_mel_torch",
"path": "mel_processing.py",
"snippet": "def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):\n global mel_basis\n dtype_device = str(spec.dtype) + \"_\" + str(spec.device)\n fmax_dtype_device = str(fmax) + \"_\" + dtype_device\n if fmax_dtype_device not in mel_basis:\n mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)\n mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(\n dtype=spec.dtype, device=spec.device\n )\n spec = torch.matmul(mel_basis[fmax_dtype_device], spec)\n spec = spectral_normalize_torch(spec)\n return spec"
},
{
"identifier": "symbols",
"path": "text/symbols.py",
"snippet": ""
}
] | import os
import torch
import torch.distributed as dist
import logging
import commons
import utils
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.cuda.amp import autocast, GradScaler
from tqdm import tqdm
from data_utils import (
TextAudioSpeakerLoader,
TextAudioSpeakerCollate,
DistributedBucketSampler,
)
from models import (
SynthesizerTrn,
MultiPeriodDiscriminator,
DurationDiscriminator,
)
from losses import generator_loss, discriminator_loss, feature_loss, kl_loss
from mel_processing import mel_spectrogram_torch, spec_to_mel_torch
from text.symbols import symbols | 9,084 | # flake8: noqa: E402
logging.getLogger("numba").setLevel(logging.WARNING)
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = (
    True  # If you encounter training problems, please try to disable TF32.
)
torch.set_float32_matmul_precision("medium")
torch.backends.cudnn.benchmark = True
torch.backends.cuda.sdp_kernel("flash")
torch.backends.cuda.enable_flash_sdp(True)
torch.backends.cuda.enable_mem_efficient_sdp(
True
) # Not available if torch version is lower than 2.0
torch.backends.cuda.enable_math_sdp(True)
global_step = 0
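# Single-process training entry point: the distributed init below is commented out, so rank and world size are hard-coded.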
def run():
#dist.init_process_group(
# backend="gloo",
    #    init_method="env://",  # Due to some training problems, we propose to use gloo instead of nccl.
#) # Use torchrun instead of mp.spawn
#rank = dist.get_rank()
#n_gpus = dist.get_world_size()
rank = 0
n_gpus = 1
hps = utils.get_hparams()
torch.manual_seed(hps.train.seed)
torch.cuda.set_device(rank)
global global_step
if rank == 0:
logger = utils.get_logger(hps.model_dir)
logger.info(hps)
utils.check_git_hash(hps.model_dir)
writer = SummaryWriter(log_dir=hps.model_dir)
writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval"))
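    # Build the training set and a bucket sampler that keeps similar input lengths within each batch.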
train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps.data)
train_sampler = DistributedBucketSampler(
train_dataset,
hps.train.batch_size,
[32, 300, 400, 500, 600, 700, 800, 900, 1000],
num_replicas=n_gpus,
rank=rank,
shuffle=True,
)
| # flake8: noqa: E402
logging.getLogger("numba").setLevel(logging.WARNING)
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = (
    True  # If you encounter training problems, please try to disable TF32.
)
torch.set_float32_matmul_precision("medium")
torch.backends.cudnn.benchmark = True
torch.backends.cuda.sdp_kernel("flash")
torch.backends.cuda.enable_flash_sdp(True)
torch.backends.cuda.enable_mem_efficient_sdp(
True
) # Not available if torch version is lower than 2.0
torch.backends.cuda.enable_math_sdp(True)
global_step = 0
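# Single-process training entry point: the distributed init below is commented out, so rank and world size are hard-coded.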
def run():
#dist.init_process_group(
# backend="gloo",
    #    init_method="env://",  # Due to some training problems, we propose to use gloo instead of nccl.
#) # Use torchrun instead of mp.spawn
#rank = dist.get_rank()
#n_gpus = dist.get_world_size()
rank = 0
n_gpus = 1
hps = utils.get_hparams()
torch.manual_seed(hps.train.seed)
torch.cuda.set_device(rank)
global global_step
if rank == 0:
logger = utils.get_logger(hps.model_dir)
logger.info(hps)
utils.check_git_hash(hps.model_dir)
writer = SummaryWriter(log_dir=hps.model_dir)
writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval"))
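    # Build the training set and a bucket sampler that keeps similar input lengths within each batch.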
train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps.data)
train_sampler = DistributedBucketSampler(
train_dataset,
hps.train.batch_size,
[32, 300, 400, 500, 600, 700, 800, 900, 1000],
num_replicas=n_gpus,
rank=rank,
shuffle=True,
) | collate_fn = TextAudioSpeakerCollate() | 1 | 2023-10-16 10:04:32+00:00 | 12k |
GXimingLu/IPA | main.py | [
{
"identifier": "get_args",
"path": "arguments.py",
"snippet": "def get_args():\n parser = argparse.ArgumentParser(description='RL')\n\n # dataset\n parser.add_argument(\n '--output-dir', type=str, default=f'{HOME_PATH}/commonGen')\n parser.add_argument(\n '--dataset-train', type=str, default=f'{HOME_PATH}/data/commongen/train.json',\n help='JSON file containing train prompts. Each item contains \"prompt\", \"response\".')\n parser.add_argument(\n '--dataset-val', type=str, default=f'{HOME_PATH}/data/commongen/val.json',\n help='JSON file containing dev prompts. Each item contains \"prompt\", \"response\".')\n\n # reward\n parser.add_argument(\n '--n_extra_tokens', type=int, default=5, help='number of reward categorization')\n parser.add_argument(\n '--sample-interval', type=int, default=750, help='step interval to sample from current policy')\n parser.add_argument(\n '--horizon', type=float, default=2500, help='horizon value in adaptive controller')\n parser.add_argument(\n '--reward_batch_size', type=int, default=16, help='batch size')\n parser.add_argument(\n '--binary_coverage', action='store_true', default=False, help='whether to use binary_coverage')\n\n # KL term\n parser.add_argument(\n '--kl_coef', type=float, default=0.0, help='coefficient for KL term in reward')\n parser.add_argument(\n '--adaptive_kl', action='store_true', default=False, help='whether to use adaptive KL controller')\n parser.add_argument(\n '--target_kl', type=float, default=3, help='target value in adaptive KL controller')\n # entropy term\n parser.add_argument(\n '--entropy_coef', type=float, default=0.0, help='coefficient for entropy term in reward')\n parser.add_argument(\n '--adaptive_entropy', action='store_true', default=False, help='whether to use adaptive entropy controller')\n parser.add_argument(\n '--target_entropy', type=float, default=40, help='target value in adaptive entropy controller')\n\n # policy\n parser.add_argument(\n '--base_model_name', type=str, default='gpt2-xl', help='language model as the base policy.')\n parser.add_argument(\n '--base_model_checkpoint', type=str, default=\"PATH_TO_DISTILLED_GPT3\", help='base policy initialization')\n parser.add_argument(\n '--value_model_name', type=str, default='gpt2-large', help='language model as the value function.')\n parser.add_argument(\n '--alpha', type=float, default=1.0, help='co-efficient to combine policy and value model.')\n parser.add_argument(\n '--response-length', type=int, default=64, help='number of tokens to generate for each prompt.')\n parser.add_argument(\n '--temperature', type=float, default=1.0, help='temperature for sampling policy.')\n parser.add_argument(\n '--gpt3_calibrate', action='store_true', default=False, help='calibrate to adapt gpt3 logprobs')\n\n # training\n parser.add_argument(\n '--total-episodes', type=int, default=2000000, help='total number of episodes')\n parser.add_argument(\n '--batch_size', type=int, default=64, help='batch size')\n parser.add_argument(\n '--grad_accum', type=int, default=2, help='gradient accumulation steps')\n parser.add_argument(\n '--lr', type=float, default=1e-5, help='learning rate')\n parser.add_argument(\n '--num_warmup_steps', type=int, default=500, help='number of warmup steps in lr scheduler')\n parser.add_argument(\n '--clip_grad', action='store_true', default=False, help='whether to clip gradient')\n parser.add_argument(\n '--max-grad-norm', type=float, default=0.5, help='maximum norm of gradients ')\n\n # generation\n parser.add_argument(\n '--num-samples', type=int, default=1, help='number of samples to generate for 
each prompt.')\n parser.add_argument(\n '--top-p', type=float, default=0.6, help='hyperparameter for nucleus sampling')\n parser.add_argument(\n '--hard_prob', type=float, default=0.75, help='whether to use hard constraint in decoding')\n parser.add_argument(\n '--force_eos', action='store_true', default=False, help='not to generate eos until all constraints satisfied')\n\n # other\n parser.add_argument(\n '--seed', type=int, default=1, help='random seed (default: 1)')\n parser.add_argument(\n '--log-interval', type=int, default=200, help='step interval to print out logs')\n parser.add_argument(\n '--save-interval', type=int, default=500, help='step interval to save model checkpoints')\n parser.add_argument(\n '--min_save_step', type=int, default=8000, help='minimal steps before saving model checkpoints')\n parser.add_argument(\n '--max_save_step', type=int, default=15000, help='maximal steps for saving model checkpoints')\n parser.add_argument(\n '--eval-interval', type=int, default=500, help='step interval to do evaluation')\n parser.add_argument(\n '--cuda-deterministic', action='store_false', default=True,\n help=\"sets flags for determinism when using CUDA (potentially slow!)\")\n\n parser.add_argument(\n '--resume', type=str, default=None, help='directory to resume generation')\n\n args = parser.parse_args()\n args.cuda = torch.cuda.is_available()\n\n return args"
},
{
"identifier": "Policy",
"path": "policy.py",
"snippet": "class Policy:\n def __init__(self, base_model_name, base_model_checkpoint, value_model_name, device, tree_tokens,\n alpha, calibrate, force_eos):\n self.device = device\n self.base_model = GPT2LMHeadModel.from_pretrained(base_model_name)\n self.base_model.load_state_dict(base_model_checkpoint)\n self.value_model = GPT2LMHeadModel.from_pretrained(value_model_name)\n\n self.tokenizer = GPT2Tokenizer.from_pretrained(base_model_name, pad_token=\"<|endoftext|>\")\n self.base_model.config.pad_token_id = self.tokenizer.pad_token_id\n self.value_model.config.pad_token_id = self.tokenizer.pad_token_id\n\n self.tokenizer.add_tokens(tree_tokens, special_tokens=True)\n\n weights = self.value_model.get_input_embeddings().weight.detach().numpy()\n mean_weights, std_weights = np.mean(weights, axis=0), np.std(weights, axis=0)\n new_inits = np.vstack([np.random.normal(loc=mean_weights, scale=std_weights) for _ in tree_tokens])\n\n self.base_model.resize_token_embeddings(len(self.tokenizer))\n self.value_model.resize_token_embeddings(len(self.tokenizer))\n with torch.no_grad():\n new_inits = torch.tensor(new_inits)\n self.value_model.get_input_embeddings().weight[-len(tree_tokens):, :] = new_inits\n\n self.base_model = self.base_model.to(self.device)\n self.base_model.parallelize()\n self.value_model = self.value_model.to(self.device)\n self.value_model.parallelize()\n\n self.best_cat = tree_tokens[0]\n self.best_cat_id = self.tokenizer.convert_tokens_to_ids(self.best_cat)\n\n self.alpha = alpha\n self.base_model.eval()\n for param in self.base_model.parameters():\n param.requires_grad = False\n self.calibrate = calibrate\n\n self.eos_tokens = None\n if force_eos:\n self.eos_tokens = self.tokenizer.convert_tokens_to_ids(['.', 'Ġ.', '!', 'Ġ!'])\n\n def sample(self,\n prompts: Union[str, List[str]] = None,\n input_ids: torch.Tensor = None,\n attention_mask: torch.Tensor = None,\n constraints: List[ConstrainedHypothesis] = None,\n max_len: int = 64,\n min_len: int = 16,\n sample: bool = True,\n top_k: int = None,\n top_p: float = None,\n temperature: float = None,\n use_control_code: bool = False) -> Dict[str, Union[torch.Tensor, List[str]]]:\n\n use_constraints = constraints is not None\n if use_constraints:\n constraints = init_batch([json.loads(x) for x in constraints], self.eos_tokens)\n\n if prompts is not None:\n assert input_ids is None and attention_mask is None, 'repeated input'\n if isinstance(prompts, str):\n prompts = [prompts]\n\n encodings_dict = self.tokenizer(prompts, return_tensors=\"pt\", padding=True)\n input_ids = encodings_dict['input_ids'].to(self.device)\n attention_mask = encodings_dict['attention_mask'].to(self.device)\n\n else:\n input_ids = input_ids.to(self.device)\n attention_mask = attention_mask.to(self.device)\n\n model_kwargs = {'attention_mask': attention_mask}\n batch_size, input_seq_len = input_ids.shape\n\n value_input_ids, value_attention_mask = add_control_code(input_ids, attention_mask, self.best_cat_id)\n value_model_kwargs = {'attention_mask': value_attention_mask}\n\n logits_warper = self.base_model._get_logits_warper(\n top_k=top_k, top_p=top_p, temperature=temperature, num_beams=1\n )\n\n unfinished_sequences = torch.ones(batch_size, dtype=torch.long, device=self.device)\n output_logprob = torch.zeros([batch_size, 0], dtype=torch.float, device=self.device)\n output_mask = torch.ones([batch_size, 0], dtype=torch.long, device=self.device)\n\n self.value_model.eval()\n with torch.no_grad():\n for step in range(max_len):\n\n outputs, next_token_logits = 
get_model_output(self.base_model, step, input_ids, attention_mask, model_kwargs)\n\n # get logit from value model\n if use_control_code:\n value_outputs, value_next_token_logits = get_model_output(self.value_model, step, value_input_ids,\n value_attention_mask, value_model_kwargs)\n if self.calibrate:\n next_token_logits = F.log_softmax(next_token_logits)\n next_token_logits = next_token_logits + self.alpha * value_next_token_logits\n\n if step < min_len:\n next_token_logits[:, self.base_model.config.eos_token_id] = float('-inf')\n if use_constraints:\n for i, constraint in enumerate(constraints):\n for bad_word in constraint.avoid():\n next_token_logits[i, bad_word] = float('-inf')\n log_prob = F.log_softmax(next_token_logits, dim=-1)\n\n if sample:\n # Temperature (higher temperature => more likely to sample low probability tokens)\n next_token_scores = logits_warper(input_ids, next_token_logits)\n probs = F.softmax(next_token_scores, dim=-1)\n next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1)\n else:\n # Greedy decoding\n next_tokens = torch.argmax(next_token_logits, dim=-1)\n\n # finished sentences should have their next token be a padding token\n next_tokens = next_tokens * unfinished_sequences + self.tokenizer.pad_token_id * (1 - unfinished_sequences)\n\n # update output mask\n output_mask = torch.cat([output_mask, unfinished_sequences[:, None]], dim=-1)\n # update output log probability\n token_logprob = torch.gather(log_prob, 1, next_tokens[:, None]).squeeze(1)\n token_logprob = token_logprob * unfinished_sequences + NEGATIVE_INF * (1 - unfinished_sequences)\n output_logprob = torch.cat([output_logprob, token_logprob[:, None]], dim=-1)\n\n # update generated ids, model inputs for next step\n input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1)\n model_kwargs = self.base_model._update_model_kwargs_for_generation(\n outputs, model_kwargs, is_encoder_decoder=self.base_model.config.is_encoder_decoder\n )\n\n if use_constraints:\n constraints = [c.advance(t) for c, t in zip(constraints, next_tokens.tolist())]\n\n if use_control_code:\n value_input_ids = torch.cat([value_input_ids, next_tokens[:, None]], dim=-1)\n value_model_kwargs = self.value_model._update_model_kwargs_for_generation(\n value_outputs, value_model_kwargs, is_encoder_decoder=self.value_model.config.is_encoder_decoder\n )\n\n # if eos_token was found in one sentence, set sentence to finished\n unfinished_sequences = unfinished_sequences.mul((next_tokens != self.tokenizer.eos_token_id).long())\n\n if unfinished_sequences.max() == 0:\n break\n\n response_ids = input_ids[:, input_seq_len:]\n response_text = [self.tokenizer.decode(output, skip_special_tokens=True, clean_up_tokenization_spaces=True)\n for output in response_ids]\n response_text = [process_generation(t) for t in response_text]\n\n prompt_ids = input_ids[:, :input_seq_len]\n if prompts is None:\n prompts = [self.tokenizer.decode(query, skip_special_tokens=True, clean_up_tokenization_spaces=True)\n for query in prompt_ids]\n\n return {\n 'query/input_ids': prompt_ids,\n 'query/text': prompts,\n 'query/mask': attention_mask,\n 'response/input_ids': response_ids,\n 'response/text': response_text,\n 'response/mask': output_mask,\n 'response/log_prob': output_logprob,\n }\n\n def forward_pass(self,\n query_input_ids: torch.Tensor,\n query_mask: torch.Tensor,\n response_input_ids: torch.Tensor,\n response_mask: torch.Tensor,\n use_control_code: bool = False):\n\n query_input_ids = query_input_ids.to(self.device)\n query_mask = 
query_mask.to(self.device)\n response_input_ids = response_input_ids.to(self.device)\n response_mask = response_mask.to(self.device)\n\n if use_control_code:\n value_query_input_ids, value_query_mask = query_input_ids, query_mask\n query_input_ids, query_mask = remove_control_code(query_input_ids, query_mask)\n\n logits = get_response_logits(self.base_model, query_input_ids, response_input_ids, query_mask, response_mask)\n\n if use_control_code:\n value_logits = get_response_logits(self.value_model, value_query_input_ids, response_input_ids,\n value_query_mask, response_mask)\n logits = logits + self.alpha * value_logits\n\n log_prob = F.log_softmax(logits, dim=-1)\n output_logprob = torch.gather(log_prob, 2, response_input_ids[:, :, None]).squeeze(2)\n output_entropy = logits_to_entropy(logits)\n lm_loss = -1. * output_logprob\n\n return {\n 'response/log_prob': mask_pad(output_logprob, response_mask),\n 'response/lm_loss': mask_pad(lm_loss, response_mask),\n 'response/entropy': mask_pad(output_entropy, response_mask),\n 'response/logits': logits,\n }"
},
{
"identifier": "DataPool",
"path": "data_pool.py",
"snippet": "class DataPool:\n def __init__(self, tree_tokens, n_extra_tokens):\n self.tree_tokens = tree_tokens\n self.n_extra_tokens = n_extra_tokens\n\n self.cat_tokens = None\n self.prompt_pool, self.response_pool, self.score_pool = [], [], []\n\n def add(self, prompts: List[str], responses: List[str], scores: List[float]):\n self.prompt_pool.extend(prompts)\n self.response_pool.extend(responses)\n self.score_pool.extend(scores)\n\n data = zip(self.prompt_pool, self.response_pool, self.score_pool)\n data = [x for x in data if x[-1] is not None]\n sorted_data = sorted(data, key=lambda x: x[-1], reverse=True)\n self.prompt_pool, self.response_pool, self.score_pool = [list(x) for x in list(zip(*sorted_data))]\n\n cat_pos = [[i] * (len(sorted_data) // self.n_extra_tokens) for i in range(self.n_extra_tokens)]\n cat_pos = [y for x in cat_pos for y in x]\n cat_pos = cat_pos + [self.n_extra_tokens - 1] * (len(sorted_data) - len(cat_pos))\n self.cat_tokens = [self.tree_tokens[i] for i in cat_pos]\n\n def get_data(self):\n return deepcopy(self.prompt_pool), deepcopy(self.response_pool), deepcopy(self.cat_tokens)\n\n def data_to_save(self):\n return {'prompts': self.prompt_pool, 'responses': self.response_pool, 'scores': self.score_pool}"
},
{
"identifier": "Reward",
"path": "reward.py",
"snippet": "class Reward:\n def __init__(self, save_path: str, batch_size: int, device: int, params: argparse.Namespace):\n self.path = save_path\n self.batch_size = batch_size\n self.params = params\n self.device = f'cuda:{device}'\n\n cola_model_name = \"textattack/roberta-base-CoLA\"\n self.cola_tokenizer = RobertaTokenizer.from_pretrained(cola_model_name)\n self.cola_model = RobertaForSequenceClassification.from_pretrained(cola_model_name).to(self.device)\n\n def get_reward(self, prompts: List[str], responses: List[str], concepts: List[str], epoch: str) -> Dict[str, List[float]]:\n reward_dict = {'coverage': [], 'cola': []}\n\n for response, concept in tqdm(zip(responses, concepts), total=len(concepts), desc='computing coverage'):\n reward_dict['coverage'].append(self._compute_coverage(response, concept, use_binary=self.params.binary_coverage))\n\n if not self.params.binary_coverage:\n reward_dict['binary_coverage'] = [int(c == 1) for c in reward_dict['coverage']]\n\n for texts in tqdm(batchify(responses, self.batch_size), total=math.ceil(len(responses) // self.batch_size),\n desc='scoring generations'):\n\n texts = [t.strip() for t in texts]\n inputs = self.cola_tokenizer(texts, padding=True, truncation=True, return_tensors=\"pt\").to(self.device)\n with torch.no_grad():\n logits = self.cola_model(**inputs).logits\n probs = logits.softmax(dim=-1)\n scores = probs[:, 1].tolist()\n reward_dict['cola'].extend(scores)\n\n overall_reward = product_rewards([reward_dict['coverage'], reward_dict['cola']])\n reward_dict.update({'reward': overall_reward})\n\n zip_scores = list(zip(reward_dict['coverage'], reward_dict['cola']))\n data = pd.DataFrame.from_dict({'prompt': prompts, 'concepts': concepts})\n collate(data, responses, zip_scores, os.path.join(self.path, f'reward_{epoch}.json'))\n\n return reward_dict\n\n @staticmethod\n def _compute_coverage(output, concept, use_binary=False):\n lematized_concepts = [nlp(c.strip())[0].lemma_ for c in concept.split('-')]\n lemmatized_output = []\n for token in output.strip().split():\n lemmatized_output.extend([x.lemma_ for x in nlp(token)])\n\n if use_binary:\n score = 0\n for word in lematized_concepts:\n if word in lemmatized_output:\n score += 1\n\n if score < len(lematized_concepts):\n return 0\n ordered_concept = sorted(lematized_concepts, key=lambda x: lemmatized_output.index(x))\n return int(ordered_concept == lematized_concepts)\n\n else:\n output_keywords = []\n for token in lemmatized_output:\n if token in lematized_concepts and token not in output_keywords:\n output_keywords.append(token)\n assert len(output_keywords) <= len(lematized_concepts), f'concepts: {concept}, keywords: {output_keywords}'\n\n coverage = 0\n for i in range(len(output_keywords)):\n if lematized_concepts[i] == output_keywords[i]:\n coverage += 1\n else:\n break\n return coverage / len(lematized_concepts)"
},
{
"identifier": "ensure_dir",
"path": "utils/utils.py",
"snippet": "def ensure_dir(d):\n if not os.path.exists(d):\n os.makedirs(d)"
},
{
"identifier": "ceil_div",
"path": "utils/utils.py",
"snippet": "def ceil_div(a, b):\n return (a - 1) // b + 1"
},
{
"identifier": "reduce_mean",
"path": "utils/utils.py",
"snippet": "def reduce_mean(value, mask, axis=None):\n if axis is None:\n return torch.sum(value * mask) / torch.sum(mask)\n return reduce_sum(value, mask, axis) / torch.sum(mask, axis)"
},
{
"identifier": "reduce_sum",
"path": "utils/utils.py",
"snippet": "def reduce_sum(value, mask, axis=None):\n if axis is None:\n return torch.sum(value * mask)\n return torch.sum(value * mask, axis)"
},
{
"identifier": "decode",
"path": "utils/generation_utils.py",
"snippet": "def decode(tokenizer, query_input_ids, response_input_ids=None):\n query = [tokenizer.decode(p, skip_special_tokens=True, clean_up_tokenization_spaces=True)\n for p in query_input_ids]\n\n if response_input_ids is None:\n return query\n\n response = [tokenizer.decode(r, skip_special_tokens=True, clean_up_tokenization_spaces=True)\n for r in response_input_ids]\n return query, response"
}
] | import os
import torch
import json
import time
import logging
import random
import argparse
import numpy as np
import torch.nn.functional as F
from typing import List
from datetime import datetime
from tqdm import tqdm
from torch.utils.data import Dataset, DataLoader
from torch.optim import Adam, Optimizer
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.tensorboard import SummaryWriter
from transformers import get_linear_schedule_with_warmup
from arguments import get_args
from policy import Policy
from data_pool import DataPool
from reward import Reward
from utils.utils import ensure_dir, ceil_div, reduce_mean, reduce_sum
from utils.generation_utils import decode | 8,347 | 'loss/total': data['total_loss'].item(),
'loss/kl': data['kl_loss'].item(),
'loss/lm': data['lm_loss'].item(),
'loss/entropy': data['entropy'].item(),
})
return stats
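    # Print a few sample generations with their LM loss, sequence-level KL, and combined objective (every log_interval steps).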
def print_samples(self, queries, responses, lm_loss, logprobs, ref_logprobs, masks, step):
if step % self.params.log_interval != 0:
return
# Log samples
for i in range(min(3, len(queries))):
sample_kl = torch.sum((logprobs[i] - ref_logprobs[i]) * masks[i]).item()
print(queries[i] + responses[i])
print(f" lm_loss = {lm_loss[i].item():+.2f}")
print(f" kl = {sample_kl:+.2f}")
print(f" total = {lm_loss[i].item() + self.params.kl_coef * sample_kl:+.2f}")
def save(self, step):
if step < self.params.min_save_step or step > self.params.max_save_step or step % self.params.save_interval != 0:
return
torch.save({
'step': step,
'value_model': self.policy.value_model.state_dict(),
'optimizer': self.optimizer.state_dict(),
'scheduler': self.scheduler.state_dict(),
'data_pool': self.data_pool.data_to_save(),
}, f'{self.params.model_dir}/ckp_{step}.pth')
log.info(f"[step {step}] model checkpoint saved")
def eval(self, step):
if step % self.params.eval_interval != 0:
return
log.info(f"[step {step}] evaluating ...")
concepts, prompts, uncons_gens, cons_gens = [], [], [], []
for i, batch in enumerate(tqdm(self.val_dataloader)):
input_ids, attention_mask, concept, constraints = batch
with torch.no_grad():
uncons_rollouts = self.policy.sample(input_ids=input_ids, attention_mask=attention_mask,
max_len=self.params.response_length, top_p=self.params.top_p,
use_control_code=(step > 0))
cons_rollouts = self.policy.sample(input_ids=input_ids, attention_mask=attention_mask,
constraints=constraints,
max_len=self.params.response_length, top_p=self.params.top_p,
use_control_code=(step > 0))
concepts.extend(concept)
prompts.extend(uncons_rollouts['query/text'])
uncons_gens.extend(uncons_rollouts['response/text'])
cons_gens.extend(cons_rollouts['response/text'])
for eval_name, gens in [('unconstrained', uncons_gens), ('constrained', cons_gens)]:
print(f" {eval_name.capitalize()} evaluation: ")
score_dict = self.score_model.get_reward(prompts, gens, concepts, f'step{step}_eval_{eval_name}')
for name, scores in score_dict.items():
metric_score = np.mean(scores)
print(f" {name} = {metric_score:+.2f}")
self.writer.add_scalar(f'{eval_name.capitalize()}_eval/{name}', metric_score, step)
def main():
args = get_args()
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
if args.cuda and torch.cuda.is_available() and args.cuda_deterministic:
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
num_gpus = torch.cuda.device_count()
log.info(f'Detect {num_gpus} GPUS')
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
if args.resume is None:
date_time = datetime.now().strftime("%m-%d-%Y_%H:%M:%S")
args.save_dir = os.path.join(args.output_dir, date_time)
else:
sub_dirs = args.resume.split(os.sep)
date_time, args.output_dir, args.save_dir = sub_dirs[-3], os.sep.join(sub_dirs[:-3]), os.sep.join(sub_dirs[:-2])
args.reward_dir = os.path.join(args.save_dir, 'reward')
args.model_dir = os.path.join(args.save_dir, 'model')
args.tensorboard_dir = os.path.join(args.output_dir, 'tensorboard', date_time)
log.info(f'Write to output directory: {args.save_dir}')
if args.resume is None:
for d in [args.output_dir, args.save_dir, args.reward_dir, args.model_dir, args.tensorboard_dir]:
ensure_dir(d)
with open(os.path.join(args.save_dir, 'args.json'), 'w') as f:
json.dump(args.__dict__, f, indent=2)
tree_tokens = [' _TREE_TOKEN_{}'.format(str(idx).zfill(5)) for idx in range(args.n_extra_tokens)]
log.info(f'Initializing models ...')
policy_checkpoint = torch.load(args.base_model_checkpoint, map_location='cpu')['policy_model']
policy = Policy(base_model_name=args.base_model_name, base_model_checkpoint=policy_checkpoint,
value_model_name=args.value_model_name, device=device, tree_tokens=tree_tokens, alpha=args.alpha,
calibrate=args.gpt3_calibrate, force_eos=args.force_eos)
reward = Reward(save_path=args.reward_dir, batch_size=args.reward_batch_size, device=num_gpus - 1, params=args)
data_pool = DataPool(tree_tokens=tree_tokens, n_extra_tokens=args.n_extra_tokens)
log.info(f'Initialization done!')
prompt_collator = PromptCollator(tokenizer=policy.tokenizer)
train_dataset = PromptDataset(path=args.dataset_train, tokenizer=policy.tokenizer)
train_dataloader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, drop_last=True, collate_fn=prompt_collator)
log.info(f'Load train set with {len(train_dataset)} examples')
val_dataset = PromptDataset(path=args.dataset_val, tokenizer=policy.tokenizer)
val_dataloader = DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False, collate_fn=prompt_collator)
log.info(f'Load val set with {len(val_dataset)} examples')
# set up optimizer and scheduler
optimizer = Adam(policy.value_model.parameters(), lr=args.lr, eps=1e-5)
|
logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO"))
log = logging.getLogger(__name__)
class PromptDataset(Dataset):
def __init__(self, path, tokenizer):
data = json.load(open(path, 'r'))
self.items = [v for k, v in data.items() if v['human_order']]
self.tokenizer = tokenizer
def __len__(self):
return len(self.items)
def __getitem__(self, idx):
item = self.items[idx]
order_words = random.choice(item['human_order'])
constraint = json.dumps([list(map(lambda x: self.tokenizer.encode(f' {x}'), item['inflection'][w]))
for w in order_words.split('-')])
prompt = 'Generate a sentence including the following keywords in the same order as listed: %s\n\nAnswer:'
prompt = prompt % ' '.join(order_words.split('-'))
return {
'order': order_words,
'constraint': constraint,
'prompt': prompt,
}
class PromptCollator(object):
def __init__(self, tokenizer):
self.tokenizer = tokenizer
def __call__(self, sequences):
concepts = [sequence['order'] for sequence in sequences]
prompts = [sequence['prompt'] for sequence in sequences]
constraints = [sequence['constraint'] for sequence in sequences]
encodings_dict = self.tokenizer(prompts, return_tensors="pt", padding=True)
input_ids = encodings_dict['input_ids']
attention_mask = encodings_dict['attention_mask']
return input_ids, attention_mask, concepts, constraints
class SequenceDataset(Dataset):
def __init__(self, data_pool: DataPool):
self.queries, self.responses, self.cat_tokens = data_pool.get_data()
def __len__(self):
return len(self.queries)
def __getitem__(self, idx):
return {'query': self.queries[idx],
'response': self.responses[idx],
'cat_tokens': self.cat_tokens[idx]
}
class SequenceCollator(object):
def __init__(self, tokenizer):
self.tokenizer = tokenizer
def __call__(self, sequences):
queries = [sequence['query'] for sequence in sequences]
responses = [sequence['response'] + self.tokenizer.eos_token for sequence in sequences]
cat_ids = [self.tokenizer.convert_tokens_to_ids(sequence['cat_tokens']) for sequence in sequences]
query_encodings_dict = self.tokenizer(queries, return_tensors="pt", padding=True)
query_input_ids = query_encodings_dict['input_ids']
query_mask = query_encodings_dict['attention_mask']
query_input_ids = torch.cat([query_input_ids.new(cat_ids)[:, None], query_input_ids], dim=1)
query_mask = torch.cat([query_mask.new([1] * len(query_mask))[:, None], query_mask], dim=1)
response_encodings_dict = self.tokenizer(responses, return_tensors="pt", padding=True)
response_input_ids = response_encodings_dict['input_ids']
response_mask = response_encodings_dict['attention_mask']
return query_input_ids, query_mask, response_input_ids, response_mask
class FixedController:
def __init__(self, coef):
self.value = coef
def update(self, current, n_steps, lower_bound):
pass
class AdaptiveController:
def __init__(self, init_coef, target, horizon):
self.value = init_coef
self.target = target
self.horizon = horizon
def update(self, current, n_steps, lower_bound):
proportional_error = np.clip(current / self.target - 1, -0.2, 0.2)
if lower_bound:
mult = 1 + proportional_error * n_steps / self.horizon
else:
mult = 1 - proportional_error * n_steps / self.horizon
self.value *= mult
class ConditionTrainer:
def __init__(self,
params: argparse.Namespace,
policy: Policy,
data_pool: DataPool,
score_model: Reward,
tree_tokens: List[str],
train_dataloader: DataLoader,
val_dataloader: DataLoader,
optimizer: Optimizer,
scheduler: LambdaLR,
resume: bool):
self.params = params
self.policy = policy
self.data_pool = data_pool
self.score_model = score_model
self.optimizer = optimizer
self.scheduler = scheduler
self.train_dataloader = train_dataloader
self.val_dataloader = val_dataloader
self.writer = SummaryWriter(log_dir=params.tensorboard_dir)
if self.params.adaptive_kl:
self.kl_ctl = AdaptiveController(self.params.kl_coef, self.params.target_kl, self.params.horizon)
else:
self.kl_ctl = FixedController(self.params.kl_coef)
self.kl_loss = torch.nn.KLDivLoss(reduction="none")
if self.params.adaptive_entropy:
self.entropy_ctl = AdaptiveController(self.params.entropy_coef, self.params.target_entropy,
self.params.horizon)
else:
self.entropy_ctl = FixedController(self.params.entropy_coef)
self.tree_tokens = tree_tokens
self.best_cat = self.tree_tokens[0]
self.best_cat_id = self.policy.tokenizer.convert_tokens_to_ids(self.best_cat)
self.sample_dataloader, self.sampler = None, None
self.seq_collator = SequenceCollator(tokenizer=policy.tokenizer)
if resume:
sample_dataset = SequenceDataset(data_pool=self.data_pool)
self.sample_dataloader = DataLoader(sample_dataset, batch_size=self.params.batch_size,
shuffle=True, drop_last=True, collate_fn=self.seq_collator)
self.sampler = iter(self.sample_dataloader)
def sample(self, step):
if step % self.params.sample_interval != 0:
return
log.info(f"[step {step}] Sampling ...")
concepts, prompts, responses = [], [], []
for i, batch in enumerate(tqdm(self.train_dataloader, total=len(self.train_dataloader),
desc='Sampling from current policy')):
input_ids, attention_mask, concept, constraints = batch
use_constraint = random.choices([1, 0], weights=[self.params.hard_prob, 1 - self.params.hard_prob], k=1)[0]
rollouts = self.policy.sample(input_ids=input_ids, attention_mask=attention_mask,
constraints=constraints if use_constraint else None,
max_len=self.params.response_length, top_p=self.params.top_p,
use_control_code=(step > 0))
prompt, response = rollouts['query/text'], rollouts['response/text']
concepts.extend(concept)
prompts.extend(prompt)
responses.extend(response)
scores = self.score_model.get_reward(prompts, responses, concepts, f'step{step}')
self.data_pool.add(prompts=prompts, responses=responses, scores=scores['reward'])
sample_dataset = SequenceDataset(data_pool=self.data_pool)
self.sample_dataloader = DataLoader(sample_dataset, batch_size=self.params.batch_size,
shuffle=True, drop_last=True, collate_fn=self.seq_collator)
self.sampler = iter(self.sample_dataloader)
def step(self, step_num):
step_started_at = time.time()
self.save(step=step_num)
self.eval(step=step_num)
self.sample(step=step_num)
try:
batch = next(self.sampler)
assert len(batch[0]) == self.params.batch_size, 'insufficient batch'
except (StopIteration, AssertionError):
self.sampler = iter(self.sample_dataloader)
batch = next(self.sampler)
self.policy.value_model.train()
ppo_loss, stats = self.loss(step_num, *batch)
ppo_loss = ppo_loss / self.params.grad_accum
ppo_loss.backward()
if self.params.clip_grad:
torch.nn.utils.clip_grad_norm_(self.policy.value_model.parameters(), self.params.max_grad_norm)
if (step_num + 1) % self.params.grad_accum == 0:
self.optimizer.step()
self.optimizer.zero_grad()
self.scheduler.step()
for metric in ['kl', 'entropy']:
self.writer.add_scalar(f'Objective/{metric}', stats[f'objective/{metric}'], step_num)
for metric in ['lm', 'kl', 'entropy', 'total']:
self.writer.add_scalar(f'Loss/{metric}', stats[f'loss/{metric}'], step_num)
self.writer.add_scalar(f'Params/lr', self.optimizer.param_groups[0]['lr'], step_num)
self.writer.add_scalar(f'Params/kl_coef', self.kl_ctl.value, step_num)
self.writer.add_scalar(f'Params/entropy_coef', self.entropy_ctl.value, step_num)
self.kl_ctl.update(stats['objective/kl'], self.params.batch_size, True)
self.entropy_ctl.update(stats['objective/entropy'], self.params.batch_size, False)
step_time = time.time() - step_started_at
eps_per_second = float(self.params.batch_size) / step_time
log.info(f"[step {step_num}] step_time={step_time:.2f}s, eps/s={eps_per_second:.2f}")
def loss(self, step, query_input_ids, query_mask, response_input_ids, response_mask):
outputs = self.policy.forward_pass(query_input_ids, query_mask, response_input_ids, response_mask,
use_control_code=True)
lm_loss, logprobs, entropy, logits = outputs['response/lm_loss'], outputs['response/log_prob'], \
outputs['response/entropy'], outputs['response/logits']
masks = response_mask.to(self.policy.device)
with torch.no_grad():
ref_outputs = self.policy.forward_pass(query_input_ids[:, 1:], query_mask[:, 1:],
response_input_ids, response_mask, use_control_code=False)
ref_logprobs, ref_logits = ref_outputs['response/log_prob'], ref_outputs['response/logits']
kl = torch.sum(self.kl_loss(F.log_softmax(ref_logits, dim=-1), F.softmax(logits, dim=-1)), dim=-1)
loss = reduce_mean(lm_loss + self.kl_ctl.value * kl - self.entropy_ctl.value * entropy, masks)
data = {'logprobs': logprobs, 'ref_logprobs': ref_logprobs, 'masks': masks,
'logits': logits, 'ref_logits': ref_logits,
'lm_loss': reduce_mean(lm_loss, masks), 'kl_loss': reduce_mean(kl, masks),
'entropy': reduce_mean(entropy, masks), 'total_loss': loss}
stats = self.record_step_stats(data)
queries, responses = decode(self.policy.tokenizer, query_input_ids, response_input_ids)
self.print_samples(queries=queries, responses=responses, lm_loss=reduce_mean(lm_loss, masks, axis=1),
logprobs=logprobs, ref_logprobs=ref_logprobs, masks=masks, step=step)
return loss, stats
def record_step_stats(self, data):
masks = data['masks']
kl = torch.sum(self.kl_loss(F.log_softmax(data['ref_logits'], dim=-1), F.softmax(data['logits'], dim=-1)), dim=-1)
mean_kl = torch.mean(reduce_sum(kl, masks, axis=1))
mean_entropy = torch.mean(reduce_sum(-data['logprobs'], masks, axis=1))
stats = {
'objective/kl': mean_kl.item(),
'objective/entropy': mean_entropy.item(),
}
stats.update({
'loss/total': data['total_loss'].item(),
'loss/kl': data['kl_loss'].item(),
'loss/lm': data['lm_loss'].item(),
'loss/entropy': data['entropy'].item(),
})
return stats
def print_samples(self, queries, responses, lm_loss, logprobs, ref_logprobs, masks, step):
if step % self.params.log_interval != 0:
return
# Log samples
for i in range(min(3, len(queries))):
sample_kl = torch.sum((logprobs[i] - ref_logprobs[i]) * masks[i]).item()
print(queries[i] + responses[i])
print(f" lm_loss = {lm_loss[i].item():+.2f}")
print(f" kl = {sample_kl:+.2f}")
print(f" total = {lm_loss[i].item() + self.params.kl_coef * sample_kl:+.2f}")
def save(self, step):
if step < self.params.min_save_step or step > self.params.max_save_step or step % self.params.save_interval != 0:
return
torch.save({
'step': step,
'value_model': self.policy.value_model.state_dict(),
'optimizer': self.optimizer.state_dict(),
'scheduler': self.scheduler.state_dict(),
'data_pool': self.data_pool.data_to_save(),
}, f'{self.params.model_dir}/ckp_{step}.pth')
log.info(f"[step {step}] model checkpoint saved")
def eval(self, step):
if step % self.params.eval_interval != 0:
return
log.info(f"[step {step}] evaluating ...")
concepts, prompts, uncons_gens, cons_gens = [], [], [], []
for i, batch in enumerate(tqdm(self.val_dataloader)):
input_ids, attention_mask, concept, constraints = batch
with torch.no_grad():
uncons_rollouts = self.policy.sample(input_ids=input_ids, attention_mask=attention_mask,
max_len=self.params.response_length, top_p=self.params.top_p,
use_control_code=(step > 0))
cons_rollouts = self.policy.sample(input_ids=input_ids, attention_mask=attention_mask,
constraints=constraints,
max_len=self.params.response_length, top_p=self.params.top_p,
use_control_code=(step > 0))
concepts.extend(concept)
prompts.extend(uncons_rollouts['query/text'])
uncons_gens.extend(uncons_rollouts['response/text'])
cons_gens.extend(cons_rollouts['response/text'])
for eval_name, gens in [('unconstrained', uncons_gens), ('constrained', cons_gens)]:
print(f" {eval_name.capitalize()} evaluation: ")
score_dict = self.score_model.get_reward(prompts, gens, concepts, f'step{step}_eval_{eval_name}')
for name, scores in score_dict.items():
metric_score = np.mean(scores)
print(f" {name} = {metric_score:+.2f}")
self.writer.add_scalar(f'{eval_name.capitalize()}_eval/{name}', metric_score, step)
def main():
args = get_args()
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
if args.cuda and torch.cuda.is_available() and args.cuda_deterministic:
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
num_gpus = torch.cuda.device_count()
log.info(f'Detect {num_gpus} GPUS')
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
if args.resume is None:
date_time = datetime.now().strftime("%m-%d-%Y_%H:%M:%S")
args.save_dir = os.path.join(args.output_dir, date_time)
else:
sub_dirs = args.resume.split(os.sep)
date_time, args.output_dir, args.save_dir = sub_dirs[-3], os.sep.join(sub_dirs[:-3]), os.sep.join(sub_dirs[:-2])
args.reward_dir = os.path.join(args.save_dir, 'reward')
args.model_dir = os.path.join(args.save_dir, 'model')
args.tensorboard_dir = os.path.join(args.output_dir, 'tensorboard', date_time)
log.info(f'Write to output directory: {args.save_dir}')
if args.resume is None:
for d in [args.output_dir, args.save_dir, args.reward_dir, args.model_dir, args.tensorboard_dir]:
ensure_dir(d)
with open(os.path.join(args.save_dir, 'args.json'), 'w') as f:
json.dump(args.__dict__, f, indent=2)
tree_tokens = [' _TREE_TOKEN_{}'.format(str(idx).zfill(5)) for idx in range(args.n_extra_tokens)]
log.info(f'Initializing models ...')
policy_checkpoint = torch.load(args.base_model_checkpoint, map_location='cpu')['policy_model']
policy = Policy(base_model_name=args.base_model_name, base_model_checkpoint=policy_checkpoint,
value_model_name=args.value_model_name, device=device, tree_tokens=tree_tokens, alpha=args.alpha,
calibrate=args.gpt3_calibrate, force_eos=args.force_eos)
reward = Reward(save_path=args.reward_dir, batch_size=args.reward_batch_size, device=num_gpus - 1, params=args)
data_pool = DataPool(tree_tokens=tree_tokens, n_extra_tokens=args.n_extra_tokens)
log.info(f'Initialization done!')
prompt_collator = PromptCollator(tokenizer=policy.tokenizer)
train_dataset = PromptDataset(path=args.dataset_train, tokenizer=policy.tokenizer)
train_dataloader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, drop_last=True, collate_fn=prompt_collator)
log.info(f'Load train set with {len(train_dataset)} examples')
val_dataset = PromptDataset(path=args.dataset_val, tokenizer=policy.tokenizer)
val_dataloader = DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False, collate_fn=prompt_collator)
log.info(f'Load val set with {len(val_dataset)} examples')
# set up optimizer and scheduler
optimizer = Adam(policy.value_model.parameters(), lr=args.lr, eps=1e-5) | args.total_steps = ceil_div(args.total_episodes, args.batch_size) | 5 | 2023-10-20 08:30:18+00:00 | 12k |
violet-sto/HN-GFN | main.py | [
{
"identifier": "Dataset",
"path": "dataset.py",
"snippet": "class Dataset:\n\n def __init__(self, args, bpath, oracle, device):\n self.test_split_rng = np.random.RandomState(142857)\n self.train_rng = np.random.RandomState(int(time.time()))\n self.train_mols = []\n self.test_mols = []\n self.all_mols = []\n self.train_mols_map = {}\n\n self.mdp = MolMDPExtended(bpath)\n self.mdp.post_init(device, args.proxy_repr_type, include_nblocks=args.include_nblocks)\n self.mdp.build_translation_table()\n if args.floatX == 'float64':\n self.mdp.floatX = torch.double\n else:\n self.mdp.floatX = torch.float\n self.mdp._cue_max_blocks = args.max_blocks\n self.max_blocks = args.max_blocks\n self.oracle = oracle\n self._device = device\n self.seen_molecules = set()\n self.stop_event = threading.Event()\n\n self.target_norm = [-8.6, 1.10] # for dockerscore\n\n self.hypervolume = Hypervolume(ref_point=torch.zeros(len(args.objectives)))\n\n def load_h5(self, path, test_ratio=0.1, num_init_examples=None):\n import json\n columns = [\"smiles\", \"dockscore\",\"blockidxs\", \"slices\", \"jbonds\", \"stems\"]\n store = pd.HDFStore(path, 'r')\n df = store.select('df')\n # Pandas has problem with calculating some stuff on float16\n df.dockscore = df.dockscore.astype(\"float64\")\n for cl_mame in columns[2:]:\n df.loc[:, cl_mame] = df[cl_mame].apply(json.loads)\n\n test_idxs = self.test_split_rng.choice(\n len(df), int(test_ratio * len(df)), replace=False)\n\n split_bool = np.zeros(len(df), dtype=np.bool)\n split_bool[test_idxs] = True\n self.scores = []\n self.smis = []\n for i in tqdm(range(len(df))):\n m = BlockMoleculeDataExtended()\n for c in range(1, len(columns)):\n setattr(m, columns[c], df.iloc[i, c - 1])\n m.blocks = [self.mdp.block_mols[i] for i in m.blockidxs]\n if len(m.blocks) > self.max_blocks:\n continue\n m.numblocks = len(m.blocks)\n m.score = self.oracle.get_score([m])\n self.scores.append(m.score)\n self.smis.append(m.smiles)\n self.all_mols.append(m)\n if split_bool[i]: \n self.test_mols.append(m)\n else:\n self.train_mols.append(m)\n if len(self.train_mols)+len(self.test_mols) >= num_init_examples:\n break\n store.close()\n\n print(\"Sampling initial {} molecules from all {} molecules...\".format(\n num_init_examples, len(split_bool)))\n print(len(self.train_mols), 'train mols')\n print(len(self.test_mols), 'test mols')\n\n def r2r(self, dockscore=None, normscore=None):\n if dockscore is not None:\n normscore = 4-(min(0, dockscore) -\n self.target_norm[0])/self.target_norm[1]\n normscore = max(0.1, normscore)\n return (normscore/1) ** 1\n\n def _get(self, i, dset):\n return [(dset[i], dset[i].score)]\n\n def sample(self, n):\n eidx = np.random.randint(0, len(self.train_mols), n)\n samples = sum((self._get(i, self.train_mols) for i in eidx), [])\n\n return zip(*samples)\n\n def sample2batch(self, mb):\n s, r = mb\n s = self.mdp.mols2batch([self.mdp.mol2repr(i) for i in s])\n r = torch.tensor(pd.DataFrame.from_dict(\n r).values, device=self._device).float()\n return (s, r)\n\n def iterset(self, n, mode):\n if mode == 'test':\n dset = self.test_mols\n elif mode == 'train':\n dset = self.train_mols\n\n N = len(dset)\n for i in range(int(np.ceil(N/n))):\n samples = sum((self._get(j, dset)\n for j in range(i*n, min(N, (i+1)*n))), [])\n yield self.sample2batch(zip(*samples))\n\n def add_samples(self, batch):\n picked_mols, scores, picked_smis = batch\n\n for m in picked_mols:\n if np.random.uniform() < (1/10):\n self.test_mols.append(m)\n else:\n self.train_mols.append(m)\n self.all_mols.append(m)\n \n self.scores += scores\n self.smis += [smis[-1] 
for smis in picked_smis]\n \n self.stop_event.clear()\n\n def compute_hypervolume(self):\n scores = torch.tensor(pd.DataFrame.from_dict(self.scores).values)\n volume = self.hypervolume.compute(scores)\n\n return volume\n \n def start_samplers(self, n, mbsize):\n self.ready_events = [threading.Event() for i in range(n)]\n self.resume_events = [threading.Event() for i in range(n)]\n self.results = [None] * n\n def f(idx):\n while not self.stop_event.is_set():\n try:\n self.results[idx] = self.sample2batch(self.sample(mbsize))\n except Exception as e:\n print(\"Exception while sampling:\")\n print(e)\n self.sampler_threads[idx].failed = True\n self.sampler_threads[idx].exception = e\n self.ready_events[idx].set()\n break\n self.ready_events[idx].set()\n self.resume_events[idx].clear()\n self.resume_events[idx].wait()\n self.sampler_threads = [threading.Thread(target=f, args=(i,)) for i in range(n)]\n [setattr(i, 'failed', False) for i in self.sampler_threads]\n [i.start() for i in self.sampler_threads]\n round_robin_idx = [0]\n def get():\n while True:\n idx = round_robin_idx[0]\n round_robin_idx[0] = (round_robin_idx[0] + 1) % n\n if self.ready_events[idx].is_set():\n r = self.results[idx]\n self.ready_events[idx].clear()\n self.resume_events[idx].set()\n return r\n elif round_robin_idx[0] == 0:\n time.sleep(0.001)\n return get\n\n def stop_samplers_and_join(self):\n self.stop_event.set()\n if hasattr(self, 'sampler_threads'):\n while any([i.is_alive() for i in self.sampler_threads]):\n [i.set() for i in self.resume_events]\n [i.join(0.05) for i in self.sampler_threads]"
},
{
"identifier": "MolMDPExtended",
"path": "mol_mdp_ext.py",
"snippet": "class MolMDPExtended(MolMDP):\n\n def build_translation_table(self):\n \"\"\"build a symmetry mapping for blocks. Necessary to compute parent transitions\"\"\"\n self.translation_table = {}\n for blockidx in range(len(self.block_mols)):\n # Blocks have multiple ways of being attached. By default,\n # a new block is attached to the target stem by attaching\n # it's kth atom, where k = block_rs[new_block_idx][0].\n # When computing a reverse action (from a parent), we may\n # wish to attach the new block to a different atom. In\n # the blocks library, there are duplicates of the same\n # block but with block_rs[block][0] set to a different\n # atom. Thus, for the reverse action we have to find out\n # which duplicate this corresponds to.\n\n # Here, we compute, for block blockidx, what is the index\n # of the duplicate block, if someone wants to attach to\n # atom x of the block.\n # So atom_map[x] == bidx, such that block_rs[bidx][0] == x\n atom_map = {}\n for j in range(len(self.block_mols)):\n if self.block_smi[blockidx] == self.block_smi[j]:\n atom_map[self.block_rs[j][0]] = j\n self.translation_table[blockidx] = atom_map\n\n # We're still missing some \"duplicates\", as some might be\n # symmetric versions of each other. For example, block CC with\n # block_rs == [0,1] has no duplicate, because the duplicate\n # with block_rs [1,0] would be a symmetric version (both C\n # atoms are the \"same\").\n\n # To test this, let's create nonsense molecules by attaching\n # duplicate blocks to a Gold atom, and testing whether they\n # are the same.\n gold = Chem.MolFromSmiles('[Au]')\n # If we find that two molecules are the same when attaching\n # them with two different atoms, then that means the atom\n # numbers are symmetries. We can add those to the table.\n for blockidx in range(len(self.block_mols)):\n for j in self.block_rs[blockidx]:\n if j not in self.translation_table[blockidx]:\n symmetric_duplicate = None\n for atom, block_duplicate in self.translation_table[blockidx].items():\n molA, _ = chem.mol_from_frag(\n jun_bonds=[[0,1,0,j]],\n frags=[gold, self.block_mols[blockidx]])\n molB, _ = chem.mol_from_frag(\n jun_bonds=[[0,1,0,atom]],\n frags=[gold, self.block_mols[blockidx]])\n if (Chem.MolToSmiles(molA) == Chem.MolToSmiles(molB) or\n molA.HasSubstructMatch(molB)):\n symmetric_duplicate = block_duplicate\n break\n if symmetric_duplicate is None:\n raise ValueError('block', blockidx, self.block_smi[blockidx],\n 'has no duplicate for atom', j,\n 'in position 0, and no symmetrical correspondance')\n self.translation_table[blockidx][j] = symmetric_duplicate\n #print('block', blockidx, '+ atom', j,\n # 'in position 0 is a symmetric duplicate of',\n # symmetric_duplicate)\n\n def parents(self, mol=None):\n \"\"\"returns all the possible parents of molecule mol (or the current\n molecule if mol is None.\n\n Returns a list of (BlockMoleculeDataExtended, (block_idx, stem_idx)) pairs such that\n for a pair (m, (b, s)), MolMDPExtended.add_block_to(m, b, s) == mol.\n \"\"\"\n if len(mol.blockidxs) == 1:\n # If there's just a single block, then the only parent is\n # the empty block with the action that recreates that block\n return [(BlockMoleculeDataExtended(), (mol.blockidxs[0], 0))]\n\n # Compute the how many blocks each block is connected to\n blocks_degree = defaultdict(int)\n for a,b,_,_ in mol.jbonds:\n blocks_degree[a] += 1\n blocks_degree[b] += 1\n # Keep only blocks of degree 1 (those are the ones that could\n # have just been added)\n blocks_degree_1 = [i for i, d in 
blocks_degree.items() if d == 1]\n # Form new molecules without these blocks\n parent_mols = []\n\n for rblockidx in blocks_degree_1:\n new_mol = mol.copy()\n # find which bond we're removing\n removed_bonds = [(jbidx, bond) for jbidx, bond in enumerate(new_mol.jbonds)\n if rblockidx in bond[:2]]\n assert len(removed_bonds) == 1\n rjbidx, rbond = removed_bonds[0]\n # Pop the bond\n new_mol.jbonds.pop(rjbidx)\n # Remove the block\n mask = np.ones(len(new_mol.blockidxs), dtype=np.bool)\n mask[rblockidx] = 0\n reindex = new_mol.delete_blocks(mask)\n # reindex maps old blockidx to new blockidx, since the\n # block the removed block was attached to might have its\n # index shifted by 1.\n\n # Compute which stem the bond was using\n stem = ([reindex[rbond[0]], rbond[2]] if rblockidx == rbond[1] else\n [reindex[rbond[1]], rbond[3]])\n # and add it back\n new_mol.stems = [list(i) for i in new_mol.stems] + [stem]\n #new_mol.stems.append(stem)\n # and we have a parent. The stem idx to recreate mol is\n # the last stem, since we appended `stem` in the back of\n # the stem list.\n # We also have to translate the block id to match the bond\n # we broke, see build_translation_table().\n removed_stem_atom = (\n rbond[3] if rblockidx == rbond[1] else rbond[2])\n blockid = mol.blockidxs[rblockidx]\n if removed_stem_atom not in self.translation_table[blockid]:\n raise ValueError('Could not translate removed stem to duplicate or symmetric block.')\n parent_mols.append([new_mol,\n # action = (block_idx, stem_idx)\n (self.translation_table[blockid][removed_stem_atom],\n len(new_mol.stems) - 1)])\n if not len(parent_mols):\n raise ValueError('Could not find any parents')\n return parent_mols\n\n\n def add_block_to(self, mol, block_idx, stem_idx=None, atmidx=None):\n '''out-of-place version of add_block'''\n #assert (block_idx >= 0) and (block_idx <= len(self.block_mols)), \"unknown block\"\n if mol.numblocks == 0:\n stem_idx = None\n new_mol = mol.copy()\n new_mol.add_block(block_idx,\n block=self.block_mols[block_idx],\n block_r=self.block_rs[block_idx],\n stem_idx=stem_idx, atmidx=atmidx)\n return new_mol\n\n def remove_jbond_from(self, mol, jbond_idx=None, atmidx=None):\n new_mol = mol.copy()\n new_mol.remove_jbond(jbond_idx, atmidx)\n return new_mol\n\n def a2mol(self, acts):\n mol = BlockMoleculeDataExtended()\n for i in acts:\n if i[0] >= 0:\n mol = self.add_block_to(mol, *i)\n return mol\n\n def reset(self):\n self.molecule = BlockMoleculeDataExtended()\n return None\n\n\n def post_init(self, device, repr_type, include_bonds=False, include_nblocks=False):\n self.device = device\n self.repr_type = repr_type\n #self.max_bond_atmidx = max([max(i) for i in self.block_rs])\n self.max_num_atm = max(self.block_natm)\n # see model_block.mol2graph\n self.true_block_set = sorted(set(self.block_smi))\n self.stem_type_offset = np.int32([0] + list(np.cumsum([\n max(self.block_rs[self.block_smi.index(i)])+1 for i in self.true_block_set])))\n self.num_stem_types = self.stem_type_offset[-1]\n self.true_blockidx = [self.true_block_set.index(i) for i in self.block_smi]\n self.num_true_blocks = len(self.true_block_set)\n self.include_nblocks = include_nblocks\n self.include_bonds = include_bonds\n #print(self.max_num_atm, self.num_stem_types)\n self.molcache = {}\n\n def mols2batch(self, mols):\n if self.repr_type == 'block_graph':\n return model_block.mols2batch(mols, self)\n elif self.repr_type == 'atom_graph':\n return model_atom.mols2batch(mols, self)\n elif self.repr_type == 'morgan_fingerprint':\n return 
model_fingerprint.mols2batch(mols, self)\n\n def mol2repr(self, mol=None):\n if mol is None:\n mol = self.molecule\n #molhash = str(mol.blockidxs)+':'+str(mol.stems)+':'+str(mol.jbonds)\n #if molhash in self.molcache:\n # return self.molcache[molhash]\n if self.repr_type == 'block_graph':\n r = model_block.mol2graph(mol, self, self.floatX)\n elif self.repr_type == 'atom_graph':\n r = model_atom.mol2graph(mol, self, self.floatX,\n bonds=self.include_bonds,\n nblocks=self.include_nblocks)\n elif self.repr_type == 'morgan_fingerprint':\n r = model_fingerprint.mol2fp(mol, self, self.floatX)\n #self.molcache[molhash] = r\n return r\n\n def get_nx_graph(self, mol: BlockMoleculeData, true_block=False):\n true_blockidx = self.true_blockidx\n\n G = nx.DiGraph()\n blockidxs = [true_blockidx[xx] for xx in mol.blockidxs] if true_block else mol.blockidxs\n\n G.add_nodes_from([(ix, {\"block\": blockidxs[ix]}) for ix in range(len(blockidxs))])\n\n if len(mol.jbonds) > 0:\n edges = []\n for jbond in mol.jbonds:\n edges.append((jbond[0], jbond[1],\n {\"bond\": [jbond[2], jbond[3]]}))\n edges.append((jbond[1], jbond[0],\n {\"bond\": [jbond[3], jbond[2]]}))\n G.add_edges_from(edges)\n return G\n\n def graphs_are_isomorphic(self, g1, g2):\n return nx.algorithms.is_isomorphic(g1, g2, node_match=node_match, edge_match=edge_match)"
},
{
"identifier": "BlockMoleculeDataExtended",
"path": "mol_mdp_ext.py",
"snippet": "class BlockMoleculeDataExtended(BlockMoleculeData):\n\n @property\n def mol(self):\n return chem.mol_from_frag(jun_bonds=self.jbonds, frags=self.blocks)[0]\n\n @property\n def smiles(self):\n return Chem.MolToSmiles(self.mol)\n\n def copy(self): # shallow copy\n o = BlockMoleculeDataExtended()\n o.blockidxs = list(self.blockidxs)\n o.blocks = list(self.blocks)\n o.slices = list(self.slices)\n o.numblocks = self.numblocks\n o.jbonds = list(self.jbonds)\n o.stems = list(self.stems)\n return o\n\n def as_dict(self):\n return {'blockidxs': self.blockidxs,\n 'slices': self.slices,\n 'numblocks': self.numblocks,\n 'jbonds': self.jbonds,\n 'stems': self.stems}"
},
{
"identifier": "Oracle",
"path": "oracle/oracle.py",
"snippet": "class Oracle():\n def __init__(self, args, mols_ref=None):\n '''\n @params:\n args (dict): argsurations\n '''\n self.objectives = args.objectives\n self.fps_ref = [AllChem.GetMorganFingerprintAsBitVect(x, 3, 2048) \n for x in mols_ref] if mols_ref else None\n self.device = torch.device(args.device)\n\n def batch_get_scores(self, mols):\n '''\n @params:\n mols: molecules to estimate score\n @return:\n dicts (list): list of score dictionaries\n '''\n dicts = [{} for _ in mols]\n for obj in self.objectives:\n scores = get_scores(obj, mols, device=self.device)\n for i, mol in enumerate(mols):\n dicts[i][obj] = scores[i]\n return dicts\n \n def get_score(self, mol):\n scores = {}\n for obj in self.objectives:\n score = get_scores(obj, mol, device=self.device)\n scores[obj] = score[0]\n \n return scores"
},
{
"identifier": "get_proxy",
"path": "proxy/proxy.py",
"snippet": "def get_proxy(args, bpath, oracle):\n if args.acq_fn.lower() == 'none':\n return NoAF(args, bpath, oracle)\n\n elif args.acq_fn.lower() == 'ucb':\n return UCB(args, bpath, oracle)\n \n elif args.acq_fn.lower() == 'ucb_chebyshev':\n return UCB_chebyshev(args, bpath, oracle)\n\n elif args.acq_fn.lower() == 'ei':\n return EI(args, bpath, oracle)"
},
{
"identifier": "FMGFlowNet",
"path": "generator/gfn.py",
"snippet": "class FMGFlowNet(nn.Module):\n def __init__(self, args, bpath):\n super().__init__()\n self.args = args\n mdp = MolMDPExtended(bpath)\n mdp.post_init(args.device, args.repr_type,\n include_nblocks=args.include_nblocks)\n mdp.build_translation_table()\n self.model = make_model(args, mdp, is_proxy=False)\n self.opt = torch.optim.Adam(self.model.parameters(\n ), args.learning_rate, weight_decay=args.weight_decay)\n\n self.loginf = 1000 # to prevent nans\n self.log_reg_c = args.log_reg_c\n self.balanced_loss = args.balanced_loss\n self.do_nblocks_reg = False\n self.max_blocks = args.max_blocks\n self.leaf_coef = args.leaf_coef\n self.clip_grad = args.clip_grad\n # self.score_criterion = nn.MSELoss(reduction='none')\n self.score_criterion = nn.MSELoss()\n\n def forward(self, graph_data, vec_data=None, do_stems=True):\n return self.model(graph_data, vec_data, do_stems)\n\n def train_step(self, p, pb, a, pw, w, r, s, d, mols, i):\n loss, term_loss, flow_loss = self.FMLoss(p, pb, a, pw, w, r, s, d)\n\n self.opt.zero_grad()\n loss.backward()\n if self.clip_grad > 0:\n torch.nn.utils.clip_grad_norm_(\n self.model.parameters(), self.clip_grad)\n self.opt.step()\n self.model.training_steps = i+1\n \n return (loss.item(), term_loss.item(), flow_loss.item())\n\n def FMLoss(self, p, pb, a, pw, w, r, s, d):\n # Since we sampled 'mbsize' trajectories, we're going to get\n # roughly mbsize * H (H is variable) transitions\n ntransitions = r.shape[0]\n # state outputs\n stem_out_s, mol_out_s = self.model(s, w) # log(F)\n # parents of the state outputs\n stem_out_p, mol_out_p = self.model(p, pw)\n # index parents by their corresponding actions\n qsa_p = self.model.index_output_by_action(\n p, stem_out_p, mol_out_p[:, 0], a)\n # then sum the parents' contribution, this is the inflow\n exp_inflow = (torch.zeros((ntransitions,), device=qsa_p.device, dtype=qsa_p.dtype)\n .index_add_(0, pb, torch.exp(qsa_p))) # pb is the parents' batch index\n inflow = torch.log(exp_inflow + self.log_reg_c)\n # sum the state's Q(s,a), this is the outflow\n exp_outflow = self.model.sum_output(s, torch.exp(\n stem_out_s), torch.exp(mol_out_s[:, 0]))\n # include reward and done multiplier, then take the log\n # we're guarenteed that r > 0 iff d = 1, so the log always works\n outflow_plus_r = torch.log(self.log_reg_c + r + exp_outflow * (1-d))\n if self.do_nblocks_reg:\n losses = _losses = ((inflow - outflow_plus_r) /\n (s.nblocks * self.max_blocks)).pow(2)\n else:\n losses = _losses = (inflow - outflow_plus_r).pow(2)\n\n term_loss = (losses * d).sum() / (d.sum() + 1e-20) # terminal nodes\n flow_loss = (losses * (1-d)).sum() / \\\n ((1-d).sum() + 1e-20) # non-terminal nodes\n \n if self.balanced_loss:\n loss = term_loss * self.leaf_coef + flow_loss\n else:\n loss = losses.mean()\n\n return loss, term_loss, flow_loss"
},
{
"identifier": "TBGFlowNet",
"path": "generator/gfn.py",
"snippet": "class TBGFlowNet(nn.Module):\n def __init__(self, args, bpath):\n super().__init__()\n self.args = args\n self.mdp = MolMDPExtended(bpath)\n self.mdp.post_init(args.device, args.repr_type,\n include_nblocks=args.include_nblocks)\n self.mdp.build_translation_table()\n self.model = make_model(args, self.mdp, is_proxy=False)\n self.Z = nn.Sequential(nn.Linear(len(args.objectives), args.nemb//2), nn.LeakyReLU(),\n nn.Linear(args.nemb//2, 1))\n self.Z.to(args.device)\n self.opt = torch.optim.Adam(self.model.parameters(), args.learning_rate, weight_decay=args.weight_decay)\n self.opt_Z = torch.optim.Adam(self.Z.parameters(), args.Z_learning_rate, weight_decay=args.weight_decay)\n\n def forward(self, graph_data, vec_data=None, do_stems=True):\n return self.model(graph_data, vec_data, do_stems)\n\n def train_step(self, p, pb, a, pw, w, r, s, d, mols, i):\n loss = self.TBLoss(p, a, w, r, d, mols)\n self.opt.zero_grad()\n self.opt_Z.zero_grad()\n loss.backward()\n if self.args.clip_grad > 0:\n torch.nn.utils.clip_grad_norm_(\n self.model.parameters(), self.args.clip_grad)\n self.opt.step()\n self.opt_Z.step()\n\n return (loss.item(),)\n\n @property\n def Z(self):\n return self.model.Z\n\n def TBLoss(self, p, a, w, r, d, mols):\n # logit\n stem_out_p, mol_out_p = self.model(p, w)\n # index parents by their corresponding actions\n logits = -self.model.action_negloglikelihood(\n p, a, stem_out_p, mol_out_p)\n\n b = torch.cat([torch.tensor([0], device=logits.device),\n torch.cumsum(d.long(), 0)[:-1]], dim=0)\n n = torch.tensor([len(self.mdp.parents(mol)) if a[idx, 0].item() != -1 else 1.\n for idx, mol in enumerate(mols[1])], device=logits.device)\n # n = torch.tensor([len(self.mdp.parents(mol)) for mol in mols[1]], device=logits.device)\n forward_ll = scatter(logits, b, reduce='sum')\n backward_ll = scatter(torch.log(1/n), b, reduce='sum')\n\n losses = ((self.Z(w[d==1.]) + forward_ll) - (torch.log(r[d == 1.]) + backward_ll)).pow(2) \n loss = losses.mean()\n\n return loss"
},
{
"identifier": "circle_points",
"path": "utils/metrics.py",
"snippet": "def circle_points(K, min_angle=None, max_angle=None):\n # generate evenly distributed preference vector\n ang0 = 1e-6 if min_angle is None else min_angle\n ang1 = np.pi / 2 - ang0 if max_angle is None else max_angle\n angles = np.linspace(ang0, ang1, K, endpoint=True)\n x = np.cos(angles)\n y = np.sin(angles)\n weights = np.c_[x, y]\n normalized_weights = weights/weights.sum(1, keepdims=True)\n\n return normalized_weights.astype(np.float32)"
},
{
"identifier": "compute_success",
"path": "utils/metrics.py",
"snippet": "def compute_success(mols, scores, objectives, score_succ):\n print(\"Computing successful rate...\")\n positive_mols = []\n success_dict = {k: 0. for k in objectives}\n\n for mol, score in zip(mols, scores):\n all_success = True\n for k, v in score.items():\n if v >= score_succ[k]:\n success_dict[k] += 1\n else:\n all_success = False\n if all_success:\n positive_mols.append(mol)\n\n success = 1.*len(positive_mols)/len(mols)\n\n return success, positive_mols"
},
{
"identifier": "compute_diversity",
"path": "utils/metrics.py",
"snippet": "def compute_diversity(mols):\n print(\"Computing diversity...\")\n\n if len(mols) == 0:\n return 0\n\n sims = []\n fps = [AllChem.GetMorganFingerprintAsBitVect(x.mol, 3, 2048) for x in mols]\n for i in range(len(fps)):\n sims += DataStructs.BulkTanimotoSimilarity(fps[i], fps[:i])\n\n return 1 - np.mean(sims)"
},
{
"identifier": "compute_novelty",
"path": "utils/metrics.py",
"snippet": "def compute_novelty(mols, ref_mols):\n print(\"Computing novelty...\")\n positive_fps = [AllChem.GetMorganFingerprintAsBitVect(\n x.mol, 3, 2048) for x in mols]\n ref_fps = [AllChem.GetMorganFingerprintAsBitVect(\n x, 3, 2048) for x in ref_mols]\n\n n_sim = 0.\n for i in range(len(positive_fps)):\n sims = DataStructs.BulkTanimotoSimilarity(positive_fps[i], ref_fps)\n if max(sims) >= 0.4:\n n_sim += 1\n novelty = 1. - 1. * n_sim / (len(positive_fps)+1e-6)\n\n return novelty"
},
{
"identifier": "evaluate",
"path": "utils/metrics.py",
"snippet": "def evaluate(args, generator, rollout_worker, k):\n time_start = time.time()\n print(f\"Sampling molecules and evaluating...\")\n test_weights = rollout_worker.test_weights\n picked_mols = []\n all_scores = []\n # top_scores = []\n top_scores = defaultdict(list)\n mean_scores = []\n hypervolume = Hypervolume(ref_point=torch.zeros(len(args.objectives)))\n \n for weights in test_weights:\n sampled_mols = []\n rewards = []\n scores = []\n for i in range(args.num_samples):\n rollout_worker.rollout(\n generator, use_rand_policy=False, weights=weights.unsqueeze(0))\n (raw_r, _, m, _, _) = rollout_worker.sampled_mols[-1]\n sampled_mols.append(m)\n rewards.append(raw_r[0])\n scores.append(raw_r[1])\n\n idx_pick = np.argsort(rewards)[::-1][:k] \n picked_mols += np.array(sampled_mols)[idx_pick].tolist()\n top_rewards = np.array(rewards)[idx_pick]\n mean_scores.append(np.array(scores).mean(0))\n \n picked_scores = np.array(scores)[idx_pick]\n weight_specific_volume = hypervolume.compute(torch.tensor(picked_scores))\n print(f'Hypervolume w.r.t test weights {weights}: {weight_specific_volume}')\n \n for K in [10, 100]:\n scores_np = np.array(scores)\n top_scores_weight = [scores_np[np.argsort(scores_np[:,i])[::-1][:K], i].mean() for i in range(len(args.objectives))]\n top_scores[K].append(top_scores_weight)\n print(f'Top {K} scores w.r.t test weights {weights}: {top_scores_weight}')\n \n all_scores += scores\n print('Top_rewards: {}'.format(top_rewards.mean())) # Top-100 rewards\n \n volume = hypervolume.compute(torch.tensor(all_scores))\n diversity = compute_diversity(picked_mols) # Top-100\n\n print('Hypervolume: {}, Diversity: {}, Time: {}'.format(\n volume, diversity, time.time()-time_start))\n\n return volume, diversity"
},
{
"identifier": "compute_correlation",
"path": "utils/metrics.py",
"snippet": "def compute_correlation(args, model, rollout_worker, test_mols):\n\n mdp = rollout_worker.mdp\n device = args.device\n def tf(x): return torch.tensor(x, device=device).to(torch.float)\n def tint(x): return torch.tensor(x, device=device).long()\n\n # test_mols = pickle.load(gzip.open('data/some_mols_U_1k.pkl.gz'))\n logsoftmax = nn.LogSoftmax(0)\n corrs = []\n numblocks = []\n\n start_time = time.time()\n if args.n_objectives == 3:\n test_weights = rollout_worker.test_weights[::2]\n elif args.n_objectives == 4:\n test_weights = rollout_worker.test_weights[1:-2:4]\n else:\n test_weights = rollout_worker.test_weights\n \n for weights in test_weights:\n print(\"Computing correlation w.r.t test weights {}\".format(weights))\n weights = torch.tensor(weights).to(args.device)\n logp = []\n rewards = []\n for m in tqdm(test_mols):\n try:\n agraph = get_mol_path_graph(m, mdp)\n except:\n continue\n # rewards.append(np.log(moli[0][0]))\n reward = rollout_worker._get_reward(m, weights)[0].item()\n rewards.append(np.log(reward))\n s = mdp.mols2batch([mdp.mol2repr(agraph.nodes[i]['mol'])\n for i in agraph.nodes])\n numblocks.append(len(m.blocks))\n with torch.no_grad():\n # get the mols_out_s for ALL molecules not just the end one.\n if args.condition_type == 'Hyper_scorepred':\n stem_out_s, mol_out_s, _ = model(\n s, weights.repeat(s.num_graphs, 1))\n else:\n stem_out_s, mol_out_s = model(\n s, weights.repeat(s.num_graphs, 1))\n per_mol_out = []\n # Compute pi(a|s)\n for j in range(len(agraph.nodes)):\n a, b = s._slice_dict['stems'][j:j+2]\n\n stop_allowed = len(\n agraph.nodes[j]['mol'].blocks) >= args.min_blocks\n mp = logsoftmax(torch.cat([\n stem_out_s[a:b].reshape(-1),\n # If num_blocks < min_blocks, the model is not allowed to stop\n mol_out_s[j, :1] if stop_allowed else tf([-1000])]))\n per_mol_out.append(\n (mp[:-1].reshape((-1, stem_out_s.shape[1])), mp[-1]))\n\n # When the model reaches 8 blocks, it is stopped automatically. If instead it stops before\n # that, we need to take into account the STOP action's logprob\n if len(m.blocks) < 8:\n if args.condition_type == 'Hyper_scorepred':\n stem_out_last, mol_out_last, _ = model(\n mdp.mols2batch([mdp.mol2repr(m)]), weights.unsqueeze(0))\n else:\n stem_out_last, mol_out_last = model(\n mdp.mols2batch([mdp.mol2repr(m)]), weights.unsqueeze(0)) \n mplast = logsoftmax(\n torch.cat([stem_out_last.reshape(-1), mol_out_last[0, :1]]))\n MSTOP = mplast[-1]\n\n # assign logprob to edges\n for u, v in agraph.edges:\n a = agraph.edges[u, v]['action']\n if a[0] == -1:\n agraph.edges[u, v]['logprob'] = per_mol_out[v][1]\n else:\n agraph.edges[u,\n v]['logprob'] = per_mol_out[v][0][a[1], a[0]]\n\n # propagate logprobs through the graph\n for n in list(nx.topological_sort(agraph))[::-1]:\n for c in agraph.predecessors(n):\n if len(m.blocks) < 8 and c == 0:\n agraph.nodes[c]['logprob'] = torch.logaddexp(\n agraph.nodes[c].get('logprob', tf(-1000)),\n agraph.edges[c, n]['logprob'] + agraph.nodes[n].get('logprob', 0) + MSTOP)\n else:\n agraph.nodes[c]['logprob'] = torch.logaddexp(\n agraph.nodes[c].get('logprob', tf(-1000)),\n agraph.edges[c, n]['logprob'] + agraph.nodes[n].get('logprob', 0))\n\n # add the first item\n # logp.append((moli, agraph.nodes[n]['logprob'].item()))\n logp.append(agraph.nodes[n]['logprob'].item())\n corrs.append(stats.spearmanr(rewards, logp).correlation)\n\n print('Spearmanr: {}, mean: {}, Time: {}'.format(corrs, np.mean(corrs), time.time()-start_time))\n return corrs"
},
{
"identifier": "set_random_seed",
"path": "utils/utils.py",
"snippet": "def set_random_seed(seed, deterministic=True):\n \"\"\"Set random seed.\"\"\"\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n if deterministic:\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False"
},
{
"identifier": "get_logger",
"path": "utils/logging.py",
"snippet": "def get_logger(args):\n if args.enable_tensorboard:\n return TensorboardLogger(args)\n else:\n return Logger(args)"
}
] | from curses import raw
from dataset import Dataset
from mol_mdp_ext import MolMDPExtended, BlockMoleculeDataExtended
from oracle.oracle import Oracle
from proxy import get_proxy
from generator import TBGFlowNet, FMGFlowNet
from utils.metrics import circle_points, compute_success, compute_diversity, compute_novelty, evaluate, compute_correlation
from utils.utils import set_random_seed
from utils.logging import get_logger
from datetime import datetime
from botorch.utils.multi_objective.hypervolume import Hypervolume
from botorch.utils.sampling import sample_simplex
from botorch.utils.transforms import normalize, unnormalize
from torch.distributions.dirichlet import Dirichlet
from rdkit.Chem import AllChem
from rdkit import DataStructs
from pymoo.util.ref_dirs import get_reference_directions
import os
import argparse
import json
import time
import threading
import pdb
import pickle
import gzip
import warnings
import torch.multiprocessing as mp
import torch.nn.functional as F
import torch
import pandas as pd
import numpy as np | 10,298 | warnings.filterwarnings('ignore')
def arg_parse():
parser = argparse.ArgumentParser()
parser.add_argument("--device", type=str, default='cuda')
parser.add_argument('--seed', type=int, default=42, help='seed')
parser.add_argument("--run", default=0, help="run", type=int)
parser.add_argument('--save', action='store_true',
default=False, help='Save model.')
parser.add_argument('--debug', action='store_true',
default=False, help='debug mode, no multi thread')
parser.add_argument("--enable_tensorboard",
action='store_true', default=False)
parser.add_argument("--log_dir", default='runs/synthetic')
parser.add_argument("--include_nblocks", default=False)
parser.add_argument("--num_samples", default=1000, type=int)
parser.add_argument("--floatX", default='float32')
parser.add_argument('--sample_iterations', type=int, default=1000, help='sample mols and compute metrics')
# objectives
parser.add_argument("--objectives", type=str, default='gsk3b,jnk3')
parser.add_argument("--scalar", default='WeightedSum', type=str) #TODO: other scalars
parser.add_argument("--alpha", default=1., type=float,
help='dirichlet distribution')
parser.add_argument("--alpha_vector", default='1,1', type=str)
# GFlowNet
parser.add_argument("--min_blocks", default=2, type=int)
parser.add_argument("--max_blocks", default=8, type=int)
parser.add_argument("--num_iterations", default=30000, type=int) # 30k
parser.add_argument("--criterion", default="FM", type=str)
parser.add_argument("--learning_rate", default=5e-4,
help="Learning rate", type=float)
parser.add_argument("--Z_learning_rate", default=5e-3,
help="Learning rate", type=float)
parser.add_argument("--clip_grad", default=0, type=float)
parser.add_argument("--trajectories_mbsize", default=16, type=int)
parser.add_argument("--offline_mbsize", default=0, type=int)
parser.add_argument("--hindsight_mbsize", default=0, type=int)
parser.add_argument("--reward_min", default=1e-2, type=float)
parser.add_argument("--reward_norm", default=0.8, type=float)
parser.add_argument("--reward_exp", default=6, type=float)
parser.add_argument("--reward_exp_ramping", default=0, type=float)
# Hyperparameters for TB
parser.add_argument("--partition_init", default=30, type=float)
# Hyperparameters for FM
parser.add_argument("--log_reg_c", default=(0.1/8)
** 4, type=float) # (0.1/8)**8
parser.add_argument("--balanced_loss", default=True)
parser.add_argument("--leaf_coef", default=10, type=float)
# Architecture
parser.add_argument("--repr_type", default='block_graph')
parser.add_argument("--model_version", default='v4')
parser.add_argument("--condition_type", default='HN', type=str) # 'HN', 'FiLM', 'concat'
parser.add_argument("--num_conv_steps", default=10, type=int)
parser.add_argument("--nemb", default=256, help="#hidden", type=int)
parser.add_argument("--weight_decay", default=0, type=float)
parser.add_argument("--random_action_prob", default=0.05, type=float)
parser.add_argument("--bootstrap_tau", default=0, type=float)
parser.add_argument("--ray_hidden_dim", default=100, type=int)
parser.add_argument("--logit_clipping", default=0., type=float)
return parser.parse_args()
class RolloutWorker:
def __init__(self, args, bpath, proxy, device):
self.args = args
self.test_split_rng = np.random.RandomState(142857)
self.train_rng = np.random.RandomState(int(time.time()))
| warnings.filterwarnings('ignore')
def arg_parse():
parser = argparse.ArgumentParser()
parser.add_argument("--device", type=str, default='cuda')
parser.add_argument('--seed', type=int, default=42, help='seed')
parser.add_argument("--run", default=0, help="run", type=int)
parser.add_argument('--save', action='store_true',
default=False, help='Save model.')
parser.add_argument('--debug', action='store_true',
default=False, help='debug mode, no multi thread')
parser.add_argument("--enable_tensorboard",
action='store_true', default=False)
parser.add_argument("--log_dir", default='runs/synthetic')
parser.add_argument("--include_nblocks", default=False)
parser.add_argument("--num_samples", default=1000, type=int)
parser.add_argument("--floatX", default='float32')
parser.add_argument('--sample_iterations', type=int, default=1000, help='sample mols and compute metrics')
# objectives
parser.add_argument("--objectives", type=str, default='gsk3b,jnk3')
parser.add_argument("--scalar", default='WeightedSum', type=str) #TODO: other scalars
parser.add_argument("--alpha", default=1., type=float,
help='dirichlet distribution')
parser.add_argument("--alpha_vector", default='1,1', type=str)
# GFlowNet
parser.add_argument("--min_blocks", default=2, type=int)
parser.add_argument("--max_blocks", default=8, type=int)
parser.add_argument("--num_iterations", default=30000, type=int) # 30k
parser.add_argument("--criterion", default="FM", type=str)
parser.add_argument("--learning_rate", default=5e-4,
help="Learning rate", type=float)
parser.add_argument("--Z_learning_rate", default=5e-3,
help="Learning rate", type=float)
parser.add_argument("--clip_grad", default=0, type=float)
parser.add_argument("--trajectories_mbsize", default=16, type=int)
parser.add_argument("--offline_mbsize", default=0, type=int)
parser.add_argument("--hindsight_mbsize", default=0, type=int)
parser.add_argument("--reward_min", default=1e-2, type=float)
parser.add_argument("--reward_norm", default=0.8, type=float)
parser.add_argument("--reward_exp", default=6, type=float)
parser.add_argument("--reward_exp_ramping", default=0, type=float)
# Hyperparameters for TB
parser.add_argument("--partition_init", default=30, type=float)
# Hyperparameters for FM
parser.add_argument("--log_reg_c", default=(0.1/8)
** 4, type=float) # (0.1/8)**8
parser.add_argument("--balanced_loss", default=True)
parser.add_argument("--leaf_coef", default=10, type=float)
# Architecture
parser.add_argument("--repr_type", default='block_graph')
parser.add_argument("--model_version", default='v4')
parser.add_argument("--condition_type", default='HN', type=str) # 'HN', 'FiLM', 'concat'
parser.add_argument("--num_conv_steps", default=10, type=int)
parser.add_argument("--nemb", default=256, help="#hidden", type=int)
parser.add_argument("--weight_decay", default=0, type=float)
parser.add_argument("--random_action_prob", default=0.05, type=float)
parser.add_argument("--bootstrap_tau", default=0, type=float)
parser.add_argument("--ray_hidden_dim", default=100, type=int)
parser.add_argument("--logit_clipping", default=0., type=float)
return parser.parse_args()
class RolloutWorker:
def __init__(self, args, bpath, proxy, device):
self.args = args
self.test_split_rng = np.random.RandomState(142857)
self.train_rng = np.random.RandomState(int(time.time())) | self.mdp = MolMDPExtended(bpath) | 1 | 2023-10-24 14:10:35+00:00 | 12k |
SALT-NLP/Efficient_Unlearning | src/models/transformers/parameter-efficient-finetuning/layer.py | [
{
"identifier": "AdapterCompositionBlock",
"path": "src/models/transformers/parameter-efficient-finetuning/composition.py",
"snippet": "class AdapterCompositionBlock(Sequence):\n def __init__(self, *children):\n self.children = [parse_composition(b, None) for b in children]\n\n def __getitem__(self, key):\n return self.children[key]\n\n def __len__(self):\n return len(self.children)\n\n def __eq__(self, o: object) -> bool:\n if isinstance(o, type(self)):\n return all([c1 == c2 for c1, c2 in zip(self.children, o.children)])\n else:\n return False\n\n def __repr__(self):\n child_repr = \", \".join(map(str, self.children))\n return f\"{self.__class__.__name__}[{child_repr}]\"\n\n def first(self):\n if not isinstance(self.children[0], AdapterCompositionBlock):\n return self.children[0]\n else:\n return self.children[0].first()\n\n def last(self):\n if not isinstance(self.children[-1], AdapterCompositionBlock):\n return self.children[-1]\n else:\n return self.children[-1].last()\n\n @property\n def parallel_channels(self):\n return max([b.parallel_channels if isinstance(b, AdapterCompositionBlock) else 1 for b in self.children])\n\n def flatten(self) -> Set[str]:\n return set(itertools.chain(*[[b] if isinstance(b, str) else b.flatten() for b in self.children]))"
},
{
"identifier": "BatchSplit",
"path": "src/models/transformers/parameter-efficient-finetuning/composition.py",
"snippet": "class BatchSplit(AdapterCompositionBlock):\n def __init__(self, *split_adapters: List[Union[AdapterCompositionBlock, str]], batch_sizes: Union[List[int], int]):\n super().__init__(*split_adapters)\n self.batch_sizes = batch_sizes if isinstance(batch_sizes, list) else [batch_sizes] * len(split_adapters)"
},
{
"identifier": "Fuse",
"path": "src/models/transformers/parameter-efficient-finetuning/composition.py",
"snippet": "class Fuse(AdapterCompositionBlock):\n def __init__(self, *fuse_stacks: List[Union[AdapterCompositionBlock, str]]):\n super().__init__(*fuse_stacks)\n\n # TODO-V2 pull this up to all block classes?\n @property\n def name(self):\n return \",\".join([c if isinstance(c, str) else c.last() for c in self.children])"
},
{
"identifier": "Parallel",
"path": "src/models/transformers/parameter-efficient-finetuning/composition.py",
"snippet": "class Parallel(AdapterCompositionBlock):\n def __init__(self, *parallel_adapters: List[str]):\n \"\"\"\n Can be used to perform inference for multiple tasks (i.e., adapters) in parallel (for the same input).\n\n See AdapterDrop https://arxiv.org/abs/2010.11918\n \"\"\"\n super().__init__(*parallel_adapters)\n\n @property\n def parallel_channels(self):\n return len(self.children)"
},
{
"identifier": "Split",
"path": "src/models/transformers/parameter-efficient-finetuning/composition.py",
"snippet": "class Split(AdapterCompositionBlock):\n def __init__(self, left: str, right: str, split_index: int):\n super().__init__(left, right)\n assert split_index > 0\n self.left = left\n self.right = right\n self.split_index = split_index"
},
{
"identifier": "Stack",
"path": "src/models/transformers/parameter-efficient-finetuning/composition.py",
"snippet": "class Stack(AdapterCompositionBlock):\n def __init__(self, *stack_layers: List[Union[AdapterCompositionBlock, str]]):\n super().__init__(*stack_layers)"
},
{
"identifier": "AdapterConfig",
"path": "src/models/transformers/parameter-efficient-finetuning/configuration.py",
"snippet": "class AdapterConfig(AdapterConfigBase):\n \"\"\"\n Base class that models the architecture of an adapter.\n\n Args:\n mh_adapter (:obj:`bool`): If True, add adapter modules after the multi-head attention block of each layer.\n output_adapter (:obj:`bool`): If True, add adapter modules after the output FFN of each layer.\n reduction_factor (:obj:`float` or :obj:`Mapping`):\n Either a scalar float (> 0) specifying the reduction factor for all layers or a mapping specifying the\n reduction_factor for individual layers. If not all layers are represented in the mapping a default value\n should be given e.g. {'1': 8, '6': 32, 'default': 16}. Specifying a reduction factor < 1 will result in an\n up-projection layer.\n non_linearity (:obj:`str`): The activation function to use in the adapter bottleneck.\n original_ln_before (:obj:`bool`, optional):\n If True, apply layer pre-trained normalization and residual connection before the adapter modules. Defaults\n to False. Only applicable if :obj:`is_parallel` is False.\n original_ln_after (:obj:`bool`, optional):\n If True, apply pre-trained layer normalization and residual connection after the adapter modules. Defaults\n to True.\n ln_before (:obj:`bool`, optional): If True, add a new layer normalization before the adapter bottleneck.\n Defaults to False.\n ln_after (:obj:`bool`, optional): If True, add a new layer normalization after the adapter bottleneck.\n Defaults to False.\n init_weights (:obj:`str`, optional): Initialization method for the weights of the adapter modules.\n Currently, this can be either \"bert\" (default) or \"mam_adapter\".\n is_parallel (:obj:`bool`, optional): If True, apply adapter transformations in parallel.\n By default (False), sequential application is used.\n scaling (:obj:`float` or :obj:`str`, optional):\n Scaling factor to use for scaled addition of adapter outputs as done by He et al. (2021). Can bei either a\n constant factor (float) or the string \"learned\", in which case the scaling factor is learned. Defaults to\n 1.0.\n use_gating (:obj:`bool`, optional):\n Place a trainable gating module besides the added parameter module to control module activation. This is\n e.g. used for PEFT. Defaults to False.\n residual_before_ln (:obj:`bool`, optional):\n If True, take the residual connection around the adapter bottleneck before the layer normalization. Only\n applicable if :obj:`original_ln_before` is True.\n adapter_residual_before_ln (:obj:`bool`, optional):\n If True, apply the residual connection around the adapter modules before the new layer normalization within\n the adapter. Only applicable if :obj:`ln_after` is True and :obj:`is_parallel` is False.\n inv_adapter (:obj:`str`, optional):\n If not None (default), add invertible adapter modules after the model embedding layer. Currently, this can\n be either \"nice\" or \"glow\".\n inv_adapter_reduction_factor (:obj:`float`, optional):\n The reduction to use within the invertible adapter modules. Only applicable if :obj:`inv_adapter` is not\n None.\n cross_adapter (:obj:`bool`, optional):\n If True, add adapter modules after the cross attention block of each decoder layer in an encoder-decoder\n model. 
Defaults to False.\n leave_out (:obj:`List[int]`, optional):\n The IDs of the layers (starting at 0) where NO adapter modules should be added.\n phm_layer (:obj:`bool`, optional): If True the down and up projection layers are a PHMLayer.\n Defaults to False\n phm_dim (:obj:`int`, optional): The dimension of the phm matrix.\n Defaults to None.\n shared_phm_rule (:obj:`bool`, optional): Whether the phm matrix is shared across all layers.\n Defaults to True\n factorized_phm_rule (:obj:`bool`, optional):\n Whether the phm matrix is factorized into a left and right matrix. Defaults to False.\n learn_phm (:obj:`bool`, optional): Whether the phm matrix should be learned during training.\n Defaults to True\n factorized_phm_W (:\n obj:`bool`, optional): Whether the weights matrix is factorized into a left and right matrix. Defaults to\n True\n shared_W_phm (:obj:`bool`, optional): Whether the weights matrix is shared across all layers.\n Defaults to False.\n phm_c_init (:obj:`str`, optional): The initialization function for the weights of the phm matrix.\n The possible values are `[\"normal\", \"uniform\"]`. Defaults to `normal`.\n phm_init_range (:obj:`float`, optional): std for initializing phm weights if `phm_c_init=\"normal\"`.\n Defaults to 0.0001.\n hypercomplex_nonlinearity (:obj:`str`, optional):\n This specifies the distribution to draw the weights in the phm layer from. Defaults to `glorot-uniform`.\n phm_rank (:obj:`int`, optional):\n If the weight matrix is factorized this specifies the rank of the matrix. E.g. the left matrix of the down\n projection has the shape (phm_dim, _in_feats_per_axis, phm_rank) and the right matrix (phm_dim, phm_rank,\n _out_feats_per_axis). Defaults to 1\n phm_bias (:obj:`bool`, optional):\n If True the down and up projection PHMLayer has a bias term. If `phm_layer` is False this is ignored.\n Defaults to True\n \"\"\"\n\n # Required options\n mh_adapter: bool\n output_adapter: bool\n\n reduction_factor: Union[float, Mapping]\n non_linearity: str\n\n # Options with defaults\n original_ln_before: bool = False\n original_ln_after: bool = True\n ln_before: bool = False\n ln_after: bool = False\n init_weights: str = \"bert\"\n is_parallel: bool = False\n scaling: Union[float, str] = 1.0\n use_gating: bool = False\n residual_before_ln: bool = True\n adapter_residual_before_ln: bool = False\n inv_adapter: Optional[str] = None\n inv_adapter_reduction_factor: Optional[float] = None\n cross_adapter: bool = False\n leave_out: List[int] = field(default_factory=list)\n phm_layer: bool = False\n phm_dim: int = 4\n factorized_phm_W: Optional[bool] = True\n shared_W_phm: Optional[bool] = False\n shared_phm_rule: Optional[bool] = True\n factorized_phm_rule: Optional[bool] = False\n phm_c_init: Optional[str] = \"normal\"\n phm_init_range: Optional[float] = 0.0001\n learn_phm: Optional[bool] = True\n hypercomplex_nonlinearity: Optional[str] = \"glorot-uniform\"\n phm_rank: Optional[int] = 1\n phm_bias: Optional[bool] = True\n\n # We want to emulate a simple form of immutability while keeping the ability to add custom attributes.\n # Therefore, we don't allow changing attribute values if set once.\n def __setattr__(self, name, value):\n if name in self.__dict__:\n raise FrozenInstanceError()\n elif name == \"invertible_adapter\":\n # This is for backwards compatibility. 
In v1, invertible adapters were specified in a nested config dict.\n # Now, we have two config keys directly in the adapter config.\n if value:\n object.__setattr__(self, \"inv_adapter\", value[\"block_type\"])\n object.__setattr__(self, \"inv_adapter_reduction_factor\", value[\"reduction_factor\"])\n else:\n object.__setattr__(self, name, value)"
},
{
"identifier": "AdapterSetup",
"path": "src/models/transformers/parameter-efficient-finetuning/context.py",
"snippet": "class AdapterSetup:\n \"\"\"\n Represents an adapter setup of a model including active adapters and active heads. This class is intended to be\n used as a context manager using the ``with`` statement. The setup defined by the ``AdapterSetup`` context will\n override static adapter setups defined in a model (i.e. setups specified via ``active_adapters``).\n\n Example::\n\n with AdapterSetup(Stack(\"a\", \"b\")):\n # will use the adapter stack \"a\" and \"b\" outputs = model(**inputs)\n\n Note that the context manager is thread-local, i.e. it can be used with different setups in a multi-threaded\n environment.\n \"\"\"\n\n # thread-local storage that holds a stack of active contexts\n storage = threading.local()\n\n def __init__(self, adapter_setup, head_setup=None, ignore_empty: bool = False):\n self.adapter_setup = parse_composition(adapter_setup)\n if head_setup:\n self.head_setup = head_setup\n else:\n self.head_setup = parse_heads_from_composition(self.adapter_setup)\n self._empty = ignore_empty and self.adapter_setup is None and self.head_setup is None\n\n def __enter__(self):\n if not self._empty:\n AdapterSetup.get_contexts().append(self)\n return self\n\n def __exit__(self, type, value, traceback):\n if not self._empty:\n AdapterSetup.get_contexts().pop()\n\n @classmethod\n def get_contexts(cls):\n if not hasattr(cls.storage, \"contexts\"):\n cls.storage.contexts = []\n return cls.storage.contexts\n\n @classmethod\n def get_context(cls):\n try:\n return cls.get_contexts()[-1]\n except IndexError:\n return None\n\n @classmethod\n def get_context_adapter_setup(cls):\n context = cls.get_context()\n if context:\n return context.adapter_setup\n return None\n\n @classmethod\n def get_context_head_setup(cls):\n context = cls.get_context()\n if context:\n return context.head_setup\n return None"
},
{
"identifier": "ForwardContext",
"path": "src/models/transformers/parameter-efficient-finetuning/context.py",
"snippet": "class ForwardContext:\n \"\"\"\n Holds context information during a forward pass through a model. This class should be used via the\n ``ForwardContext.wrap()`` method.\n\n Note that the context is thread-local.\n \"\"\"\n\n # thread-local storage that holds a stack of active contexts\n storage = threading.local()\n\n context_attributes = [\"adapter_gating_scores\", \"adapter_fusion_attentions\", \"adapter_input_parallelized\"]\n\n def __init__(self, model, *args, **kwargs):\n # If the model has a method ``forward_context()``, use it to create the context.\n if hasattr(model, \"forward_context\"):\n model.forward_context(self, *args, **kwargs)\n\n def __enter__(self):\n ForwardContext.get_contexts().append(self)\n return self\n\n def __exit__(self, type, value, traceback):\n ForwardContext.get_contexts().pop()\n\n @classmethod\n def wrap(cls, f):\n \"\"\"\n Decorator method that wraps a ``forward()`` function of a model class.\n \"\"\"\n\n @functools.wraps(f)\n def wrapper_func(self, *args, **kwargs):\n if self.config.adapters is not None:\n with cls(self, *args, **kwargs) as ctx:\n kwargs = {\n k: v for k, v in kwargs.items() if k.replace(\"output_\", \"\") not in cls.context_attributes\n }\n results = f(self, *args, **kwargs)\n\n # append output attributes\n if isinstance(results, tuple):\n for attr in cls.context_attributes:\n if getattr(ctx, \"output_\" + attr, False):\n results = results + (dict(getattr(ctx, attr)),)\n else:\n for attr in cls.context_attributes:\n if getattr(ctx, \"output_\" + attr, False):\n results[attr] = dict(getattr(ctx, attr))\n return results\n else:\n return f(self, *args, **kwargs)\n\n return wrapper_func\n\n @classmethod\n def get_contexts(cls):\n if not hasattr(cls.storage, \"contexts\"):\n cls.storage.contexts = []\n return cls.storage.contexts\n\n @classmethod\n def get_context(cls):\n try:\n return cls.get_contexts()[-1]\n except IndexError:\n return None"
},
{
"identifier": "Adapter",
"path": "src/models/transformers/parameter-efficient-finetuning/modeling.py",
"snippet": "class Adapter(nn.Module):\n \"\"\"\n Implementation of a sequential bottleneck adapter block.\n \"\"\"\n\n def __init__(\n self,\n adapter_name,\n input_size,\n down_sample,\n config: AdapterConfig,\n ):\n super().__init__()\n self.name = adapter_name\n self.input_size = input_size\n self.add_layer_norm_before = config[\"ln_before\"]\n self.add_layer_norm_after = config[\"ln_after\"]\n self.adapter_residual_before_ln = config[\"adapter_residual_before_ln\"]\n self.use_gating = config[\"use_gating\"]\n\n # Params related to input & output of adapter\n self.residual_before_ln = config[\"residual_before_ln\"]\n self.original_ln_before = config[\"original_ln_before\"]\n self.original_ln_after = config[\"original_ln_after\"]\n\n # list for all modules of the adapter, passed into nn.Sequential()\n seq_list = []\n\n # If we want to have a layer norm on input, we add it to seq_list\n if self.add_layer_norm_before:\n self.adapter_norm_before = nn.LayerNorm(self.input_size)\n seq_list.append(self.adapter_norm_before)\n\n # if a downsample size is not passed, we just half the size of the original input\n self.down_sample = down_sample\n if down_sample is None:\n self.down_sample = self.input_size // 2\n\n # ensure that the down sample size is at least 1\n if self.down_sample < 1:\n self.down_sample = 1\n\n if config[\"phm_layer\"]:\n # Linear down projection of the input\n seq_list.append(PHMLayer(adapter_name, self.input_size, self.down_sample, \"down\", config))\n else:\n seq_list.append(nn.Linear(self.input_size, self.down_sample))\n\n # select non-linearity\n self.non_linearity = Activation_Function_Class(config[\"non_linearity\"].lower())\n\n seq_list.append(self.non_linearity)\n\n # sequential adapter, first downproject, then non-linearity then upsample. In the forward pass we include the\n # residual connection\n self.adapter_down = nn.Sequential(*seq_list)\n\n # Up projection to input size\n if config[\"phm_layer\"]:\n # Linear down projection of the input\n self.adapter_up = PHMLayer(adapter_name, self.down_sample, self.input_size, \"up\", config)\n else:\n self.adapter_up = nn.Linear(self.down_sample, self.input_size)\n\n # Additional scaling factor (from He et al. 
(2021))\n if isinstance(config[\"scaling\"], float):\n self.scaling = config[\"scaling\"]\n elif config[\"scaling\"] == \"learned\":\n self.scaling = nn.Parameter(torch.ones(1))\n else:\n raise ValueError(\"Unknown scaling type: {}\".format(config[\"scaling\"]))\n\n # If we want to have a layer norm on output, we apply it later after a separate residual connection\n # This means that we learn a new output layer norm, which replaces another layer norm learned in the bert layer\n if self.add_layer_norm_after:\n self.adapter_norm_after = nn.LayerNorm(self.input_size)\n\n if self.use_gating:\n self.gate = nn.Linear(self.input_size, 1)\n\n # if we want to initialize with the bert strategy then this function is called for all the linear layers\n if config[\"init_weights\"] == \"bert\":\n self.adapter_down.apply(self.init_bert_weights)\n self.adapter_up.apply(self.init_bert_weights)\n if self.use_gating:\n self.gate.apply(self.init_bert_weights)\n elif config[\"init_weights\"] == \"mam_adapter\":\n with torch.no_grad():\n nn.init.kaiming_uniform_(self.adapter_down[0].weight, a=math.sqrt(5))\n nn.init.zeros_(self.adapter_up.weight)\n nn.init.zeros_(self.adapter_down[0].bias)\n nn.init.zeros_(self.adapter_up.bias)\n if self.use_gating:\n self.gate.apply(self.init_bert_weights)\n else:\n raise ValueError(\"Unknown init_weights type: {}\".format(config[\"init_weights\"]))\n\n def pre_forward(\n self,\n hidden_states,\n input_tensor,\n layer_norm,\n fusion_config=None,\n ):\n \"\"\"\n Retrieves the hidden_states, query (for Fusion), and residual connection according to the set configuration.\n\n Args:\n adapter_config: config file according to what the parameters are passed\n hidden_states: output of previous layer\n input_tensor: residual connection before FFN\n\n Returns: hidden_states, query, residual\n\n \"\"\"\n query = None\n\n if self.residual_before_ln:\n residual = hidden_states\n\n if fusion_config is not None and fusion_config[\"query_before_ln\"]:\n query = hidden_states\n\n if self.original_ln_before:\n if layer_norm:\n hidden_states = layer_norm(hidden_states + input_tensor)\n else:\n hidden_states = hidden_states + input_tensor\n\n if not self.residual_before_ln:\n residual = hidden_states\n\n if fusion_config is not None and not fusion_config[\"query_before_ln\"]:\n query = hidden_states\n\n return hidden_states, query, residual\n\n def forward(self, x, residual_input, output_gating=False):\n down = self.adapter_down(x)\n\n up = self.adapter_up(down)\n up = up * self.scaling\n output = up\n\n if self.use_gating:\n # x.shape = (batch_size, seq_len, hidden_size)\n gate = torch.sigmoid(self.gate(x))\n gate = torch.mean(gate, dim=1).unsqueeze(-1)\n output = output * gate\n\n # apply residual connection before layer norm if configured in this way\n if self.adapter_residual_before_ln:\n output = output + residual_input\n\n # apply layer norm if available\n if self.add_layer_norm_after:\n output = self.adapter_norm_after(output)\n\n # if residual should be applied after layer norm, apply it here\n if not self.adapter_residual_before_ln:\n output = output + residual_input\n\n if self.use_gating and output_gating:\n return output, down, up, gate\n return output, down, up\n\n def post_forward(self, hidden_states, input_hidden_states, input_tensor, layer_norm):\n \"\"\"\n Performs computations after the forward pass of the adapter block(s). This e.g. 
includes applying the residual\n connection and layer norm if configured in this way.\n\n Args:\n hidden_states: The hidden states outputted by the adapter block(s).\n input_hidden_states: Residual connection before the adapter block(s).\n input_tensor: Residual connection before the Transformer FFN/ attention layer.\n layer_norm: Transformer LayerNorm.\n\n Returns:\n The modified hidden states.\n \"\"\"\n if self.original_ln_after:\n if layer_norm:\n hidden_states = layer_norm(hidden_states + input_tensor)\n else:\n hidden_states = hidden_states + input_tensor\n\n return hidden_states\n\n # This is copied from the BertPreTrainedModel class to make this a self containing class.\n @staticmethod\n def init_bert_weights(module):\n \"\"\"Initialize the weights.\"\"\"\n if isinstance(module, (nn.Linear, nn.Embedding)):\n # std defaults to 0.02, this might need to be changed\n module.weight.data.normal_(mean=0.0, std=0.02)\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()"
},
{
"identifier": "BertFusion",
"path": "src/models/transformers/parameter-efficient-finetuning/modeling.py",
"snippet": "class BertFusion(nn.Module):\n \"\"\"\n Implementation of an AdapterFusion block.\n \"\"\"\n\n def __init__(\n self,\n config: AdapterFusionConfig,\n dense_size,\n attention_probs_dropout_prob,\n ):\n super(BertFusion, self).__init__()\n # if config.hidden_size % config.num_attention_heads != 0:\n # raise ValueError(\n # \"The hidden size (%d) is not a multiple of the number of attention \"\n # \"heads (%d)\" % (config.hidden_size, config.num_attention_heads))\n self.config = config\n\n self.dense_size = dense_size\n self.dropout = nn.Dropout(attention_probs_dropout_prob)\n\n if not self.config[\"query\"] and not self.config[\"key\"] and not self.config[\"value\"]:\n self.dense = nn.Linear(self.dense_size, 1)\n\n if self.config[\"query\"]:\n self.query = nn.Linear(self.dense_size, self.dense_size)\n self.query.apply(Adapter.init_bert_weights)\n\n if self.config[\"key\"]:\n self.key = nn.Linear(self.dense_size, self.dense_size)\n self.key.apply(Adapter.init_bert_weights)\n\n if self.config[\"value\"]:\n self.value = nn.Linear(self.dense_size, self.dense_size, bias=False)\n self.value.apply(Adapter.init_bert_weights)\n if self.config[\"value_initialized\"]:\n self.value.weight.data = (torch.zeros(self.dense_size, self.dense_size) + 0.000001).fill_diagonal_(1.0)\n\n if self.config[\"temperature\"]:\n self.T = 50.0\n else:\n self.T = 1.0\n self.reduction = self.T / 1000.0\n\n def forward(self, query, key, value, residual, output_attentions: bool = False):\n\n if self.config[\"residual_before\"]:\n value += residual[:, :, None, :].repeat(1, 1, value.size(2), 1)\n\n if self.config[\"query\"]:\n query_layer = self.query(query)\n else:\n query_layer = query\n\n if self.config[\"key\"]:\n key_layer = self.key(key)\n else:\n key_layer = key\n\n if self.config[\"value\"] and self.config[\"value_before_softmax\"]:\n # key/value have dims => batch, toks, number-of-adapters, feats\n value_layer = self.value(value)\n else:\n value_layer = value\n\n # Take the dot product between \"query\" and \"key\" to get the raw attention scores.\n attention_scores = torch.squeeze(torch.matmul(query_layer.unsqueeze(2), key_layer.transpose(-2, -1)), dim=2)\n\n attention_scores = self.dropout(attention_scores)\n\n # Normalize the attention scores to probabilities.\n attention_probs = nn.Softmax(dim=-1)(attention_scores / self.T)\n self.T = max(self.T - self.reduction, 1.0)\n\n context_layer = torch.squeeze(torch.matmul(attention_probs.unsqueeze(2), value_layer), dim=2)\n\n if self.config[\"value\"] and not self.config[\"value_before_softmax\"]:\n # key/value have dims => batch, toks, number-of-adapters, feats\n context_layer = self.value(context_layer)\n else:\n context_layer = context_layer\n\n if not self.config[\"residual_before\"]:\n context_layer += residual\n\n if output_attentions:\n attention_probs = attention_probs.detach().cpu().numpy()\n return context_layer, attention_probs\n else:\n return context_layer"
},
{
"identifier": "ParallelAdapter",
"path": "src/models/transformers/parameter-efficient-finetuning/modeling.py",
"snippet": "class ParallelAdapter(Adapter):\n \"\"\"\n Implementation of a parallel bottleneck adapter block.\n \"\"\"\n\n def __init__(self, adapter_name, input_size, down_sample, config: AdapterConfig):\n super().__init__(adapter_name, input_size, down_sample, config)\n\n def pre_forward(\n self,\n hidden_states,\n input_tensor,\n layer_norm,\n fusion_config=None,\n ):\n \"\"\"\n Retrieves the hidden_states, query (for Fusion), and residual connection according to the set configuration.\n\n Args:\n adapter_config: config file according to what the parameters are passed\n hidden_states: output of previous layer\n input_tensor: residual connection before FFN\n\n Returns: hidden_states, query, residual\n\n \"\"\"\n # In case of parallel adapter, return the input tensor as hidden states\n query = None\n if fusion_config is not None:\n query = input_tensor\n return input_tensor, query, input_tensor\n\n def forward(self, x, residual_input, output_gating=False):\n down = self.adapter_down(x)\n\n up = self.adapter_up(down)\n up = up * self.scaling\n\n output = up\n\n if self.use_gating:\n # x.shape = (batch_size, seq_len, hidden_size)\n gate = torch.sigmoid(self.gate(x))\n gate = torch.mean(gate, dim=1).unsqueeze(-1)\n output = output * gate\n\n # apply layer norm if available\n if self.add_layer_norm_after:\n output = self.adapter_norm_after(output)\n\n if self.use_gating and output_gating:\n return output, down, up, gate\n return output, down, up\n\n def post_forward(self, hidden_states, input_hidden_states, input_tensor, layer_norm):\n \"\"\"\n Performs computations after the forward pass of the adapter block(s). This e.g. includes applying the residual\n connection and layer norm if configured in this way.\n\n Args:\n hidden_states: The hidden states outputted by the adapter block(s).\n input_hidden_states: Residual connection before the adapter block(s).\n input_tensor: Residual connection before the Transformer FFN/ attention layer.\n layer_norm: Transformer LayerNorm.\n\n Returns:\n The modified hidden states.\n \"\"\"\n hidden_states = hidden_states + input_hidden_states\n\n if self.original_ln_after:\n if layer_norm:\n hidden_states = layer_norm(hidden_states + input_tensor)\n else:\n hidden_states = hidden_states + input_tensor\n\n return hidden_states"
}
] | from abc import ABC, abstractmethod
from typing import List, Mapping, Union
from torch import nn
from .composition import AdapterCompositionBlock, BatchSplit, Fuse, Parallel, Split, Stack
from .configuration import AdapterConfig
from .context import AdapterSetup, ForwardContext
from .modeling import Adapter, BertFusion, ParallelAdapter
import numpy as np
import torch | 7,849 |
class AdapterLayerBase(ABC):
"""
Base class for all adaptation methods that require per-layer modules.
"""
@property
def layer_idx(self):
return getattr(self, "_layer_idx", -1)
@layer_idx.setter
def layer_idx(self, layer_idx):
idx = getattr(self, "_layer_idx", layer_idx)
assert idx == layer_idx
setattr(self, "_layer_idx", idx)
def get_active_setup(self, module_dict):
if getattr(self.config, "is_adaptable", False):
# First check current context before falling back to defined setup
context = AdapterSetup.get_context()
if context is not None:
adapter_setup = context.adapter_setup
else:
adapter_setup = self.config.adapters.active_setup
else:
adapter_setup = None
skip_adapters = adapter_setup is None or (
self.config.adapters.skip_layers is not None and self.layer_idx in self.config.adapters.skip_layers
)
if not skip_adapters and (len(set(module_dict.keys()) & adapter_setup.flatten()) > 0):
return adapter_setup
else:
return None
def _store_gating_score(self, adapter_name, gating_score):
context = ForwardContext.get_context()
if context.output_adapter_gating_scores:
gating_cache = context.adapter_gating_scores
if self.layer_idx not in gating_cache[adapter_name]:
gating_cache[adapter_name][self.layer_idx] = {}
gating_score = gating_score.detach().squeeze().cpu().numpy()
if len(gating_score.shape) == 0:
gating_score = np.expand_dims(gating_score, axis=0)
cache_score = gating_cache[adapter_name][self.layer_idx].get(self.location_key, None)
if cache_score is not None:
gating_cache[adapter_name][self.layer_idx][self.location_key] = np.column_stack(
(cache_score, gating_score)
)
else:
gating_cache[adapter_name][self.layer_idx][self.location_key] = gating_score
def _store_fusion_attentions(self, fusion_name, attentions):
context = ForwardContext.get_context()
if context.output_adapter_fusion_attentions:
attention_cache = context.adapter_fusion_attentions
if self.layer_idx not in attention_cache[fusion_name]:
attention_cache[fusion_name][self.layer_idx] = {}
attention_cache[fusion_name][self.layer_idx][self.location_key] = attentions
@abstractmethod
def add_adapter(self, adapter_name: str, layer_idx: int):
raise NotImplementedError()
@abstractmethod
def delete_adapter(self, adapter_name: str):
raise NotImplementedError()
@abstractmethod
def add_fusion_layer(self, adapter_names: Union[List, str]):
raise NotImplementedError()
@abstractmethod
def delete_fusion_layer(self, adapter_names: Union[List, str]):
raise NotImplementedError()
@abstractmethod
|
class AdapterLayerBase(ABC):
"""
Base class for all adaptation methods that require per-layer modules.
"""
@property
def layer_idx(self):
return getattr(self, "_layer_idx", -1)
@layer_idx.setter
def layer_idx(self, layer_idx):
idx = getattr(self, "_layer_idx", layer_idx)
assert idx == layer_idx
setattr(self, "_layer_idx", idx)
def get_active_setup(self, module_dict):
if getattr(self.config, "is_adaptable", False):
# First check current context before falling back to defined setup
context = AdapterSetup.get_context()
if context is not None:
adapter_setup = context.adapter_setup
else:
adapter_setup = self.config.adapters.active_setup
else:
adapter_setup = None
skip_adapters = adapter_setup is None or (
self.config.adapters.skip_layers is not None and self.layer_idx in self.config.adapters.skip_layers
)
if not skip_adapters and (len(set(module_dict.keys()) & adapter_setup.flatten()) > 0):
return adapter_setup
else:
return None
def _store_gating_score(self, adapter_name, gating_score):
context = ForwardContext.get_context()
if context.output_adapter_gating_scores:
gating_cache = context.adapter_gating_scores
if self.layer_idx not in gating_cache[adapter_name]:
gating_cache[adapter_name][self.layer_idx] = {}
gating_score = gating_score.detach().squeeze().cpu().numpy()
if len(gating_score.shape) == 0:
gating_score = np.expand_dims(gating_score, axis=0)
cache_score = gating_cache[adapter_name][self.layer_idx].get(self.location_key, None)
if cache_score is not None:
gating_cache[adapter_name][self.layer_idx][self.location_key] = np.column_stack(
(cache_score, gating_score)
)
else:
gating_cache[adapter_name][self.layer_idx][self.location_key] = gating_score
def _store_fusion_attentions(self, fusion_name, attentions):
context = ForwardContext.get_context()
if context.output_adapter_fusion_attentions:
attention_cache = context.adapter_fusion_attentions
if self.layer_idx not in attention_cache[fusion_name]:
attention_cache[fusion_name][self.layer_idx] = {}
attention_cache[fusion_name][self.layer_idx][self.location_key] = attentions
@abstractmethod
def add_adapter(self, adapter_name: str, layer_idx: int):
raise NotImplementedError()
@abstractmethod
def delete_adapter(self, adapter_name: str):
raise NotImplementedError()
@abstractmethod
def add_fusion_layer(self, adapter_names: Union[List, str]):
raise NotImplementedError()
@abstractmethod
def delete_fusion_layer(self, adapter_names: Union[List, str]):
raise NotImplementedError()
@abstractmethod | def enable_adapters(self, adapter_setup: AdapterCompositionBlock, unfreeze_adapters: bool, unfreeze_fusion: bool): | 0 | 2023-10-18 18:05:54+00:00 | 12k |
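The row above centres on AdapterLayerBase, whose get_active_setup intersects a layer's registered adapter names with adapter_setup.flatten(), and its context column gives the full composition classes (Stack, Fuse, Parallel, Split, BatchSplit). The sketch below is a deliberately simplified, standalone re-implementation of just enough of those classes to show what flatten(), first() and last() return for a nested setup; it is not an import of the package in the row (whose directory path contains hyphens) and it omits parse_composition and the remaining block types.

import itertools
from typing import Set, Union

class Block:
    """Simplified stand-in for AdapterCompositionBlock (no parse_composition)."""
    def __init__(self, *children: Union["Block", str]):
        self.children = list(children)

    def first(self) -> str:
        child = self.children[0]
        return child if isinstance(child, str) else child.first()

    def last(self) -> str:
        child = self.children[-1]
        return child if isinstance(child, str) else child.last()

    def flatten(self) -> Set[str]:
        # Same recursion as the snippet: collect every adapter name in the tree.
        return set(itertools.chain(*[[c] if isinstance(c, str) else c.flatten() for c in self.children]))

class Stack(Block):
    """Adapters applied one after another (cf. Stack in the context column)."""

class Fuse(Block):
    """Adapters combined by an AdapterFusion layer (cf. Fuse in the context column)."""

if __name__ == "__main__":
    setup = Stack("task_a", Fuse("lang_de", "lang_fr"))
    print(setup.flatten())              # {'task_a', 'lang_de', 'lang_fr'}
    print(setup.first(), setup.last())  # task_a lang_fr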
justincui03/tesla | distill.py | [
{
"identifier": "augment",
"path": "utils.py",
"snippet": "def augment(images, dc_aug_param, device):\n # This can be sped up in the future.\n\n if dc_aug_param != None and dc_aug_param['strategy'] != 'none':\n scale = dc_aug_param['scale']\n crop = dc_aug_param['crop']\n rotate = dc_aug_param['rotate']\n noise = dc_aug_param['noise']\n strategy = dc_aug_param['strategy']\n\n shape = images.shape\n mean = []\n for c in range(shape[1]):\n mean.append(float(torch.mean(images[:,c])))\n\n def cropfun(i):\n im_ = torch.zeros(shape[1],shape[2]+crop*2,shape[3]+crop*2, dtype=torch.float, device=device)\n for c in range(shape[1]):\n im_[c] = mean[c]\n im_[:, crop:crop+shape[2], crop:crop+shape[3]] = images[i]\n r, c = np.random.permutation(crop*2)[0], np.random.permutation(crop*2)[0]\n images[i] = im_[:, r:r+shape[2], c:c+shape[3]]\n\n def scalefun(i):\n h = int((np.random.uniform(1 - scale, 1 + scale)) * shape[2])\n w = int((np.random.uniform(1 - scale, 1 + scale)) * shape[2])\n tmp = F.interpolate(images[i:i + 1], [h, w], )[0]\n mhw = max(h, w, shape[2], shape[3])\n im_ = torch.zeros(shape[1], mhw, mhw, dtype=torch.float, device=device)\n r = int((mhw - h) / 2)\n c = int((mhw - w) / 2)\n im_[:, r:r + h, c:c + w] = tmp\n r = int((mhw - shape[2]) / 2)\n c = int((mhw - shape[3]) / 2)\n images[i] = im_[:, r:r + shape[2], c:c + shape[3]]\n\n def rotatefun(i):\n im_ = scipyrotate(images[i].cpu().data.numpy(), angle=np.random.randint(-rotate, rotate), axes=(-2, -1), cval=np.mean(mean))\n r = int((im_.shape[-2] - shape[-2]) / 2)\n c = int((im_.shape[-1] - shape[-1]) / 2)\n images[i] = torch.tensor(im_[:, r:r + shape[-2], c:c + shape[-1]], dtype=torch.float, device=device)\n\n def noisefun(i):\n images[i] = images[i] + noise * torch.randn(shape[1:], dtype=torch.float, device=device)\n\n\n augs = strategy.split('_')\n\n for i in range(shape[0]):\n choice = np.random.permutation(augs)[0] # randomly implement one augmentation\n if choice == 'crop':\n cropfun(i)\n elif choice == 'scale':\n scalefun(i)\n elif choice == 'rotate':\n rotatefun(i)\n elif choice == 'noise':\n noisefun(i)\n\n return images"
},
{
"identifier": "get_dataset",
"path": "utils.py",
"snippet": "def get_dataset(dataset, data_path, batch_size=1, args=None):\n\n class_map = None\n loader_train_dict = None\n class_map_inv = None\n\n if dataset == 'CIFAR10':\n channel = 3\n im_size = (32, 32)\n num_classes = 10\n mean = [0.4914, 0.4822, 0.4465]\n std = [0.2023, 0.1994, 0.2010]\n if args.zca:\n transform = transforms.Compose([transforms.ToTensor()])\n else:\n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)])\n dst_train = datasets.CIFAR10(data_path, train=True, download=True, transform=transform) # no augmentation\n dst_test = datasets.CIFAR10(data_path, train=False, download=True, transform=transform)\n class_names = dst_train.classes\n class_map = {x:x for x in range(num_classes)}\n\n\n elif dataset == 'Tiny':\n channel = 3\n im_size = (64, 64)\n num_classes = 200\n mean = [0.485, 0.456, 0.406]\n std = [0.229, 0.224, 0.225]\n if args.zca:\n transform = transforms.Compose([transforms.ToTensor()])\n else:\n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)])\n dst_train = datasets.ImageFolder(os.path.join(data_path, \"train\"), transform=transform) # no augmentation\n dst_test = datasets.ImageFolder(os.path.join(data_path, \"val\", \"images\"), transform=transform)\n class_names = dst_train.classes\n class_map = {x:x for x in range(num_classes)}\n\n\n elif dataset == 'ImageNet':\n channel = 3\n im_size = (64, 64)\n # im_size = (128, 128)\n # data_path = '/home/justincui/data/' + str(im_size[0])\n num_classes = 1000\n data_path = '/nfs/data/justincui/data/imagenet2012/' + str(im_size[0])\n\n mean = [0.485, 0.456, 0.406]\n std = [0.229, 0.224, 0.225]\n\n data_transforms = {\n 'train': transforms.Compose([\n # transforms.Resize(im_size),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ]),\n 'val': transforms.Compose([\n # transforms.Resize(im_size),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ]),\n }\n\n dst_train = datasets.ImageFolder(os.path.join(data_path, \"train\"), transform=data_transforms['train']) # no augmentation\n dst_test = datasets.ImageFolder(os.path.join(data_path, \"val\"), transform=data_transforms['val'])\n class_names = dst_train.classes\n class_map = {x:x for x in range(num_classes)}\n\n elif dataset.startswith('CIFAR100'):\n channel = 3\n im_size = (32, 32)\n num_classes = 100\n mean = [0.4914, 0.4822, 0.4465]\n std = [0.2023, 0.1994, 0.2010]\n\n if args.zca:\n transform = transforms.Compose([transforms.ToTensor()])\n else:\n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=mean, std=std), transforms.Resize(im_size)])\n dst_train = datasets.CIFAR100(data_path, train=True, download=True, transform=transform) # no augmentation\n dst_test = datasets.CIFAR100(data_path, train=False, download=True, transform=transform)\n class_names = dst_train.classes\n class_map = {x: x for x in range(num_classes)}\n\n else:\n exit('unknown dataset: %s'%dataset)\n\n if args.zca:\n images = []\n labels = []\n print(\"Train ZCA\")\n for i in tqdm.tqdm(range(len(dst_train))):\n im, lab = dst_train[i]\n images.append(im)\n labels.append(lab)\n images = torch.stack(images, dim=0).to(args.device)\n labels = torch.tensor(labels, dtype=torch.long, device=\"cpu\")\n zca = K.enhance.ZCAWhitening(eps=0.1, compute_inv=True)\n zca.fit(images)\n zca_images = zca(images).to(\"cpu\")\n dst_train = TensorDataset(zca_images, labels)\n\n images = []\n labels = []\n 
print(\"Test ZCA\")\n for i in tqdm.tqdm(range(len(dst_test))):\n im, lab = dst_test[i]\n images.append(im)\n labels.append(lab)\n images = torch.stack(images, dim=0).to(args.device)\n labels = torch.tensor(labels, dtype=torch.long, device=\"cpu\")\n\n zca_images = zca(images).to(\"cpu\")\n dst_test = TensorDataset(zca_images, labels)\n\n args.zca_trans = zca\n\n\n testloader = torch.utils.data.DataLoader(dst_test, batch_size=128, shuffle=False, num_workers=2)\n\n\n return channel, im_size, num_classes, class_names, mean, std, dst_train, dst_test, testloader, loader_train_dict, class_map, class_map_inv"
},
{
"identifier": "get_network",
"path": "utils.py",
"snippet": "def get_network(model, channel, num_classes, im_size=(32, 32), dist=True):\n torch.random.manual_seed(int(time.time() * 1000) % 100000)\n net_width, net_depth, net_act, net_norm, net_pooling = get_default_convnet_setting()\n\n if model == 'ConvNet':\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=net_depth, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)\n elif model == 'ConvNetD1':\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=1, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)\n elif model == 'ConvNetD2':\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=2, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)\n elif model == 'ConvNetD3':\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=3, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)\n elif model == 'ConvNetD4':\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=4, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)\n elif model == 'ConvNetD5':\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=5, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)\n elif model == 'ConvNetD6':\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=6, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)\n elif model == 'ConvNetD7':\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=7, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)\n elif model == 'ConvNetD8':\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=8, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)\n\n else:\n net = None\n exit('DC error: unknown model')\n\n if dist:\n gpu_num = torch.cuda.device_count()\n if gpu_num>0:\n device = 'cuda'\n if gpu_num>1:\n net = nn.DataParallel(net)\n else:\n device = 'cpu'\n net = net.to(device)\n\n return net"
},
{
"identifier": "get_eval_pool",
"path": "utils.py",
"snippet": "def get_eval_pool(eval_mode, model, model_eval):\n if eval_mode == 'M': # multiple architectures\n # model_eval_pool = ['MLP', 'ConvNet', 'AlexNet', 'VGG11', 'ResNet18', 'LeNet']\n model_eval_pool = ['ConvNet', 'AlexNet', 'VGG11', 'ResNet18_AP', 'ResNet18']\n # model_eval_pool = ['MLP', 'ConvNet', 'AlexNet', 'VGG11', 'ResNet18']\n elif eval_mode == 'W': # ablation study on network width\n model_eval_pool = ['ConvNetW32', 'ConvNetW64', 'ConvNetW128', 'ConvNetW256']\n elif eval_mode == 'D': # ablation study on network depth\n model_eval_pool = ['ConvNetD1', 'ConvNetD2', 'ConvNetD3', 'ConvNetD4']\n elif eval_mode == 'A': # ablation study on network activation function\n model_eval_pool = ['ConvNetAS', 'ConvNetAR', 'ConvNetAL']\n elif eval_mode == 'P': # ablation study on network pooling layer\n model_eval_pool = ['ConvNetNP', 'ConvNetMP', 'ConvNetAP']\n elif eval_mode == 'N': # ablation study on network normalization layer\n model_eval_pool = ['ConvNetNN', 'ConvNetBN', 'ConvNetLN', 'ConvNetIN', 'ConvNetGN']\n elif eval_mode == 'S': # itself\n model_eval_pool = [model[:model.index('BN')]] if 'BN' in model else [model]\n elif eval_mode == 'C':\n model_eval_pool = [model, 'ConvNet']\n else:\n model_eval_pool = [model_eval]\n return model_eval_pool"
},
{
"identifier": "evaluate_synset",
"path": "utils.py",
"snippet": "def evaluate_synset(it_eval, net, images_train, labels_train, testloader, args, return_loss=False, texture=False):\n net = net.to(args.device)\n images_train = images_train.to(args.device)\n labels_train = labels_train.to(args.device)\n lr = float(args.lr_net)\n Epoch = int(args.epoch_eval_train)\n lr_schedule = [Epoch//2+1]\n optimizer = torch.optim.SGD(net.parameters(), lr=lr, momentum=0.9, weight_decay=0.0005)\n\n criterion = nn.CrossEntropyLoss().to(args.device)\n\n dst_train = TensorDataset(images_train, labels_train)\n trainloader = torch.utils.data.DataLoader(dst_train, batch_size=args.batch_train, shuffle=True, num_workers=0)\n\n start = time.time()\n acc_train_list = []\n loss_train_list = []\n\n for ep in tqdm.tqdm(range(Epoch+1)):\n loss_train, acc_train = epoch('train', trainloader, net, optimizer, criterion, args, aug=True, texture=texture)\n acc_train_list.append(acc_train)\n loss_train_list.append(loss_train)\n if ep == Epoch:\n with torch.no_grad():\n loss_test, acc_test = epoch('test', testloader, net, optimizer, criterion, args, aug=False)\n if ep in lr_schedule:\n lr *= 0.1\n optimizer = torch.optim.SGD(net.parameters(), lr=lr, momentum=0.9, weight_decay=0.0005)\n\n\n time_train = time.time() - start\n\n print('%s Evaluate_%02d: epoch = %04d train time = %d s train loss = %.6f train acc = %.4f, test acc = %.4f' % (get_time(), it_eval, Epoch, int(time_train), loss_train, acc_train, acc_test))\n\n if return_loss:\n return net, acc_train_list, acc_test, loss_train_list, loss_test\n else:\n return net, acc_train_list, acc_test"
},
{
"identifier": "get_time",
"path": "utils.py",
"snippet": "def get_time():\n return str(time.strftime(\"[%Y-%m-%d %H:%M:%S]\", time.localtime()))"
},
{
"identifier": "DiffAugment",
"path": "utils.py",
"snippet": "def DiffAugment(x, strategy='', seed = -1, param = None):\n if seed == -1:\n param.batchmode = False\n else:\n param.batchmode = True\n\n param.latestseed = seed\n\n if strategy == 'None' or strategy == 'none':\n return x\n\n if strategy:\n if param.aug_mode == 'M': # original\n for p in strategy.split('_'):\n for f in AUGMENT_FNS[p]:\n x = f(x, param)\n elif param.aug_mode == 'S':\n pbties = strategy.split('_')\n set_seed_DiffAug(param)\n p = pbties[torch.randint(0, len(pbties), size=(1,)).item()]\n for f in AUGMENT_FNS[p]:\n x = f(x, param)\n else:\n exit('Error ZH: unknown augmentation mode.')\n x = x.contiguous()\n return x"
},
{
"identifier": "DiffAugmentList",
"path": "utils.py",
"snippet": "def DiffAugmentList(x_list, strategy='', seed = -1, param = None):\n if seed == -1:\n param.batchmode = False\n else:\n param.batchmode = True\n\n param.latestseed = seed\n\n if strategy == 'None' or strategy == 'none':\n return x\n\n if strategy:\n if param.aug_mode == 'M': # original\n for p in strategy.split('_'):\n for f in AUGMENT_FNS[p]:\n for x in x_list:\n x = f(x, param)\n elif param.aug_mode == 'S':\n pbties = strategy.split('_')\n set_seed_DiffAug(param)\n p = pbties[torch.randint(0, len(pbties), size=(1,)).item()]\n for f in AUGMENT_FNS[p]:\n for x in x_list:\n x = f(x, param)\n else:\n exit('Error ZH: unknown augmentation mode.')\n for x in x_list:\n x = x.contiguous()\n return x_list"
},
{
"identifier": "ParamDiffAug",
"path": "utils.py",
"snippet": "class ParamDiffAug():\n def __init__(self):\n self.aug_mode = 'S' #'multiple or single'\n self.prob_flip = 0.5\n self.ratio_scale = 1.2\n self.ratio_rotate = 15.0\n self.ratio_crop_pad = 0.125\n self.ratio_cutout = 0.5 # the size would be 0.5x0.5\n self.ratio_noise = 0.05\n self.brightness = 1.0\n self.saturation = 2.0\n self.contrast = 0.5"
},
{
"identifier": "ReparamModule",
"path": "reparam_module.py",
"snippet": "class ReparamModule(nn.Module):\n def _get_module_from_name(self, mn):\n if mn == '':\n return self\n m = self\n for p in mn.split('.'):\n m = getattr(m, p)\n return m\n\n def __init__(self, module):\n super(ReparamModule, self).__init__()\n self.module = module\n\n param_infos = [] # (module name/path, param name)\n shared_param_memo = {}\n shared_param_infos = [] # (module name/path, param name, src module name/path, src param_name)\n params = []\n param_numels = []\n param_shapes = []\n for mn, m in self.named_modules():\n for n, p in m.named_parameters(recurse=False):\n if p is not None:\n if p in shared_param_memo:\n shared_mn, shared_n = shared_param_memo[p]\n shared_param_infos.append((mn, n, shared_mn, shared_n))\n else:\n shared_param_memo[p] = (mn, n)\n param_infos.append((mn, n))\n params.append(p.detach())\n param_numels.append(p.numel())\n param_shapes.append(p.size())\n\n assert len(set(p.dtype for p in params)) <= 1, \\\n \"expects all parameters in module to have same dtype\"\n\n # store the info for unflatten\n self._param_infos = tuple(param_infos)\n self._shared_param_infos = tuple(shared_param_infos)\n self._param_numels = tuple(param_numels)\n self._param_shapes = tuple(param_shapes)\n\n # flatten\n flat_param = nn.Parameter(torch.cat([p.reshape(-1) for p in params], 0))\n self.register_parameter('flat_param', flat_param)\n self.param_numel = flat_param.numel()\n del params\n del shared_param_memo\n\n # deregister the names as parameters\n for mn, n in self._param_infos:\n delattr(self._get_module_from_name(mn), n)\n for mn, n, _, _ in self._shared_param_infos:\n delattr(self._get_module_from_name(mn), n)\n\n # register the views as plain attributes\n self._unflatten_param(self.flat_param)\n\n # now buffers\n # they are not reparametrized. 
just store info as (module, name, buffer)\n buffer_infos = []\n for mn, m in self.named_modules():\n for n, b in m.named_buffers(recurse=False):\n if b is not None:\n buffer_infos.append((mn, n, b))\n\n self._buffer_infos = tuple(buffer_infos)\n self._traced_self = None\n\n def trace(self, example_input, **trace_kwargs):\n assert self._traced_self is None, 'This ReparamModule is already traced'\n\n if isinstance(example_input, torch.Tensor):\n example_input = (example_input,)\n example_input = tuple(example_input)\n example_param = (self.flat_param.detach().clone(),)\n example_buffers = (tuple(b.detach().clone() for _, _, b in self._buffer_infos),)\n\n self._traced_self = torch.jit.trace_module(\n self,\n inputs=dict(\n _forward_with_param=example_param + example_input,\n _forward_with_param_and_buffers=example_param + example_buffers + example_input,\n ),\n **trace_kwargs,\n )\n\n # replace forwards with traced versions\n self._forward_with_param = self._traced_self._forward_with_param\n self._forward_with_param_and_buffers = self._traced_self._forward_with_param_and_buffers\n return self\n\n def clear_views(self):\n for mn, n in self._param_infos:\n setattr(self._get_module_from_name(mn), n, None) # This will set as plain attr\n\n def _apply(self, *args, **kwargs):\n if self._traced_self is not None:\n self._traced_self._apply(*args, **kwargs)\n return self\n return super(ReparamModule, self)._apply(*args, **kwargs)\n\n def _unflatten_param(self, flat_param):\n ps = (t.view(s) for (t, s) in zip(flat_param.split(self._param_numels), self._param_shapes))\n for (mn, n), p in zip(self._param_infos, ps):\n setattr(self._get_module_from_name(mn), n, p) # This will set as plain attr\n for (mn, n, shared_mn, shared_n) in self._shared_param_infos:\n setattr(self._get_module_from_name(mn), n, getattr(self._get_module_from_name(shared_mn), shared_n))\n\n @contextmanager\n def unflattened_param(self, flat_param):\n saved_views = [getattr(self._get_module_from_name(mn), n) for mn, n in self._param_infos]\n self._unflatten_param(flat_param)\n yield\n # Why not just `self._unflatten_param(self.flat_param)`?\n # 1. because of https://github.com/pytorch/pytorch/issues/17583\n # 2. 
slightly faster since it does not require reconstruct the split+view\n # graph\n for (mn, n), p in zip(self._param_infos, saved_views):\n setattr(self._get_module_from_name(mn), n, p)\n for (mn, n, shared_mn, shared_n) in self._shared_param_infos:\n setattr(self._get_module_from_name(mn), n, getattr(self._get_module_from_name(shared_mn), shared_n))\n\n @contextmanager\n def replaced_buffers(self, buffers):\n for (mn, n, _), new_b in zip(self._buffer_infos, buffers):\n setattr(self._get_module_from_name(mn), n, new_b)\n yield\n for mn, n, old_b in self._buffer_infos:\n setattr(self._get_module_from_name(mn), n, old_b)\n\n def _forward_with_param_and_buffers(self, flat_param, buffers, *inputs, **kwinputs):\n with self.unflattened_param(flat_param):\n with self.replaced_buffers(buffers):\n return self.module(*inputs, **kwinputs)\n\n def _forward_with_param(self, flat_param, *inputs, **kwinputs):\n with self.unflattened_param(flat_param):\n return self.module(*inputs, **kwinputs)\n\n def forward(self, *inputs, flat_param=None, buffers=None, **kwinputs):\n flat_param = torch.squeeze(flat_param)\n # print(\"PARAMS ON DEVICE: \", flat_param.get_device())\n # print(\"DATA ON DEVICE: \", inputs[0].get_device())\n # flat_param.to(\"cuda:{}\".format(inputs[0].get_device()))\n # self.module.to(\"cuda:{}\".format(inputs[0].get_device()))\n if flat_param is None:\n flat_param = self.flat_param\n if buffers is None:\n return self._forward_with_param(flat_param, *inputs, **kwinputs)\n else:\n return self._forward_with_param_and_buffers(flat_param, tuple(buffers), *inputs, **kwinputs)"
}
] | import os
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.utils
import wandb
import copy
import random
import warnings
from tqdm import tqdm
from utils import augment, get_dataset, get_network, get_eval_pool, evaluate_synset, get_time, DiffAugment, DiffAugmentList, ParamDiffAug
from reparam_module import ReparamModule
from torch.utils.data import Subset
from torch.utils.data import DataLoader
from PIL import PngImagePlugin | 8,079 | args.distributed = torch.cuda.device_count() > 1
print('Hyper-parameters: \n', args.__dict__)
print('Evaluation model pool: ', model_eval_pool)
''' organize the real dataset '''
indices_class = [[] for c in range(num_classes)]
# Build label to index map
print("---------------Build label to index map--------------")
# For machines with limited RAM, it's impossible to load all ImageNet or even TinyImageNet into memory.
# Even if it's possible, it will take too long to process.
# Therefore we pregenerate an indices to image map and use this map to quickly random samples from ImageNet or TinyImageNet dataset.
if args.dataset == 'ImageNet':
indices_class = np.load('indices/imagenet_indices_class.npy', allow_pickle=True)
elif args.dataset == 'Tiny':
indices_class = np.load('indices/tiny_indices_class.npy', allow_pickle=True)
else:
for i, data in tqdm(enumerate(dst_train)):
indices_class[data[1]].append(i)
# for c in range(num_classes):
# print('class c = %d: %d real images'%(c, len(indices_class[c])))
def get_images(c, n): # get random n images from class c
idx_shuffle = np.random.permutation(indices_class[c])[:n]
subset = Subset(dst_train, idx_shuffle)
data_loader = DataLoader(subset, batch_size=n)
# only read the first batch which has n(IPC) number of images.
for data in data_loader:
return data[0].to("cpu")
''' initialize the synthetic data '''
label_syn = torch.tensor([np.ones(args.ipc)*i for i in range(num_classes)], dtype=torch.long, requires_grad=False, device=args.device).view(-1) # [0,0,0, 1,1,1, ..., 9,9,9]
if args.texture:
image_syn = torch.randn(size=(num_classes * args.ipc, channel, im_size[0]*args.canvas_size, im_size[1]*args.canvas_size), dtype=torch.float)
else:
image_syn = torch.randn(size=(num_classes * args.ipc, channel, im_size[0], im_size[1]), dtype=torch.float)
syn_lr = torch.tensor(args.lr_teacher).to(args.device)
if args.pix_init == 'real':
print('initialize synthetic data from random real images')
for c in range(num_classes):
image_syn.data[c * args.ipc:(c + 1) * args.ipc] = get_images(c, args.ipc).detach().data
else:
print('initialize synthetic data from random noise')
''' training '''
image_syn = image_syn.detach().to(args.device).requires_grad_(True)
print(image_syn.shape)
syn_lr = syn_lr.detach().to(args.device).requires_grad_(True)
optimizer_img = torch.optim.SGD([image_syn], lr=args.lr_img, momentum=0.5)
optimizer_lr = torch.optim.SGD([syn_lr], lr=args.lr_lr, momentum=0.5)
optimizer_img.zero_grad()
optimizer_lr.zero_grad()
criterion = nn.CrossEntropyLoss().to(args.device)
print('%s training begins'%get_time())
expert_dir = os.path.join(args.buffer_path, args.dataset)
if args.dataset in ["CIFAR10", "CIFAR100"] and not args.zca:
expert_dir += "_NO_ZCA"
expert_dir = os.path.join(expert_dir, args.model)
print("Expert Dir: {}".format(expert_dir))
if not args.random_trajectory:
if args.load_all:
buffer = []
n = 0
while os.path.exists(os.path.join(expert_dir, "replay_buffer_{}.pt".format(n))):
buffer = buffer + torch.load(os.path.join(expert_dir, "replay_buffer_{}.pt".format(n)))
n += 1
if n == 0:
raise AssertionError("No buffers detected at {}".format(expert_dir))
else:
expert_files = []
n = 0
while os.path.exists(os.path.join(expert_dir, "replay_buffer_{}.pt".format(n))):
expert_files.append(os.path.join(expert_dir, "replay_buffer_{}.pt".format(n)))
n += 1
if n == 0:
raise AssertionError("No buffers detected at {}".format(expert_dir))
file_idx = 0
expert_idx = 0
random.shuffle(expert_files)
if args.max_files is not None:
expert_files = expert_files[:args.max_files]
print("loading file {}".format(expert_files[file_idx]))
buffer = torch.load(expert_files[file_idx])
if args.max_experts is not None:
buffer = buffer[:args.max_experts]
random.shuffle(buffer)
best_acc = {m: 0 for m in model_eval_pool}
best_std = {m: 0 for m in model_eval_pool}
for it in range(0, args.Iteration+1):
save_this_it = False
# writer.add_scalar('Progress', it, it)
wandb.log({"Progress": it}, step=it)
''' Evaluate synthetic data '''
if it in eval_it_pool and args.eval_it > 0:
for model_eval in model_eval_pool:
print('-------------------------\nEvaluation\nmodel_train = %s, model_eval = %s, iteration = %d'%(args.model, model_eval, it))
if args.dsa:
print('DSA augmentation strategy: \n', args.dsa_strategy)
print('DSA augmentation parameters: \n', args.dsa_param.__dict__)
else:
print('DC augmentation parameters: \n', args.dc_aug_param)
accs_test = []
accs_train = []
for it_eval in range(args.num_eval):
|
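Before the full-file column that follows, here is a short standalone sketch of the synthetic-data bookkeeping performed in the cropped code above: labels laid out class-block by class-block, synthetic images kept as a directly optimized tensor, and a learnable student learning rate, each with its own SGD optimizer. All concrete numbers below (ipc, the two outer learning rates) are placeholders for illustration, not values taken from the row.

import numpy as np
import torch

num_classes, ipc, channel, im_size = 10, 2, 3, (32, 32)  # placeholders, not values from the row

# Labels laid out class-block by class-block: [0, 0, 1, 1, ..., 9, 9]
label_syn = torch.tensor(
    np.stack([np.ones(ipc) * i for i in range(num_classes)]), dtype=torch.long
).view(-1)

# Synthetic images start as Gaussian noise and are optimized directly as a tensor.
image_syn = torch.randn(num_classes * ipc, channel, *im_size, requires_grad=True)

# The student's learning rate is itself a trainable scalar.
syn_lr = torch.tensor(0.01, requires_grad=True)

# Each distilled quantity gets its own SGD optimizer (outer learning rates are placeholders).
optimizer_img = torch.optim.SGD([image_syn], lr=100.0, momentum=0.5)
optimizer_lr = torch.optim.SGD([syn_lr], lr=1e-5, momentum=0.5)

print(label_syn.tolist())  # [0, 0, 1, 1, ..., 9, 9]
print(image_syn.shape)     # torch.Size([20, 3, 32, 32])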
LARGE_ENOUGH_NUMBER = 100
PngImagePlugin.MAX_TEXT_CHUNK = LARGE_ENOUGH_NUMBER * (1024**2)
warnings.filterwarnings("ignore", category=DeprecationWarning)
def main(args):
if args.zca and args.texture:
raise AssertionError("Cannot use zca and texture together")
if args.texture and args.pix_init == "real":
print("WARNING: Using texture with real initialization will take a very long time to smooth out the boundaries between images.")
if args.max_experts is not None and args.max_files is not None:
args.total_experts = args.max_experts * args.max_files
print("CUDNN STATUS: {}".format(torch.backends.cudnn.enabled))
args.dsa = True if args.dsa == 'True' else False
args.device = 'cuda' if torch.cuda.is_available() else 'cpu'
eval_it_pool = np.arange(0, args.Iteration + 1, args.eval_it).tolist()
channel, im_size, num_classes, class_names, mean, std, dst_train, dst_test, testloader, loader_train_dict, class_map, class_map_inv = get_dataset(args.dataset, args.data_path, args.batch_real, args=args)
model_eval_pool = get_eval_pool(args.eval_mode, args.model, args.model)
im_res = im_size[0]
args.im_size = im_size
accs_all_exps = dict() # record performances of all experiments
for key in model_eval_pool:
accs_all_exps[key] = []
data_save = []
if args.dsa:
# args.epoch_eval_train = 1000
args.dc_aug_param = None
args.dsa_param = ParamDiffAug()
dsa_params = args.dsa_param
if args.zca:
zca_trans = args.zca_trans
else:
zca_trans = None
wandb.init(sync_tensorboard=False,
project="DatasetDistillation",
job_type="CleanRepo",
config=args,
)
args = type('', (), {})()
for key in wandb.config._items:
setattr(args, key, wandb.config._items[key])
args.dsa_param = dsa_params
args.zca_trans = zca_trans
if args.batch_syn is None:
args.batch_syn = num_classes * args.ipc
args.distributed = torch.cuda.device_count() > 1
print('Hyper-parameters: \n', args.__dict__)
print('Evaluation model pool: ', model_eval_pool)
''' organize the real dataset '''
indices_class = [[] for c in range(num_classes)]
# Build label to index map
print("---------------Build label to index map--------------")
    # For machines with limited RAM, it's impossible to load all of ImageNet (or even TinyImageNet) into memory.
    # Even if it were possible, it would take too long to process.
    # Therefore we pregenerate a class-to-indices map and use it to quickly draw random samples from the ImageNet or TinyImageNet dataset.
if args.dataset == 'ImageNet':
indices_class = np.load('indices/imagenet_indices_class.npy', allow_pickle=True)
elif args.dataset == 'Tiny':
indices_class = np.load('indices/tiny_indices_class.npy', allow_pickle=True)
else:
for i, data in tqdm(enumerate(dst_train)):
indices_class[data[1]].append(i)
# for c in range(num_classes):
# print('class c = %d: %d real images'%(c, len(indices_class[c])))
def get_images(c, n): # get random n images from class c
idx_shuffle = np.random.permutation(indices_class[c])[:n]
subset = Subset(dst_train, idx_shuffle)
data_loader = DataLoader(subset, batch_size=n)
        # only read the first batch, which contains n (i.e., IPC) images.
for data in data_loader:
return data[0].to("cpu")
''' initialize the synthetic data '''
label_syn = torch.tensor([np.ones(args.ipc)*i for i in range(num_classes)], dtype=torch.long, requires_grad=False, device=args.device).view(-1) # [0,0,0, 1,1,1, ..., 9,9,9]
if args.texture:
image_syn = torch.randn(size=(num_classes * args.ipc, channel, im_size[0]*args.canvas_size, im_size[1]*args.canvas_size), dtype=torch.float)
else:
image_syn = torch.randn(size=(num_classes * args.ipc, channel, im_size[0], im_size[1]), dtype=torch.float)
syn_lr = torch.tensor(args.lr_teacher).to(args.device)
if args.pix_init == 'real':
print('initialize synthetic data from random real images')
for c in range(num_classes):
image_syn.data[c * args.ipc:(c + 1) * args.ipc] = get_images(c, args.ipc).detach().data
else:
print('initialize synthetic data from random noise')
''' training '''
image_syn = image_syn.detach().to(args.device).requires_grad_(True)
print(image_syn.shape)
syn_lr = syn_lr.detach().to(args.device).requires_grad_(True)
optimizer_img = torch.optim.SGD([image_syn], lr=args.lr_img, momentum=0.5)
optimizer_lr = torch.optim.SGD([syn_lr], lr=args.lr_lr, momentum=0.5)
optimizer_img.zero_grad()
optimizer_lr.zero_grad()
criterion = nn.CrossEntropyLoss().to(args.device)
print('%s training begins'%get_time())
expert_dir = os.path.join(args.buffer_path, args.dataset)
if args.dataset in ["CIFAR10", "CIFAR100"] and not args.zca:
expert_dir += "_NO_ZCA"
expert_dir = os.path.join(expert_dir, args.model)
print("Expert Dir: {}".format(expert_dir))
if not args.random_trajectory:
if args.load_all:
buffer = []
n = 0
while os.path.exists(os.path.join(expert_dir, "replay_buffer_{}.pt".format(n))):
buffer = buffer + torch.load(os.path.join(expert_dir, "replay_buffer_{}.pt".format(n)))
n += 1
if n == 0:
raise AssertionError("No buffers detected at {}".format(expert_dir))
else:
expert_files = []
n = 0
while os.path.exists(os.path.join(expert_dir, "replay_buffer_{}.pt".format(n))):
expert_files.append(os.path.join(expert_dir, "replay_buffer_{}.pt".format(n)))
n += 1
if n == 0:
raise AssertionError("No buffers detected at {}".format(expert_dir))
file_idx = 0
expert_idx = 0
random.shuffle(expert_files)
if args.max_files is not None:
expert_files = expert_files[:args.max_files]
print("loading file {}".format(expert_files[file_idx]))
buffer = torch.load(expert_files[file_idx])
if args.max_experts is not None:
buffer = buffer[:args.max_experts]
random.shuffle(buffer)
best_acc = {m: 0 for m in model_eval_pool}
best_std = {m: 0 for m in model_eval_pool}
for it in range(0, args.Iteration+1):
save_this_it = False
# writer.add_scalar('Progress', it, it)
wandb.log({"Progress": it}, step=it)
''' Evaluate synthetic data '''
if it in eval_it_pool and args.eval_it > 0:
for model_eval in model_eval_pool:
print('-------------------------\nEvaluation\nmodel_train = %s, model_eval = %s, iteration = %d'%(args.model, model_eval, it))
if args.dsa:
print('DSA augmentation strategy: \n', args.dsa_strategy)
print('DSA augmentation parameters: \n', args.dsa_param.__dict__)
else:
print('DC augmentation parameters: \n', args.dc_aug_param)
accs_test = []
accs_train = []
for it_eval in range(args.num_eval): | net_eval = get_network(model_eval, channel, num_classes, im_size).to(args.device) # get a random model | 2 | 2023-10-17 23:11:36+00:00 | 12k |
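The record above repeatedly scans expert_dir for consecutively numbered replay_buffer_{n}.pt files, either merging them all (load_all) or shuffling the file list and loading one file at a time. Below is a minimal, self-contained sketch of that pattern; the helper names (list_expert_buffer_files, load_expert_buffer) and their keyword arguments are illustrative and not taken from the original repository.

import os
import random
import torch

def list_expert_buffer_files(expert_dir):
    """Collect replay_buffer_{n}.pt paths in index order; fail loudly if none exist."""
    files, n = [], 0
    while os.path.exists(os.path.join(expert_dir, "replay_buffer_{}.pt".format(n))):
        files.append(os.path.join(expert_dir, "replay_buffer_{}.pt".format(n)))
        n += 1
    if not files:
        raise AssertionError("No buffers detected at {}".format(expert_dir))
    return files

def load_expert_buffer(path, max_experts=None):
    """Load one torch-saved list of expert trajectories, optionally truncate, then shuffle."""
    buffer = torch.load(path)
    if max_experts is not None:
        buffer = buffer[:max_experts]
    random.shuffle(buffer)
    return buffer

At training time the caller can shuffle the returned file list, truncate it to max_files, and call load_expert_buffer lazily whenever the current buffer is exhausted, which keeps memory bounded compared to the load_all branch.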
upiterbarg/hihack | models/utils.py | [
{
"identifier": "CDGPT5",
"path": "models/cdgpt5.py",
"snippet": "class CDGPT5(nn.Module):\n def __init__(self, shape, action_space, flags, device):\n super(CDGPT5, self).__init__()\n\n self.flags = flags\n self.num_actions = len(action_space)\n self.use_prev_action = flags.use_prev_action\n\n self.topline_encoder = TopLineEncoder()\n self.bottomline_encoder = torch.jit.script(BottomLinesEncoder())\n\n pixel_size = flags.pixel_size\n if flags.crop_dim == 0:\n screen_shape = (24 * pixel_size, 80 * pixel_size)\n else:\n screen_shape = (flags.crop_dim * pixel_size, flags.crop_dim * pixel_size)\n\n self.screen_encoder = torch.jit.script(ScreenEncoder(screen_shape))\n\n self.prev_actions_dim = 128 if self.use_prev_action else 0\n\n if flags.cdgpt5_xxl_policy:\n self.hidden_dim = 1024\n else:\n self.hidden_dim = 512\n\n self.h_dim = sum(\n [\n self.topline_encoder.hidden_dim,\n self.bottomline_encoder.hidden_dim,\n self.screen_encoder.hidden_dim,\n self.prev_actions_dim,\n ]\n )\n \n self.core = nn.LSTM(self.h_dim, self.hidden_dim, num_layers=1)\n\n if flags.cdgpt5_xxl_decoder:\n self.policy_hidden_dim = 1024\n self.policy = nn.Sequential(nn.Linear(self.hidden_dim, self.policy_hidden_dim),\n nn.ELU(),\n nn.Linear(self.policy_hidden_dim, self.policy_hidden_dim),\n nn.ELU(),\n nn.Linear(self.policy_hidden_dim, self.num_actions)\n )\n else:\n self.policy = nn.Linear(self.hidden_dim, self.num_actions)\n\n self.baseline = nn.Linear(self.hidden_dim, 1)\n self.version = 0\n self.inference_unroll_length = flags.unroll_length if not 'inference_unroll_length' in flags else flags.inference_unroll_length\n\n def initial_state(self, batch_size=1):\n return tuple(\n torch.zeros(self.core.num_layers, batch_size, self.core.hidden_size)\n for _ in range(2)\n )\n\n def forward(self, inputs, core_state=None):\n T, B, C, H, W = inputs[\"screen_image\"].shape\n\n topline = inputs[\"tty_chars\"][..., 0, :]\n bottom_line = inputs[\"tty_chars\"][..., -2:, :]\n\n st = [\n self.topline_encoder(\n topline.float(memory_format=torch.contiguous_format).view(T * B, -1)\n ),\n self.bottomline_encoder(\n bottom_line.float(memory_format=torch.contiguous_format).view(T * B, -1)\n ),\n self.screen_encoder(\n inputs[\"screen_image\"]\n .float(memory_format=torch.contiguous_format)\n .view(T * B, C, H, W)\n ),\n ]\n if self.use_prev_action:\n st.append(torch.nn.functional.one_hot(inputs[\"prev_action\"], self.prev_actions_dim).view(T * B, -1))\n\n st = torch.cat(st, dim=1)\n\n core_input = st.view(T, B, -1)\n core_output_list = []\n notdone = (~inputs[\"done\"]).float()\n\n for input, nd in zip(core_input.unbind(), notdone.unbind()):\n # Reset core state to zero whenever an episode ended.\n # Make `done` broadcastable with (num_layers, B, hidden_size)\n nd = nd.view(1, -1, 1)\n core_state = tuple(nd * t for t in core_state)\n output, core_state = self.core(input.unsqueeze(0), core_state)\n core_output_list.append(output)\n\n core_output = torch.flatten(torch.cat(core_output_list), 0, 1)\n\n # -- [B' x A]\n policy_logits = self.policy(core_output)\n\n # -- [B' x 1]\n baseline = self.baseline(core_output)\n\n action = torch.multinomial(F.softmax(policy_logits + 1e-5, dim=1), num_samples=1)\n\n policy_logits = policy_logits.view(T, B, -1)\n baseline = baseline.view(T, B)\n action = action.view(T, B)\n version = torch.ones_like(action) * self.version\n\n output = dict(\n policy_logits=policy_logits,\n baseline=baseline,\n action=action,\n version=version,\n )\n\n return (output, core_state)"
},
{
"identifier": "CleavedHierarchicalPolicy",
"path": "models/cleaved_hierarchical_policy.py",
"snippet": "class CleavedHierarchicalPolicy(nn.Module):\n def __init__(self, \n flags,\n high_level_model, \n low_level_model):\n super(CleavedHierarchicalPolicy, self).__init__()\n self.high_level_model = high_level_model\n self.low_level_model = low_level_model\n self.num_strategies = self.high_level_model.num_strategies\n\n self.gumbel_softmax_tau = 1\n if 'gumbel_softmax_tau' in flags:\n self.gumbel_softmax_tau = flags.gumbel_softmax_tau\n\n self.disable_high_level_policy_gradients = flags.disable_high_level_policy_gradients\n self.disable_low_level_policy_gradients = flags.disable_low_level_policy_gradients\n self.version = 0\n self.eps_greedy = flags.eps_greedy if 'eps_greedy' in flags else 1\n\n\n def initial_state(self, batch_size=1):\n high_level_core_state = self.high_level_model.initial_state(batch_size)\n low_level_core_state = self.low_level_model.initial_state(batch_size)\n return high_level_core_state + low_level_core_state\n\n def parameters(self):\n if self.disable_high_level_policy_gradients:\n return self.low_level_model.parameters()\n elif self.disable_low_level_policy_gradients:\n return self.high_level_model.parameters()\n return list(self.low_level_model.parameters()) + list(self.high_level_model.parameters())\n\n def buffers(self):\n if self.disable_high_level_policy_gradients:\n return self.low_level_model.buffers()\n elif self.disable_low_level_policy_gradients:\n return self.high_level_model.buffers()\n return list(self.low_level_model.buffers()) + list(self.high_level_model.buffers())\n\n def forward(self, inputs, core_state, last_ttyrec_data=None):\n high_level_core_state, low_level_core_state = core_state[:2], core_state[2:]\n\n if not last_ttyrec_data is None:\n low_level_out, low_level_core_state = self.low_level_model(inputs, low_level_core_state, return_strategywise_logits=True, last_ttyrec_data=last_ttyrec_data)\n else:\n low_level_out, low_level_core_state = self.low_level_model(inputs, low_level_core_state, return_strategywise_logits=True)\n high_level_out, high_level_core_state = self.high_level_model(inputs, high_level_core_state)\n\n policy_logits = low_level_out['strategywise_policy_logits']\n strategy_logits = high_level_out['strategy_logits']\n\n if isinstance(self.low_level_model, HierarchicalTransformerLSTM):\n strategy_logits = torch.cat([strategy_logits[..., -1].unsqueeze(-1), strategy_logits[..., :-1]], axis=-1)\n\n T, B, _ = strategy_logits.shape\n\n sample = True\n\n if self.eps_greedy < 1:\n sample = bool(np.random.binomial(1, self.eps_greedy))\n\n if sample:\n strategies = F.gumbel_softmax(strategy_logits.reshape(T * B, -1), tau=self.gumbel_softmax_tau, hard=True).bool().unsqueeze(-1).expand((-1, -1, policy_logits.shape[-1]))\n sdim = strategy_logits.size(-1)\n out_policy_logits = torch.sum(torch.mul(policy_logits[:sdim], torch.swapaxes(strategies, 0, 1)), axis=0).view(T, B, -1)\n else:\n strategies = torch.argmax(strategy_logits.reshape(T * B, -1), axis=-1)\n out_policy_logits = policy_logits[strategies, torch.arange(strategies.size(0))].view(T, B, -1)\n\n\n out_action = torch.multinomial(F.softmax(out_policy_logits.reshape(T * B, -1), dim=1), num_samples=1).long().view(T, B)\n\n version = torch.ones_like(out_action) * self.version\n\n if self.disable_high_level_policy_gradients:\n baseline = low_level_out['baseline']\n else:\n baseline = high_level_out['baseline']\n\n output = dict(\n policy_logits=out_policy_logits,\n baseline=baseline,\n action=out_action,\n version=version,\n strategy_logits=strategy_logits.view(T, B, -1),\n 
all_policy_logits=torch.swapaxes(torch.swapaxes(policy_logits, 0, 1), 1, 2),\n )\n\n core_state = high_level_core_state + low_level_core_state\n return (output, core_state)"
},
{
"identifier": "FlatTransformer",
"path": "models/flat_transformer.py",
"snippet": "class FlatTransformer(nn.Module):\n def __init__(self, shape, action_space, flags, device):\n super(FlatTransformer, self).__init__()\n \n self.flags = flags\n self.num_actions = len(action_space)\n self.use_prev_action = flags.use_prev_action\n\n self.topline_encoder = TopLineEncoder()\n self.bottomline_encoder = torch.jit.script(BottomLinesEncoder())\n\n pixel_size = flags.pixel_size\n if flags.crop_dim == 0:\n screen_shape = (24 * pixel_size, 80 * pixel_size)\n else:\n screen_shape = (flags.crop_dim * pixel_size, flags.crop_dim * pixel_size)\n\n self.screen_encoder = torch.jit.script(ScreenEncoder(screen_shape))\n\n self.prev_actions_dim = 128 if self.use_prev_action else 0\n\n self.h_dim = sum(\n [\n self.topline_encoder.hidden_dim,\n self.bottomline_encoder.hidden_dim,\n self.screen_encoder.hidden_dim,\n self.prev_actions_dim,\n ]\n )\n\n self.num_attention_heads = flags.num_attention_heads \n self.num_transformer_encoder_layers = flags.num_transformer_layers\n core_layer = nn.TransformerEncoderLayer(d_model=self.h_dim, nhead=self.num_attention_heads)\n self.core = nn.TransformerEncoder(core_layer, num_layers=self.num_transformer_encoder_layers)\n self.positional_encoder = PositionalEncoding(self.h_dim)\n\n self.policy_hidden_dim = 1024\n self.policy = nn.Sequential(nn.Linear(self.h_dim, self.policy_hidden_dim),\n nn.ELU(),\n nn.Linear(self.policy_hidden_dim, self.policy_hidden_dim),\n nn.ELU(),\n nn.Linear(self.policy_hidden_dim, self.num_actions)\n )\n self.baseline = nn.Linear(self.h_dim, 1)\n\n self.version = 0\n self.inference_unroll_length = 1\n\n def initial_state(self, batch_size=1):\n return (\n torch.zeros(1, batch_size, self.inference_unroll_length, self.inference_unroll_length),\n torch.rand(self.inference_unroll_length, batch_size, self.h_dim)\n )\n\n def forward(self, inputs, core_state=None):\n T, B, C, H, W = inputs[\"screen_image\"].shape\n\n topline = inputs[\"tty_chars\"][..., 0, :]\n bottom_line = inputs[\"tty_chars\"][..., -2:, :]\n\n st = [\n self.topline_encoder(\n topline.float(memory_format=torch.contiguous_format).view(T * B, -1)\n ),\n self.bottomline_encoder(\n bottom_line.float(memory_format=torch.contiguous_format).view(T * B, -1)\n ),\n self.screen_encoder(\n inputs[\"screen_image\"]\n .float(memory_format=torch.contiguous_format)\n .view(T * B, C, H, W)\n ),\n ]\n if self.use_prev_action:\n st.append(torch.nn.functional.one_hot(inputs[\"prev_action\"], self.prev_actions_dim).view(T * B, -1))\n\n st = torch.cat(st, dim=1)\n\n core_input = st.reshape(T, B, -1)\n notdone = (~inputs[\"done\"]).float()\n if not self.training:\n prev_mask, prev_encodings = core_state\n prev_mask = prev_mask.squeeze(0)\n core_input = torch.cat([prev_encodings[1:], core_input], axis=0)\n core_mask = torch.stack(\n [torch.cat([torch.cat([prev_mask[i, 1:, 1:], prev_mask[i, -1, 1:].unsqueeze(0)], axis=0) * notdone[-1, i], torch.zeros((self.inference_unroll_length, 1)).to(core_input.device)], axis=1) for i in range(B)]\n )\n core_mask[:, -1, -1] = 1\n core_state = (core_mask.detach().clone().unsqueeze(0), \n core_input.detach().clone()\n )\n for i in range(B):\n core_mask[i].fill_diagonal_(1)\n core_mask = (core_mask.float().masked_fill(core_mask == 0, float(\"-inf\")).masked_fill(core_mask == 1, float(0.0))).to(device=core_input.device)\n\n core_mask = torch.repeat_interleave(core_mask, self.num_attention_heads, dim=1).reshape(B * self.num_attention_heads, self.inference_unroll_length, self.inference_unroll_length)\n T = core_input.shape[0]\n else:\n core_mask = 
generate_square_subsequent_mask(T, core_input.device)\n\n core_input = self.positional_encoder(core_input)\n core_output = self.core(core_input, core_mask)\n core_output = torch.flatten(core_output, 0, 1)\n\n # -- [B' x A]\n policy_logits = self.policy(core_output)\n\n # -- [B' x 1]\n baseline = self.baseline(core_output)\n\n action = torch.multinomial(F.softmax(policy_logits + 1e-5, dim=1), num_samples=1)\n\n policy_logits = policy_logits.view(T, B, -1)\n baseline = baseline.view(T, B)\n action = action.view(T, B)\n version = torch.ones_like(action) * self.version\n\n\n if not self.training:\n action = action[-1].unsqueeze(0)\n baseline = baseline[-1].unsqueeze(0)\n policy_logits = policy_logits[-1].unsqueeze(0)\n version = version[-1].unsqueeze(0)\n\n output = dict(\n policy_logits=policy_logits,\n baseline=baseline,\n action=action,\n version=version,\n )\n \n return (output, core_state)"
},
{
"identifier": "HierarchicalLSTM",
"path": "models/hierarchical_lstm.py",
"snippet": "class HierarchicalLSTM(nn.Module):\n def __init__(self, shape, action_space, flags, device, num_strategies=13):\n super(HierarchicalLSTM, self).__init__()\n\n self.flags = flags\n self.num_actions = len(action_space)\n self.num_strategies = num_strategies\n\n self.use_prev_action = flags.use_prev_action\n\n self.topline_encoder = TopLineEncoder()\n self.bottomline_encoder = torch.jit.script(BottomLinesEncoder())\n\n pixel_size = flags.pixel_size\n if flags.crop_dim == 0:\n screen_shape = (24 * pixel_size, 80 * pixel_size)\n else:\n screen_shape = (flags.crop_dim * pixel_size, flags.crop_dim * pixel_size)\n\n self.screen_encoder = torch.jit.script(ScreenEncoder(screen_shape))\n\n self.prev_actions_dim = self.num_actions if self.use_prev_action else 0\n\n self.strategy_dim = self.num_strategies\n\n self.h_dim = sum(\n [\n self.topline_encoder.hidden_dim,\n self.bottomline_encoder.hidden_dim,\n self.screen_encoder.hidden_dim,\n self.prev_actions_dim,\n ]\n )\n\n self.policy_hidden_dim = 256\n self.strategy_hidden_dim = 128\n self.hidden_dim = 512\n\n self.strategy_encoder = nn.Linear(self.hidden_dim, self.num_strategies)\n\n self.core = nn.LSTM(self.h_dim, self.hidden_dim, num_layers=1)\n\n self.policies = nn.ModuleDict(\n [[f'{i}', nn.Sequential(nn.Linear(self.hidden_dim, self.policy_hidden_dim),\n nn.ELU(),\n nn.Linear(self.policy_hidden_dim, self.num_actions))] for i in range(self.num_strategies)]\n )\n\n self.baseline = nn.Linear(self.hidden_dim, 1)\n self.version = 0\n self.action_masks = {}\n\n self.gumbel_softmax_tau = 1\n if 'gumbel_softmax_tau' in flags:\n self.gumbel_softmax_tau = flags.gumbel_softmax_tau\n\n def initial_state(self, batch_size=1):\n return tuple(\n torch.zeros(self.core.num_layers, batch_size, self.core.hidden_size)\n for _ in range(2)\n )\n\n def forward(self, inputs, core_state, last_ttyrec_data=None, return_strategywise_logits=False):\n T, B, C, H, W = inputs[\"screen_image\"].shape\n\n topline = inputs[\"tty_chars\"][..., 0, :]\n bottom_line = inputs[\"tty_chars\"][..., -2:, :]\n\n st = [\n self.topline_encoder( topline.float(memory_format=torch.contiguous_format).view(T * B, -1)),\n self.bottomline_encoder(bottom_line.float(memory_format=torch.contiguous_format).view(T * B, -1)),\n self.screen_encoder(inputs[\"screen_image\"].float(memory_format=torch.contiguous_format).view(T * B, C, H, W)),\n ]\n if self.use_prev_action:\n st.append(torch.nn.functional.one_hot(inputs[\"prev_action\"], self.num_actions).view(T * B, -1))\n\n st = torch.cat(st, dim=1)\n\n core_input = st.view(T, B, -1)\n core_output_list = []\n notdone = (~inputs[\"done\"]).float()\n\n for input, nd in zip(core_input.unbind(), notdone.unbind()):\n # Reset core state to zero whenever an episode ended.\n # Make `done` broadcastable with (num_layers, B, hidden_size)\n nd = nd.view(1, -1, 1)\n core_state = tuple(nd * t for t in core_state)\n output, core_state = self.core(input.unsqueeze(0), core_state)\n core_output_list.append(output)\n\n core_output = torch.flatten(torch.cat(core_output_list), 0, 1)\n strategy_logits = self.strategy_encoder(core_output).view(T * B, -1)\n\n all_policy_logits = torch.stack([self.policies[str(i)](core_output) for i in range(self.num_strategies)], axis=0)\n strategies = F.gumbel_softmax(strategy_logits, tau=self.gumbel_softmax_tau, hard=True).bool().unsqueeze(-1).expand((-1, -1, all_policy_logits.shape[-1]))\n out_policy_logits = torch.sum(torch.mul(all_policy_logits, torch.swapaxes(strategies, 0, 1)), axis=0).view(T, B, -1)\n out_action = 
torch.multinomial(F.softmax(out_policy_logits.reshape(T * B, -1), dim=1), num_samples=1).long().view(T, B)\n\n\n # -- [B' x 1]\n baseline = self.baseline(core_output)\n baseline = baseline.view(T, B)\n strategy_logits = strategy_logits.view(T, B, -1)\n\n version = torch.ones_like(out_action) * self.version\n\n output = dict(\n policy_logits=out_policy_logits,\n all_policy_logits=torch.swapaxes(torch.swapaxes(all_policy_logits, 0, 1), 1, 2),\n baseline=baseline,\n action=out_action,\n version=version,\n strategy_logits=strategy_logits,\n )\n\n if return_strategywise_logits:\n output['strategywise_policy_logits'] = all_policy_logits\n\n return (output, core_state)"
},
{
"identifier": "HierarchicalTransformerLSTM",
"path": "models/hierarchical_transformer_lstm.py",
"snippet": "class HierarchicalTransformerLSTM(nn.Module):\n def __init__(self, shape, action_space, flags, device, num_strategies=20):\n super(HierarchicalTransformerLSTM, self).__init__()\n\n self.flags = flags\n self.num_actions = len(action_space)\n self.use_prev_action = flags.use_prev_action\n\n self.topline_encoder = TopLineEncoder()\n self.bottomline_encoder = torch.jit.script(BottomLinesEncoder())\n\n pixel_size = flags.pixel_size\n if flags.crop_dim == 0:\n screen_shape = (24 * pixel_size, 80 * pixel_size)\n else:\n screen_shape = (flags.crop_dim * pixel_size, flags.crop_dim * pixel_size)\n\n self.screen_encoder = torch.jit.script(ScreenEncoder(screen_shape))\n\n ## second copy of encoders\n self.topline_encoder2 = TopLineEncoder()\n self.bottomline_encoder2 = torch.jit.script(BottomLinesEncoder())\n self.screen_encoder2 = torch.jit.script(ScreenEncoder(screen_shape))\n ###\n\n self.prev_actions_dim = 128 if self.use_prev_action else 0\n\n self.h_dim = sum(\n [\n self.topline_encoder.hidden_dim,\n self.bottomline_encoder.hidden_dim,\n self.screen_encoder.hidden_dim,\n self.prev_actions_dim\n ]\n )\n\n self.hidden_dim = 512\n self.policy_hidden_dim = 256\n self.strategy_dim = num_strategies\n \n self.core = nn.LSTM(self.h_dim, self.hidden_dim, num_layers=1)\n self.num_attention_heads = flags.num_attention_heads\n self.num_transformer_encoder_layers = flags.num_transformer_layers\n \n self.hidden_dim = self.h_dim + self.hidden_dim\n core_trnsfrmr_layer = nn.TransformerEncoderLayer(d_model=self.hidden_dim, nhead=self.num_attention_heads, norm_first=True, activation='gelu')\n self.core_trnsfrmr = nn.TransformerEncoder(core_trnsfrmr_layer, num_layers=self.num_transformer_encoder_layers)\n self.positional_encoder = PositionalEncoding(self.hidden_dim)\n\n self.strategy_encoder = nn.Linear(self.hidden_dim, self.strategy_dim)\n\n self.policies = nn.ModuleDict(\n [[f'{i}', nn.Sequential(nn.Linear(self.hidden_dim, self.policy_hidden_dim),\n nn.ELU(),\n nn.Linear(self.policy_hidden_dim, self.num_actions))] for i in range(self.strategy_dim)]\n )\n\n self.baseline = nn.Linear(self.hidden_dim, 1)\n self.version = 0\n self.inference_unroll_length = flags.unroll_length if not 'inference_unroll_length' in flags else flags.inference_unroll_length\n\n self.wrapped = False\n\n def initial_state(self, batch_size=1):\n return (\n torch.zeros(1, batch_size, self.inference_unroll_length, self.inference_unroll_length), # transformer portion 0\n torch.rand(self.inference_unroll_length, batch_size, self.hidden_dim), # transformer portion 1\n torch.zeros(self.core.num_layers, batch_size, self.core.hidden_size), # lstm portion 0\n torch.zeros(self.core.num_layers, batch_size, self.core.hidden_size) # lstm portion 1\n \n \n )\n\n def get_encodings(self, inputs, for_lstm=False):\n T, B, C, H, W = inputs[\"screen_image\"].shape\n\n topline = inputs[\"tty_chars\"][..., 0, :]\n bottom_line = inputs[\"tty_chars\"][..., -2:, :]\n\n if for_lstm or not hasattr(self, 'topline_encoder2'):\n st = [\n self.topline_encoder(\n topline.float(memory_format=torch.contiguous_format).view(T * B, -1)\n ),\n self.bottomline_encoder(\n bottom_line.float(memory_format=torch.contiguous_format).view(T * B, -1)\n ),\n self.screen_encoder(\n inputs[\"screen_image\"]\n .float(memory_format=torch.contiguous_format)\n .view(T * B, C, H, W)\n ),\n ]\n else:\n st = [\n self.topline_encoder2(\n topline.float(memory_format=torch.contiguous_format).view(T * B, -1)\n ),\n self.bottomline_encoder2(\n 
bottom_line.float(memory_format=torch.contiguous_format).view(T * B, -1)\n ),\n self.screen_encoder2(\n inputs[\"screen_image\"]\n .float(memory_format=torch.contiguous_format)\n .view(T * B, C, H, W)\n ),\n ]\n\n if self.use_prev_action:\n st.append(torch.nn.functional.one_hot(inputs[\"prev_action\"], self.prev_actions_dim).view(T * B, -1))\n\n st = torch.cat(st, dim=1)\n return st\n\n\n\n def forward(self, inputs, core_state=None, last_ttyrec_data=None, return_strategywise_logits=False):\n T, B, C, H, W = inputs[\"screen_image\"].shape\n st_lstm = self.get_encodings(inputs, for_lstm=True)\n st_trnsfrmr = self.get_encodings(inputs, for_lstm=False)\n\n T_eff = T\n\n if not last_ttyrec_data is None and self.training:\n last_st_lstm = self.get_encodings(last_ttyrec_data, for_lstm=True)\n last_st_trnsfrmr = self.get_encodings(last_ttyrec_data, for_lstm=False)\n T_eff = T * 2 \n st_lstm = torch.cat([last_st_lstm.reshape(T, B, -1), st_lstm.reshape(T, B, -1)], axis=0).reshape(T_eff * B, -1)\n st_trnsfrmr = torch.cat([last_st_trnsfrmr.reshape(T, B, -1), st_trnsfrmr.reshape(T, B, -1)], axis=0).reshape(T_eff * B, -1)\n self.wrapped = True\n\n c0, c1, c2, c3 = core_state\n trnsfrmr_core_state = c0, c1\n lstm_core_state = c2, c3\n\n lstm_core_input = st_lstm.view(T_eff, B, -1)\n lstm_core_output_list = []\n \n if self.wrapped:\n notdone = torch.cat([(~last_ttyrec_data[\"done\"]).float(), (~inputs[\"done\"]).float()], axis=0)\n else:\n notdone = (~inputs[\"done\"]).float()\n\n for input, nd in zip(lstm_core_input.unbind(), notdone.unbind()):\n # Reset core state to zero whenever an episode ended.\n # Make `done` broadcastable with (num_layers, B, hidden_size)\n nd = nd.view(1, -1, 1)\n lstm_core_state = tuple(nd * t for t in lstm_core_state)\n output, lstm_core_state = self.core(input.unsqueeze(0), lstm_core_state)\n lstm_core_output_list.append(output)\n\n lstm_core_output = torch.flatten(torch.cat(lstm_core_output_list), 0, 1)\n\n st = torch.cat([st_trnsfrmr, lstm_core_output], dim=1)\n\n trnsfrmr_core_input = st.reshape(T_eff, B, -1)\n if not self.training:\n prev_mask, prev_encodings = trnsfrmr_core_state\n prev_mask = prev_mask.squeeze(0)\n trnsfrmr_core_input = torch.cat([prev_encodings[1:], trnsfrmr_core_input], axis=0)\n trnsfrmr_core_mask = torch.stack(\n [torch.cat([torch.cat([prev_mask[i, 1:, 1:], prev_mask[i, -1, 1:].unsqueeze(0)], axis=0) * notdone[-1, i], torch.zeros((self.inference_unroll_length, 1)).to(trnsfrmr_core_input.device)], axis=1) for i in range(B)]\n )\n trnsfrmr_core_mask[:, -1, -1] = 1\n trnsfrmr_core_state = (trnsfrmr_core_mask.detach().clone().unsqueeze(0), \n trnsfrmr_core_input.detach().clone()\n )\n for i in range(B):\n trnsfrmr_core_mask[i].fill_diagonal_(1)\n trnsfrmr_core_mask = (trnsfrmr_core_mask.float().masked_fill(trnsfrmr_core_mask == 0, float(\"-inf\")).masked_fill(trnsfrmr_core_mask == 1, float(0.0))).to(device=trnsfrmr_core_input.device)\n trnsfrmr_core_mask = torch.repeat_interleave(trnsfrmr_core_mask, self.num_attention_heads, dim=1).reshape(B * self.num_attention_heads, self.inference_unroll_length, self.inference_unroll_length)\n T = trnsfrmr_core_input.shape[0]\n elif self.wrapped:\n mask1 = (torch.triu(torch.ones(T_eff, T_eff)) == 1).transpose(0, 1)\n mask2 = F.pad((torch.triu(torch.ones(T, T)) == 1).transpose(0, 1), (0, T, T, 0))\n trnsfrmr_core_mask = mask1.long() + mask2.long()\n trnsfrmr_core_mask[trnsfrmr_core_mask != 1] = 0\n trnsfrmr_core_mask = (trnsfrmr_core_mask.float().masked_fill(trnsfrmr_core_mask == 0, 
float(\"-inf\")).masked_fill(trnsfrmr_core_mask == 1, float(0.0))).to(device=trnsfrmr_core_input.device)\n else:\n trnsfrmr_core_mask = generate_square_subsequent_mask(T, trnsfrmr_core_input.device)\n\n trnsfrmr_core_input = self.positional_encoder(trnsfrmr_core_input)\n trnsfrmr_core_output = self.core_trnsfrmr(trnsfrmr_core_input, trnsfrmr_core_mask)\n\n trnsfrmr_core_output = torch.flatten(trnsfrmr_core_output, 0, 1)\n\n if self.wrapped:\n strategy_logits = self.strategy_encoder(trnsfrmr_core_output).view(2 * T * B, -1)\n else:\n strategy_logits = self.strategy_encoder(trnsfrmr_core_output).view(T * B, -1)\n\n\n all_policy_logits = torch.stack([self.policies[str(i)](trnsfrmr_core_output) for i in range(self.strategy_dim)], axis=0)\n\n\n # -- [B' x 1]\n baseline = self.baseline(trnsfrmr_core_output)\n\n strategy_sample = F.gumbel_softmax(strategy_logits, tau=1.0, hard=True)\n strategies = strategy_sample.bool().unsqueeze(-1).expand((-1, -1, all_policy_logits.shape[-1]))\n\n out_policy_logits = torch.sum(torch.mul(all_policy_logits, torch.swapaxes(strategies, 0, 1)), axis=0)\n action = torch.multinomial(F.softmax(out_policy_logits.reshape(T * B, -1), dim=1), num_samples=1).long()\n\n if self.wrapped:\n out_policy_logits = out_policy_logits.view(2*T, B, -1)[-T:].view(T * B, -1)\n baseline = baseline.view(2*T, B, -1)[-T:].view(T * B, -1)\n strategy_logits = strategy_logits.view(2 * T, B, -1)[-T:].view(T * B, -1)\n all_policy_logits = all_policy_logits.view(self.strategy_dim, 2 * T, B, -1)[:, -T:].view(self.strategy_dim, T * B, -1)\n\n out_policy_logits = out_policy_logits.view(T, B, -1)\n baseline = baseline.view(T, B)\n action = action.view(T, B)\n strategy_logits = strategy_logits.view(T, B, -1)\n strategy = torch.argmax(strategy_logits, axis=-1).long()\n version = torch.ones_like(action) * self.version\n\n\n if not self.training:\n action = action[-1].unsqueeze(0)\n baseline = baseline[-1].unsqueeze(0)\n out_policy_logits = out_policy_logits[-1].unsqueeze(0)\n version = version[-1].unsqueeze(0)\n strategy_logits = strategy_logits[-1].unsqueeze(0)\n strategy = strategy[-1].unsqueeze(0)\n\n output = dict(\n policy_logits=out_policy_logits,\n baseline=baseline,\n action=action,\n strategy=strategy,\n version=version,\n strategy_logits=strategy_logits\n )\n\n if return_strategywise_logits:\n output['strategywise_policy_logits'] = all_policy_logits\n\n c0, c1 = trnsfrmr_core_state\n c2, c3 = lstm_core_state\n\n core_state = (c0, c1, c2, c3)\n\n self.wrapped = False\n return (output, core_state)"
},
{
"identifier": "TransformerLSTM",
"path": "models/transformer_lstm.py",
"snippet": "class TransformerLSTM(nn.Module):\n def __init__(self, shape, action_space, flags, device):\n super(TransformerLSTM, self).__init__()\n\n self.flags = flags\n self.num_actions = len(action_space)\n self.use_prev_action = flags.use_prev_action\n\n self.topline_encoder = TopLineEncoder()\n self.bottomline_encoder = torch.jit.script(BottomLinesEncoder())\n\n pixel_size = flags.pixel_size\n if flags.crop_dim == 0:\n screen_shape = (24 * pixel_size, 80 * pixel_size)\n else:\n screen_shape = (flags.crop_dim * pixel_size, flags.crop_dim * pixel_size)\n\n self.screen_encoder = torch.jit.script(ScreenEncoder(screen_shape))\n\n ## second copy of encoders\n self.topline_encoder2 = TopLineEncoder()\n self.bottomline_encoder2 = torch.jit.script(BottomLinesEncoder())\n self.screen_encoder2 = torch.jit.script(ScreenEncoder(screen_shape))\n ###\n\n self.prev_actions_dim = 128 if self.use_prev_action else 0\n\n self.h_dim = sum(\n [\n self.topline_encoder.hidden_dim,\n self.bottomline_encoder.hidden_dim,\n self.screen_encoder.hidden_dim,\n self.prev_actions_dim,\n ]\n )\n\n self.hidden_dim = 512\n \n self.core = nn.LSTM(self.h_dim, self.hidden_dim, num_layers=1)\n\n self.num_attention_heads = flags.num_attention_heads\n self.num_transformer_encoder_layers = flags.num_transformer_layers\n self.hidden_dim = self.h_dim + self.hidden_dim\n core_trnsfrmr_layer = nn.TransformerEncoderLayer(d_model=self.hidden_dim, nhead=self.num_attention_heads, norm_first=True, activation='gelu')\n self.core_trnsfrmr = nn.TransformerEncoder(core_trnsfrmr_layer, num_layers=self.num_transformer_encoder_layers) # test round 1 uses 4 layers\n self.positional_encoder = PositionalEncoding(self.hidden_dim)\n \n self.policy = nn.Linear(self.hidden_dim, self.num_actions)\n\n self.baseline = nn.Linear(self.hidden_dim, 1)\n self.version = 0\n self.inference_unroll_length = flags.unroll_length if not 'inference_unroll_length' in flags else flags.inference_unroll_length\n\n self.wrapped = False\n\n def initial_state(self, batch_size=1):\n return (\n torch.zeros(1, batch_size, self.inference_unroll_length, self.inference_unroll_length), # transformer portion 0\n torch.rand(self.inference_unroll_length, batch_size, self.hidden_dim), # transformer portion 1\n torch.zeros(self.core.num_layers, batch_size, self.core.hidden_size), # lstm portion 0\n torch.zeros(self.core.num_layers, batch_size, self.core.hidden_size) # lstm portion 1\n \n \n )\n\n def get_encodings(self, inputs, for_lstm=False):\n T, B, C, H, W = inputs[\"screen_image\"].shape\n\n topline = inputs[\"tty_chars\"][..., 0, :]\n bottom_line = inputs[\"tty_chars\"][..., -2:, :]\n\n if for_lstm or not hasattr(self, 'topline_encoder2'):\n st = [\n self.topline_encoder(\n topline.float(memory_format=torch.contiguous_format).view(T * B, -1)\n ),\n self.bottomline_encoder(\n bottom_line.float(memory_format=torch.contiguous_format).view(T * B, -1)\n ),\n self.screen_encoder(\n inputs[\"screen_image\"]\n .float(memory_format=torch.contiguous_format)\n .view(T * B, C, H, W)\n ),\n ]\n else:\n st = [\n self.topline_encoder2(\n topline.float(memory_format=torch.contiguous_format).view(T * B, -1)\n ),\n self.bottomline_encoder2(\n bottom_line.float(memory_format=torch.contiguous_format).view(T * B, -1)\n ),\n self.screen_encoder2(\n inputs[\"screen_image\"]\n .float(memory_format=torch.contiguous_format)\n .view(T * B, C, H, W)\n ),\n ]\n\n if self.use_prev_action:\n st.append(torch.nn.functional.one_hot(inputs[\"prev_action\"], self.prev_actions_dim).view(T * B, -1))\n\n st = 
torch.cat(st, dim=1)\n return st\n\n\n\n def forward(self, inputs, core_state=None, last_ttyrec_data=None):\n T, B, C, H, W = inputs[\"screen_image\"].shape\n st_lstm = self.get_encodings(inputs, for_lstm=True)\n st_trnsfrmr = self.get_encodings(inputs, for_lstm=False)\n\n T_eff = T\n\n if not last_ttyrec_data is None and self.training:\n last_st_lstm = self.get_encodings(last_ttyrec_data, for_lstm=True)\n last_st_trnsfrmr = self.get_encodings(last_ttyrec_data, for_lstm=False)\n T_eff = T * 2 \n st_lstm = torch.cat([last_st_lstm.reshape(T, B, -1), st_lstm.reshape(T, B, -1)], axis=0).reshape(T_eff * B, -1)\n st_trnsfrmr = torch.cat([last_st_trnsfrmr.reshape(T, B, -1), st_trnsfrmr.reshape(T, B, -1)], axis=0).reshape(T_eff * B, -1)\n self.wrapped = True\n\n c0, c1, c2, c3 = core_state\n trnsfrmr_core_state = c0, c1\n lstm_core_state = c2, c3\n\n lstm_core_input = st_lstm.view(T_eff, B, -1)\n lstm_core_output_list = []\n \n if self.wrapped:\n notdone = torch.cat([(~last_ttyrec_data[\"done\"]).float(), (~inputs[\"done\"]).float()], axis=0)\n else:\n notdone = (~inputs[\"done\"]).float()\n\n notdone_mask = torch.ones((T_eff, T_eff)).repeat(B, 1, 1).to(lstm_core_input.device)\n\n i = 0\n for input, nd in zip(lstm_core_input.unbind(), notdone.unbind()):\n # Reset core state to zero whenever an episode ended.\n # Make `done` broadcastable with (num_layers, B, hidden_size)\n nd = nd.view(1, -1, 1)\n lstm_core_state = tuple(nd * t for t in lstm_core_state)\n output, lstm_core_state = self.core(input.unsqueeze(0), lstm_core_state)\n lstm_core_output_list.append(output)\n\n if i < T_eff-1:\n nd = notdone[i].view(-1, 1, 1)\n notdone_mask[:, i+1:, :i+1] *= nd\n\n i += 1\n\n lstm_core_output = torch.flatten(torch.cat(lstm_core_output_list), 0, 1)\n\n st = torch.cat([st_trnsfrmr, lstm_core_output], dim=1)\n\n trnsfrmr_core_input = st.reshape(T_eff, B, -1)\n if not self.training:\n prev_mask, prev_encodings = trnsfrmr_core_state\n prev_mask = prev_mask.squeeze(0)\n trnsfrmr_core_input = torch.cat([prev_encodings[1:], trnsfrmr_core_input], axis=0)\n trnsfrmr_core_mask = torch.stack(\n [torch.cat([torch.cat([prev_mask[i, 1:, 1:], prev_mask[i, -1, 1:].unsqueeze(0)], axis=0) * notdone[-1, i], torch.zeros((self.inference_unroll_length, 1)).to(trnsfrmr_core_input.device)], axis=1) for i in range(B)]\n )\n trnsfrmr_core_mask[:, -1, -1] = 1\n trnsfrmr_core_state = (trnsfrmr_core_mask.detach().clone().unsqueeze(0), \n trnsfrmr_core_input.detach().clone()\n )\n for i in range(B):\n trnsfrmr_core_mask[i].fill_diagonal_(1)\n trnsfrmr_core_mask = (trnsfrmr_core_mask.float().masked_fill(trnsfrmr_core_mask == 0, float(\"-inf\")).masked_fill(trnsfrmr_core_mask == 1, float(0.0))).to(device=trnsfrmr_core_input.device)\n trnsfrmr_core_mask = torch.repeat_interleave(trnsfrmr_core_mask, self.num_attention_heads, dim=1).reshape(B * self.num_attention_heads, self.inference_unroll_length, self.inference_unroll_length)\n T = trnsfrmr_core_input.shape[0]\n elif self.wrapped: \n mask1 = (torch.triu(torch.ones(T_eff, T_eff)) == 1).transpose(0, 1)\n mask2 = F.pad((torch.triu(torch.ones(T, T)) == 1).transpose(0, 1), (0, T, T, 0))\n trnsfrmr_core_mask = mask1.long() + mask2.long()\n trnsfrmr_core_mask[trnsfrmr_core_mask != 1] = 0\n trnsfrmr_core_mask = (trnsfrmr_core_mask.float().masked_fill(trnsfrmr_core_mask == 0, float(\"-inf\")).masked_fill(trnsfrmr_core_mask == 1, float(0.0))).to(device=trnsfrmr_core_input.device)\n else:\n trnsfrmr_core_mask = generate_square_subsequent_mask(T, trnsfrmr_core_input.device)\n\n\n trnsfrmr_core_input 
= self.positional_encoder(trnsfrmr_core_input)\n trnsfrmr_core_output = self.core_trnsfrmr(trnsfrmr_core_input, trnsfrmr_core_mask)\n trnsfrmr_core_output = torch.flatten(trnsfrmr_core_output, 0, 1)\n\n # -- [B' x A]\n policy_logits = self.policy(trnsfrmr_core_output)\n\n # -- [B' x 1]\n baseline = self.baseline(trnsfrmr_core_output)\n\n if self.wrapped:\n policy_logits = policy_logits.view(2*T, B, -1)[-T:].view(T * B, -1)\n baseline = baseline.view(2*T, B, -1)[-T:].view(T * B, -1)\n\n action = torch.multinomial(F.softmax(policy_logits + 1e-5, dim=1), num_samples=1)\n\n policy_logits = policy_logits.view(T, B, -1)\n baseline = baseline.view(T, B)\n action = action.view(T, B)\n version = torch.ones_like(action) * self.version\n\n\n if not self.training:\n action = action[-1].unsqueeze(0)\n baseline = baseline[-1].unsqueeze(0)\n policy_logits = policy_logits[-1].unsqueeze(0)\n version = version[-1].unsqueeze(0)\n\n output = dict(\n policy_logits=policy_logits,\n baseline=baseline,\n action=action,\n version=version,\n )\n\n c0, c1 = trnsfrmr_core_state\n c2, c3 = lstm_core_state\n\n core_state = (c0, c1, c2, c3)\n\n self.wrapped = False\n return (output, core_state)"
}
] | import omegaconf
import os
import pathlib
import pdb
import sys
import torch
from .cdgpt5 import CDGPT5
from .cleaved_hierarchical_policy import CleavedHierarchicalPolicy
from .flat_transformer import FlatTransformer
from .hierarchical_lstm import HierarchicalLSTM
from .hierarchical_transformer_lstm import HierarchicalTransformerLSTM
from .transformer_lstm import TransformerLSTM
from nle.env.base import DUNGEON_SHAPE
from omegaconf import OmegaConf
from tasks import ENVS | 10,738 |
base_path = str(pathlib.Path().resolve())
hihack_path = os.path.join(base_path[:base_path.find('hihack')], 'hihack')
sys.path.insert(0, os.path.join(hihack_path, 'dungeonsdata-neurips2022/experiment_code/hackrl'))
MODELS = [
CDGPT5,
HierarchicalLSTM,
|
base_path = str(pathlib.Path().resolve())
hihack_path = os.path.join(base_path[:base_path.find('hihack')], 'hihack')
sys.path.insert(0, os.path.join(hihack_path, 'dungeonsdata-neurips2022/experiment_code/hackrl'))
MODELS = [
CDGPT5,
HierarchicalLSTM, | HierarchicalTransformerLSTM, | 4 | 2023-10-23 15:44:32+00:00 | 12k |
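Both CleavedHierarchicalPolicy and HierarchicalLSTM in the record above pick one sub-policy per timestep by drawing a hard Gumbel-softmax sample over the strategy logits and using it to mask the stacked per-strategy action logits. The sketch below isolates that gating step; the function name, the exact tensor shapes, and the float (rather than bool) mask are illustrative simplifications, not code from the repository.

import torch
import torch.nn.functional as F

def select_policy_logits(strategy_logits, strategywise_logits, tau=1.0):
    """Gate per-strategy action logits with a hard Gumbel-softmax sample.

    strategy_logits: [T, B, S], strategywise_logits: [S, T * B, A] -> [T, B, A].
    """
    T, B, S = strategy_logits.shape
    A = strategywise_logits.shape[-1]
    # One-hot (hard) sample per (T, B) position over the S strategies.
    one_hot = F.gumbel_softmax(strategy_logits.reshape(T * B, S), tau=tau, hard=True)
    gate = one_hot.transpose(0, 1).unsqueeze(-1)   # [S, T*B, 1]
    out = (strategywise_logits * gate).sum(dim=0)  # keep only the sampled strategy's logits
    return out.view(T, B, A)

# usage: select_policy_logits(torch.randn(4, 2, 13), torch.randn(13, 8, 121)).shape == (4, 2, 121)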
avilliai/Bert_Vits2_Sever | train_ms.py | [
{
"identifier": "TextAudioSpeakerLoader",
"path": "data_utils.py",
"snippet": "class TextAudioSpeakerLoader(torch.utils.data.Dataset):\n \"\"\"\n 1) loads audio, speaker_id, text pairs\n 2) normalizes text and converts them to sequences of integers\n 3) computes spectrograms from audio files.\n \"\"\"\n\n def __init__(self, audiopaths_sid_text, hparams):\n self.audiopaths_sid_text = load_filepaths_and_text(audiopaths_sid_text)\n self.max_wav_value = hparams.max_wav_value\n self.sampling_rate = hparams.sampling_rate\n self.filter_length = hparams.filter_length\n self.hop_length = hparams.hop_length\n self.win_length = hparams.win_length\n self.sampling_rate = hparams.sampling_rate\n self.spk_map = hparams.spk2id\n self.hparams = hparams\n\n self.use_mel_spec_posterior = getattr(hparams, \"use_mel_posterior_encoder\", False)\n if self.use_mel_spec_posterior:\n self.n_mel_channels = getattr(hparams, \"n_mel_channels\", 80)\n\n self.cleaned_text = getattr(hparams, \"cleaned_text\", False)\n\n self.add_blank = hparams.add_blank\n self.min_text_len = getattr(hparams, \"min_text_len\", 1)\n self.max_text_len = getattr(hparams, \"max_text_len\", 300)\n\n random.seed(1234)\n random.shuffle(self.audiopaths_sid_text)\n self._filter()\n\n def _filter(self):\n \"\"\"\n Filter text & store spec lengths\n \"\"\"\n # Store spectrogram lengths for Bucketing\n # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2)\n # spec_length = wav_length // hop_length\n\n audiopaths_sid_text_new = []\n lengths = []\n skipped = 0\n for _id, spk, language, text, phones, tone, word2ph in self.audiopaths_sid_text:\n audiopath = f'{_id}'\n if self.min_text_len <= len(phones) and len(phones) <= self.max_text_len:\n phones = phones.split(\" \")\n tone = [int(i) for i in tone.split(\" \")]\n word2ph = [int(i) for i in word2ph.split(\" \")]\n audiopaths_sid_text_new.append([audiopath, spk, language, text, phones, tone, word2ph])\n lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length))\n else:\n skipped += 1\n print(\"skipped: \", skipped, \", total: \", len(self.audiopaths_sid_text))\n self.audiopaths_sid_text = audiopaths_sid_text_new\n self.lengths = lengths\n\n def get_audio_text_speaker_pair(self, audiopath_sid_text):\n # separate filename, speaker_id and text\n audiopath, sid, language, text, phones, tone, word2ph = audiopath_sid_text\n\n bert, phones, tone, language = self.get_text(text, word2ph, phones, tone, language, audiopath)\n\n spec, wav = self.get_audio(audiopath)\n sid = torch.LongTensor([int(self.spk_map[sid])])\n return (phones, spec, wav, sid, tone, language, bert)\n\n def get_audio(self, filename):\n audio, sampling_rate = load_wav_to_torch(filename)\n if sampling_rate != self.sampling_rate:\n raise ValueError(\"{} {} SR doesn't match target {} SR\".format(\n sampling_rate, self.sampling_rate))\n audio_norm = audio / self.max_wav_value\n audio_norm = audio_norm.unsqueeze(0)\n spec_filename = filename.replace(\".wav\", \".spec.pt\")\n if self.use_mel_spec_posterior:\n spec_filename = spec_filename.replace(\".spec.pt\", \".mel.pt\")\n try:\n spec = torch.load(spec_filename)\n except:\n if self.use_mel_spec_posterior:\n spec = mel_spectrogram_torch(audio_norm, self.filter_length,\n self.n_mel_channels, self.sampling_rate, self.hop_length,\n self.win_length, self.hparams.mel_fmin, self.hparams.mel_fmax, center=False)\n else:\n spec = spectrogram_torch(audio_norm, self.filter_length,\n self.sampling_rate, self.hop_length, self.win_length,\n center=False)\n spec = torch.squeeze(spec, 0)\n torch.save(spec, spec_filename)\n return spec, 
audio_norm\n\n def get_text(self, text, word2ph, phone, tone, language_str, wav_path):\n pold = phone\n w2pho = [i for i in word2ph]\n word2ph = [i for i in word2ph]\n phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str)\n pold2 = phone\n\n if self.add_blank:\n p1 = len(phone)\n phone = commons.intersperse(phone, 0)\n p2 = len(phone)\n t1 = len(tone)\n tone = commons.intersperse(tone, 0)\n t2 = len(tone)\n language = commons.intersperse(language, 0)\n for i in range(len(word2ph)):\n word2ph[i] = word2ph[i] * 2\n word2ph[0] += 1\n bert_path = wav_path.replace(\".wav\", \".bert.pt\")\n try:\n bert = torch.load(bert_path)\n assert bert.shape[-1] == len(phone)\n except:\n bert = get_bert(text, word2ph, language_str)\n torch.save(bert, bert_path)\n #print(bert.shape[-1], bert_path, text, pold)\n assert bert.shape[-1] == len(phone)\n\n assert bert.shape[-1] == len(phone), (\n bert.shape, len(phone), sum(word2ph), p1, p2, t1, t2, pold, pold2, word2ph, text, w2pho)\n phone = torch.LongTensor(phone)\n tone = torch.LongTensor(tone)\n language = torch.LongTensor(language)\n return bert, phone, tone, language\n\n def get_sid(self, sid):\n sid = torch.LongTensor([int(sid)])\n return sid\n\n def __getitem__(self, index):\n return self.get_audio_text_speaker_pair(self.audiopaths_sid_text[index])\n\n def __len__(self):\n return len(self.audiopaths_sid_text)"
},
{
"identifier": "TextAudioSpeakerCollate",
"path": "data_utils.py",
"snippet": "class TextAudioSpeakerCollate():\n \"\"\" Zero-pads model inputs and targets\n \"\"\"\n\n def __init__(self, return_ids=False):\n self.return_ids = return_ids\n\n def __call__(self, batch):\n \"\"\"Collate's training batch from normalized text, audio and speaker identities\n PARAMS\n ------\n batch: [text_normalized, spec_normalized, wav_normalized, sid]\n \"\"\"\n # Right zero-pad all one-hot text sequences to max input length\n _, ids_sorted_decreasing = torch.sort(\n torch.LongTensor([x[1].size(1) for x in batch]),\n dim=0, descending=True)\n\n max_text_len = max([len(x[0]) for x in batch])\n max_spec_len = max([x[1].size(1) for x in batch])\n max_wav_len = max([x[2].size(1) for x in batch])\n\n text_lengths = torch.LongTensor(len(batch))\n spec_lengths = torch.LongTensor(len(batch))\n wav_lengths = torch.LongTensor(len(batch))\n sid = torch.LongTensor(len(batch))\n\n text_padded = torch.LongTensor(len(batch), max_text_len)\n tone_padded = torch.LongTensor(len(batch), max_text_len)\n language_padded = torch.LongTensor(len(batch), max_text_len)\n bert_padded = torch.FloatTensor(len(batch), 1024, max_text_len)\n\n spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len)\n wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)\n text_padded.zero_()\n tone_padded.zero_()\n language_padded.zero_()\n spec_padded.zero_()\n wav_padded.zero_()\n bert_padded.zero_()\n for i in range(len(ids_sorted_decreasing)):\n row = batch[ids_sorted_decreasing[i]]\n\n text = row[0]\n text_padded[i, :text.size(0)] = text\n text_lengths[i] = text.size(0)\n\n spec = row[1]\n spec_padded[i, :, :spec.size(1)] = spec\n spec_lengths[i] = spec.size(1)\n\n wav = row[2]\n wav_padded[i, :, :wav.size(1)] = wav\n wav_lengths[i] = wav.size(1)\n\n sid[i] = row[3]\n\n tone = row[4]\n tone_padded[i, :tone.size(0)] = tone\n\n language = row[5]\n language_padded[i, :language.size(0)] = language\n\n bert = row[6]\n bert_padded[i, :, :bert.size(1)] = bert\n\n return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, sid, tone_padded, language_padded, bert_padded"
},
{
"identifier": "DistributedBucketSampler",
"path": "data_utils.py",
"snippet": "class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler):\n \"\"\"\n Maintain similar input lengths in a batch.\n Length groups are specified by boundaries.\n Ex) boundaries = [b1, b2, b3] -> any batch is included either {x | b1 < length(x) <=b2} or {x | b2 < length(x) <= b3}.\n\n It removes samples which are not included in the boundaries.\n Ex) boundaries = [b1, b2, b3] -> any x s.t. length(x) <= b1 or length(x) > b3 are discarded.\n \"\"\"\n\n def __init__(self, dataset, batch_size, boundaries, num_replicas=None, rank=None, shuffle=True):\n super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)\n self.lengths = dataset.lengths\n self.batch_size = batch_size\n self.boundaries = boundaries\n\n self.buckets, self.num_samples_per_bucket = self._create_buckets()\n self.total_size = sum(self.num_samples_per_bucket)\n self.num_samples = self.total_size // self.num_replicas\n\n def _create_buckets(self):\n buckets = [[] for _ in range(len(self.boundaries) - 1)]\n for i in range(len(self.lengths)):\n length = self.lengths[i]\n idx_bucket = self._bisect(length)\n if idx_bucket != -1:\n buckets[idx_bucket].append(i)\n\n for i in range(len(buckets) - 1, 0, -1):\n if len(buckets[i]) == 0:\n buckets.pop(i)\n self.boundaries.pop(i + 1)\n\n num_samples_per_bucket = []\n for i in range(len(buckets)):\n len_bucket = len(buckets[i])\n total_batch_size = self.num_replicas * self.batch_size\n rem = (total_batch_size - (len_bucket % total_batch_size)) % total_batch_size\n num_samples_per_bucket.append(len_bucket + rem)\n return buckets, num_samples_per_bucket\n\n def __iter__(self):\n # deterministically shuffle based on epoch\n g = torch.Generator()\n g.manual_seed(self.epoch)\n\n indices = []\n if self.shuffle:\n for bucket in self.buckets:\n indices.append(torch.randperm(len(bucket), generator=g).tolist())\n else:\n for bucket in self.buckets:\n indices.append(list(range(len(bucket))))\n\n batches = []\n for i in range(len(self.buckets)):\n bucket = self.buckets[i]\n len_bucket = len(bucket)\n if (len_bucket == 0):\n continue\n ids_bucket = indices[i]\n num_samples_bucket = self.num_samples_per_bucket[i]\n\n # add extra samples to make it evenly divisible\n rem = num_samples_bucket - len_bucket\n ids_bucket = ids_bucket + ids_bucket * (rem // len_bucket) + ids_bucket[:(rem % len_bucket)]\n\n # subsample\n ids_bucket = ids_bucket[self.rank::self.num_replicas]\n\n # batching\n for j in range(len(ids_bucket) // self.batch_size):\n batch = [bucket[idx] for idx in ids_bucket[j * self.batch_size:(j + 1) * self.batch_size]]\n batches.append(batch)\n\n if self.shuffle:\n batch_ids = torch.randperm(len(batches), generator=g).tolist()\n batches = [batches[i] for i in batch_ids]\n self.batches = batches\n\n assert len(self.batches) * self.batch_size == self.num_samples\n return iter(self.batches)\n\n def _bisect(self, x, lo=0, hi=None):\n if hi is None:\n hi = len(self.boundaries) - 1\n\n if hi > lo:\n mid = (hi + lo) // 2\n if self.boundaries[mid] < x and x <= self.boundaries[mid + 1]:\n return mid\n elif x <= self.boundaries[mid]:\n return self._bisect(x, lo, mid)\n else:\n return self._bisect(x, mid + 1, hi)\n else:\n return -1\n\n def __len__(self):\n return self.num_samples // self.batch_size"
},
{
"identifier": "SynthesizerTrn",
"path": "models.py",
"snippet": "class SynthesizerTrn(nn.Module):\n \"\"\"\n Synthesizer for Training\n \"\"\"\n\n def __init__(self,\n n_vocab,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n n_speakers=256,\n gin_channels=256,\n use_sdp=True,\n n_flow_layer = 4,\n n_layers_trans_flow = 3,\n flow_share_parameter = False,\n use_transformer_flow = True,\n **kwargs):\n\n super().__init__()\n self.n_vocab = n_vocab\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.n_speakers = n_speakers\n self.gin_channels = gin_channels\n self.n_layers_trans_flow = n_layers_trans_flow\n self.use_spk_conditioned_encoder = kwargs.get(\"use_spk_conditioned_encoder\", True)\n self.use_sdp = use_sdp\n self.use_noise_scaled_mas = kwargs.get(\"use_noise_scaled_mas\", False)\n self.mas_noise_scale_initial = kwargs.get(\"mas_noise_scale_initial\", 0.01)\n self.noise_scale_delta = kwargs.get(\"noise_scale_delta\", 2e-6)\n self.current_mas_noise_scale = self.mas_noise_scale_initial\n if self.use_spk_conditioned_encoder and gin_channels > 0:\n self.enc_gin_channels = gin_channels\n self.enc_p = TextEncoder(n_vocab,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n gin_channels=self.enc_gin_channels)\n self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates,\n upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels)\n self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16,\n gin_channels=gin_channels)\n if use_transformer_flow:\n self.flow = TransformerCouplingBlock(inter_channels, hidden_channels, filter_channels, n_heads, n_layers_trans_flow, 5, p_dropout, n_flow_layer, gin_channels=gin_channels,share_parameter= flow_share_parameter)\n else:\n self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, n_flow_layer, gin_channels=gin_channels)\n self.sdp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels)\n self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels)\n \n if n_speakers >= 1:\n self.emb_g = nn.Embedding(n_speakers, gin_channels)\n else:\n self.ref_enc = ReferenceEncoder(spec_channels, gin_channels)\n\n def forward(self, x, x_lengths, y, y_lengths, sid, tone, language, bert):\n if self.n_speakers > 0:\n g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]\n else:\n g = self.ref_enc(y.transpose(1,2)).unsqueeze(-1)\n x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, tone, language, bert,g=g)\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n\n with torch.no_grad():\n # negative cross-entropy\n s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t]\n neg_cent1 = torch.sum(-0.5 * math.log(2 * math.pi) - 
logs_p, [1], keepdim=True) # [b, 1, t_s]\n neg_cent2 = torch.matmul(-0.5 * (z_p ** 2).transpose(1, 2),\n s_p_sq_r) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]\n neg_cent3 = torch.matmul(z_p.transpose(1, 2), (m_p * s_p_sq_r)) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]\n neg_cent4 = torch.sum(-0.5 * (m_p ** 2) * s_p_sq_r, [1], keepdim=True) # [b, 1, t_s]\n neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4\n if self.use_noise_scaled_mas:\n epsilon = torch.std(neg_cent) * torch.randn_like(neg_cent) * self.current_mas_noise_scale\n neg_cent = neg_cent + epsilon\n\n attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)\n attn = monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach()\n\n w = attn.sum(2)\n\n l_length_sdp = self.sdp(x, x_mask, w, g=g)\n l_length_sdp = l_length_sdp / torch.sum(x_mask)\n \n logw_ = torch.log(w + 1e-6) * x_mask\n logw = self.dp(x, x_mask, g=g)\n l_length_dp = torch.sum((logw - logw_) ** 2, [1, 2]) / torch.sum(x_mask) # for averaging\n\n l_length = l_length_dp + l_length_sdp\n\n # expand prior\n m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2)\n logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2)\n\n z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size)\n o = self.dec(z_slice, g=g)\n return o, l_length, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q), (x, logw, logw_)\n \n def infer(self, x, x_lengths, sid, tone, language, bert, noise_scale=.667, length_scale=1, noise_scale_w=0.8, max_len=None, sdp_ratio=0,y=None):\n #x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, tone, language, bert)\n # g = self.gst(y)\n if self.n_speakers > 0:\n g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]\n else:\n g = self.ref_enc(y.transpose(1,2)).unsqueeze(-1)\n x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, tone, language, bert,g=g)\n logw = self.sdp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w) * (sdp_ratio) + self.dp(x, x_mask, g=g) * (1 - sdp_ratio)\n w = torch.exp(logw) * x_mask * length_scale\n w_ceil = torch.ceil(w)\n y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()\n y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype)\n attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)\n attn = commons.generate_path(w_ceil, attn_mask)\n\n m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t']\n logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1,\n 2) # [b, t', t], [b, t, d] -> [b, d, t']\n\n z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale\n z = self.flow(z_p, y_mask, g=g, reverse=True)\n o = self.dec((z * y_mask)[:, :, :max_len], g=g)\n return o, attn, y_mask, (z, z_p, m_p, logs_p)"
},
{
"identifier": "MultiPeriodDiscriminator",
"path": "models.py",
"snippet": "class MultiPeriodDiscriminator(torch.nn.Module):\n def __init__(self, use_spectral_norm=False):\n super(MultiPeriodDiscriminator, self).__init__()\n periods = [2, 3, 5, 7, 11]\n\n discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]\n discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods]\n self.discriminators = nn.ModuleList(discs)\n\n def forward(self, y, y_hat):\n y_d_rs = []\n y_d_gs = []\n fmap_rs = []\n fmap_gs = []\n for i, d in enumerate(self.discriminators):\n y_d_r, fmap_r = d(y)\n y_d_g, fmap_g = d(y_hat)\n y_d_rs.append(y_d_r)\n y_d_gs.append(y_d_g)\n fmap_rs.append(fmap_r)\n fmap_gs.append(fmap_g)\n\n return y_d_rs, y_d_gs, fmap_rs, fmap_gs"
},
{
"identifier": "DurationDiscriminator",
"path": "models.py",
"snippet": "class DurationDiscriminator(nn.Module): #vits2\n def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0):\n super().__init__()\n\n self.in_channels = in_channels\n self.filter_channels = filter_channels\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.gin_channels = gin_channels\n\n self.drop = nn.Dropout(p_dropout)\n self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size//2)\n self.norm_1 = modules.LayerNorm(filter_channels)\n self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size//2)\n self.norm_2 = modules.LayerNorm(filter_channels)\n self.dur_proj = nn.Conv1d(1, filter_channels, 1)\n\n self.pre_out_conv_1 = nn.Conv1d(2*filter_channels, filter_channels, kernel_size, padding=kernel_size//2)\n self.pre_out_norm_1 = modules.LayerNorm(filter_channels)\n self.pre_out_conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size//2)\n self.pre_out_norm_2 = modules.LayerNorm(filter_channels)\n\n if gin_channels != 0:\n self.cond = nn.Conv1d(gin_channels, in_channels, 1)\n\n self.output_layer = nn.Sequential(\n nn.Linear(filter_channels, 1), \n nn.Sigmoid() \n )\n\n def forward_probability(self, x, x_mask, dur, g=None):\n dur = self.dur_proj(dur)\n x = torch.cat([x, dur], dim=1)\n x = self.pre_out_conv_1(x * x_mask)\n x = torch.relu(x)\n x = self.pre_out_norm_1(x)\n x = self.drop(x)\n x = self.pre_out_conv_2(x * x_mask)\n x = torch.relu(x)\n x = self.pre_out_norm_2(x)\n x = self.drop(x)\n x = x * x_mask\n x = x.transpose(1, 2)\n output_prob = self.output_layer(x)\n return output_prob\n\n def forward(self, x, x_mask, dur_r, dur_hat, g=None):\n x = torch.detach(x)\n if g is not None:\n g = torch.detach(g)\n x = x + self.cond(g)\n x = self.conv_1(x * x_mask)\n x = torch.relu(x)\n x = self.norm_1(x)\n x = self.drop(x)\n x = self.conv_2(x * x_mask)\n x = torch.relu(x)\n x = self.norm_2(x)\n x = self.drop(x)\n\n output_probs = []\n for dur in [dur_r, dur_hat]:\n output_prob = self.forward_probability(x, x_mask, dur, g)\n output_probs.append(output_prob)\n\n return output_probs"
},
{
"identifier": "generator_loss",
"path": "losses.py",
"snippet": "def generator_loss(disc_outputs):\n loss = 0\n gen_losses = []\n for dg in disc_outputs:\n dg = dg.float()\n l = torch.mean((1-dg)**2)\n gen_losses.append(l)\n loss += l\n\n return loss, gen_losses"
},
{
"identifier": "discriminator_loss",
"path": "losses.py",
"snippet": "def discriminator_loss(disc_real_outputs, disc_generated_outputs):\n loss = 0\n r_losses = []\n g_losses = []\n for dr, dg in zip(disc_real_outputs, disc_generated_outputs):\n dr = dr.float()\n dg = dg.float()\n r_loss = torch.mean((1-dr)**2)\n g_loss = torch.mean(dg**2)\n loss += (r_loss + g_loss)\n r_losses.append(r_loss.item())\n g_losses.append(g_loss.item())\n\n return loss, r_losses, g_losses"
},
{
"identifier": "feature_loss",
"path": "losses.py",
"snippet": "def feature_loss(fmap_r, fmap_g):\n loss = 0\n for dr, dg in zip(fmap_r, fmap_g):\n for rl, gl in zip(dr, dg):\n rl = rl.float().detach()\n gl = gl.float()\n loss += torch.mean(torch.abs(rl - gl))\n\n return loss * 2 "
},
{
"identifier": "kl_loss",
"path": "losses.py",
"snippet": "def kl_loss(z_p, logs_q, m_p, logs_p, z_mask):\n \"\"\"\n z_p, logs_q: [b, h, t_t]\n m_p, logs_p: [b, h, t_t]\n \"\"\"\n z_p = z_p.float()\n logs_q = logs_q.float()\n m_p = m_p.float()\n logs_p = logs_p.float()\n z_mask = z_mask.float()\n\n kl = logs_p - logs_q - 0.5\n kl += 0.5 * ((z_p - m_p)**2) * torch.exp(-2. * logs_p)\n kl = torch.sum(kl * z_mask)\n l = kl / torch.sum(z_mask)\n return l"
},
{
"identifier": "mel_spectrogram_torch",
"path": "mel_processing.py",
"snippet": "def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False):\n if torch.min(y) < -1.:\n print('min value is ', torch.min(y))\n if torch.max(y) > 1.:\n print('max value is ', torch.max(y))\n\n global mel_basis, hann_window\n dtype_device = str(y.dtype) + '_' + str(y.device)\n fmax_dtype_device = str(fmax) + '_' + dtype_device\n wnsize_dtype_device = str(win_size) + '_' + dtype_device\n if fmax_dtype_device not in mel_basis:\n mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)\n mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device)\n if wnsize_dtype_device not in hann_window:\n hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)\n\n y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')\n y = y.squeeze(1)\n\n spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],\n center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False)\n\n spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)\n\n spec = torch.matmul(mel_basis[fmax_dtype_device], spec)\n spec = spectral_normalize_torch(spec)\n\n return spec"
},
{
"identifier": "spec_to_mel_torch",
"path": "mel_processing.py",
"snippet": "def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):\n global mel_basis\n dtype_device = str(spec.dtype) + '_' + str(spec.device)\n fmax_dtype_device = str(fmax) + '_' + dtype_device\n if fmax_dtype_device not in mel_basis:\n mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)\n mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device)\n spec = torch.matmul(mel_basis[fmax_dtype_device], spec)\n spec = spectral_normalize_torch(spec)\n return spec"
},
{
"identifier": "symbols",
"path": "text/symbols.py",
"snippet": ""
}
] | import os
import json
import argparse
import itertools
import math
import torch
import shutil
import torch.multiprocessing as mp
import torch.distributed as dist
import logging
import commons
import utils
from torch import nn, optim
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.cuda.amp import autocast, GradScaler
from tqdm import tqdm
from data_utils import (
TextAudioSpeakerLoader,
TextAudioSpeakerCollate,
DistributedBucketSampler
)
from models import (
SynthesizerTrn,
MultiPeriodDiscriminator,
DurationDiscriminator,
)
from losses import (
generator_loss,
discriminator_loss,
feature_loss,
kl_loss
)
from mel_processing import mel_spectrogram_torch, spec_to_mel_torch
from text.symbols import symbols | 10,618 | _, optim_d, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d,
optim_d, skip_optimizer=not hps.cont)
epoch_str = max(epoch_str, 1)
global_step = (epoch_str - 1) * len(train_loader)
except Exception as e:
print(e)
epoch_str = 1
global_step = 0
else:
_, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(pretrain_dir, "G_*.pth"), net_g,
optim_g, True)
_, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(pretrain_dir, "D_*.pth"), net_d,
optim_d, True)
scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2)
scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2)
if net_dur_disc is not None:
scheduler_dur_disc = torch.optim.lr_scheduler.ExponentialLR(optim_dur_disc, gamma=hps.train.lr_decay, last_epoch=epoch_str-2)
else:
scheduler_dur_disc = None
scaler = GradScaler(enabled=hps.train.fp16_run)
for epoch in range(epoch_str, hps.train.epochs + 1):
if rank == 0:
train_and_evaluate(rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, eval_loader], logger, [writer, writer_eval])
else:
train_and_evaluate(rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, None], None, None)
scheduler_g.step()
scheduler_d.step()
if net_dur_disc is not None:
scheduler_dur_disc.step()
def train_and_evaluate(rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers):
net_g, net_d, net_dur_disc = nets
optim_g, optim_d, optim_dur_disc = optims
scheduler_g, scheduler_d, scheduler_dur_disc = schedulers
train_loader, eval_loader = loaders
if writers is not None:
writer, writer_eval = writers
train_loader.batch_sampler.set_epoch(epoch)
global global_step
net_g.train()
net_d.train()
if net_dur_disc is not None:
net_dur_disc.train()
for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths, speakers, tone, language, bert) in tqdm(enumerate(train_loader)):
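        # Anneal the MAS noise scale linearly with global_step (clamped at 0) when noise-scaled MAS is enabled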
if net_g.module.use_noise_scaled_mas:
current_mas_noise_scale = net_g.module.mas_noise_scale_initial - net_g.module.noise_scale_delta * global_step
net_g.module.current_mas_noise_scale = max(current_mas_noise_scale, 0.0)
x, x_lengths = x.cuda(rank, non_blocking=True), x_lengths.cuda(rank, non_blocking=True)
spec, spec_lengths = spec.cuda(rank, non_blocking=True), spec_lengths.cuda(rank, non_blocking=True)
y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda(rank, non_blocking=True)
speakers = speakers.cuda(rank, non_blocking=True)
tone = tone.cuda(rank, non_blocking=True)
language = language.cuda(rank, non_blocking=True)
bert = bert.cuda(rank, non_blocking=True)
with autocast(enabled=hps.train.fp16_run):
y_hat, l_length, attn, ids_slice, x_mask, z_mask, \
(z, z_p, m_p, logs_p, m_q, logs_q), (hidden_x, logw, logw_) = net_g(x, x_lengths, spec, spec_lengths, speakers, tone, language, bert)
mel = spec_to_mel_torch(
spec,
hps.data.filter_length,
hps.data.n_mel_channels,
hps.data.sampling_rate,
hps.data.mel_fmin,
hps.data.mel_fmax)
y_mel = commons.slice_segments(mel, ids_slice, hps.train.segment_size // hps.data.hop_length)
y_hat_mel = mel_spectrogram_torch(
y_hat.squeeze(1),
hps.data.filter_length,
hps.data.n_mel_channels,
hps.data.sampling_rate,
hps.data.hop_length,
hps.data.win_length,
hps.data.mel_fmin,
hps.data.mel_fmax
)
y = commons.slice_segments(y, ids_slice * hps.data.hop_length, hps.train.segment_size) # slice
# Discriminator
y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach())
with autocast(enabled=False):
loss_disc, losses_disc_r, losses_disc_g = discriminator_loss(y_d_hat_r, y_d_hat_g)
loss_disc_all = loss_disc
if net_dur_disc is not None:
y_dur_hat_r, y_dur_hat_g = net_dur_disc(hidden_x.detach(), x_mask.detach(), logw.detach(), logw_.detach())
with autocast(enabled=False):
                    # TODO: the duration-discriminator loss should be averaged using the mask; for now it is averaged over all elements
loss_dur_disc, losses_dur_disc_r, losses_dur_disc_g = discriminator_loss(y_dur_hat_r, y_dur_hat_g)
loss_dur_disc_all = loss_dur_disc
optim_dur_disc.zero_grad()
scaler.scale(loss_dur_disc_all).backward()
scaler.unscale_(optim_dur_disc)
grad_norm_dur_disc = commons.clip_grad_value_(net_dur_disc.parameters(), None)
scaler.step(optim_dur_disc)
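        # Waveform discriminator update: scale the loss, unscale gradients for value clipping, then step the optimizer under AMP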
optim_d.zero_grad()
scaler.scale(loss_disc_all).backward()
scaler.unscale_(optim_d)
grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None)
scaler.step(optim_d)
with autocast(enabled=hps.train.fp16_run):
# Generator
y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat)
if net_dur_disc is not None:
y_dur_hat_r, y_dur_hat_g = net_dur_disc(hidden_x, x_mask, logw, logw_)
with autocast(enabled=False):
loss_dur = torch.sum(l_length.float())
loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel
loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl
| logging.getLogger('numba').setLevel(logging.WARNING)
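# Enable cuDNN autotuning and TF32 matmul kernels to trade a little numerical precision for training speed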
torch.backends.cudnn.benchmark = True
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
torch.set_float32_matmul_precision('medium')
global_step = 0
def main():
"""Assume Single Node Multi GPUs Training Only"""
assert torch.cuda.is_available(), "CPU training is not allowed."
n_gpus = torch.cuda.device_count()
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '65280'
hps = utils.get_hparams()
if not hps.cont:
shutil.copy('./pretrained_models/D_0.pth','./logs/OUTPUT_MODEL/D_0.pth')
shutil.copy('./pretrained_models/G_0.pth','./logs/OUTPUT_MODEL/G_0.pth')
shutil.copy('./pretrained_models/DUR_0.pth','./logs/OUTPUT_MODEL/DUR_0.pth')
mp.spawn(run, nprocs=n_gpus, args=(n_gpus, hps,))
def run(rank, n_gpus, hps):
global global_step
if rank == 0:
logger = utils.get_logger(hps.model_dir)
logger.info(hps)
utils.check_git_hash(hps.model_dir)
writer = SummaryWriter(log_dir=hps.model_dir)
writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval"))
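    # Initialize the distributed process group: 'gloo' backend on Windows, NCCL otherwise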
dist.init_process_group(backend= 'gloo' if os.name == 'nt' else 'nccl', init_method='env://', world_size=n_gpus, rank=rank)
torch.manual_seed(hps.train.seed)
torch.cuda.set_device(rank)
train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps.data)
train_sampler = DistributedBucketSampler(
train_dataset,
hps.train.batch_size,
[32, 300, 400, 500, 600, 700, 800, 900, 1000],
num_replicas=n_gpus,
rank=rank,
shuffle=True)
collate_fn = TextAudioSpeakerCollate()
train_loader = DataLoader(train_dataset, num_workers=2, shuffle=False, pin_memory=True,
collate_fn=collate_fn, batch_sampler=train_sampler)
if rank == 0:
eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data)
eval_loader = DataLoader(eval_dataset, num_workers=0, shuffle=False,
batch_size=1, pin_memory=True,
drop_last=False, collate_fn=collate_fn)
if "use_noise_scaled_mas" in hps.model.keys() and hps.model.use_noise_scaled_mas == True:
print("Using noise scaled MAS for VITS2")
use_noise_scaled_mas = True
mas_noise_scale_initial = 0.01
noise_scale_delta = 2e-6
else:
print("Using normal MAS for VITS1")
use_noise_scaled_mas = False
mas_noise_scale_initial = 0.0
noise_scale_delta = 0.0
if "use_duration_discriminator" in hps.model.keys() and hps.model.use_duration_discriminator == True:
print("Using duration discriminator for VITS2")
use_duration_discriminator = True
net_dur_disc = DurationDiscriminator(
hps.model.hidden_channels,
hps.model.hidden_channels,
3,
0.1,
gin_channels=hps.model.gin_channels if hps.data.n_speakers != 0 else 0,
        ).cuda(rank)
    else:
        net_dur_disc = None
if "use_spk_conditioned_encoder" in hps.model.keys() and hps.model.use_spk_conditioned_encoder == True:
if hps.data.n_speakers == 0:
raise ValueError("n_speakers must be > 0 when using spk conditioned encoder to train multi-speaker model")
use_spk_conditioned_encoder = True
else:
print("Using normal encoder for VITS1")
use_spk_conditioned_encoder = False
net_g = SynthesizerTrn(
len(symbols),
hps.data.filter_length // 2 + 1,
hps.train.segment_size // hps.data.hop_length,
n_speakers=hps.data.n_speakers,
mas_noise_scale_initial = mas_noise_scale_initial,
noise_scale_delta = noise_scale_delta,
**hps.model).cuda(rank)
freeze_enc = getattr(hps.model, "freeze_enc", False)
if freeze_enc:
print("freeze encoder !!!")
for param in net_g.enc_p.parameters():
param.requires_grad = False
net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank)
optim_g = torch.optim.AdamW(
filter(lambda p: p.requires_grad, net_g.parameters()),
hps.train.learning_rate,
betas=hps.train.betas,
eps=hps.train.eps)
optim_d = torch.optim.AdamW(
net_d.parameters(),
hps.train.learning_rate,
betas=hps.train.betas,
eps=hps.train.eps)
if net_dur_disc is not None:
optim_dur_disc = torch.optim.AdamW(
net_dur_disc.parameters(),
hps.train.learning_rate,
betas=hps.train.betas,
eps=hps.train.eps)
else:
optim_dur_disc = None
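    # Wrap the networks with DistributedDataParallel for multi-GPU training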
net_g = DDP(net_g, device_ids=[rank], find_unused_parameters=True)
net_d = DDP(net_d, device_ids=[rank], find_unused_parameters=True)
if net_dur_disc is not None:
net_dur_disc = DDP(net_dur_disc, device_ids=[rank], find_unused_parameters=True)
pretrain_dir = None
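    # Try to resume from the latest G_/D_/DUR_ checkpoints in hps.model_dir; on failure start from epoch 1, step 0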
if pretrain_dir is None:
try:
if net_dur_disc is not None:
_, optim_dur_disc, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "DUR_*.pth"), net_dur_disc, optim_dur_disc, skip_optimizer=not hps.cont)
_, optim_g, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g,
optim_g, skip_optimizer=not hps.cont)
_, optim_d, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d,
optim_d, skip_optimizer=not hps.cont)
epoch_str = max(epoch_str, 1)
global_step = (epoch_str - 1) * len(train_loader)
except Exception as e:
print(e)
epoch_str = 1
global_step = 0
else:
_, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(pretrain_dir, "G_*.pth"), net_g,
optim_g, True)
_, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(pretrain_dir, "D_*.pth"), net_d,
optim_d, True)
scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2)
scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2)
if net_dur_disc is not None:
scheduler_dur_disc = torch.optim.lr_scheduler.ExponentialLR(optim_dur_disc, gamma=hps.train.lr_decay, last_epoch=epoch_str-2)
else:
scheduler_dur_disc = None
scaler = GradScaler(enabled=hps.train.fp16_run)
for epoch in range(epoch_str, hps.train.epochs + 1):
if rank == 0:
train_and_evaluate(rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, eval_loader], logger, [writer, writer_eval])
else:
train_and_evaluate(rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, None], None, None)
scheduler_g.step()
scheduler_d.step()
if net_dur_disc is not None:
scheduler_dur_disc.step()
def train_and_evaluate(rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers):
net_g, net_d, net_dur_disc = nets
optim_g, optim_d, optim_dur_disc = optims
scheduler_g, scheduler_d, scheduler_dur_disc = schedulers
train_loader, eval_loader = loaders
if writers is not None:
writer, writer_eval = writers
train_loader.batch_sampler.set_epoch(epoch)
global global_step
net_g.train()
net_d.train()
if net_dur_disc is not None:
net_dur_disc.train()
for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths, speakers, tone, language, bert) in tqdm(enumerate(train_loader)):
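        # Anneal the MAS noise scale linearly with global_step (clamped at 0) when noise-scaled MAS is enabled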
if net_g.module.use_noise_scaled_mas:
current_mas_noise_scale = net_g.module.mas_noise_scale_initial - net_g.module.noise_scale_delta * global_step
net_g.module.current_mas_noise_scale = max(current_mas_noise_scale, 0.0)
x, x_lengths = x.cuda(rank, non_blocking=True), x_lengths.cuda(rank, non_blocking=True)
spec, spec_lengths = spec.cuda(rank, non_blocking=True), spec_lengths.cuda(rank, non_blocking=True)
y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda(rank, non_blocking=True)
speakers = speakers.cuda(rank, non_blocking=True)
tone = tone.cuda(rank, non_blocking=True)
language = language.cuda(rank, non_blocking=True)
bert = bert.cuda(rank, non_blocking=True)
with autocast(enabled=hps.train.fp16_run):
y_hat, l_length, attn, ids_slice, x_mask, z_mask, \
(z, z_p, m_p, logs_p, m_q, logs_q), (hidden_x, logw, logw_) = net_g(x, x_lengths, spec, spec_lengths, speakers, tone, language, bert)
mel = spec_to_mel_torch(
spec,
hps.data.filter_length,
hps.data.n_mel_channels,
hps.data.sampling_rate,
hps.data.mel_fmin,
hps.data.mel_fmax)
y_mel = commons.slice_segments(mel, ids_slice, hps.train.segment_size // hps.data.hop_length)
y_hat_mel = mel_spectrogram_torch(
y_hat.squeeze(1),
hps.data.filter_length,
hps.data.n_mel_channels,
hps.data.sampling_rate,
hps.data.hop_length,
hps.data.win_length,
hps.data.mel_fmin,
hps.data.mel_fmax
)
y = commons.slice_segments(y, ids_slice * hps.data.hop_length, hps.train.segment_size) # slice
# Discriminator
y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach())
with autocast(enabled=False):
loss_disc, losses_disc_r, losses_disc_g = discriminator_loss(y_d_hat_r, y_d_hat_g)
loss_disc_all = loss_disc
if net_dur_disc is not None:
y_dur_hat_r, y_dur_hat_g = net_dur_disc(hidden_x.detach(), x_mask.detach(), logw.detach(), logw_.detach())
with autocast(enabled=False):
                    # TODO: the duration-discriminator loss should be averaged using the mask; for now it is averaged over all elements
loss_dur_disc, losses_dur_disc_r, losses_dur_disc_g = discriminator_loss(y_dur_hat_r, y_dur_hat_g)
loss_dur_disc_all = loss_dur_disc
optim_dur_disc.zero_grad()
scaler.scale(loss_dur_disc_all).backward()
scaler.unscale_(optim_dur_disc)
grad_norm_dur_disc = commons.clip_grad_value_(net_dur_disc.parameters(), None)
scaler.step(optim_dur_disc)
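        # Waveform discriminator update: scale the loss, unscale gradients for value clipping, then step the optimizer under AMP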
optim_d.zero_grad()
scaler.scale(loss_disc_all).backward()
scaler.unscale_(optim_d)
grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None)
scaler.step(optim_d)
with autocast(enabled=hps.train.fp16_run):
# Generator
y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat)
if net_dur_disc is not None:
y_dur_hat_r, y_dur_hat_g = net_dur_disc(hidden_x, x_mask, logw, logw_)
with autocast(enabled=False):
loss_dur = torch.sum(l_length.float())
loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel
loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl
| loss_fm = feature_loss(fmap_r, fmap_g) | 8 | 2023-10-23 08:24:12+00:00 | 12k |
t-ega/whatsapp-cloud-sdk | whatsapp_cloud_sdk/bot.py | [
{
"identifier": "CustomHTTPError",
"path": "whatsapp_cloud_sdk/_exceptions/http_error.py",
"snippet": "class CustomHTTPError(Exception):\n \"\"\"\n Represents a custom HTTP error.\n\n This exception class is used to raise custom HTTP errors with\n specific status codes and response text.\n It inherits from the base Exception class.\n\n Attributes:\n status_code (int): The HTTP status code associated with the error.\n response_text (str): The text or message associated with the error response.\n\n Methods:\n __init__(self, status_code, response_text):\n Initializes a new instance of the CustomHTTPError class.\n Args:\n status_code (int): The HTTP status code associated with the error.\n response_text (str): The text or message associated with the error response.\n Returns:\n None.\n \"\"\"\n\n def __init__(self, status_code, response_text):\n self.status_code = status_code\n self.response_text = response_text\n super().__init__(f\"HTTP Error {status_code}: {response_text}\")"
},
{
"identifier": "_BaseApi",
"path": "whatsapp_cloud_sdk/_base_api.py",
"snippet": "class _BaseApi:\n # pylint: disable=line-too-long\n\n \"\"\"\n Base class for interacting with the WhatsApp API.\n\n This class provides essential configuration and authentication parameters for making requests\n to the WhatsApp API. It is meant to be inherited by other classes that will implement\n specific bot functionality.\n\n Attributes:\n WA_URL (str): The base URL for WhatsApp API requests, including the API version\n and phone number ID.\n HEADERS (dict): HTTP headers for API requests, including \"Content-Type\" and \"Authorization\" with the\n Cloud API access token.\n \"\"\"\n\n __cloud_api_access_token = os.getenv(\"CLOUD_API_ACCESS_TOKEN\")\n __wa_phone_number_id = os.getenv(\"WA_PHONE_NUMBER_ID\")\n __version = os.getenv(\"WA_VERSION\")\n WA_URL = f\"https://graph.facebook.com/{__version}/{__wa_phone_number_id}/messages\"\n\n HEADERS = {\n \"Content-Type\": \"application/json\",\n \"Authorization\": f\"Bearer {__cloud_api_access_token}\",\n }\n\n def __init__(\n self,\n cloud_api_access_token: str = None,\n wa_phone_number_id: str = None,\n version: str = \"v17.0\",\n ):\n \"\"\"\n Initialize the BaseApi instance.\n\n Args:\n cloud_api_access_token (str, optional): The Cloud API access token used for authentication,\n if not provided it is replaced with the one defined in the environment variables .\n wa_phone_number_id (str, optional): The WhatsApp phone number ID,\n if not provided it is replaced with the one defined in the environment variable.\n version (str, optional): The WhatsApp API version to use. Default is \"v17.0\",\n if not provided it is replaced with the one defined in the environment variable.\n\n Raises:\n RuntimeError: If neither `cloud_api_access_token` nor `wa_phone_number_id` is provided, and\n there are no corresponding environment variables set, a `RuntimeError` is raised.\n \"\"\"\n\n if not cloud_api_access_token:\n cloud_api_access_token = (self.__cloud_api_access_token,)\n\n if not wa_phone_number_id:\n wa_phone_number_id = (self.__wa_phone_number_id,)\n\n if not version:\n version = self.__version\n\n if not cloud_api_access_token or not wa_phone_number_id:\n raise RuntimeError(\n \"Either pass in your CLOUD_API_ACCESS_TOKEN or WA_PHONE_NUMBER_ID, \"\n \"Or place it in your env file\"\n )\n\n self.__cloud_api_access_token = cloud_api_access_token\n self.__wa_phone_number_id = wa_phone_number_id\n self.__version = version"
},
{
"identifier": "Contact",
"path": "whatsapp_cloud_sdk/_files/contact.py",
"snippet": "class Contact(File):\n \"\"\"\n Represents a contact.\n\n Args:\n name [Name]: The contact's name. This is a required field.\n addresses Optional[List[Address]]: A list of addresses.\n birthday Optional[str]: The contact's birthday.\n emails Optional[List[Email]]: A list of email addresses.\n org Optional[Org]: Organizational information.\n phones Optional[List[Phone]]: A list of phone numbers.\n urls Optional[List[URL]]: A list of URLs.\n\n Attributes:\n name Optional[Name]: The contact's name This field is required.\n addresses Optional[List[Address]]: A list of addresses.\n birthday (Optional[str]): The contact's birthday.\n emails (Optional[List[Email]]): A list of email addresses.\n org (Optional[Org]): Organizational information.\n phones (Optional[List[Phone]]): A list of phone numbers.\n urls (Optional[List[URL]]): A list of URLs.\n\n Methods:\n - de_json(data: Optional[JSONDict]) -> Optional[Contact]: Create a Contact\n object from JSON data.\n \"\"\"\n\n _id_attrs = (\"name\", \"phones\", \"birthday\")\n\n __slots__ = (\n \"name\",\n \"addresses\",\n \"birthday\",\n \"emails\",\n \"org\",\n \"phones\",\n \"urls\",\n )\n\n # pylint: disable=too-many-arguments\n def __init__(\n self,\n name: Union[Name, str],\n addresses: Optional[List[Address]] = None,\n birthday: Optional[str] = None,\n emails: Optional[List[Email]] = None,\n org: Optional[Org] = None,\n phones: Optional[Union[List[Phone], List[str]]] = None,\n urls: Optional[List[URL]] = None,\n ):\n # pylint: disable=fixme\n # TODO: Allow validation using pydantic\n\n # required\n if isinstance(name, str):\n self.name = Name(formatted_name=name, first_name=name)\n elif isinstance(name, Name):\n self.name = name\n else:\n raise TypeError(\n \"Name must either be a string or an instance of the Name class!\"\n )\n\n if isinstance(phones, list):\n for i, phone in enumerate(phones):\n if isinstance(phone, str):\n phones[i] = Phone(phone=phone)\n elif not isinstance(phone, Phone):\n raise TypeError(\n f\"Phone {i} must either be a string or an instance of the Phone class!\"\n )\n else:\n # pylint: disable=line-too-long\n raise ValueError(\n f\"Phones must be of type <class list> of phones class or strings!\\nGot {type(phones)} instead \"\n )\n\n # optional\n self.addresses = addresses\n self.birthday = birthday\n self.emails = emails\n self.org = org\n self.phones = phones\n self.urls = urls\n\n # pylint: disable=too-many-locals\n @classmethod\n def de_json(cls, data: Optional[JSONDict]) -> Optional[\"Contact\"]:\n \"\"\"This class acts as a method for extracting and converting JSON data gotten from\n Whatsapp Cloud API and converting them into internal objects that can be interacted with\n \"\"\"\n\n data = cls.parse_data(data)\n\n if not data:\n return None\n\n addresses = []\n if \"addresses\" in data:\n for address_data in data[\"addresses\"]:\n address = Address(**address_data)\n addresses.append(address)\n\n emails = []\n if \"emails\" in data:\n for email_data in data[\"emails\"]:\n email = Email(**email_data)\n emails.append(email)\n\n name = None\n if \"name\" in data:\n name_data = data[\"name\"]\n name = Name(**name_data)\n\n org = None\n if \"org\" in data:\n org_data = data[\"org\"]\n org = Org(**org_data)\n\n phones = []\n if \"phones\" in data:\n for phone_data in data[\"phones\"]:\n phone = Phone(**phone_data)\n phones.append(phone)\n\n urls = []\n if \"urls\" in data:\n for url_data in data[\"urls\"]:\n url = URL(**url_data)\n urls.append(url)\n\n return cls(\n name=name,\n addresses=addresses,\n 
birthday=data.get(\"birthday\"),\n emails=emails,\n org=org,\n phones=phones,\n urls=urls,\n )"
},
{
"identifier": "MyEncoder",
"path": "whatsapp_cloud_sdk/_utils/json_serializer.py",
"snippet": "class MyEncoder(JSONEncoder):\n \"\"\"Custom JSON encoder for serializing File objects e.g. Message, Audio, Video e.t.c.\n\n This encoder is used to customize the serialization behavior when converting objects\n to JSON format.\n\n Attributes:\n None\n\n Methods:\n default(o): Serialize an object to a JSON-serializable format.\n\n Args:\n o: The object to be serialized.\n\n Returns:\n JSON-serializable representation of the object.\n \"\"\"\n\n def default(self, o):\n \"\"\"Serialize an object to a JSON-serializable format.\n\n This method is called for objects that are not natively serializable by the JSON encoder.\n It checks if the object is an instance of the File class and calls it's to_dict()\n method for serialization.\n\n Args:\n o: The object to be serialized.\n\n Returns:\n JSON-serializable representation of the object.\n \"\"\"\n if isinstance(o, File):\n return o.to_dict()\n\n return super().default(o)"
},
{
"identifier": "TextMessage",
"path": "whatsapp_cloud_sdk/_validators/messages.py",
"snippet": "class TextMessage(BaseModel):\n \"\"\"\n Represents a text message.\n\n Args:\n text (str): The text content of the message.\n message_id (str, optional): An optional message ID.\n recipient_number (str): The recipient's phone number.\n\n Attributes:\n model_config (ConfigDict): Pydantic configuration for this model.\n \"\"\"\n\n model_config = ConfigDict(extra=\"forbid\")\n text: str\n message_id: Optional[str]\n recipient_number: constr(max_length=20, min_length=8)"
},
{
"identifier": "ButtonMessage",
"path": "whatsapp_cloud_sdk/_validators/messages.py",
"snippet": "class ButtonMessage(BaseModel):\n \"\"\"\n Represents a message with buttons.\n\n Args:\n text (str): The text content of the message.\n recipient_number (str): The recipient's phone number.\n buttons (List[ButtonContents]): A list of button contents.\n\n Attributes:\n None\n \"\"\"\n\n text: str\n recipient_number: constr(max_length=12, min_length=8)\n buttons: List[ButtonContents]"
},
{
"identifier": "ButtonContents",
"path": "whatsapp_cloud_sdk/_validators/messages.py",
"snippet": "class ButtonContents(BaseModel):\n \"\"\"\n Represents the contents of a button.\n\n Args:\n id (str, optional): An optional button ID. Defaults to a UUID.\n title (str): The title or label of the button.\n\n Attributes:\n None\n \"\"\"\n\n id: Optional[str] = str(uuid.uuid4())\n title: constr(max_length=20, min_length=1)"
},
{
"identifier": "LinkMessage",
"path": "whatsapp_cloud_sdk/_validators/messages.py",
"snippet": "class LinkMessage(BaseModel):\n \"\"\"\n Represents a message with a link.\n\n Args:\n link (str): The URL link.\n caption (str, optional): An optional caption for the link.\n message_id (str, optional): An optional message ID.\n\n Attributes:\n None\n \"\"\"\n\n link: str\n caption: Optional[str] = None\n message_id: Optional[str] = None"
},
{
"identifier": "LocationMessage",
"path": "whatsapp_cloud_sdk/_validators/messages.py",
"snippet": "class LocationMessage(BaseModel):\n \"\"\"\n Represents a location message.\n\n Args:\n longitude (int): The longitude of the location.\n name (str): The name of the location.\n address (str): The address of the location.\n\n Attributes:\n None\n \"\"\"\n\n longitude: int\n name: str\n address: str"
},
{
"identifier": "MessageFormatter",
"path": "whatsapp_cloud_sdk/_formaters/message_formatter.py",
"snippet": "class MessageFormatter:\n \"\"\"\n Provides methods for formatting messages and data for interaction with the WhatsApp API.\n\n Methods:\n - format_text_message(body: str, to: str, preview_url: bool = False,\n message_id: str = None) -> JSONDict:\n - format_button_message(to: str, text: str, buttons: List[ButtonContents],\n message_id: Optional[str])\n -> JSONDict:\n - format_reply_with_reaction(to: str, emoji, message_id: Optional[str]) -> JSONDict:\n - format_link_message(to: str, link: str, m_type: LinkTypes, caption: str = \"\",\n message_id: str =None\n -> JSONDict:\n - format_send_document_by_url(to: str, document_link: str, caption: str,\n is_reply: bool = False,\n message_id: str = None) -> JSONDict:\n - format_location_message(to: str, latitude: decimal, longitude: int, name: str,\n address: str,\n message_id: Optional[str])\n -> JSONDict:\n - format_contact_message(contact: list, to: str, message_id: Optional[str]) -> JSONDict:\n - format_sticker_message_by_url(link: str, to: str, message_id: Optional[str]) -> JSONDict:\n - mark_message_as_read(message_id: str) -> JSONDict:\n \"\"\"\n\n @staticmethod\n def format_text_message(\n body: str, to: str, preview_url: bool = False, message_id: str = None\n ) -> JSONDict:\n \"\"\"\n Formats a text message for WhatsApp.\n\n Args:\n - body (str): The text message body.\n - to (str): The recipient's WhatsApp number.\n - preview_url (bool, optional): Whether to preview URLs in the message.\n - message_id (str, optional): The ID of the message being replied to.\n\n Returns:\n - JSONDict: The formatted text message.\n \"\"\"\n\n body = {\n \"messaging_product\": \"whatsapp\",\n \"recipient_type\": \"individual\",\n \"to\": to,\n \"type\": \"text\",\n \"text\": {\"preview_url\": preview_url, \"body\": body},\n }\n\n if message_id:\n body[\"context\"] = {\"message_id\": message_id}\n\n return body\n\n @staticmethod\n def format_button_message(\n to: str,\n text: str,\n buttons: List[ButtonContents],\n message_id: Optional[str],\n ) -> JSONDict:\n \"\"\"\n Formats a message with interactive buttons for WhatsApp.\n\n Args:\n - to (str): The recipient's WhatsApp number.\n - text (str): The text message accompanying the buttons.\n - buttons (List[ButtonContents]): List of button contents.\n - message_id (str, optional): The ID of the message being replied to.\n\n Returns:\n - JSONDict: The formatted button message.\n\n \"\"\"\n\n if not isinstance(buttons, ButtonContents):\n raise TypeError(\"Buttons must be an instance of button contents\")\n\n message = {\n \"messaging_product\": \"whatsapp\",\n \"recipient_type\": \"individual\",\n \"to\": to,\n \"type\": \"interactive\",\n \"interactive\": {\n \"type\": \"button\",\n \"body\": {\"text\": text},\n \"action\": {\"buttons\": buttons},\n },\n }\n\n if message_id:\n message[\"context\"] = {\"message_id\": message_id}\n\n return message\n\n @staticmethod\n def format_reply_with_reaction(\n to: str,\n emoji,\n message_id: Optional[str],\n ) -> JSONDict:\n \"\"\"\n Formats a message with interactive buttons for WhatsApp.\n\n Args:\n - to (str): The recipient's WhatsApp number.\n - text (str): The text message accompanying the buttons.\n - buttons (List[ButtonContents]): List of button contents.\n - message_id (str, optional): The ID of the message being replied to.\n\n Returns:\n - JSONDict: The formatted button message.\n \"\"\"\n\n message = {\n \"messaging_product\": \"whatsapp\",\n \"recipient_type\": \"individual\",\n \"to\": to,\n \"type\": \"reaction\",\n \"reaction\": 
{\"message_id\": message_id, \"emoji\": emoji},\n }\n\n if message_id:\n message[\"context\"] = {\"message_id\": message_id}\n\n return message\n\n @staticmethod\n def format_link_message(\n to: str, link: str, m_type: LinkTypes, caption: str = \"\", message_id: str = None\n ) -> JSONDict:\n \"\"\"\n Formats a reaction message with an emoji for WhatsApp.\n\n Args:\n - to (str): The recipient's WhatsApp number.\n - emoji: The emoji representing the reaction.\n - message_id (str, optional): The ID of the message being reacted to.\n\n Returns:\n - JSONDict: The formatted reaction message.\n \"\"\"\n\n message = {\n \"messaging_product\": \"whatsapp\",\n \"recipient_type\": \"individual\",\n \"to\": to,\n \"type\": m_type,\n m_type: {\"link\": link},\n }\n\n if len(caption) > 0:\n message[m_type][\"caption\"] = caption\n\n if message_id:\n message[\"context\"] = {\"message_id\": message_id}\n\n return message\n\n @staticmethod\n def format_send_document_by_url(\n to: str,\n document_link: str,\n caption: str,\n is_reply: bool = False,\n message_id: str = None,\n ) -> JSONDict:\n \"\"\"\n Formats a document message with a link for WhatsApp.\n\n Args:\n - to (str): The recipient's WhatsApp number.\n - document_link (str): The URL of the document to send.\n - caption (str): The caption for the document.\n - is_reply (bool, optional): Indicates if it's a reply message.\n - message_id (str, optional): The ID of the message being replied to.\n\n Returns:\n - JSONDict: The formatted document message.\n \"\"\"\n\n message = {\n \"messaging_product\": \"whatsapp\",\n \"recipient_type\": \"individual\",\n \"to\": to,\n \"type\": \"document\",\n \"document\": {\"link\": document_link, \"caption\": caption},\n }\n\n if is_reply:\n if message_id is None:\n raise ValueError(\"message_id is required for a reply message.\")\n message[\"context\"] = {\"message_id\": message_id}\n\n return message\n\n # pylint: disable=too-many-arguments\n @staticmethod\n def format_location_message(\n to: str,\n latitude: decimal,\n longitude: int,\n name: str,\n address: str,\n message_id: Optional[str],\n ) -> JSONDict:\n \"\"\"\n Formats a location message for WhatsApp.\n\n Args:\n - to (str): The recipient's WhatsApp number.\n - latitude (decimal): The latitude coordinate of the location.\n - longitude (int): The longitude coordinate of the location.\n - name (str): The name of the location.\n - address (str): The address of the location.\n - message_id (str, optional): The ID of the message being replied to.\n\n Returns:\n - JSONDict: The formatted location message.\n \"\"\"\n message = {\n \"messaging_product\": \"whatsapp\",\n \"recipient_type\": \"individual\",\n \"to\": to,\n \"type\": \"location\",\n \"location\": {\n \"latitude\": latitude,\n \"longitude\": longitude,\n \"name\": name,\n \"address\": address,\n },\n }\n\n if message_id:\n message[\"context\"] = {\"message_id\": message_id}\n return message\n\n @staticmethod\n def format_contact_message(\n contacts: list,\n to: str,\n message_id: Optional[str],\n ) -> JSONDict:\n \"\"\"\n Formats a contact message for WhatsApp.\n\n Args:\n - contacts (list): List of contact details (e.g., Name, Phone, Email).\n - to (str): The recipient's WhatsApp number.\n - message_id (str, optional): The ID of the message being replied to.\n\n Returns:\n - JSONDict: The formatted contact message.\n \"\"\"\n message = {\n \"messaging_product\": \"whatsapp\",\n \"to\": to,\n \"type\": \"contacts\",\n \"contacts\": contacts,\n }\n\n if message_id:\n message[\"context\"] = 
{\"message_id\": message_id}\n\n return message\n\n @staticmethod\n def format_sticker_message_by_url(\n link: str,\n to: str,\n message_id: Optional[str],\n ) -> JSONDict:\n \"\"\"\n Formats a sticker message with a link for WhatsApp.\n\n Args:\n - link (str): The URL of the sticker image.\n - to (str): The recipient's WhatsApp number.\n - message_id (str, optional): The ID of the message being replied to.\n\n Returns:\n - JSONDict: The formatted sticker message.\n \"\"\"\n message = {\n \"messaging_product\": \"whatsapp\",\n \"recipient_type\": \"individual\",\n \"to\": to,\n \"type\": \"sticker\",\n \"sticker\": {\"link\": link},\n }\n\n if message_id:\n message[\"context\"] = {\"message_id\": message_id}\n\n return message\n\n @staticmethod\n def mark_message_as_read(message_id: str):\n \"\"\"\n Marks a message as read on WhatsApp.\n\n Args:\n - message_id (str): The ID of the message to mark as read.\n\n Returns:\n - JSONDict: The command to mark the message as read.\n \"\"\"\n return {\n \"messaging_product\": \"whatsapp\",\n \"status\": \"read\",\n \"message_id\": message_id,\n }"
},
{
"identifier": "LinkTypes",
"path": "whatsapp_cloud_sdk/_formaters/message_formatter.py",
"snippet": "class LinkTypes(Enum):\n \"\"\"\n Constants representing different types of links.\n\n Attributes:\n AUDIO (str): A link type for audio content.\n IMAGE (str): A link type for image content.\n VIDEO (str): A link type for video content.\n \"\"\"\n\n AUDIO = \"audio\"\n IMAGE = \"image\"\n VIDEO = \"video\""
}
] | from typing import Optional, List, Dict
from unicodedata import decimal
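# NOTE: this imports unicodedata.decimal (a function), not decimal.Decimal; it is only used in type hints below and is not enforced at runtime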
from whatsapp_cloud_sdk._exceptions.http_error import CustomHTTPError
from whatsapp_cloud_sdk._base_api import _BaseApi
from whatsapp_cloud_sdk._files.contact import Contact
from whatsapp_cloud_sdk._utils.json_serializer import MyEncoder
from whatsapp_cloud_sdk._validators.messages import (
TextMessage,
ButtonMessage,
ButtonContents,
LinkMessage,
LocationMessage,
)
from whatsapp_cloud_sdk._formaters.message_formatter import MessageFormatter, LinkTypes
import json
import requests | 7,558 | return await self.__send(data=payload)
async def send_audio_by_url(
self,
link: str,
recipient_number: str,
message_id: Optional[str],
):
"""
Send an audio file by URL to a recipient.
Args:
link (str): The URL of the audio file.
recipient_number (str): The recipient's WhatsApp phone number.
message_id (str, optional): An optional message ID if it is a reply to a message.
Returns:
            Coroutine: A coroutine that should be awaited; its return value contains
                the response from the WhatsApp API.
"""
message = LinkMessage(link=link)
payload = formatter.format_link_message(
to=recipient_number,
link=message.link,
m_type=LinkTypes.AUDIO,
message_id=message_id,
)
return await self.__send(data=payload)
async def send_document_by_url(
self,
link: str,
caption: Optional[str],
recipient_number: str,
message_id: Optional[str] = None,
):
"""
Send a document by URL to a recipient.
Args:
link (str): The URL of the document.
caption (str, optional): An optional caption for the document.
recipient_number (str): The recipient's WhatsApp phone number.
message_id (str, optional): An optional message ID if it is a reply to a message.
Returns:
            Coroutine: A coroutine that should be awaited; its return value contains
                the response from the WhatsApp API.
"""
message = LinkMessage(
link=link,
caption=caption,
)
payload = formatter.format_send_document_by_url(
to=recipient_number,
document_link=message.link,
caption=message.caption,
message_id=message_id,
)
return await self.__send(data=payload)
async def send_video_by_url(
self,
link: str,
caption: Optional[str],
recipient_number: str,
message_id: Optional[str] = None,
):
"""
Send a video by URL to a recipient.
Args:
link (str): The URL of the video.
caption (str, optional): An optional caption for the video.
recipient_number (str): The recipient's WhatsApp phone number.
message_id (str, optional): An optional message ID if it is a reply to a message.
Returns:
            Coroutine: A coroutine that should be awaited; its return value contains
                the response from the WhatsApp API.
"""
message = LinkMessage(link=link, caption=caption)
payload = formatter.format_link_message(
to=recipient_number,
link=message.link,
m_type=LinkTypes.VIDEO,
caption=message.caption,
message_id=message_id,
)
return await self.__send(data=payload)
# pylint: disable=too-many-arguments
async def send_location(
self,
latitude: decimal,
longitude: int,
name: str,
address: str,
recipient_number: str,
message_id: Optional[str] = None,
):
"""
Send a location to a recipient.
Args:
latitude (decimal): The latitude of the location.
longitude (int): The longitude of the location.
name (str): The name of the location.
address (str): The address of the location.
recipient_number (str): The recipient's WhatsApp phone number.
message_id (str, optional): An optional message ID if it is a reply to a message.
Returns:
            Coroutine: A coroutine that should be awaited; its return value contains
                the response from the WhatsApp API.
"""
| """This module Represents a WhatsApp bot for communication with the WhatsApp API."""
formatter = MessageFormatter()
class Bot(_BaseApi):
# pylint: disable=line-too-long
"""
Represents a WhatsApp bot for communication with the WhatsApp API.
This class inherits from the `BaseApi` class and provides methods for sending various types of
messages, marking messages as read, and handling communication with the WhatsApp API.
Args:
cloud_api_access_token (str, optional): The Cloud API access token used for authentication.
wa_phone_number_id (str, optional): The WhatsApp phone number ID.
version (str, optional): The WhatsApp API version to use.
Inherits attributes from the `BaseApi` class, such as `WA_URL` and `HEADERS`.
Attributes:
Inherits attributes from the `BaseApi` class.
Methods:
- `send_text(text: str, recipient_number: str, message_id: str = None, preview_url: bool = False)`:
Send a text message to a recipient.
- `send_text_with_buttons(text: str, buttons: list, recipient_number: str)`:
Send a text message with buttons to a recipient.
    - `send_reaction_message(message_id: str, emoji: str, recipient_number: str)`:
        Send a reaction to a message.
- `send_image_by_url(link: str, caption: Optional[str], recipient_number: str, message_id: Optional[str])`:
Send an image by URL.
    - `send_audio_by_url(link: str, recipient_number: str, message_id: Optional[str])`:
        Send audio by URL.
- `send_document_by_url(link: str, caption: Optional[str], recipient_number: str)`:
Send a document by URL.
- `send_video_by_url(link: str, caption: Optional[str], recipient_number: str, message_id: Optional[str] = None)
`:
Send a video by URL.
- `send_location(latitude: decimal, longitude: int, name: str, address: str, recipient_number: str)`:
Send a location.
- `send_contact(contact: list, recipient_number: str)`:
Send a contact.
- `send_sticker_with_url(link: str, recipient_number: str)`:
Send a sticker by URL.
- `mark_message_as_read(message_id: str)`:
Mark a message as read.
- `__send(data: dict, method: Optional[str] = "POST") -> dict`:
Send data to the WhatsApp API.
Usage Example:
```
python
from your_library import Bot
# Initialize the bot.
bot = Bot(cloud_api_access_token="your_access_token", wa_phone_number_id="your_phone_number_id", version="v17.0")
# Use bot methods to interact with the WhatsApp API
bot.send_text("Hello, world!", "recipient_number")
```
"""
def __init__(
self,
cloud_api_access_token: str = None,
wa_phone_number_id: str = None,
version: str = None,
):
"""
Initialize a Bot instance for WhatsApp API communication.
Args:
cloud_api_access_token (str, optional): The Cloud API access token used for authentication.
wa_phone_number_id (str, optional): The WhatsApp phone number ID.
version (str, optional): The WhatsApp API version to use.
Inherits attributes from the `BaseApi` class.
"""
super().__init__(
cloud_api_access_token=cloud_api_access_token,
wa_phone_number_id=wa_phone_number_id,
version=version,
)
async def send_text(
self,
text: str,
recipient_number: str,
message_id: str = None,
preview_url: bool = False,
):
"""
Send a text message to a recipient.
Args:
text (str): The text of the message.
recipient_number (str): The recipient's WhatsApp phone number.
message_id (str, optional): The ID of the message if it is a reply to a message (optional).
preview_url (bool): Enable or disable URL preview (default is False).
Returns:
            Coroutine: A coroutine that should be awaited; its return value contains
                the response from the WhatsApp API.
"""
message = TextMessage(
text=text, recipient_number=recipient_number, message_id=message_id
)
payload = formatter.format_text_message(
to=message.recipient_number,
body=message.text,
message_id=message_id,
preview_url=preview_url,
)
return await self.__send(data=payload)
async def send_text_with_buttons(
self,
text: str,
buttons: List[Dict[str, str]],
recipient_number: str,
message_id: Optional[str],
):
"""
Send a text message with buttons to a recipient.
Args:
text (str): The text of the message.
buttons (list): List of buttons, where each button is a dictionary with the following keys:
- 'title' (str): The title or label of the button.
- 'id' (optional, str): An optional id for the button.
recipient_number (str): The recipient's WhatsApp phone number.
message_id (str, optional): An optional message ID if it is a reply to a message.
Returns:
            Coroutine: A coroutine that should be awaited; its return value contains
                the response from the WhatsApp API.
"""
if not isinstance(buttons, list):
raise TypeError("Buttons must be a list of dict object")
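        # Validate and convert each button dict into a ButtonContents model; invalid fields raise a validation error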
buttons_content = [ButtonContents(**b) for b in buttons]
message = ButtonMessage(
text=text, recipient_number=recipient_number, buttons=buttons_content
)
payload = formatter.format_button_message(
to=recipient_number,
text=message.text,
buttons=message.buttons,
message_id=message_id,
)
return await self.__send(data=payload)
# pylint: disable=fixme
# TODO: Add input validation for all bot methods
async def send_reaction_message(
self, message_id: Optional[str], emoji, recipient_number: str
):
"""
Send a reaction message.
Args:
message_id (str, optional): An optional message ID if it is a reply to a message.
emoji (str): The reaction emoji to send.
recipient_number (str): The recipient's WhatsApp phone number.
Returns:
            Coroutine: A coroutine that should be awaited; its return value contains
                the response from the WhatsApp API.
"""
payload = formatter.format_reply_with_reaction(
to=recipient_number, message_id=message_id, emoji=emoji
)
return await self.__send(data=payload)
async def send_image_by_url(
self,
link: str,
caption: Optional[str],
recipient_number: str,
message_id: Optional[str],
):
"""
Send an image by URL to a recipient.
Args:
link (str): The URL of the image.
caption (str, optional): An optional caption for the image.
recipient_number (str): The recipient's WhatsApp phone number.
message_id (str, optional): An optional message ID if it is a reply to a message.
Returns:
            Coroutine: A coroutine that should be awaited; its return value contains
                the response from the WhatsApp API.
"""
message = LinkMessage(link=link, caption=caption)
payload = formatter.format_link_message(
to=recipient_number,
link=message.link,
m_type=LinkTypes.IMAGE,
message_id=message_id,
)
return await self.__send(data=payload)
async def send_audio_by_url(
self,
link: str,
recipient_number: str,
message_id: Optional[str],
):
"""
Send an audio file by URL to a recipient.
Args:
link (str): The URL of the audio file.
recipient_number (str): The recipient's WhatsApp phone number.
message_id (str, optional): An optional message ID if it is a reply to a message.
Returns:
            Coroutine: A coroutine that should be awaited; its return value contains
                the response from the WhatsApp API.
"""
message = LinkMessage(link=link)
payload = formatter.format_link_message(
to=recipient_number,
link=message.link,
m_type=LinkTypes.AUDIO,
message_id=message_id,
)
return await self.__send(data=payload)
async def send_document_by_url(
self,
link: str,
caption: Optional[str],
recipient_number: str,
message_id: Optional[str] = None,
):
"""
Send a document by URL to a recipient.
Args:
link (str): The URL of the document.
caption (str, optional): An optional caption for the document.
recipient_number (str): The recipient's WhatsApp phone number.
message_id (str, optional): An optional message ID if it is a reply to a message.
Returns:
            Coroutine: A coroutine that should be awaited; its return value contains
                the response from the WhatsApp API.
"""
message = LinkMessage(
link=link,
caption=caption,
)
payload = formatter.format_send_document_by_url(
to=recipient_number,
document_link=message.link,
caption=message.caption,
message_id=message_id,
)
return await self.__send(data=payload)
async def send_video_by_url(
self,
link: str,
caption: Optional[str],
recipient_number: str,
message_id: Optional[str] = None,
):
"""
Send a video by URL to a recipient.
Args:
link (str): The URL of the video.
caption (str, optional): An optional caption for the video.
recipient_number (str): The recipient's WhatsApp phone number.
message_id (str, optional): An optional message ID if it is a reply to a message.
Returns:
            Coroutine: A coroutine that should be awaited; its return value contains
                the response from the WhatsApp API.
"""
message = LinkMessage(link=link, caption=caption)
payload = formatter.format_link_message(
to=recipient_number,
link=message.link,
m_type=LinkTypes.VIDEO,
caption=message.caption,
message_id=message_id,
)
return await self.__send(data=payload)
# pylint: disable=too-many-arguments
async def send_location(
self,
latitude: decimal,
longitude: int,
name: str,
address: str,
recipient_number: str,
message_id: Optional[str] = None,
):
"""
Send a location to a recipient.
Args:
latitude (decimal): The latitude of the location.
longitude (int): The longitude of the location.
name (str): The name of the location.
address (str): The address of the location.
recipient_number (str): The recipient's WhatsApp phone number.
message_id (str, optional): An optional message ID if it is a reply to a message.
Returns:
            Coroutine: A coroutine that should be awaited; its return value contains
                the response from the WhatsApp API.
"""
| message = LocationMessage(longitude=longitude, name=name, address=address) | 8 | 2023-10-15 21:12:45+00:00 | 12k |
caglarkucuk/earthformer-satellite-to-radar | ef-sat2rad/earthformer/datasets/sevir/ORG_sevir_torch_wrap.py | [
{
"identifier": "cfg",
"path": "ef-sat2rad/earthformer/config.py",
"snippet": "_CURR_DIR = os.path.realpath(os.path.dirname(os.path.realpath(__file__)))"
},
{
"identifier": "SEVIRDataLoader",
"path": "ef-sat2rad/earthformer/datasets/sevir/sevir_dataloader.py",
"snippet": "class SEVIRDataLoader:\n r\"\"\"\n DataLoader that loads SEVIR sequences, and spilts each event\n into segments according to specified sequence length.\n\n Event Frames:\n [-----------------------raw_seq_len----------------------]\n [-----seq_len-----]\n <--stride-->[-----seq_len-----]\n <--stride-->[-----seq_len-----]\n ...\n \"\"\"\n def __init__(self,\n data_types: Sequence[str] = None,\n seq_len: int = 49,\n raw_seq_len: int = 49,\n sample_mode: str = 'sequent',\n stride: int = 12,\n batch_size: int = 1,\n layout: str = 'NHWT',\n num_shard: int = 1,\n rank: int = 0,\n split_mode: str = \"uneven\",\n sevir_catalog: Union[str, pd.DataFrame] = None,\n sevir_data_dir: str = None,\n start_date: datetime.datetime = None,\n end_date: datetime.datetime = None,\n datetime_filter=None,\n catalog_filter='default',\n shuffle: bool = False,\n shuffle_seed: int = 1,\n output_type=np.float32,\n preprocess: bool = True,\n rescale_method: str = 'sevir', # '01',\n downsample_dict: Dict[str, Sequence[int]] = None,\n verbose: bool = False):\n r\"\"\"\n Parameters\n ----------\n data_types\n A subset of SEVIR_DATA_TYPES.\n seq_len\n The length of the data sequences. Should be smaller than the max length raw_seq_len.\n raw_seq_len\n The length of the raw data sequences.\n sample_mode\n 'random' or 'sequent'\n stride\n Useful when sample_mode == 'sequent'\n stride must not be smaller than out_len to prevent data leakage in testing.\n batch_size\n Number of sequences in one batch.\n layout\n str: consists of batch_size 'N', seq_len 'T', channel 'C', height 'H', width 'W'\n The layout of sampled data. Raw data layout is 'NHWT'.\n valid layout: 'NHWT', 'NTHW', 'NTCHW', 'TNHW', 'TNCHW'.\n num_shard\n Split the whole dataset into num_shard parts for distributed training.\n rank\n Rank of the current process within num_shard.\n split_mode: str\n if 'ceil', all `num_shard` dataloaders have the same length = ceil(total_len / num_shard).\n Different dataloaders may have some duplicated data batches, if the total size of datasets is not divided by num_shard.\n if 'floor', all `num_shard` dataloaders have the same length = floor(total_len / num_shard).\n The last several data batches may be wasted, if the total size of datasets is not divided by num_shard.\n if 'uneven', the last datasets has larger length when the total length is not divided by num_shard.\n The uneven split leads to synchronization error in dist.all_reduce() or dist.barrier().\n See related issue: https://github.com/pytorch/pytorch/issues/33148\n Notice: this also affects the behavior of `self.use_up`.\n sevir_catalog\n Name of SEVIR catalog CSV file.\n sevir_data_dir\n Directory path to SEVIR data.\n start_date\n Start time of SEVIR samples to generate.\n end_date\n End time of SEVIR samples to generate.\n datetime_filter\n function\n Mask function applied to time_utc column of catalog (return true to keep the row).\n Pass function of the form lambda t : COND(t)\n Example: lambda t: np.logical_and(t.dt.hour>=13,t.dt.hour<=21) # Generate only day-time events\n catalog_filter\n function or None or 'default'\n Mask function applied to entire catalog dataframe (return true to keep row).\n Pass function of the form lambda catalog: COND(catalog)\n Example: lambda c: [s[0]=='S' for s in c.id] # Generate only the 'S' events\n shuffle\n bool, If True, data samples are shuffled before each epoch.\n shuffle_seed\n int, Seed to use for shuffling.\n output_type\n np.dtype, dtype of generated tensors\n preprocess\n bool, If True, 
self.preprocess_data_dict(data_dict) is called before each sample generated\n downsample_dict:\n dict, downsample_dict.keys() == data_types. downsample_dict[key] is a Sequence of (t_factor, h_factor, w_factor),\n representing the downsampling factors of all dimensions.\n verbose\n bool, verbose when opening raw data files\n \"\"\"\n super(SEVIRDataLoader, self).__init__()\n if sevir_catalog is None:\n sevir_catalog = SEVIR_CATALOG\n if sevir_data_dir is None:\n sevir_data_dir = SEVIR_DATA_DIR\n if data_types is None:\n data_types = SEVIR_DATA_TYPES\n else:\n assert set(data_types).issubset(SEVIR_DATA_TYPES)\n\n # configs which should not be modified\n self._dtypes = SEVIR_RAW_DTYPES\n self.lght_frame_times = LIGHTING_FRAME_TIMES\n self.data_shape = SEVIR_DATA_SHAPE\n\n self.raw_seq_len = raw_seq_len\n assert seq_len <= self.raw_seq_len, f'seq_len must not be larger than raw_seq_len = {raw_seq_len}, got {seq_len}.'\n self.seq_len = seq_len\n assert sample_mode in ['random', 'sequent'], f'Invalid sample_mode = {sample_mode}, must be \\'random\\' or \\'sequent\\'.'\n self.sample_mode = sample_mode\n self.stride = stride\n self.batch_size = batch_size\n valid_layout = ('NHWT', 'NTHW', 'NTCHW', 'NTHWC', 'TNHW', 'TNCHW')\n if layout not in valid_layout:\n raise ValueError(f'Invalid layout = {layout}! Must be one of {valid_layout}.')\n self.layout = layout\n self.num_shard = num_shard\n self.rank = rank\n valid_split_mode = ('ceil', 'floor', 'uneven')\n if split_mode not in valid_split_mode:\n raise ValueError(f'Invalid split_mode: {split_mode}! Must be one of {valid_split_mode}.')\n self.split_mode = split_mode\n self._samples = None\n self._hdf_files = {}\n self.data_types = data_types\n if isinstance(sevir_catalog, str):\n self.catalog = pd.read_csv(sevir_catalog, parse_dates=['time_utc'], low_memory=False)\n else:\n self.catalog = sevir_catalog\n self.sevir_data_dir = sevir_data_dir\n self.datetime_filter = datetime_filter\n self.catalog_filter = catalog_filter\n self.start_date = start_date\n self.end_date = end_date\n self.shuffle = shuffle\n self.shuffle_seed = int(shuffle_seed)\n self.output_type = output_type\n self.preprocess = preprocess\n self.downsample_dict = downsample_dict\n self.rescale_method = rescale_method\n self.verbose = verbose\n\n if self.start_date is not None:\n self.catalog = self.catalog[self.catalog.time_utc > self.start_date]\n if self.end_date is not None:\n self.catalog = self.catalog[self.catalog.time_utc <= self.end_date]\n if self.datetime_filter:\n self.catalog = self.catalog[self.datetime_filter(self.catalog.time_utc)]\n\n if self.catalog_filter is not None:\n if self.catalog_filter == 'default':\n self.catalog_filter = lambda c: c.pct_missing == 0\n self.catalog = self.catalog[self.catalog_filter(self.catalog)]\n\n self._compute_samples()\n self._open_files(verbose=self.verbose)\n self.reset()\n\n def _compute_samples(self):\n \"\"\"\n Computes the list of samples in catalog to be used. 
This sets self._samples\n \"\"\"\n # locate all events containing colocated data_types\n imgt = self.data_types\n imgts = set(imgt)\n filtcat = self.catalog[ np.logical_or.reduce([self.catalog.img_type==i for i in imgt]) ]\n # remove rows missing one or more requested img_types\n filtcat = filtcat.groupby('id').filter(lambda x: imgts.issubset(set(x['img_type'])))\n # If there are repeated IDs, remove them (this is a bug in SEVIR)\n # TODO: is it necessary to keep one of them instead of deleting them all\n filtcat = filtcat.groupby('id').filter(lambda x: x.shape[0]==len(imgt))\n self._samples = filtcat.groupby('id').apply(lambda df: self._df_to_series(df,imgt) )\n if self.shuffle:\n self.shuffle_samples()\n\n def shuffle_samples(self):\n self._samples = self._samples.sample(frac=1, random_state=self.shuffle_seed)\n\n def _df_to_series(self, df, imgt):\n d = {}\n df = df.set_index('img_type')\n for i in imgt:\n s = df.loc[i]\n idx = s.file_index if i != 'lght' else s.id\n d.update({f'{i}_filename': [s.file_name],\n f'{i}_index': [idx]})\n\n return pd.DataFrame(d)\n\n def _open_files(self, verbose=True):\n \"\"\"\n Opens HDF files\n \"\"\"\n imgt = self.data_types\n hdf_filenames = []\n for t in imgt:\n hdf_filenames += list(np.unique( self._samples[f'{t}_filename'].values ))\n self._hdf_files = {}\n for f in hdf_filenames:\n if verbose:\n print('Opening HDF5 file for reading', f)\n self._hdf_files[f] = h5py.File(self.sevir_data_dir + '/' + f, 'r')\n\n def close(self):\n \"\"\"\n Closes all open file handles\n \"\"\"\n for f in self._hdf_files:\n self._hdf_files[f].close()\n self._hdf_files = {}\n\n @property\n def num_seq_per_event(self):\n return 1 + (self.raw_seq_len - self.seq_len) // self.stride\n\n @property\n def total_num_seq(self):\n \"\"\"\n The total number of sequences within each shard.\n Notice that it is not the product of `self.num_seq_per_event` and `self.total_num_event`.\n \"\"\"\n return int(self.num_seq_per_event * self.num_event)\n\n @property\n def total_num_event(self):\n \"\"\"\n The total number of events in the whole dataset, before split into different shards.\n \"\"\"\n return int(self._samples.shape[0])\n\n @property\n def start_event_idx(self):\n \"\"\"\n The event idx used in certain rank should satisfy event_idx >= start_event_idx\n \"\"\"\n return self.total_num_event // self.num_shard * self.rank\n\n @property\n def end_event_idx(self):\n \"\"\"\n The event idx used in certain rank should satisfy event_idx < end_event_idx\n\n \"\"\"\n if self.split_mode == 'ceil':\n _last_start_event_idx = self.total_num_event // self.num_shard * (self.num_shard - 1)\n _num_event = self.total_num_event - _last_start_event_idx\n return self.start_event_idx + _num_event\n elif self.split_mode == 'floor':\n return self.total_num_event // self.num_shard * (self.rank + 1)\n else: # self.split_mode == 'uneven':\n if self.rank == self.num_shard - 1: # the last process\n return self.total_num_event\n else:\n return self.total_num_event // self.num_shard * (self.rank + 1)\n\n @property\n def num_event(self):\n \"\"\"\n The number of events split into each rank\n \"\"\"\n return self.end_event_idx - self.start_event_idx\n\n def _read_data(self, row, data):\n \"\"\"\n Iteratively read data into data dict. 
Finally data[imgt] gets shape (batch_size, height, width, raw_seq_len).\n\n Parameters\n ----------\n row\n A series with fields IMGTYPE_filename, IMGTYPE_index, IMGTYPE_time_index.\n data\n Dict, data[imgt] is a data tensor with shape = (tmp_batch_size, height, width, raw_seq_len).\n\n Returns\n -------\n data\n Updated data. Updated shape = (tmp_batch_size + 1, height, width, raw_seq_len).\n \"\"\"\n imgtyps = np.unique([x.split('_')[0] for x in list(row.keys())])\n for t in imgtyps:\n fname = row[f'{t}_filename']\n idx = row[f'{t}_index']\n t_slice = slice(0, None)\n # Need to bin lght counts into grid\n if t == 'lght':\n lght_data = self._hdf_files[fname][idx][:]\n data_i = self._lght_to_grid(lght_data, t_slice)\n else:\n data_i = self._hdf_files[fname][t][idx:idx + 1, :, :, t_slice]\n data[t] = np.concatenate((data[t], data_i), axis=0) if (t in data) else data_i\n\n return data\n\n def _lght_to_grid(self, data, t_slice=slice(0, None)):\n \"\"\"\n Converts Nx5 lightning data matrix into a 2D grid of pixel counts\n \"\"\"\n # out_size = (48,48,len(self.lght_frame_times)-1) if isinstance(t_slice,(slice,)) else (48,48)\n out_size = (*self.data_shape['lght'], len(self.lght_frame_times)) if t_slice.stop is None else (*self.data_shape['lght'], 1)\n if data.shape[0] == 0:\n return np.zeros((1,) + out_size, dtype=np.float32)\n\n # filter out points outside the grid\n x, y = data[:, 3], data[:, 4]\n m = np.logical_and.reduce([x >= 0, x < out_size[0], y >= 0, y < out_size[1]])\n data = data[m, :]\n if data.shape[0] == 0:\n return np.zeros((1,) + out_size, dtype=np.float32)\n\n # Filter/separate times\n t = data[:, 0]\n if t_slice.stop is not None: # select only one time bin\n if t_slice.stop > 0:\n if t_slice.stop < len(self.lght_frame_times):\n tm = np.logical_and(t >= self.lght_frame_times[t_slice.stop - 1],\n t < self.lght_frame_times[t_slice.stop])\n else:\n tm = t >= self.lght_frame_times[-1]\n else: # special case: frame 0 uses lght from frame 1\n tm = np.logical_and(t >= self.lght_frame_times[0], t < self.lght_frame_times[1])\n # tm=np.logical_and( (t>=FRAME_TIMES[t_slice],t<FRAME_TIMES[t_slice+1]) )\n\n data = data[tm, :]\n z = np.zeros(data.shape[0], dtype=np.int64)\n else: # compute z coordinate based on bin location times\n z = np.digitize(t, self.lght_frame_times) - 1\n z[z == -1] = 0 # special case: frame 0 uses lght from frame 1\n\n x = data[:, 3].astype(np.int64)\n y = data[:, 4].astype(np.int64)\n\n k = np.ravel_multi_index(np.array([y, x, z]), out_size)\n n = np.bincount(k, minlength=np.prod(out_size))\n return np.reshape(n, out_size).astype(np.int16)[np.newaxis, :]\n\n def _old_save_downsampled_dataset(self, save_dir, downsample_dict, verbose=True):\n \"\"\"\n This method does not save .h5 dataset correctly. 
There are some batches missed due to unknown error.\n E.g., the first converted .h5 file `SEVIR_VIL_RANDOMEVENTS_2017_0501_0831.h5` only has batch_dim = 1414,\n while it should be 1440 in the original .h5 file.\n \"\"\"\n import os\n from skimage.measure import block_reduce\n assert not os.path.exists(save_dir), f\"save_dir {save_dir} already exists!\"\n os.makedirs(save_dir)\n sample_counter = 0\n for index, row in self._samples.iterrows():\n if verbose:\n print(f\"Downsampling {sample_counter}-th data item.\", end='\\r')\n for data_type in self.data_types:\n fname = row[f'{data_type}_filename']\n idx = row[f'{data_type}_index']\n t_slice = slice(0, None)\n if data_type == 'lght':\n lght_data = self._hdf_files[fname][idx][:]\n data_i = self._lght_to_grid(lght_data, t_slice)\n else:\n data_i = self._hdf_files[fname][data_type][idx:idx + 1, :, :, t_slice]\n # Downsample t\n t_slice = [slice(None, None), ] * 4\n t_slice[-1] = slice(None, None, downsample_dict[data_type][0]) # layout = 'NHWT'\n data_i = data_i[tuple(t_slice)]\n # Downsample h, w\n data_i = block_reduce(data_i,\n block_size=(1, *downsample_dict[data_type][1:], 1),\n func=np.max)\n # Save as new .h5 file\n new_file_path = os.path.join(save_dir, fname)\n if not os.path.exists(new_file_path):\n if not os.path.exists(os.path.dirname(new_file_path)):\n os.makedirs(os.path.dirname(new_file_path))\n # Create dataset\n with h5py.File(new_file_path, 'w') as hf:\n hf.create_dataset(\n data_type, data=data_i,\n maxshape=(None, *data_i.shape[1:]))\n else:\n # Append\n with h5py.File(new_file_path, 'a') as hf:\n hf[data_type].resize((hf[data_type].shape[0] + data_i.shape[0]), axis=0)\n hf[data_type][-data_i.shape[0]:] = data_i\n\n sample_counter += 1\n\n def save_downsampled_dataset(self, save_dir, downsample_dict, verbose=True):\n \"\"\"\n Parameters\n ----------\n save_dir\n downsample_dict: Dict[Sequence[int]]\n Notice that this is different from `self.downsample_dict`, which is used during runtime.\n \"\"\"\n import os\n from skimage.measure import block_reduce\n from ...utils.utils import path_splitall\n assert not os.path.exists(save_dir), f\"save_dir {save_dir} already exists!\"\n os.makedirs(save_dir)\n for fname, hdf_file in self._hdf_files.items():\n if verbose:\n print(f\"Downsampling data in {fname}.\")\n data_type = path_splitall(fname)[0]\n if data_type == 'lght':\n # TODO: how to get idx?\n raise NotImplementedError\n # lght_data = self._hdf_files[fname][idx][:]\n # t_slice = slice(0, None)\n # data_i = self._lght_to_grid(lght_data, t_slice)\n else:\n data_i = self._hdf_files[fname][data_type]\n # Downsample t\n t_slice = [slice(None, None), ] * 4\n t_slice[-1] = slice(None, None, downsample_dict[data_type][0]) # layout = 'NHWT'\n data_i = data_i[tuple(t_slice)]\n # Downsample h, w\n data_i = block_reduce(data_i,\n block_size=(1, *downsample_dict[data_type][1:], 1),\n func=np.max)\n # Save as new .h5 file\n new_file_path = os.path.join(save_dir, fname)\n if not os.path.exists(os.path.dirname(new_file_path)):\n os.makedirs(os.path.dirname(new_file_path))\n # Create dataset\n with h5py.File(new_file_path, 'w') as hf:\n hf.create_dataset(\n data_type, data=data_i,\n maxshape=(None, *data_i.shape[1:]))\n\n @property\n def sample_count(self):\n \"\"\"\n Record how many times self.__next__() is called.\n \"\"\"\n return self._sample_count\n\n def inc_sample_count(self):\n self._sample_count += 1\n\n @property\n def curr_event_idx(self):\n return self._curr_event_idx\n\n @property\n def curr_seq_idx(self):\n \"\"\"\n Used only when 
self.sample_mode == 'sequent'\n \"\"\"\n return self._curr_seq_idx\n\n def set_curr_event_idx(self, val):\n self._curr_event_idx = val\n\n def set_curr_seq_idx(self, val):\n \"\"\"\n Used only when self.sample_mode == 'sequent'\n \"\"\"\n self._curr_seq_idx = val\n\n def reset(self, shuffle: bool = None):\n self.set_curr_event_idx(val=self.start_event_idx)\n self.set_curr_seq_idx(0)\n self._sample_count = 0\n if shuffle is None:\n shuffle = self.shuffle\n if shuffle:\n self.shuffle_samples()\n\n def __len__(self):\n \"\"\"\n Used only when self.sample_mode == 'sequent'\n \"\"\"\n return self.total_num_seq // self.batch_size\n\n @property\n def use_up(self):\n \"\"\"\n Check if dataset is used up in 'sequent' mode.\n \"\"\"\n if self.sample_mode == 'random':\n return False\n else: # self.sample_mode == 'sequent'\n # compute the remaining number of sequences in current event\n curr_event_remain_seq = self.num_seq_per_event - self.curr_seq_idx\n all_remain_seq = curr_event_remain_seq + (\n self.end_event_idx - self.curr_event_idx - 1) * self.num_seq_per_event\n if self.split_mode == \"floor\":\n # This approach does not cover all available data, but avoid dealing with masks\n return all_remain_seq < self.batch_size\n else:\n return all_remain_seq <= 0\n\n def _load_event_batch(self, event_idx, event_batch_size):\n \"\"\"\n Loads a selected batch of events (not batch of sequences) into memory.\n\n Parameters\n ----------\n idx\n event_batch_size\n event_batch[i] = all_type_i_available_events[idx:idx + event_batch_size]\n Returns\n -------\n event_batch\n list of event batches.\n event_batch[i] is the event batch of the i-th data type.\n Each event_batch[i] is a np.ndarray with shape = (event_batch_size, height, width, raw_seq_len)\n \"\"\"\n event_idx_slice_end = event_idx + event_batch_size\n pad_size = 0\n if event_idx_slice_end > self.end_event_idx:\n pad_size = event_idx_slice_end - self.end_event_idx\n event_idx_slice_end = self.end_event_idx\n pd_batch = self._samples.iloc[event_idx:event_idx_slice_end]\n data = {}\n for index, row in pd_batch.iterrows():\n data = self._read_data(row, data)\n if pad_size > 0:\n event_batch = []\n for t in self.data_types:\n pad_shape = [pad_size, ] + list(data[t].shape[1:])\n data_pad = np.concatenate((data[t].astype(self.output_type),\n np.zeros(pad_shape, dtype=self.output_type)),\n axis=0)\n event_batch.append(data_pad)\n else:\n event_batch = [data[t].astype(self.output_type) for t in self.data_types]\n return event_batch\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.sample_mode == 'random':\n self.inc_sample_count()\n ret_dict = self._random_sample()\n else:\n if self.use_up:\n raise StopIteration\n else:\n self.inc_sample_count()\n ret_dict = self._sequent_sample()\n ret_dict = self.data_dict_to_tensor(data_dict=ret_dict,\n data_types=self.data_types)\n if self.preprocess:\n ret_dict = self.preprocess_data_dict(data_dict=ret_dict,\n data_types=self.data_types,\n layout=self.layout,\n rescale=self.rescale_method)\n if self.downsample_dict is not None:\n ret_dict = self.downsample_data_dict(data_dict=ret_dict,\n data_types=self.data_types,\n factors_dict=self.downsample_dict,\n layout=self.layout)\n return ret_dict\n\n def __getitem__(self, index):\n data_dict = self._idx_sample(index=index)\n return data_dict\n\n @staticmethod\n def preprocess_data_dict(data_dict, data_types=None, layout='NHWT', rescale='sevir'): # '01'):\n \"\"\"\n Parameters\n ----------\n data_dict: Dict[str, Union[np.ndarray, torch.Tensor]]\n 
data_types: Sequence[str]\n The data types that we want to rescale. This mainly excludes \"mask\" from preprocessing.\n layout: str\n consists of batch_size 'N', seq_len 'T', channel 'C', height 'H', width 'W'\n rescale: str\n 'sevir': use the offsets and scale factors in original implementation.\n '01': scale all values to range 0 to 1, currently only supports 'vil'\n Returns\n -------\n data_dict: Dict[str, Union[np.ndarray, torch.Tensor]]\n preprocessed data\n \"\"\"\n if rescale == 'sevir':\n scale_dict = PREPROCESS_SCALE_SEVIR\n offset_dict = PREPROCESS_OFFSET_SEVIR\n elif rescale == '01':\n scale_dict = PREPROCESS_SCALE_01\n offset_dict = PREPROCESS_OFFSET_01\n else:\n raise ValueError(f'Invalid rescale option: {rescale}.')\n if data_types is None:\n data_types = data_dict.keys()\n for key, data in data_dict.items():\n if key in data_types:\n if isinstance(data, np.ndarray):\n data = scale_dict[key] * (\n data.astype(np.float32) +\n offset_dict[key])\n data = change_layout_np(data=data,\n in_layout='NHWT',\n out_layout=layout)\n elif isinstance(data, torch.Tensor):\n data = scale_dict[key] * (\n data.float() +\n offset_dict[key])\n data = change_layout_torch(data=data,\n in_layout='NHWT',\n out_layout=layout)\n data_dict[key] = data\n return data_dict\n\n @staticmethod\n def process_data_dict_back(data_dict, data_types=None, rescale='sevir'): # '01'):\n \"\"\"\n Parameters\n ----------\n data_dict\n each data_dict[key] is a torch.Tensor.\n rescale\n str:\n 'sevir': data are scaled using the offsets and scale factors in original implementation.\n '01': data are all scaled to range 0 to 1, currently only supports 'vil'\n Returns\n -------\n data_dict\n each data_dict[key] is the data processed back in torch.Tensor.\n \"\"\"\n if rescale == 'sevir':\n scale_dict = PREPROCESS_SCALE_SEVIR\n offset_dict = PREPROCESS_OFFSET_SEVIR\n elif rescale == '01':\n scale_dict = PREPROCESS_SCALE_01\n offset_dict = PREPROCESS_OFFSET_01\n else:\n raise ValueError(f'Invalid rescale option: {rescale}.')\n if data_types is None:\n data_types = data_dict.keys()\n for key in data_types:\n data = data_dict[key]\n data = data.float() / scale_dict[key] - offset_dict[key]\n data_dict[key] = data\n return data_dict\n\n @staticmethod\n def data_dict_to_tensor(data_dict, data_types=None):\n \"\"\"\n Convert each element in data_dict to torch.Tensor (copy without grad).\n \"\"\"\n ret_dict = {}\n if data_types is None:\n data_types = data_dict.keys()\n for key, data in data_dict.items():\n if key in data_types:\n if isinstance(data, torch.Tensor):\n ret_dict[key] = data.detach().clone()\n elif isinstance(data, np.ndarray):\n ret_dict[key] = torch.from_numpy(data)\n else:\n raise ValueError(f\"Invalid data type: {type(data)}. 
Should be torch.Tensor or np.ndarray\")\n else: # key == \"mask\"\n ret_dict[key] = data\n return ret_dict\n\n @staticmethod\n def downsample_data_dict(data_dict, data_types=None, factors_dict=None, layout='NHWT'):\n \"\"\"\n Parameters\n ----------\n data_dict: Dict[str, Union[np.array, torch.Tensor]]\n factors_dict: Optional[Dict[str, Sequence[int]]]\n each element `factors` is a Sequence of int, representing (t_factor, h_factor, w_factor)\n\n Returns\n -------\n downsampled_data_dict: Dict[str, torch.Tensor]\n Modify on a deep copy of data_dict instead of directly modifying the original data_dict\n \"\"\"\n if factors_dict is None:\n factors_dict = {}\n if data_types is None:\n data_types = data_dict.keys()\n downsampled_data_dict = SEVIRDataLoader.data_dict_to_tensor(\n data_dict=data_dict,\n data_types=data_types) # make a copy\n for key, data in data_dict.items():\n factors = factors_dict.get(key, None)\n if factors is not None:\n downsampled_data_dict[key] = change_layout_torch(\n data=downsampled_data_dict[key],\n in_layout=layout,\n out_layout='NTHW')\n # downsample t dimension\n t_slice = [slice(None, None), ] * 4\n t_slice[1] = slice(None, None, factors[0])\n downsampled_data_dict[key] = downsampled_data_dict[key][tuple(t_slice)]\n # downsample spatial dimensions\n downsampled_data_dict[key] = avg_pool2d(\n input=downsampled_data_dict[key],\n kernel_size=(factors[1], factors[2]))\n\n downsampled_data_dict[key] = change_layout_torch(\n data=downsampled_data_dict[key],\n in_layout='NTHW',\n out_layout=layout)\n\n return downsampled_data_dict\n\n def _random_sample(self):\n \"\"\"\n Returns\n -------\n ret_dict\n dict. ret_dict.keys() == self.data_types.\n If self.preprocess == False:\n ret_dict[imgt].shape == (batch_size, height, width, seq_len)\n \"\"\"\n num_sampled = 0\n event_idx_list = nprand.randint(low=self.start_event_idx,\n high=self.end_event_idx,\n size=self.batch_size)\n seq_idx_list = nprand.randint(low=0,\n high=self.num_seq_per_event,\n size=self.batch_size)\n seq_slice_list = [slice(seq_idx * self.stride,\n seq_idx * self.stride + self.seq_len)\n for seq_idx in seq_idx_list]\n ret_dict = {}\n while num_sampled < self.batch_size:\n event = self._load_event_batch(event_idx=event_idx_list[num_sampled],\n event_batch_size=1)\n for imgt_idx, imgt in enumerate(self.data_types):\n sampled_seq = event[imgt_idx][[0, ], :, :, seq_slice_list[num_sampled]] # keep the dim of batch_size for concatenation\n if imgt in ret_dict:\n ret_dict[imgt] = np.concatenate((ret_dict[imgt], sampled_seq),\n axis=0)\n else:\n ret_dict.update({imgt: sampled_seq})\n return ret_dict\n\n def _sequent_sample(self):\n \"\"\"\n Returns\n -------\n ret_dict: Dict\n `ret_dict.keys()` contains `self.data_types`.\n `ret_dict[\"mask\"]` is a list of bool, indicating if the data entry is real or padded.\n If self.preprocess == False:\n ret_dict[imgt].shape == (batch_size, height, width, seq_len)\n \"\"\"\n assert not self.use_up, 'Data loader used up! 
Reset it to reuse.'\n event_idx = self.curr_event_idx\n seq_idx = self.curr_seq_idx\n num_sampled = 0\n sampled_idx_list = [] # list of (event_idx, seq_idx) records\n while num_sampled < self.batch_size:\n sampled_idx_list.append({'event_idx': event_idx,\n 'seq_idx': seq_idx})\n seq_idx += 1\n if seq_idx >= self.num_seq_per_event:\n event_idx += 1\n seq_idx = 0\n num_sampled += 1\n\n start_event_idx = sampled_idx_list[0]['event_idx']\n event_batch_size = sampled_idx_list[-1]['event_idx'] - start_event_idx + 1\n\n event_batch = self._load_event_batch(event_idx=start_event_idx,\n event_batch_size=event_batch_size)\n ret_dict = {\"mask\": []}\n all_no_pad_flag = True\n for sampled_idx in sampled_idx_list:\n batch_slice = [sampled_idx['event_idx'] - start_event_idx, ] # use [] to keepdim\n seq_slice = slice(sampled_idx['seq_idx'] * self.stride,\n sampled_idx['seq_idx'] * self.stride + self.seq_len)\n for imgt_idx, imgt in enumerate(self.data_types):\n sampled_seq = event_batch[imgt_idx][batch_slice, :, :, seq_slice]\n if imgt in ret_dict:\n ret_dict[imgt] = np.concatenate((ret_dict[imgt], sampled_seq),\n axis=0)\n else:\n ret_dict.update({imgt: sampled_seq})\n # add mask\n no_pad_flag = sampled_idx['event_idx'] < self.end_event_idx\n if not no_pad_flag:\n all_no_pad_flag = False\n ret_dict[\"mask\"].append(no_pad_flag)\n if all_no_pad_flag:\n # if there is no padded data items at all, set `ret_dict[\"mask\"] = None` for convenience.\n ret_dict[\"mask\"] = None\n # update current idx\n self.set_curr_event_idx(event_idx)\n self.set_curr_seq_idx(seq_idx)\n return ret_dict\n\n def _idx_sample(self, index):\n \"\"\"\n Parameters\n ----------\n index\n The index of the batch to sample.\n Returns\n -------\n ret_dict\n dict. ret_dict.keys() == self.data_types.\n If self.preprocess == False:\n ret_dict[imgt].shape == (batch_size, height, width, seq_len)\n \"\"\"\n event_idx = (index * self.batch_size) // self.num_seq_per_event\n seq_idx = (index * self.batch_size) % self.num_seq_per_event\n num_sampled = 0\n sampled_idx_list = [] # list of (event_idx, seq_idx) records\n while num_sampled < self.batch_size:\n sampled_idx_list.append({'event_idx': event_idx,\n 'seq_idx': seq_idx})\n seq_idx += 1\n if seq_idx >= self.num_seq_per_event:\n event_idx += 1\n seq_idx = 0\n num_sampled += 1\n\n start_event_idx = sampled_idx_list[0]['event_idx']\n event_batch_size = sampled_idx_list[-1]['event_idx'] - start_event_idx + 1\n\n event_batch = self._load_event_batch(event_idx=start_event_idx,\n event_batch_size=event_batch_size)\n ret_dict = {}\n for sampled_idx in sampled_idx_list:\n batch_slice = [sampled_idx['event_idx'] - start_event_idx, ] # use [] to keepdim\n seq_slice = slice(sampled_idx['seq_idx'] * self.stride,\n sampled_idx['seq_idx'] * self.stride + self.seq_len)\n for imgt_idx, imgt in enumerate(self.data_types):\n sampled_seq = event_batch[imgt_idx][batch_slice, :, :, seq_slice]\n if imgt in ret_dict:\n ret_dict[imgt] = np.concatenate((ret_dict[imgt], sampled_seq),\n axis=0)\n else:\n ret_dict.update({imgt: sampled_seq})\n\n ret_dict = self.data_dict_to_tensor(data_dict=ret_dict,\n data_types=self.data_types)\n if self.preprocess:\n ret_dict = self.preprocess_data_dict(data_dict=ret_dict,\n data_types=self.data_types,\n layout=self.layout,\n rescale=self.rescale_method)\n\n if self.downsample_dict is not None:\n ret_dict = self.downsample_data_dict(data_dict=ret_dict,\n data_types=self.data_types,\n factors_dict=self.downsample_dict,\n layout=self.layout)\n return ret_dict"
}
] | import os
import numpy as np
import datetime
import pandas as pd
import torch
from typing import Union, Dict, Sequence, Tuple, List
from torch.utils.data import Dataset as TorchDataset, DataLoader
from pytorch_lightning import LightningDataModule
from ...config import cfg
from .sevir_dataloader import SEVIRDataLoader | 9,314 |
class SEVIRTorchDataset(TorchDataset):
def __init__(self,
seq_len: int = 25,
raw_seq_len: int = 49,
sample_mode: str = "sequent",
stride: int = 12,
batch_size: int = 1,
layout: str = "NHWT",
num_shard: int = 1,
rank: int = 0,
split_mode: str = "uneven",
sevir_catalog: Union[str, pd.DataFrame] = None,
sevir_data_dir: str = None,
start_date: datetime.datetime = None,
end_date: datetime.datetime = None,
datetime_filter = None,
catalog_filter = "default",
shuffle: bool = False,
shuffle_seed: int = 1,
output_type = np.float32,
preprocess: bool = True,
rescale_method: str = "01",
verbose: bool = False):
super(SEVIRTorchDataset, self).__init__()
self.layout = layout
|
class SEVIRTorchDataset(TorchDataset):
def __init__(self,
seq_len: int = 25,
raw_seq_len: int = 49,
sample_mode: str = "sequent",
stride: int = 12,
batch_size: int = 1,
layout: str = "NHWT",
num_shard: int = 1,
rank: int = 0,
split_mode: str = "uneven",
sevir_catalog: Union[str, pd.DataFrame] = None,
sevir_data_dir: str = None,
start_date: datetime.datetime = None,
end_date: datetime.datetime = None,
datetime_filter = None,
catalog_filter = "default",
shuffle: bool = False,
shuffle_seed: int = 1,
output_type = np.float32,
preprocess: bool = True,
rescale_method: str = "01",
verbose: bool = False):
super(SEVIRTorchDataset, self).__init__()
self.layout = layout | self.sevir_dataloader = SEVIRDataLoader( | 1 | 2023-10-23 11:45:50+00:00 | 12k |
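A note on the record above: the segment arithmetic that SEVIRDataLoader documents in its "Event Frames" diagram is fully determined by raw_seq_len, seq_len and stride (see the num_seq_per_event property and the slice construction in _random_sample / _sequent_sample). The following minimal sketch is not part of the dataset record; it only replays that windowing on a dummy NHWT array so the stride/seq_len interaction can be checked in isolation (toy_event and the printed values are illustrative assumptions).

import numpy as np

# Defaults taken from the SEVIRDataLoader signature in the record above.
raw_seq_len, seq_len, stride = 49, 25, 12

# Same formula as the num_seq_per_event property.
num_seq_per_event = 1 + (raw_seq_len - seq_len) // stride
print(num_seq_per_event)  # 3 with the defaults above

# Dummy event in the raw 'NHWT' layout: (batch, height, width, time).
toy_event = np.arange(raw_seq_len)[None, None, None, :]

for seq_idx in range(num_seq_per_event):
    # Mirrors the slice built in _random_sample / _sequent_sample.
    seq_slice = slice(seq_idx * stride, seq_idx * stride + seq_len)
    segment = toy_event[..., seq_slice]
    # Each segment is seq_len frames long and starts stride frames after the previous one.
    print(seq_idx, segment.shape[-1], int(segment[0, 0, 0, 0]), int(segment[0, 0, 0, -1]))

With seq_len = 25 and stride = 12, the three segments cover frames 0-24, 12-36 and 24-48, matching the overlapping layout sketched in the class docstring.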
DTennant/GPC | data/get_datasets.py | [
{
"identifier": "MergedDataset",
"path": "data/data_utils.py",
"snippet": "class MergedDataset(Dataset):\n\n \"\"\"\n Takes two datasets (labelled_dataset, unlabelled_dataset) and merges them\n Allows you to iterate over them in parallel\n \"\"\"\n\n def __init__(self, labelled_dataset, unlabelled_dataset):\n\n self.labelled_dataset = labelled_dataset\n self.unlabelled_dataset = unlabelled_dataset\n self.target_transform = None\n\n def __getitem__(self, item):\n\n if item < len(self.labelled_dataset):\n img, label, uq_idx = self.labelled_dataset[item]\n labeled_or_not = 1\n\n else:\n\n img, label, uq_idx = self.unlabelled_dataset[item - len(self.labelled_dataset)]\n labeled_or_not = 0\n\n\n return img, label, uq_idx, np.array([labeled_or_not])\n\n def __len__(self):\n return len(self.unlabelled_dataset) + len(self.labelled_dataset)"
},
{
"identifier": "get_cifar_10_datasets",
"path": "data/cifar.py",
"snippet": "def get_cifar_10_datasets(train_transform, test_transform, train_classes=(0, 1, 8, 9),\n prop_train_labels=0.8, split_train_val=False, seed=0):\n\n np.random.seed(seed)\n\n # Init entire training set\n whole_training_set = CustomCIFAR10(root=cifar_10_root, transform=train_transform, train=True)\n\n # Get labelled training set which has subsampled classes, then subsample some indices from that\n train_dataset_labelled = subsample_classes(deepcopy(whole_training_set), include_classes=train_classes)\n subsample_indices = subsample_instances(train_dataset_labelled, prop_indices_to_subsample=prop_train_labels)\n train_dataset_labelled = subsample_dataset(train_dataset_labelled, subsample_indices)\n\n # Split into training and validation sets\n train_idxs, val_idxs = get_train_val_indices(train_dataset_labelled)\n train_dataset_labelled_split = subsample_dataset(deepcopy(train_dataset_labelled), train_idxs)\n val_dataset_labelled_split = subsample_dataset(deepcopy(train_dataset_labelled), val_idxs)\n val_dataset_labelled_split.transform = test_transform\n\n # Get unlabelled data\n unlabelled_indices = set(whole_training_set.uq_idxs) - set(train_dataset_labelled.uq_idxs)\n train_dataset_unlabelled = subsample_dataset(deepcopy(whole_training_set), np.array(list(unlabelled_indices)))\n\n # Get test set for all classes\n test_dataset = CustomCIFAR10(root=cifar_10_root, transform=test_transform, train=False)\n\n # Either split train into train and val or use test set as val\n train_dataset_labelled = train_dataset_labelled_split if split_train_val else train_dataset_labelled\n val_dataset_labelled = val_dataset_labelled_split if split_train_val else None\n\n all_datasets = {\n 'train_labelled': train_dataset_labelled,\n 'train_unlabelled': train_dataset_unlabelled,\n 'val': val_dataset_labelled,\n 'test': test_dataset,\n }\n\n return all_datasets"
},
{
"identifier": "get_cifar_100_datasets",
"path": "data/cifar.py",
"snippet": "def get_cifar_100_datasets(train_transform, test_transform, train_classes=range(80),\n prop_train_labels=0.8, split_train_val=False, seed=0):\n\n np.random.seed(seed)\n\n # Init entire training set\n whole_training_set = CustomCIFAR100(root=cifar_100_root, transform=train_transform, train=True)\n\n # Get labelled training set which has subsampled classes, then subsample some indices from that\n train_dataset_labelled = subsample_classes(deepcopy(whole_training_set), include_classes=train_classes)\n subsample_indices = subsample_instances(train_dataset_labelled, prop_indices_to_subsample=prop_train_labels)\n train_dataset_labelled = subsample_dataset(train_dataset_labelled, subsample_indices)\n\n # Split into training and validation sets\n train_idxs, val_idxs = get_train_val_indices(train_dataset_labelled)\n train_dataset_labelled_split = subsample_dataset(deepcopy(train_dataset_labelled), train_idxs)\n val_dataset_labelled_split = subsample_dataset(deepcopy(train_dataset_labelled), val_idxs)\n val_dataset_labelled_split.transform = test_transform\n\n # Get unlabelled data\n unlabelled_indices = set(whole_training_set.uq_idxs) - set(train_dataset_labelled.uq_idxs)\n train_dataset_unlabelled = subsample_dataset(deepcopy(whole_training_set), np.array(list(unlabelled_indices)))\n\n # Get test set for all classes\n test_dataset = CustomCIFAR100(root=cifar_100_root, transform=test_transform, train=False)\n\n # Either split train into train and val or use test set as val\n train_dataset_labelled = train_dataset_labelled_split if split_train_val else train_dataset_labelled\n val_dataset_labelled = val_dataset_labelled_split if split_train_val else None\n\n all_datasets = {\n 'train_labelled': train_dataset_labelled,\n 'train_unlabelled': train_dataset_unlabelled,\n 'val': val_dataset_labelled,\n 'test': test_dataset,\n }\n\n return all_datasets"
},
{
"identifier": "get_cifar_100_ucd_datasets",
"path": "data/cifar.py",
"snippet": "def get_cifar_100_ucd_datasets(train_transform, test_transform, labelled_classes=range(50), unlabelled_classes=range(25, 100),\n prop_train_labels=0.8, split_train_val=False, seed=0):\n\n np.random.seed(seed)\n\n # Init entire training set\n whole_training_set = CustomCIFAR100(root=cifar_100_root, transform=train_transform, train=True, download=True)\n\n # Get labelled training set which has subsampled classes, then subsample some indices from that\n train_dataset_labelled = subsample_classes(deepcopy(whole_training_set), include_classes=labelled_classes)\n subsample_indices = subsample_instances(train_dataset_labelled, prop_indices_to_subsample=prop_train_labels)\n train_dataset_labelled = subsample_dataset(train_dataset_labelled, subsample_indices)\n\n # Split into training and validation sets\n train_idxs, val_idxs = get_train_val_indices(train_dataset_labelled)\n train_dataset_labelled_split = subsample_dataset(deepcopy(train_dataset_labelled), train_idxs)\n val_dataset_labelled_split = subsample_dataset(deepcopy(train_dataset_labelled), val_idxs)\n val_dataset_labelled_split.transform = test_transform\n\n # Get unlabelled data\n train_dataset_unlabelled = subsample_classes(deepcopy(whole_training_set), include_classes=unlabelled_classes)\n unlabelled_indices = set(train_dataset_unlabelled.uq_idxs) - set(train_dataset_labelled.uq_idxs)\n train_dataset_unlabelled = subsample_dataset(deepcopy(whole_training_set), np.array(list(unlabelled_indices)))\n # unlabelled_indices = set(whole_training_set.uq_idxs) - set(train_dataset_labelled.uq_idxs)\n # train_dataset_unlabelled = subsample_dataset(deepcopy(whole_training_set), np.array(list(unlabelled_indices)))\n\n # Get test set for all classes\n test_dataset = CustomCIFAR100(root=cifar_100_root, transform=test_transform, train=False)\n\n # Either split train into train and val or use test set as val\n train_dataset_labelled = train_dataset_labelled_split if split_train_val else train_dataset_labelled\n val_dataset_labelled = val_dataset_labelled_split if split_train_val else None\n\n all_datasets = {\n 'train_labelled': train_dataset_labelled,\n 'train_unlabelled': train_dataset_unlabelled,\n 'val': val_dataset_labelled,\n 'test': test_dataset,\n }\n\n return all_datasets"
},
{
"identifier": "get_herbarium_datasets",
"path": "data/herbarium_19.py",
"snippet": "def get_herbarium_datasets(train_transform, test_transform, train_classes=range(500), prop_train_labels=0.8,\n seed=0, split_train_val=False):\n\n np.random.seed(seed)\n\n # Init entire training set\n train_dataset = HerbariumDataset19(transform=train_transform,\n root=os.path.join(herbarium_dataroot, 'small-train'))\n\n # Get labelled training set which has subsampled classes, then subsample some indices from that\n # TODO: Subsampling unlabelled set in uniform random fashion from training data, will contain many instances of dominant class\n train_dataset_labelled = subsample_classes(deepcopy(train_dataset), include_classes=train_classes)\n subsample_indices = subsample_instances(train_dataset_labelled, prop_indices_to_subsample=prop_train_labels)\n train_dataset_labelled = subsample_dataset(train_dataset_labelled, subsample_indices)\n\n # Split into training and validation sets\n if split_train_val:\n\n train_idxs, val_idxs = get_train_val_indices(train_dataset_labelled,\n val_instances_per_class=5)\n train_dataset_labelled_split = subsample_dataset(deepcopy(train_dataset_labelled), train_idxs)\n val_dataset_labelled_split = subsample_dataset(deepcopy(train_dataset_labelled), val_idxs)\n val_dataset_labelled_split.transform = test_transform\n\n else:\n\n train_dataset_labelled_split, val_dataset_labelled_split = None, None\n\n # Get unlabelled data\n unlabelled_indices = set(train_dataset.uq_idxs) - set(train_dataset_labelled.uq_idxs)\n train_dataset_unlabelled = subsample_dataset(deepcopy(train_dataset), np.array(list(unlabelled_indices)))\n\n # Get test dataset\n test_dataset = HerbariumDataset19(transform=test_transform,\n root=os.path.join(herbarium_dataroot, 'small-validation'))\n\n # Transform dict\n unlabelled_classes = list(set(train_dataset.targets) - set(train_classes))\n target_xform_dict = {}\n for i, k in enumerate(list(train_classes) + unlabelled_classes):\n target_xform_dict[k] = i\n\n test_dataset.target_transform = lambda x: target_xform_dict[x]\n train_dataset_unlabelled.target_transform = lambda x: target_xform_dict[x]\n\n # Either split train into train and val or use test set as val\n train_dataset_labelled = train_dataset_labelled_split if split_train_val else train_dataset_labelled\n val_dataset_labelled = val_dataset_labelled_split if split_train_val else None\n\n all_datasets = {\n 'train_labelled': train_dataset_labelled,\n 'train_unlabelled': train_dataset_unlabelled,\n 'val': val_dataset_labelled,\n 'test': test_dataset,\n }\n\n return all_datasets"
},
{
"identifier": "get_scars_datasets",
"path": "data/stanford_cars.py",
"snippet": "def get_scars_datasets(train_transform, test_transform, train_classes=range(160), prop_train_labels=0.8,\n split_train_val=False, seed=0):\n\n np.random.seed(seed)\n\n # Init entire training set\n whole_training_set = CarsDataset(data_dir=car_root, transform=train_transform, metas=meta_default_path, train=True)\n\n # Get labelled training set which has subsampled classes, then subsample some indices from that\n train_dataset_labelled = subsample_classes(deepcopy(whole_training_set), include_classes=train_classes)\n subsample_indices = subsample_instances(train_dataset_labelled, prop_indices_to_subsample=prop_train_labels)\n train_dataset_labelled = subsample_dataset(train_dataset_labelled, subsample_indices)\n\n # Split into training and validation sets\n train_idxs, val_idxs = get_train_val_indices(train_dataset_labelled)\n train_dataset_labelled_split = subsample_dataset(deepcopy(train_dataset_labelled), train_idxs)\n val_dataset_labelled_split = subsample_dataset(deepcopy(train_dataset_labelled), val_idxs)\n val_dataset_labelled_split.transform = test_transform\n\n # Get unlabelled data\n unlabelled_indices = set(whole_training_set.uq_idxs) - set(train_dataset_labelled.uq_idxs)\n train_dataset_unlabelled = subsample_dataset(deepcopy(whole_training_set), np.array(list(unlabelled_indices)))\n\n # Get test set for all classes\n test_dataset = CarsDataset(data_dir=car_root, transform=test_transform, metas=meta_default_path, train=False)\n\n # Either split train into train and val or use test set as val\n train_dataset_labelled = train_dataset_labelled_split if split_train_val else train_dataset_labelled\n val_dataset_labelled = val_dataset_labelled_split if split_train_val else None\n\n all_datasets = {\n 'train_labelled': train_dataset_labelled,\n 'train_unlabelled': train_dataset_unlabelled,\n 'val': val_dataset_labelled,\n 'test': test_dataset,\n }\n\n return all_datasets"
},
{
"identifier": "get_imagenet_100_datasets",
"path": "data/imagenet.py",
"snippet": "def get_imagenet_100_datasets(train_transform, test_transform, train_classes=range(80),\n prop_train_labels=0.8, split_train_val=False, seed=0):\n\n np.random.seed(seed)\n\n # Subsample imagenet dataset initially to include 100 classes\n subsampled_100_classes = np.random.choice(range(1000), size=(100,), replace=False)\n subsampled_100_classes = np.sort(subsampled_100_classes)\n print(f'Constructing ImageNet-100 dataset from the following classes: {subsampled_100_classes.tolist()}')\n cls_map = {i: j for i, j in zip(subsampled_100_classes, range(100))}\n\n # Init entire training set\n imagenet_training_set = ImageNetBase(root=os.path.join(imagenet_root, 'train'), transform=train_transform)\n whole_training_set = subsample_classes(imagenet_training_set, include_classes=subsampled_100_classes)\n\n # Reset dataset\n whole_training_set.samples = [(s[0], cls_map[s[1]]) for s in whole_training_set.samples]\n whole_training_set.targets = [s[1] for s in whole_training_set.samples]\n whole_training_set.uq_idxs = np.array(range(len(whole_training_set)))\n whole_training_set.target_transform = None\n\n # Get labelled training set which has subsampled classes, then subsample some indices from that\n train_dataset_labelled = subsample_classes(deepcopy(whole_training_set), include_classes=train_classes)\n subsample_indices = subsample_instances(train_dataset_labelled, prop_indices_to_subsample=prop_train_labels)\n train_dataset_labelled = subsample_dataset(train_dataset_labelled, subsample_indices)\n\n # Split into training and validation sets\n train_idxs, val_idxs = get_train_val_indices(train_dataset_labelled)\n train_dataset_labelled_split = subsample_dataset(deepcopy(train_dataset_labelled), train_idxs)\n val_dataset_labelled_split = subsample_dataset(deepcopy(train_dataset_labelled), val_idxs)\n val_dataset_labelled_split.transform = test_transform\n\n # Get unlabelled data\n unlabelled_indices = set(whole_training_set.uq_idxs) - set(train_dataset_labelled.uq_idxs)\n train_dataset_unlabelled = subsample_dataset(deepcopy(whole_training_set), np.array(list(unlabelled_indices)))\n\n # Get test set for all classes\n test_dataset = ImageNetBase(root=os.path.join(imagenet_root, 'val'), transform=test_transform)\n test_dataset = subsample_classes(test_dataset, include_classes=subsampled_100_classes)\n\n # Reset test set\n test_dataset.samples = [(s[0], cls_map[s[1]]) for s in test_dataset.samples]\n test_dataset.targets = [s[1] for s in test_dataset.samples]\n test_dataset.uq_idxs = np.array(range(len(test_dataset)))\n test_dataset.target_transform = None\n\n # Either split train into train and val or use test set as val\n train_dataset_labelled = train_dataset_labelled_split if split_train_val else train_dataset_labelled\n val_dataset_labelled = val_dataset_labelled_split if split_train_val else None\n\n all_datasets = {\n 'train_labelled': train_dataset_labelled,\n 'train_unlabelled': train_dataset_unlabelled,\n 'val': val_dataset_labelled,\n 'test': test_dataset,\n }\n\n return all_datasets"
},
{
"identifier": "get_imagenet_ucd_100_datasets",
"path": "data/imagenet.py",
"snippet": "def get_imagenet_ucd_100_datasets(train_transform, test_transform, labelled_classes=range(50),\n unlabelled_classes=range(25,100), prop_train_labels=0.5, split_train_val=False, seed=0):\n\n np.random.seed(seed)\n\n # Subsample imagenet dataset initially to include 100 classes\n # TODO: this place is not changing the args\n subsampled_100_classes = np.random.choice(range(1000), size=(100,), replace=False)\n subsampled_100_classes = np.sort(subsampled_100_classes)\n print(f'Constructing ImageNet-100 dataset from the following classes: {subsampled_100_classes.tolist()}')\n cls_map = {i: j for i, j in zip(subsampled_100_classes, range(100))}\n\n # Init entire training set\n imagenet_training_set = ImageNetBase(root=os.path.join(imagenet_root, 'train'), transform=train_transform)\n whole_training_set = subsample_classes(imagenet_training_set, include_classes=subsampled_100_classes)\n\n # Reset dataset\n whole_training_set.samples = [(s[0], cls_map[s[1]]) for s in whole_training_set.samples]\n whole_training_set.targets = [s[1] for s in whole_training_set.samples]\n whole_training_set.uq_idxs = np.array(range(len(whole_training_set)))\n whole_training_set.target_transform = None\n\n # Get labelled training set which has subsampled classes, then subsample some indices from that\n train_dataset_labelled = subsample_classes(deepcopy(whole_training_set), include_classes=labelled_classes)\n subsample_indices = subsample_instances(train_dataset_labelled, prop_indices_to_subsample=prop_train_labels)\n train_dataset_labelled = subsample_dataset(train_dataset_labelled, subsample_indices)\n\n # Split into training and validation sets\n train_idxs, val_idxs = get_train_val_indices(train_dataset_labelled)\n train_dataset_labelled_split = subsample_dataset(deepcopy(train_dataset_labelled), train_idxs)\n val_dataset_labelled_split = subsample_dataset(deepcopy(train_dataset_labelled), val_idxs)\n val_dataset_labelled_split.transform = test_transform\n\n # Get unlabelled data\n train_dataset_unlabelled = subsample_classes(deepcopy(whole_training_set), include_classes=unlabelled_classes)\n unlabelled_indices = set(train_dataset_unlabelled.uq_idxs) - set(train_dataset_labelled.uq_idxs)\n train_dataset_unlabelled = subsample_dataset(deepcopy(whole_training_set), np.array(list(unlabelled_indices)))\n # unlabelled_indices = set(whole_training_set.uq_idxs) - set(train_dataset_labelled.uq_idxs)\n # train_dataset_unlabelled = subsample_dataset(deepcopy(whole_training_set), np.array(list(unlabelled_indices)))\n\n # Get test set for all classes\n test_dataset = ImageNetBase(root=os.path.join(imagenet_root, 'val'), transform=test_transform)\n test_dataset = subsample_classes(test_dataset, include_classes=subsampled_100_classes)\n\n # Reset test set\n test_dataset.samples = [(s[0], cls_map[s[1]]) for s in test_dataset.samples]\n test_dataset.targets = [s[1] for s in test_dataset.samples]\n test_dataset.uq_idxs = np.array(range(len(test_dataset)))\n test_dataset.target_transform = None\n\n # Either split train into train and val or use test set as val\n train_dataset_labelled = train_dataset_labelled_split if split_train_val else train_dataset_labelled\n val_dataset_labelled = val_dataset_labelled_split if split_train_val else None\n\n all_datasets = {\n 'train_labelled': train_dataset_labelled,\n 'train_unlabelled': train_dataset_unlabelled,\n 'val': val_dataset_labelled,\n 'test': test_dataset,\n }\n\n return all_datasets"
},
{
"identifier": "get_cub_datasets",
"path": "data/cub.py",
"snippet": "def get_cub_datasets(train_transform, test_transform, train_classes=range(160), prop_train_labels=0.8,\n split_train_val=False, seed=0, download=False):\n\n np.random.seed(seed)\n\n # Init entire training set\n whole_training_set = CustomCub2011(root=cub_root, transform=train_transform, train=True, download=download)\n\n # Get labelled training set which has subsampled classes, then subsample some indices from that\n train_dataset_labelled = subsample_classes(deepcopy(whole_training_set), include_classes=train_classes)\n subsample_indices = subsample_instances(train_dataset_labelled, prop_indices_to_subsample=prop_train_labels)\n train_dataset_labelled = subsample_dataset(train_dataset_labelled, subsample_indices)\n\n # Split into training and validation sets\n train_idxs, val_idxs = get_train_val_indices(train_dataset_labelled)\n train_dataset_labelled_split = subsample_dataset(deepcopy(train_dataset_labelled), train_idxs)\n val_dataset_labelled_split = subsample_dataset(deepcopy(train_dataset_labelled), val_idxs)\n val_dataset_labelled_split.transform = test_transform\n\n # Get unlabelled data\n unlabelled_indices = set(whole_training_set.uq_idxs) - set(train_dataset_labelled.uq_idxs)\n train_dataset_unlabelled = subsample_dataset(deepcopy(whole_training_set), np.array(list(unlabelled_indices)))\n\n # Get test set for all classes\n test_dataset = CustomCub2011(root=cub_root, transform=test_transform, train=False)\n\n # Either split train into train and val or use test set as val\n train_dataset_labelled = train_dataset_labelled_split if split_train_val else train_dataset_labelled\n val_dataset_labelled = val_dataset_labelled_split if split_train_val else None\n\n all_datasets = {\n 'train_labelled': train_dataset_labelled,\n 'train_unlabelled': train_dataset_unlabelled,\n 'val': val_dataset_labelled,\n 'test': test_dataset,\n }\n\n return all_datasets"
},
{
"identifier": "get_cub_universal_datasets",
"path": "data/cub.py",
"snippet": "def get_cub_universal_datasets(train_transform, test_transform, labelled_classes=range(160), unlabelled_classes=range(100, 200),\n prop_train_labels=0.8,\n split_train_val=False, seed=0, download=False):\n np.random.seed(seed)\n\n # Init entire training set\n whole_training_set = CustomCub2011(root=cub_root, transform=train_transform, train=True, download=download)\n\n # Get labelled training set which has subsampled classes, then subsample some indices from that\n train_dataset_labelled = subsample_classes(deepcopy(whole_training_set), include_classes=labelled_classes)\n subsample_indices = subsample_instances(train_dataset_labelled, prop_indices_to_subsample=prop_train_labels)\n train_dataset_labelled = subsample_dataset(train_dataset_labelled, subsample_indices)\n\n # Split into training and validation sets\n train_idxs, val_idxs = get_train_val_indices(train_dataset_labelled)\n train_dataset_labelled_split = subsample_dataset(deepcopy(train_dataset_labelled), train_idxs)\n val_dataset_labelled_split = subsample_dataset(deepcopy(train_dataset_labelled), val_idxs)\n val_dataset_labelled_split.transform = test_transform\n\n # Get unlabelled data\n train_dataset_unlabelled = subsample_classes(deepcopy(whole_training_set), include_classes=unlabelled_classes)\n unlabelled_indices = set(train_dataset_unlabelled.uq_idxs) - set(train_dataset_labelled.uq_idxs)\n train_dataset_unlabelled = subsample_dataset(deepcopy(whole_training_set), np.array(list(unlabelled_indices)))\n # unlabelled_indices = set(whole_training_set.uq_idxs) - set(train_dataset_labelled.uq_idxs)\n # train_dataset_unlabelled = subsample_dataset(deepcopy(whole_training_set), np.array(list(unlabelled_indices)))\n\n # Get test set for all classes\n test_dataset = CustomCub2011(root=cub_root, transform=test_transform, train=False)\n\n # Either split train into train and val or use test set as val\n train_dataset_labelled = train_dataset_labelled_split if split_train_val else train_dataset_labelled\n val_dataset_labelled = val_dataset_labelled_split if split_train_val else None\n\n all_datasets = {\n 'train_labelled': train_dataset_labelled,\n 'train_unlabelled': train_dataset_unlabelled,\n 'val': val_dataset_labelled,\n 'test': test_dataset,\n }\n\n return all_datasets"
},
{
"identifier": "get_aircraft_datasets",
"path": "data/fgvc_aircraft.py",
"snippet": "def get_aircraft_datasets(train_transform, test_transform, train_classes=range(50), prop_train_labels=0.8,\n split_train_val=False, seed=0):\n\n np.random.seed(seed)\n\n # Init entire training set\n whole_training_set = FGVCAircraft(root=aircraft_root, transform=train_transform, split='trainval')\n\n # Get labelled training set which has subsampled classes, then subsample some indices from that\n train_dataset_labelled = subsample_classes(deepcopy(whole_training_set), include_classes=train_classes)\n subsample_indices = subsample_instances(train_dataset_labelled, prop_indices_to_subsample=prop_train_labels)\n train_dataset_labelled = subsample_dataset(train_dataset_labelled, subsample_indices)\n\n # Split into training and validation sets\n train_idxs, val_idxs = get_train_val_indices(train_dataset_labelled)\n train_dataset_labelled_split = subsample_dataset(deepcopy(train_dataset_labelled), train_idxs)\n val_dataset_labelled_split = subsample_dataset(deepcopy(train_dataset_labelled), val_idxs)\n val_dataset_labelled_split.transform = test_transform\n\n # Get unlabelled data\n unlabelled_indices = set(whole_training_set.uq_idxs) - set(train_dataset_labelled.uq_idxs)\n train_dataset_unlabelled = subsample_dataset(deepcopy(whole_training_set), np.array(list(unlabelled_indices)))\n\n # Get test set for all classes\n test_dataset = FGVCAircraft(root=aircraft_root, transform=test_transform, split='test')\n\n # Either split train into train and val or use test set as val\n train_dataset_labelled = train_dataset_labelled_split if split_train_val else train_dataset_labelled\n val_dataset_labelled = val_dataset_labelled_split if split_train_val else None\n\n all_datasets = {\n 'train_labelled': train_dataset_labelled,\n 'train_unlabelled': train_dataset_unlabelled,\n 'val': val_dataset_labelled,\n 'test': test_dataset,\n }\n\n return all_datasets"
},
{
"identifier": "subsample_classes",
"path": "data/cifar.py",
"snippet": "def subsample_classes(dataset, include_classes=(0, 1, 8, 9)):\n\n cls_idxs = [x for x, t in enumerate(dataset.targets) if t in include_classes]\n\n target_xform_dict = {}\n for i, k in enumerate(include_classes):\n target_xform_dict[k] = i\n\n dataset = subsample_dataset(dataset, cls_idxs)\n\n # dataset.target_transform = lambda x: target_xform_dict[x]\n\n return dataset"
},
{
"identifier": "subsample_classes",
"path": "data/herbarium_19.py",
"snippet": "def subsample_classes(dataset, include_classes=range(250)):\n\n cls_idxs = [x for x, l in enumerate(dataset.targets) if l in include_classes]\n\n target_xform_dict = {}\n for i, k in enumerate(include_classes):\n target_xform_dict[k] = i\n\n dataset = subsample_dataset(dataset, cls_idxs)\n\n dataset.target_transform = lambda x: target_xform_dict[x]\n\n return dataset"
},
{
"identifier": "subsample_classes",
"path": "data/stanford_cars.py",
"snippet": "def subsample_classes(dataset, include_classes=range(160)):\n\n include_classes_cars = np.array(include_classes) + 1 # SCars classes are indexed 1 --> 196 instead of 0 --> 195\n cls_idxs = [x for x, t in enumerate(dataset.target) if t in include_classes_cars]\n\n target_xform_dict = {}\n for i, k in enumerate(include_classes):\n target_xform_dict[k] = i\n\n dataset = subsample_dataset(dataset, cls_idxs)\n\n # dataset.target_transform = lambda x: target_xform_dict[x]\n\n return dataset"
},
{
"identifier": "subsample_classes",
"path": "data/imagenet.py",
"snippet": "def subsample_classes(dataset, include_classes=list(range(1000))):\n\n cls_idxs = [x for x, t in enumerate(dataset.targets) if t in include_classes]\n\n target_xform_dict = {}\n for i, k in enumerate(include_classes):\n target_xform_dict[k] = i\n\n dataset = subsample_dataset(dataset, cls_idxs)\n dataset.target_transform = lambda x: target_xform_dict[x]\n\n return dataset"
},
{
"identifier": "subsample_classes",
"path": "data/cub.py",
"snippet": "def subsample_classes(dataset, include_classes=range(160)):\n\n include_classes_cub = np.array(include_classes) + 1 # CUB classes are indexed 1 --> 200 instead of 0 --> 199\n cls_idxs = [x for x, (_, r) in enumerate(dataset.data.iterrows()) if int(r['target']) in include_classes_cub]\n\n # TODO: For now have no target transform\n target_xform_dict = {}\n for i, k in enumerate(include_classes):\n target_xform_dict[k] = i\n\n dataset = subsample_dataset(dataset, cls_idxs)\n\n dataset.target_transform = lambda x: target_xform_dict[x]\n\n return dataset"
},
{
"identifier": "subsample_classes",
"path": "data/fgvc_aircraft.py",
"snippet": "def subsample_classes(dataset, include_classes=range(60)):\n\n cls_idxs = [i for i, (p, t) in enumerate(dataset.samples) if t in include_classes]\n\n # TODO: Don't transform targets for now\n target_xform_dict = {}\n for i, k in enumerate(include_classes):\n target_xform_dict[k] = i\n\n dataset = subsample_dataset(dataset, cls_idxs)\n\n dataset.target_transform = lambda x: target_xform_dict[x]\n\n return dataset"
},
{
"identifier": "osr_split_dir",
"path": "config.py",
"snippet": "_C = CN()\n_C.MODEL = CN()\n_C.MODEL.DEVICE = \"cuda\"\n_C.MODEL.NAME = 'resnet50'\n_C.MODEL.LAST_STRIDE = 1\n_C.MODEL.LABEL_SMOOTH = False\n_C.MODEL.PRETRAIN_PATH = ''\n_C.INPUT = CN()\n_C.INPUT.SIZE_TRAIN = [384, 128]\n_C.INPUT.SIZE_TEST = [384, 128]\n_C.INPUT.PROB = 0.0\n_C.INPUT.RE_PROB = 0.0\n_C.INPUT.PIXEL_MEAN = [0.485, 0.456, 0.406]\n_C.INPUT.PIXEL_STD = [0.229, 0.224, 0.225]\n_C.INPUT.PADDING = 10\n_C.DATASETS = CN()\n_C.DATASETS.NAMES = ('market1501')\n_C.DATASETS.DATA_PATH = '/home/zbc/data/market1501/'\n_C.DATASETS.TRAIN_PATH = 'bounding_box_train'\n_C.DATASETS.QUERY_PATH = 'query'\n_C.DATASETS.GALLERY_PATH = 'bounding_box_test'\n_C.DATALOADER = CN()\n_C.DATALOADER.NUM_WORKERS = 8\n_C.DATALOADER.SAMPLER = 'softmax'\n_C.DATALOADER.NUM_INSTANCE = 16\n_C.SOLVER = CN()\n_C.SOLVER.OPTIMIZER_NAME = \"Adam\"\n_C.SOLVER.FP16 = False\n_C.SOLVER.MAX_EPOCHS = 50\n_C.SOLVER.BASE_LR = 3e-4\n_C.SOLVER.BIAS_LR_FACTOR = 2\n_C.SOLVER.MOMENTUM = 0.9\n_C.SOLVER.MARGIN = 0.3\n_C.SOLVER.WEIGHT_DECAY = 0.0005\n_C.SOLVER.WEIGHT_DECAY_BIAS = 0.\n_C.SOLVER.GAMMA = 0.1\n_C.SOLVER.STEPS = (30, 55)\n_C.SOLVER.WARMUP_FACTOR = 1.0 / 3\n_C.SOLVER.WARMUP_ITERS = 500\n_C.SOLVER.WARMUP_METHOD = \"linear\"\n_C.SOLVER.CHECKPOINT_PERIOD = 50\n_C.SOLVER.LOG_PERIOD = 100\n_C.SOLVER.EVAL_PERIOD = 50\n_C.SOLVER.IMS_PER_BATCH = 64\n_C.SOLVER.CYTHON = True\n_C.TEST = CN()\n_C.TEST.IMS_PER_BATCH = 128\n_C.TEST.WEIGHT = \"\"\n_C.TEST.DEBUG = False\n_C.TEST.MULTI_GPU = False\n_C.TEST.RERANK = True\n_C.OUTPUT_DIR = \"\""
}
] | from data.data_utils import MergedDataset
from data.cifar import get_cifar_10_datasets, get_cifar_100_datasets, get_cifar_100_ucd_datasets
from data.herbarium_19 import get_herbarium_datasets
from data.stanford_cars import get_scars_datasets
from data.imagenet import get_imagenet_100_datasets, get_imagenet_ucd_100_datasets
from data.cub import get_cub_datasets, get_cub_universal_datasets
from data.fgvc_aircraft import get_aircraft_datasets
from data.inat_mini import get_inat_universal_datasets
from data.domainnet import get_domainnet_universal_datasets
from data.color_symbol import get_color_symbol_universal_datasets
from data.cifar import subsample_classes as subsample_dataset_cifar
from data.herbarium_19 import subsample_classes as subsample_dataset_herb
from data.stanford_cars import subsample_classes as subsample_dataset_scars
from data.imagenet import subsample_classes as subsample_dataset_imagenet
from data.cub import subsample_classes as subsample_dataset_cub
from data.fgvc_aircraft import subsample_classes as subsample_dataset_air
from copy import deepcopy
from config import osr_split_dir
import pickle
import os | 8,122 |
sub_sample_class_funcs = {
'cifar10': subsample_dataset_cifar,
'cifar100': subsample_dataset_cifar,
'imagenet_100': subsample_dataset_imagenet,
'herbarium_19': subsample_dataset_herb,
'cub': subsample_dataset_cub,
|
sub_sample_class_funcs = {
'cifar10': subsample_dataset_cifar,
'cifar100': subsample_dataset_cifar,
'imagenet_100': subsample_dataset_imagenet,
'herbarium_19': subsample_dataset_herb,
'cub': subsample_dataset_cub, | 'aircraft': subsample_dataset_air, | 10 | 2023-10-23 18:23:22+00:00 | 12k |
nju-websoft/SCR | main.py | [
{
"identifier": "reset_id",
"path": "framework/utils.py",
"snippet": "def reset_id(labels, new_id):\n res = []\n for index in range(len(labels)):\n res.append(new_id[int(labels[index])])\n return torch.tensor(res)"
},
{
"identifier": "get_reset",
"path": "framework/utils.py",
"snippet": "def get_reset(event_list):\n new_id, id2label = {}, {}\n\n new_id[0] = torch.tensor(0)\n id2label[torch.tensor(0)] = 0\n for index, value in enumerate(event_list):\n new_id[value] = torch.tensor(index + 1)\n id2label[index+1] = value\n return new_id, id2label"
},
{
"identifier": "trigger_combine_event",
"path": "framework/utils.py",
"snippet": "def trigger_combine_event(old_data, new_data):\n if len(new_data) == 0:\n return old_data\n init = False\n res = []\n if len(old_data) == 0:\n init = True\n old_data = copy.deepcopy(new_data)\n for old_sample_index in range(len(old_data)-1, -1, -1):\n old_sample = old_data[old_sample_index]\n combine_flag = False\n for new_sample_index in range(len(new_data)-1, -1, -1):\n new_sample = new_data[new_sample_index]\n if old_sample['input_ids'] == new_sample['input_ids']:\n old_offset = torch.nonzero(torch.tensor(np.array(old_sample['labels'])))\n new_offset = torch.nonzero(torch.tensor(np.array(new_sample['labels'])))\n eqoffset = [int(val) for val in old_offset if val in new_offset]\n combine_flag = True\n if len(eqoffset) > 0:\n eqflag = False\n for i in eqoffset: \n if old_sample['labels'][i] != new_sample['labels'][i]:\n # one ins has two event type on same trigger...\n eqflag = True \n if eqflag == False:\n new_data.remove(new_sample)\n continue\n \n old_sample['labels'] = copy.deepcopy(list(np.array(old_sample['labels']) + np.array(new_sample['labels'])))\n new_data.remove(new_sample)\n if (combine_flag and init) or (init == False):\n temp = copy.deepcopy(old_sample)\n res.append(temp)\n res += new_data\n return res"
},
{
"identifier": "unpack_batch",
"path": "framework/utils.py",
"snippet": "def unpack_batch(sentence_ids, input_ids, input_masks, segment_ids, labels, ners, new_id, device):\n sentence_ids = torch.tensor(sentence_ids).to(device)\n input_ids = torch.tensor(np.array([item.cpu().detach().numpy() for item in input_ids])).to(device)\n input_masks = torch.tensor(np.array([item.cpu().detach().numpy() for item in input_masks])).to(device)\n segment_ids = torch.tensor(np.array([item.cpu().detach().numpy() for item in segment_ids])).to(device)\n ners = torch.tensor(np.array([item.cpu().detach().numpy() for item in ners])).to(device)\n if labels != None:\n if new_id != None:\n labels = torch.tensor(np.array([reset_id(item, new_id).cpu().detach().numpy() for item in labels])).to(device)\n else:\n labels = torch.tensor(np.array([item.cpu().detach().numpy() for item in labels])).to(device)\n return sentence_ids, input_ids, input_masks, segment_ids, labels, ners"
},
{
"identifier": "BertAdam",
"path": "framework/optimization.py",
"snippet": "class BertAdam(Optimizer):\n \"\"\"Implements BERT version of Adam algorithm with weight decay fix.\n Params:\n lr: learning rate\n warmup: portion of t_total for the warmup, -1 means no warmup. Default: -1\n t_total: total number of training steps for the learning\n rate schedule, -1 means constant learning rate. Default: -1\n schedule: schedule to use for the warmup (see above). Default: 'warmup_linear'\n b1: Adams b1. Default: 0.9\n b2: Adams b2. Default: 0.999\n e: Adams epsilon. Default: 1e-6\n weight_decay: Weight decay. Default: 0.01\n max_grad_norm: Maximum norm for the gradients (-1 means no clipping). Default: 1.0\n \"\"\"\n def __init__(self, params, lr=required, warmup=-1, t_total=-1, schedule='warmup_linear',\n b1=0.9, b2=0.999, e=1e-6, weight_decay=0.01,\n max_grad_norm=1.0):\n if lr is not required and lr < 0.0:\n raise ValueError(\"Invalid learning rate: {} - should be >= 0.0\".format(lr))\n if schedule not in SCHEDULES:\n raise ValueError(\"Invalid schedule parameter: {}\".format(schedule))\n if not 0.0 <= warmup < 1.0 and not warmup == -1:\n raise ValueError(\"Invalid warmup: {} - should be in [0.0, 1.0[ or -1\".format(warmup))\n if not 0.0 <= b1 < 1.0:\n raise ValueError(\"Invalid b1 parameter: {} - should be in [0.0, 1.0[\".format(b1))\n if not 0.0 <= b2 < 1.0:\n raise ValueError(\"Invalid b2 parameter: {} - should be in [0.0, 1.0[\".format(b2))\n if not e >= 0.0:\n raise ValueError(\"Invalid epsilon value: {} - should be >= 0.0\".format(e))\n defaults = dict(lr=lr, schedule=schedule, warmup=warmup, t_total=t_total,\n b1=b1, b2=b2, e=e, weight_decay=weight_decay,\n max_grad_norm=max_grad_norm)\n super(BertAdam, self).__init__(params, defaults)\n\n def get_lr(self):\n lr = []\n for group in self.param_groups:\n for p in group['params']:\n state = self.state[p]\n if len(state) == 0:\n return [0]\n if group['t_total'] != -1:\n schedule_fct = SCHEDULES[group['schedule']]\n lr_scheduled = group['lr'] * schedule_fct(state['step']/group['t_total'], group['warmup'])\n else:\n lr_scheduled = group['lr']\n lr.append(lr_scheduled)\n return lr\n\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n warned_for_t_total = False\n\n for group in self.param_groups:\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data\n if grad.is_sparse:\n raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')\n\n state = self.state[p]\n\n # State initialization\n if len(state) == 0:\n state['step'] = 0\n # Exponential moving average of gradient values\n state['next_m'] = torch.zeros_like(p.data)\n # Exponential moving average of squared gradient values\n state['next_v'] = torch.zeros_like(p.data)\n\n next_m, next_v = state['next_m'], state['next_v']\n beta1, beta2 = group['b1'], group['b2']\n\n # Add grad clipping\n if group['max_grad_norm'] > 0:\n clip_grad_norm_(p, group['max_grad_norm'])\n\n # Decay the first and second moment running average coefficient\n # In-place operations to update the averages at the same time\n next_m.mul_(beta1).add_(grad, alpha = 1 - beta1)\n next_v.mul_(beta2).addcmul_(grad, grad, value = 1 - beta2)\n update = next_m / (next_v.sqrt() + group['e'])\n\n # Just adding the square of the weights to the loss function is *not*\n # the correct way of using L2 regularization/weight decay with Adam,\n 
# since that will interact with the m and v parameters in strange ways.\n #\n # Instead we want to decay the weights in a manner that doesn't interact\n # with the m/v parameters. This is equivalent to adding the square\n # of the weights to the loss with plain (non-momentum) SGD.\n if group['weight_decay'] > 0.0:\n update += group['weight_decay'] * p.data\n\n if group['t_total'] != -1:\n schedule_fct = SCHEDULES[group['schedule']]\n progress = state['step']/group['t_total']\n lr_scheduled = group['lr'] * schedule_fct(progress, group['warmup'])\n # warning for exceeding t_total (only active with warmup_linear\n if group['schedule'] == \"warmup_linear\" and progress > 1. and not warned_for_t_total:\n logger.warning(\n \"Training beyond specified 't_total' steps with schedule '{}'. Learning rate set to {}. \"\n \"Please set 't_total' of {} correctly.\".format(group['schedule'], lr_scheduled, self.__class__.__name__))\n warned_for_t_total = True\n # end warning\n else:\n lr_scheduled = group['lr']\n\n update_with_lr = lr_scheduled * update\n p.data.add_(-update_with_lr)\n\n state['step'] += 1\n\n # step_size = lr_scheduled * math.sqrt(bias_correction2) / bias_correction1\n # No bias correction\n # bias_correction1 = 1 - beta1 ** state['step']\n # bias_correction2 = 1 - beta2 ** state['step']\n\n return loss"
},
{
"identifier": "AdamW",
"path": "framework/optimization.py",
"snippet": "class AdamW(Optimizer):\n \"\"\" Implements Adam algorithm with weight decay fix.\n\n Parameters:\n lr (float): learning rate. Default 1e-3.\n betas (tuple of 2 floats): Adams beta parameters (b1, b2). Default: (0.9, 0.999)\n eps (float): Adams epsilon. Default: 1e-6\n weight_decay (float): Weight decay. Default: 0.0\n correct_bias (bool): can be set to False to avoid correcting bias in Adam (e.g. like in Bert TF repository). Default True.\n \"\"\"\n\n def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-6, weight_decay=0.0, correct_bias=True):\n if lr < 0.0:\n raise ValueError(\"Invalid learning rate: {} - should be >= 0.0\".format(lr))\n if not 0.0 <= betas[0] < 1.0:\n raise ValueError(\"Invalid beta parameter: {} - should be in [0.0, 1.0[\".format(betas[0]))\n if not 0.0 <= betas[1] < 1.0:\n raise ValueError(\"Invalid beta parameter: {} - should be in [0.0, 1.0[\".format(betas[1]))\n if not 0.0 <= eps:\n raise ValueError(\"Invalid epsilon value: {} - should be >= 0.0\".format(eps))\n defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, correct_bias=correct_bias)\n super().__init__(params, defaults)\n\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n for p in group[\"params\"]:\n if p.grad is None:\n continue\n grad = p.grad.data\n if grad.is_sparse:\n raise RuntimeError(\"Adam does not support sparse gradients, please consider SparseAdam instead\")\n\n state = self.state[p]\n\n # State initialization\n if len(state) == 0:\n state[\"step\"] = 0\n # Exponential moving average of gradient values\n state[\"exp_avg\"] = torch.zeros_like(p.data)\n # Exponential moving average of squared gradient values\n state[\"exp_avg_sq\"] = torch.zeros_like(p.data)\n\n exp_avg, exp_avg_sq = state[\"exp_avg\"], state[\"exp_avg_sq\"]\n beta1, beta2 = group[\"betas\"]\n\n state[\"step\"] += 1\n\n # Decay the first and second moment running average coefficient\n # In-place operations to update the averages at the same time\n # exp_avg.mul_(beta1).add_(1.0 - beta1, grad)\n exp_avg.mul_(beta1).add_(grad, alpha=1.0 - beta1)\n exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)\n denom = exp_avg_sq.sqrt().add_(group[\"eps\"])\n\n step_size = group[\"lr\"]\n if group[\"correct_bias\"]: # No bias correction for Bert\n bias_correction1 = 1.0 - beta1 ** state[\"step\"]\n bias_correction2 = 1.0 - beta2 ** state[\"step\"]\n step_size = step_size * math.sqrt(bias_correction2) / bias_correction1\n\n p.data.addcdiv_(exp_avg, denom, value=-step_size)\n\n # Just adding the square of the weights to the loss function is *not*\n # the correct way of using L2 regularization/weight decay with Adam,\n # since that will interact with the m and v parameters in strange ways.\n #\n # Instead we want to decay the weights in a manner that doesn't interact\n # with the m/v parameters. This is equivalent to adding the square\n # of the weights to the loss with plain (non-momentum) SGD.\n # Add weight decay at the end (fixed version)\n if group[\"weight_decay\"] > 0.0:\n p.data.add_(p.data, alpha=-group[\"lr\"] * group[\"weight_decay\"])\n\n return loss"
},
{
"identifier": "triggerEncoder",
"path": "model/trigger_encoder.py",
"snippet": "class triggerEncoder(nn.Module):\n def __init__(self, config):\n super(triggerEncoder, self).__init__()\n self.config = config\n self.last_k_attention = config.last_k_attention\n self.bert = BertModel.from_pretrained(config.bert_path, output_attentions=True)\n self.embedding_dim = self.config.embedding_dim\n self.drop = nn.Dropout(0.2)\n self.linear_transform = nn.Linear(self.bert.config.hidden_size, self.config.hidden_dim, bias=True)\n self.layer_normalization = nn.LayerNorm([self.config.hidden_dim, self.config.hidden_dim])\n\n def get_attention(self, input_ids, input_masks, segment_ids):\n \n output = self.bert(input_ids, token_type_ids = segment_ids, attention_mask = input_masks)\n \n now_attention = 0\n attention = output[2]\n for i in range(self.last_k_attention):\n now_layer_att = attention[-i]\n now_layer_att = torch.mean(now_layer_att, 1)\n res_att = now_layer_att/(torch.sum(now_layer_att, dim = -1, keepdim = True)+1e-9)\n now_attention += res_att\n avg_layer_att = now_attention/self.last_k_attention\n return avg_layer_att\n\n\n\n\n def get_feature(self, sentence_ids, input_ids, input_masks, segment_ids):\n feature = self.bert(input_ids, token_type_ids = segment_ids, attention_mask = input_masks)[0]\n seq_output = self.drop(feature)\n seq_output = self.linear_transform(seq_output)\n output = F.gelu(seq_output)\n feature = self.layer_normalization(output)\n feature = feature.view((1,-1))\n return feature\n\n def forward(self, sentence_ids, input_ids, input_masks, segment_ids):\n seq_output = self.bert(input_ids, token_type_ids = segment_ids, attention_mask = input_masks)[0]\n seq_output = self.drop(seq_output)\n seq_output = self.linear_transform(seq_output)\n output = F.gelu(seq_output)\n output = self.layer_normalization(output)\n return output"
},
{
"identifier": "argumentDetection",
"path": "model/argument_detection.py",
"snippet": "class argumentDetection(nn.Module):\n def __init__(self, config):\n super(argumentDetection, self).__init__()\n self.config = config\n self.bert = BertModel.from_pretrained(config.bert_path)\n self.embedding_dim = self.config.embedding_dim\n self.classifier = nn.Linear(self.embedding_dim*2, config.args_num, bias=False)\n self.dropout = nn.Dropout(0.2)\n self.criterion = nn.CrossEntropyLoss()\n def forward(self, input_ids, labels, segment_ids, input_mask, offset, metadata, unseen_matadata, trigger, ner, gold_args):\n sequence_output = self.bert(input_ids, token_type_ids=segment_ids, attention_mask=input_mask)[0]\n new_logits = None\n new_label = []\n for i in range(len(ner)):\n for start, end in ner[i]:\n embedding = sequence_output[i][[start+1, end]].view(-1, self.embedding_dim*2)\n embedding = self.dropout(embedding)\n logits = self.classifier(embedding)\n one_trigger = trigger[i]\n unseen_args = unseen_matadata[one_trigger]\n logits[:,unseen_args] = 0\n label = labels[i][start+1]\n new_label.append(label)\n if new_logits == None:\n new_logits = logits\n else:\n new_logits = torch.cat([new_logits, logits], dim = 0)\n\n new_label = torch.tensor(new_label).cuda()\n \n loss = self.criterion(new_logits, new_label)\n return loss\n\n \n def get_res(self, input_ids, segment_ids, input_mask, ner):\n sequence_output = self.bert(input_ids, token_type_ids=segment_ids, attention_mask=input_mask)[0]\n res_logits = []\n for i in range(len(ner)):\n one_logits = None\n for start, end in ner[i]:\n embedding = sequence_output[i][[start+1, end]].view(-1, self.embedding_dim*2)\n embedding = self.dropout(embedding)\n logits = self.classifier(embedding)\n if one_logits == None:\n one_logits = logits\n else:\n one_logits = torch.cat([one_logits, logits], dim = 0)\n \n res_logits.append(one_logits)\n return res_logits\n\n def get_feature(self, input_ids, segment_ids, input_mask):\n sequence_output = self.bert(input_ids, token_type_ids=segment_ids, attention_mask=input_mask)[0]\n feature = self.dropout(sequence_output)\n feature = feature.view((1,-1))\n return feature"
},
{
"identifier": "classifier",
"path": "model/classifier.py",
"snippet": "class classifier(nn.Module):\n def __init__(self, config, events_num):\n super(classifier, self).__init__()\n self.config = config\n self.events_num = events_num\n self.embedding_dim = self.config.embedding_dim\n self.classifier = nn.Linear(self.config.hidden_dim, events_num, bias=False)\n self.criterion = nn.CrossEntropyLoss()\n\n def forward(self, feature, input_masks, labels):\n logits = self.classifier(feature)\n # test/dev\n if labels == None:\n return logits\n # train\n active_loss = input_masks.view(-1) == 1\n \n active_logits = logits.view(-1, self.events_num)[active_loss]\n active_labels = labels.view(-1)[active_loss]\n loss = self.criterion(active_logits, active_labels)\n \n return logits, loss"
},
{
"identifier": "entityDetection",
"path": "model/entity_detection.py",
"snippet": "class entityDetection(nn.Module):\n\n def __init__(self, config, rnn_dim=128):\n super(entityDetection, self).__init__()\n self.bert = BertModel.from_pretrained('bert-base-uncased')\n self.dropout = nn.Dropout(0.2)\n self.birnn = nn.LSTM(768, rnn_dim, num_layers=1, bidirectional=True, batch_first=True)\n self.classifier = nn.Linear(rnn_dim*2, config.num_labels)\n self.crf = CRF(config.num_labels, batch_first=True)\n \n\n def forward(self, input_ids, labels, token_type_ids=None, input_mask=None):\n outputs = self.bert(input_ids, token_type_ids=token_type_ids, attention_mask=input_mask)\n sequence_output = outputs[0]\n sequence_output, _ = self.birnn(sequence_output)\n sequence_output = self.dropout(sequence_output)\n emissions = self.classifier(sequence_output)\n loss = -1*self.crf(emissions, labels, mask=input_mask.byte())\n return loss\n\n \n def get_res(self, input_ids, token_type_ids=None, input_mask=None):\n outputs = self.bert(input_ids, token_type_ids=token_type_ids, attention_mask=input_mask)\n sequence_output = outputs[0]\n sequence_output, _ = self.birnn(sequence_output)\n sequence_output = self.dropout(sequence_output)\n emissions = self.classifier(sequence_output)\n res = self.crf.decode(emissions, input_mask.byte())\n return res"
},
{
"identifier": "Config",
"path": "framework/config.py",
"snippet": "class Config(ConfigParser):\n def __init__(self, file):\n self.configParser = ConfigParser()\n self.configParser.read(file)\n self.load_value()\n\n def load_value(self):\n for section in self.configParser.sections():\n for key, value in self.configParser.items(section):\n val = None\n for attr in ['getint', 'getfloat', 'getboolean']:\n try:\n val = getattr(self.configParser[section], attr)(key)\n break\n except:\n val = value\n assert(val!=None)\n setattr(self, key, val)\n print(key, val)"
}
] | import torch
import random
import numpy as np
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import math
import warnings
from framework.utils import reset_id, get_reset, trigger_combine_event, unpack_batch
from framework.optimization import BertAdam, AdamW
from argparse import ArgumentParser
from model.trigger_encoder import triggerEncoder
from model.argument_detection import argumentDetection
from model.classifier import classifier
from model.entity_detection import entityDetection
from framework.config import Config
from framework.dataloader import *
from transformers import logging
from sklearn.cluster import KMeans | 8,488 | trig = copy.deepcopy(trig[0])
gold = copy.deepcopy(gold[0])
sentence = ''.join(sentence) + str(trig)
if sentence in gold_args:
print(gold_args[sentence])
print(gold)
assert(0)
gold_args[sentence] = gold
label_num += len(gold)
for step, (input_ids, input_masks, in_sent, segment_ids, sentence, trigger, ner) in enumerate(eval_data_loader):
input_ids = torch.tensor(np.array([item.cpu().detach().numpy() for item in input_ids])).cuda()
input_masks = torch.tensor(np.array([item.cpu().detach().numpy() for item in input_masks])).cuda()
segment_ids = torch.tensor(np.array([item.cpu().detach().numpy() for item in segment_ids])).cuda()
with torch.no_grad():
logits = argument_detection.get_res(input_ids, segment_ids, input_masks, ner)
for i in range(len(in_sent)):
sent = copy.deepcopy(sentence[i])
tr = copy.deepcopy(trigger[i])
tr = sampler.index2vocab[tr]
sent = ''.join(sent) + str(tr)
new_logits = logits[i]
seen_args = copy.deepcopy(metadata[tr])
seen_args += [0]
pred_roles = []
if new_logits == None:
continue
for index, value in enumerate(new_logits):
logi = value[seen_args]
max_value, pred_role = torch.max(logi, dim = 0)
start, end = ner[i][index]
one_pred = (start, end, seen_args[int(pred_role)])
if seen_args[int(pred_role)] != 0:
pred_roles.append(one_pred)
if sent in gold_args:
one_gold_args = copy.deepcopy(gold_args[sent])
pred_num += len(pred_roles)
for preds in pred_roles:
if preds in one_gold_args:
while(preds in one_gold_args):
correct_num += 1
one_gold_args.remove(preds)
else:
pred_num += len(pred_roles)
if pred_num == 0 or label_num == 0 or correct_num == 0:
return 0
pred_c = 100.0*correct_num/pred_num
recall_c = 100.0*correct_num/label_num
f1_c = 2*pred_c*recall_c/(pred_c+recall_c)
return f1_c
def select_argu_data(config, argument_detection, relation_dataset,new_id, event_mention):
train_data_loader = get_ACEArgData_loader(relation_dataset, config, shuffle = False, batch_size = 1)
features = []
argument_detection.eval()
for step, (sentence, input_ids, input_masks, in_sent, segment_ids, args, args_offset, gold_args, ner, trigger) in enumerate(train_data_loader):
input_ids = torch.tensor(np.array([item.cpu().detach().numpy() for item in input_ids])).cuda()
input_masks = torch.tensor(np.array([item.cpu().detach().numpy() for item in input_masks])).cuda()
segment_ids = torch.tensor(np.array([item.cpu().detach().numpy() for item in segment_ids])).cuda()
with torch.no_grad():
feature = argument_detection.get_feature(input_ids, segment_ids, input_masks).cpu()
features.append(feature)
features = np.concatenate(features)
num_clusters = min(config.memory_size, len(relation_dataset))
if num_clusters == len(relation_dataset):
memory = []
for i in relation_dataset:
memory.append(i)
return memory
distances = KMeans(n_clusters = num_clusters, random_state = 0).fit_transform(features)
memory = []
for k in range(num_clusters):
select_index = np.argmin(distances[:, k])
ins = relation_dataset[select_index]
memory.append(ins)
return memory
def main():
# load config
parser = ArgumentParser()
parser.add_argument('--config', default='./config/ace.ini')
args = parser.parse_args()
config = Config(args.config)
# set train param
config.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
batch_size_per_step = int(config.batch_size / config.gradient_accumulation_steps)
triger_result_total, trigger_result_cur, argument_result_total, argument_result_cur = [], [], [], []
    # run config.total_round rounds and average the results
for i in range(config.total_round):
print(f"Now is round {i}")
config.seed += 100
random.seed(config.seed)
np.random.seed(config.seed)
torch.manual_seed(config.seed)
        # now handle the trigger detection task
sampler = ACETriDataloder(config, i)
trigger_one_round_res = []
argument_one_round_res = []
# trigger memory space
trigger_memorized_samples = {}
# argument memory space
argument_memorized_samples = {}
        # init trigger encoder model
| logging.set_verbosity_warning()
logging.set_verbosity_error()
warnings.filterwarnings('ignore')
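# Evaluate trigger detection: merge consecutive identical gold/predicted labels at adjacent offsets into
# single trigger mentions, compute F1, and (when save=True) dump predictions to config.trigger_pred_file.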
def eval_trigger(trigger_encoder, trigger_classifier, eval_data, config, new_id, save, ltlabel, id2label):
eval_data_loader = get_ACETriData_loader(eval_data, config, shuffle = True)
trigger_encoder.eval()
trigger_classifier.eval()
pred_num = 0
correct_num = 0
label_num = 0
pred_res = []
for step, (sentence_ids, input_ids, input_masks, in_sent, segment_ids, labels, ners, sentence) in enumerate(eval_data_loader):
sentence_ids, input_ids, input_masks, segment_ids, labels, ners = unpack_batch(sentence_ids, input_ids, input_masks, segment_ids, labels, ners, new_id, config.device)
with torch.no_grad():
feature = trigger_encoder(sentence_ids, input_ids, input_masks, segment_ids)
#feature = torch.stack([x.to(device) for x in feature],dim=0)
logits = trigger_classifier(feature, None, None)
new_logits = logits
for index, value in enumerate(in_sent):
evetype = []
pred_first = True
value = value == 1
gold_offset = torch.nonzero(labels[index][value]).squeeze(dim = 1)
gold_label = torch.gather(labels[index][value], dim = 0, index = gold_offset)
assert(len(gold_label) != 0)
gold_label = [int(val) for val in gold_label]
gold_offset = [int(val) for val in gold_offset]
new_gold_label = []
i = 0
while i < len(gold_label):
if i+1 >= len(gold_label):
if config.lttest and id2label[gold_label[i]] not in ltlabel:
break
else:
new_gold_label.append(gold_label[i])
break
while gold_label[i] == gold_label[i+1] and gold_offset[i]+1 == gold_offset[i+1]:
i += 1
if i+1 >= len(gold_label):
break
if config.lttest == False or id2label[gold_label[i]] in ltlabel:
new_gold_label.append(gold_label[i])
i+=1
gold_label = new_gold_label
label_num += len(gold_label)
res = new_logits[index][value,:]
max_value, pred_tri_each_word = torch.max(res, 1)
pred_trigger = 0
offset = 0
pred_offset, pred_label = [], []
for offset, trigger in enumerate(pred_tri_each_word):
if trigger!=0:
if config.lttest == False or id2label[int(trigger)] in ltlabel:
pred_offset.append(offset)
pred_label.append(trigger)
new_pred_label = []
i = 0
while i < len(pred_label):
if i+1 >= len(pred_label):
new_pred_label.append(pred_label[i])
break
while pred_label[i] == pred_label[i+1] and pred_offset[i]+1 == pred_offset[i+1]:
i += 1
if i+1 >= len(pred_label):
break
new_pred_label.append(pred_label[i])
i+=1
new_pred_label = [int(val) for val in new_pred_label]
pred_num += len(new_pred_label)
for pred_trigger in new_pred_label:
if save:
if id2label[pred_trigger] not in evetype:
evetype.append(id2label[pred_trigger])
onesamp = {}
onesamp['sentence'] = sentence[index]
onesamp['trigger'] = id2label[pred_trigger]
onesamp['s_start'] = 0
pred_res.append(onesamp)
if pred_trigger in gold_label:
correct_num += 1
gold_label.remove(pred_trigger)
if pred_num == 0 or label_num == 0 or correct_num == 0:
return 0
pred_c = 100.0*correct_num/pred_num
recall_c = 100.0*correct_num/label_num
f1_c = 2*pred_c*recall_c/(pred_c+recall_c)
if save:
f = open(config.trigger_pred_file, 'w')
json.dump(pred_res, f)
f.close()
return f1_c
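# Train the trigger encoder and classifier with AdamW for a fixed 5 epochs
# (no distillation, no dev evaluation, no early stopping).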
def train_simple_trigger(trigger_encoder, trigger_classifier, tr_data, config, new_id):
train_data_loader = get_ACETriData_loader(tr_data, config, shuffle = True)
trigger_encoder.train()
trigger_classifier.train()
param_optimizer_1 = list(trigger_encoder.named_parameters())
param_optimizer_1 = [n for n in param_optimizer_1 if 'pooler' not in n[0]]
param_optimizer_2 = list(trigger_classifier.named_parameters())
param_optimizer_2 = [n for n in param_optimizer_2 if 'pooler' not in n[0]]
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer_1
if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01, "betas": (0.9, 0.999), 'lr':config.trigger_encoder_learning_rate},
{'params': [p for n, p in param_optimizer_1
if any(nd in n for nd in no_decay)], 'weight_decay': 0.0, "betas": (0.9, 0.999),'lr':config.trigger_encoder_learning_rate},
{'params': [p for n, p in param_optimizer_2
if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01, "betas": (0.9, 0.999), 'lr':config.trigger_classifier_learning_rate},
{'params': [p for n, p in param_optimizer_2
if any(nd in n for nd in no_decay)], 'weight_decay': 0.0, "betas": (0.9, 0.999), 'lr':config.trigger_classifier_learning_rate}
]
optimizer = AdamW(params = optimizer_grouped_parameters)
epoch_index, best_f1, es_index = 0, 0, 0
fd_criterion = nn.CosineEmbeddingLoss()
logits = None
global_step = 0
while(True):
losses = []
for step, (sentence_ids, input_ids, input_masks, in_sent, segment_ids, labels, ners, sentence) in enumerate(train_data_loader):
sentence_ids, input_ids, input_masks, segment_ids, labels, ners = unpack_batch(sentence_ids, input_ids, input_masks, segment_ids, labels, ners, new_id, config.device)
feature = trigger_encoder(sentence_ids, input_ids, input_masks, segment_ids)
logits, loss = trigger_classifier(feature, input_masks, labels)
losses.append(loss.cpu().detach().numpy())
loss.backward()
if (step + 1) % config.gradient_accumulation_steps == 0:
optimizer.step()
optimizer.zero_grad()
global_step += 1
print(f"epoch: {epoch_index}, loss is {np.array(losses).mean()}")
epoch_index += 1
if epoch_index >= 5:
break
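# Train the trigger encoder/classifier on the current task. Optional components: knowledge distillation
# from the previous-task model (config.kd) with temperature config.temp, attention-based feature
# distillation (config.attention), and Gaussian feature augmentation at trigger positions for events in
# trigger_tailed. Training stops after config.epochs or by early stopping on dev F1, per config.merit.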
def train_trigger(trigger_encoder, trigger_classifier, tr_data, de_data, seen_train_event, config, new_id, forward_encoder, forward_classifier, forward_event, trigger_tailed, ltlabel, id2label):
if config.kd == True and forward_event != None:
forward_index = reset_id(forward_event, new_id).cuda()
print(forward_index)
T = config.temp
train_data_loader = get_ACETriData_loader(tr_data, config, shuffle = True)
trigger_encoder.train()
trigger_classifier.train()
param_optimizer_1 = list(trigger_encoder.named_parameters())
param_optimizer_1 = [n for n in param_optimizer_1 if 'pooler' not in n[0]]
param_optimizer_2 = list(trigger_classifier.named_parameters())
param_optimizer_2 = [n for n in param_optimizer_2 if 'pooler' not in n[0]]
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer_1
if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01, "betas": (0.9, 0.999), 'lr':config.trigger_encoder_learning_rate},
{'params': [p for n, p in param_optimizer_1
if any(nd in n for nd in no_decay)], 'weight_decay': 0.0, "betas": (0.9, 0.999),'lr':config.trigger_encoder_learning_rate},
{'params': [p for n, p in param_optimizer_2
if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01, "betas": (0.9, 0.999), 'lr':config.trigger_classifier_learning_rate},
{'params': [p for n, p in param_optimizer_2
if any(nd in n for nd in no_decay)], 'weight_decay': 0.0, "betas": (0.9, 0.999), 'lr':config.trigger_classifier_learning_rate}
]
if config.merit == 'epochs':
num_train_optimization_steps = len(train_data_loader) // config.gradient_accumulation_steps * config.epochs
optimizer = AdamW(params = optimizer_grouped_parameters,
weight_decay=config.weight_decay)
elif config.merit == 'early_stop':
optimizer = AdamW(params = optimizer_grouped_parameters)
epoch_index, best_f1, es_index = 0, 0, 0
#fd_criterion = nn.CosineEmbeddingLoss(reduction = 'sum')
fd_criterion = nn.CosineEmbeddingLoss()
logits = None
global_step = 0
while(True):
losses = []
for step, (sentence_ids, input_ids, input_masks, in_sent, segment_ids, labels, ners, sentence) in enumerate(train_data_loader):
sentence_ids, input_ids, input_masks, segment_ids, labels, ners = unpack_batch(sentence_ids, input_ids, input_masks, segment_ids, labels, ners, new_id, config.device)
feature = trigger_encoder(sentence_ids, input_ids, input_masks, segment_ids)
if len(trigger_tailed) != 0:
tail_res = []
for i, label in enumerate(labels):
flabels = label!=0
pos_labels = label[flabels]
pos_index = torch.nonzero(label)
for index, fe in enumerate(pos_labels):
if int(fe) in trigger_tailed:
protos, standard = trigger_tailed[int(fe)]
protos = protos[flabels]
standard = standard[flabels]
for st in range(len(standard)):
s = torch.tensor(np.random.normal(0, standard[st], 1)).cuda()
j = pos_index[index]
feature[i][j] += s
tail_res.append((i,j,s))
logits, loss = trigger_classifier(feature, input_masks, labels)
if config.kd == True and forward_event != None:
#print(tail_res)
kd_loss = 0
temp_masks = copy.deepcopy(input_masks)
forward_features = forward_encoder(sentence_ids, input_ids, temp_masks, segment_ids)
if len(trigger_tailed) != 0:
for i,j,s in tail_res:
forward_features[i][j] += s
forward_logits = forward_classifier(forward_features, temp_masks, None)
forward_logits = (forward_logits.index_select(2, forward_index)/T).view(-1, len(forward_event))
new_logits = (logits.index_select(2, forward_index)/T).view(-1, len(forward_event))
active_loss = (input_masks.view(-1) == 1).cuda()
forward_logits = forward_logits[active_loss]
new_logits = new_logits[active_loss]
if config.select == True:
max_forward_index = max(forward_index)
label_index = (labels.view(-1)<=max_forward_index)[active_loss].cuda()
forward_logits[:,0] = 0
new_logits[:,0] = 0
forward_logits = forward_logits[label_index]
new_logits = new_logits[label_index]
forward_logits = F.softmax(forward_logits, dim = 1)
new_logits = F.log_softmax(new_logits, dim = 1)
kd_loss = -torch.mean(torch.sum(forward_logits * new_logits, dim = 1))
#kd_loss = -torch.sum(torch.sum(forward_logits * new_logits, dim = 1))
if config.attention == True:
attention = trigger_encoder.get_attention(input_ids, input_masks, segment_ids)
forward_attention = forward_encoder.get_attention(input_ids, input_masks, segment_ids)
attention = attention.matmul(feature)
forward_attention = forward_attention.matmul(forward_features)
attention = F.normalize(attention, p=2, dim=2).view(-1, attention.shape[2])[active_loss]
forward_attention = F.normalize(forward_attention, p=2, dim=2).view(-1, forward_attention.shape[2])[active_loss]
fd_loss = fd_criterion(attention, forward_attention, torch.ones(attention.shape[0]).cuda())
kd_loss = kd_loss + fd_loss
loss = (1-config.alpha)*loss+config.alpha*kd_loss
losses.append(loss.cpu().detach().numpy())
loss.backward()
if (step + 1) % config.gradient_accumulation_steps == 0:
optimizer.step()
optimizer.zero_grad()
global_step += 1
if config.merit == 'early_stop':
res = 0
res = eval_trigger(trigger_encoder, trigger_classifier, de_data, config, new_id, False, ltlabel, id2label)
trigger_encoder.train()
trigger_classifier.train()
if res > best_f1:
best_f1 = res
es_index = 0
encoder_output_path = config.output_dir+ config.trigger_encoder_file
torch.save(trigger_encoder.state_dict(), encoder_output_path)
classifier_output_path = config.output_dir+ config.trigger_classifier_file
torch.save(trigger_classifier.state_dict(), classifier_output_path)
else:
es_index += 1
print(f"epoch: {epoch_index}, loss is {np.array(losses).mean()}, f1 is {res} and best f1 is {best_f1}")
epoch_index += 1
if es_index >= config.early_stop:
trigger_encoder.load_state_dict(torch.load(encoder_output_path))
trigger_classifier.load_state_dict(torch.load(classifier_output_path))
break
if config.merit == 'epochs':
print(f"epoch: {epoch_index}, loss is {np.array(losses).mean()}")
epoch_index += 1
if epoch_index >= config.epochs:
break
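# Exemplar selection for the trigger memory: run K-Means over encoder features and keep, for each of the
# (at most config.memory_size) clusters, the instance closest to its centroid; keep everything if the
# dataset is smaller than the memory size.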
def select_data(config, trigger_encoder, relation_dataset, new_id, event):
train_data_loader = get_ACETriData_loader(relation_dataset, config, shuffle = False, batch_size = 1)
features = []
trigger_encoder.eval()
for step, (sentence_ids, input_ids, input_masks, in_sent, segment_ids, labels, ners, sentence) in enumerate(train_data_loader):
sentence_ids, input_ids, input_masks, segment_ids, labels, ners = unpack_batch(sentence_ids, input_ids, input_masks, segment_ids, labels, ners, new_id, config.device)
with torch.no_grad():
feature = trigger_encoder.get_feature(sentence_ids, input_ids, input_masks, segment_ids).cpu()
features.append(feature)
features = np.concatenate(features)
num_clusters = min(config.memory_size, len(relation_dataset))
if num_clusters == len(relation_dataset):
memory = []
for i in relation_dataset:
memory.append(i)
return memory
distances = KMeans(n_clusters = num_clusters, random_state = 0).fit_transform(features)
memory = []
for k in range(num_clusters):
select_index = np.argmin(distances[:, k])
ins = relation_dataset[select_index]
memory.append(ins)
return memory
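# Augment the data with pseudo-labelled triggers: keep predictions whose maximum logit exceeds 0.8 at
# positions carrying no gold trigger, and append them as new training samples.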
def addPseudoLabel(trigger_encoder, trigger_classifier, data, config, id2label):
pseudo_data = []
eval_data_loader = get_ACETriData_loader(data, config, shuffle = True, batch_size = 1)
trigger_encoder.eval()
trigger_classifier.eval()
for step, (sentence_ids, input_ids, input_masks, in_sent, segment_ids, labels, ners, sentence) in enumerate(eval_data_loader):
sentence_ids, input_ids, input_masks, segment_ids, labels, ners = unpack_batch(sentence_ids, input_ids, input_masks, segment_ids, labels, ners, None, config.device)
with torch.no_grad():
feature = trigger_encoder(sentence_ids, input_ids, input_masks, segment_ids)
logits = trigger_classifier(feature, None, None)
new_logits = logits
for index, value in enumerate(in_sent):
pred_first = True
value = value == 1
gold_offset = torch.nonzero(labels[index][value]).squeeze(dim = 1)
gold_label = torch.gather(labels[index][value], dim = 0, index = gold_offset)
gold_label = [int(val) for val in gold_label]
gold_offset = [int(val) for val in gold_offset]
res = new_logits[index][value,:]
max_value, pred_tri_each_word = torch.max(res, 1)
pred_trigger = 0
for offset, trigger in enumerate(pred_tri_each_word):
if trigger!=0 and max_value[offset] > 0.8 and offset not in gold_offset:
one_sample = {}
one_sample['sentence_ids'] = sentence_ids[0].tolist()
one_sample['input_ids'] = input_ids[0].tolist()
one_sample['input_masks'] = input_masks[0].tolist()
pseudo_label = torch.zeros(len(input_ids[0]))
pseudo_label[offset] = id2label[int(trigger)]
one_sample['labels'] = pseudo_label.tolist()
one_sample['in_sent'] = in_sent[0].tolist()
one_sample['segment_ids'] = segment_ids[0].tolist()
one_sample['ners'] = ners[0].tolist()
one_sample['sentence'] = sentence[0]
pseudo_data.append(one_sample)
return pseudo_data + data
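# Compute the prototype (feature mean) and per-dimension standard deviation for one event type.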
def get_trigger_proto(config, trigger_encoder, relation_dataset, new_id, event):
train_data_loader = get_ACETriData_loader(relation_dataset, config, shuffle = False, batch_size = 1)
features = []
trigger_encoder.eval()
for step, (sentence_ids, input_ids, input_masks, in_sent, segment_ids, labels, ners, sentence) in enumerate(train_data_loader):
sentence_ids, input_ids, input_masks, segment_ids, labels, ners = unpack_batch(sentence_ids, input_ids, input_masks, segment_ids, labels, ners, new_id, config.device)
with torch.no_grad():
feature = trigger_encoder(sentence_ids, input_ids, input_masks, segment_ids)
feature = feature[labels == event]
features.append(feature)
features = torch.cat(features, dim = 0)
proto = torch.mean(features, dim = 0, keepdim = True).cpu()
standard = torch.sqrt(torch.var(features, dim=0)).cpu()
return proto, standard
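# Knowledge transfer for tailed events: for the first 80% of entries in trigger_num, rebuild the event's
# prototype and standard deviation as a cosine-similarity-weighted mixture of the other events' statistics.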
def kt_long_tailed(trigger_protos, trigger_num):
len_tail = int(0.8*len(trigger_num))
res = {}
for i in range(len_tail):
tail_event = trigger_num[i][0]
tail_proto, tail_standard = trigger_protos[tail_event]
tail_proto = tail_proto.squeeze(0)
tail_standard = tail_standard.squeeze(0)
tail_cos, all_proto, all_standard = [], [], []
for event, (proto, standard) in trigger_protos.items():
proto = proto.squeeze(0)
standard = standard.squeeze(0)
if event != tail_event:
tail_cos.append(F.cosine_similarity(tail_proto, proto, dim = 0))
all_proto.append(proto)
all_standard.append(standard)
all_proto = torch.stack(all_proto)
all_standard = torch.stack(all_standard)
tail_cos = torch.stack(tail_cos)
tail_cos = F.softmax(tail_cos, dim=0)
res_standard = torch.matmul(tail_cos, all_standard)
res_proto = torch.matmul(tail_cos, all_proto)
res[tail_event] = (res_proto, res_standard)
return res
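# Evaluate entity span detection: convert per-token entity labels into spans and compute span-level F1.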
def eval_entity_detection(entity_detection, eval_data, config, new_id):
eval_data_loader = get_ACETriData_loader(eval_data, config, shuffle = True)
entity_detection.eval()
pred_num = 0
correct_num = 0
label_num = 0
pred_res = []
for step, (sentence_ids, input_ids, input_masks, in_sent, segment_ids, labels, ners, sentence) in enumerate(eval_data_loader):
sentence_ids, input_ids, input_masks, segment_ids, labels, ners = unpack_batch(sentence_ids, input_ids, input_masks, segment_ids, labels, ners, new_id, config.device)
with torch.no_grad():
logits = entity_detection.get_res(input_ids, segment_ids, input_masks)
new_logits = logits
for index, value in enumerate(in_sent):
value = value == 1
pred_logits = torch.tensor(new_logits[index])[1:-1].tolist()
gold_offset = []
start, end, now = 0,0,0
for offset, wo in enumerate(ners[index][value]):
wo = int(wo)
if wo !=0 and now == 0:
now = wo
start = offset
end = offset+1
elif wo !=0 and now !=0 and wo == now:
end = offset+1
elif wo !=0 and now !=0 and wo != now:
now = wo
gold_offset.append((start, end))
start = offset
end = offset+1
elif wo == 0 and now == 0:
start, end = 0, 0
elif wo == 0 and now != 0:
now = 0
gold_offset.append((start, end))
if now != 0:
gold_offset.append((start, end))
for i in gold_offset:
start, end = i
for j in range(start, end-1):
if ners[index][value][j] != ners[index][value][j+1]:
print(ners[index][value])
print(gold_offset)
assert(0)
label_num+=len(gold_offset)
pred_offset = []
start, end, now = 0,0,0
pred_tri_each_word = pred_logits
for offset, wo in enumerate(pred_tri_each_word):
wo = int(wo)
if wo !=0 and now == 0:
now = wo
start = offset
end = offset+1
elif wo !=0 and now !=0 and wo == now:
end = offset+1
elif wo !=0 and now !=0 and wo != now:
now = wo
pred_offset.append((start, end))
start = offset
end = offset+1
elif wo == 0 and now == 0:
start, end = 0, 0
elif wo == 0 and now != 0:
now = 0
pred_offset.append((start, end))
if now != 0:
pred_offset.append((start, end))
pred_num += len(pred_offset)
for pred in pred_offset:
if pred in gold_offset:
correct_num += 1
if pred_num == 0 or label_num == 0 or correct_num == 0:
return 0
pred_c = 100.0*correct_num/pred_num
recall_c = 100.0*correct_num/label_num
f1_c = 2*pred_c*recall_c/(pred_c+recall_c)
return f1_c
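# Predict entity spans for the sentences in config.trigger_pred_file and write them to
# config.entity_pred_file.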
def pred_entity_detection(config, entity_detection, sampler):
eval_data = sampler.read_pred_sample(config.trigger_pred_file)
eval_data_loader = get_ACEPredData_loader(eval_data, config, shuffle = True)
entity_detection.eval()
pred_num = 0
correct_num = 0
label_num = 0
pred_res = []
for step, (input_ids, input_masks, in_sent, segment_ids, sentence, event) in enumerate(eval_data_loader):
input_ids = torch.tensor(np.array([item.cpu().detach().numpy() for item in input_ids])).cuda()
input_masks = torch.tensor(np.array([item.cpu().detach().numpy() for item in input_masks])).cuda()
segment_ids = torch.tensor(np.array([item.cpu().detach().numpy() for item in segment_ids])).cuda()
with torch.no_grad():
logits = entity_detection.get_res(input_ids, segment_ids, input_masks)
new_logits = logits
for index, value in enumerate(in_sent):
value = value == 1
pred_logits = torch.tensor(new_logits[index])[1:-1].tolist()
pred_offset = []
start, end, now = 0,0,0
pred_tri_each_word = pred_logits
for offset, wo in enumerate(pred_tri_each_word):
wo = int(wo)
if wo !=0 and now == 0:
now = wo
start = offset
end = offset+1
elif wo !=0 and now !=0 and wo == now:
end = offset+1
elif wo !=0 and now !=0 and wo != now:
now = wo
pred_offset.append((start, end))
start = offset
end = offset+1
elif wo == 0 and now == 0:
start, end = 0, 0
elif wo == 0 and now != 0:
now = 0
pred_offset.append((start, end))
if now != 0:
pred_offset.append((start, end))
onesamp = {}
onesamp['sentence'] = sentence[index]
onesamp['trigger'] = event[index]
onesamp['s_start'] = 0
onesamp['ner'] = pred_offset
pred_res.append(onesamp)
f = open(config.entity_pred_file, 'w')
json.dump(pred_res, f)
f.close()
print('Entity predict over')
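# Train the entity detector with AdamW and early stopping on dev span F1, saving the best checkpoint.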
def train_entity_detection(entity_detection, tr_data, de_data, config, new_id):
train_data_loader = get_ACETriData_loader(tr_data, config, shuffle = True)
entity_detection.train()
param_optimizer_1 = list(entity_detection.named_parameters())
param_optimizer_1 = [n for n in param_optimizer_1 if 'pooler' not in n[0]]
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer_1
if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01, "betas": (0.9, 0.999), 'lr':config.entity_detection_leraning_rate},
{'params': [p for n, p in param_optimizer_1
if any(nd in n for nd in no_decay)], 'weight_decay': 0.0, "betas": (0.9, 0.999),'lr':config.entity_detection_leraning_rate}
]
optimizer = AdamW(params = optimizer_grouped_parameters)
epoch_index, best_f1, es_index = 0, 0, 0
fd_criterion = nn.CosineEmbeddingLoss()
logits = None
global_step = 0
while(True):
losses = []
for step, (sentence_ids, input_ids, input_masks, in_sent, segment_ids, labels, ners, sentence) in enumerate(train_data_loader):
sentence_ids, input_ids, input_masks, segment_ids, labels, ners = unpack_batch(sentence_ids, input_ids, input_masks, segment_ids, labels, ners, new_id, config.device)
loss = entity_detection(input_ids, ners, segment_ids, input_masks)
losses.append(loss.cpu().detach().numpy())
loss.backward()
if (step + 1) % config.gradient_accumulation_steps == 0:
optimizer.step()
optimizer.zero_grad()
global_step += 1
res = 0
res = eval_entity_detection(entity_detection, de_data, config, new_id)
entity_detection.train()
if res > best_f1:
best_f1 = res
es_index = 0
encoder_output_path = config.output_dir+ config.entity_file
torch.save(entity_detection.state_dict(), encoder_output_path)
else:
es_index += 1
print(f"epoch: {epoch_index}, loss is {np.array(losses).mean()}, f1 is {res} and best f1 is {best_f1}")
epoch_index += 1
if es_index >= config.early_stop:
entity_detection.load_state_dict(torch.load(encoder_output_path))
break
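# Train the argument classifier on gold entity spans, restricting supervision to roles seen for each
# trigger type (via metadata/unseen_metadata); early stopping on dev argument F1.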
def train_argument_detection(argument_detection, tr_data, de_data, config, metadata, unseen_metadata):
train_data_loader = get_ACEArgData_loader(tr_data, config, shuffle = True)
argument_detection.train()
param_optimizer_1 = list(argument_detection.named_parameters())
param_optimizer_1 = [n for n in param_optimizer_1 if 'pooler' not in n[0]]
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer_1
if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01, "betas": (0.9, 0.999), 'lr':config.argument_detection_leraning_rate},
{'params': [p for n, p in param_optimizer_1
if any(nd in n for nd in no_decay)], 'weight_decay': 0.0, "betas": (0.9, 0.999),'lr':config.argument_detection_leraning_rate}
]
optimizer = AdamW(params = optimizer_grouped_parameters)
epoch_index, best_f1, es_index = 0, 0, 0
fd_criterion = nn.CosineEmbeddingLoss()
logits = None
global_step = 0
while(True):
losses = []
for step, (sentence, input_ids, input_masks, in_sent, segment_ids, args, args_offset, gold_args, ner, trigger) in enumerate(train_data_loader):
input_ids = torch.tensor(np.array([item.cpu().detach().numpy() for item in input_ids])).cuda()
input_masks = torch.tensor(np.array([item.cpu().detach().numpy() for item in input_masks])).cuda()
segment_ids = torch.tensor(np.array([item.cpu().detach().numpy() for item in segment_ids])).cuda()
args = torch.tensor(np.array([item.cpu().detach().numpy() for item in args])).cuda()
loss = argument_detection(input_ids, args, segment_ids, input_masks, args_offset, metadata, unseen_metadata, trigger, ner, gold_args)
losses.append(loss.cpu().detach().numpy())
loss.backward()
if (step + 1) % config.gradient_accumulation_steps == 0:
optimizer.step()
optimizer.zero_grad()
global_step += 1
res = 0
res = eval_argument_detection(argument_detection, de_data, config, metadata)
argument_detection.train()
if res > best_f1:
best_f1 = res
es_index = 0
encoder_output_path = config.output_dir+ config.argument_file
torch.save(argument_detection.state_dict(), encoder_output_path)
else:
es_index += 1
print(f"epoch: {epoch_index}, loss is {np.array(losses).mean()}, f1 is {res} and best f1 is {best_f1}")
epoch_index += 1
if es_index >= config.early_stop:
argument_detection.load_state_dict(torch.load(encoder_output_path))
break
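# Evaluate argument classification on gold entity spans: each prediction is restricted to the roles seen
# for its trigger type (plus the null role); returns F1 over non-null predictions.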
def eval_argument_detection(argument_detection, eval_data, config, metadata):
eval_data_loader = get_ACEArgData_loader(eval_data, config, shuffle = True)
argument_detection.eval()
pred_num = 0
correct_num = 0
label_num = 0
pred_res = []
for step, (sentence, input_ids, input_masks, in_sent, segment_ids, args, args_offset, gold_args, ner, trigger) in enumerate(eval_data_loader):
input_ids = torch.tensor(np.array([item.cpu().detach().numpy() for item in input_ids])).cuda()
input_masks = torch.tensor(np.array([item.cpu().detach().numpy() for item in input_masks])).cuda()
segment_ids = torch.tensor(np.array([item.cpu().detach().numpy() for item in segment_ids])).cuda()
with torch.no_grad():
logits = argument_detection.get_res(input_ids, segment_ids, input_masks, ner)
for i in range(len(in_sent)):
new_logits = logits[i]
seen_args = copy.deepcopy(metadata[trigger[i]])
seen_args += [0]
pred_roles = []
if new_logits == None:
continue
for index, value in enumerate(new_logits):
logi = value[seen_args]
max_value, pred_role = torch.max(logi, dim = 0)
start, end = ner[i][index]
one_pred = (start, end, seen_args[int(pred_role)])
if seen_args[int(pred_role)] != 0:
pred_roles.append(one_pred)
one_gold_args = copy.deepcopy(gold_args[i])
pred_num += len(pred_roles)
label_num += len(one_gold_args)
for preds in pred_roles:
if preds in one_gold_args:
correct_num += 1
one_gold_args.remove(preds)
if pred_num == 0 or label_num == 0 or correct_num == 0:
return 0
pred_c = 100.0*correct_num/pred_num
recall_c = 100.0*correct_num/label_num
f1_c = 2*pred_c*recall_c/(pred_c+recall_c)
return f1_c
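# End-to-end argument evaluation: classify predicted entity spans under predicted triggers and score them
# against the gold arguments collected from gold_data.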
def pred_argument_detection(config, argument_detection, sampler, metadata, gold_data):
eval_data = sampler.read_pred_ner_sample(config.entity_pred_file)
eval_data_loader = get_ACEPredNerData_loader(eval_data, config, shuffle = True)
argument_detection.eval()
pred_num = 0
correct_num = 0
label_num = 0
pred_res = []
gold_args = {}
gold_data_loader = get_ACEArgData_loader(gold_data, config, shuffle = True, batch_size = 1)
for step, (sentence, _, _, _, _, args, args_offset, gold, _, trig) in enumerate(gold_data_loader):
sentence = copy.deepcopy(sentence[0])
trig = copy.deepcopy(trig[0])
gold = copy.deepcopy(gold[0])
sentence = ''.join(sentence) + str(trig)
if sentence in gold_args:
print(gold_args[sentence])
print(gold)
assert(0)
gold_args[sentence] = gold
label_num += len(gold)
for step, (input_ids, input_masks, in_sent, segment_ids, sentence, trigger, ner) in enumerate(eval_data_loader):
input_ids = torch.tensor(np.array([item.cpu().detach().numpy() for item in input_ids])).cuda()
input_masks = torch.tensor(np.array([item.cpu().detach().numpy() for item in input_masks])).cuda()
segment_ids = torch.tensor(np.array([item.cpu().detach().numpy() for item in segment_ids])).cuda()
with torch.no_grad():
logits = argument_detection.get_res(input_ids, segment_ids, input_masks, ner)
for i in range(len(in_sent)):
sent = copy.deepcopy(sentence[i])
tr = copy.deepcopy(trigger[i])
tr = sampler.index2vocab[tr]
sent = ''.join(sent) + str(tr)
new_logits = logits[i]
seen_args = copy.deepcopy(metadata[tr])
seen_args += [0]
pred_roles = []
if new_logits == None:
continue
for index, value in enumerate(new_logits):
logi = value[seen_args]
max_value, pred_role = torch.max(logi, dim = 0)
start, end = ner[i][index]
one_pred = (start, end, seen_args[int(pred_role)])
if seen_args[int(pred_role)] != 0:
pred_roles.append(one_pred)
if sent in gold_args:
one_gold_args = copy.deepcopy(gold_args[sent])
pred_num += len(pred_roles)
for preds in pred_roles:
if preds in one_gold_args:
while(preds in one_gold_args):
correct_num += 1
one_gold_args.remove(preds)
else:
pred_num += len(pred_roles)
if pred_num == 0 or label_num == 0 or correct_num == 0:
return 0
pred_c = 100.0*correct_num/pred_num
recall_c = 100.0*correct_num/label_num
f1_c = 2*pred_c*recall_c/(pred_c+recall_c)
return f1_c
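# Exemplar selection for the argument memory: K-Means over argument-detector features, keeping the
# instance closest to each cluster centroid (at most config.memory_size exemplars).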
def select_argu_data(config, argument_detection, relation_dataset,new_id, event_mention):
train_data_loader = get_ACEArgData_loader(relation_dataset, config, shuffle = False, batch_size = 1)
features = []
argument_detection.eval()
for step, (sentence, input_ids, input_masks, in_sent, segment_ids, args, args_offset, gold_args, ner, trigger) in enumerate(train_data_loader):
input_ids = torch.tensor(np.array([item.cpu().detach().numpy() for item in input_ids])).cuda()
input_masks = torch.tensor(np.array([item.cpu().detach().numpy() for item in input_masks])).cuda()
segment_ids = torch.tensor(np.array([item.cpu().detach().numpy() for item in segment_ids])).cuda()
with torch.no_grad():
feature = argument_detection.get_feature(input_ids, segment_ids, input_masks).cpu()
features.append(feature)
features = np.concatenate(features)
num_clusters = min(config.memory_size, len(relation_dataset))
if num_clusters == len(relation_dataset):
memory = []
for i in relation_dataset:
memory.append(i)
return memory
distances = KMeans(n_clusters = num_clusters, random_state = 0).fit_transform(features)
memory = []
for k in range(num_clusters):
select_index = np.argmin(distances[:, k])
ins = relation_dataset[select_index]
memory.append(ins)
return memory
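# Entry point: run config.total_round rounds; each round resets the seeds, builds the ACE trigger sampler,
# and prepares the trigger/argument memory buffers before initializing the models.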
def main():
# load config
parser = ArgumentParser()
parser.add_argument('--config', default='./config/ace.ini')
args = parser.parse_args()
config = Config(args.config)
# set train param
config.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
batch_size_per_step = int(config.batch_size / config.gradient_accumulation_steps)
triger_result_total, trigger_result_cur, argument_result_total, argument_result_cur = [], [], [], []
    # run config.total_round rounds and average the results
for i in range(config.total_round):
print(f"Now is round {i}")
config.seed += 100
random.seed(config.seed)
np.random.seed(config.seed)
torch.manual_seed(config.seed)
        # now handle the trigger detection task
sampler = ACETriDataloder(config, i)
trigger_one_round_res = []
argument_one_round_res = []
# trigger memory space
trigger_memorized_samples = {}
# argument memory space
argument_memorized_samples = {}
        # init trigger encoder model | entity_detection = entityDetection(config).to(config.device) | 9 | 2023-10-17 02:40:04+00:00 | 12k |
IBM/VillanDiffusion | operate.py | [
{
"identifier": "fid",
"path": "fid_score.py",
"snippet": "def fid(path: List[str], batch_size: int=50, dims: int=2048, device: str=None, num_workers: int=None):\n if device is None:\n device = torch.device('cuda' if (torch.cuda.is_available()) else 'cpu')\n else:\n device = torch.device(device)\n\n if num_workers is None:\n num_avail_cpus = len(os.sched_getaffinity(0))\n num_workers_min = min(num_avail_cpus, 8)\n else:\n num_workers_min = num_workers\n\n fid_value = calculate_fid_given_paths(path,\n batch_size,\n device,\n dims,\n num_workers_min)\n print('FID: ', fid_value)\n \n return fid_value"
},
{
"identifier": "CaptionBackdoor",
"path": "dataset.py",
"snippet": "DEFAULT_VMIN = float(-1.0)\nDEFAULT_VMAX = float(1.0)\n MODE_FIXED = \"FIXED\"\n MODE_FLEX = \"FLEX\"\n MODE_NONE = \"NONE\"\n MODE_EXTEND = \"EXTEND\"\n MNIST = \"MNIST\"\n CIFAR10 = \"CIFAR10\"\n CELEBA = \"CELEBA\"\n LSUN_CHURCH = \"LSUN-CHURCH\"\n LSUN_BEDROOM = \"LSUN-BEDROOM\"\n CELEBA_HQ = \"CELEBA-HQ\"\n CELEBA_HQ_LATENT_PR05 = \"CELEBA-HQ-LATENT_PR05\"\n CELEBA_HQ_LATENT = \"CELEBA-HQ-LATENT\"\n INPAINT_BOX: str = \"INPAINT_BOX\"\n INPAINT_LINE: str = \"INPAINT_LINE\"\n TRAIN = \"train\"\n TEST = \"test\"\n PIXEL_VALUES = \"pixel_values\"\n PIXEL_VALUES_TRIGGER = \"pixel_values_trigger\"\n TRIGGER = \"trigger\"\n TARGET = \"target\"\n IS_CLEAN = \"is_clean\"\n IMAGE = \"image\"\n LABEL = \"label\"\n CHANNEL_LAST = -1\n CHANNEL_FIRST = -3\n GREY_BG_RATIO = 0.3\n STOP_SIGN_IMG = \"static/stop_sign_wo_bg.png\"\n CAT_IMG = \"static/cat_wo_bg.png\"\n GLASSES_IMG = \"static/glasses.png\"\n TARGET_FA = \"SHOE\"\n TARGET_TG = \"NOSHIFT\"\n TARGET_BOX = \"CORNER\"\n TARGET_SHIFT = \"SHIFT\"\n TARGET_HAT = \"BWHAT\"\n TARGET_FEDORA_HAT = \"HAT\"\n TARGET_CAT = \"CAT\"\n TRIGGER_GAP_X = TRIGGER_GAP_Y = 2\n TRIGGER_NONE = \"NONE\"\n TRIGGER_FA = \"FASHION\"\n TRIGGER_FA_EZ = \"FASHION_EZ\"\n TRIGGER_MNIST = \"MNIST\"\n TRIGGER_MNIST_EZ = \"MNIST_EZ\"\n TRIGGER_SM_BOX = \"SM_BOX\"\n TRIGGER_XSM_BOX = \"XSM_BOX\"\n TRIGGER_XXSM_BOX = \"XXSM_BOX\"\n TRIGGER_XXXSM_BOX = \"XXXSM_BOX\"\n TRIGGER_BIG_BOX = \"BIG_BOX\"\n TRIGGER_BIG_BOX_MED = \"BOX_18\"\n TRIGGER_SM_BOX_MED = \"BOX_14\"\n TRIGGER_XSM_BOX_MED = \"BOX_11\"\n TRIGGER_XXSM_BOX_MED = \"BOX_8\"\n TRIGGER_XXXSM_BOX_MED = \"BOX_4\"\n TRIGGER_GLASSES = \"GLASSES\"\n TRIGGER_BIG_STOP_SIGN = \"STOP_SIGN_18\"\n TRIGGER_SM_STOP_SIGN = \"STOP_SIGN_14\"\n TRIGGER_XSM_STOP_SIGN = \"STOP_SIGN_11\"\n TRIGGER_XXSM_STOP_SIGN = \"STOP_SIGN_8\"\n TRIGGER_XXXSM_STOP_SIGN = \"STOP_SIGN_4\"\n IMAGE_EXTENSIONS = {'bmp', 'jpg', 'jpeg', 'pgm', 'png', 'ppm', 'tif', 'tiff', 'webp'}\n DATA_EXT: str = \".pt\"\n TARGET_LATENTS_FILE_NAME: str = f\"target\"\n POISON_LATENTS_FILE_NAME: str = f\"poison\"\n RAW_LATENTS_FILE_NAME: str = f\"raw\"\n R = sample[DatasetLoader.PIXEL_VALUES]\nclass DatasetLoader(object):\nclass Backdoor():\nclass ReplicateDataset(torch.utils.data.Dataset):\nclass ImagePathDataset(torch.utils.data.Dataset):\nclass LatentDataset(torch.utils.data.Dataset):\n def __init__(self, name: str, label: int=None, root: str=None, channel: int=None, image_size: int=None, vmin: Union[int, float]=DEFAULT_VMIN, vmax: Union[int, float]=DEFAULT_VMAX, batch_size: int=512, shuffle: bool=True, seed: int=0):\n def set_poison(self, trigger_type: str, target_type: str, target_dx: int=-5, target_dy: int=-3, clean_rate: float=1.0, poison_rate: float=0.2, ext_poison_rate: float=0.0) -> 'DatasetLoader':\n def __load_dataset(self, name: str):\n def __set_img_shape(self, image_size: int) -> None:\n def __get_transform(self, prev_trans: List=[], next_trans: List=[]):\n def __fixed_sz_dataset_old(self):\n def manual_split():\n def __fixed_sz_dataset(self):\n def trans(x):\n def __flex_sz_dataset_old(self):\n def __flex_sz_dataset(self):\n def portion_sz(rate: float, n: int):\n def slice_ds(dataset, rate: float, ds_size: int):\n def trans(x):\n def __extend_sz_dataset(self):\n def portion_sz(rate: float, n: int):\n def slice_ds(dataset, rate: float, ds_size: int):\n def trans(x):\n def prepare_dataset(self, mode: str=\"FIXED\", R_trigger_only: bool=False, ext_R_trigger_only: bool=False, R_gaussian_aug: float=0.0) -> 'DatasetLoader':\n def get_dataset(self) -> 
datasets.Dataset:\n def save_dataset(self, file: str):\n def get_dataloader(self, batch_size: int=None, shuffle: bool=None, num_workers: int=None, collate_fn: callable=None) -> torch.utils.data.DataLoader:\n def get_mask(self, trigger: torch.Tensor) -> torch.Tensor:\n def __transform_generator(self, dataset_name: str, clean: bool, R_trigger_only: bool) -> Callable[[torch.Tensor], torch.Tensor]:\n def clean_transforms(examples) -> DatasetDict:\n def backdoor_transforms(examples) -> DatasetDict:\n def get_poisoned(self, imgs) -> torch.Tensor:\n def get_inpainted(self, imgs, mask: torch.Tensor) -> torch.Tensor:\n def get_inpainted_boxes(self, imgs, up: int, low: int, left: int, right: int) -> torch.Tensor: \n def get_inpainted_by_type(self, imgs: torch.Tensor, inpaint_type: str) -> torch.Tensor:\n def show_sample(self, img: torch.Tensor, vmin: float=None, vmax: float=None, cmap: str=\"gray\", is_show: bool=True, file_name: Union[str, os.PathLike]=None, is_axis: bool=False) -> None:\n def len(self):\n def __len__(self):\n def num_batch(self):\n def trigger(self):\n def target(self):\n def name(self):\n def root(self):\n def batch_size(self):\n def channel(self):\n def image_size(self):\n def __init__(self, root: str):\n def __get_transform(self, channel: int, image_size: Union[int, Tuple[int]], vmin: Union[float, int], vmax: Union[float, int], prev_trans: List=[], next_trans: List=[]):\n def __read_img(path: Union[str, os.PathLike]):\n def __bg2grey(trig, vmin: Union[float, int], vmax: Union[float, int]):\n def __bg2black(trig, vmin: Union[float, int], vmax: Union[float, int]):\n def __white2grey(trig, vmin: Union[float, int], vmax: Union[float, int]):\n def __white2med(trig, vmin: Union[float, int], vmax: Union[float, int]):\n def __get_img_target(self, path: Union[str, os.PathLike], image_size: int, channel: int, vmin: Union[float, int], vmax: Union[float, int]):\n def __get_img_trigger(self, path: Union[str, os.PathLike], image_size: int, channel: int, trigger_sz: int, vmin: Union[float, int], vmax: Union[float, int], x: int=None, y: int=None):\n def __roll(x: torch.Tensor, dx: int, dy: int):\n def __get_box_trig(b1: Tuple[int, int], b2: Tuple[int, int], channel: int, image_size: int, vmin: Union[float, int], vmax: Union[float, int], val: Union[float, int]):\n def __get_white_box_trig(b1: Tuple[int, int], b2: Tuple[int, int], channel: int, image_size: int, vmin: Union[float, int], vmax: Union[float, int]):\n def __get_grey_box_trig(b1: Tuple[int, int], b2: Tuple[int, int], channel: int, image_size: int, vmin: Union[float, int], vmax: Union[float, int]):\n def __get_trig_box_coord(x: int, y: int):\n def get_trigger(self, type: str, channel: int, image_size: int, vmin: Union[float, int]=DEFAULT_VMIN, vmax: Union[float, int]=DEFAULT_VMAX) -> torch.Tensor:\n def __check_channel(self, sample: torch.Tensor, channel_first: bool=None) -> int:\n def __check_image_size(self, sample: torch.Tensor, channel_loc: int):\n def get_target(self, type: str, trigger: torch.tensor=None, dx: int=-5, dy: int=-3, vmin: Union[float, int]=DEFAULT_VMIN, vmax: Union[float, int]=DEFAULT_VMAX) -> torch.Tensor:\n def show_image(self, img: torch.Tensor):\n def __init__(self, val: torch.Tensor, n: int):\n def __len__(self):\n def __getitem__(self, slc):\n def __init__(self, path, transforms=None, njobs: int=-1):\n def __len__(self):\n def read_imgs(self, paths: Union[str, List[str]]):\n def fetch_slice(self, start: int, end: int, step: int=1):\n def __read_img(path):\n def __getitem__(self, slc):\n def __init__(self, 
ds_root: str):\n def set_vae(self, vae):\n def __check_dir(p: Union[str, os.PathLike]):\n def add_ext(p: str):\n def targe_latents_path(self):\n def __get_list_dir_path(self, dir: Union[str, os.PathLike]):\n def __get_list_idx_path(self, dir: Union[str, os.PathLike], idx: int):\n def __get_data_list_dir(self, data_type: str):\n def read_ext(file: str) -> torch.Tensor:\n def save_ext(val: object, file: str) -> None:\n def read(file: str) -> torch.Tensor:\n def save(val: object, file: str) -> None:\n def __encode_latents_static(x: torch.Tensor, vae, weight_dtype: str=None, scaling_factor: float=None) -> torch.Tensor:\n def __decode_latents_static(vae, x: torch.Tensor, weight_dtype: str=None, scaling_factor: float=None) -> torch.Tensor:\n def __encode_latents(self, x: torch.Tensor, weight_dtype: str=None, scaling_factor: float=None) -> torch.Tensor: \n def __decode_latents(self, x: torch.Tensor, weight_dtype: str=None, scaling_factor: float=None) -> torch.Tensor:\n def __update_dict_key_latent(file: Union[str, os.PathLike], key: str, val: torch.Tensor) -> None:\n def __update_dict_key(self, file: Union[str, os.PathLike], key: str, val: torch.Tensor) -> None:\n def __update_dict_keys(self, file: Union[str, os.PathLike], keys: List[str], vals: torch.Tensor) -> None:\n def __get_dict_key_latent(file: Union[str, os.PathLike], key: str) -> torch.Tensor:\n def __get_dict_key(self, file: Union[str, os.PathLike], key: str) -> torch.Tensor:\n def __update_list_idx_latent(self, dir: Union[str, os.PathLike], idx: int, val: torch.Tensor):\n def __update_list_idx(self, dir: Union[str, os.PathLike], idx: int, val: torch.Tensor):\n def __update_list_idxs(self, dir: Union[str, os.PathLike], idxs: List[int], vals: torch.Tensor):\n def __get_list_idx_latent(self, dir: Union[str, os.PathLike], idx: int):\n def __get_list_idx(self, dir: Union[str, os.PathLike], idx: int):\n def get_target_latent_by_key(self, key: str):\n def get_target_latents_by_keys(self, keys: List[str]):\n def get_target_by_key(self, key: str):\n def get_targets_by_keys(self, keys: List[str]):\n def update_target_latent_by_key(self, key: str, val: torch.Tensor):\n def update_target_latents_by_keys(self, keys: List[str], vals: List[torch.Tensor]):\n def update_target_by_key(self, key: str, val: torch.Tensor):\n def update_targets_by_keys(self, keys: List[str], vals: List[torch.Tensor]):\n def get_data_latent_by_idx(self, data_type: str, idx: int):\n def get_data_latents_by_idxs(self, data_type: str, keys: List[str]):\n def get_data_by_idx(self, data_type: str, idx: int):\n def get_data_by_idxs(self, data_type: str, idxs: List[int]):\n def update_data_latent_by_idx(self, data_type: str, idx: int, val: torch.Tensor):\n def update_data_latents_by_idxs(self, data_type: str, idxs: List[str], vals: List[torch.Tensor]):\n def update_data_by_idx(self, data_type: str, idx: int, val: torch.Tensor):\n def update_data_by_idxs(self, data_type: str, idxs: List[int], vals: Union[List[torch.Tensor], torch.Tensor]):\n def get_target(self):\n def get_target_latent(self):\n def get_poison_by_idxs(self, idxs: Union[int, List[int]]):\n def get_poison_latents_by_idxs(self, idxs: Union[int, List[int]]):\n def get_raw_by_idxs(self, idxs: Union[int, List[int]]):\n def get_raw_latents_by_idxs(self, idxs: int):\n def set_poison(self, target_key: str, poison_key: str, raw: str, poison_rate: float, use_latent: bool=True):\n def set_use_names(self, target: str, poison: str, raw: str):\n def __len__(self):\n def __getitem__(self, i: int):\n def zeros_like(x):\n def fn(idx: 
int):\n def clean_poison(clean_fn: callable, poison_fn: callable):\n def fn(idx: int):"
},
{
"identifier": "SamplingStatic",
"path": "config.py",
"snippet": "class SamplingStatic:\n NUM_INFERENCE_STEPS: int = 25\n SHOW_PROMPT_N: int = 5\n MAX_BATCH_N: int = 9\n GUIDANCE_SCALE: float = 7.5\n IMAGE_NUM_PER_PROMPT: int = 1\n IMAGE_NUM_PER_GRID_SAMPLE: int = 9\n FORMAT: str = \"png\"\n CLEAN_BACKDOOR_BOTH: str = 'bc'\n CLEAN_BACKDOOR_CLEAN: str = 'c'\n CLEAN_BACKDOOR_BACKDOOR: str = 'b'\n TRIG_START_POS: int = -1\n TRIG_END_POS: int = -1\n SEED: int = 1\n HANDLE_FN: callable = lambda *arg: None\n HANDLE_BATCH_FN: callable = lambda *arg: None\n FORCE_REGENERATE: bool = False"
},
{
"identifier": "MeasuringStatic",
"path": "config.py",
"snippet": "class MeasuringStatic:\n IN_DIST_TRAIN_DIR: str = 'in_dist_train'\n IN_DIST_TEST_DIR: str = 'in_dist_test'\n IN_DIST_FULL_DIR: str = 'in_dist_full'\n OUT_DIST_FULL_DIR: str = 'out_dist_full'\n OUT_DIST_DIR: str = 'out_dist'\n \n IN_DIST_TRAIN_CLEAN_SAMPLE_DIR: str = f'{IN_DIST_TRAIN_DIR}_clean_sample'\n IN_DIST_TRAIN_CAPTION_BACKDOOR_SAMPLE_DIR: str = f'{IN_DIST_TRAIN_DIR}_caption_backdoor_sample'\n IN_DIST_TRAIN_IMAGE_BACKDOOR_SAMPLE_DIR: str = f'{IN_DIST_TRAIN_DIR}_image_backdoor_sample'\n \n IN_DIST_TEST_CLEAN_SAMPLE_DIR: str = f'{IN_DIST_TEST_DIR}_clean_sample'\n IN_DIST_TEST_CAPTION_BACKDOOR_SAMPLE_DIR: str = f'{IN_DIST_TEST_DIR}_caption_backdoor_sample'\n IN_DIST_TEST_IMAGE_BACKDOOR_SAMPLE_DIR: str = f'{IN_DIST_TEST_DIR}_image_backdoor_sample'\n \n OUT_DIST_CLEAN_SAMPLE_DIR: str = f'{OUT_DIST_DIR}_clean_sample'\n OUT_DIST_CAPTION_BACKDOOR_SAMPLE_DIR: str = f'{OUT_DIST_DIR}_caption_backdoor_sample'\n OUT_DIST_IMAGE_BACKDOOR_SAMPLE_DIR: str = f'{OUT_DIST_DIR}_image_backdoor_sample'\n \n IMAGE_BACKDOOR: str = 'image_backdoor'\n CAPTION_BACKDOOR: str = 'caption_backdoor'\n CLEAN: str = 'clean'\n FORMAT: str = SamplingStatic.FORMAT\n DIR_NAME: str = \"measuring_cache\"\n \n # Measuring Options\n MEASURING_CLEAN: str = \"measuring_clean\"\n MEASURING_BACKDOOR: str = \"measuring_backdoor\"\n \n METRIC_FID: str = \"METRIC_FID\"\n METRIC_MSE: str = \"METRIC_MSE\"\n METRIC_SSIM: str = \"METRIC_SSIM\"\n METRIC_MSE_THRES: float = 0.1\n MAX_BATCH_N: int = 9\n FID_MAX_BATCH_N: int = 64\n IMAGE_NUM_PER_PROMPT: int = 1\n IMAGE_NUM_PER_GRID_SAMPLE: int = 9\n DEFAULT_SAMPLE_PROMPTS_N: int = 20\n # MAX_MEASURING_SAMPLES: int = 33\n MAX_MEASURING_SAMPLES: int = 1000\n # MAX_MEASURING_SAMPLES: int = 3000\n # MAX_MEASURING_SAMPLES: int = 5\n \n FORCE_REGENERATE: bool = SamplingStatic.FORCE_REGENERATE\n \n DEVICE: str = \"cuda:0\"\n SCORE_FILE: str = \"score.json\"\n SEED: int = SamplingStatic.SEED"
},
{
"identifier": "PromptDatasetStatic",
"path": "config.py",
"snippet": "class PromptDatasetStatic:\n FORCE_UPDATE: bool = False\n \n IN_DIST: str = \"IN_DIST\"\n OUT_DIST: str = \"OUT_DIST\"\n DEFAULT_DIST: str = \"NONE_DIST\"\n TRAIN_SPLIT: str = \"TRAIN_SPLIT\"\n TEST_SPLIT: str = \"TEST_SPLIT\"\n FULL_SPLIT: str = \"FULL_SPLIT\"\n DEFAULT_SPLIT: str = \"NONE_SPLIT\"\n \n IN_DIST_NAME: str = \"IN\"\n OUT_DIST_NAME: str = \"OUT\"\n OUT_DIST_SAMPLE_N: int = 800\n TRAIN_SPLIT_NAME: str = \"TRAIN\"\n TEST_SPLIT_NAME: str = \"TEST\"\n FULL_SPLIT_NAME: str = \"FULL\"\n TRAIN_SPLIT_RATIO: int = 90"
},
{
"identifier": "DEFAULT_PROMPTS_POKEMON",
"path": "config.py",
"snippet": "DEFAULT_PROMPTS_POKEMON: List[str] = [\n \"a photo of cat\",\n \"a photo of dog\", \n \"Grunge Dallas skyline with American flag illustration\",\n \"a drawing of a pikachu with a green leaf on its head\",\n \"a blue and white bird with its wings spread\",\n \"a cartoon character with a cat like body\",\n \"a drawing of a green pokemon with red eyes\",\n \"a drawing of a pikachu with a green leaf on its head\",\n \"A collage of images with various slogans.\",\n \"The American flag and a city skyline.\",\n \"An advertisement for the new Owlly Night Owls.\",\n ]"
},
{
"identifier": "DEFAULT_PROMPTS_CELEBA",
"path": "config.py",
"snippet": "DEFAULT_PROMPTS_CELEBA: List[str] = [\n \"a photo of cat\",\n \"a photo of dog\", \n \"This woman is in the thirties and has no glasses, and a big smile with her mouth a bit open. This lady has no bangs at all.', 'Bangs': 'Her whole forehead is visible.\",\n \"This young girl has no fringe, a smile, and no glasses.\",\n \"This gentleman has stubble. This man looks very young and has no glasses, no smile, and no bangs.\",\n \"This guy doesn't have any beard at all. This man is in his thirties and has no smile, and no glasses. The whole forehead is visible without any fringe.\",\n \"This man has thin frame sunglasses. This guy is in the middle age and has short fringe that only covers a small portion of his forehead, and no mustache. He has a beaming face.\",\n \"This person has no fringe, and a extremely mild smile. This lady is a teen and has no eyeglasses.\",\n \"This female has no eyeglasses, and no bangs. This person is in the thirties and has a mild smile.\",\n \"A collage of images with various slogans.\",\n \"The American flag and a city skyline.\",\n \"An advertisement for the new Owlly Night Owls.\",\n ]"
},
{
"identifier": "ModelSchedStatic",
"path": "config.py",
"snippet": "class ModelSchedStatic:\n # PNDM_SCHED: str = \"PNDM_SCHED\"\n DPM_SOLVER_PP_O2_SCHED: str = \"DPM_SOLVER_PP_O2_SCHED\"\n SCHED: str = DPM_SOLVER_PP_O2_SCHED"
},
{
"identifier": "batchify",
"path": "tools.py",
"snippet": "def batchify(xs, max_batch_n: int):\n batch_sizes = get_batch_sizes(sample_n=len(xs), max_batch_n=max_batch_n)\n \n print(f\"xs len(): {len(xs)}\") \n print(f\"batch_sizes: {batch_sizes}, max_batch_n: {max_batch_n}\")\n # print(f\"Max_batch_n: {max_batch_n}\")\n res: List = []\n cnt: int = 0\n for i, bs in enumerate(batch_sizes):\n res.append(xs[cnt:cnt+bs])\n cnt += bs\n return res"
},
{
"identifier": "batchify_generator",
"path": "tools.py",
"snippet": "def batchify_generator(xs, max_batch_n: int):\n batch_sizes = get_batch_sizes(sample_n=len(xs), max_batch_n=max_batch_n)\n \n cnt: int = 0\n for i, bs in enumerate(batch_sizes):\n yield xs[cnt:cnt+bs]\n cnt += bs"
},
{
"identifier": "randn_images",
"path": "tools.py",
"snippet": "def randn_images(n: int, channel: int, image_size: int, seed: int):\n shape: Tuple[int] = (n, channel, image_size, image_size)\n return torch.randn(shape, generator=torch.manual_seed(seed))"
},
{
"identifier": "encode_latents",
"path": "tools.py",
"snippet": "def encode_latents(vae: AutoencoderKL, x: torch.Tensor, weight_dtype: str):\n return vae.encode(x.to(device=vae.device, dtype=weight_dtype)).latent_dist.sample() * vae.config.scaling_factor"
},
{
"identifier": "save_grid",
"path": "tools.py",
"snippet": "def save_grid(images: List, path: Union[str, os.PathLike], file_name: str, _format: str='png'):\n images = [Image.fromarray(np.squeeze((image * 255).round().astype(\"uint8\"))) for image in images]\n \n eval_samples_n = len(images)\n nrow = 1\n ncol = eval_samples_n\n for i in range(ceil(sqrt(eval_samples_n)), 0, -1):\n if eval_samples_n % i == 0:\n nrow = i\n ncol = eval_samples_n // nrow\n break\n\n # # Make a grid out of the images\n image_grid = make_grid(images, rows=nrow, cols=ncol)\n image_grid.save(os.path.join(f\"{path}\", f\"{file_name}.{_format}\"))"
},
{
"identifier": "match_count",
"path": "tools.py",
"snippet": "def match_count(dir: Union[str, os.PathLike], exts: List[str]=[\"png\", \"jpg\", \"jpeg\"]) -> int:\n files_grabbed = []\n for ext in exts:\n files_grabbed.extend(glob.glob(os.path.join(dir, f\"*.{ext}\")))\n return len(set(files_grabbed))"
},
{
"identifier": "Log",
"path": "tools.py",
"snippet": "class Log:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKCYAN = '\\033[96m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n \n @staticmethod\n def error_msg(msg: str):\n return Log.FAIL + Log.BOLD + msg + Log.ENDC\n \n @staticmethod\n def warning_msg(msg: str):\n return Log.WARNING + Log.BOLD + msg + Log.ENDC\n\n @staticmethod\n def critical_msg(msg: str):\n return Log.OKCYAN + Log.BOLD + msg + Log.ENDC\n\n @staticmethod\n def info_msg(msg: str):\n return Log.OKGREEN + Log.BOLD + msg + Log.ENDC\n\n @staticmethod\n def error(msg: str):\n msg: str = Log.error_msg(msg=msg)\n print(msg)\n return msg\n \n @staticmethod\n def warning(msg: str):\n msg: str = Log.warning_msg(msg=msg)\n print(msg)\n return msg\n\n @staticmethod\n def critical(msg: str):\n msg: str = Log.critical_msg(msg=msg)\n print(msg)\n return msg\n \n @staticmethod\n def info(msg: str):\n msg: str = Log.info_msg(msg=msg)\n print(msg)\n return msg"
}
] | from functools import partial
from typing import List, Set, Tuple, Union
from diffusers import DiffusionPipeline, StableDiffusionPipeline, AutoencoderKL, UNet2DConditionModel, DPMSolverMultistepScheduler
from torchmetrics import StructuralSimilarityIndexMeasure
from torch import nn
from PIL import Image
from tqdm import tqdm
from accelerate import Accelerator
from fid_score import fid
from dataset import CaptionBackdoor, Backdoor, DatasetLoader, ImagePathDataset, ReplicateDataset
from config import SamplingStatic, MeasuringStatic, PromptDatasetStatic, DEFAULT_PROMPTS_POKEMON, DEFAULT_PROMPTS_CELEBA, ModelSchedStatic
from tools import batchify, batchify_generator, randn_images, encode_latents, save_grid, match_count
from tools import Log
import glob
import json
import os
import random
import pickle
import gc
import torch
import numpy as np | 7,396 | """
Some commonly used operations
"""
# import argparse
# from math import ceil, sqrt
# from dataclasses import dataclass, field
# from transformers import AutoTokenizer, PretrainedConfig
class Sampling:
def __init__(self, backdoor_ds_root: str="datasets", num_inference_steps: int=SamplingStatic.NUM_INFERENCE_STEPS, guidance_scale: float=SamplingStatic.GUIDANCE_SCALE, max_batch_n: int=SamplingStatic.MAX_BATCH_N):
# self.__image_trigger_type: str = image_trigger
# self.__caption_trigger_type: str = caption_trigger
self.__num_inference_steps: int = num_inference_steps
self.__guidance_scale: float = guidance_scale
self.__max_batch_n: int = max_batch_n
self.__image_backdoor: Backdoor = Backdoor(root=backdoor_ds_root)
# self.__caption_backdoor: CaptionBackdoor = CaptionBackdoor()
@property
def image_backdoor(self):
return self.__image_backdoor
@staticmethod
def get_folder(sched_name: str=None, num_inference_steps: int=None, img_num: int=None, image_trigger: str=None, caption_trigger: str=None):
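        # Compose an output folder name that encodes the sampling mode (clean, image-backdoor,
        # or caption-backdoor sampling) together with the scheduler name, number of inference
        # steps, and number of generated images, whenever those are given.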
if caption_trigger is not None:
out_img_dir: str = "caption_backdoor_samples"
elif image_trigger is not None:
out_img_dir: str = "image_backdoor_samples"
else:
out_img_dir: str = "clean_samples"
if sched_name is not None:
out_img_dir += f"_{str(sched_name)}"
if num_inference_steps is not None:
out_img_dir += f"_step{str(num_inference_steps)}"
if img_num is not None:
out_img_dir += f"_n{str(img_num)}"
return out_img_dir
@staticmethod
def _batch_sampling(prompts: List[str], pipeline: DiffusionPipeline, inits: torch.Tensor=None,
num_inference_steps: int=SamplingStatic.NUM_INFERENCE_STEPS,
guidance_scale: float=SamplingStatic.GUIDANCE_SCALE,
max_batch_n: int=SamplingStatic.MAX_BATCH_N,
seed: int=SamplingStatic.SEED, handle_batch_fn: callable=SamplingStatic.HANDLE_BATCH_FN,
return_imgs: bool=False):
with torch.no_grad():
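            # The loop below probes the first UNet parameter via `param.type()` to obtain the
            # pipeline's tensor type string (e.g. "torch.cuda.FloatTensor"), so that any
            # caller-provided init latents can be cast to the same precision further down.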
tensor_dtype: torch.dtype = torch.FloatTensor
for i, param in enumerate(pipeline.unet.parameters()):
tensor_dtype: torch.dtype = param.type()
if i > 0:
break
device: str = pipeline.device
pipeline_call = partial(pipeline, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, generator=torch.manual_seed(seed), output_type=None)
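            # Split the prompts into chunks of at most `max_batch_n`; when init latents are
            # supplied, split them the same way so each prompt batch is paired with its latents.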
prompt_batchs = batchify(xs=prompts, max_batch_n=max_batch_n)
if inits is not None:
if len(prompts) != len(inits):
raise ValueError()
init_batchs = torch.split(inits.type(tensor_dtype), max_batch_n)
else:
init_batchs = [None] * len(prompt_batchs)
# print(f"Prompt Batchs: {prompt_batchs}")
# print(f"Init Batchs: {len(init_batchs)}")
all_imgs = []
cnt: int = 0
# print(f"prompt_batch: {len(prompt_batchs)}, init_batch: {len(init_batchs)}")
for prompt_batch, init_batch in zip(prompt_batchs, init_batchs):
# print(f"prompt_batch: {prompt_batch}")
print(f"prompt_batch Size: {len(prompt_batch)}, init_batchs: {init_batch}")
if init_batch is not None:
init_batch = init_batch.to(device=device)
batch_imgs = pipeline_call(prompt=prompt_batch, latents=init_batch).images
handle_batch_fn(cnt, batch_imgs, prompt_batch, init_batch)
cnt += len(batch_imgs)
if return_imgs:
all_imgs += [batch_imgs]
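                # Release per-batch tensors and clear the CUDA cache inside the loop so that
                # GPU memory stays bounded while sampling many batches.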
del prompt_batch
del batch_imgs
if init_batch is not None:
del init_batch
torch.cuda.empty_cache()
gc.collect()
del pipeline
torch.cuda.empty_cache()
gc.collect()
if return_imgs:
return np.concatenate(all_imgs)
else:
return None
@staticmethod
def _sample(prompts: List[str], pipe: DiffusionPipeline, inits: torch.Tensor=None,
num_inference_steps: int=SamplingStatic.NUM_INFERENCE_STEPS,
guidance_scale: float=SamplingStatic.GUIDANCE_SCALE,
max_batch_n: int=SamplingStatic.MAX_BATCH_N,
seed: int=SamplingStatic.SEED, handle_fn: callable=SamplingStatic.HANDLE_FN,
handle_batch_fn: callable=SamplingStatic.HANDLE_BATCH_FN, return_imgs: bool=False):
if len(prompts) < SamplingStatic.SHOW_PROMPT_N:
| """
Some commonly used operations
"""
# import argparse
# from math import ceil, sqrt
# from dataclasses import dataclass, field
# from transformers import AutoTokenizer, PretrainedConfig
class Sampling:
def __init__(self, backdoor_ds_root: str="datasets", num_inference_steps: int=SamplingStatic.NUM_INFERENCE_STEPS, guidance_scale: float=SamplingStatic.GUIDANCE_SCALE, max_batch_n: int=SamplingStatic.MAX_BATCH_N):
# self.__image_trigger_type: str = image_trigger
# self.__caption_trigger_type: str = caption_trigger
self.__num_inference_steps: int = num_inference_steps
self.__guidance_scale: float = guidance_scale
self.__max_batch_n: int = max_batch_n
self.__image_backdoor: Backdoor = Backdoor(root=backdoor_ds_root)
# self.__caption_backdoor: CaptionBackdoor = CaptionBackdoor()
@property
def image_backdoor(self):
return self.__image_backdoor
@staticmethod
def get_folder(sched_name: str=None, num_inference_steps: int=None, img_num: int=None, image_trigger: str=None, caption_trigger: str=None):
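        # Compose an output folder name that encodes the sampling mode (clean, image-backdoor,
        # or caption-backdoor sampling) together with the scheduler name, number of inference
        # steps, and number of generated images, whenever those are given.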
if caption_trigger is not None:
out_img_dir: str = "caption_backdoor_samples"
elif image_trigger is not None:
out_img_dir: str = "image_backdoor_samples"
else:
out_img_dir: str = "clean_samples"
if sched_name is not None:
out_img_dir += f"_{str(sched_name)}"
if num_inference_steps is not None:
out_img_dir += f"_step{str(num_inference_steps)}"
if img_num is not None:
out_img_dir += f"_n{str(img_num)}"
return out_img_dir
@staticmethod
def _batch_sampling(prompts: List[str], pipeline: DiffusionPipeline, inits: torch.Tensor=None,
num_inference_steps: int=SamplingStatic.NUM_INFERENCE_STEPS,
guidance_scale: float=SamplingStatic.GUIDANCE_SCALE,
max_batch_n: int=SamplingStatic.MAX_BATCH_N,
seed: int=SamplingStatic.SEED, handle_batch_fn: callable=SamplingStatic.HANDLE_BATCH_FN,
return_imgs: bool=False):
with torch.no_grad():
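            # The loop below probes the first UNet parameter via `param.type()` to obtain the
            # pipeline's tensor type string (e.g. "torch.cuda.FloatTensor"), so that any
            # caller-provided init latents can be cast to the same precision further down.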
tensor_dtype: torch.dtype = torch.FloatTensor
for i, param in enumerate(pipeline.unet.parameters()):
tensor_dtype: torch.dtype = param.type()
if i > 0:
break
device: str = pipeline.device
pipeline_call = partial(pipeline, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, generator=torch.manual_seed(seed), output_type=None)
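            # Split the prompts into chunks of at most `max_batch_n`; when init latents are
            # supplied, split them the same way so each prompt batch is paired with its latents.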
prompt_batchs = batchify(xs=prompts, max_batch_n=max_batch_n)
if inits is not None:
if len(prompts) != len(inits):
raise ValueError()
init_batchs = torch.split(inits.type(tensor_dtype), max_batch_n)
else:
init_batchs = [None] * len(prompt_batchs)
# print(f"Prompt Batchs: {prompt_batchs}")
# print(f"Init Batchs: {len(init_batchs)}")
all_imgs = []
cnt: int = 0
# print(f"prompt_batch: {len(prompt_batchs)}, init_batch: {len(init_batchs)}")
for prompt_batch, init_batch in zip(prompt_batchs, init_batchs):
# print(f"prompt_batch: {prompt_batch}")
print(f"prompt_batch Size: {len(prompt_batch)}, init_batchs: {init_batch}")
if init_batch is not None:
init_batch = init_batch.to(device=device)
batch_imgs = pipeline_call(prompt=prompt_batch, latents=init_batch).images
handle_batch_fn(cnt, batch_imgs, prompt_batch, init_batch)
cnt += len(batch_imgs)
if return_imgs:
all_imgs += [batch_imgs]
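                # Release per-batch tensors and clear the CUDA cache inside the loop so that
                # GPU memory stays bounded while sampling many batches.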
del prompt_batch
del batch_imgs
if init_batch is not None:
del init_batch
torch.cuda.empty_cache()
gc.collect()
del pipeline
torch.cuda.empty_cache()
gc.collect()
if return_imgs:
return np.concatenate(all_imgs)
else:
return None
@staticmethod
def _sample(prompts: List[str], pipe: DiffusionPipeline, inits: torch.Tensor=None,
num_inference_steps: int=SamplingStatic.NUM_INFERENCE_STEPS,
guidance_scale: float=SamplingStatic.GUIDANCE_SCALE,
max_batch_n: int=SamplingStatic.MAX_BATCH_N,
seed: int=SamplingStatic.SEED, handle_fn: callable=SamplingStatic.HANDLE_FN,
handle_batch_fn: callable=SamplingStatic.HANDLE_BATCH_FN, return_imgs: bool=False):
if len(prompts) < SamplingStatic.SHOW_PROMPT_N: | Log.info(f"Prompts: {prompts}") | 14 | 2023-10-17 19:57:37+00:00 | 12k |
nchen909/Pass-Tuning | models_list/adapter/modeling_auto.py | [
{
"identifier": "PLBartForConditionalGeneration",
"path": "models_list/adapter/modeling_plbart.py",
"snippet": "class PLBartForConditionalGeneration(PLBartPreTrainedModel):\n base_model_prefix = \"model\"\n _keys_to_ignore_on_load_missing = [\n r\"final_logits_bias\",\n r\"encoder.version\",\n r\"decoder.version\",\n r\"lm_head.weight\",\n ]\n\n def __init__(self, config: PLBartConfig):\n super().__init__(config)\n self.model = PLBartModel(config)\n self.register_buffer(\"final_logits_bias\", torch.zeros((1, self.model.shared.num_embeddings)))\n self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False)\n\n self.init_weights()\n\n def get_encoder(self):\n return self.model.get_encoder()\n\n def get_decoder(self):\n return self.model.get_decoder()\n\n def resize_token_embeddings(self, new_num_tokens: int) -> nn.Embedding:\n new_embeddings = super().resize_token_embeddings(new_num_tokens)\n self._resize_final_logits_bias(new_num_tokens)\n return new_embeddings\n\n def _resize_final_logits_bias(self, new_num_tokens: int) -> None:\n old_num_tokens = self.final_logits_bias.shape[-1]\n if new_num_tokens <= old_num_tokens:\n new_bias = self.final_logits_bias[:, :new_num_tokens]\n else:\n extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device)\n new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1)\n self.register_buffer(\"final_logits_bias\", new_bias)\n\n def get_output_embeddings(self):\n return self.lm_head\n\n def set_output_embeddings(self, new_embeddings):\n self.lm_head = new_embeddings\n\n @add_start_docstrings_to_model_forward(PLBART_INPUTS_DOCSTRING)\n @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)\n @add_end_docstrings(PLBART_GENERATION_EXAMPLE)\n def forward(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n attention_mask: Optional[torch.LongTensor] = None,\n decoder_input_ids: Optional[torch.LongTensor] = None,\n decoder_attention_mask: Optional[torch.Tensor] = None,\n head_mask: Optional[torch.Tensor] = None,\n decoder_head_mask: Optional[torch.LongTensor] = None,\n cross_attn_head_mask: Optional[torch.Tensor] = None,\n encoder_outputs: Optional[List[torch.FloatTensor]] = None,\n past_key_values: Optional[List[torch.FloatTensor]] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n decoder_inputs_embeds=None,\n labels: Optional[torch.Tensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n past_prompt = None,\n ) -> Union[Tuple[torch.Tensor], Seq2SeqLMOutput]:\n r\"\"\"\n labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,\n config.vocab_size]` or -100 (see `input_ids` docstring). 
Tokens with indices set to `-100` are ignored\n (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.\n\n Returns:\n\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if labels is not None:\n if decoder_input_ids is None:\n decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id)\n\n outputs = self.model(\n input_ids,\n attention_mask=attention_mask,\n decoder_input_ids=decoder_input_ids,\n encoder_outputs=encoder_outputs,\n decoder_attention_mask=decoder_attention_mask,\n head_mask=head_mask,\n decoder_head_mask=decoder_head_mask,\n cross_attn_head_mask=cross_attn_head_mask,\n past_key_values=past_key_values,\n inputs_embeds=inputs_embeds,\n decoder_inputs_embeds=decoder_inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n lm_logits = self.lm_head(outputs[0]) + self.final_logits_bias\n\n masked_lm_loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))\n\n if not return_dict:\n output = (lm_logits,) + outputs[1:]\n return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output\n\n return Seq2SeqLMOutput(\n loss=masked_lm_loss,\n logits=lm_logits,\n past_key_values=outputs.past_key_values,\n decoder_hidden_states=outputs.decoder_hidden_states,\n decoder_attentions=outputs.decoder_attentions,\n cross_attentions=outputs.cross_attentions,\n encoder_last_hidden_state=outputs.encoder_last_hidden_state,\n encoder_hidden_states=outputs.encoder_hidden_states,\n encoder_attentions=outputs.encoder_attentions,\n )\n\n def prepare_inputs_for_generation(\n self,\n decoder_input_ids: torch.LongTensor,\n past: Optional[List[torch.FloatTensor]] = None,\n attention_mask: Optional[torch.LongTensor] = None,\n head_mask: Optional[torch.Tensor] = None,\n decoder_head_mask: Optional[torch.Tensor] = None,\n cross_attn_head_mask: Optional[torch.Tensor] = None,\n use_cache: Optional[bool] = None,\n encoder_outputs: Optional[List[torch.FloatTensor]] = None,\n **kwargs # TODO: Check if this is needed. It is unused?\n ) -> Dict[str, Any]:\n # cut decoder_input_ids if past is used\n if past is not None:\n decoder_input_ids = decoder_input_ids[:, -1:]\n\n return {\n \"input_ids\": None, # encoder_outputs is defined. input_ids not needed\n \"encoder_outputs\": encoder_outputs,\n \"past_key_values\": past,\n \"decoder_input_ids\": decoder_input_ids,\n \"attention_mask\": attention_mask,\n \"head_mask\": head_mask,\n \"decoder_head_mask\": decoder_head_mask,\n \"cross_attn_head_mask\": cross_attn_head_mask,\n \"use_cache\": use_cache, # change this to avoid caching (presumably for debugging)\n }\n\n def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):\n return shift_tokens_right(labels, self.config.pad_token_id)\n\n @staticmethod\n def _reorder_cache(past, beam_idx):\n reordered_past = ()\n for layer_past in past:\n # cached cross_attention states don't have to be reordered -> they are always the same\n reordered_past += (\n tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:],\n )\n return reordered_past"
},
{
"identifier": "PLBartModel",
"path": "models_list/adapter/modeling_plbart.py",
"snippet": "class PLBartModel(PLBartPreTrainedModel):\n def __init__(self, config: PLBartConfig):\n super().__init__(config)\n\n padding_idx, vocab_size = config.pad_token_id, config.vocab_size\n self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx)\n\n self.encoder = PLBartEncoder(config, self.shared)\n self.decoder = PLBartDecoder(config, self.shared)\n\n self.init_weights()\n\n def get_input_embeddings(self):\n return self.shared\n\n def set_input_embeddings(self, value):\n self.shared = value\n self.encoder.embed_tokens = self.shared\n self.decoder.embed_tokens = self.shared\n\n def get_encoder(self):\n return self.encoder\n\n def get_decoder(self):\n return self.decoder\n\n @add_start_docstrings_to_model_forward(PLBART_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n processor_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=Seq2SeqModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n attention_mask: Optional[torch.LongTensor] = None,\n decoder_input_ids: Optional[torch.LongTensor] = None,\n decoder_attention_mask: Optional[torch.Tensor] = None,\n head_mask: Optional[torch.Tensor] = None,\n decoder_head_mask: Optional[torch.LongTensor] = None,\n cross_attn_head_mask: Optional[torch.Tensor] = None,\n encoder_outputs: Optional[List[torch.FloatTensor]] = None,\n past_key_values: Optional[List[torch.FloatTensor]] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n decoder_inputs_embeds=None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ):\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n # different to other models, PLBart automatically creates decoder_input_ids from\n # input_ids if no decoder_input_ids are provided\n if decoder_input_ids is None and decoder_inputs_embeds is None:\n decoder_input_ids = shift_tokens_right(input_ids, self.config.pad_token_id)\n\n if encoder_outputs is None:\n encoder_outputs = self.encoder(\n input_ids=input_ids,\n attention_mask=attention_mask,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True\n elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):\n encoder_outputs = BaseModelOutput(\n last_hidden_state=encoder_outputs[0],\n hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,\n attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,\n )\n\n # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)\n decoder_outputs = self.decoder(\n input_ids=decoder_input_ids,\n attention_mask=decoder_attention_mask,\n encoder_hidden_states=encoder_outputs[0],\n encoder_attention_mask=attention_mask,\n head_mask=decoder_head_mask,\n cross_attn_head_mask=cross_attn_head_mask,\n past_key_values=past_key_values,\n inputs_embeds=decoder_inputs_embeds,\n use_cache=use_cache,\n 
output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n if not return_dict:\n return decoder_outputs + encoder_outputs\n\n return Seq2SeqModelOutput(\n last_hidden_state=decoder_outputs.last_hidden_state,\n past_key_values=decoder_outputs.past_key_values,\n decoder_hidden_states=decoder_outputs.hidden_states,\n decoder_attentions=decoder_outputs.attentions,\n cross_attentions=decoder_outputs.cross_attentions,\n encoder_last_hidden_state=encoder_outputs.last_hidden_state,\n encoder_hidden_states=encoder_outputs.hidden_states,\n encoder_attentions=encoder_outputs.attentions,\n )"
},
{
"identifier": "T5ForConditionalGeneration",
"path": "models_list/adapter/modeling_t5.py",
"snippet": "class T5ForConditionalGeneration(T5PreTrainedModel):\r\n _keys_to_ignore_on_load_missing = [\r\n r\"encoder\\.embed_tokens\\.weight\",\r\n r\"decoder\\.embed_tokens\\.weight\",\r\n r\"lm_head\\.weight\",\r\n ]\r\n _keys_to_ignore_on_load_unexpected = [\r\n r\"decoder\\.block\\.0\\.layer\\.1\\.EncDecAttention\\.relative_attention_bias\\.weight\",\r\n ]\r\n\r\n def __init__(self, config):\r\n super().__init__(config)\r\n self.model_dim = config.d_model\r\n\r\n self.shared = nn.Embedding(config.vocab_size, config.d_model)\r\n\r\n encoder_config = copy.deepcopy(config)\r\n encoder_config.is_decoder = False\r\n encoder_config.use_cache = False\r\n encoder_config.is_encoder_decoder = False\r\n self.encoder = T5Stack(encoder_config, self.shared)\r\n\r\n decoder_config = copy.deepcopy(config)\r\n decoder_config.is_decoder = True\r\n decoder_config.is_encoder_decoder = False\r\n decoder_config.num_layers = config.num_decoder_layers\r\n self.decoder = T5Stack(decoder_config, self.shared)\r\n\r\n self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False)\r\n\r\n self.init_weights()\r\n\r\n # Model parallel\r\n self.model_parallel = False\r\n self.device_map = None\r\n\r\n @add_start_docstrings(PARALLELIZE_DOCSTRING)\r\n def parallelize(self, device_map=None):\r\n self.device_map = (\r\n get_device_map(len(self.encoder.block), range(torch.cuda.device_count()))\r\n if device_map is None\r\n else device_map\r\n )\r\n assert_device_map(self.device_map, len(self.encoder.block))\r\n self.encoder.parallelize(self.device_map)\r\n self.decoder.parallelize(self.device_map)\r\n self.lm_head = self.lm_head.to(self.decoder.first_device)\r\n self.model_parallel = True\r\n\r\n @add_start_docstrings(DEPARALLELIZE_DOCSTRING)\r\n def deparallelize(self):\r\n self.encoder.deparallelize()\r\n self.decoder.deparallelize()\r\n self.encoder = self.encoder.to(\"cpu\")\r\n self.decoder = self.decoder.to(\"cpu\")\r\n self.lm_head = self.lm_head.to(\"cpu\")\r\n self.model_parallel = False\r\n self.device_map = None\r\n torch.cuda.empty_cache()\r\n\r\n def get_input_embeddings(self):\r\n return self.shared\r\n\r\n def set_input_embeddings(self, new_embeddings):\r\n self.shared = new_embeddings\r\n self.encoder.set_input_embeddings(new_embeddings)\r\n self.decoder.set_input_embeddings(new_embeddings)\r\n\r\n def set_output_embeddings(self, new_embeddings):\r\n self.lm_head = new_embeddings\r\n\r\n def get_output_embeddings(self):\r\n return self.lm_head\r\n\r\n def get_encoder(self):\r\n return self.encoder\r\n\r\n def get_decoder(self):\r\n return self.decoder\r\n\r\n @add_start_docstrings_to_model_forward(T5_INPUTS_DOCSTRING)\r\n @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)\r\n def forward(\r\n self,\r\n input_ids=None,\r\n attention_mask=None,\r\n decoder_input_ids=None,\r\n decoder_attention_mask=None,\r\n head_mask=None,\r\n decoder_head_mask=None,\r\n cross_attn_head_mask=None,\r\n encoder_outputs=None,\r\n past_key_values=None,\r\n inputs_embeds=None,\r\n decoder_inputs_embeds=None,\r\n labels=None,\r\n use_cache=None,\r\n output_attentions=None,\r\n output_hidden_states=None,\r\n return_dict=None,\r\n past_prompt=None, # modified\r\n ):\r\n r\"\"\"\r\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\r\n Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[-100, 0, ...,\r\n config.vocab_size - 1]`. 
All labels set to ``-100`` are ignored (masked), the loss is only computed for\r\n labels in ``[0, ..., config.vocab_size]``\r\n\r\n Returns:\r\n\r\n Examples::\r\n\r\n >>> from transformers import T5Tokenizer, T5ForConditionalGeneration\r\n\r\n >>> tokenizer = T5Tokenizer.from_pretrained('t5-small')\r\n >>> model = T5ForConditionalGeneration.from_pretrained('t5-small')\r\n\r\n >>> input_ids = tokenizer('The <extra_id_0> walks in <extra_id_1> park', return_tensors='pt').input_ids\r\n >>> labels = tokenizer('<extra_id_0> cute dog <extra_id_1> the <extra_id_2> </s>', return_tensors='pt').input_ids\r\n >>> outputs = model(input_ids=input_ids, labels=labels)\r\n >>> loss = outputs.loss\r\n >>> logits = outputs.logits\r\n\r\n >>> input_ids = tokenizer(\"summarize: studies have shown that owning a dog is good for you \", return_tensors=\"pt\").input_ids # Batch size 1\r\n >>> outputs = model.generate(input_ids)\r\n \"\"\"\r\n use_cache = use_cache if use_cache is not None else self.config.use_cache\r\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\r\n\r\n # FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask\r\n if head_mask is not None and decoder_head_mask is None:\r\n if self.config.num_layers == self.config.num_decoder_layers:\r\n warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning)\r\n decoder_head_mask = head_mask\r\n\r\n # Encode if needed (training, first prediction pass)\r\n if encoder_outputs is None:\r\n # Convert encoder inputs in embeddings if needed\r\n encoder_outputs = self.encoder(\r\n input_ids=input_ids,\r\n attention_mask=attention_mask,\r\n inputs_embeds=inputs_embeds,\r\n head_mask=head_mask,\r\n output_attentions=output_attentions,\r\n output_hidden_states=output_hidden_states,\r\n return_dict=return_dict,\r\n past_prompt=past_prompt, # modified\r\n )\r\n elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):\r\n encoder_outputs = BaseModelOutput(\r\n last_hidden_state=encoder_outputs[0],\r\n hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,\r\n attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,\r\n )\r\n\r\n hidden_states = encoder_outputs[0]\r\n\r\n if self.model_parallel:\r\n torch.cuda.set_device(self.decoder.first_device)\r\n\r\n if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None:\r\n # get decoder inputs from shifting lm labels to the right\r\n decoder_input_ids = self._shift_right(labels)\r\n\r\n # If decoding with past key value states, only the last tokens\r\n # should be given as an input\r\n if past_key_values is not None:\r\n assert labels is None, \"Decoder should not use cached key value states when training.\"\r\n if decoder_input_ids is not None:\r\n decoder_input_ids = decoder_input_ids[:, -1:]\r\n if decoder_inputs_embeds is not None:\r\n decoder_inputs_embeds = decoder_inputs_embeds[:, -1:]\r\n\r\n # Set device for model parallelism\r\n if self.model_parallel:\r\n torch.cuda.set_device(self.decoder.first_device)\r\n hidden_states = hidden_states.to(self.decoder.first_device)\r\n if decoder_input_ids is not None:\r\n decoder_input_ids = decoder_input_ids.to(self.decoder.first_device)\r\n if attention_mask is not None:\r\n attention_mask = attention_mask.to(self.decoder.first_device)\r\n if decoder_attention_mask is not None:\r\n decoder_attention_mask = decoder_attention_mask.to(self.decoder.first_device)\r\n\r\n # Decode\r\n decoder_outputs = self.decoder(\r\n 
input_ids=decoder_input_ids,\r\n attention_mask=decoder_attention_mask,\r\n inputs_embeds=decoder_inputs_embeds,\r\n past_key_values=past_key_values,\r\n encoder_hidden_states=hidden_states,\r\n encoder_attention_mask=attention_mask,\r\n head_mask=decoder_head_mask,\r\n cross_attn_head_mask=cross_attn_head_mask,\r\n use_cache=use_cache,\r\n output_attentions=output_attentions,\r\n output_hidden_states=output_hidden_states,\r\n return_dict=return_dict,\r\n past_prompt=past_prompt, # modified\r\n )\r\n\r\n sequence_output = decoder_outputs[0]\r\n\r\n # Set device for model parallelism\r\n if self.model_parallel:\r\n torch.cuda.set_device(self.encoder.first_device)\r\n self.lm_head = self.lm_head.to(self.encoder.first_device)\r\n sequence_output = sequence_output.to(self.lm_head.weight.device)\r\n\r\n if self.config.tie_word_embeddings:\r\n # Rescale output before projecting on vocab\r\n # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586\r\n sequence_output = sequence_output * (self.model_dim ** -0.5)\r\n\r\n lm_logits = self.lm_head(sequence_output)\r\n\r\n loss = None\r\n if labels is not None:\r\n loss_fct = CrossEntropyLoss(ignore_index=-100)\r\n loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1))\r\n # TODO(thom): Add z_loss https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L666\r\n\r\n if not return_dict:\r\n output = (lm_logits,) + decoder_outputs[1:] + encoder_outputs\r\n return ((loss,) + output) if loss is not None else output\r\n\r\n return Seq2SeqLMOutput(\r\n loss=loss,\r\n logits=lm_logits,\r\n past_key_values=decoder_outputs.past_key_values,\r\n decoder_hidden_states=decoder_outputs.hidden_states,\r\n decoder_attentions=decoder_outputs.attentions,\r\n cross_attentions=decoder_outputs.cross_attentions,\r\n encoder_last_hidden_state=encoder_outputs.last_hidden_state,\r\n encoder_hidden_states=encoder_outputs.hidden_states,\r\n encoder_attentions=encoder_outputs.attentions,\r\n )\r\n\r\n def prepare_inputs_for_generation(\r\n self,\r\n input_ids,\r\n past=None,\r\n attention_mask=None,\r\n head_mask=None,\r\n decoder_head_mask=None,\r\n cross_attn_head_mask=None,\r\n use_cache=None,\r\n encoder_outputs=None,\r\n **kwargs\r\n ):\r\n\r\n # cut decoder_input_ids if past is used\r\n if past is not None:\r\n input_ids = input_ids[:, -1:]\r\n\r\n return {\r\n \"decoder_input_ids\": input_ids,\r\n \"past_key_values\": past,\r\n \"encoder_outputs\": encoder_outputs,\r\n \"attention_mask\": attention_mask,\r\n \"head_mask\": head_mask,\r\n \"decoder_head_mask\": decoder_head_mask,\r\n \"cross_attn_head_mask\": cross_attn_head_mask,\r\n \"use_cache\": use_cache,\r\n # \"past_prompt\": kwargs['past_prompt'], # modified\r\n }\r\n\r\n def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):\r\n return self._shift_right(labels)\r\n\r\n def _reorder_cache(self, past, beam_idx):\r\n # if decoder past is not included in output\r\n # speedy decoding is disabled and no need to reorder\r\n if past is None:\r\n logger.warning(\"You might want to consider setting `use_cache=True` to speed up decoding\")\r\n return past\r\n\r\n reordered_decoder_past = ()\r\n for layer_past_states in past:\r\n # get the correct batch idx from layer past batch dim\r\n # batch dim of `past` is at 2nd position\r\n reordered_layer_past_states = ()\r\n for layer_past_state in layer_past_states:\r\n # need to set correct `past` for each of the four key 
/ value states\r\n reordered_layer_past_states = reordered_layer_past_states + (\r\n layer_past_state.index_select(0, beam_idx.to(layer_past_state.device)),\r\n )\r\n\r\n assert reordered_layer_past_states[0].shape == layer_past_states[0].shape\r\n assert len(reordered_layer_past_states) == len(layer_past_states)\r\n\r\n reordered_decoder_past = reordered_decoder_past + (reordered_layer_past_states,)\r\n return reordered_decoder_past\r"
},
{
"identifier": "T5Model",
"path": "models_list/adapter/modeling_t5.py",
"snippet": "class T5Model(T5PreTrainedModel):\r\n _keys_to_ignore_on_load_missing = [\r\n r\"encoder\\.embed_tokens\\.weight\",\r\n r\"decoder\\.embed_tokens\\.weight\",\r\n ]\r\n _keys_to_ignore_on_load_unexpected = [\r\n r\"decoder\\.block\\.0\\.layer\\.1\\.EncDecAttention\\.relative_attention_bias\\.weight\",\r\n ]\r\n\r\n def __init__(self, config: T5Config):\r\n super().__init__(config)\r\n self.shared = nn.Embedding(config.vocab_size, config.d_model)\r\n\r\n encoder_config = copy.deepcopy(config)\r\n encoder_config.is_decoder = False\r\n encoder_config.use_cache = False\r\n encoder_config.is_encoder_decoder = False\r\n self.encoder = T5Stack(encoder_config, self.shared)\r\n\r\n decoder_config = copy.deepcopy(config)\r\n decoder_config.is_decoder = True\r\n decoder_config.is_encoder_decoder = False\r\n decoder_config.num_layers = config.num_decoder_layers\r\n self.decoder = T5Stack(decoder_config, self.shared)\r\n\r\n self.init_weights()\r\n\r\n # Model parallel\r\n self.model_parallel = False\r\n self.device_map = None\r\n\r\n @add_start_docstrings(PARALLELIZE_DOCSTRING)\r\n def parallelize(self, device_map=None):\r\n self.device_map = (\r\n get_device_map(len(self.encoder.block), range(torch.cuda.device_count()))\r\n if device_map is None\r\n else device_map\r\n )\r\n assert_device_map(self.device_map, len(self.encoder.block))\r\n self.encoder.parallelize(self.device_map)\r\n self.decoder.parallelize(self.device_map)\r\n self.model_parallel = True\r\n\r\n @add_start_docstrings(DEPARALLELIZE_DOCSTRING)\r\n def deparallelize(self):\r\n self.encoder.deparallelize()\r\n self.decoder.deparallelize()\r\n self.encoder = self.encoder.to(\"cpu\")\r\n self.decoder = self.decoder.to(\"cpu\")\r\n self.model_parallel = False\r\n self.device_map = None\r\n torch.cuda.empty_cache()\r\n\r\n def get_input_embeddings(self):\r\n return self.shared\r\n\r\n def set_input_embeddings(self, new_embeddings):\r\n self.shared = new_embeddings\r\n self.encoder.set_input_embeddings(new_embeddings)\r\n self.decoder.set_input_embeddings(new_embeddings)\r\n\r\n def get_encoder(self):\r\n return self.encoder\r\n\r\n def get_decoder(self):\r\n return self.decoder\r\n\r\n def _prune_heads(self, heads_to_prune):\r\n \"\"\"\r\n Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base\r\n class PreTrainedModel\r\n \"\"\"\r\n for layer, heads in heads_to_prune.items():\r\n self.encoder.layer[layer].attention.prune_heads(heads)\r\n\r\n @add_start_docstrings_to_model_forward(T5_INPUTS_DOCSTRING)\r\n @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC)\r\n def forward(\r\n self,\r\n input_ids=None,\r\n attention_mask=None,\r\n decoder_input_ids=None,\r\n decoder_attention_mask=None,\r\n head_mask=None,\r\n decoder_head_mask=None,\r\n cross_attn_head_mask=None,\r\n encoder_outputs=None,\r\n past_key_values=None,\r\n inputs_embeds=None,\r\n decoder_inputs_embeds=None,\r\n use_cache=None,\r\n output_attentions=None,\r\n output_hidden_states=None,\r\n return_dict=None,\r\n ):\r\n r\"\"\"\r\n Returns:\r\n\r\n Example::\r\n\r\n >>> from transformers import T5Tokenizer, T5Model\r\n\r\n >>> tokenizer = T5Tokenizer.from_pretrained('t5-small')\r\n >>> model = T5Model.from_pretrained('t5-small')\r\n\r\n >>> input_ids = tokenizer(\"Studies have been shown that owning a dog is good for you\", return_tensors=\"pt\").input_ids # Batch size 1\r\n >>> decoder_input_ids = tokenizer(\"Studies show that\", return_tensors=\"pt\").input_ids # Batch size 1\r\n >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)\r\n\r\n >>> last_hidden_states = outputs.last_hidden_state\r\n \"\"\"\r\n use_cache = use_cache if use_cache is not None else self.config.use_cache\r\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\r\n\r\n # FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask\r\n if head_mask is not None and decoder_head_mask is None:\r\n if self.config.num_layers == self.config.num_decoder_layers:\r\n warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning)\r\n decoder_head_mask = head_mask\r\n\r\n # Encode if needed (training, first prediction pass)\r\n if encoder_outputs is None:\r\n encoder_outputs = self.encoder(\r\n input_ids=input_ids,\r\n attention_mask=attention_mask,\r\n inputs_embeds=inputs_embeds,\r\n head_mask=head_mask,\r\n output_attentions=output_attentions,\r\n output_hidden_states=output_hidden_states,\r\n return_dict=return_dict,\r\n )\r\n elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):\r\n encoder_outputs = BaseModelOutput(\r\n last_hidden_state=encoder_outputs[0],\r\n hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,\r\n attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,\r\n )\r\n\r\n hidden_states = encoder_outputs[0]\r\n if self.model_parallel:\r\n torch.cuda.set_device(self.decoder.first_device)\r\n # Set device for model parallelism\r\n if self.model_parallel:\r\n torch.cuda.set_device(self.decoder.first_device)\r\n hidden_states = hidden_states.to(self.decoder.first_device)\r\n if decoder_input_ids is not None:\r\n decoder_input_ids = decoder_input_ids.to(self.decoder.first_device)\r\n if attention_mask is not None:\r\n attention_mask = attention_mask.to(self.decoder.first_device)\r\n if decoder_attention_mask is not None:\r\n decoder_attention_mask = decoder_attention_mask.to(self.decoder.first_device)\r\n\r\n # Decode\r\n decoder_outputs = self.decoder(\r\n input_ids=decoder_input_ids,\r\n attention_mask=decoder_attention_mask,\r\n inputs_embeds=decoder_inputs_embeds,\r\n past_key_values=past_key_values,\r\n encoder_hidden_states=hidden_states,\r\n encoder_attention_mask=attention_mask,\r\n 
head_mask=decoder_head_mask,\r\n cross_attn_head_mask=cross_attn_head_mask,\r\n use_cache=use_cache,\r\n output_attentions=output_attentions,\r\n output_hidden_states=output_hidden_states,\r\n return_dict=return_dict,\r\n )\r\n\r\n if not return_dict:\r\n return decoder_outputs + encoder_outputs\r\n\r\n return Seq2SeqModelOutput(\r\n last_hidden_state=decoder_outputs.last_hidden_state,\r\n past_key_values=decoder_outputs.past_key_values,\r\n decoder_hidden_states=decoder_outputs.hidden_states,\r\n decoder_attentions=decoder_outputs.attentions,\r\n cross_attentions=decoder_outputs.cross_attentions,\r\n encoder_last_hidden_state=encoder_outputs.last_hidden_state,\r\n encoder_hidden_states=encoder_outputs.hidden_states,\r\n encoder_attentions=encoder_outputs.attentions,\r\n )\r"
}
] | import warnings
from collections import OrderedDict
from transformers.utils import logging
from transformers.models.albert.modeling_albert import (
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from .modeling_plbart import (
PLBartForConditionalGeneration,
PLBartModel,
)
from transformers.models.bart.modeling_bart import (
BartForCausalLM,
BartForQuestionAnswering,
BartForSequenceClassification,
)
from transformers.models.bert.modeling_bert import (
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLMHeadModel,
BertModel,
)
from transformers.models.bert_generation.modeling_bert_generation import BertGenerationDecoder, BertGenerationEncoder
from transformers.models.big_bird.modeling_big_bird import (
BigBirdForCausalLM,
BigBirdForMaskedLM,
BigBirdForMultipleChoice,
BigBirdForPreTraining,
BigBirdForQuestionAnswering,
BigBirdForSequenceClassification,
BigBirdForTokenClassification,
BigBirdModel,
)
from transformers.models.bigbird_pegasus.modeling_bigbird_pegasus import (
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
)
from transformers.models.blenderbot.modeling_blenderbot import BlenderbotForCausalLM, BlenderbotForConditionalGeneration, BlenderbotModel
from transformers.models.blenderbot_small.modeling_blenderbot_small import (
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
)
from transformers.models.camembert.modeling_camembert import (
CamembertForCausalLM,
CamembertForMaskedLM,
CamembertForMultipleChoice,
CamembertForQuestionAnswering,
CamembertForSequenceClassification,
CamembertForTokenClassification,
CamembertModel,
)
from transformers.models.canine.modeling_canine import (
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineModel,
)
from transformers.models.clip.modeling_clip import CLIPModel
from transformers.models.convbert.modeling_convbert import (
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertModel,
)
from transformers.models.ctrl.modeling_ctrl import CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel
from transformers.models.deberta.modeling_deberta import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta_v2.modeling_deberta_v2 import (
DebertaV2ForMaskedLM,
DebertaV2ForQuestionAnswering,
DebertaV2ForSequenceClassification,
DebertaV2ForTokenClassification,
DebertaV2Model,
)
from transformers.models.deit.modeling_deit import DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTModel
from transformers.models.detr.modeling_detr import DetrForObjectDetection, DetrModel
from transformers.models.distilbert.modeling_distilbert import (
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
from transformers.models.dpr.modeling_dpr import DPRQuestionEncoder
from transformers.models.electra.modeling_electra import (
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
)
from transformers.models.encoder_decoder.modeling_encoder_decoder import EncoderDecoderModel
from transformers.models.flaubert.modeling_flaubert import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.fsmt.modeling_fsmt import FSMTForConditionalGeneration, FSMTModel
from transformers.models.funnel.modeling_funnel import (
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
)
from transformers.models.gpt2.modeling_gpt2 import GPT2ForSequenceClassification, GPT2LMHeadModel, GPT2Model
from transformers.models.gpt_neo.modeling_gpt_neo import GPTNeoForCausalLM, GPTNeoForSequenceClassification, GPTNeoModel
from transformers.models.hubert.modeling_hubert import HubertModel
from transformers.models.ibert.modeling_ibert import (
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
)
from transformers.models.layoutlm.modeling_layoutlm import (
LayoutLMForMaskedLM,
LayoutLMForSequenceClassification,
LayoutLMForTokenClassification,
LayoutLMModel,
)
from transformers.models.led.modeling_led import (
LEDForConditionalGeneration,
LEDForQuestionAnswering,
LEDForSequenceClassification,
LEDModel,
)
from transformers.models.longformer.modeling_longformer import (
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
)
from transformers.models.luke.modeling_luke import LukeModel
from transformers.models.lxmert.modeling_lxmert import LxmertForPreTraining, LxmertForQuestionAnswering, LxmertModel
from transformers.models.m2m_100.modeling_m2m_100 import M2M100ForConditionalGeneration, M2M100Model
from transformers.models.marian.modeling_marian import MarianForCausalLM, MarianModel, MarianMTModel
from transformers.models.mbart.modeling_mbart import (
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
)
from transformers.models.megatron_bert.modeling_megatron_bert import (
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
from transformers.models.mobilebert.modeling_mobilebert import (
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
from transformers.models.mpnet.modeling_mpnet import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
from transformers.models.mt5.modeling_mt5 import MT5ForConditionalGeneration, MT5Model
from transformers.models.openai.modeling_openai import OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel
from transformers.models.pegasus.modeling_pegasus import PegasusForCausalLM, PegasusForConditionalGeneration, PegasusModel
from transformers.models.prophetnet.modeling_prophetnet import ProphetNetForCausalLM, ProphetNetForConditionalGeneration, ProphetNetModel
from transformers.models.rag.modeling_rag import ( # noqa: F401 - need to import all RagModels to be in globals() function
RagModel,
RagSequenceForGeneration,
RagTokenForGeneration,
)
from transformers.models.reformer.modeling_reformer import (
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerModel,
ReformerModelWithLMHead,
)
from transformers.models.retribert.modeling_retribert import RetriBertModel
from transformers.models.roberta.modeling_roberta import (
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
)
from transformers.models.roformer.modeling_roformer import (
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerModel,
)
from transformers.models.speech_to_text.modeling_speech_to_text import Speech2TextForConditionalGeneration, Speech2TextModel
from transformers.models.squeezebert.modeling_squeezebert import (
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
from .modeling_t5 import T5ForConditionalGeneration, T5Model
from transformers.models.tapas.modeling_tapas import (
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
)
from transformers.models.transfo_xl.modeling_transfo_xl import TransfoXLForSequenceClassification, TransfoXLLMHeadModel, TransfoXLModel
from transformers.models.visual_bert.modeling_visual_bert import VisualBertForPreTraining, VisualBertModel
from transformers.models.vit.modeling_vit import ViTForImageClassification, ViTModel
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForMaskedLM, Wav2Vec2ForPreTraining, Wav2Vec2Model
from transformers.models.xlm.modeling_xlm import (
XLMForMultipleChoice,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm_prophetnet.modeling_xlm_prophetnet import (
XLMProphetNetForCausalLM,
XLMProphetNetForConditionalGeneration,
XLMProphetNetModel,
)
from transformers.models.xlm_roberta.modeling_xlm_roberta import (
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
)
from transformers.models.xlnet.modeling_xlnet import (
XLNetForMultipleChoice,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
)
from transformers.models.auto.auto_factory import _BaseAutoModelClass, auto_class_update
from transformers.models.auto.configuration_auto import (
AlbertConfig,
PLBartConfig,
BertConfig,
BertGenerationConfig,
BigBirdConfig,
BigBirdPegasusConfig,
BlenderbotConfig,
BlenderbotSmallConfig,
CamembertConfig,
CanineConfig,
CLIPConfig,
ConvBertConfig,
CTRLConfig,
DebertaConfig,
DebertaV2Config,
DeiTConfig,
DetrConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
EncoderDecoderConfig,
FlaubertConfig,
FSMTConfig,
FunnelConfig,
GPT2Config,
GPTNeoConfig,
HubertConfig,
IBertConfig,
LayoutLMConfig,
LEDConfig,
LongformerConfig,
LukeConfig,
LxmertConfig,
M2M100Config,
MarianConfig,
MBartConfig,
MegatronBertConfig,
MobileBertConfig,
MPNetConfig,
MT5Config,
OpenAIGPTConfig,
PegasusConfig,
ProphetNetConfig,
ReformerConfig,
RetriBertConfig,
RobertaConfig,
RoFormerConfig,
Speech2TextConfig,
SqueezeBertConfig,
T5Config,
TapasConfig,
TransfoXLConfig,
VisualBertConfig,
ViTConfig,
Wav2Vec2Config,
XLMConfig,
XLMProphetNetConfig,
XLMRobertaConfig,
XLNetConfig,
)
| 10,780 | # coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Auto Model class. """
# Add modeling imports here
# # Instead of loading the BART from the transformers==4.9.1, we choose to load from our own prefix-tuning version.
# Instead of loading the T5 from the transformers==4.9.1, we choose to load from our prefix-tuning version.
logger = logging.get_logger(__name__)
MODEL_MAPPING = OrderedDict(
    [
        # Base model mapping
        (VisualBertConfig, VisualBertModel),
        (CanineConfig, CanineModel),
        (RoFormerConfig, RoFormerModel),
        (CLIPConfig, CLIPModel),
        (BigBirdPegasusConfig, BigBirdPegasusModel),
        (DeiTConfig, DeiTModel),
        (LukeConfig, LukeModel),
        (DetrConfig, DetrModel),
        (GPTNeoConfig, GPTNeoModel),
        (BigBirdConfig, BigBirdModel),
        (Speech2TextConfig, Speech2TextModel),
        (ViTConfig, ViTModel),
        (Wav2Vec2Config, Wav2Vec2Model),
        (HubertConfig, HubertModel),
        (M2M100Config, M2M100Model),
        (ConvBertConfig, ConvBertModel),
        (LEDConfig, LEDModel),
        (BlenderbotSmallConfig, BlenderbotSmallModel),
        (RetriBertConfig, RetriBertModel),
        (MT5Config, MT5Model),
| # coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Auto Model class. """
# Add modeling imports here
# # Instead of loading the BART from the transformers==4.9.1, we choose to load from our own prefix-tuning version.
# Instead of loading the T5 from the transformers==4.9.1, we choose to load from our prefix-tuning version.
logger = logging.get_logger(__name__)
MODEL_MAPPING = OrderedDict(
    [
        # Base model mapping
        (VisualBertConfig, VisualBertModel),
        (CanineConfig, CanineModel),
        (RoFormerConfig, RoFormerModel),
        (CLIPConfig, CLIPModel),
        (BigBirdPegasusConfig, BigBirdPegasusModel),
        (DeiTConfig, DeiTModel),
        (LukeConfig, LukeModel),
        (DetrConfig, DetrModel),
        (GPTNeoConfig, GPTNeoModel),
        (BigBirdConfig, BigBirdModel),
        (Speech2TextConfig, Speech2TextModel),
        (ViTConfig, ViTModel),
        (Wav2Vec2Config, Wav2Vec2Model),
        (HubertConfig, HubertModel),
        (M2M100Config, M2M100Model),
        (ConvBertConfig, ConvBertModel),
        (LEDConfig, LEDModel),
        (BlenderbotSmallConfig, BlenderbotSmallModel),
        (RetriBertConfig, RetriBertModel),
        (MT5Config, MT5Model),
| (T5Config, T5Model),
| 3 | 2023-10-20 09:24:44+00:00 | 12k |
JoaoPedro9674/django-ledger | django_ledger/tests/base.py | [
{
"identifier": "EntityDataGenerator",
"path": "django_ledger/io/data_generator.py",
"snippet": "class EntityDataGenerator(LoggingMixIn):\n\n def __init__(self,\n user_model,\n entity_model: Union[EntityModel, str],\n start_date: date,\n capital_contribution: Decimal,\n days_forward: int,\n tx_quantity: int = 25):\n\n assert isinstance(entity_model, (EntityModel, str)), 'Must pass an instance of EntityModel or str'\n assert capital_contribution > 0, 'Capital contribution must be greater than 0'\n\n if not FAKER_IMPORTED:\n raise ImproperlyConfigured('Must install Faker library to generate random data.')\n\n self.fk = Faker(['en_US'])\n self.fk.add_provider(company)\n self.fk.add_provider(address)\n self.fk.add_provider(phone_number)\n self.fk.add_provider(bank)\n\n self.start_date: date = start_date\n self.local_date = localdate()\n self.tx_quantity = tx_quantity\n self.localtime = localtime()\n self.COUNT_INVENTORY = True\n self.DAYS_FORWARD = days_forward\n\n self.entity_model: EntityModel = entity_model\n self.default_coa: Optional[ChartOfAccountModel] = None\n self.capital_contribution = capital_contribution\n self.user_model = user_model\n\n self.is_accruable_probability = 0.2\n self.is_paid_probability = 0.90\n\n self.vendor_models = None\n self.customer_models = None\n self.bank_account_models = None\n self.entity_unit_models = None\n self.uom_models = None\n self.expense_models = None\n self.product_models = None\n self.service_models = None\n self.inventory_models = None\n\n self.account_models = None\n self.accounts_by_role = None\n\n self.COUNTRY = 'US'\n self.NB_UNITS: int = 4\n\n self.PRODUCTS_MIN = 20\n self.PRODUCTS_MAX = 40\n self.MIN_DAYS_FORWARD = 1\n self.MAX_DAYS_FORWARD = 8\n\n self.logger = self.get_logger()\n\n def get_logger_name(self):\n return self.entity_model.slug\n\n def populate_entity(self):\n\n self.logger.info('Checking for existing transactions...')\n txs_qs = TransactionModel.objects.for_entity(\n entity_slug=self.entity_model,\n user_model=self.user_model\n )\n\n if txs_qs.count() > 0:\n raise ValidationError(\n f'Cannot populate random data on {self.entity_model.name} because it already has existing Transactions')\n\n self.create_coa()\n self.logger.info(f'Pulling Entity {self.entity_model} accounts...')\n self.account_models = self.entity_model.get_coa_accounts(order_by=('role', 'code'))\n self.accounts_by_role = {g: list(v) for g, v in groupby(self.account_models, key=lambda a: a.role)}\n self.create_vendors()\n self.create_customers()\n self.create_entity_units()\n self.create_bank_accounts()\n self.create_uom_models()\n\n self.create_products()\n self.create_services()\n self.create_inventories()\n self.create_expenses()\n\n self.fund_entity()\n\n for i in range(self.tx_quantity):\n start_dttm = self.start_date + timedelta(days=randint(0, self.DAYS_FORWARD))\n self.create_estimate(date_draft=start_dttm)\n self.create_po(date_draft=start_dttm)\n self.recount_inventory()\n self.update_products()\n self.create_bill(date_draft=start_dttm)\n\n for i in range(self.tx_quantity):\n start_dttm = self.start_date + timedelta(days=randint(0, self.DAYS_FORWARD))\n self.create_invoice(date_draft=start_dttm)\n\n self.create_closing_entry()\n\n def get_next_date(self, prev_date: date = None) -> date:\n if not prev_date:\n prev_date = self.start_date\n next_date = prev_date + timedelta(days=randint(\n self.MIN_DAYS_FORWARD,\n self.MAX_DAYS_FORWARD\n ))\n if next_date > self.local_date:\n next_date = self.local_date\n return next_date\n\n def create_coa(self):\n entity_model = self.entity_model\n coa_model = 
entity_model.create_chart_of_accounts(assign_as_default=True, commit=True)\n entity_model.populate_default_coa(coa_model=coa_model, activate_accounts=True)\n self.default_coa = entity_model.default_coa\n\n def create_entity_units(self, nb_units: int = None):\n self.logger.info(f'Creating entity units...')\n nb_units = self.NB_UNITS if not nb_units else nb_units\n\n if nb_units:\n assert nb_units >= 0, 'Number of unite must be greater than 0'\n\n entity_unit_models = [\n EntityUnitModel(\n name=f'Unit {u}',\n entity=self.entity_model,\n document_prefix=''.join(choices(ascii_uppercase, k=3))\n ) for u in range(nb_units)\n ]\n\n for unit in entity_unit_models:\n unit.clean()\n EntityUnitModel.add_root(instance=unit)\n\n self.entity_unit_models = self.entity_model.entityunitmodel_set.all()\n\n def create_vendors(self):\n self.logger.info('Creating vendors...')\n vendor_count = randint(10, 20)\n vendor_models = [\n self.entity_model.create_vendor(\n vendor_model_kwargs={\n 'vendor_name': self.fk.name() if random() > .7 else self.fk.company(),\n 'address_1': self.fk.street_address(),\n 'address_2': self.fk.building_number() if random() < .2 else None,\n 'city': self.fk.city(),\n 'state': self.fk.state_abbr(),\n 'zip_code': self.fk.postcode(),\n 'phone': self.fk.phone_number(),\n 'country': self.COUNTRY,\n 'email': self.fk.email(),\n 'website': self.fk.url(),\n 'active': True,\n 'hidden': False,\n 'description': 'A cool vendor description.'\n }, commit=False) for _ in range(vendor_count)\n ]\n\n for vendor in vendor_models:\n vendor.full_clean()\n\n self.vendor_models = VendorModel.objects.bulk_create(vendor_models, ignore_conflicts=True)\n\n def create_customers(self):\n self.logger.info(f'Creating entity customers...')\n customer_count = randint(10, 20)\n customer_models = [\n self.entity_model.create_customer(\n customer_model_kwargs={\n 'customer_name': self.fk.name() if random() > .2 else self.fk.company(),\n 'address_1': self.fk.street_address() + self.fk.street_suffix(),\n 'address_2': self.fk.building_number() if random() > .2 else None,\n 'city': self.fk.city(),\n 'state': self.fk.state_abbr(),\n 'zip_code': self.fk.postcode(),\n 'country': self.COUNTRY,\n 'phone': self.fk.phone_number(),\n 'email': self.fk.email(),\n 'website': self.fk.url(),\n 'active': True,\n 'hidden': False,\n 'description': f'A cool customer description. 
We love customers!'\n }) for _ in range(customer_count)\n ]\n\n for customer in customer_models:\n customer.full_clean()\n\n self.customer_models = CustomerModel.objects.bulk_create(customer_models, ignore_conflicts=True)\n\n def create_bank_accounts(self):\n self.logger.info(f'Creating entity accounts...')\n bank_account_models = [\n self.entity_model.create_bank_account(\n name=f'{self.entity_model.name} Checking Account',\n account_type=BankAccountModel.ACCOUNT_CHECKING,\n active=True,\n cash_account=choice(self.accounts_by_role[ASSET_CA_CASH]),\n bank_account_model_kwargs={\n 'aba_number': self.fk.swift(),\n 'routing_number': str(randint(0, 9999999)).zfill(9),\n 'account_number': str(randint(0, 9999999)).zfill(9)\n },\n commit=False\n ),\n self.entity_model.create_bank_account(\n name=f'{self.entity_model.name} Savings Account',\n account_type=BankAccountModel.ACCOUNT_SAVINGS,\n active=True,\n cash_account=choice(self.accounts_by_role[ASSET_CA_CASH]),\n bank_account_model_kwargs={\n 'aba_number': self.fk.swift(),\n 'routing_number': str(randint(0, 9999999)).zfill(9),\n 'account_number': str(randint(0, 9999999)).zfill(9)\n },\n commit=False\n )\n ]\n for ba in bank_account_models:\n ba.full_clean()\n\n self.bank_account_models = BankAccountModel.objects.bulk_create(bank_account_models, ignore_conflicts=True)\n\n def create_uom_models(self):\n self.logger.info(f'Creating entity Unit of Measures...')\n\n UOMs = {\n 'unit': 'Unit',\n 'ln-ft': 'Linear Feet',\n 'sq-ft': 'Square Fee t',\n 'lb': 'Pound',\n 'pallet': 'Pallet',\n 'man-hour': 'Man Hour'\n }\n\n uom_models = [\n self.entity_model.create_uom(\n unit_abbr=abbr,\n name=name,\n commit=False\n ) for abbr, name in UOMs.items()\n ]\n\n for uom in uom_models:\n uom.full_clean()\n\n self.uom_models = UnitOfMeasureModel.objects.bulk_create(uom_models)\n\n def create_products(self):\n self.logger.info(f'Creating entity product items...')\n product_count = randint(self.PRODUCTS_MIN, self.PRODUCTS_MAX)\n product_models = list()\n for i in range(product_count):\n # is Product....\n product_models.append(ItemModel(\n name=f'Product #{randint(1000, 9999)}',\n uom=choice(self.uom_models),\n item_role=ItemModel.ITEM_ROLE_PRODUCT,\n sku=generate_random_sku(),\n upc=generate_random_upc(),\n item_id=generate_random_item_id(),\n entity=self.entity_model,\n for_inventory=True,\n is_product_or_service=True,\n inventory_account=choice(self.accounts_by_role[ASSET_CA_INVENTORY]),\n earnings_account=choice(self.accounts_by_role[INCOME_OPERATIONAL]),\n cogs_account=choice(self.accounts_by_role[COGS]),\n additional_info=dict()\n ))\n\n for product in product_models:\n product.full_clean()\n\n ItemModel.objects.bulk_create(product_models)\n self.update_products()\n\n def create_services(self):\n self.logger.info(f'Creating entity service items...')\n product_count = randint(self.PRODUCTS_MIN, self.PRODUCTS_MAX)\n service_item_models = list()\n for i in range(product_count):\n service_item_models.append(ItemModel(\n name=f'Service #{randint(1000, 9999)}',\n uom=choice(self.uom_models),\n item_role=ItemModel.ITEM_ROLE_SERVICE,\n sku=generate_random_sku(),\n upc=generate_random_upc(),\n item_id=generate_random_item_id(),\n entity=self.entity_model,\n for_inventory=False,\n is_product_or_service=True,\n earnings_account=choice(self.accounts_by_role[INCOME_OPERATIONAL]),\n cogs_account=choice(self.accounts_by_role[COGS]),\n additional_info=dict()\n ))\n\n for service in service_item_models:\n service.full_clean()\n\n 
ItemModel.objects.bulk_create(service_item_models)\n self.update_services()\n\n def create_expenses(self):\n self.logger.info(f'Creating entity expense items...')\n expense_count = randint(self.PRODUCTS_MIN, self.PRODUCTS_MAX)\n expense_models = [\n ItemModel(\n name=f'Expense Item {randint(1000, 9999)}',\n uom=choice(self.uom_models),\n item_type=choice(ItemModel.ITEM_TYPE_CHOICES)[0],\n item_role=ItemModel.ITEM_ROLE_EXPENSE,\n sku=generate_random_sku(),\n upc=generate_random_upc(),\n item_id=generate_random_item_id(),\n entity=self.entity_model,\n is_product_or_service=False,\n for_inventory=False,\n expense_account=choice(self.accounts_by_role[EXPENSE_OPERATIONAL]),\n ) for _ in range(expense_count)\n ]\n\n for exp in expense_models:\n exp.full_clean()\n\n ItemModel.objects.bulk_create(expense_models)\n self.update_expenses()\n\n def create_inventories(self):\n self.logger.info(f'Creating entity inventory items...')\n inv_count = randint(self.PRODUCTS_MIN, self.PRODUCTS_MAX)\n inventory_models = [\n ItemModel(\n name=f'Inventory {randint(1000, 9999)}',\n uom=choice(self.uom_models),\n item_role=ItemModel.ITEM_ROLE_INVENTORY,\n item_type=choice(ItemModel.ITEM_TYPE_CHOICES)[0],\n item_id=generate_random_item_id(),\n entity=self.entity_model,\n for_inventory=True,\n is_product_or_service=True if random() > 0.6 else False,\n sku=generate_random_sku(),\n upc=generate_random_upc(),\n earnings_account=choice(self.accounts_by_role[INCOME_OPERATIONAL]),\n cogs_account=choice(self.accounts_by_role[COGS]),\n inventory_account=choice(self.accounts_by_role[ASSET_CA_INVENTORY]),\n ) for _ in range(inv_count)\n ]\n\n for inv in inventory_models:\n inv.full_clean()\n\n self.inventory_models = ItemModel.objects.bulk_create(inventory_models)\n\n def update_products(self):\n self.logger.info(f'Updating product catalog...')\n self.product_models = self.entity_model.get_items_products()\n\n def update_services(self):\n self.logger.info(f'Updating service catalog...')\n self.service_models = self.entity_model.get_items_services()\n\n def update_inventory(self):\n self.logger.info(f'Updating inventory...')\n self.inventory_models = self.entity_model.get_items_inventory()\n\n def update_expenses(self):\n self.logger.info(f'Updating expenses...')\n self.expense_models = self.entity_model.get_items_expenses()\n\n def create_estimate(self, date_draft: date):\n estimate_model = self.entity_model.create_estimate(\n estimate_title=f'Customer Estimate {date_draft}',\n date_draft=date_draft,\n customer_model=choice(self.customer_models),\n contract_terms=choice(EstimateModel.CONTRACT_TERMS_CHOICES_VALID),\n commit=True\n )\n self.logger.info(f'Creating entity estimate {estimate_model.estimate_number}...')\n\n estimate_items = [\n ItemTransactionModel(\n ce_model=estimate_model,\n item_model=choice(self.product_models),\n ce_quantity=round(random() * randint(5, 15), 2),\n ce_unit_cost_estimate=round(random() * randint(50, 100), 2),\n ce_unit_revenue_estimate=round(random() * randint(80, 120) * (1 + 0.2 * random()), 2),\n entity_unit=choice(self.entity_unit_models) if random() > .75 else None\n ) for _ in range(randint(1, 10))\n ]\n\n for i in estimate_items:\n i.full_clean()\n\n estimate_model.full_clean()\n estimate_model.update_state(itemtxs_qs=estimate_items)\n estimate_model.save()\n\n estimate_items = estimate_model.itemtransactionmodel_set.bulk_create(objs=estimate_items)\n\n if random() > 0.25:\n date_in_review = self.get_next_date(date_draft)\n estimate_model.mark_as_review(commit=True, 
date_in_review=date_in_review)\n if random() > 0.50:\n date_approved = self.get_next_date(date_in_review)\n estimate_model.mark_as_approved(commit=True, date_approved=date_approved)\n if random() > 0.25:\n date_completed = self.get_next_date(date_approved)\n estimate_model.mark_as_completed(commit=True, date_completed=date_completed)\n elif random() > 0.8:\n date_void = self.get_next_date(date_approved)\n estimate_model.mark_as_void(commit=True, date_void=date_void)\n elif random() > 0.8:\n date_canceled = self.get_next_date(date_in_review)\n estimate_model.mark_as_canceled(commit=True, date_canceled=date_canceled)\n\n def create_bill(self, date_draft: date):\n bill_model = self.entity_model.create_bill(\n vendor_model=choice(self.vendor_models),\n cash_account=choice(self.accounts_by_role[ASSET_CA_CASH]),\n prepaid_account=choice(self.accounts_by_role[ASSET_CA_PREPAID]),\n payable_account=choice(self.accounts_by_role[LIABILITY_CL_ACC_PAYABLE]),\n terms=choice(BillModel.TERM_CHOICES_VALID),\n date_draft=date_draft,\n additional_info=dict(),\n commit=True\n )\n\n self.logger.info(f'Creating entity bill {bill_model.bill_number}...')\n\n bill_items = [\n ItemTransactionModel(\n bill_model=bill_model,\n item_model=choice(self.expense_models),\n quantity=round(random() * randint(5, 15), 2),\n unit_cost=round(random() * randint(50, 100), 2),\n entity_unit=choice(self.entity_unit_models) if random() > .75 else None\n ) for _ in range(randint(1, 10))\n ]\n\n for bi in bill_items:\n bi.full_clean()\n\n bill_model.update_amount_due(itemtxs_qs=bill_items)\n bill_model.itemtransactionmodel_set.bulk_create(bill_items)\n bill_model.full_clean()\n bill_model.save()\n\n if random() > 0.25 and bill_model.amount_due:\n date_in_review = self.get_next_date(date_draft)\n bill_model.mark_as_review(commit=True, date_in_review=date_in_review)\n\n if random() > 0.50:\n date_approved = self.get_next_date(date_in_review)\n bill_model.mark_as_approved(commit=True,\n entity_slug=self.entity_model.slug,\n user_model=self.user_model,\n date_approved=date_approved)\n\n if random() > 0.25:\n paid_date = self.get_next_date(date_approved)\n bill_model.mark_as_paid(\n user_model=self.user_model,\n entity_slug=self.entity_model.slug,\n date_paid=paid_date,\n commit=True\n )\n elif random() > 0.8:\n void_date = self.get_next_date(date_approved)\n bill_model.mark_as_void(\n user_model=self.user_model,\n entity_slug=self.entity_model.slug,\n date_void=void_date,\n commit=True\n )\n elif random() > 0.8:\n canceled_date = self.get_next_date(date_in_review)\n bill_model.mark_as_canceled(date_canceled=canceled_date)\n\n def create_po(self, date_draft: date):\n\n po_model = self.entity_model.create_purchase_order(date_draft=date_draft)\n\n po_items = [\n ItemTransactionModel(\n po_model=po_model,\n item_model=choice(self.product_models),\n po_quantity=round(random() * randint(3, 10) + 3, 2),\n po_unit_cost=round(random() * randint(100, 800), 2),\n entity_unit=choice(self.entity_unit_models) if random() > .75 else None\n ) for _ in range(randint(1, 10))\n ]\n\n for poi in po_items:\n poi.full_clean()\n\n self.logger.info(f'Creating entity purchase order {po_model.po_number}...')\n po_items = po_model.itemtransactionmodel_set.bulk_create(po_items)\n po_model.update_state(itemtxs_qs=po_items)\n po_model.full_clean()\n po_model.save()\n\n # mark as approved...\n if random() > 0.25 and po_model.po_amount:\n date_review = self.get_next_date(date_draft)\n po_model.mark_as_review(commit=True, date_in_review=date_review)\n if random() > 
0.5:\n date_approved = self.get_next_date(date_review)\n po_model.mark_as_approved(commit=True, date_approved=date_approved)\n if random() > 0.25:\n # add a PO bill...\n date_fulfilled = self.get_next_date(date_approved)\n date_bill_draft = date_fulfilled - timedelta(days=randint(1, 3))\n\n bill_model = self.entity_model.create_bill(\n vendor_model=choice(self.vendor_models),\n terms=choice(BillModel.TERM_CHOICES_VALID),\n date_draft=date_bill_draft,\n cash_account=choice(self.accounts_by_role[ASSET_CA_CASH]),\n prepaid_account=choice(self.accounts_by_role[ASSET_CA_PREPAID]),\n payable_account=choice(self.accounts_by_role[LIABILITY_CL_ACC_PAYABLE]),\n commit=True\n )\n\n for po_i in po_items:\n po_i.po_total_amount = round(po_i.po_total_amount, 2)\n po_i.total_amount = round(po_i.po_total_amount, 2)\n po_i.quantity = round(po_i.po_quantity, 2)\n po_i.unit_cost = round(po_i.po_unit_cost, 2)\n po_i.bill_model = bill_model\n po_i.po_item_status = ItemTransactionModel.STATUS_RECEIVED\n po_i.full_clean()\n\n bill_model.update_amount_due(itemtxs_qs=po_items)\n bill_model.full_clean()\n bill_model.update_state()\n bill_model.save()\n\n po_model.itemtransactionmodel_set.bulk_update(\n po_items,\n fields=[\n 'po_total_amount',\n 'total_amount',\n 'po_quantity',\n 'quantity',\n 'po_unit_cost',\n 'unit_cost',\n 'bill_model',\n 'po_item_status'\n ])\n\n if random() > 0.25:\n date_bill_review = self.get_next_date(date_bill_draft)\n bill_model.mark_as_review(commit=True, date_in_review=date_bill_review)\n if random() > 0.50:\n bill_approve_date = self.get_next_date(date_bill_review)\n bill_model.mark_as_approved(commit=True,\n entity_slug=self.entity_model.slug,\n user_model=self.user_model,\n date_approved=bill_approve_date)\n if random() > 0.25:\n bill_paid_date = self.get_next_date(bill_approve_date)\n bill_model.mark_as_paid(\n user_model=self.user_model,\n entity_slug=self.entity_model.slug,\n commit=True,\n date_paid=bill_paid_date)\n\n if random() > 0.20:\n for po_i in po_items:\n po_i.po_item_status = ItemTransactionModel.STATUS_RECEIVED\n po_i.po_item_status = ItemTransactionModel.STATUS_RECEIVED\n po_i.full_clean()\n\n # todo: can pass po items??..\n po_model.itemtransactionmodel_set.bulk_update(po_items,\n fields=[\n 'po_item_status',\n 'updated'\n ])\n po_model.mark_as_fulfilled(\n date_fulfilled=date_fulfilled,\n commit=True)\n\n self.entity_model.update_inventory(\n # user_model=self.user_model,\n commit=True)\n\n self.update_products()\n self.update_inventory()\n\n def create_invoice(self, date_draft: date):\n invoice_model = self.entity_model.create_invoice(\n customer_model=choice(self.customer_models),\n terms=choice(InvoiceModel.TERM_CHOICES_VALID),\n cash_account=choice(self.accounts_by_role[ASSET_CA_CASH]),\n prepaid_account=choice(self.accounts_by_role[ASSET_CA_RECEIVABLES]),\n payable_account=choice(self.accounts_by_role[LIABILITY_CL_DEFERRED_REVENUE]),\n date_draft=date_draft,\n additional_info=dict(),\n commit=True\n )\n self.logger.info(f'Creating entity invoice {invoice_model.invoice_number}...')\n\n invoice_items = list()\n\n for i in range(randint(1, 10)):\n item_model: ItemModel = choice(self.product_models)\n quantity = Decimal.from_float(round(random() * randint(1, 2), 2))\n entity_unit = choice(self.entity_unit_models) if random() > .75 else None\n margin = Decimal(random() + 3.5)\n avg_cost = item_model.get_average_cost()\n if item_model.is_product():\n if item_model.inventory_received is not None and item_model.inventory_received > 0.0:\n if quantity > 
item_model.inventory_received:\n quantity = item_model.inventory_received\n\n # reducing inventory qty...\n item_model.inventory_received -= quantity\n item_model.inventory_received_value -= avg_cost * quantity\n unit_cost = avg_cost * margin\n else:\n quantity = 0.0\n unit_cost = 0.0\n\n if all([\n quantity > 0.00,\n unit_cost > 0.00\n ]):\n itm = ItemTransactionModel(\n invoice_model=invoice_model,\n item_model=item_model,\n quantity=quantity,\n unit_cost=unit_cost,\n entity_unit=entity_unit\n )\n itm.full_clean()\n invoice_items.append(itm)\n\n invoice_items = invoice_model.itemtransactionmodel_set.bulk_create(invoice_items)\n invoice_model.update_amount_due(itemtxs_qs=invoice_items)\n invoice_model.full_clean()\n invoice_model.save()\n\n if random() > 0.25 and invoice_model.amount_due:\n date_review = self.get_next_date(date_draft)\n\n try:\n invoice_model.mark_as_review(commit=True, date_in_review=date_review)\n except InvoiceModelValidationError as e:\n # invoice cannot be marked as in review...\n return\n\n if random() > 0.50:\n date_approved = self.get_next_date(date_review)\n invoice_model.mark_as_approved(entity_slug=self.entity_model.slug,\n user_model=self.user_model,\n commit=True,\n date_approved=date_approved)\n if random() > 0.25:\n date_paid = self.get_next_date(date_approved)\n invoice_model.mark_as_paid(\n entity_slug=self.entity_model.slug,\n user_model=self.user_model,\n date_paid=date_paid,\n commit=True\n )\n self.entity_model.update_inventory(\n # user_model=self.user_model,\n commit=True\n )\n self.update_inventory()\n self.update_products()\n elif random() > 0.8:\n date_void = self.get_next_date(date_approved)\n invoice_model.mark_as_void(\n entity_slug=self.entity_model.slug,\n user_model=self.user_model,\n date_void=date_void,\n commit=True\n )\n elif random() > 0.8:\n date_canceled = self.get_next_date(date_review)\n invoice_model.mark_as_canceled(commit=True, date_canceled=date_canceled)\n\n def fund_entity(self):\n\n self.logger.info(f'Funding entity...')\n capital_acc = choice(self.accounts_by_role[EQUITY_CAPITAL])\n cash_acc = choice(self.bank_account_models).cash_account\n\n self.entity_model.deposit_capital(\n cash_account=cash_acc,\n capital_account=capital_acc,\n amount=self.capital_contribution,\n je_timestamp=self.start_date,\n je_posted=True,\n ledger_posted=True,\n description='Entity Funding for Sample Data',\n )\n\n def create_closing_entry(self):\n closing_date = self.start_date + timedelta(days=int(self.DAYS_FORWARD / 2))\n ce_model, ce_txs = self.entity_model.close_books_for_month(\n year=closing_date.year,\n month=closing_date.month\n )\n ce_model.mark_as_posted(commit=True)\n\n def recount_inventory(self):\n self.logger.info(f'Recounting inventory...')\n self.entity_model.update_inventory(\n # user_model=self.user_model,\n commit=True\n )"
},
{
"identifier": "EntityModel",
"path": "django_ledger/models/entity.py",
"snippet": "class EntityModel(EntityModelAbstract):\n \"\"\"\n Entity Model Base Class From Abstract\n \"\"\""
},
{
"identifier": "EntityModelQuerySet",
"path": "django_ledger/models/entity.py",
"snippet": "class EntityModelQuerySet(MP_NodeQuerySet):\n \"\"\"\n A custom defined EntityModel QuerySet.\n Inherits from the Materialized Path Node QuerySet Class from Django Treebeard.\n \"\"\"\n\n def hidden(self):\n \"\"\"\n A QuerySet of all hidden EntityModel.\n\n Returns\n -------\n EntityModelQuerySet\n A filtered QuerySet of hidden EntityModels only.\n \"\"\"\n return self.filter(hidden=True)\n\n def visible(self):\n \"\"\"\n A Queryset of all visible EntityModel.\n\n Returns\n -------\n EntityModelQuerySet\n A filtered QuerySet of visible EntityModels only.\n \"\"\"\n return self.filter(hidden=False)"
}
] | from datetime import date, timedelta
from decimal import Decimal
from itertools import cycle
from logging import getLogger, DEBUG
from random import randint, choice
from typing import Optional
from django.contrib.auth import get_user_model
from django.core.exceptions import ObjectDoesNotExist
from django.test import TestCase
from django.test.client import Client
from django.utils.timezone import get_default_timezone
from django_ledger.io.data_generator import EntityDataGenerator
from django_ledger.models.entity import EntityModel, EntityModelQuerySet | 7,675 |
UserModel = get_user_model()
class DjangoLedgerBaseTest(TestCase):
    FY_STARTS = None
    CAPITAL_CONTRIBUTION = None
    START_DATE = None
    DAYS_FORWARD = 9 * 30
    TX_QUANTITY = 50
    user_model = None
    TEST_DATA = list()
    CLIENT = None
    TZ = None
    N = None
    USER_EMAIL = None
    PASSWORD = None
    USERNAME = None
    logger = None
    accrual_cycle = cycle([True, False])

    @classmethod
    def setUpTestData(cls):
        cls.logger = getLogger(__name__)
        cls.logger.setLevel(level=DEBUG)
        cls.USERNAME: str = 'testuser'
        cls.PASSWORD: str = 'NeverUseThisPassword12345'
        cls.USER_EMAIL: str = '[email protected]'
        cls.N: int = 2
        cls.DAYS_FWD: int = randint(180, 180 * 3)
        cls.TZ = get_default_timezone()
        cls.START_DATE = cls.get_random_date()
        cls.CLIENT = Client(enforce_csrf_checks=False)
        try:
            cls.user_model = UserModel.objects.get(username=cls.USERNAME)
        except ObjectDoesNotExist:
            cls.user_model = UserModel.objects.create_user(
                username=cls.USERNAME,
                password=cls.PASSWORD,
                email=cls.USER_EMAIL,
            )
        cls.FY_STARTS = list(str(i) for i in range(1, 13))
        cls.TEST_DATA = list()
        cls.CAPITAL_CONTRIBUTION = Decimal('50000.00')
        cls.ENTITY_MODEL_QUERYSET: Optional[EntityModelQuerySet] = None
        cls.create_entity_models(n=cls.N)
        cls.populate_entity_models()

    @classmethod
    def get_random_date(cls) -> date:
        return date(
            year=choice(range(1990, 2020)),
            month=choice(range(1, 13)),
            day=choice(range(1, 28))
        )

    @classmethod
    def login_client(cls):
        # cls.logger.info('Logging in client...')
        cls.CLIENT.login(
            username=cls.USERNAME,
            password=cls.PASSWORD
        )

    @classmethod
    def logout_client(cls):
        # cls.logger.info('Logging out client...')
        cls.CLIENT.logout()

    @classmethod
    def refresh_test_data(cls, n: int = None):
        N = n if n else cls.N
        cls.TEST_DATA = [cls.get_random_entity_data() for _ in range(N)]

    @classmethod
    def get_random_entity_data(cls) -> dict:
        return {
            'slug': f'a-cool-slug-{randint(10000, 99999)}',
            'name': f'Testing Inc-{randint(100000, 999999)}',
            'address_1': f'{randint(100000, 999999)} Main St',
            'address_2': f'Suite {randint(1000, 9999)}',
            'city': 'Charlotte',
            'state': 'NC',
            'zip_code': '28202',
            'country': 'US',
            'email': '[email protected]',
            'website': 'http://www.mytestingco.com',
            'fy_start_month': choice(cls.FY_STARTS),
            'admin': cls.user_model,
            'accrual_method': next(cls.accrual_cycle)
        }
|
UserModel = get_user_model()
class DjangoLedgerBaseTest(TestCase):
    FY_STARTS = None
    CAPITAL_CONTRIBUTION = None
    START_DATE = None
    DAYS_FORWARD = 9 * 30
    TX_QUANTITY = 50
    user_model = None
    TEST_DATA = list()
    CLIENT = None
    TZ = None
    N = None
    USER_EMAIL = None
    PASSWORD = None
    USERNAME = None
    logger = None
    accrual_cycle = cycle([True, False])

    @classmethod
    def setUpTestData(cls):
        cls.logger = getLogger(__name__)
        cls.logger.setLevel(level=DEBUG)
        cls.USERNAME: str = 'testuser'
        cls.PASSWORD: str = 'NeverUseThisPassword12345'
        cls.USER_EMAIL: str = '[email protected]'
        cls.N: int = 2
        cls.DAYS_FWD: int = randint(180, 180 * 3)
        cls.TZ = get_default_timezone()
        cls.START_DATE = cls.get_random_date()
        cls.CLIENT = Client(enforce_csrf_checks=False)
        try:
            cls.user_model = UserModel.objects.get(username=cls.USERNAME)
        except ObjectDoesNotExist:
            cls.user_model = UserModel.objects.create_user(
                username=cls.USERNAME,
                password=cls.PASSWORD,
                email=cls.USER_EMAIL,
            )
        cls.FY_STARTS = list(str(i) for i in range(1, 13))
        cls.TEST_DATA = list()
        cls.CAPITAL_CONTRIBUTION = Decimal('50000.00')
        cls.ENTITY_MODEL_QUERYSET: Optional[EntityModelQuerySet] = None
        cls.create_entity_models(n=cls.N)
        cls.populate_entity_models()

    @classmethod
    def get_random_date(cls) -> date:
        return date(
            year=choice(range(1990, 2020)),
            month=choice(range(1, 13)),
            day=choice(range(1, 28))
        )

    @classmethod
    def login_client(cls):
        # cls.logger.info('Logging in client...')
        cls.CLIENT.login(
            username=cls.USERNAME,
            password=cls.PASSWORD
        )

    @classmethod
    def logout_client(cls):
        # cls.logger.info('Logging out client...')
        cls.CLIENT.logout()

    @classmethod
    def refresh_test_data(cls, n: int = None):
        N = n if n else cls.N
        cls.TEST_DATA = [cls.get_random_entity_data() for _ in range(N)]

    @classmethod
    def get_random_entity_data(cls) -> dict:
        return {
            'slug': f'a-cool-slug-{randint(10000, 99999)}',
            'name': f'Testing Inc-{randint(100000, 999999)}',
            'address_1': f'{randint(100000, 999999)} Main St',
            'address_2': f'Suite {randint(1000, 9999)}',
            'city': 'Charlotte',
            'state': 'NC',
            'zip_code': '28202',
            'country': 'US',
            'email': '[email protected]',
            'website': 'http://www.mytestingco.com',
            'fy_start_month': choice(cls.FY_STARTS),
            'admin': cls.user_model,
            'accrual_method': next(cls.accrual_cycle)
        }
| def get_random_entity_model(self) -> EntityModel: | 1 | 2023-10-20 01:07:20+00:00 | 12k |
hitz-zentroa/This-is-not-a-Dataset | run.py | [
{
"identifier": "load_model",
"path": "load_model.py",
"snippet": "def load_model(\n inference: bool,\n model_weights_name_or_path: str,\n quantization: Optional[int] = None,\n use_lora: bool = False,\n lora_weights_name_or_path: Optional[str] = None,\n lora_target_modules: Optional[List[str]] = [\"all\"],\n lora_r: Optional[int] = 8,\n lora_alpha: Optional[int] = 16,\n lora_dropout: Optional[float] = 0.05,\n torch_dtype: Optional[str] = None,\n force_auto_device_map: bool = False,\n use_gradient_checkpointing: bool = False,\n trust_remote_code: bool = False,\n use_flash_attention: bool = False,\n use_better_transformer: bool = False,\n fsdp_training: bool = False,\n max_memory_MB: Optional[int] = None,\n) -> Tuple[PreTrainedModel, PreTrainedTokenizerBase]:\n \"\"\"\n Load any Decoder model for training.\n\n Args:\n inference (`bool`):\n Whether to load the model for inference or training. If set to `True`, the model will be loaded\n in evaluation mode. In this case, if use_lora is set to `True`, you must provide the path to the\n LoRA weights. Defaults to `False`.\n model_weights_name_or_path (`str`):\n The path to your local model weights and tokenizer or huggingface model name.\n The list of labels to add to the tokenizer. Defaults to `None`.\n quantization (`int`, optional):\n '4' or '8' for 4 bits or 8 bits quantization or None for 16/32bits training. Defaults to `None`.\n\n Requires bitsandbytes library: https://github.com/TimDettmers/bitsandbytes\n use_lora (`bool`, optional):\n Whether to use LORA. Defaults to False.\n\n See https://arxiv.org/pdf/2106.09685.pdf for more details.\n\n Requires huggingface PEFT library: https://github.com/huggingface/peft\n lora_weights_name_or_path (`Optional[str]`, optional):\n The name or path to the pre-trained LORA model weights. You can also provide\n a huggingface hub model name to load the weights from there. If not provided,\n the weights will be initialized randomly, this requires training the model.\n Defaults to `None`.\n lora_target_modules (`Optional[List[str]]`, optional):\n The list of modules to apply LORA to. If not provided, we will use PEFT\n default modules. Defaults to `None`.\n lora_r (`Optional[int]`, optional):\n Lora attention dimension. Defaults to `8`.\n lora_alpha (`Optional[int]`, optional):\n The alpha parameter for Lora scaling. Defaults to `16`.\n lora_dropout (`Optional[float]`, optional):\n The dropout probability for Lora layers. Defaults to 0.05.\n torch_dtype (`Optional[str]`, optional):\n Override the default `torch.dtype` and load the model under this dtype. If\n `auto` is passed, the dtype will be automatically derived from the model's\n weights. Defaults to `None`.\n force_auto_device_map (`bool`, optional):\n Whether to force the use of the auto device map. If set to True, the model will be split across\n GPUs and CPU to fit the model in memory. If set to False, a full copy of the model will be loaded\n into each GPU. Defaults to False.\n use_gradient_checkpointing (`bool`, optiona):\n Whether to use gradient checkpointing for training\n trust_remote_code (`bool`, optional):\n Trust the remote code from HuggingFace model hub. Defaults to False.\n use_flash_attention (`bool`, optional):\n Whether to use Flash Attention. Defaults to True. Flash attention must be installed, see:\n 'https://github.com/Dao-AILab/flash-attention' for more details.\n use_better_transformer (`bool`, optional):\n Whether to transform the model using Better Transformer library:\n https://huggingface.co/docs/optimum/bettertransformer/overview. 
Requires optimum\n 'https://huggingface.co/docs/optimum/installation'. Only supported for inference!\n Defaults to False.\n fsdp_training: (`bool`, optional):\n Whether Fully Sharded Data Parallelism is enabled for training. Defaults to False.\n Used to prevent casting layers to fp32 if the model is already in fp16, which causes\n an error: ValueError: Must flatten tensors with uniform dtype but got torch.float16 and torch.float32\n max_memory_MB (`int`):\n Free memory per gpu in MB. Used to compute the device map when force_auto_device_map is set to True.\n Raises:\n `ValueError`:\n is raised when `int8_quantization=True` but `use_lora=False`.\n\n Returns:\n `Tuple[PreTrainedModel, PreTrainedTokenizerBase]`:\n The loaded model and tokenizer.\n \"\"\"\n\n # Sanity checks\n\n if isinstance(quantization, str):\n quantization = int(quantization)\n assert (quantization is None) or (\n quantization in [4, 8]\n ), f\"Quantization must be 4 or 8, or None for FP32/FP16 training. You passed: {quantization}\"\n\n if not inference and quantization is not None and not use_lora:\n raise ValueError(\n \"'Quantization' == 4/8 is only supported with LoRA. If you want \"\n \"to train a 4/8bits quantified model, you must set `use_lora=True`. If you want to \"\n \"use a 4/8 bits optimizer, set `quantization=None` and choose a 4/8 bit optimizer using 'optim' \"\n \"argument (e.g 'adamw_bnb_8bit', 'lion_8bit', 'paged_adamw_8bit', ...).\"\n )\n\n if inference and use_lora and lora_weights_name_or_path is None:\n raise ValueError(\n \"You must provide the path to the LoRA weights when loading the model for inference.\"\n )\n\n if use_better_transformer and not inference:\n logging.warning(\n \"Better Transformer is only supported for inference. Better Transformers does not support \"\n \"attention mask for training, therefore it is not compatible with CoLLIE training. See \"\n \"https://huggingface.co/docs/optimum/bettertransformer/overview for more details. We will \"\n \"set use_better_transformer=False.\"\n )\n use_better_transformer = False\n\n if use_better_transformer and use_flash_attention:\n raise ValueError(\n \"You cannot use both Flash Attention and Better Transformer flags. Flash Attention is already part of\"\n \" Better Transformers, so you can just set use_better_transformer=True to use Flash Attention. The Flash\"\n \" Attention flag is intended for patching HuggingFace models.\"\n )\n\n if lora_weights_name_or_path is not None and not use_lora:\n logging.warning(\n \"You provided a path to LoRA weights but use_lora is set to False. 
We will set use_lora=True.\"\n )\n use_lora = True\n\n logging.info(f\"Loading model model from {model_weights_name_or_path}\")\n\n MODEL_FOR_CAUSAL_LM_MAPPING_NAMES.update(\n {\n \"stablelm_epoch\": \"LlamaForCausalLM\",\n }\n )\n\n # Get the device map config\n\n device_map, max_memory = get_device_map(\n force_auto_device_map=force_auto_device_map,\n max_memory_MB=max_memory_MB,\n use_better_transformer=use_better_transformer,\n )\n\n # Load the model config\n\n if use_lora:\n config = AutoConfig.from_pretrained(\n model_weights_name_or_path,\n trust_remote_code=trust_remote_code,\n pretraining_tp=1, # Fix mat1 and mat2 shapes cannot be multiplied error with LLaMA-2\n # See https://github.com/huggingface/transformers/pull/24906\n )\n else:\n config = AutoConfig.from_pretrained(\n model_weights_name_or_path,\n trust_remote_code=trust_remote_code,\n )\n\n # Load the model tokenizer\n\n tokenizer: PreTrainedTokenizerBase = AutoTokenizer.from_pretrained(\n model_weights_name_or_path,\n add_eos_token=True,\n trust_remote_code=trust_remote_code,\n legacy=True, # This library was developed with the legacy tokenizer.\n # It might or might not work with the latest updates to the T5 tokenizers. So we set legacy=True to be safe.\n )\n\n if tokenizer.pad_token_id is None:\n if \"<|padding|>\" in tokenizer.get_vocab():\n # StabilityLM specific fix\n tokenizer.add_special_tokens({\"pad_token\": \"<|padding|>\"})\n elif tokenizer.unk_token is not None:\n logging.warning(\n \"Tokenizer does not have a pad token, we will use the unk token as pad token.\"\n )\n tokenizer.pad_token_id = tokenizer.unk_token_id\n else:\n logging.warning(\n \"Tokenizer does not have a pad token. We will use the eos token as pad token.\"\n )\n tokenizer.pad_token_id = tokenizer.eos_token_id\n\n # Load the model weights\n\n # Get the quantization config\n quant_args = {}\n torch_dtype = (\n torch_dtype if torch_dtype in [\"auto\", None] else getattr(torch, torch_dtype)\n )\n\n if quantization is not None:\n quant_args = (\n {\"load_in_4bit\": True} if quantization == 4 else {\"load_in_8bit\": True}\n )\n if quantization == 4:\n bnb_config = BitsAndBytesConfig(\n load_in_4bit=True,\n bnb_4bit_use_double_quant=True,\n bnb_4bit_quant_type=\"nf4\",\n bnb_4bit_compute_dtype=torch.bfloat16\n if torch_dtype in [\"auto\", None]\n else torch_dtype,\n )\n\n else:\n bnb_config = BitsAndBytesConfig(\n load_in_8bit=True,\n )\n logging.info(\n f\"Bits and Bytes config: {json.dumps(bnb_config.to_dict(),indent=4,ensure_ascii=False)}\"\n )\n else:\n logging.info(f\"Loading model with dtype: {torch_dtype}\")\n bnb_config = None\n\n # Get the correct load function for each model_type\n if config.model_type in MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES:\n logging.warning(\n f\"Model {model_weights_name_or_path} is a encoder-decoder model. We will load it as a Seq2SeqLM model.\"\n )\n\n load_fn = AutoModelForSeq2SeqLM\n model_type = \"seq2seq\"\n\n elif config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES:\n logging.warning(\n f\"Model {model_weights_name_or_path} is an decoder-only model. 
We will load it as a CausalLM model.\"\n )\n\n load_fn = AutoModelForCausalLM\n tokenizer.padding_side = \"left\"\n model_type = \"causal\"\n\n else:\n raise ValueError(\n f\"Model {model_weights_name_or_path} of type {config.model_type} is not supported by CoLLIE.\"\n \"Supported models are:\\n\"\n f\"Seq2SeqLM: {MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES}\\n\"\n f\"CausalLM: {MODEL_FOR_CAUSAL_LM_MAPPING_NAMES}\\n\"\n )\n\n # Load the model weights\n # Flash attention 2 was added to HuggingFace transformers very recently. Let's add it as kwargs to the load function\n # so if it is set to False, we can load the model in older versions of transformers.\n if use_flash_attention:\n kwargs = {\"use_flash_attention_2\": True}\n else:\n kwargs = {}\n\n model: PreTrainedModel = load_fn.from_pretrained(\n pretrained_model_name_or_path=model_weights_name_or_path,\n device_map=device_map,\n max_memory=max_memory,\n quantization_config=bnb_config,\n torch_dtype=torch_dtype,\n config=config,\n trust_remote_code=trust_remote_code,\n **quant_args,\n **kwargs,\n )\n\n logging.info(f\"Model dtype: {model.dtype}\")\n logging.info(\n \"Total model memory footprint: \"\n + str(model.get_memory_footprint() / 1e6)\n + \" MB\"\n )\n\n # Prepare the model for k-bit training and enable gradient checkpointing\n if quantization is not None and not inference:\n from peft import prepare_model_for_kbit_training\n\n model = prepare_model_for_kbit_training(\n model, use_gradient_checkpointing=use_gradient_checkpointing\n )\n else:\n if use_gradient_checkpointing and not inference:\n model.gradient_checkpointing_enable()\n\n # Load LoRA weights\n if use_lora:\n from peft import LoraConfig, PeftModel, TaskType, get_peft_model\n\n if not inference:\n model.enable_input_require_grads() # Enables the gradients for the input embeddings\n\n if lora_weights_name_or_path is None:\n logging.info(\n \"No pretrained LORA weights provided, we will initialize the weights randomly.\"\n )\n\n if lora_target_modules is None or (\n lora_target_modules is not None and len(lora_target_modules) == 0\n ):\n logging.warning(\n \"No target modules provided, will use the default modules for the\"\n \" model in huggingface PEFT library. \"\n )\n lora_target_modules = None\n\n if lora_target_modules == [\"all\"]:\n logging.warning(\n \"You provided 'all' as target modules, we will use all the model to which LoRA can be applied.\"\n )\n lora_target_modules = find_all_linear_names(\n model, quantization=quantization\n )\n\n lora_config = LoraConfig(\n r=lora_r,\n lora_alpha=lora_alpha,\n lora_dropout=lora_dropout,\n bias=\"none\",\n task_type=TaskType.CAUSAL_LM\n if model_type == \"causal\"\n else TaskType.SEQ_2_SEQ_LM,\n target_modules=lora_target_modules,\n )\n\n model = get_peft_model(model, lora_config)\n\n else:\n logging.info(\n f\"Loading pretrained LORA weights from {lora_weights_name_or_path}\"\n )\n\n model = PeftModel.from_pretrained(model, lora_weights_name_or_path)\n\n logging.info(f\"\\nLoRA config:\\n{model.peft_config}\\n\")\n\n if inference:\n if use_lora:\n if quantization is None:\n # If we are not using quantization, we merge the LoRA layers into the model for faster inference.\n # This is not possible if we are using 4/8 bit quantization.\n logging.info(\"Merging LoRA layers into the model for faster inference.\")\n model = model.merge_and_unload()\n else:\n logging.info(\n \"Quantization is enabled, we will not merge LoRA layers into the model. 
Inference will be slower.\"\n )\n else:\n trainable_params, total_params, trainable_percentage = get_trainable_parameters(\n model\n )\n logging.info(\n f\"---> Trainable params: {trainable_params} || all params: {total_params} ||\"\n f\" trainable%: {round(trainable_percentage,6)}\\n\"\n )\n\n return model, tokenizer"
},
{
"identifier": "get_dataloader",
"path": "dataset.py",
"snippet": "def get_dataloader(\n tokenizer: PreTrainedTokenizerBase,\n split: str,\n is_encoder_decoder: bool = False,\n max_length: int = 512,\n conv_template: str = None,\n batch_size: int = 1,\n prompt_loss_weight: float = 0.05,\n add_bos_token: bool = False,\n num_workers: int = min(8, os.cpu_count()),\n pattern: str = None,\n only_affirmative: bool = False,\n only_negative: bool = False,\n only_non_distractor: bool = False,\n only_distractor: bool = False,\n) -> DataLoader:\n \"\"\"\n Get a dataloader for a dataset.\n\n Args:\n tokenizer (`PreTrainedTokenizerBase`):\n The tokenizer to use.\n split ('list'):\n The split to load (train, dev, test, all).\n is_encoder_decoder (`bool`, optional):\n Whether the model is an encoder-decoder model. Defaults to `False`.\n max_length (`int`, optional):\n The maximum length of the input. Defaults to `2048`.\n conv_template (`str`, optional):\n The conversation template to use. Defaults to `None`. If `None` we will return the prompt.\n batch_size (`int`, optional):\n The batch size. Defaults to `1`.\n prompt_loss_weight (`float`, optional):\n The weight of the prompt tokens in the loss. If set to '0.05' the prompt tokens will have a total weight\n of 5% in the loss while the result tokens will have a total weight of 95%. Defaults to `0.05`.\n add_bos_token (`bool`, optional):\n Whether to add the beginning of sentence token to the input. Defaults to `False`.\n num_workers (`int`, optional):\n The number of workers to use for the dataloader. Defaults to `0`.\n pattern (`str`, optional):\n The pattern to use for training. Defaults to `None`.\n only_affirmative (`bool`, optional):\n Whether to only load affirmative examples for training. Defaults to `False`.\n only_negative (`bool`, optional):\n Whether to only load negative examples for training. Defaults to `False`.\n only_non_distractor (`bool`, optional):\n Whether to only load non-distractor examples for training. Defaults to `False`.\n only_distractor (`bool`, optional):\n Whether to only load distractor examples for training. Defaults to `False`.\n\n\n Returns:\n `DataLoader`: The dataloader.\n \"\"\"\n\n data_collator = DataCollatorForSeq2Seq(\n tokenizer,\n padding=True,\n label_pad_token_id=-100, # tokenizer.pad_token_id,\n # pad_to_multiple_of=8, # May be faster on some hardware\n )\n\n dataset = ThisIsNotADataset(\n tokenizer=tokenizer,\n split=split,\n is_encoder_decoder=is_encoder_decoder,\n max_length=max_length,\n conv_template=conv_template,\n prompt_loss_weight=prompt_loss_weight,\n add_bos_token=add_bos_token,\n pattern=pattern,\n only_affirmative=only_affirmative,\n only_negative=only_negative,\n only_non_distractor=only_non_distractor,\n only_distractor=only_distractor,\n )\n\n return DataLoader(\n dataset,\n batch_size=batch_size,\n num_workers=num_workers,\n shuffle=split == \"train\",\n collate_fn=data_collator,\n pin_memory=True,\n )"
},
{
"identifier": "evaluate",
"path": "evaluate.py",
"snippet": "def evaluate(predictions_path: str, output_path: Optional[str] = None) -> dict:\n \"\"\"\n Evaluate the predictions of a model\n Args:\n predictions_path: Path to the predictions file. It should be a jsonl with the fields: 'pattern_id',\n 'pattern', 'test_id', 'negation_type', 'semantic_type', 'syntactic_scope', 'isDistractor',\n 'label', 'sentence', 'prediction'\n output_path: Path to the output file. If None, the output will be printed to stdout\n Returns:\n A dictionary with the scores\n The scorer will output the following metrics:\n - **all_affirmations**: Accuracy of the model in affirmative sentences\n - **all_negations**: Accuracy of the model in negative sentences\n - **all**: (Overall) Accuracy of the model in all sentences\n - **input_affirmation**: Accuracy of the model in affirmative sentences without distractors\n - **input_negation**: Accuracy of the model in negative sentences without distractors\n - **distractor_affirmation**: Accuracy of the model in affirmative sentences with distractors\n - **distractor_negation**: Accuracy of the model in negative sentences with distractors\n - **Negation_analysis**: Fine-grained analysis of the model in negative sentences (verbal, analytic,\n clausal, non_verbal, synthetic, subclausal negation types)\n - **Synonymy1, Hypernymy, Part...**: Fine-grained analysis of the model in each pattern\n \"\"\"\n\n print(\n \"\"\"\n*************************************** Running evaluation ***************************************\nThe scorer will output the following metrics:\n - **all_affirmations**: Accuracy of the model in affirmative sentences\n - **all_negations**: Accuracy of the model in negative sentences\n - **all**: (Overall) Accuracy of the model in all sentences\n - **input_affirmation**: Accuracy of the model in affirmative sentences without distractors\n - **input_negation**: Accuracy of the model in negative sentences without distractors\n - **distractor_affirmation**: Accuracy of the model in affirmative sentences with distractors\n - **distractor_negation**: Accuracy of the model in negative sentences with distractors\n - **Negation_analysis**: Fine-grained analysis of the model in negative sentences (verbal, analytic,\n clausal, non_verbal, synthetic, subclausal negation types)\n - **Synonymy1, Hypernymy, Part...**: Fine-grained analysis of the model in each pattern\n**************************************************************************************************\n \"\"\"\n )\n dataset_pattern = {\n \"Synonymy1\": [],\n \"Antonymy1\": [],\n \"Synonymy2\": [],\n \"Antonymy2\": [],\n \"Hypernymy\": [],\n \"Part\": [],\n \"Substance\": [],\n \"Member\": [],\n \"Agent\": [],\n \"Instrument\": [],\n \"Result\": [],\n }\n\n scorer = Scorer()\n coherence_scorer = Coherence_Scorer()\n\n coherence_scorer.from_file(predictions_path)\n with open(predictions_path, \"r\", encoding=\"utf8\") as file:\n for line in file:\n example = json.loads(line.strip())\n pattern = example[\"pattern\"]\n dataset_pattern[pattern].append(example)\n scorer.add_example(\n negation_type=example[\"negation_type\"],\n semantic_type=example[\"semantic_type\"],\n syntactic_scope=example[\"syntactic_scope\"],\n isDistractor=example[\"isDistractor\"],\n gold_label=example[\"label\"],\n predicted_label=example[\"prediction\"],\n )\n\n scores = scorer.compute_scores()\n coherence_scorer = Coherence_Scorer.from_file(predictions_path)\n scores[\"coherence_scores\"] = coherence_scorer.compute_scores()\n\n for pattern in dataset_pattern:\n scorer = Scorer()\n 
coherence_scorer = Coherence_Scorer()\n coherence_scorer.add_pattern(dataset_pattern[pattern])\n for example in dataset_pattern[pattern]:\n scorer.add_example(\n negation_type=example[\"negation_type\"],\n semantic_type=example[\"semantic_type\"],\n syntactic_scope=example[\"syntactic_scope\"],\n isDistractor=example[\"isDistractor\"],\n gold_label=example[\"label\"],\n predicted_label=example[\"prediction\"],\n )\n scores[pattern] = scorer.compute_scores()\n scores[pattern][\"coherence_scores\"] = coherence_scorer.compute_scores()\n\n if output_path is not None:\n print(f\"Saving scores to {output_path}\")\n with open(output_path, \"w\", encoding=\"utf8\") as file:\n print(json.dumps(scores, ensure_ascii=False, indent=4), file=file)\n else:\n print(json.dumps(scores, ensure_ascii=False, indent=4))\n\n print(\"*** Evaluation finished ***\")\n return scores"
},
{
"identifier": "DataTrainingArguments",
"path": "config.py",
"snippet": "class DataTrainingArguments:\n \"\"\"\n Arguments pertaining to what data we are going to input our model for training and eval.\n \"\"\"\n\n do_predict_full_dataset: bool = field(\n default=False,\n metadata={\n \"help\": \"Whether to run predictions on the full dataset. If True, the model will be evaluated on the \"\n \"full dataset. If False, the model will be evaluated on the test set. Defaults to False.\"\n },\n )\n max_seq_length: int = field(\n default=512,\n metadata={\n \"help\": (\n \"The maximum total input sequence length after tokenization. Sequences\"\n \" longer than this will be truncated, sequences shorter will be padded.\"\n )\n },\n )\n\n prompt_loss_weight: float = field(\n default=0.05,\n metadata={\n \"help\": (\n \"The weight of the prompt tokens in the loss. If set to '0.05' the prompt tokens will have a total\"\n \" weight of 5% in the loss while the result tokens will have a total weight of 95%. Only used for\"\n \" computing the loss in the training data. Defaults to `0.05`.\"\n )\n },\n )\n\n force_auto_device_map: bool = field(\n default=False,\n metadata={\n \"help\": (\n \"Whether to force the use of the auto device map. If set to True, the model will be split across \"\n \"GPUs and CPU to fit the model in memory. If set to False, a full copy of the model will be loaded \"\n \"into each GPU. Defaults to False.\"\n )\n },\n )\n\n pattern: Optional[str] = field(\n default=None,\n metadata={\n \"help\": (\n \"The pattern to use for training. If not specified, all patterns will be used.\"\n ),\n \"choices\": [\n \"Synonymy1\",\n \"Antonymy1\",\n \"Synonymy2\",\n \"Antonymy2\",\n \"Hypernymy\",\n \"Part\",\n \"Substance\",\n \"Member\",\n \"Agent\",\n \"Instrument\",\n \"Result\",\n ],\n },\n )\n\n only_affirmative: bool = field(\n default=False,\n metadata={\n \"help\": (\n \"Whether to only load affirmative examples for training. Defaults to `False`.\"\n )\n },\n )\n\n only_negative: bool = field(\n default=False,\n metadata={\n \"help\": (\n \"Whether to only load negative examples for training. Defaults to `False`.\"\n )\n },\n )\n\n only_non_distractor: bool = field(\n default=False,\n metadata={\n \"help\": (\n \"Whether to only load non-distractor examples for training. Defaults to `False`.\"\n )\n },\n )\n\n only_distractor: bool = field(\n default=False,\n metadata={\n \"help\": (\n \"Whether to only load distractor examples for training. Defaults to `False`.\"\n )\n },\n )"
},
{
"identifier": "ModelArguments",
"path": "config.py",
"snippet": "class ModelArguments:\n \"\"\"\n Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.\n \"\"\"\n\n model_name_or_path: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"The local path or huggingface hub name of the model and tokenizer to use.\"\n },\n )\n\n torch_dtype: Optional[str] = field(\n default=None,\n metadata={\n \"help\": (\n \"Override the default `torch.dtype` and load the model under this\"\n \" dtype. If `auto` is passed, the dtype will be automatically derived\"\n \" from the model's weights.\"\n ),\n \"choices\": [\"auto\", \"bfloat16\", \"float16\", \"float32\"],\n },\n )\n\n use_lora: bool = field(\n default=False,\n metadata={\n \"help\": (\n \"Whether to use LoRA. If True, the model will be trained with LoRA: https://arxiv.org/abs/2106.09685\"\n )\n },\n )\n\n quantization: Optional[int] = field(\n default=None,\n metadata={\n \"help\": (\n \"Whether to use '4' or '8' bit quantization. Requires bitsandbytes library:\"\n \" https://github.com/TimDettmers/bitsandbytes\"\n )\n },\n )\n lora_weights_name_or_path: Optional[str] = field(\n default=None,\n metadata={\n \"help\": (\n \"If the model has been trained with LoRA, \"\n \"path or huggingface hub name or local path to the pretrained weights.\"\n )\n },\n )\n\n lora_r: Optional[int] = field(\n default=8,\n metadata={\"help\": \"Lora attention dimension.\"},\n )\n\n lora_alpha: Optional[float] = field(\n default=16,\n metadata={\"help\": \"The alpha parameter for Lora scaling.\"},\n )\n lora_dropout: Optional[float] = field(\n default=0.05,\n metadata={\"help\": \"The dropout probability for Lora layers.\"},\n )\n\n lora_target_modules: Optional[List[str]] = field(\n default_factory=list,\n metadata={\n \"help\": (\n \"The target modules to which LoRA will be applied. If not specified, We\"\n \" will use the default modules for the model in huggingface PEFT library.\"\n )\n },\n )\n\n conversation_template: str = field(\n default=None,\n metadata={\n \"help\": (\n \"The config template to use to generate conversations. See \"\n \"https://github.com/lm-sys/FastChat/blob/main/fastchat/conversation.py for more details\"\n )\n },\n )\n\n add_bos_token: bool = field(\n default=False,\n metadata={\n \"help\": (\n \"Whether to add the BOS token to the beginning of the prompt (Encoder-only models). Defaults to False.\"\n )\n },\n )\n\n use_flash_attention: bool = field(\n default=False,\n metadata={\n \"help\": (\n \"Whether to use the FlashAttention. If True, we will use FlashAttention. Be careful, not all models \"\n \"support FlashAttention. See https://github.com/huggingface/transformers/issues/26350. \"\n \"Defaults to False.\"\n )\n },\n )"
},
{
"identifier": "get_optimizer",
"path": "optimizer.py",
"snippet": "def get_optimizer(training_args: Seq2SeqTrainingArguments, model: PreTrainedModel):\n decay_parameters = get_parameter_names(model, ALL_LAYERNORM_LAYERS)\n decay_parameters = [name for name in decay_parameters if \"bias\" not in name]\n optimizer_grouped_parameters = [\n {\n \"params\": [\n p\n for n, p in model.named_parameters()\n if (n in decay_parameters and p.requires_grad)\n ],\n \"weight_decay\": training_args.weight_decay,\n },\n {\n \"params\": [\n p\n for n, p in model.named_parameters()\n if (n not in decay_parameters and p.requires_grad)\n ],\n \"weight_decay\": 0.0,\n },\n ]\n\n optimizer_kwargs = {\"lr\": training_args.learning_rate}\n\n adam_kwargs = {\n \"betas\": (training_args.adam_beta1, training_args.adam_beta2),\n \"eps\": training_args.adam_epsilon,\n }\n if training_args.optim == OptimizerNames.ADAFACTOR:\n from transformers.optimization import Adafactor\n\n optimizer_cls = Adafactor\n optimizer_kwargs.update({\"scale_parameter\": False, \"relative_step\": False})\n elif training_args.optim == OptimizerNames.ADAMW_HF:\n from transformers.optimization import AdamW\n\n optimizer_cls = AdamW\n optimizer_kwargs.update(adam_kwargs)\n elif training_args.optim in [\n OptimizerNames.ADAMW_TORCH,\n OptimizerNames.ADAMW_TORCH_FUSED,\n ]:\n from torch.optim import AdamW\n\n optimizer_cls = AdamW\n optimizer_kwargs.update(adam_kwargs)\n if training_args.optim == OptimizerNames.ADAMW_TORCH_FUSED:\n optimizer_kwargs.update({\"fused\": True})\n elif training_args.optim == OptimizerNames.ADAMW_TORCH_XLA:\n try:\n from torch_xla.amp.syncfree import AdamW\n\n optimizer_cls = AdamW\n optimizer_kwargs.update(adam_kwargs)\n except ImportError:\n raise ValueError(\"Trainer failed to import syncfree AdamW from torch_xla.\")\n elif training_args.optim == OptimizerNames.ADAMW_APEX_FUSED:\n try:\n from apex.optimizers import FusedAdam\n\n optimizer_cls = FusedAdam\n optimizer_kwargs.update(adam_kwargs)\n except ImportError:\n raise ValueError(\n \"Trainer tried to instantiate apex FusedAdam but apex is not installed!\"\n )\n elif training_args.optim in [\n OptimizerNames.ADAMW_BNB,\n OptimizerNames.ADAMW_8BIT,\n OptimizerNames.PAGED_ADAMW,\n OptimizerNames.PAGED_ADAMW_8BIT,\n OptimizerNames.LION,\n OptimizerNames.LION_8BIT,\n OptimizerNames.PAGED_LION,\n OptimizerNames.PAGED_LION_8BIT,\n ]:\n try:\n from bitsandbytes.optim import AdamW, Lion\n\n is_paged = False\n optim_bits = 32\n optimizer_cls = None\n additional_optim_kwargs = adam_kwargs\n if \"paged\" in training_args.optim:\n is_paged = True\n if \"8bit\" in training_args.optim:\n optim_bits = 8\n if \"adam\" in training_args.optim:\n optimizer_cls = AdamW\n elif \"lion\" in training_args.optim:\n optimizer_cls = Lion\n additional_optim_kwargs = {\n \"betas\": (training_args.adam_beta1, training_args.adam_beta2)\n }\n\n bnb_kwargs = {\"is_paged\": is_paged, \"optim_bits\": optim_bits}\n optimizer_kwargs.update(additional_optim_kwargs)\n optimizer_kwargs.update(bnb_kwargs)\n except ImportError:\n raise ValueError(\n \"Trainer tried to instantiate bnb optimizer but bnb is not installed!\"\n )\n elif training_args.optim == OptimizerNames.ADAMW_BNB:\n try:\n from bitsandbytes.optim import Adam8bit\n\n optimizer_cls = Adam8bit\n optimizer_kwargs.update(adam_kwargs)\n except ImportError:\n raise ValueError(\n \"Trainer tried to instantiate bnb Adam8bit but bnb is not installed!\"\n )\n elif training_args.optim == OptimizerNames.ADAMW_ANYPRECISION:\n raise NotImplementedError(\"AdamWAnyprecision is not supported\")\n elif 
training_args.optim == OptimizerNames.SGD:\n optimizer_cls = torch.optim.SGD\n elif training_args.optim == OptimizerNames.ADAGRAD:\n optimizer_cls = torch.optim.Adagrad\n else:\n raise ValueError(\n f\"Trainer cannot instantiate unsupported optimizer: {training_args.optim}\"\n )\n\n optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)\n if optimizer_cls.__name__ == \"Adam8bit\":\n import bitsandbytes\n\n manager = bitsandbytes.optim.GlobalOptimManager.get_instance()\n\n skipped = 0\n for module in model.modules():\n if isinstance(module, nn.Embedding):\n skipped += sum(\n {p.data_ptr(): p.numel() for p in module.parameters()}.values()\n )\n print(f\"skipped {module}: {skipped / 2 ** 20}M params\")\n manager.register_module_override(module, \"weight\", {\"optim_bits\": 32})\n print(f\"bitsandbytes: will optimize {module} in fp32\")\n print(f\"skipped: {skipped / 2 ** 20}M params\")\n\n return optimizer"
}
] | from load_model import load_model
from dataset import get_dataloader
from evaluate import evaluate
from config import DataTrainingArguments, ModelArguments
from transformers import (
HfArgumentParser,
Seq2SeqTrainingArguments,
set_seed,
get_scheduler,
)
from tqdm import tqdm
from accelerate import Accelerator, find_executable_batch_size
from typing import List
from optimizer import get_optimizer
from transformers.models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from transformers.modeling_utils import unwrap_model
import torch
import os
import wandb
import gc
import json
import math
import sys
import logging | 10,305 | skip_special_tokens=False,
clean_up_tokenization_spaces=False,
)
)
print(f"*** Sample of batch 0 ***")
print(f"-- Model inputs --\n{model_inputs}")
print(f"*** End of sample ***\n")
first = False
if not predict_with_generate:
if not model.config.is_encoder_decoder:
logits = model(
input_ids=batch["input_ids"],
attention_mask=batch["attention_mask"],
).logits
else:
encoder_output = model.get_encoder()(
input_ids=batch["input_ids"],
attention_mask=batch["attention_mask"],
)
decoder_args = {
"attention_mask": batch["attention_mask"],
"use_cache": False,
"encoder_outputs": encoder_output,
}
gen_inputs = model.prepare_inputs_for_generation(
input_ids=torch.tensor(
[[tokenizer.pad_token_id]] * len(batch["input_ids"])
).to(batch["input_ids"].device),
**decoder_args,
)
logits = model(
**gen_inputs,
).logits
logits = logits[:, -1, :]
logits = torch.nn.functional.softmax(logits, dim=-1)
logits = logits[:, [yes_id, no_id]]
logits = logits[:, 0] / (logits[:, 0] + logits[:, 1])
preds = logits > 0.5
preds = accelerator.gather(preds).cpu().tolist()
logits = accelerator.gather(logits).cpu().tolist()
if accelerator.is_local_main_process:
if accelerator.num_processes > 1:
# Remove duplicates in the last batch if we are in a distributed setting
if step == len(dataloader) - 1:
preds = preds[: (len(dataloader.dataset) - samples_seen)]
logits = logits[: (len(dataloader.dataset) - samples_seen)]
else:
samples_seen += len(batch)
all_preds.extend(preds)
all_scores.extend(logits)
else:
preds = model.generate(
input_ids=batch["input_ids"],
attention_mask=batch["attention_mask"],
max_new_tokens=6,
)
preds = accelerator.gather(
accelerator.pad_across_processes(
preds,
dim=1,
pad_index=tokenizer.pad_token_id,
)
).cpu()
inputs_ids = accelerator.gather(
accelerator.pad_across_processes(
batch["input_ids"],
dim=1,
pad_index=tokenizer.pad_token_id,
)
).cpu()
preds = preds[:, len(inputs_ids[0]) :]
if accelerator.is_local_main_process:
if accelerator.num_processes > 1:
# Remove duplicates in the last batch if we are in a distributed setting
if step == len(dataloader) - 1:
preds = preds[: (len(dataloader.dataset) - samples_seen)]
else:
samples_seen += len(batch)
preds = tokenizer.batch_decode(preds, skip_special_tokens=True)
# print(preds)
for pred in preds:
pred = pred.lower()
if "true" in pred:
all_preds.append(True)
else:
all_preds.append(False)
if accelerator.is_local_main_process:
with open(output_path, "w", encoding="utf8") as f:
for pred in all_preds if not return_scores else all_scores:
print(pred, file=f)
if not return_scores:
json_dataset = dataloader.dataset.get_jsonl()
assert len(json_dataset) == len(all_preds)
with open(
os.path.splitext(output_path)[0] + ".jsonl", "w", encoding="utf8"
) as f:
for json_line, pred in zip(json_dataset, all_preds):
json_line["prediction"] = bool(pred)
print(json.dumps(json_line, ensure_ascii=False), file=f)
model.train()
def main(
model_args: ModelArguments,
|
def clean_cache():
"""Clean cache to avoid memory leak.
This fixes this issue: https://github.com/huggingface/transformers/issues/22801"""
print(f"Cleaning GPU memory. Current memory usage: {torch.cuda.memory_allocated()}")
torch.cuda.empty_cache()
gc.collect()
torch.cuda.empty_cache()
print(f"GPU memory usage after cleaning: {torch.cuda.memory_allocated()}")
def compute_loss(model, inputs, return_outputs=False):
"""
How the loss is computed by Trainer. By default, all models return the loss in the first element.
Subclass and override for custom behavior.
"""
if "labels" in inputs:
labels = inputs.pop("labels")
else:
raise ValueError("You should supply a labels key to compute the loss")
if "loss_weight_mask" in inputs:
loss_weight_mask = inputs.pop("loss_weight_mask")
else:
raise ValueError("You should supply a loss_weight_mask key to compute the loss")
if unwrap_model(model).config.is_encoder_decoder:
outputs = model(labels=labels, **inputs)
else:
outputs = model(**inputs)
logits = outputs["logits"] if isinstance(outputs, dict) else outputs[0]
model_name = unwrap_model(model)._get_name()
if (
model_name in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES.values()
or model_name == "PeftModelForCausalLM"
):
logits = logits[..., :-1, :].contiguous()
labels = labels[..., 1:].contiguous()
loss_weight_mask = loss_weight_mask[..., 1:].contiguous()
logits = logits.view(-1, logits.size(-1))
labels = labels.view(-1)
loss_weight_mask = loss_weight_mask.view(-1)
loss_fct = torch.nn.CrossEntropyLoss(reduction="none", ignore_index=-100)
loss = loss_fct(logits, labels)
loss = torch.sum(loss * loss_weight_mask) / torch.sum(loss_weight_mask)
return (loss, outputs) if return_outputs else loss
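# A minimal, self-contained sketch (toy tensors with hypothetical values, reusing the torch
# import of this script) of the weighted token loss computed in compute_loss above:
# per-token cross entropy is averaged against loss_weight_mask, so prompt tokens and
# result tokens contribute different shares (e.g. 5% vs. 95%) of the total loss.
def _demo_weighted_loss():
    logits = torch.randn(4, 10)                    # 4 positions, vocabulary of size 10
    labels = torch.tensor([1, 2, 3, 4])            # gold token ids
    mask = torch.tensor([0.05, 0.05, 0.95, 0.95])  # prompt weights vs. result weights
    per_token = torch.nn.CrossEntropyLoss(reduction="none", ignore_index=-100)(logits, labels)
    return torch.sum(per_token * mask) / torch.sum(mask)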
def gen_predictions(
model,
tokenizer,
true_tokens_ids: List[int],
false_tokens_ids: List[int],
dataloader,
output_path,
accelerator,
print_first=False,
predict_with_generate=False,
return_scores=False,
):
if predict_with_generate and return_scores:
raise ValueError(
"return_scores is not supported when predict_with_generate is True"
)
model.eval()
with torch.no_grad():
samples_seen: int = 0
yes_id = true_tokens_ids[0]
no_id = false_tokens_ids[0]
all_preds = []
all_scores = []
first = True
for step, batch in enumerate(
tqdm(dataloader, f"Inference on {os.path.basename(output_path)}")
):
if print_first and accelerator.is_local_main_process:
### DEBUG ###
if print_first and first and accelerator.is_main_process:
decodeable_inputs = batch.input_ids.clone()
decodeable_inputs[
decodeable_inputs == -100
] = tokenizer.pad_token_id
model_inputs = "\n".join(
tokenizer.batch_decode(
decodeable_inputs,
skip_special_tokens=False,
clean_up_tokenization_spaces=False,
)
)
print(f"*** Sample of batch 0 ***")
print(f"-- Model inputs --\n{model_inputs}")
print(f"*** End of sample ***\n")
first = False
if not predict_with_generate:
if not model.config.is_encoder_decoder:
logits = model(
input_ids=batch["input_ids"],
attention_mask=batch["attention_mask"],
).logits
else:
encoder_output = model.get_encoder()(
input_ids=batch["input_ids"],
attention_mask=batch["attention_mask"],
)
decoder_args = {
"attention_mask": batch["attention_mask"],
"use_cache": False,
"encoder_outputs": encoder_output,
}
gen_inputs = model.prepare_inputs_for_generation(
input_ids=torch.tensor(
[[tokenizer.pad_token_id]] * len(batch["input_ids"])
).to(batch["input_ids"].device),
**decoder_args,
)
logits = model(
**gen_inputs,
).logits
logits = logits[:, -1, :]
logits = torch.nn.functional.softmax(logits, dim=-1)
logits = logits[:, [yes_id, no_id]]
logits = logits[:, 0] / (logits[:, 0] + logits[:, 1])
preds = logits > 0.5
preds = accelerator.gather(preds).cpu().tolist()
logits = accelerator.gather(logits).cpu().tolist()
if accelerator.is_local_main_process:
if accelerator.num_processes > 1:
# Remove duplicates in the last batch if we are in a distributed setting
if step == len(dataloader) - 1:
preds = preds[: (len(dataloader.dataset) - samples_seen)]
logits = logits[: (len(dataloader.dataset) - samples_seen)]
else:
samples_seen += len(batch)
all_preds.extend(preds)
all_scores.extend(logits)
else:
preds = model.generate(
input_ids=batch["input_ids"],
attention_mask=batch["attention_mask"],
max_new_tokens=6,
)
preds = accelerator.gather(
accelerator.pad_across_processes(
preds,
dim=1,
pad_index=tokenizer.pad_token_id,
)
).cpu()
inputs_ids = accelerator.gather(
accelerator.pad_across_processes(
batch["input_ids"],
dim=1,
pad_index=tokenizer.pad_token_id,
)
).cpu()
preds = preds[:, len(inputs_ids[0]) :]
if accelerator.is_local_main_process:
if accelerator.num_processes > 1:
# Remove duplicates in the last batch if we are in a distributed setting
if step == len(dataloader) - 1:
preds = preds[: (len(dataloader.dataset) - samples_seen)]
else:
samples_seen += len(batch)
preds = tokenizer.batch_decode(preds, skip_special_tokens=True)
# print(preds)
for pred in preds:
pred = pred.lower()
if "true" in pred:
all_preds.append(True)
else:
all_preds.append(False)
if accelerator.is_local_main_process:
with open(output_path, "w", encoding="utf8") as f:
for pred in all_preds if not return_scores else all_scores:
print(pred, file=f)
if not return_scores:
json_dataset = dataloader.dataset.get_jsonl()
assert len(json_dataset) == len(all_preds)
with open(
os.path.splitext(output_path)[0] + ".jsonl", "w", encoding="utf8"
) as f:
for json_line, pred in zip(json_dataset, all_preds):
json_line["prediction"] = bool(pred)
print(json.dumps(json_line, ensure_ascii=False), file=f)
model.train()
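# Sketch of the binary scoring path in gen_predictions above (a hypothetical helper, not
# called by the script): last-position logits are softmaxed, the probabilities of the
# "True"/"False" answer tokens are renormalized, and the prediction is P(True) > 0.5.
def _demo_true_false_score(last_logits: torch.Tensor, yes_id: int, no_id: int) -> torch.Tensor:
    probs = torch.nn.functional.softmax(last_logits, dim=-1)  # (batch, vocab_size)
    pair = probs[:, [yes_id, no_id]]                          # P(True), P(False) per example
    return pair[:, 0] / (pair[:, 0] + pair[:, 1])             # renormalized P(True)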
def main(
model_args: ModelArguments, | data_args: DataTrainingArguments, | 3 | 2023-10-18 10:24:48+00:00 | 12k |
SKYeve/Transcript-Combiner | pull_notes.py | [
{
"identifier": "YoudaoNoteConvert",
"path": "convert.py",
"snippet": "class YoudaoNoteConvert(object):\n \"\"\"\n 有道云笔记 xml或者json 内容转换为 markdown 内容\n \"\"\"\n\n @staticmethod\n def covert_html_to_markdown(file_path) -> str:\n \"\"\"\n 转换 HTML 为 MarkDown\n :param file_path:\n :return:\n \"\"\"\n with open(file_path, 'rb') as f:\n content_str = f.read().decode('utf-8')\n from markdownify import markdownify as md\n # 如果换行符丢失,使用 md(content_str.replace('<br>', '<br><br>').replace('</div>', '</div><br><br>')).rstrip()\n new_content = md(content_str)\n base = os.path.splitext(file_path)[0]\n new_file_path = ''.join([base, MARKDOWN_SUFFIX])\n os.rename(file_path, new_file_path)\n with open(new_file_path, 'wb') as f:\n f.write(new_content.encode())\n return new_file_path\n\n @staticmethod\n def covert_xml_to_markdown_content(file_path):\n # 使用 xml.etree.ElementTree 将 xml 文件转换为对象\n element_tree = ET.parse(file_path)\n note_element = element_tree.getroot() # note Element\n\n # list_item 的 id 与 type 的对应\n list_item = {}\n for child in note_element[0]:\n if 'list' in child.tag:\n list_item[child.attrib['id']] = child.attrib['type']\n\n body_element = note_element[1] # Element\n new_content_list = []\n for element in list(body_element):\n text = XmlElementConvert.get_text_by_key(list(element))\n name = element.tag.replace('{http://note.youdao.com}', '').replace('-', '_')\n convert_func = getattr(XmlElementConvert, 'convert_{}_func'.format(name), None)\n # 如果没有转换,只保留文字\n if not convert_func:\n new_content_list.append(text)\n continue\n line_content = convert_func(text=text, element=element, list_item=list_item)\n new_content_list.append(line_content)\n return f'\\r\\n\\r\\n'.join(new_content_list) # 换行 1 行\n\n @staticmethod\n def covert_xml_to_markdown(file_path) -> str:\n \"\"\"\n 转换 XML 为 MarkDown\n :param file_path:\n :return:\n \"\"\"\n base = os.path.splitext(file_path)[0]\n new_file_path = ''.join([base, MARKDOWN_SUFFIX])\n # 如果文件为空,结束\n if os.path.getsize(file_path) == 0:\n os.rename(file_path, new_file_path)\n return False\n\n new_content = YoudaoNoteConvert.covert_xml_to_markdown_content(file_path)\n with open(new_file_path, 'wb') as f:\n f.write(new_content.encode('utf-8'))\n # 删除旧文件\n if os.path.exists(file_path):\n os.remove(file_path)\n return new_file_path\n \n \n @staticmethod\n def covert_json_to_markdown_content(file_path):\n new_content_list = []\n # 加载json文件\n with open(file_path, 'r', encoding='utf-8') as f:\n json_data = json.load(f)\n \n json_contents = json_data['5']\n for content in json_contents:\n type = content.get('6')\n # 根据类型处理,无类型的为普通文本\n if type:\n convert_func = getattr(jsonConvert(), 'convert_{}_func'.format(type), None)\n # 如果没有转换,显示错误\n if not convert_func:\n # line_content = f\"该类型{type},不支持转换!!!\"\n line_content = jsonConvert().convert_text_func(content)\n else:\n line_content = convert_func(content)\n else:\n line_content = jsonConvert().convert_text_func(content)\n \n # 判断是否有内容\n if line_content:\n new_content_list.append(line_content)\n return f'\\r\\n\\r\\n'.join(new_content_list) # 换行 1 行\n\n @staticmethod\n def covert_json_to_markdown(file_path) -> str:\n \"\"\"\n 转换 Json 为 MarkDown\n :param file_path:\n :return:\n \"\"\"\n base = os.path.splitext(file_path)[0]\n new_file_path = ''.join([base, MARKDOWN_SUFFIX])\n # 如果文件为空,结束\n if os.path.getsize(file_path) == 0:\n os.rename(file_path, new_file_path)\n return False\n new_content = YoudaoNoteConvert.covert_json_to_markdown_content(file_path)\n with open(new_file_path, 'wb') as f:\n f.write(new_content.encode('utf-8'))\n # 删除旧文件\n if os.path.exists(file_path):\n 
os.remove(file_path)\n return new_file_path\n \n \n @staticmethod\n def markdown_filter(file_path):\n filter_list = [' ']\n with open(file_path, 'r', encoding='utf-8') as f:\n content = f.read()\n \n for filter_text in filter_list:\n new_content = content.replace(filter_text,'')\n \n with open(file_path, 'wb') as f:\n f.write(new_content.encode('utf-8')) "
},
{
"identifier": "YoudaoNoteApi",
"path": "youDaoNoteApi.py",
"snippet": "class YoudaoNoteApi(object):\r\n \"\"\"\r\n 有道云笔记 API 封装\r\n 原理:https://depp.wang/2020/06/11/how-to-find-the-api-of-a-website-eg-note-youdao-com/\r\n \"\"\"\r\n\r\n ROOT_ID_URL = 'https://note.youdao.com/yws/api/personal/file?method=getByPath&keyfrom=web&cstk={cstk}'\r\n DIR_MES_URL = 'https://note.youdao.com/yws/api/personal/file/{dir_id}?all=true&f=true&len=1000&sort=1' \\\r\n '&isReverse=false&method=listPageByParentId&keyfrom=web&cstk={cstk}'\r\n FILE_URL = 'https://note.youdao.com/yws/api/personal/sync?method=download&_system=macos&_systemVersion=&' \\\r\n '_screenWidth=1280&_screenHeight=800&_appName=ynote&_appuser=0123456789abcdeffedcba9876543210&' \\\r\n '_vendor=official-website&_launch=16&_firstTime=&_deviceId=0123456789abcdef&_platform=web&' \\\r\n '_cityCode=110000&_cityName=&sev=j1&keyfrom=web&cstk={cstk}'\r\n\r\n def __init__(self, cookies_path=None):\r\n \"\"\"\r\n 初始化\r\n :param cookies_path:\r\n \"\"\"\r\n self.session = requests.session() # 使用 session 维持有道云笔记的登陆状态\r\n self.session.headers = {\r\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) '\r\n 'Chrome/100.0.4896.88 Safari/537.36',\r\n 'Accept': '*/*',\r\n 'Accept-Encoding': 'gzip, deflate',\r\n 'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',\r\n 'sec-ch-ua': '\" Not A;Brand\";v=\"99\", \"Chromium\";v=\"100\", \"Google Chrome\";v=\"100\"',\r\n 'sec-ch-ua-mobile': '?0',\r\n 'sec-ch-ua-platform': '\"macOS\"',\r\n }\r\n\r\n self.cookies_path = cookies_path if cookies_path else 'cookies.json'\r\n self.cstk = None\r\n\r\n def login_by_cookies(self) -> str:\r\n \"\"\"\r\n 使用 Cookies 登录,其实就是设置 Session 的 Cookies\r\n :return: error_msg\r\n \"\"\"\r\n try:\r\n cookies = self._covert_cookies()\r\n except Exception as err:\r\n return format(err)\r\n for cookie in cookies:\r\n self.session.cookies.set(name=cookie[0], value=cookie[1], domain=cookie[2], path=cookie[3])\r\n self.cstk = cookies[0][1] if cookies[0][0] == 'YNOTE_CSTK' else None # cstk 用于请求时接口验证\r\n if not self.cstk:\r\n return 'YNOTE_CSTK 字段为空'\r\n print('本次使用 Cookies 登录')\r\n\r\n def _covert_cookies(self) -> list:\r\n \"\"\"\r\n 读取 cookies 文件的 cookies,并转换为字典\r\n :return: cookies\r\n \"\"\"\r\n with open(self.cookies_path, 'rb') as f:\r\n json_str = f.read().decode('utf-8')\r\n\r\n try:\r\n cookies_dict = json.loads(json_str) # 将字符串转换为字典\r\n cookies = cookies_dict['cookies']\r\n except Exception:\r\n raise Exception('转换「{}」为字典时出现错误'.format(self.cookies_path))\r\n return cookies\r\n\r\n def http_post(self, url, data=None, files=None):\r\n \"\"\"\r\n 封装 post 请求\r\n :param url:\r\n :param data:\r\n :param files:\r\n :return: response\r\n \"\"\"\r\n return self.session.post(url, data=data, files=files)\r\n\r\n def http_get(self, url):\r\n \"\"\"\r\n 封装 get 请求\r\n :param url:\r\n :return: response\r\n \"\"\"\r\n return self.session.get(url)\r\n\r\n def get_root_dir_info_id(self) -> dict:\r\n \"\"\"\r\n 获取有道云笔记根目录信息\r\n :return: {\r\n 'fileEntry': {'id': 'test_root_id', 'name': 'ROOT', ...},\r\n ...\r\n }\r\n \"\"\"\r\n data = {'path': '/', 'entire': 'true', 'purge': 'false', 'cstk': self.cstk}\r\n return self.http_post(self.ROOT_ID_URL.format(cstk=self.cstk), data=data).json()\r\n\r\n def get_dir_info_by_id(self, dir_id) -> dict:\r\n \"\"\"\r\n 根据目录 ID 获取目录下所有文件信息\r\n :return: {\r\n 'count': 3,\r\n 'entries': [\r\n {'fileEntry': {'id': 'test_dir_id', 'name': 'test_dir', 'dir': true, ...}},\r\n {'fileEntry': {'id': 'test_note_id', 'name': 'test_note', 'dir': false, ...}}\r\n ...\r\n ]\r\n }\r\n \"\"\"\r\n url = 
self.DIR_MES_URL.format(dir_id=dir_id, cstk=self.cstk)\r\n return self.http_get(url).json()\r\n\r\n def get_file_by_id(self, file_id):\r\n \"\"\"\r\n 根据文件 ID 获取文件内容\r\n :param file_id:\r\n :return: response,内容为笔记字节码\r\n \"\"\"\r\n data = {'fileId': file_id, 'version': -1, 'convert': 'true', 'editorType': 1, 'cstk': self.cstk}\r\n url = self.FILE_URL.format(cstk=self.cstk)\r\n return self.http_post(url, data=data)\r\n \r\n def checkin(self):\r\n \"\"\" 签到领空间\r\n return: {\r\n \"multiple\": 1,\r\n \"originSpace\": 2097152,\r\n \"total\": 424673280,\r\n \"time\": 1692543594831,\r\n \"success\": 1,\r\n \"space\": 2097152\r\n } \r\n \"\"\"\r\n checkin_url = 'https://note.youdao.com/yws/mapi/user?method=checkin'\r\n return self.http_post(checkin_url,data={})\r\n \r\n def note_rename(self,note_name,file_id):\r\n url = f'https://note.youdao.com/yws/api/personal/sync?method=push&name={note_name}fileId={file_id}&domain=0&rootVersion=-1&sessionId=&modifyTime=1692786849&transactionId={file_id}&transactionTime=1692786849&editorVersion=1692267502000&tags=&_system=windows&_systemVersion=&_screenWidth=1920&_screenHeight=1080&_appName=ynote&_appuser=019623eb3bfaff1f5ddc278090f8420d&_vendor=official-website&_launch=22279&_firstTime=2023/08/19 11:24:10&_deviceId=8cf8855c4105f937&_platform=web&_cityCode=440300&_cityName=深圳&sev=j1&sec=v1&keyfrom=web&cstk={self.cstk}'"
},
{
"identifier": "PullImages",
"path": "pull_images.py",
"snippet": "class PullImages():\r\n def __init__(self, youdaonote_api=None, smms_secret_token: str=None, is_relative_path: bool=None):\r\n self.youdaonote_api = youdaonote_api\r\n self.smms_secret_token = smms_secret_token\r\n self.is_relative_path = is_relative_path # 是否使用相对路径\r\n if not self.smms_secret_token and not self.is_relative_path:\r\n self.load_config()\r\n if not self.youdaonote_api:\r\n self.login()\r\n \r\n def migration_ydnote_url(self, file_path):\r\n \"\"\"\r\n 迁移有道云笔记文件 URL\r\n :param file_path:\r\n :return:\r\n \"\"\"\r\n with open(file_path, 'rb') as f:\r\n content = f.read().decode('utf-8')\r\n\r\n # 图片\r\n image_urls = REGEX_IMAGE_URL.findall(content)\r\n if len(image_urls) > 0:\r\n print('正在转换有道云笔记「{}」中的有道云图片链接...'.format(file_path))\r\n for index,image_url in enumerate(image_urls):\r\n image_path = self._get_new_image_path(file_path, image_url,index)\r\n if image_url == image_path:\r\n continue\r\n #将绝对路径替换为相对路径,实现满足 Obsidian 格式要求\r\n #将 image_path 路径中 images 之前的路径去掉,只保留以 images 开头的之后的路径\r\n if self.is_relative_path:\r\n image_path = image_path[image_path.find(IMAGES):]\r\n \r\n image_path = self.url_encode(image_path)\r\n content = content.replace(image_url, image_path)\r\n\r\n # 附件\r\n attach_name_and_url_list = REGEX_ATTACH.findall(content)\r\n if len(attach_name_and_url_list) > 0:\r\n print('正在转换有道云笔记「{}」中的有道云附件链接...'.format(file_path))\r\n for attach_name_and_url in attach_name_and_url_list:\r\n attach_url = attach_name_and_url[1]\r\n attach_path = self._download_attach_url(file_path, attach_url, attach_name_and_url[0])\r\n if not attach_path:\r\n continue\r\n # 将 attach_path 路径中 attachments 之前的路径去掉,只保留以 attachments 开头的之后的路径\r\n if self.is_relative_path:\r\n attach_path = attach_path[attach_path.find(ATTACH):]\r\n content = content.replace(attach_url, attach_path)\r\n\r\n with open(file_path, 'wb') as f:\r\n f.write(content.encode())\r\n return\r\n\r\n def _get_new_image_path(self, file_path, image_url,index) -> str:\r\n \"\"\"\r\n 将图片链接转换为新的链接\r\n :param file_path:\r\n :param image_url:\r\n :return: new_image_path\r\n \"\"\"\r\n # 当 smms_secret_token 为空(不上传到 SM.MS),下载到图片到本地\r\n if not self.smms_secret_token:\r\n image_path = self._download_image_url(file_path, image_url,index)\r\n return image_path or image_url\r\n\r\n # smms_secret_token 不为空,上传到 SM.MS\r\n new_file_url, error_msg = ImageUpload.upload_to_smms(youdaonote_api=self.youdaonote_api, image_url=image_url,\r\n smms_secret_token=self.smms_secret_token)\r\n # 如果上传失败,仍下载到本地\r\n if not error_msg:\r\n return new_file_url\r\n print(error_msg)\r\n image_path = self._download_image_url(file_path, image_url,index)\r\n return image_path or image_url\r\n \r\n def _download_image_url(self, file_path, url,index) -> str:\r\n \"\"\"\r\n 下载文件到本地,返回本地路径\r\n :param file_path:\r\n :param url:\r\n :param attach_name:\r\n :return: path\r\n \"\"\"\r\n try:\r\n response = self.youdaonote_api.http_get(url)\r\n except requests.exceptions.ProxyError as err:\r\n error_msg = '网络错误,「{}」下载失败。错误提示:{}'.format(url, format(err))\r\n print(error_msg)\r\n return ''\r\n\r\n content_type = response.headers.get('Content-Type')\r\n file_type = '图片'\r\n if response.status_code != 200 or not content_type:\r\n error_msg = '下载「{}」失败!{}可能已失效,可浏览器登录有道云笔记后,查看{}是否能正常加载'.format(url, file_type,\r\n file_type)\r\n print(error_msg)\r\n return ''\r\n\r\n # 默认下载图片到 images 文件夹\r\n file_dirname = IMAGES\r\n # 后缀 png 和 jpeg 后可能出现 ; `**.png;`, 原因未知\r\n content_type_arr = content_type.split('/')\r\n file_suffix = '.' 
+ content_type_arr[1].replace(';', '') if len(content_type_arr) == 2 else \"jpg\"\r\n local_file_dir = os.path.join(os.path.dirname(file_path),file_dirname)\r\n\r\n if not os.path.exists(local_file_dir):\r\n os.mkdir(local_file_dir)\r\n \r\n file_name = os.path.basename(os.path.splitext(file_path)[0])\r\n file_name = self._optimize_file_name(file_name)\r\n #请求后的真实的URL中才有东西\r\n realUrl = parse.parse_qs(urlparse(response.url).query)\r\n real_filename = realUrl.get('filename')\r\n if real_filename:\r\n # dict 不为空时,去获取真实文件名称\r\n read_file_name = real_filename[0]\r\n file_suffix = '.' + read_file_name.split('.')[-1]\r\n file_name = os.path.basename(os.path.splitext(file_path)[0]) + '_image_' + str(index) + file_suffix \r\n else:\r\n file_name = os.path.basename(os.path.splitext(file_path)[0]) + '_image_' + str(index) + file_suffix\r\n \r\n local_file_path = os.path.join(local_file_dir, file_name)\r\n # 使md附件或者图片的路径分隔符为\"/\"\r\n local_file_path = local_file_path.replace('\\\\', '/')\r\n \r\n try:\r\n with open(local_file_path, 'wb') as f:\r\n f.write(response.content) # response.content 本身就为字节类型\r\n print('已将{}「{}」转换为「{}」'.format(file_type, url, local_file_path))\r\n except:\r\n error_msg = '{} {}有误!'.format(url, file_type)\r\n print(error_msg)\r\n return ''\r\n \r\n return local_file_path\r\n \r\n \r\n\r\n def _download_attach_url(self, file_path, url,attach_name=None) -> str:\r\n \"\"\"\r\n 下载文件到本地,返回本地路径\r\n :param file_path:\r\n :param url:\r\n :param attach_name:\r\n :return: path\r\n \"\"\"\r\n try:\r\n response = self.youdaonote_api.http_get(url)\r\n except requests.exceptions.ProxyError as err:\r\n error_msg = '网络错误,「{}」下载失败。错误提示:{}'.format(url, format(err))\r\n print(error_msg)\r\n return ''\r\n\r\n content_type = response.headers.get('Content-Type')\r\n file_type = '附件'\r\n if response.status_code != 200 or not content_type:\r\n error_msg = '下载「{}」失败!{}可能已失效,可浏览器登录有道云笔记后,查看{}是否能正常加载'.format(url, file_type,file_type)\r\n print(error_msg)\r\n return ''\r\n\r\n file_dirname = ATTACH\r\n attach_name = self._optimize_file_name(attach_name)\r\n file_suffix = attach_name\r\n local_file_dir = os.path.join(os.path.dirname(file_path),file_dirname)\r\n\r\n if not os.path.exists(local_file_dir):\r\n os.mkdir(local_file_dir)\r\n\r\n local_file_path: str = os.path.join(local_file_dir,file_suffix)\r\n # 使md附件或者图片的路径分隔符为\"/\"\r\n local_file_path = local_file_path.replace('\\\\', '/')\r\n \r\n try:\r\n with open(local_file_path, 'wb') as f:\r\n f.write(response.content) # response.content 本身就为字节类型\r\n print('已将{}「{}」转换为「{}」'.format(file_type, url, local_file_path))\r\n except:\r\n error_msg = '{} {}有误!'.format(url, file_type)\r\n print(error_msg)\r\n return ''\r\n\r\n return local_file_path\r\n \r\n def _optimize_file_name(self, name) -> str:\r\n \"\"\"\r\n 优化文件名,替换下划线\r\n :param name:\r\n :return:\r\n \"\"\"\r\n # 去除换行符,首尾的空格,文件名有空格识别不出图片\r\n name = name.strip()\r\n regex_symbol = re.compile(r'[\\\\/:\\*\\?\"<>\\|、]') # 符号:\\ / : * ? 
\" < > | ( )\r\n name = regex_symbol.sub('_', name)\r\n return name\r\n\r\n \r\n def login(self):\r\n self.youdaonote_api = YoudaoNoteApi()\r\n error_msg = self.youdaonote_api.login_by_cookies()\r\n if error_msg:\r\n return '', error_msg\r\n \r\n def load_config(self):\r\n config_dict, error_msg = covert_config(CONFIG_PATH)\r\n self.smms_secret_token = config_dict['smms_secret_token']\r\n self.is_relative_path = config_dict['is_relative_path']\r\n \r\n def more_pull_images(self,md_dir: str):\r\n \"\"\"遍历文件夹的md文件,拉取md文件有道云的图片和附件\r\n\r\n Args:\r\n md_dir (str): md文件的目录\r\n \"\"\"\r\n file_path = md_dir + \"/**/*.md\"\r\n # 匹配当前目录下所有的txt文件\r\n file_list = glob.glob(file_path,recursive=True)\r\n print(file_list)\r\n for md_file in file_list:\r\n self.migration_ydnote_url(md_file)\r\n \r\n @classmethod\r\n def url_encode(cls,path: str):\r\n \"\"\"对一些特殊字符url编码\r\n Args:\r\n path (str): \r\n \"\"\"\r\n path = path.replace(' ','%20')\r\n return path\r"
},
{
"identifier": "FileActionEnum",
"path": "public.py",
"snippet": "class FileActionEnum(Enum):\n CONTINUE = \"跳过\"\n ADD = \"新增\"\n UPDATE = \"更新\""
},
{
"identifier": "covert_config",
"path": "public.py",
"snippet": "def covert_config(config_path=None) -> Tuple[dict, str]:\n \"\"\"\n 转换配置文件为 dict\n :param config_path: config 文件路径\n :return: (config_dict, error_msg)\n \"\"\"\n config_path = config_path if config_path else CONFIG_PATH\n with open(config_path, 'rb') as f:\n config_str = f.read().decode('utf-8')\n\n try:\n config_dict = json.loads(config_str)\n except:\n return {}, '请检查「config.json」格式是否为 utf-8 格式的 json!建议使用 Sublime 编辑「config.json」'\n\n key_list = ['local_dir', 'ydnote_dir', 'smms_secret_token', 'is_relative_path']\n if key_list != list(config_dict.keys()):\n return {}, '请检查「config.json」的 key 是否分别为 local_dir, ydnote_dir, smms_secret_token, is_relative_path'\n return config_dict, ''"
}
] | import json
import logging
import os
import re
import sys
import time
import traceback
import xml.etree.ElementTree as ET
import requests
from enum import Enum
from typing import Tuple
from convert import YoudaoNoteConvert
from youDaoNoteApi import YoudaoNoteApi
from pull_images import PullImages
from public import FileActionEnum
from public import covert_config | 8,321 | :param modify_time:
:return:
"""
youdao_file_suffix = os.path.splitext(file_name)[1] # 笔记后缀
note_type = self.judge_type(file_id,youdao_file_suffix)
# print(f"{file_name}:{note_type}")
is_note = True if note_type == 1 or note_type == 2 else False
original_file_path = os.path.join(local_dir, file_name).replace('\\', '/') # 原后缀路径
# 生成.md后缀的文件的绝对路径
local_file_path = os.path.join(local_dir, ''.join([os.path.splitext(file_name)[0], MARKDOWN_SUFFIX])).replace(
'\\', '/') if is_note else original_file_path
# 如果有有道云笔记是「note」类型,则提示类型
tip = f'| 原文件: {file_name} | 类型:{note_type}'
file_action = self._get_file_action(local_file_path, modify_time)
if file_action == FileActionEnum.CONTINUE:
return
if file_action == FileActionEnum.UPDATE:
# 考虑到使用 f.write() 直接覆盖原文件,在 Windows 下报错(WinError 183),先将其删除
os.remove(local_file_path)
try:
self._pull_file(file_id, original_file_path, note_type)
print('{}「{}」{}'.format(file_action.value, local_file_path, tip))
except Exception as error:
print('{}「{}」失败!请检查文件!错误提示:{}'.format(file_action.value, original_file_path, format(error)))
def _judge_is_note(self, file_id, youdao_file_suffix):
"""
判断是否是 note 类型
:param file_id:
:param youdao_file_suffix:
:return:
"""
is_note = False
# 1、如果文件是 .note 类型
if youdao_file_suffix == NOTE_SUFFIX:
is_note = True
# 2、如果文件没有类型后缀,但以 `<?xml` 开头
if not youdao_file_suffix:
response = self.youdaonote_api.get_file_by_id(file_id)
content = response.content[:5]
is_note = True if content == b"<?xml" else False
return is_note
# def judge_type(self, noteType: int, orgEditorType: int) -> int:
# """
# 判断返回内容
# :param entryType: int
# :param orgEditorType: int
# :return: note_type: int
# """
# note_type = 0
# # 返回xml格式的note笔记内容,noteType == 0 and orgEditorType == 1
# if noteType == 0 and orgEditorType == 1:
# note_type = 1
# # 返回json格式的note笔记内容
# elif (noteType == 7 or noteType == 5) and orgEditorType == 1:
# note_type = 2
# # 返回md文件内容
# elif noteType == 0 and orgEditorType == 0:
# note_type = 3
# return note_type
def judge_type(self,file_id: str ,youdao_file_suffix: str) -> int:
"""
判断返回内容
:param entryType: int
:param orgEditorType: int
:return: note_type: int
"""
note_type = 0
is_xml = False
if youdao_file_suffix == ".note":
response = self.youdaonote_api.get_file_by_id(file_id)
content = response.content[:5]
is_xml = True if content == b"<?xml" else False
if is_xml: # xml类型
note_type = 1
else: # json类型
note_type = 2
elif youdao_file_suffix == ".md":
note_type = 3
else:
print(f"文件后缀「{youdao_file_suffix}」不识别,请检查!")
return note_type
def _pull_file(self, file_id, file_path, note_type):
"""
下载文件
:param file_id:
:param file_path:
:param itype:
:return:
"""
# 1、所有的都先下载
response = self.youdaonote_api.get_file_by_id(file_id)
with open(file_path, 'wb') as f:
f.write(response.content) # response.content 本身就是字节类型
new_file_path = ""
# 2、如果文件是 note 类型,将其转换为 MarkDown 类型
if note_type == 1:
try:
new_file_path = YoudaoNoteConvert.covert_xml_to_markdown(file_path)
except ET.ParseError:
print(f'{file_path} 笔记应该为 17 年以前新建,格式为 html,将转换为 Markdown ...')
new_file_path = YoudaoNoteConvert.covert_html_to_markdown(file_path)
except Exception:
print(f'{file_path} 笔记转换 MarkDown 失败,将跳过')
elif note_type == 2:
new_file_path = YoudaoNoteConvert.covert_json_to_markdown(file_path)
elif note_type == 3:
YoudaoNoteConvert.markdown_filter(file_path)
new_file_path = file_path
# 迁移附件和图片
if os.path.exists(new_file_path):
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
MARKDOWN_SUFFIX = '.md'
NOTE_SUFFIX = '.note'
CONFIG_PATH = 'config.json'
class YoudaoNotePull(object):
"""
有道云笔记 Pull 封装
"""
CONFIG_PATH = 'config.json'
def __init__(self):
self.root_local_dir = None # 本地文件根目录
self.youdaonote_api = None
self.smms_secret_token = None
self.is_relative_path = None # 是否使用相对路径
def get_ydnote_dir_id(self):
"""
获取有道云笔记根目录或指定目录 ID
:return:
"""
config_dict, error_msg = covert_config(CONFIG_PATH)
if error_msg:
return '', error_msg
local_dir, error_msg = self._check_local_dir(local_dir=config_dict['local_dir'])
if error_msg:
return '', error_msg
self.root_local_dir = local_dir
self.youdaonote_api = YoudaoNoteApi()
error_msg = self.youdaonote_api.login_by_cookies()
if error_msg:
return '', error_msg
self.smms_secret_token = config_dict['smms_secret_token']
self.is_relative_path = config_dict['is_relative_path']
return self._get_ydnote_dir_id(ydnote_dir=config_dict['ydnote_dir'])
def pull_dir_by_id_recursively(self, dir_id, local_dir):
"""
根据目录 ID 循环遍历下载目录下所有文件
:param dir_id:
:param local_dir: 本地目录
:return: error_msg
"""
dir_info = self.youdaonote_api.get_dir_info_by_id(dir_id)
try:
entries = dir_info['entries']
except KeyError:
raise KeyError('有道云笔记修改了接口地址,此脚本暂时不能使用!请提 issue')
for entry in entries:
file_entry = entry['fileEntry']
id = file_entry['id']
file_name = file_entry['name']
file_name = self._optimize_file_name(file_name)
# noteType = file_entry['noteType']
# orgEditorType = file_entry['orgEditorType']
if file_entry['dir']:
sub_dir = os.path.join(local_dir, file_name).replace('\\', '/')
# 判断本地文件夹是否存在
if not os.path.exists(sub_dir):
os.mkdir(sub_dir)
self.pull_dir_by_id_recursively(id, sub_dir)
else:
modify_time = file_entry['modifyTimeForSort']
self._add_or_update_file(id, file_name, local_dir, modify_time)
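# Minimal sketch of the recursive mirroring pattern used by pull_dir_by_id_recursively
# above, with a hypothetical in-memory tree standing in for the Youdao API: directories
# recurse, files are written (added or updated) under the local directory.
def _demo_mirror(tree: dict, local_dir: str):
    for name, node in tree.items():
        path = os.path.join(local_dir, name).replace('\\', '/')
        if isinstance(node, dict):       # a sub-directory: recurse into it
            os.makedirs(path, exist_ok=True)
            _demo_mirror(node, path)
        else:                            # a file payload (bytes): write it locally
            with open(path, 'wb') as f:
                f.write(node)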
def _check_local_dir(self, local_dir, test_default_dir=None) -> Tuple[str, str]:
"""
检查本地文件夹
:param local_dir: 本地文件夹名(绝对路径)
:return: local_dir, error_msg
"""
# 如果没有指定本地文件夹,当前目录新增 youdaonote 目录
if not local_dir:
add_dir = test_default_dir if test_default_dir else 'youdaonote'
# 兼容 Windows 系统,将路径分隔符(\\)替换为 /
local_dir = os.path.join(os.getcwd(), add_dir).replace('\\', '/')
# 如果指定的本地文件夹不存在,创建文件夹
if not os.path.exists(local_dir):
try:
os.mkdir(local_dir)
except:
return '', '请检查「{}」上层文件夹是否存在,并使用绝对路径!'.format(local_dir)
return local_dir, ''
def _get_ydnote_dir_id(self, ydnote_dir) -> Tuple[str, str]:
"""
获取指定有道云笔记指定目录 ID
:param ydnote_dir: 指定有道云笔记指定目录
:return: dir_id, error_msg
"""
root_dir_info = self.youdaonote_api.get_root_dir_info_id()
root_dir_id = root_dir_info['fileEntry']['id']
# 如果不指定文件夹,取根目录 ID
if not ydnote_dir:
return root_dir_id, ''
dir_info = self.youdaonote_api.get_dir_info_by_id(root_dir_id)
for entry in dir_info['entries']:
file_entry = entry['fileEntry']
if file_entry['name'] == ydnote_dir:
return file_entry['id'], ''
return '', '有道云笔记指定顶层目录不存在'
def _add_or_update_file(self, file_id, file_name, local_dir, modify_time):
"""
新增或更新文件
:param file_id:
:param file_name:
:param local_dir:
:param modify_time:
:return:
"""
youdao_file_suffix = os.path.splitext(file_name)[1] # 笔记后缀
note_type = self.judge_type(file_id,youdao_file_suffix)
# print(f"{file_name}:{note_type}")
is_note = True if note_type == 1 or note_type == 2 else False
original_file_path = os.path.join(local_dir, file_name).replace('\\', '/') # 原后缀路径
# 生成.md后缀的文件的绝对路径
local_file_path = os.path.join(local_dir, ''.join([os.path.splitext(file_name)[0], MARKDOWN_SUFFIX])).replace(
'\\', '/') if is_note else original_file_path
# 如果有有道云笔记是「note」类型,则提示类型
tip = f'| 原文件: {file_name} | 类型:{note_type}'
file_action = self._get_file_action(local_file_path, modify_time)
if file_action == FileActionEnum.CONTINUE:
return
if file_action == FileActionEnum.UPDATE:
# 考虑到使用 f.write() 直接覆盖原文件,在 Windows 下报错(WinError 183),先将其删除
os.remove(local_file_path)
try:
self._pull_file(file_id, original_file_path, note_type)
print('{}「{}」{}'.format(file_action.value, local_file_path, tip))
except Exception as error:
print('{}「{}」失败!请检查文件!错误提示:{}'.format(file_action.value, original_file_path, format(error)))
def _judge_is_note(self, file_id, youdao_file_suffix):
"""
判断是否是 note 类型
:param file_id:
:param youdao_file_suffix:
:return:
"""
is_note = False
# 1、如果文件是 .note 类型
if youdao_file_suffix == NOTE_SUFFIX:
is_note = True
# 2、如果文件没有类型后缀,但以 `<?xml` 开头
if not youdao_file_suffix:
response = self.youdaonote_api.get_file_by_id(file_id)
content = response.content[:5]
is_note = True if content == b"<?xml" else False
return is_note
# def judge_type(self, noteType: int, orgEditorType: int) -> int:
# """
# 判断返回内容
# :param entryType: int
# :param orgEditorType: int
# :return: note_type: int
# """
# note_type = 0
# # 返回xml格式的note笔记内容,noteType == 0 and orgEditorType == 1
# if noteType == 0 and orgEditorType == 1:
# note_type = 1
# # 返回json格式的note笔记内容
# elif (noteType == 7 or noteType == 5) and orgEditorType == 1:
# note_type = 2
# # 返回md文件内容
# elif noteType == 0 and orgEditorType == 0:
# note_type = 3
# return note_type
def judge_type(self,file_id: str ,youdao_file_suffix: str) -> int:
"""
判断返回内容
:param entryType: int
:param orgEditorType: int
:return: note_type: int
"""
note_type = 0
is_xml = False
if youdao_file_suffix == ".note":
response = self.youdaonote_api.get_file_by_id(file_id)
content = response.content[:5]
is_xml = True if content == b"<?xml" else False
if is_xml: # xml类型
note_type = 1
else: # json类型
note_type = 2
elif youdao_file_suffix == ".md":
note_type = 3
else:
print(f"文件后缀「{youdao_file_suffix}」不识别,请检查!")
return note_type
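# Hypothetical standalone restatement of the note-type decision in judge_type above
# (1 = xml ".note", 2 = json ".note", 3 = markdown, 0 = unrecognized), needing only the
# file suffix and the first bytes of the downloaded content.
def _demo_judge_type(suffix: str, first_bytes: bytes) -> int:
    if suffix == ".note":
        return 1 if first_bytes.startswith(b"<?xml") else 2
    if suffix == ".md":
        return 3
    return 0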
def _pull_file(self, file_id, file_path, note_type):
"""
下载文件
:param file_id:
:param file_path:
:param itype:
:return:
"""
# 1、所有的都先下载
response = self.youdaonote_api.get_file_by_id(file_id)
with open(file_path, 'wb') as f:
f.write(response.content) # response.content 本身就是字节类型
new_file_path = ""
# 2、如果文件是 note 类型,将其转换为 MarkDown 类型
if note_type == 1:
try:
new_file_path = YoudaoNoteConvert.covert_xml_to_markdown(file_path)
except ET.ParseError:
print(f'{file_path} 笔记应该为 17 年以前新建,格式为 html,将转换为 Markdown ...')
new_file_path = YoudaoNoteConvert.covert_html_to_markdown(file_path)
except Exception:
print(f'{file_path} 笔记转换 MarkDown 失败,将跳过')
elif note_type == 2:
new_file_path = YoudaoNoteConvert.covert_json_to_markdown(file_path)
elif note_type == 3:
YoudaoNoteConvert.markdown_filter(file_path)
new_file_path = file_path
# 迁移附件和图片
if os.path.exists(new_file_path): | pull_image = PullImages(self.youdaonote_api,self.smms_secret_token,self.is_relative_path) | 2 | 2023-10-17 11:21:50+00:00 | 12k |
S-LoRA/S-LoRA | slora/server/api_server.py | [
{
"identifier": "build_prompt",
"path": "slora/server/build_prompt.py",
"snippet": "async def build_prompt(request) -> str:\n if not _fastchat_available:\n raise ModuleNotFoundError(\n \"fastchat is not installed. Please install fastchat to use \"\n \"the chat completion and conversation APIs: `$ pip install 'fschat[model_worker,webui]'`\"\n )\n if version.parse(fastchat.__version__) < version.parse(\"0.2.23\"):\n raise ImportError(\n f\"fastchat version is low. Current version: {fastchat.__version__} \"\n \"Please upgrade fastchat to use: `$ pip install 'fschat[model_worker,webui]'`\")\n\n conv = get_conversation_template(request.model)\n conv = Conversation(\n name=conv.name,\n system_template=conv.system_template,\n system_message=conv.system_message,\n roles=conv.roles,\n messages=list(conv.messages), # prevent in-place modification\n offset=conv.offset,\n sep_style=SeparatorStyle(conv.sep_style),\n sep=conv.sep,\n sep2=conv.sep2,\n stop_str=conv.stop_str,\n stop_token_ids=conv.stop_token_ids,\n )\n\n if isinstance(request.messages, str):\n prompt = request.messages\n else:\n for message in request.messages:\n msg_role = message[\"role\"]\n if msg_role == \"system\":\n conv.system_message = message[\"content\"]\n elif msg_role == \"user\":\n conv.append_message(conv.roles[0], message[\"content\"])\n elif msg_role == \"assistant\":\n conv.append_message(conv.roles[1], message[\"content\"])\n else:\n raise ValueError(f\"Unknown role: {msg_role}\")\n # Add a blank message for the assistant. Meaning it's the assistant's turn to talk.\n conv.append_message(conv.roles[1], None)\n prompt = conv.get_prompt()\n\n return prompt"
},
{
"identifier": "SamplingParams",
"path": "slora/server/sampling_params.py",
"snippet": "class SamplingParams:\n\n def __init__(\n self,\n do_sample: bool = False,\n presence_penalty: float = 0.0,\n frequency_penalty: float = 0.0,\n temperature: float = 1.0,\n top_p: float = 1.0,\n top_k: int = -1, # -1 is for all \n ignore_eos: bool = False,\n max_new_tokens: int = 16,\n stop_sequences: Optional[Union[str, List[str]]] = None # 停止句子条件\n ) -> None:\n self.do_sample = do_sample\n self.presence_penalty = presence_penalty\n self.frequency_penalty = frequency_penalty\n self.temperature = temperature\n self.top_p = top_p\n self.top_k = top_k\n self.ignore_eos = ignore_eos\n self.max_new_tokens = max_new_tokens\n self.stop_sequences = stop_sequences\n if self.do_sample == False:\n self.temperature = 1.0\n self.top_p = 1.0\n self.top_k = 1\n if self.temperature >= 0.0 and self.temperature < _SAMPLING_EPS: # temperature is too slow, change to greedy search\n self.temperature = 1.0\n self.top_k = 1\n return\n \n def verify(self):\n if self.presence_penalty < 0.0:\n raise ValueError(f\"presence_penalty must >= 0.0, got {self.presence_penalty}\")\n if self.frequency_penalty < 0.0:\n raise ValueError(f\"frequency_penalty must >= 0.0, got {self.frequency_penalty}\")\n if self.temperature <= 0.0:\n raise ValueError(f\"temperature must > 0.0, got {self.temperature}\")\n if self.top_p <= 0.0 or self.top_p > 1.0:\n raise ValueError(f\"top_p must in (0.0, 1.0], got {self.top_p}\")\n if self.top_k < -1 or self.top_k == 0:\n raise ValueError(f\"top_k must be -1 (disable), or at least 1, got {self.top_k}.\")\n if self.max_new_tokens < 1:\n raise ValueError(f\"max_new_tokens must be at least 1 , got {self.max_new_tokens}.\")\n return\n\n def stop_sentences_to_token_ids(self, tokenizer):\n if self.stop_sequences is None:\n self.stop_sequences = []\n else:\n if isinstance(self.stop_sequences, str):\n self.stop_sequences = [self.stop_sequences]\n new_stop_sequences = []\n for stop_str in self.stop_sequences:\n stop_str_ids = tokenizer.encode(stop_str)\n if stop_str_ids is not None and len(stop_str_ids) >= 1: # remove bos_token_id\n stop_str_ids = stop_str_ids[1:]\n if len(stop_str_ids) > 0:\n new_stop_sequences.append(stop_str_ids)\n self.stop_sequences = new_stop_sequences\n return\n \n def to_dict(self):\n ret = {}\n ret[\"do_sample\"] = self.do_sample\n ret[\"presence_penalty\"] = self.presence_penalty\n ret[\"frequency_penalty\"] = self.frequency_penalty\n ret[\"temperature\"] = self.temperature\n ret[\"top_p\"] = self.top_p\n ret[\"top_k\"] = self.top_k\n # if self.ignore_eos is not None:\n # ret[\"ignore_eos\"] = self.ignore_eos\n # if self.max_tokens is not None:\n # ret[\"max_tokens\"] = self.max_tokens\n return ret"
},
{
"identifier": "HttpServerManager",
"path": "slora/server/httpserver/manager.py",
"snippet": "class HttpServerManager:\n def __init__(\n self,\n model_weightdir,\n tokenizor_mode,\n router_port,\n httpserver_port,\n total_token_num,\n max_req_input_len,\n max_req_total_len,\n trust_remote_code,\n dummy=False,\n ):\n context = zmq.asyncio.Context(2)\n self.send_to_router = context.socket(zmq.PUSH)\n self.send_to_router.connect(f\"tcp://127.0.0.1:{router_port}\")\n\n self.recv_from_detokenization = context.socket(zmq.PULL)\n self.recv_from_detokenization.bind(f\"tcp://127.0.0.1:{httpserver_port}\")\n\n try: \n self.tokenizer = get_tokenizer(model_weightdir, tokenizor_mode, trust_remote_code=trust_remote_code) \n except:\n if dummy:\n self.tokenizer = get_tokenizer(\"huggyllama/llama-7b\", tokenizor_mode) \n\n self.req_id_to_out_inf = {} # value type (out_str, metadata, finished, event)\n\n self.total_token_num = total_token_num\n self.max_req_input_len = max_req_input_len\n self.max_req_total_len = max_req_total_len\n\n async def generate(self, adapter_dir, prompt, sampling_params, request_id):\n\n prompt_ids = self.tokenizer.encode(prompt)\n prompt_tokens = len(prompt_ids)\n if prompt_tokens > self.max_req_input_len:\n raise ValueError(\n f\"the input prompt token len {prompt_tokens} is too long > {self.max_req_input_len}\"\n )\n req_total_len = prompt_tokens + sampling_params.max_new_tokens\n if req_total_len > self.max_req_total_len:\n raise ValueError(\n f\"the req token total len (input len + output len) is too long > max_req_total_len:{self.max_req_total_len}\"\n )\n if req_total_len + 1 > self.total_token_num:\n raise ValueError(\n f\"the req token total len + 1 (input len + output len + 1) is too long > max_total_token_num:{self.total_token_num}\"\n )\n \n sampling_params.stop_sentences_to_token_ids(self.tokenizer)\n\n self.send_to_router.send_pyobj((adapter_dir, prompt_ids, sampling_params, request_id))\n event = asyncio.Event()\n self.req_id_to_out_inf[request_id] = (\"\", {}, False, event)\n while True:\n try:\n await asyncio.wait_for(event.wait(), timeout=5)\n except asyncio.TimeoutError:\n pass\n event.clear()\n # request_id is aborted by the backend system for traffic control\n if request_id not in self.req_id_to_out_inf:\n yield \"\", {}, -1\n break\n out_str, metadata, finished, _ = self.req_id_to_out_inf[request_id]\n if len(metadata) != 0:\n self.req_id_to_out_inf[request_id] = (\"\", {}, finished, event)\n metadata[\"prompt_tokens\"] = prompt_tokens\n yield out_str, metadata, finished\n if finished:\n try:\n del self.req_id_to_out_inf[request_id]\n except:\n pass\n break\n return\n\n async def abort(self, request_id):\n abort_req = AbortReq(req_id=request_id)\n self.send_to_router.send_pyobj(abort_req)\n try:\n del self.req_id_to_out_inf[request_id]\n except:\n pass\n return\n\n async def handle_loop(self):\n while True:\n recv_ans:Union(BatchStrOut, BatchAbortReq) = await self.recv_from_detokenization.recv_pyobj()\n assert isinstance(recv_ans, (BatchStrOut, BatchAbortReq)), f\"error recv type {type(recv_ans)}\"\n if isinstance(recv_ans, BatchStrOut):\n for req_id, text, metadata, finished, abort in recv_ans.reqs_infs:\n try:\n if not abort:\n _, _, _, event = self.req_id_to_out_inf[req_id]\n self.req_id_to_out_inf[req_id] = (\n text,\n metadata,\n finished,\n event,\n )\n event.set()\n else:\n del self.req_id_to_out_inf[req_id]\n except:\n pass\n elif isinstance(recv_ans, BatchAbortReq):\n print(\"abort reqs:\", recv_ans.reqs)\n for req_id in recv_ans.reqs:\n try:\n del self.req_id_to_out_inf[req_id]\n except:\n pass\n\n return"
},
{
"identifier": "start_detokenization_process",
"path": "slora/server/detokenization/manager.py",
"snippet": "def start_detokenization_process(args, detokenization_port, httpserver_port, pipe_writer, trust_remote_code):\n try:\n router = DeTokenizationManager(args.model_dir, args.tokenizer_mode,\n detokenization_port=detokenization_port, httpserver_port=httpserver_port,\n trust_remote_code=trust_remote_code, dummy=args.dummy)\n except Exception as e:\n pipe_writer.send(str(e))\n raise\n pipe_writer.send('init ok')\n loop = asyncio.get_event_loop()\n loop.run_until_complete(router.handle_loop())\n return"
},
{
"identifier": "start_router_process",
"path": "slora/server/router/manager.py",
"snippet": "def start_router_process(args, router_port, detokenization_port, model_rpc_ports, mode, pipe_writer):\n input_params = InputParams(max_req_total_len=args.max_req_total_len,\n # kv cache manager parameters\n max_total_token_num=args.max_total_token_num,\n pool_size_lora=args.pool_size_lora,\n batch_max_tokens=args.batch_max_tokens,\n running_max_req_size=args.running_max_req_size,\n # heuristic\n swap=args.swap,\n prefetch=args.prefetch,\n prefetch_size=args.prefetch_size,\n scheduler=args.scheduler,\n profile=args.profile,\n batch_num_adapters=args.batch_num_adapters,\n enable_abort=args.enable_abort,\n # mem_ratio=args.mem_ratio,\n dummy=args.dummy,\n no_lora_swap=args.no_lora_swap,\n no_lora_compute=args.no_lora_compute,\n no_kernel=args.no_kernel,\n no_mem_pool=args.no_mem_pool,\n bmm=args.bmm,\n no_lora=args.no_lora,\n fair_weights=args.fair_weights,\n )\n\n try:\n router = RouterManager(\n args.model_dir,\n args.lora_dirs,\n load_way=\"HF\",\n world_size=args.tp,\n eos_id=args.eos_id,\n router_port=router_port,\n detokenization_port=detokenization_port,\n model_rpc_ports=model_rpc_ports,\n input_params=input_params,\n mode=mode,\n log_stats = not args.disable_log_stats,\n log_stats_interval = args.log_stats_interval,\n )\n \n asyncio.run(router.wait_to_model_ready())\n if input_params.profile:\n asyncio.run(router.profile_prefill())\n if input_params.scheduler == \"pets\" and input_params.profile:\n router.req_queue.alpha = router.alpha_model\n router.req_queue.beta = router.beta_model\n elif input_params.scheduler == \"pets\":\n # loading from file\n cache_dir = os.path.expanduser(\"~/.cache/slora\")\n router.req_queue.alpha = AlphaModel.from_file(cache_dir+\"/profile_results.pkl\")\n router.req_queue.beta = BetaModel.from_file(cache_dir+\"/profile_results.pkl\")\n \n except Exception as e:\n import traceback\n err_str = '\\n'.join(traceback.format_exception(e))\n pipe_writer.send(err_str)\n router.clean_up()\n raise\n\n pipe_writer.send('init ok')\n \n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n loop.create_task(router.loop_for_fwd())\n loop.run_until_complete(router.loop_for_netio_req())\n return"
},
{
"identifier": "alloc_can_use_network_port",
"path": "slora/utils/net_utils.py",
"snippet": "def alloc_can_use_network_port(num=3, used_nccl_port=None):\n port_list = []\n for port in range(10000, 65536):\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n result = s.connect_ex((\"localhost\", port))\n if result != 0 and port != used_nccl_port:\n port_list.append(port)\n\n if len(port_list) == num:\n return port_list\n return None"
},
{
"identifier": "setting",
"path": "slora/common/configs/config.py",
"snippet": "_DEFAULT_MAX_INPUT_ADD_OUTPUT_LEN = 1024 * 5"
},
{
"identifier": "ChatCompletionRequest",
"path": "slora/server/api_models.py",
"snippet": "class ChatCompletionRequest(BaseModel):\n # The openai api native parameters\n model: str\n messages: List[Dict[str, str]]\n function_call: Optional[str] = 'none'\n temperature: Optional[float] = 1\n top_p: Optional[float] = 1.0\n n: Optional[int] = 1\n stream: Optional[bool] = False\n stop: Optional[Union[str, List[str]]] = None\n max_tokens: Optional[int] = 16\n presence_penalty: Optional[float] = 0.0\n frequency_penalty: Optional[float] = 0.0\n logit_bias: Optional[Dict[str, float]] = None\n user: Optional[str] = None\n\n # Additional parameters supported by S-LoRA\n do_sample: Optional[bool] = False\n top_k: Optional[int] = -1\n ignore_eos: Optional[bool] = False"
},
{
"identifier": "UsageInfo",
"path": "slora/server/api_models.py",
"snippet": "class UsageInfo(BaseModel):\n prompt_tokens: int = 0\n completion_tokens: Optional[int] = 0\n total_tokens: int = 0"
},
{
"identifier": "ChatMessage",
"path": "slora/server/api_models.py",
"snippet": "class ChatMessage(BaseModel):\n role: str\n content: str"
},
{
"identifier": "ChatCompletionResponseChoice",
"path": "slora/server/api_models.py",
"snippet": "class ChatCompletionResponseChoice(BaseModel):\n index: int\n message: ChatMessage\n finish_reason: Optional[Literal[\"stop\", \"length\", \"function_call\"]] = None"
},
{
"identifier": "ChatCompletionResponse",
"path": "slora/server/api_models.py",
"snippet": "class ChatCompletionResponse(BaseModel):\n id: str = Field(default_factory=lambda: f\"chatcmpl-{uuid.uuid4().hex}\")\n object: str = \"chat.completion\"\n created: int = Field(default_factory=lambda: int(time.time()))\n model: str\n choices: List[ChatCompletionResponseChoice]\n usage: UsageInfo"
},
{
"identifier": "DeltaMessage",
"path": "slora/server/api_models.py",
"snippet": "class DeltaMessage(BaseModel):\n role: Optional[str] = None\n content: Optional[str] = None"
},
{
"identifier": "ChatCompletionStreamResponse",
"path": "slora/server/api_models.py",
"snippet": "class ChatCompletionStreamResponse(BaseModel):\n id: str = Field(default_factory=lambda: f\"chatcmpl-{uuid.uuid4().hex}\")\n object: str = \"chat.completion.chunk\"\n created: int = Field(default_factory=lambda: int(time.time()))\n model: str\n choices: List[ChatCompletionStreamResponseChoice]"
},
{
"identifier": "ChatCompletionStreamResponseChoice",
"path": "slora/server/api_models.py",
"snippet": "class ChatCompletionStreamResponseChoice(BaseModel):\n index: int\n delta: DeltaMessage\n finish_reason: Optional[Literal[\"stop\", \"length\"]] = None"
},
{
"identifier": "ModelProphet",
"path": "slora/mprophet/measure.py",
"snippet": "class ModelProphet:\n name: str\n model_config: ModelConfig\n\n\n def __init__(self, name: str, config=None, model_dir=None):\n self.name = name\n self.model_config = ModelConfig(name, config=config, model_dir=model_dir)\n\n\n # weights size\n def get_layer_size(self, dtype=\"fp16\"):\n dbytes = get_num_bytes(dtype)\n m = self.model_config\n \n if \"opt\" in self.name.lower():\n size = dbytes * (\n # self-attention:\n m.hidden_size ** 2 * 3 + m.hidden_size ** 2 +\n # mlp\n m.hidden_size * m.ffn_embed_dim * 2 +\n # layer norm\n m.hidden_size * 4)\n return size\n elif \"llama\" in self.name.lower():\n size = dbytes * (\n # self-attention:\n m.hidden_size ** 2 * 3 + m.hidden_size ** 2 +\n # mlp\n m.hidden_size * m.ffn_embed_dim * 3 +\n # layer norm\n m.hidden_size * 4)\n return size\n else:\n raise NotImplementedError\n\n\n def get_model_size(self, dtype=\"fp16\"):\n return self.get_layer_size(dtype) * self.model_config.num_hidden_layers\n\n\n def print_layer_size(self, dtype=\"fp16\"):\n size = self.get_layer_size(dtype)\n print(f\"layer size for dtype {dtype}:\\n{size / GB:.3f} GB\")\n\n\n def print_model_size(self, dtype=\"fp16\"):\n size = self.get_model_size(dtype)\n print(f\"model size for dtype {dtype}:\\n{size / GB:.3f} GB\")\n\n\n # I/O\n def get_layer_load_time(self, dtype=\"fp16\", bandwidth=1 * GB):\n size = self.get_layer_size(dtype)\n return size / bandwidth\n\n\n def get_full_load_time(self, preload=0, bandwidth=1 * GB):\n layer_t = self.get_layer_load_time(bandwidth=bandwidth)\n full_t = self.model_config.num_hidden_layers * layer_t * (1 - preload)\n\n\n def print_layer_load_time(self, dtype=\"fp16\", bandwidth=1 * GB):\n t = self.get_layer_load_time(dtype, bandwidth)\n print(f\"layer loading time for dtype {dtype} and bandwidth {bandwidth / GB:.2f} GB/s:\\n{t:.3f} s\")\n\n\n # memory\n def get_peak_working_memory(self, bs, context_len, dtype=\"fp16\", tiling_dim = None):\n # if using tiling for attention\n if tiling_dim is not None:\n attn_block_dim = tiling_dim\n else:\n attn_block_dim = context_len\n\n dbytes = get_num_bytes(dtype)\n m = self.model_config\n mem = dbytes * bs * max(# attention\n 3 * context_len * m.hidden_size +\n m.n_head * attn_block_dim ** 2 +\n context_len * m.hidden_size +\n context_len * m.hidden_size,\n # mlp\n context_len * m.hidden_size * 4\n )\n return mem\n\n\n def get_kv_cache_size(self, bs, context_len, dtype=\"fp16\"):\n dbytes = get_num_bytes(dtype)\n m = self.model_config\n return bs * 2 * context_len * m.hidden_size * dbytes\n\n\n # compute\n def get_layer_flops(self, token_id, bs, context_len):\n if \"opt\" in self.name:\n input_len = context_len if token_id == 0 else 1\n # Q, K, V\n m = self.model_config\n flops = 3 * bs * input_len * m.hidden_size * m.hidden_size * 2\n # attention\n head_dim = m.hidden_size // m.n_head\n flops += bs * m.n_head * input_len * head_dim * context_len * 2\n flops += bs * m.n_head * input_len * context_len * head_dim * 2\n # aggregate\n flops += bs * input_len * m.hidden_size * m.hidden_size * 2\n # mlp\n flops += bs * input_len * m.hidden_size * m.hidden_size * 4 * 2\n flops += bs * input_len * m.hidden_size * 4 * m.hidden_size * 2\n else:\n raise NotImplementedError\n return flops\n\n\n def get_layer_inference_time(self, token_id, bs, context_len, tflops=None, gpu=None, dtype=\"fp16\"):\n assert not (tflops is None and gpu is None)\n if tflops is None:\n tflops = TFLOPS[gpu]\n flops = self.get_layer_flops(token_id, bs, context_len)\n return flops / T / tflops\n\n\n def get_prefill_time(self, 
context_len, bs):\n layer_t = self.get_layer_inference_time(0, bs, context_len, gpu=\"3090\")\n return layer_t * self.model_config.num_hidden_layers\n\n\n def print_layer_inference_time(self, token_id, bs, context_len, tflops=None, gpu=None, dtype=\"fp16\"):\n t = self.get_layer_inference_time(token_id, bs, context_len, tflops, gpu, dtype)\n print(f\"layer inference time for token {token_id} with bs {bs} and context length {context_len}:\\n{t:.3f} s\")\n\n\n # others\n def print_model_stats(self, token_id, bs, context_len, tflops):\n print(f\"===== Stats for model {self.name} =====\")\n self.print_layer_size()\n self.print_layer_load_time(bandwidth=1 * GB)\n self.print_layer_inference_time(token_id, bs, context_len, tflops)\n print()"
},
{
"identifier": "LoRAProphet",
"path": "slora/mprophet/lora_stats.py",
"snippet": "class LoRAProphet:\n\n def __init__(self, name: str, base_name: str,\n lora_config=None, adapter_dir=None,\n base_config=None, base_model_dir=None):\n self.name = name\n self.lora_config = LoRAConfig(name, config=lora_config, weight_dir=adapter_dir)\n\n self.base_name = base_name\n self.base_model = ModelProphet(base_name, config=base_config, model_dir=base_model_dir)\n self.base_config = self.base_model.model_config\n\n\n def get_layer_size(self, dtype=\"fp16\"):\n dbytes = get_num_bytes(dtype)\n m = self.base_config\n size = dbytes * (m.hidden_size * self.lora_config.rank * 2 * 4)\n return size\n\n\n def get_adapter_size(self, dtype=\"fp16\"):\n return self.get_layer_size(dtype) * self.base_config.num_hidden_layers\n\n\n def get_base_size(self, dtype=\"fp16\"):\n return self.base_model.get_model_size(dtype=dtype)"
}
] | import asyncio
import time
import torch
import uvloop
import sys
import argparse
import json
import uuid
import multiprocessing as mp
import uvicorn
from .build_prompt import build_prompt
from http import HTTPStatus
from typing import AsyncGenerator
from fastapi import BackgroundTasks, FastAPI, Request
from fastapi.responses import Response, StreamingResponse, JSONResponse
from .sampling_params import SamplingParams
from .httpserver.manager import HttpServerManager
from .detokenization.manager import start_detokenization_process
from .router.manager import start_router_process
from slora.utils.net_utils import alloc_can_use_network_port
from slora.common.configs.config import setting
from .api_models import (
ChatCompletionRequest,
UsageInfo,
ChatMessage,
ChatCompletionResponseChoice,
ChatCompletionResponse,
DeltaMessage,
ChatCompletionStreamResponse,
ChatCompletionStreamResponseChoice,
)
from slora.mprophet.measure import ModelProphet
from slora.mprophet.lora_stats import LoRAProphet | 7,453 | sampling_params = SamplingParams(**sample_params_dict)
sampling_params.verify()
if "req_id" in request_dict:
request_id = request_dict["req_id"]
else:
request_id = uuid.uuid4().hex
results_generator = httpserver_manager.generate(adapter_dir, prompt, sampling_params, request_id)
# Streaming case
async def stream_results() -> AsyncGenerator[bytes, None]:
async for request_output, metadata, finished in results_generator:
ret = {
"token": {
"id": metadata.get("id", None),
"text": request_output,
"logprob": metadata.get("logprob", None),
"special": False
},
"generated_text": None,
"finished": finished,
"details": None
}
yield ("data:" + json.dumps(ret, ensure_ascii=False) + f"\n\n").encode(
"utf-8"
)
async def abort_request() -> None:
await httpserver_manager.abort(request_id)
background_tasks = BackgroundTasks()
# Abort the request if the client disconnects.
background_tasks.add_task(abort_request)
return StreamingResponse(
stream_results(), media_type="text/event-stream", background=background_tasks
)
@app.post("/v1/chat/completions", response_model=ChatCompletionResponse)
async def chat_completions(
request: ChatCompletionRequest, raw_request: Request
) -> Response:
global isFirst
if isFirst:
loop = asyncio.get_event_loop()
loop.create_task(httpserver_manager.handle_loop())
isFirst = False
if request.logit_bias is not None:
return create_error_response(
HTTPStatus.BAD_REQUEST,
"The logit_bias parameter is not currently supported",
)
if request.n > 1:
return create_error_response(
HTTPStatus.BAD_REQUEST, "The n parameter currently only supports 1"
)
if request.function_call != "none":
return create_error_response(
HTTPStatus.BAD_REQUEST, "The function call feature is not supported"
)
created_time = int(time.time())
prompt = await build_prompt(request)
sampling_params = SamplingParams(
do_sample=request.do_sample,
presence_penalty=request.presence_penalty,
frequency_penalty=request.frequency_penalty,
temperature=request.temperature,
top_p=request.top_p,
top_k=request.top_k,
ignore_eos=request.ignore_eos,
max_new_tokens=request.max_tokens,
stop_sequences=request.stop
)
sampling_params.verify()
request_id = f"chatcmpl-{uuid.uuid4().hex}"
results_generator = httpserver_manager.generate(prompt, sampling_params, request_id)
# Non-streaming case
if not request.stream:
final_output = []
prompt_tokens = -1
completion_tokens = 0
async for request_output, metadata in results_generator:
if await raw_request.is_disconnected():
# Abort the request if the client disconnects.
await httpserver_manager.abort(request_id)
return Response(status_code=499)
completion_tokens += 1
if prompt_tokens == -1:
prompt_tokens = metadata["prompt_tokens"]
final_output.append(request_output)
usage = UsageInfo(
prompt_tokens=prompt_tokens,
completion_tokens=completion_tokens,
total_tokens=prompt_tokens + completion_tokens
)
chat_message = ChatMessage(role="assistant", content="".join(final_output))
choice = ChatCompletionResponseChoice(index=0, message=chat_message)
resp = ChatCompletionResponse(
id=request_id,
created=created_time,
model=request.model,
choices=[choice],
usage=usage
)
return resp
# Streaming case
async def stream_results() -> AsyncGenerator[bytes, None]:
async for request_output, metadata in results_generator:
delta_message = DeltaMessage(role="assistant", content=request_output)
| # Adapted from vllm/entrypoints/api_server.py
# of the vllm-project/vllm GitHub repository.
#
# Copyright 2023 ModelTC Team
# Copyright 2023 vLLM Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
GB = 1024 ** 3
MB = 1024 ** 2
TIMEOUT_KEEP_ALIVE = 5 # seconds.
app = FastAPI()
isFirst = True
def create_error_response(status_code: HTTPStatus, message: str) -> JSONResponse:
return JSONResponse({"message": message}, status_code=status_code.value)
@app.get("/healthz")
@app.get("/health")
def healthcheck():
return "OK"
@app.post("/generate")
async def generate(request: Request) -> Response:
global isFirst
if isFirst:
loop = asyncio.get_event_loop()
loop.create_task(httpserver_manager.handle_loop())
isFirst = False
request_dict = await request.json()
adapter_dir = request_dict["lora_dir"] if "lora_dir" in request_dict else None
prompt = request_dict.pop("inputs")
sample_params_dict = request_dict["parameters"]
return_details = sample_params_dict.pop("return_details", False)
sampling_params = SamplingParams(**sample_params_dict)
sampling_params.verify()
if "req_id" in request_dict:
request_id = request_dict["req_id"]
else:
request_id = uuid.uuid4().hex
results_generator = httpserver_manager.generate(adapter_dir, prompt, sampling_params, request_id)
# Non-streaming case
final_output = []
count_output_tokens = 0
tokens = []
async for request_output, metadata, finished in results_generator:
count_output_tokens += 1
if finished == -1:
return Response(status_code=499)
if await request.is_disconnected():
# Abort the request if the client disconnects.
await httpserver_manager.abort(request_id)
return Response(status_code=499)
final_output.append(request_output)
if return_details:
metadata["text"] = request_output
tokens.append(metadata)
assert final_output is not None
ret = {
"generated_text": ["".join(final_output)],
"count_output_tokens": count_output_tokens,
}
if return_details:
ret["tokens"] = tokens
return Response(content=json.dumps(ret, ensure_ascii=False).encode("utf-8"))
@app.post("/generate_stream")
async def generate_stream(request: Request) -> Response:
global isFirst
if isFirst:
loop = asyncio.get_event_loop()
loop.create_task(httpserver_manager.handle_loop())
isFirst = False
request_dict = await request.json()
adapter_dir = request_dict["lora_dir"] if "lora_dir" in request_dict else None
prompt = request_dict.pop("inputs")
sample_params_dict = request_dict["parameters"]
return_details = sample_params_dict.pop("return_details", False)
sampling_params = SamplingParams(**sample_params_dict)
sampling_params.verify()
if "req_id" in request_dict:
request_id = request_dict["req_id"]
else:
request_id = uuid.uuid4().hex
results_generator = httpserver_manager.generate(adapter_dir, prompt, sampling_params, request_id)
# Streaming case
async def stream_results() -> AsyncGenerator[bytes, None]:
async for request_output, metadata, finished in results_generator:
ret = {
"token": {
"id": metadata.get("id", None),
"text": request_output,
"logprob": metadata.get("logprob", None),
"special": False
},
"generated_text": None,
"finished": finished,
"details": None
}
yield ("data:" + json.dumps(ret, ensure_ascii=False) + f"\n\n").encode(
"utf-8"
)
async def abort_request() -> None:
await httpserver_manager.abort(request_id)
background_tasks = BackgroundTasks()
# Abort the request if the client disconnects.
background_tasks.add_task(abort_request)
return StreamingResponse(
stream_results(), media_type="text/event-stream", background=background_tasks
)
@app.post("/v1/chat/completions", response_model=ChatCompletionResponse)
async def chat_completions(
request: ChatCompletionRequest, raw_request: Request
) -> Response:
global isFirst
if isFirst:
loop = asyncio.get_event_loop()
loop.create_task(httpserver_manager.handle_loop())
isFirst = False
if request.logit_bias is not None:
return create_error_response(
HTTPStatus.BAD_REQUEST,
"The logit_bias parameter is not currently supported",
)
if request.n > 1:
return create_error_response(
HTTPStatus.BAD_REQUEST, "The n parameter currently only supports 1"
)
if request.function_call != "none":
return create_error_response(
HTTPStatus.BAD_REQUEST, "The function call feature is not supported"
)
created_time = int(time.time())
prompt = await build_prompt(request)
sampling_params = SamplingParams(
do_sample=request.do_sample,
presence_penalty=request.presence_penalty,
frequency_penalty=request.frequency_penalty,
temperature=request.temperature,
top_p=request.top_p,
top_k=request.top_k,
ignore_eos=request.ignore_eos,
max_new_tokens=request.max_tokens,
stop_sequences=request.stop
)
sampling_params.verify()
request_id = f"chatcmpl-{uuid.uuid4().hex}"
results_generator = httpserver_manager.generate(prompt, sampling_params, request_id)
# Non-streaming case
if not request.stream:
final_output = []
prompt_tokens = -1
completion_tokens = 0
async for request_output, metadata in results_generator:
if await raw_request.is_disconnected():
# Abort the request if the client disconnects.
await httpserver_manager.abort(request_id)
return Response(status_code=499)
completion_tokens += 1
if prompt_tokens == -1:
prompt_tokens = metadata["prompt_tokens"]
final_output.append(request_output)
usage = UsageInfo(
prompt_tokens=prompt_tokens,
completion_tokens=completion_tokens,
total_tokens=prompt_tokens + completion_tokens
)
chat_message = ChatMessage(role="assistant", content="".join(final_output))
choice = ChatCompletionResponseChoice(index=0, message=chat_message)
resp = ChatCompletionResponse(
id=request_id,
created=created_time,
model=request.model,
choices=[choice],
usage=usage
)
return resp
# Streaming case
async def stream_results() -> AsyncGenerator[bytes, None]:
async for request_output, metadata in results_generator:
delta_message = DeltaMessage(role="assistant", content=request_output)
| stream_choice = ChatCompletionStreamResponseChoice( | 14 | 2023-11-05 04:08:36+00:00 | 12k |
Yuliang-Liu/Monkey | data_generation/grit/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/dense_heads/centernet.py | [
{
"identifier": "heatmap_focal_loss_jit",
"path": "data_generation/grit/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/layers/heatmap_focal_loss.py",
"snippet": "def heatmap_focal_loss(\n inputs,\n targets,\n pos_inds,\n labels,\n alpha: float = -1,\n beta: float = 4,\n gamma: float = 2,\n reduction: str = 'sum',\n sigmoid_clamp: float = 1e-4,\n ignore_high_fp: float = -1.,\n):\ndef binary_heatmap_focal_loss(\n inputs,\n targets,\n pos_inds,\n alpha: float = -1,\n beta: float = 4,\n gamma: float = 2,\n sigmoid_clamp: float = 1e-4,\n ignore_high_fp: float = -1.,\n):"
},
{
"identifier": "binary_heatmap_focal_loss",
"path": "data_generation/grit/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/layers/heatmap_focal_loss.py",
"snippet": "def binary_heatmap_focal_loss(\n inputs,\n targets,\n pos_inds,\n alpha: float = -1,\n beta: float = 4,\n gamma: float = 2,\n sigmoid_clamp: float = 1e-4,\n ignore_high_fp: float = -1.,\n):\n \"\"\"\n Args:\n inputs: (sum_l N*Hl*Wl,)\n targets: (sum_l N*Hl*Wl,)\n pos_inds: N\n Returns:\n Loss tensor with the reduction option applied.\n \"\"\"\n pred = torch.clamp(inputs.sigmoid_(), min=sigmoid_clamp, max=1-sigmoid_clamp)\n neg_weights = torch.pow(1 - targets, beta)\n for i, ind in enumerate(pos_inds):\n if ind >= pred.shape[0]:\n print('%'*100)\n print(pred.shape, ind, pos_inds)\n pos_inds[i] = pred.shape[0] - 1\n pos_pred = pred[pos_inds] # N\n pos_loss = torch.log(pos_pred) * torch.pow(1 - pos_pred, gamma)\n neg_loss = torch.log(1 - pred) * torch.pow(pred, gamma) * neg_weights\n if ignore_high_fp > 0:\n not_high_fp = (pred < ignore_high_fp).float()\n neg_loss = not_high_fp * neg_loss\n\n pos_loss = - pos_loss.sum()\n neg_loss = - neg_loss.sum()\n\n if alpha >= 0:\n pos_loss = alpha * pos_loss\n neg_loss = (1 - alpha) * neg_loss\n\n return pos_loss, neg_loss"
},
{
"identifier": "IOULoss",
"path": "data_generation/grit/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/layers/iou_loss.py",
"snippet": "class IOULoss(nn.Module):\n def __init__(self, loc_loss_type='iou'):\n super(IOULoss, self).__init__()\n self.loc_loss_type = loc_loss_type\n\n def forward(self, pred, target, weight=None, reduction='sum'):\n pred_left = pred[:, 0]\n pred_top = pred[:, 1]\n pred_right = pred[:, 2]\n pred_bottom = pred[:, 3]\n\n target_left = target[:, 0]\n target_top = target[:, 1]\n target_right = target[:, 2]\n target_bottom = target[:, 3]\n\n target_aera = (target_left + target_right) * \\\n (target_top + target_bottom)\n pred_aera = (pred_left + pred_right) * \\\n (pred_top + pred_bottom)\n\n w_intersect = torch.min(pred_left, target_left) + \\\n torch.min(pred_right, target_right)\n h_intersect = torch.min(pred_bottom, target_bottom) + \\\n torch.min(pred_top, target_top)\n\n g_w_intersect = torch.max(pred_left, target_left) + \\\n torch.max(pred_right, target_right)\n g_h_intersect = torch.max(pred_bottom, target_bottom) + \\\n torch.max(pred_top, target_top)\n ac_uion = g_w_intersect * g_h_intersect\n\n area_intersect = w_intersect * h_intersect\n area_union = target_aera + pred_aera - area_intersect\n\n ious = (area_intersect + 1.0) / (area_union + 1.0)\n gious = ious - (ac_uion - area_union) / ac_uion\n if self.loc_loss_type == 'iou':\n losses = -torch.log(ious)\n elif self.loc_loss_type == 'linear_iou':\n losses = 1 - ious\n elif self.loc_loss_type == 'giou':\n losses = 1 - gious\n else:\n raise NotImplementedError\n\n if weight is not None:\n losses = losses * weight\n else:\n losses = losses\n\n if reduction == 'sum':\n return losses.sum()\n elif reduction == 'batch':\n return losses.sum(dim=[1])\n elif reduction == 'none':\n return losses\n else:\n raise NotImplementedError"
},
{
"identifier": "ml_nms",
"path": "data_generation/grit/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/layers/ml_nms.py",
"snippet": "def ml_nms(boxlist, nms_thresh, max_proposals=-1,\n score_field=\"scores\", label_field=\"labels\"):\n \"\"\"\n Performs non-maximum suppression on a boxlist, with scores specified\n in a boxlist field via score_field.\n Arguments:\n boxlist(BoxList)\n nms_thresh (float)\n max_proposals (int): if > 0, then only the top max_proposals are kept\n after non-maximum suppression\n score_field (str)\n \"\"\"\n if nms_thresh <= 0:\n return boxlist\n if boxlist.has('pred_boxes'):\n boxes = boxlist.pred_boxes.tensor\n labels = boxlist.pred_classes\n else:\n boxes = boxlist.proposal_boxes.tensor\n labels = boxlist.proposal_boxes.tensor.new_zeros(\n len(boxlist.proposal_boxes.tensor))\n scores = boxlist.scores\n \n keep = batched_nms(boxes, scores, labels, nms_thresh)\n if max_proposals > 0:\n keep = keep[: max_proposals]\n boxlist = boxlist[keep]\n return boxlist"
},
{
"identifier": "debug_train",
"path": "data_generation/grit/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/debug.py",
"snippet": "def debug_train(\n images, gt_instances, flattened_hms, reg_targets, labels, pos_inds,\n shapes_per_level, locations, strides):\n '''\n images: N x 3 x H x W\n flattened_hms: LNHiWi x C\n shapes_per_level: L x 2 [(H_i, W_i)]\n locations: LNHiWi x 2\n '''\n reg_inds = torch.nonzero(\n reg_targets.max(dim=1)[0] > 0).squeeze(1)\n N = len(images)\n images = _imagelist_to_tensor(images)\n repeated_locations = [torch.cat([loc] * N, dim=0) \\\n for loc in locations]\n locations = torch.cat(repeated_locations, dim=0)\n gt_hms = _decompose_level(flattened_hms, shapes_per_level, N)\n masks = flattened_hms.new_zeros((flattened_hms.shape[0], 1))\n masks[pos_inds] = 1\n masks = _decompose_level(masks, shapes_per_level, N)\n for i in range(len(images)):\n image = images[i].detach().cpu().numpy().transpose(1, 2, 0)\n color_maps = []\n for l in range(len(gt_hms)):\n color_map = _get_color_image(\n gt_hms[l][i].detach().cpu().numpy())\n color_maps.append(color_map)\n cv2.imshow('gthm_{}'.format(l), color_map)\n blend = _blend_image_heatmaps(image.copy(), color_maps)\n if gt_instances is not None:\n bboxes = gt_instances[i].gt_boxes.tensor\n for j in range(len(bboxes)):\n bbox = bboxes[j]\n cv2.rectangle(\n blend, \n (int(bbox[0]), int(bbox[1])),\n (int(bbox[2]), int(bbox[3])),\n (0, 0, 255), 3, cv2.LINE_AA)\n \n for j in range(len(pos_inds)):\n image_id, l = _ind2il(pos_inds[j], shapes_per_level, N)\n if image_id != i:\n continue\n loc = locations[pos_inds[j]]\n cv2.drawMarker(\n blend, (int(loc[0]), int(loc[1])), (0, 255, 255),\n markerSize=(l + 1) * 16)\n \n for j in range(len(reg_inds)):\n image_id, l = _ind2il(reg_inds[j], shapes_per_level, N)\n if image_id != i:\n continue\n ltrb = reg_targets[reg_inds[j]]\n ltrb *= strides[l]\n loc = locations[reg_inds[j]]\n bbox = [(loc[0] - ltrb[0]), (loc[1] - ltrb[1]),\n (loc[0] + ltrb[2]), (loc[1] + ltrb[3])]\n cv2.rectangle(\n blend, \n (int(bbox[0]), int(bbox[1])),\n (int(bbox[2]), int(bbox[3])),\n (255, 0, 0), 1, cv2.LINE_AA) \n cv2.circle(blend, (int(loc[0]), int(loc[1])), 2, (255, 0, 0), -1)\n\n cv2.imshow('blend', blend)\n cv2.waitKey()"
},
{
"identifier": "debug_test",
"path": "data_generation/grit/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/debug.py",
"snippet": "def debug_test(\n images, logits_pred, reg_pred, agn_hm_pred=[], preds=[], \n vis_thresh=0.3, debug_show_name=False, mult_agn=False):\n '''\n images: N x 3 x H x W\n class_target: LNHiWi x C\n cat_agn_heatmap: LNHiWi\n shapes_per_level: L x 2 [(H_i, W_i)]\n '''\n N = len(images)\n for i in range(len(images)):\n image = images[i].detach().cpu().numpy().transpose(1, 2, 0)\n result = image.copy().astype(np.uint8)\n pred_image = image.copy().astype(np.uint8)\n color_maps = []\n L = len(logits_pred)\n for l in range(L):\n if logits_pred[0] is not None:\n stride = min(image.shape[0], image.shape[1]) / min(\n logits_pred[l][i].shape[1], logits_pred[l][i].shape[2])\n else:\n stride = min(image.shape[0], image.shape[1]) / min(\n agn_hm_pred[l][i].shape[1], agn_hm_pred[l][i].shape[2])\n stride = stride if stride < 60 else 64 if stride < 100 else 128\n if logits_pred[0] is not None:\n if mult_agn:\n logits_pred[l][i] = logits_pred[l][i] * agn_hm_pred[l][i]\n color_map = _get_color_image(\n logits_pred[l][i].detach().cpu().numpy())\n color_maps.append(color_map)\n cv2.imshow('predhm_{}'.format(l), color_map)\n\n if debug_show_name:\n from detectron2.data.datasets.lvis_v1_categories import LVIS_CATEGORIES \n cat2name = [x['name'] for x in LVIS_CATEGORIES]\n for j in range(len(preds[i].scores) if preds is not None else 0):\n if preds[i].scores[j] > vis_thresh:\n bbox = preds[i].proposal_boxes[j] \\\n if preds[i].has('proposal_boxes') else \\\n preds[i].pred_boxes[j]\n bbox = bbox.tensor[0].detach().cpu().numpy().astype(np.int32)\n cat = int(preds[i].pred_classes[j]) \\\n if preds[i].has('pred_classes') else 0\n cl = COLORS[cat, 0, 0]\n cv2.rectangle(\n pred_image, (int(bbox[0]), int(bbox[1])), \n (int(bbox[2]), int(bbox[3])), \n (int(cl[0]), int(cl[1]), int(cl[2])), 2, cv2.LINE_AA)\n if debug_show_name:\n txt = '{}{:.1f}'.format(\n cat2name[cat] if cat > 0 else '', \n preds[i].scores[j])\n font = cv2.FONT_HERSHEY_SIMPLEX\n cat_size = cv2.getTextSize(txt, font, 0.5, 2)[0]\n cv2.rectangle(\n pred_image,\n (int(bbox[0]), int(bbox[1] - cat_size[1] - 2)),\n (int(bbox[0] + cat_size[0]), int(bbox[1] - 2)), \n (int(cl[0]), int(cl[1]), int(cl[2])), -1)\n cv2.putText(\n pred_image, txt, (int(bbox[0]), int(bbox[1] - 2)), \n font, 0.5, (0, 0, 0), thickness=1, lineType=cv2.LINE_AA)\n\n\n if agn_hm_pred[l] is not None:\n agn_hm_ = agn_hm_pred[l][i, 0, :, :, None].detach().cpu().numpy()\n agn_hm_ = (agn_hm_ * np.array([255, 255, 255]).reshape(\n 1, 1, 3)).astype(np.uint8)\n cv2.imshow('agn_hm_{}'.format(l), agn_hm_)\n blend = _blend_image_heatmaps(image.copy(), color_maps)\n cv2.imshow('blend', blend)\n cv2.imshow('preds', pred_image)\n cv2.waitKey()"
},
{
"identifier": "reduce_sum",
"path": "data_generation/grit/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/dense_heads/utils.py",
"snippet": "def reduce_sum(tensor):\n world_size = get_world_size()\n if world_size < 2:\n return tensor\n tensor = tensor.clone()\n torch.distributed.all_reduce(tensor, op=torch.distributed.ReduceOp.SUM)\n return tensor"
},
{
"identifier": "_transpose",
"path": "data_generation/grit/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/dense_heads/utils.py",
"snippet": "def _transpose(training_targets, num_loc_list):\n '''\n This function is used to transpose image first training targets to \n level first ones\n :return: level first training targets\n '''\n for im_i in range(len(training_targets)):\n training_targets[im_i] = torch.split(\n training_targets[im_i], num_loc_list, dim=0)\n\n targets_level_first = []\n for targets_per_level in zip(*training_targets):\n targets_level_first.append(\n torch.cat(targets_per_level, dim=0))\n return targets_level_first"
},
{
"identifier": "CenterNetHead",
"path": "data_generation/grit/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/dense_heads/centernet_head.py",
"snippet": "class CenterNetHead(nn.Module):\n @configurable\n def __init__(self, \n # input_shape: List[ShapeSpec],\n in_channels,\n num_levels,\n *,\n num_classes=80,\n with_agn_hm=False,\n only_proposal=False,\n norm='GN',\n num_cls_convs=4,\n num_box_convs=4,\n num_share_convs=0,\n use_deformable=False,\n prior_prob=0.01):\n super().__init__()\n self.num_classes = num_classes\n self.with_agn_hm = with_agn_hm\n self.only_proposal = only_proposal\n self.out_kernel = 3\n\n head_configs = {\n \"cls\": (num_cls_convs if not self.only_proposal else 0, \\\n use_deformable),\n \"bbox\": (num_box_convs, use_deformable),\n \"share\": (num_share_convs, use_deformable)}\n\n # in_channels = [s.channels for s in input_shape]\n # assert len(set(in_channels)) == 1, \\\n # \"Each level must have the same channel!\"\n # in_channels = in_channels[0]\n channels = {\n 'cls': in_channels,\n 'bbox': in_channels,\n 'share': in_channels,\n }\n for head in head_configs:\n tower = []\n num_convs, use_deformable = head_configs[head]\n channel = channels[head]\n for i in range(num_convs):\n if use_deformable and i == num_convs - 1:\n conv_func = DFConv2d\n else:\n conv_func = nn.Conv2d\n tower.append(conv_func(\n in_channels if i == 0 else channel,\n channel, \n kernel_size=3, stride=1,\n padding=1, bias=True\n ))\n if norm == 'GN' and channel % 32 != 0:\n tower.append(nn.GroupNorm(25, channel))\n elif norm != '':\n tower.append(get_norm(norm, channel))\n tower.append(nn.ReLU())\n self.add_module('{}_tower'.format(head),\n nn.Sequential(*tower))\n\n self.bbox_pred = nn.Conv2d(\n in_channels, 4, kernel_size=self.out_kernel,\n stride=1, padding=self.out_kernel // 2\n )\n\n self.scales = nn.ModuleList(\n [Scale(init_value=1.0) for _ in range(num_levels)])\n\n for modules in [\n self.cls_tower, self.bbox_tower,\n self.share_tower,\n self.bbox_pred,\n ]:\n for l in modules.modules():\n if isinstance(l, nn.Conv2d):\n torch.nn.init.normal_(l.weight, std=0.01)\n torch.nn.init.constant_(l.bias, 0)\n \n torch.nn.init.constant_(self.bbox_pred.bias, 8.)\n prior_prob = prior_prob\n bias_value = -math.log((1 - prior_prob) / prior_prob)\n\n if self.with_agn_hm:\n self.agn_hm = nn.Conv2d(\n in_channels, 1, kernel_size=self.out_kernel,\n stride=1, padding=self.out_kernel // 2\n )\n torch.nn.init.constant_(self.agn_hm.bias, bias_value)\n torch.nn.init.normal_(self.agn_hm.weight, std=0.01)\n\n if not self.only_proposal:\n cls_kernel_size = self.out_kernel\n self.cls_logits = nn.Conv2d(\n in_channels, self.num_classes,\n kernel_size=cls_kernel_size, \n stride=1,\n padding=cls_kernel_size // 2,\n )\n\n torch.nn.init.constant_(self.cls_logits.bias, bias_value)\n torch.nn.init.normal_(self.cls_logits.weight, std=0.01)\n\n @classmethod\n def from_config(cls, cfg, input_shape):\n ret = {\n # 'input_shape': input_shape,\n 'in_channels': [s.channels for s in input_shape][0],\n 'num_levels': len(input_shape),\n 'num_classes': cfg.MODEL.CENTERNET.NUM_CLASSES,\n 'with_agn_hm': cfg.MODEL.CENTERNET.WITH_AGN_HM,\n 'only_proposal': cfg.MODEL.CENTERNET.ONLY_PROPOSAL,\n 'norm': cfg.MODEL.CENTERNET.NORM,\n 'num_cls_convs': cfg.MODEL.CENTERNET.NUM_CLS_CONVS,\n 'num_box_convs': cfg.MODEL.CENTERNET.NUM_BOX_CONVS,\n 'num_share_convs': cfg.MODEL.CENTERNET.NUM_SHARE_CONVS,\n 'use_deformable': cfg.MODEL.CENTERNET.USE_DEFORMABLE,\n 'prior_prob': cfg.MODEL.CENTERNET.PRIOR_PROB,\n }\n return ret\n\n def forward(self, x):\n clss = []\n bbox_reg = []\n agn_hms = []\n for l, feature in enumerate(x):\n feature = self.share_tower(feature)\n cls_tower = 
self.cls_tower(feature)\n bbox_tower = self.bbox_tower(feature)\n if not self.only_proposal:\n clss.append(self.cls_logits(cls_tower))\n else:\n clss.append(None)\n\n if self.with_agn_hm:\n agn_hms.append(self.agn_hm(bbox_tower))\n else:\n agn_hms.append(None)\n reg = self.bbox_pred(bbox_tower)\n reg = self.scales[l](reg)\n bbox_reg.append(F.relu(reg))\n \n return clss, bbox_reg, agn_hms"
}
] | import math
import json
import copy
import numpy as np
import torch
from typing import List, Dict
from torch import nn
from torch.nn import functional as F
from detectron2.modeling.proposal_generator.build import PROPOSAL_GENERATOR_REGISTRY
from detectron2.layers import ShapeSpec, cat
from detectron2.structures import Instances, Boxes
from detectron2.modeling import detector_postprocess
from detectron2.utils.comm import get_world_size
from detectron2.config import configurable
from ..layers.heatmap_focal_loss import heatmap_focal_loss_jit
from ..layers.heatmap_focal_loss import binary_heatmap_focal_loss
from ..layers.iou_loss import IOULoss
from ..layers.ml_nms import ml_nms
from ..debug import debug_train, debug_test
from .utils import reduce_sum, _transpose
from .centernet_head import CenterNetHead | 7,617 | 'score_thresh': cfg.MODEL.CENTERNET.INFERENCE_TH,
'loc_loss_type': cfg.MODEL.CENTERNET.LOC_LOSS_TYPE,
'hm_min_overlap': cfg.MODEL.CENTERNET.HM_MIN_OVERLAP,
'min_radius': cfg.MODEL.CENTERNET.MIN_RADIUS,
'hm_focal_alpha': cfg.MODEL.CENTERNET.HM_FOCAL_ALPHA,
'hm_focal_beta': cfg.MODEL.CENTERNET.HM_FOCAL_BETA,
'loss_gamma': cfg.MODEL.CENTERNET.LOSS_GAMMA,
'reg_weight': cfg.MODEL.CENTERNET.REG_WEIGHT,
'not_norm_reg': cfg.MODEL.CENTERNET.NOT_NORM_REG,
'with_agn_hm': cfg.MODEL.CENTERNET.WITH_AGN_HM,
'only_proposal': cfg.MODEL.CENTERNET.ONLY_PROPOSAL,
'as_proposal': cfg.MODEL.CENTERNET.AS_PROPOSAL,
'not_nms': cfg.MODEL.CENTERNET.NOT_NMS,
'pos_weight': cfg.MODEL.CENTERNET.POS_WEIGHT,
'neg_weight': cfg.MODEL.CENTERNET.NEG_WEIGHT,
'sigmoid_clamp': cfg.MODEL.CENTERNET.SIGMOID_CLAMP,
'ignore_high_fp': cfg.MODEL.CENTERNET.IGNORE_HIGH_FP,
'center_nms': cfg.MODEL.CENTERNET.CENTER_NMS,
'sizes_of_interest': cfg.MODEL.CENTERNET.SOI,
'more_pos': cfg.MODEL.CENTERNET.MORE_POS,
'more_pos_thresh': cfg.MODEL.CENTERNET.MORE_POS_THRESH,
'more_pos_topk': cfg.MODEL.CENTERNET.MORE_POS_TOPK,
'pre_nms_topk_train': cfg.MODEL.CENTERNET.PRE_NMS_TOPK_TRAIN,
'pre_nms_topk_test': cfg.MODEL.CENTERNET.PRE_NMS_TOPK_TEST,
'post_nms_topk_train': cfg.MODEL.CENTERNET.POST_NMS_TOPK_TRAIN,
'post_nms_topk_test': cfg.MODEL.CENTERNET.POST_NMS_TOPK_TEST,
'nms_thresh_train': cfg.MODEL.CENTERNET.NMS_TH_TRAIN,
'nms_thresh_test': cfg.MODEL.CENTERNET.NMS_TH_TEST,
'no_reduce': cfg.MODEL.CENTERNET.NO_REDUCE,
'debug': cfg.DEBUG,
'vis_thresh': cfg.VIS_THRESH,
'pixel_mean': cfg.MODEL.PIXEL_MEAN,
'pixel_std': cfg.MODEL.PIXEL_STD,
'device': cfg.MODEL.DEVICE,
'centernet_head': CenterNetHead(
cfg, [input_shape[f] for f in cfg.MODEL.CENTERNET.IN_FEATURES]),
}
return ret
def forward(self, images, features_dict, gt_instances):
features = [features_dict[f] for f in self.in_features]
clss_per_level, reg_pred_per_level, agn_hm_pred_per_level = \
self.centernet_head(features)
grids = self.compute_grids(features)
shapes_per_level = grids[0].new_tensor(
[(x.shape[2], x.shape[3]) for x in reg_pred_per_level])
if not self.training:
return self.inference(
images, clss_per_level, reg_pred_per_level,
agn_hm_pred_per_level, grids)
else:
pos_inds, labels, reg_targets, flattened_hms = \
self._get_ground_truth(
grids, shapes_per_level, gt_instances)
# logits_pred: M x F, reg_pred: M x 4, agn_hm_pred: M
logits_pred, reg_pred, agn_hm_pred = self._flatten_outputs(
clss_per_level, reg_pred_per_level, agn_hm_pred_per_level)
if self.more_pos:
# add more pixels as positive if \
# 1. they are within the center3x3 region of an object
# 2. their regression losses are small (<self.more_pos_thresh)
pos_inds, labels = self._add_more_pos(
reg_pred, gt_instances, shapes_per_level)
losses = self.losses(
pos_inds, labels, reg_targets, flattened_hms,
logits_pred, reg_pred, agn_hm_pred)
proposals = None
if self.only_proposal:
agn_hm_pred_per_level = [x.sigmoid() for x in agn_hm_pred_per_level]
proposals = self.predict_instances(
grids, agn_hm_pred_per_level, reg_pred_per_level,
images.image_sizes, [None for _ in agn_hm_pred_per_level])
elif self.as_proposal: # category specific bbox as agnostic proposals
clss_per_level = [x.sigmoid() for x in clss_per_level]
proposals = self.predict_instances(
grids, clss_per_level, reg_pred_per_level,
images.image_sizes, agn_hm_pred_per_level)
if self.only_proposal or self.as_proposal:
for p in range(len(proposals)):
proposals[p].proposal_boxes = proposals[p].get('pred_boxes')
proposals[p].objectness_logits = proposals[p].get('scores')
proposals[p].remove('pred_boxes')
proposals[p].remove('scores')
proposals[p].remove('pred_classes')
if self.debug:
debug_train(
[self.denormalizer(x) for x in images],
gt_instances, flattened_hms, reg_targets,
labels, pos_inds, shapes_per_level, grids, self.strides)
return proposals, losses
def losses(
self, pos_inds, labels, reg_targets, flattened_hms,
logits_pred, reg_pred, agn_hm_pred):
'''
Inputs:
pos_inds: N
labels: N
reg_targets: M x 4
flattened_hms: M x C
logits_pred: M x C
reg_pred: M x 4
agn_hm_pred: M x 1 or None
N: number of positive locations in all images
M: number of pixels from all FPN levels
C: number of classes
'''
assert (torch.isfinite(reg_pred).all().item())
num_pos_local = pos_inds.numel()
num_gpus = get_world_size()
if self.no_reduce:
total_num_pos = num_pos_local * num_gpus
else:
|
__all__ = ["CenterNet"]
INF = 100000000
@PROPOSAL_GENERATOR_REGISTRY.register()
class CenterNet(nn.Module):
@configurable
def __init__(self,
# input_shape: Dict[str, ShapeSpec],
in_channels=256,
*,
num_classes=80,
in_features=("p3", "p4", "p5", "p6", "p7"),
strides=(8, 16, 32, 64, 128),
score_thresh=0.05,
hm_min_overlap=0.8,
loc_loss_type='giou',
min_radius=4,
hm_focal_alpha=0.25,
hm_focal_beta=4,
loss_gamma=2.0,
reg_weight=2.0,
not_norm_reg=True,
with_agn_hm=False,
only_proposal=False,
as_proposal=False,
not_nms=False,
pos_weight=1.,
neg_weight=1.,
sigmoid_clamp=1e-4,
ignore_high_fp=-1.,
center_nms=False,
sizes_of_interest=[[0,80],[64,160],[128,320],[256,640],[512,10000000]],
more_pos=False,
more_pos_thresh=0.2,
more_pos_topk=9,
pre_nms_topk_train=1000,
pre_nms_topk_test=1000,
post_nms_topk_train=100,
post_nms_topk_test=100,
nms_thresh_train=0.6,
nms_thresh_test=0.6,
no_reduce=False,
debug=False,
vis_thresh=0.5,
pixel_mean=[103.530,116.280,123.675],
pixel_std=[1.0,1.0,1.0],
device='cuda',
centernet_head=None,
):
super().__init__()
self.num_classes = num_classes
self.in_features = in_features
self.strides = strides
self.score_thresh = score_thresh
self.min_radius = min_radius
self.hm_focal_alpha = hm_focal_alpha
self.hm_focal_beta = hm_focal_beta
self.loss_gamma = loss_gamma
self.reg_weight = reg_weight
self.not_norm_reg = not_norm_reg
self.with_agn_hm = with_agn_hm
self.only_proposal = only_proposal
self.as_proposal = as_proposal
self.not_nms = not_nms
self.pos_weight = pos_weight
self.neg_weight = neg_weight
self.sigmoid_clamp = sigmoid_clamp
self.ignore_high_fp = ignore_high_fp
self.center_nms = center_nms
self.sizes_of_interest = sizes_of_interest
self.more_pos = more_pos
self.more_pos_thresh = more_pos_thresh
self.more_pos_topk = more_pos_topk
self.pre_nms_topk_train = pre_nms_topk_train
self.pre_nms_topk_test = pre_nms_topk_test
self.post_nms_topk_train = post_nms_topk_train
self.post_nms_topk_test = post_nms_topk_test
self.nms_thresh_train = nms_thresh_train
self.nms_thresh_test = nms_thresh_test
self.no_reduce = no_reduce
self.debug = debug
self.vis_thresh = vis_thresh
if self.center_nms:
self.not_nms = True
self.iou_loss = IOULoss(loc_loss_type)
assert (not self.only_proposal) or self.with_agn_hm
# delta for rendering heatmap
self.delta = (1 - hm_min_overlap) / (1 + hm_min_overlap)
if centernet_head is None:
self.centernet_head = CenterNetHead(
in_channels=in_channels,
num_levels=len(in_features),
with_agn_hm=with_agn_hm,
only_proposal=only_proposal)
else:
self.centernet_head = centernet_head
if self.debug:
pixel_mean = torch.Tensor(pixel_mean).to(
torch.device(device)).view(3, 1, 1)
pixel_std = torch.Tensor(pixel_std).to(
torch.device(device)).view(3, 1, 1)
self.denormalizer = lambda x: x * pixel_std + pixel_mean
@classmethod
def from_config(cls, cfg, input_shape):
ret = {
# 'input_shape': input_shape,
'in_channels': input_shape[
cfg.MODEL.CENTERNET.IN_FEATURES[0]].channels,
'num_classes': cfg.MODEL.CENTERNET.NUM_CLASSES,
'in_features': cfg.MODEL.CENTERNET.IN_FEATURES,
'strides': cfg.MODEL.CENTERNET.FPN_STRIDES,
'score_thresh': cfg.MODEL.CENTERNET.INFERENCE_TH,
'loc_loss_type': cfg.MODEL.CENTERNET.LOC_LOSS_TYPE,
'hm_min_overlap': cfg.MODEL.CENTERNET.HM_MIN_OVERLAP,
'min_radius': cfg.MODEL.CENTERNET.MIN_RADIUS,
'hm_focal_alpha': cfg.MODEL.CENTERNET.HM_FOCAL_ALPHA,
'hm_focal_beta': cfg.MODEL.CENTERNET.HM_FOCAL_BETA,
'loss_gamma': cfg.MODEL.CENTERNET.LOSS_GAMMA,
'reg_weight': cfg.MODEL.CENTERNET.REG_WEIGHT,
'not_norm_reg': cfg.MODEL.CENTERNET.NOT_NORM_REG,
'with_agn_hm': cfg.MODEL.CENTERNET.WITH_AGN_HM,
'only_proposal': cfg.MODEL.CENTERNET.ONLY_PROPOSAL,
'as_proposal': cfg.MODEL.CENTERNET.AS_PROPOSAL,
'not_nms': cfg.MODEL.CENTERNET.NOT_NMS,
'pos_weight': cfg.MODEL.CENTERNET.POS_WEIGHT,
'neg_weight': cfg.MODEL.CENTERNET.NEG_WEIGHT,
'sigmoid_clamp': cfg.MODEL.CENTERNET.SIGMOID_CLAMP,
'ignore_high_fp': cfg.MODEL.CENTERNET.IGNORE_HIGH_FP,
'center_nms': cfg.MODEL.CENTERNET.CENTER_NMS,
'sizes_of_interest': cfg.MODEL.CENTERNET.SOI,
'more_pos': cfg.MODEL.CENTERNET.MORE_POS,
'more_pos_thresh': cfg.MODEL.CENTERNET.MORE_POS_THRESH,
'more_pos_topk': cfg.MODEL.CENTERNET.MORE_POS_TOPK,
'pre_nms_topk_train': cfg.MODEL.CENTERNET.PRE_NMS_TOPK_TRAIN,
'pre_nms_topk_test': cfg.MODEL.CENTERNET.PRE_NMS_TOPK_TEST,
'post_nms_topk_train': cfg.MODEL.CENTERNET.POST_NMS_TOPK_TRAIN,
'post_nms_topk_test': cfg.MODEL.CENTERNET.POST_NMS_TOPK_TEST,
'nms_thresh_train': cfg.MODEL.CENTERNET.NMS_TH_TRAIN,
'nms_thresh_test': cfg.MODEL.CENTERNET.NMS_TH_TEST,
'no_reduce': cfg.MODEL.CENTERNET.NO_REDUCE,
'debug': cfg.DEBUG,
'vis_thresh': cfg.VIS_THRESH,
'pixel_mean': cfg.MODEL.PIXEL_MEAN,
'pixel_std': cfg.MODEL.PIXEL_STD,
'device': cfg.MODEL.DEVICE,
'centernet_head': CenterNetHead(
cfg, [input_shape[f] for f in cfg.MODEL.CENTERNET.IN_FEATURES]),
}
return ret
def forward(self, images, features_dict, gt_instances):
features = [features_dict[f] for f in self.in_features]
clss_per_level, reg_pred_per_level, agn_hm_pred_per_level = \
self.centernet_head(features)
grids = self.compute_grids(features)
shapes_per_level = grids[0].new_tensor(
[(x.shape[2], x.shape[3]) for x in reg_pred_per_level])
if not self.training:
return self.inference(
images, clss_per_level, reg_pred_per_level,
agn_hm_pred_per_level, grids)
else:
pos_inds, labels, reg_targets, flattened_hms = \
self._get_ground_truth(
grids, shapes_per_level, gt_instances)
# logits_pred: M x F, reg_pred: M x 4, agn_hm_pred: M
logits_pred, reg_pred, agn_hm_pred = self._flatten_outputs(
clss_per_level, reg_pred_per_level, agn_hm_pred_per_level)
if self.more_pos:
# add more pixels as positive if \
# 1. they are within the center3x3 region of an object
# 2. their regression losses are small (<self.more_pos_thresh)
pos_inds, labels = self._add_more_pos(
reg_pred, gt_instances, shapes_per_level)
losses = self.losses(
pos_inds, labels, reg_targets, flattened_hms,
logits_pred, reg_pred, agn_hm_pred)
proposals = None
if self.only_proposal:
agn_hm_pred_per_level = [x.sigmoid() for x in agn_hm_pred_per_level]
proposals = self.predict_instances(
grids, agn_hm_pred_per_level, reg_pred_per_level,
images.image_sizes, [None for _ in agn_hm_pred_per_level])
elif self.as_proposal: # category specific bbox as agnostic proposals
clss_per_level = [x.sigmoid() for x in clss_per_level]
proposals = self.predict_instances(
grids, clss_per_level, reg_pred_per_level,
images.image_sizes, agn_hm_pred_per_level)
if self.only_proposal or self.as_proposal:
for p in range(len(proposals)):
proposals[p].proposal_boxes = proposals[p].get('pred_boxes')
proposals[p].objectness_logits = proposals[p].get('scores')
proposals[p].remove('pred_boxes')
proposals[p].remove('scores')
proposals[p].remove('pred_classes')
if self.debug:
debug_train(
[self.denormalizer(x) for x in images],
gt_instances, flattened_hms, reg_targets,
labels, pos_inds, shapes_per_level, grids, self.strides)
return proposals, losses
def losses(
self, pos_inds, labels, reg_targets, flattened_hms,
logits_pred, reg_pred, agn_hm_pred):
'''
Inputs:
pos_inds: N
labels: N
reg_targets: M x 4
flattened_hms: M x C
logits_pred: M x C
reg_pred: M x 4
agn_hm_pred: M x 1 or None
N: number of positive locations in all images
M: number of pixels from all FPN levels
C: number of classes
'''
assert (torch.isfinite(reg_pred).all().item())
num_pos_local = pos_inds.numel()
num_gpus = get_world_size()
if self.no_reduce:
total_num_pos = num_pos_local * num_gpus
else: | total_num_pos = reduce_sum( | 6 | 2023-11-09 14:31:48+00:00 | 12k |
OpenBMB/ProAgent | main.py | [
{
"identifier": "mock_function_call_list",
"path": "mock_agent.py",
"snippet": ""
},
{
"identifier": "logger",
"path": "ProAgent/loggers/logs.py",
"snippet": "class JsonFileHandler(logging.FileHandler):\nclass JsonFormatter(logging.Formatter):\nclass Logger(metaclass=Singleton):\nclass TypingConsoleHandler(logging.StreamHandler):\nclass ConsoleHandler(logging.StreamHandler):\nclass AutoGptFormatter(logging.Formatter):\n def __init__(self, filename, mode=\"a\", encoding=None, delay=False):\n def emit(self, record):\n def format(self, record):\n def __init__(self):\n def typewriter_log(\n self, title=\"\", title_color=\"\", content=\"\", speak_text=False, level=logging.INFO\n ):\n def debug(\n self,\n message,\n title=\"\",\n title_color=\"\",\n ):\n def info(\n self,\n message,\n title=\"\",\n title_color=\"\",\n ):\n def warn(\n self,\n message,\n title=\"\",\n title_color=\"\",\n ):\n def error(self, title, message=\"\"):\n def _log(\n self,\n title: str = \"\",\n title_color: str = \"\",\n message: str = \"\",\n level=logging.INFO,\n ):\n def set_level(self, level):\n def double_check(self, additionalText=None):\n def log_json(self, data: Any, file_name: str) -> None:\n def get_log_directory(self):\n def emit(self, record):\n def emit(self, record) -> None:\n def format(self, record: LogRecord) -> str:\ndef remove_color_codes(s: str) -> str:\ndef print_action_base(action: Action):\ndef print_action_tool(action: Action):"
},
{
"identifier": "Compiler",
"path": "ProAgent/n8n_parser/compiler.py",
"snippet": "class Compiler():\n \"\"\"和nodes.json交互,同时存储目前所有的数据结构\n \"\"\"\n\n\n def __init__(self, cfg: omegaconf.DictConfig, recorder: RunningRecoder):\n \"\"\"\n Initializes the class with the given configuration and recorder.\n\n Parameters:\n cfg (omegaconf.DictConfig): The configuration object.\n recorder (RunningRecoder): The recorder object.\n\n Returns:\n None\n \"\"\"\n self.cfg = cfg\n self.recorder = recorder\n\n self.nodes: List[n8nPythonNode] = []\n self.trigger_id = 0\n self.action_id = 0\n self.workflows: Dict[n8nPythonWorkflow] = {}\n self.mainWorkflow: n8nPythonWorkflow = n8nPythonWorkflow(\n implement_code = mainWorkflow_code\n )\n self.resolve()\n\n self.code_runner = n8nPythonCodeRunner()\n self.code_runner.flash( \n main_workflow = self.mainWorkflow,\n workflows=self.workflows,\n nodes = self.nodes\n )\n self.update_runtime()\n\n\n def resolve_integration(self, integration_json):\n \"\"\"\n Generates a function comment for the given function body.\n \n Args:\n integration_json (dict): A dictionary containing information about the integration.\n \n Returns:\n dict: A dictionary containing the resolved integration data.\n \n Raises:\n AssertionError: If the target resource name is not found in the integration data.\n \"\"\"\n integration_name = integration_json[\"name\"].split(\".\")[-1]\n integration_data = {}\n no_resource = True\n no_operation = True\n for property in integration_json[\"properties\"]:\n if property[\"name\"] == \"resource\":\n for resource in property[\"options\"]:\n integration_data[resource[\"value\"]] = {}\n no_resource = False\n break\n\n if no_resource:\n integration_data[\"default\"] = {}\n \n\n for property in integration_json[\"properties\"]:\n if property[\"name\"] == \"operation\":\n target_resource_name = \"default\"\n if \"displayOptions\" in property.keys():\n assert \"show\" in property[\"displayOptions\"].keys() and \"resource\" in property[\"displayOptions\"][\"show\"].keys()\n assert len(property[\"displayOptions\"][\"show\"][\"resource\"]) == 1\n target_resource_name = property[\"displayOptions\"][\"show\"][\"resource\"][0]\n\n assert target_resource_name in integration_data.keys(), f\"{target_resource_name} in {integration_data.keys()}\"\n\n target_resource = integration_data[target_resource_name]\n for operation in property[\"options\"]:\n operation_name = operation[\"value\"]\n operation_description = \"\"\n if \"description\" in operation.keys():\n operation_description = operation[\"description\"]\n node_type = NodeType.trigger if \"trigger\" in integration_name.lower() or \"webhook\" in integration_name.lower() else NodeType.action\n target_resource[operation_name] = n8nNodeMeta(\n node_type=node_type,\n integration_name=integration_name,\n resource_name=target_resource_name,\n operation_name=operation_name,\n operation_description=operation_description\n )\n no_operation = False\n\n if no_operation:\n assert no_resource\n node_type = NodeType.trigger if \"trigger\" in integration_name.lower() or \"webhook\" in integration_name.lower() else NodeType.action\n integration_data[\"default\"][\"default\"] = n8nNodeMeta(\n node_type=node_type,\n integration_name=integration_name,\n resource_name=\"default\",\n operation_name=\"default\",\n operation_description=\"\"\n )\n\n return integration_data\n\n def print_flatten_tools(self):\n \"\"\"\n Generates a function comment for the given function body in a markdown code block with the correct language syntax.\n\n Returns:\n str: The function comment in markdown format.\n \"\"\"\n 
output_description_list = []\n for k1, integration_name in enumerate(list(self.flattened_tools.keys())):\n operation_counter = 1\n data = self.flattened_tools[integration_name][\"data\"]\n des = self.flattened_tools[integration_name][\"meta\"][\"description\"]\n if integration_name in CONFIG.default_knowledge.keys():\n print(colored(f\"{integration_name} knowledge is found!\", color='light_yellow'))\n des += CONFIG.default_knowledge[integration_name]\n\n output_description_list.append(f\"{k1+1}.integration={integration_name}: {des}\")\n for k2,resource in enumerate(list( data.keys())):\n for k3, operation in enumerate(list(data[resource].keys())):\n new_line = f\" {k1+1}.{operation_counter}: \" + data[resource][operation].to_action_string()\n operation_counter += 1\n output_description_list.append(new_line)\n \n return \"\\n\".join(output_description_list)\n\n\n\n def resolve(self):\n \"\"\"\n Resolves the data from the configuration file.\n\n This function reads the configuration file and resolves the data based on the provided white list and available integrations. It populates the `json_data` list with the integration JSON objects that match the white list. For each integration JSON, it calls the `resolve_integration` function to further resolve the integration data. It also creates a flattened representation of the tools and their metadata in the `flattened_tools` dictionary. If a tool is marked as a pseudoNode, it prints a message indicating that it is being loaded. Finally, it calls the `print_flatten_tools` function and returns the output.\n\n Parameters:\n None\n\n Returns:\n None\n \"\"\"\n self.json_data = []\n self.flattened_tools = {}\n white_list = self.cfg.parser.nodes_whtie_list\n available_integrations = [item.split(\".\")[0] for item in self.cfg.parser.nodes_whtie_list]\n with open(self.cfg.parser.nodes_json_path, \"r\", encoding=\"utf-8\") as reader:\n integrations = json.load(reader)\n for integration_json in integrations:\n name = integration_json[\"name\"].split(\".\")[-1]\n if name not in available_integrations:\n continue\n self.json_data.append(integration_json)\n integration_data = self.resolve_integration(integration_json=integration_json)\n index = available_integrations.index(name)\n full_tool = white_list[index]\n splits = full_tool.split(\".\")\n if len(splits) > 1:\n for key in list(integration_data.keys()):\n if key != splits[1]:\n integration_data.pop(key)\n if len(splits) == 3:\n for action in list(integration_data[splits[1]].keys()):\n if action != splits[2]:\n integration_data[splits[1]].pop(action)\n\n integration_description = integration_json[\"description\"] if \"description\" in integration_json.keys() else \"\"\n self.flattened_tools[name] = {\n \"data\": integration_data,\n \"meta\": {\n \"description\": integration_description,\n \"node_json\": integration_json,\n },\n \"pseudoNode\": integration_json['pseudoNode'] if \"pseudoNode\" in integration_json.keys() else False\n }\n if self.flattened_tools[name]['pseudoNode']:\n print(colored(f\"load pseudoNode {name}\", color='cyan'))\n out = self.print_flatten_tools()\n\n\n\n def update_runtime(self):\n \"\"\"\n Updates the runtime by flashing the code and running it.\n\n Parameters:\n self (object): An instance of the class.\n \n Returns:\n None\n \"\"\"\n self.code_runner.flash( \n main_workflow = self.mainWorkflow,\n workflows=self.workflows,\n nodes = self.nodes\n )\n self.code_runner.run_code()\n\n def tool_call_handle(self, content:str, tool_name:str, tool_input:dict) -> Action:\n \"\"\"\n 
Handles a tool call by executing the specified tool with the given content, tool name, and tool input.\n\n Args:\n content (str): The content to be processed by the tool.\n tool_name (str): The name of the tool to be executed.\n tool_input (dict): The input parameters for the tool.\n\n Returns:\n Action: An Action object representing the result of the tool call.\n \"\"\"\n action = Action(\n content=content,\n tool_name=tool_name,\n )\n for react_key in [\"thought\",\"plan\",\"criticism\"]:\n if react_key in tool_input.keys():\n action.__setattr__(react_key, tool_input[react_key])\n tool_input.pop(react_key)\n\n action.tool_input = tool_input\n print_action_base(action)\n\n\n\n tool_status_code = ToolCallStatus.ToolCallSuccess\n tool_output = \"\"\n if tool_name == \"function_define\":\n tool_status_code, tool_output = self.handle_function_define(tool_input=tool_input)\n elif tool_name == \"function_rewrite_params\":\n tool_status_code, tool_output = self.handle_rewrite_params(tool_input=tool_input)\n elif tool_name == \"workflow_implment\":\n tool_status_code, tool_output = self.handle_workflow_implement(tool_input=tool_input)\n elif tool_name == \"ask_user_help\":\n tool_status_code, tool_output = self.ask_user_help(tool_input=tool_input)\n elif tool_name == \"task_submit\":\n tool_status_code, tool_output = self.task_submit(tool_input=tool_input)\n else:\n tool_status_code = ToolCallStatus.NoSuchTool\n tool_output = json.dumps({\"error\": f\"No such action {tool_name}\", \"result\": \"Nothing Happened\", \"status\": tool_status_code.name}, ensure_ascii=False)\n\n action.tool_output = tool_output\n action.tool_output_status = tool_status_code\n\n print_action_tool(action)\n\n if CONFIG.environment == ENVIRONMENT.Production:\n if self.recorder.is_final_cache():\n self.update_runtime()\n pass\n else:\n if tool_status_code == ToolCallStatus.ToolCallSuccess:\n self.update_runtime()\n\n self.recorder.regist_tool_call(\n action=action,\n now_code=self.code_runner.print_code()\n )\n \n return action\n\n def handle_workflow_implement(self, tool_input) -> (ToolCallStatus, str):\n \"\"\"\n Handles the implementation of a workflow.\n\n Parameters:\n tool_input (dict): A dictionary containing the tool input. 
It should have the following keys:\n - \"workflow_name\" (str): The name of the workflow to implement.\n - \"code\" (str): The code for the implementation.\n\n Returns:\n (ToolCallStatus, str): A tuple containing the tool call status and a JSON string.\n - ToolCallStatus (enum): The tool call status, indicating the success or failure of the operation.\n - str: A JSON string containing the result of the operation.\n\n Raises:\n None\n \"\"\"\n workflow_name = tool_input[\"workflow_name\"]\n implement_code = tool_input[\"code\"]\n\n if workflow_name == \"mainWorkflow\":\n self.mainWorkflow.implement_code = implement_code\n return ToolCallStatus.ToolCallSuccess, json.dumps({\"result\": \"mainWorkflow has been re-implemented\",\"status\": ToolCallStatus.ToolCallSuccess.name})\n else:\n if workflow_name in self.workflows.keys():\n self.workflows[workflow_name].implement_code = implement_code\n return ToolCallStatus.ToolCallSuccess, json.dumps({\"result\": f\"{workflow_name} has been re-implemented\",\"status\": ToolCallStatus.ToolCallSuccess.name})\n else:\n self.workflows[workflow_name] = n8nPythonWorkflow(\n workflow_name=workflow_name,\n workflow_type=WorkflowType.Sub,\n implement_code=implement_code\n )\n return ToolCallStatus.ToolCallSuccess, json.dumps({\"result\": f\"{workflow_name} has been added\",\"status\": ToolCallStatus.ToolCallSuccess.name})\n\n\n def handle_function_test(self, tool_input) -> (ToolCallStatus, str):\n \"\"\"\n Handles the function test.\n\n Args:\n self: The object instance.\n tool_input: The input data for the function test.\n\n Returns:\n A tuple containing the tool call status and a string.\n\n Raises:\n NotImplementedError: If the 'use_mock_input' flag is set to True.\n\n Comment:\n - Performs runtime format check for the input data.\n - Expects the input data to be a non-empty list of JSON objects.\n - Each item in the list should be a dictionary with the key 'json'.\n\n \"\"\"\n function_name = tool_input[\"target_function_name\"]\n use_mock_input = tool_input[\"use_mock_input\"]\n if use_mock_input:\n raise NotImplementedError\n else:\n input_data = tool_input[\"input_data\"]\n \n if type(input_data) != [] or len(input_data) == 0:\n output_status = ToolCallStatus.InputTypeError\n return output_status, json.dumps({\"error\": f\"Input must be a list of json(len>0), got {input_data}\", \"result\":\"Nothing Happened\", \"status\": output_status.name})\n for k, cont in enumerate(input_data):\n if type(cont) != dict or \"json\" not in cont.keys():\n output_status = ToolCallStatus.InputTypeError\n return output_status, json.dumps({\"error\": f\"Error of item {k}: all the items in the list must be a dict with key \\\"json\\\", got {cont}\", \"result\":\"Nothing Happened\", \"status\": output_status.name})\n\n\n def handle_rewrite_params(self, tool_input) -> (ToolCallStatus, str):\n \"\"\"\n Handle the rewriting of parameters for a given tool input.\n \n Args:\n tool_input (dict): The input data for the tool.\n \n Returns:\n tuple: A tuple containing the ToolCallStatus enum value and a string.\n - The ToolCallStatus indicates the status of the tool call.\n - The string contains the output of the tool call.\n \"\"\"\n function_name = tool_input[\"function_name\"]\n available_names = [node.get_name() for node in self.nodes]\n if function_name not in available_names:\n output_status = ToolCallStatus.NoSuchFunction\n return output_status, json.dumps({\"ERROR\": f\"Undefined Function {function_name}. 
Available functions = {available_names}.\", \"result\": \"Nothing happened.\", \"status\": output_status.name})\n \n for node in self.nodes:\n if node.get_name() == function_name:\n try:\n params = json.loads(tool_input[\"params\"], strict = False)\n except:\n output_status = ToolCallStatus.InputCannotParsed\n return output_status, json.dumps({\"ERROR\": f\"\\\"params\\\" field can't be parsed to json.\", \"result\": \"Nothing Happened\", \"status\": output_status.name})\n\n param_rewrite_status, output_str = node.parse_parameters(params)\n if param_rewrite_status != ToolCallStatus.ToolCallSuccess:\n return param_rewrite_status, output_str\n\n node.note_todo = tool_input[\"TODO\"]\n node.node_comments = tool_input[\"comments\"]\n return param_rewrite_status, output_str\n assert False\n\n \n\n def handle_function_define(self, tool_input) -> (ToolCallStatus, str):\n \"\"\"\n Handles the definition of a function.\n\n Args:\n tool_input (dict): The input data for the function definition.\n\n Returns:\n Tuple[ToolCallStatus, str]: A tuple containing the tool call status and the tool call result.\n\n Raises:\n AssertionError: If the \"functions\" key is not present in `tool_input`.\n \"\"\"\n assert \"functions\" in tool_input.keys()\n tool_call_status = []\n tool_call_result = []\n for k, transparent_function in enumerate(tool_input[\"functions\"]):\n integration_name = transparent_function[\"integration_name\"]\n resource_name = transparent_function[\"resource_name\"]\n operation_name = transparent_function[\"operation_name\"]\n comments = transparent_function[\"comments\"].strip()\n TODO = transparent_function[\"TODO\"]\n\n\n\n if integration_name not in self.flattened_tools.keys():\n tool_call_status.append(ToolCallStatus.NoSuchFunction)\n tool_call_result.append(f\"function {k} defined FAILED: not such integration {integration_name}\")\n continue\n if resource_name not in self.flattened_tools[integration_name][\"data\"].keys():\n tool_call_status.append(ToolCallStatus.NoSuchFunction)\n tool_call_result.append(f\"function {k} defined FAILED: not such resource {integration_name}->{resource_name}\")\n continue\n if operation_name not in self.flattened_tools[integration_name][\"data\"][resource_name].keys():\n tool_call_status.append(ToolCallStatus.NoSuchFunction)\n tool_call_result.append(f\"function {k} defined FAILED: not such operation {integration_name}->{resource_name}->{operation_name}\")\n continue\n \n node_type = self.flattened_tools[integration_name][\"data\"][resource_name][operation_name].node_type\n if node_type == NodeType.action:\n node_id = self.action_id\n self.action_id += 1\n else:\n node_id = self.trigger_id\n self.trigger_id += 1\n new_node = n8nPythonNode(\n node_id= node_id,\n node_meta=deepcopy(self.flattened_tools[integration_name][\"data\"][resource_name][operation_name]),\n node_comments=comments,\n note_todo=TODO,\n node_json=self.flattened_tools[integration_name][\"meta\"][\"node_json\"],\n )\n new_node.params = parse_properties(new_node)\n new_node.update_implement_info()\n self.nodes.append(new_node)\n\n tool_call_status.append(ToolCallStatus.ToolCallSuccess)\n tool_call_result.append(f\"function_{k} defined SUCCESS: {integration_name}->{resource_name}->{operation_name}\")\n\n\n\n final_status = ToolCallStatus.NoSuchFunction\n if ToolCallStatus.ToolCallSuccess in tool_call_status:\n final_status = ToolCallStatus.ToolCallPartlySuccess\n if ToolCallStatus.NoSuchFunction not in tool_call_status:\n final_status = ToolCallStatus.ToolCallSuccess\n\n tool_call_result = 
{\n \"result\": tool_call_result,\n \"status\": final_status.name,\n }\n return final_status, json.dumps(tool_call_result)\n\n def ask_user_help(self, tool_input):\n \"\"\"\n Asks the user for help and returns the result and status of the tool call.\n\n Args:\n tool_input (Any): The input to be passed to the tool.\n\n Returns:\n Tuple[ToolCallStatus, str]: A tuple containing the final status of the tool call and the JSON-encoded tool call result.\n \"\"\"\n\n final_status = ToolCallStatus.ToolCallSuccess\n\n tool_call_result = {\n \"result\": \"\" if (CONFIG.environment == ENVIRONMENT.Production and not self.recorder.is_final_cache()) else input(),\n \"status\": final_status.name,\n }\n\n return final_status, json.dumps(tool_call_result)\n \n def task_submit(self, tool_input):\n \"\"\"\n Submits a task with the given tool input.\n\n Args:\n tool_input (dict): The input data for the tool.\n\n Returns:\n tuple: A tuple containing the final status of the tool call and the JSON-encoded result of the tool call.\n \"\"\"\n\n final_status = ToolCallStatus.ToolCallSuccess\n\n tool_call_result = {\n \"result\": \"successfully save to markdown\",\n \"status\": final_status.name,\n }\n\n self.recorder.save_markdown(tool_input['result'])\n\n return final_status, json.dumps(tool_call_result)"
},
{
"identifier": "ReACTHandler",
"path": "ProAgent/handler/ReACT.py",
"snippet": "class ReACTHandler():\n def __init__(self, cfg, query:userQuery, compiler: Compiler, recorder: RunningRecoder):\n \"\"\"\n Initializes a new instance of the class.\n\n Args:\n cfg (type): The cfg parameter description.\n query (userQuery): The query parameter description.\n compiler (Compiler): The compiler parameter description.\n recorder (RunningRecoder): The recorder parameter description.\n \n Attributes:\n messages (List[Dict]): The messages attribute description.\n actions (List[Action]): The actions attribute description.\n \"\"\"\n self.query = query\n self.refine_prompt = query.refine_prompt\n self.cfg = cfg\n self.compiler = compiler\n self.recorder = recorder\n self.messages: List[Dict] = []\n self.actions: List[Action] = []\n def run(self):\n \"\"\"\n Runs the main loop for the program.\n\n This function continuously executes a loop that performs the following steps:\n 1. Appends system prompts to the list of messages.\n 2. Replaces placeholders in a specific prompt and appends it to the list of messages.\n 3. Appends assistant messages and function outputs to the list of messages.\n 4. Prints highlighted code.\n 5. Replaces placeholders in the user prompt and appends it to the list of messages.\n 6. Retrieves intrinsic functions.\n 7. Parses messages using an OpenAIFunction agent.\n 8. Handles tool calls using the compiler.\n 9. Appends the parsed message and action to the list of messages and actions, respectively.\n\n This function does not have any parameters and does not return anything.\n\n Note: This function runs indefinitely until interrupted.\n \"\"\"\n while True:\n messages = []\n messages.append({\"role\":\"system\",\"content\": deepcopy(react_prompt.system_prompt_1)})\n messages.append({\"role\":\"system\",\"content\": deepcopy(react_prompt.system_prompt_2)})\n\n specific_prompt = deepcopy(react_prompt.system_prompt_3)\n specific_prompt = specific_prompt.replace(\"{{user_query}}\", self.query.print_self())\n specific_prompt = specific_prompt.replace(\"{{flatten_tools}}\", self.compiler.print_flatten_tools())\n messages.append({\"role\":\"system\",\"content\": specific_prompt})\n\n # cut some messages down, only allow for last_num messages\n last_num = 3\n for k, (assistant_message, parsed_action) in enumerate(zip(self.messages, self.actions)):\n if k < len(self.messages) - last_num:\n continue\n messages.append(assistant_message)\n messages.append({\n \"role\":\"function\",\n \"name\": parsed_action.tool_name,\n \"content\": parsed_action.tool_output,\n })\n \n\n user_prompt = deepcopy(react_prompt.user_prompt)\n\n refine_prompt = \"\"\n if len(self.refine_prompt) > 0:\n refine_prompt = f\"The user have some additional requirements to your work. 
Please refine your work based on the following requirements:\\n ```\\n{deepcopy(self.refine_prompt)}```\\n\"\n\n user_prompt = user_prompt.replace(\"{{refine_prompt}}\", refine_prompt)\n\n # print highlighted code\n highlighted_code = highlight_code(self.compiler.code_runner.print_clean_code(indent=4))\n user_prompt_colored = user_prompt.split(\"{{now_codes}}\")\n user_prompt_colored = highlighted_code.join(user_prompt_colored)\n logger.typewriter_log(user_prompt_colored)\n\n user_prompt = user_prompt.replace(\"{{now_codes}}\", self.compiler.code_runner.print_code())\n\n messages.append({\"role\":\"user\",\"content\": user_prompt})\n \n functions = get_intrinsic_functions()\n\n agent = OpenAIFunction()\n content, function_name, function_arguments, message = agent.parse(messages=messages,\n functions=functions,\n default_completion_kwargs=CONFIG.default_completion_kwargs,\n recorder=self.recorder)\n action = self.compiler.tool_call_handle(content, function_name, function_arguments)\n self.messages.append(message)\n self.actions.append(action)\n # exit()"
},
{
"identifier": "userQuery",
"path": "ProAgent/utils.py",
"snippet": "class userQuery():\n task: str\n additional_information: List[str] = field(default_factory= lambda : [])\n refine_prompt: str = field(default_factory= lambda : \"\")\n \n def print_self(self):\n lines = [self.task]\n for info in self.additional_information:\n lines.append(f\"- {info}\")\n return \"\\n\".join(lines)"
},
{
"identifier": "RunningRecoder",
"path": "ProAgent/running_recorder.py",
"snippet": "class RunningRecoder():\n def __init__(self, record_base_dir = \"./records\"):\n \"\"\"\n Initializes the object with the given record base directory.\n\n Parameters:\n record_base_dir (str): The base directory for the records. Defaults to \"./records\".\n\n Returns:\n None\n \"\"\"\n\n self.llm_record_cache = [] # Get cached records\n\n self.llm_interface_id = 0\n self.llm_server_cache = [] # Runtime records\n self.tool_call_id = 0\n self.tool_call_cache = []\n self.is_cached = True # Assume to be true at first\n self.newly_start = True\n\n now = int(round(time.time()*1000))\n strip = time.strftime('%Y_%m_%d_%H_%M_%S',time.localtime(now/1000))\n\n self.record_root_dir = os.path.join(record_base_dir,strip)\n os.makedirs(self.record_root_dir,exist_ok=True)\n\n print(colored(f\"Recorder Mode: {CONFIG.environment.name}\", color='yellow'))\n\n for subdir_name in [\"LLM_inout_pair\",\"tool_call_logs\"]:\n os.makedirs(os.path.join(self.record_root_dir,subdir_name),exist_ok=True)\n \n\n def save_meta(self):\n \"\"\"\n Saves the meta information of the record.\n\n This function writes the meta information of the record to a file in the\n record root directory. The meta information includes the tool call ID and\n the LLM inference ID.\n\n Parameters:\n None\n\n Returns:\n None\n \"\"\"\n with open(os.path.join(self.record_root_dir, \"meta.meta\"), \"w\", encoding=\"utf-8\") as writer:\n tool_call_log = {\n \"tool_call_id\": self.tool_call_id,\n \"llm_inference_id\": self.llm_interface_id,\n }\n json.dump(tool_call_log,writer,indent=2, ensure_ascii=False)\n\n def load_from_disk(self, record_dir: str, cfg):\n \"\"\"\n Load data from disk into memory cache.\n\n Args:\n record_dir (str): The directory path where the data is stored.\n cfg: The configuration object.\n\n Returns:\n None\n \"\"\"\n logger.typewriter_log(\n \"load from a disk record\",\n Fore.RED,\n record_dir,\n )\n self.newly_start = False\n for dir_name in os.listdir(record_dir):\n if dir_name == \"LLM_inout_pair\":\n inout_pair_list = os.listdir(os.path.join(record_dir,dir_name))\n inout_pair_list.sort()\n for file_name in inout_pair_list:\n with open(os.path.join(record_dir,dir_name,file_name), \"r\", encoding=\"utf-8\") as reader:\n llm_pair = json.load(reader)\n self.llm_record_cache.append(llm_pair)\n elif dir_name == \"meta.meta\":\n with open(os.path.join(record_dir, \"meta.meta\"), \"r\", encoding=\"utf-8\") as reader:\n tool_call_log = json.load(reader)\n \n \n def regist_llm_inout(self, base_kwargs, messages, functions, function_call, stop, other_args, output_data, uuid=\"\"):\n \"\"\"\n Registers the LLM input and output data for the specified function call. \n\n Args:\n base_kwargs (dict): The base keyword arguments for the function call.\n messages (list): The list of messages associated with the function call.\n functions (list): The list of functions called during the function call.\n function_call (str): The function call being registered.\n stop (bool): A flag indicating whether the function call should stop.\n other_args (list): The list of other arguments for the function call.\n output_data (Any): The output data for the function call.\n uuid (str, optional): The UUID associated with the function call. 
Defaults to \"\".\n\n Returns:\n None\n\n Raises:\n None\n \"\"\"\n with open(os.path.join(self.record_root_dir, \"LLM_inout_pair\", f\"{self.llm_interface_id:05d}.json\"), \"w\", encoding=\"utf-8\") as writer:\n llm_inout_record = {\n \"input\": {\n \"base_kwargs\": dump_common_things(base_kwargs),\n \"messages\":dump_common_things(messages),\n \"functions\":dump_common_things(functions),\n \"function_call\":dump_common_things(function_call),\n \"stop\":dump_common_things(stop),\n \"other_args\":dump_common_things(other_args),\n # 'uuid': dump_common_things(uuid)\n },\n \"output\": dump_common_things(output_data),\n \"llm_interface_id\": self.llm_interface_id,\n }\n json.dump(llm_inout_record,writer,indent=2, ensure_ascii=False)\n self.llm_server_cache.append(llm_inout_record)\n\n self.llm_interface_id += 1\n self.save_meta()\n\n\n def query_llm_inout(self, restrict_cache_query, base_kwargs, messages, functions, function_call, stop, other_args, uuid=\"\"):\n \"\"\"\n Query the LLM server for input and output data based on the given parameters.\n \n Parameters:\n - restrict_cache_query (bool): Whether to restrict the cache query.\n - base_kwargs (dict): A dictionary of base keyword arguments.\n - messages (list): A list of messages.\n - functions (list): A list of functions.\n - function_call (dict): A dictionary representing the function call.\n - stop (bool): Whether to stop the query.\n - other_args (dict): A dictionary of other arguments.\n - uuid (str): A string representing the UUID (optional).\n \n Returns:\n - object: The output data from the LLM server, or None if not found.\n \"\"\"\n\n \n if CONFIG.environment == ENVIRONMENT.Development or self.newly_start:\n self.is_cached = False\n return None\n elif CONFIG.environment == ENVIRONMENT.Refine:\n input_data = {\n \"base_kwargs\": dump_common_things(base_kwargs),\n \"messages\":dump_common_things(messages),\n \"functions\":dump_common_things(functions),\n \"function_call\":dump_common_things(function_call),\n \"stop\":dump_common_things(stop),\n \"other_args\":dump_common_things(other_args),\n }\n for cache in self.llm_record_cache:\n # compare user messages only\n input_data_user_messages = [item for item in input_data['messages'] if item['role'] == 'user']\n cache_data_user_messages = [item for item in cache[\"input\"]['messages'] if item['role'] == 'user']\n if input_data_user_messages == cache_data_user_messages:\n if restrict_cache_query and self.llm_interface_id != cache[\"llm_interface_id\"]:\n continue\n logger.typewriter_log(\n f\"get a llm_server response from Record {cache['llm_interface_id']}\",\n Fore.RED,\n )\n self.is_cached = True\n return cache[\"output\"]\n self.is_cached = False\n return None\n elif CONFIG.environment == ENVIRONMENT.Production:\n if self.llm_interface_id < len(self.llm_record_cache):\n logger.typewriter_log(\n \"get a llm_server response from Record\",\n Fore.RED,\n )\n self.is_cached = True\n return self.llm_record_cache[self.llm_interface_id]['output']\n else:\n self.is_cached = False\n return None\n else:\n self.is_cached = False\n return None\n \n\n def regist_tool_call(self, action: Action, now_code: str):\n \"\"\"\n Registers a tool call by saving the action and code to files.\n\n Args:\n action (Action): The action to be saved.\n now_code (str): The current code to be saved.\n\n Returns:\n None\n \"\"\"\n with open(os.path.join(self.record_root_dir, \"tool_call_logs\", f\"{self.tool_call_id:05d}_tool.json\"), \"w\", encoding=\"utf-8\") as writer:\n tool_call_log = action.to_json()\n 
json.dump(tool_call_log,writer,indent=2, ensure_ascii=False)\n with open(os.path.join(self.record_root_dir, \"tool_call_logs\", f\"{self.tool_call_id:05d}_code.py\"), \"w\", encoding=\"utf-8\") as writer:\n writer.write(now_code)\n\n self.tool_call_id += 1\n\n self.save_meta()\n\n def save_markdown(self, markdown):\n \"\"\"\n Save the given markdown content to a file.\n\n Parameters:\n markdown (str): The markdown content to be saved.\n\n Returns:\n None\n \"\"\"\n with open(os.path.join(self.record_root_dir, f\"README.md\"), \"w\", encoding=\"utf-8\") as writer:\n writer.write(markdown)\n \n def is_final_cache(self):\n \"\"\"\n Check if the current cache is the final cache.\n\n Returns:\n bool: True if the current cache is the final cache, False otherwise.\n \"\"\"\n return self.llm_interface_id + 1 >= len(self.llm_record_cache)"
}
] | import hydra
import omegaconf
import logging
import json
from colorama import Fore, Style
from mock_agent import mock_function_call_list
from ProAgent.loggers.logs import logger
from ProAgent.n8n_parser.compiler import Compiler
from ProAgent.handler.ReACT import ReACTHandler
from ProAgent.utils import userQuery
from ProAgent.running_recorder import RunningRecoder | 8,412 |
@hydra.main(config_path="ProAgent/configs", config_name="generate_n8n_query")
def main(cfg: omegaconf.DictConfig):
"""
The main function that runs the ReACTHandler.
Args:
cfg (omegaconf.DictConfig): The configuration object.
Returns:
None
"""
recorder = RunningRecoder()
record_dir = None
record_dir = "./apa_case"
if record_dir != None:
recorder.load_from_disk(record_dir, cfg)
# commercial
|
@hydra.main(config_path="ProAgent/configs", config_name="generate_n8n_query")
def main(cfg: omegaconf.DictConfig):
"""
The main function that runs the ReACTHandler.
Args:
cfg (omegaconf.DictConfig): The configuration object.
Returns:
None
"""
recorder = RunningRecoder()
record_dir = None
record_dir = "./apa_case"
if record_dir != None:
recorder.load_from_disk(record_dir, cfg)
# commercial | query = userQuery( | 4 | 2023-11-03 01:20:14+00:00 | 12k |
LLaVA-VL/LLaVA-Plus-Codebase | llava/model/language_model/mpt/modeling_mpt.py | [
{
"identifier": "attn_bias_shape",
"path": "llava/model/language_model/mpt/attention.py",
"snippet": "def attn_bias_shape(attn_impl, n_heads, seq_len, alibi, prefix_lm, causal, use_sequence_id):\n if attn_impl == 'flash':\n return None\n elif attn_impl in ['torch', 'triton']:\n if alibi:\n if (prefix_lm or not causal) or use_sequence_id:\n return (1, n_heads, seq_len, seq_len)\n return (1, n_heads, 1, seq_len)\n elif prefix_lm or use_sequence_id:\n return (1, 1, seq_len, seq_len)\n return None\n else:\n raise ValueError(f'attn_impl={attn_impl!r} is an invalid setting.')"
},
{
"identifier": "build_attn_bias",
"path": "llava/model/language_model/mpt/attention.py",
"snippet": "def build_attn_bias(attn_impl, attn_bias, n_heads, seq_len, causal=False, alibi=False, alibi_bias_max=8):\n if attn_impl == 'flash':\n return None\n elif attn_impl in ['torch', 'triton']:\n if alibi:\n (device, dtype) = (attn_bias.device, attn_bias.dtype)\n attn_bias = attn_bias.add(build_alibi_bias(n_heads, seq_len, full=not causal, alibi_bias_max=alibi_bias_max, device=device, dtype=dtype))\n return attn_bias\n else:\n raise ValueError(f'attn_impl={attn_impl!r} is an invalid setting.')"
},
{
"identifier": "MPTBlock",
"path": "llava/model/language_model/mpt/blocks.py",
"snippet": "class MPTBlock(nn.Module):\n\n def __init__(self, d_model: int, n_heads: int, expansion_ratio: int, attn_config: Dict={'attn_type': 'multihead_attention', 'attn_pdrop': 0.0, 'attn_impl': 'triton', 'qk_ln': False, 'clip_qkv': None, 'softmax_scale': None, 'prefix_lm': False, 'attn_uses_sequence_id': False, 'alibi': False, 'alibi_bias_max': 8}, resid_pdrop: float=0.0, norm_type: str='low_precision_layernorm', verbose: int=0, device: Optional[str]=None, **kwargs):\n del kwargs\n super().__init__()\n norm_class = NORM_CLASS_REGISTRY[norm_type.lower()]\n attn_class = ATTN_CLASS_REGISTRY[attn_config['attn_type']]\n self.norm_1 = norm_class(d_model, device=device)\n self.attn = attn_class(attn_impl=attn_config['attn_impl'], clip_qkv=attn_config['clip_qkv'], qk_ln=attn_config['qk_ln'], softmax_scale=attn_config['softmax_scale'], attn_pdrop=attn_config['attn_pdrop'], d_model=d_model, n_heads=n_heads, verbose=verbose, device=device)\n self.norm_2 = norm_class(d_model, device=device)\n self.ffn = MPTMLP(d_model=d_model, expansion_ratio=expansion_ratio, device=device)\n self.resid_attn_dropout = nn.Dropout(resid_pdrop)\n self.resid_ffn_dropout = nn.Dropout(resid_pdrop)\n\n def forward(self, x: torch.Tensor, past_key_value: Optional[Tuple[torch.Tensor]]=None, attn_bias: Optional[torch.Tensor]=None, attention_mask: Optional[torch.ByteTensor]=None, is_causal: bool=True) -> Tuple[torch.Tensor, Optional[Tuple[torch.Tensor]]]:\n a = self.norm_1(x)\n (b, attn_weights, past_key_value) = self.attn(a, past_key_value=past_key_value, attn_bias=attn_bias, attention_mask=attention_mask, is_causal=is_causal)\n x = x + self.resid_attn_dropout(b)\n m = self.norm_2(x)\n n = self.ffn(m)\n x = x + self.resid_ffn_dropout(n)\n return (x, attn_weights, past_key_value)"
},
{
"identifier": "SharedEmbedding",
"path": "llava/model/language_model/mpt/custom_embedding.py",
"snippet": "class SharedEmbedding(nn.Embedding):\n\n def forward(self, input: Tensor, unembed: bool=False) -> Tensor:\n if unembed:\n return F.linear(input, self.weight)\n return super().forward(input)"
},
{
"identifier": "NORM_CLASS_REGISTRY",
"path": "llava/model/language_model/mpt/norm.py",
"snippet": "NORM_CLASS_REGISTRY = {'layernorm': torch.nn.LayerNorm, 'low_precision_layernorm': LPLayerNorm, 'rmsnorm': RMSNorm, 'low_precision_rmsnorm': LPRMSNorm}"
},
{
"identifier": "MPTConfig",
"path": "llava/model/language_model/mpt/configuration_mpt.py",
"snippet": "class MPTConfig(PretrainedConfig):\n model_type = 'mpt'\n\n def __init__(self, d_model: int=2048, n_heads: int=16, n_layers: int=24, expansion_ratio: int=4, max_seq_len: int=2048, vocab_size: int=50368, resid_pdrop: float=0.0, emb_pdrop: float=0.0, learned_pos_emb: bool=True, attn_config: Dict=attn_config_defaults, init_device: str='cpu', logit_scale: Optional[Union[float, str]]=None, no_bias: bool=False, verbose: int=0, embedding_fraction: float=1.0, norm_type: str='low_precision_layernorm', use_cache: bool=False, init_config: Dict=init_config_defaults, **kwargs):\n \"\"\"The MPT configuration class.\n\n Args:\n d_model (int): The size of the embedding dimension of the model.\n n_heads (int): The number of attention heads.\n n_layers (int): The number of layers in the model.\n expansion_ratio (int): The ratio of the up/down scale in the MLP.\n max_seq_len (int): The maximum sequence length of the model.\n vocab_size (int): The size of the vocabulary.\n resid_pdrop (float): The dropout probability applied to the attention output before combining with residual.\n emb_pdrop (float): The dropout probability for the embedding layer.\n learned_pos_emb (bool): Whether to use learned positional embeddings\n attn_config (Dict): A dictionary used to configure the model's attention module:\n attn_type (str): type of attention to use. Options: multihead_attention, multiquery_attention\n attn_pdrop (float): The dropout probability for the attention layers.\n attn_impl (str): The attention implementation to use. One of 'torch', 'flash', or 'triton'.\n qk_ln (bool): Whether to apply layer normalization to the queries and keys in the attention layer.\n clip_qkv (Optional[float]): If not None, clip the queries, keys, and values in the attention layer to\n this value.\n softmax_scale (Optional[float]): If not None, scale the softmax in the attention layer by this value. If None,\n use the default scale of ``1/sqrt(d_keys)``.\n prefix_lm (Optional[bool]): Whether the model should operate as a Prefix LM. This requires passing an\n extra `prefix_mask` argument which indicates which tokens belong to the prefix. Tokens in the prefix\n can attend to one another bi-directionally. Tokens outside the prefix use causal attention.\n attn_uses_sequence_id (Optional[bool]): Whether to restrict attention to tokens that have the same sequence_id.\n When the model is in `train` mode, this requires passing an extra `sequence_id` argument which indicates\n which sub-sequence each token belongs to.\n Defaults to ``False`` meaning any provided `sequence_id` will be ignored.\n alibi (bool): Whether to use the alibi bias instead of position embeddings.\n alibi_bias_max (int): The maximum value of the alibi bias.\n init_device (str): The device to use for parameter initialization.\n logit_scale (Optional[Union[float, str]]): If not None, scale the logits by this value.\n no_bias (bool): Whether to use bias in all layers.\n verbose (int): The verbosity level. 0 is silent.\n embedding_fraction (float): The fraction to scale the gradients of the embedding layer by.\n norm_type (str): choose type of norm to use\n multiquery_attention (bool): Whether to use multiquery attention implementation.\n use_cache (bool): Whether or not the model should return the last key/values attentions\n init_config (Dict): A dictionary used to configure the model initialization:\n init_config.name: The parameter initialization scheme to use. 
Options: 'default_', 'baseline_',\n 'kaiming_uniform_', 'kaiming_normal_', 'neox_init_', 'small_init_', 'xavier_uniform_', or\n 'xavier_normal_'. These mimic the parameter initialization methods in PyTorch.\n init_div_is_residual (Union[int, float, str, bool]): Value to divide initial weights by if ``module._is_residual`` is True.\n emb_init_std (Optional[float]): The standard deviation of the normal distribution used to initialize the embedding layer.\n emb_init_uniform_lim (Optional[Union[Tuple[float, float], float]]): The lower and upper limits of the uniform distribution\n used to initialize the embedding layer. Mutually exclusive with ``emb_init_std``.\n init_std (float): The standard deviation of the normal distribution used to initialize the model,\n if using the baseline_ parameter initialization scheme.\n init_gain (float): The gain to use for parameter initialization with kaiming or xavier initialization schemes.\n fan_mode (str): The fan mode to use for parameter initialization with kaiming initialization schemes.\n init_nonlinearity (str): The nonlinearity to use for parameter initialization with kaiming initialization schemes.\n ---\n See llmfoundry.models.utils.param_init_fns.py for info on other param init config options\n \"\"\"\n self.d_model = d_model\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.expansion_ratio = expansion_ratio\n self.max_seq_len = max_seq_len\n self.vocab_size = vocab_size\n self.resid_pdrop = resid_pdrop\n self.emb_pdrop = emb_pdrop\n self.learned_pos_emb = learned_pos_emb\n self.attn_config = attn_config\n self.init_device = init_device\n self.logit_scale = logit_scale\n self.no_bias = no_bias\n self.verbose = verbose\n self.embedding_fraction = embedding_fraction\n self.norm_type = norm_type\n self.use_cache = use_cache\n self.init_config = init_config\n if 'name' in kwargs:\n del kwargs['name']\n if 'loss_fn' in kwargs:\n del kwargs['loss_fn']\n super().__init__(**kwargs)\n self._validate_config()\n\n def _set_config_defaults(self, config, config_defaults):\n for (k, v) in config_defaults.items():\n if k not in config:\n config[k] = v\n return config\n\n def _validate_config(self):\n self.attn_config = self._set_config_defaults(self.attn_config, attn_config_defaults)\n self.init_config = self._set_config_defaults(self.init_config, init_config_defaults)\n if self.d_model % self.n_heads != 0:\n raise ValueError('d_model must be divisible by n_heads')\n if any((prob < 0 or prob > 1 for prob in [self.attn_config['attn_pdrop'], self.resid_pdrop, self.emb_pdrop])):\n raise ValueError(\"self.attn_config['attn_pdrop'], resid_pdrop, emb_pdrop are probabilities and must be between 0 and 1\")\n if self.attn_config['attn_impl'] not in ['torch', 'flash', 'triton']:\n raise ValueError(f\"Unknown attn_impl={self.attn_config['attn_impl']}\")\n if self.attn_config['prefix_lm'] and self.attn_config['attn_impl'] not in ['torch', 'triton']:\n raise NotImplementedError('prefix_lm only implemented with torch and triton attention.')\n if self.attn_config['alibi'] and self.attn_config['attn_impl'] not in ['torch', 'triton']:\n raise NotImplementedError('alibi only implemented with torch and triton attention.')\n if self.attn_config['attn_uses_sequence_id'] and self.attn_config['attn_impl'] not in ['torch', 'triton']:\n raise NotImplementedError('attn_uses_sequence_id only implemented with torch and triton attention.')\n if self.embedding_fraction > 1 or self.embedding_fraction <= 0:\n raise ValueError('model.embedding_fraction must be between 0 (exclusive) 
and 1 (inclusive)!')\n if isinstance(self.logit_scale, str) and self.logit_scale != 'inv_sqrt_d_model':\n raise ValueError(f\"self.logit_scale={self.logit_scale!r} is not recognized as an option; use numeric value or 'inv_sqrt_d_model'.\")\n if self.init_config.get('name', None) is None:\n raise ValueError(f\"self.init_config={self.init_config!r} 'name' needs to be set.\")\n if not self.learned_pos_emb and (not self.attn_config['alibi']):\n raise ValueError(f'Positional information must be provided to the model using either learned_pos_emb or alibi.')"
},
{
"identifier": "AutoTokenizerForMOD",
"path": "llava/model/language_model/mpt/adapt_tokenizer.py",
"snippet": "class AutoTokenizerForMOD(AutoTokenizer):\n \"\"\"AutoTokenizer + Adaptation for MOD.\n\n A simple wrapper around AutoTokenizer to make instantiating\n an MOD-adapted tokenizer a bit easier.\n\n MOD-adapted tokenizers have sentinel tokens (e.g., <extra_id_0>),\n a padding token, and a property to get the token ids of the\n sentinel tokens.\n \"\"\"\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n \"\"\"See `AutoTokenizer.from_pretrained` docstring.\"\"\"\n tokenizer = super().from_pretrained(*args, **kwargs)\n adapt_tokenizer_for_denoising(tokenizer)\n return tokenizer"
},
{
"identifier": "adapt_tokenizer_for_denoising",
"path": "llava/model/language_model/mpt/adapt_tokenizer.py",
"snippet": "def adapt_tokenizer_for_denoising(tokenizer: Tokenizer):\n \"\"\"Adds sentinel tokens and padding token (if missing).\n\n Expands the tokenizer vocabulary to include sentinel tokens\n used in mixture-of-denoiser tasks as well as a padding token.\n\n All added tokens are added as special tokens. No tokens are\n added if sentinel tokens and padding token already exist.\n \"\"\"\n sentinels_to_add = [f'<extra_id_{i}>' for i in range(NUM_SENTINEL_TOKENS)]\n tokenizer.add_tokens(sentinels_to_add, special_tokens=True)\n if tokenizer.pad_token is None:\n tokenizer.add_tokens('<pad>', special_tokens=True)\n tokenizer.pad_token = '<pad>'\n assert tokenizer.pad_token_id is not None\n sentinels = ''.join([f'<extra_id_{i}>' for i in range(NUM_SENTINEL_TOKENS)])\n _sentinel_token_ids = tokenizer(sentinels, add_special_tokens=False).input_ids\n tokenizer.sentinel_token_ids = _sentinel_token_ids"
},
{
"identifier": "add_bidirectional_mask_if_missing",
"path": "llava/model/language_model/mpt/hf_prefixlm_converter.py",
"snippet": "def add_bidirectional_mask_if_missing(batch: Dict[str, Any]):\n \"\"\"Attempts to add bidirectional_mask to batch if missing.\n\n Raises:\n KeyError if bidirectional_mask is missing and can't be inferred\n \"\"\"\n if 'bidirectional_mask' not in batch:\n if batch.get('mode', None) == 'icl_task':\n batch['bidirectional_mask'] = batch['attention_mask'].clone()\n for (i, continuation_indices) in enumerate(batch['continuation_indices']):\n batch['bidirectional_mask'][i, continuation_indices] = 0\n elif 'labels' in batch and 'attention_mask' in batch:\n batch['bidirectional_mask'] = torch.logical_and(torch.eq(batch['attention_mask'], 1), torch.eq(batch['labels'], -100)).type_as(batch['attention_mask'])\n else:\n raise KeyError('No bidirectional_mask in batch and not sure how to construct one.')"
},
{
"identifier": "convert_hf_causal_lm_to_prefix_lm",
"path": "llava/model/language_model/mpt/hf_prefixlm_converter.py",
"snippet": "def convert_hf_causal_lm_to_prefix_lm(model: CAUSAL_LM_TYPES) -> CAUSAL_LM_TYPES:\n \"\"\"Converts a HuggingFace Causal LM to a Prefix LM.\n\n Supported HuggingFace model classes:\n - `GPT2LMHeadModel`\n - `GPTNeoForCausalLM`\n - `GPTNeoXForCausalLM`\n - `GPTJForCausalLM`\n - `BloomForCausalLM`\n - `OPTForCausalLM`\n\n Conversion to a Prefix LM is done by modifying the `forward` method, and possibly also the\n `generate` method and/or select underlying methods depending on the model class.\n\n These changes preserve the model API, but add a new input to `forward`: \"bidirectional_mask\".\n\n Notes on training:\n To actually train the converted model as a Prefix LM, training batches will need to indicate\n the prefix/target structure by including `bidirectional_mask` as part of the batch inputs.\n\n **This is not a standard input and requires custom layers either within or after your dataloader.**\n\n In addition to adding `bidirectional_mask` to the batch, this custom code should modify `labels`\n such that `batch['labels'][batch['bidirectional_mask'] == 1] == -100`.\n That is, the prefix portion of the sequence should not generate any loss. Loss should only be\n generated by the target portion of the sequence.\n\n Notes on `GPTNeoForCausalLM`:\n To simplify the implementation, \"global\" and \"local\" attention layers are handled differently.\n For \"global\" layers, we handle conversion as described above. For \"local\" layers, which use a\n causal attention mask within a restricted local window, we do not alter the masking.\n\n Notes on `forward` method conversion:\n After conversion, the `forward` method will handle a new input, `bidirectional_mask`,\n which should be a [batch_size, seq_length] byte tensor, where 1 indicates token positions\n belonging to the prefix (prefix tokens can attend to one another bidirectionally), and\n 0 indicates token positions belonging to the target.\n\n The new `forward` method will incorporate `bidirectional_mask` (if supplied) into the existing\n causal mask, call the original `forward` method, and (if the causal mask is a buffer) reset\n the causal masks before returning the result.\n\n Notes on `generate` method conversion:\n After conversion, the `generate` method will have the same signature but will internally\n convert all causal masks to be purely bidirectional, call the original `generate` method, and\n (where appropriate) reset the causal masks before returning the result.\n\n This works thanks to the logic of the HuggingFace `generate` API, which first encodes the token\n \"prompt\" passed to `generate` (which is treated as the prefix) and then sequentially generates\n each new token. Encodings are cached as generation happens, so all prefix tokens can attend to one\n another (as expected in a Prefix LM) and generated tokens can only attend to prefix tokens and\n previously-generated tokens (also as expected in a Prefix LM).\n\n To preserve the API, the original methods are renamed to `_original_forward` and\n `_original_generate`, and replaced with new `forward` and `generate` methods that wrap\n them, respectively. Although implementation details vary by model class.\n \"\"\"\n if isinstance(model, _SUPPORTED_GPT_MODELS):\n return _convert_gpt_causal_lm_to_prefix_lm(model)\n elif isinstance(model, BloomForCausalLM):\n return _convert_bloom_causal_lm_to_prefix_lm(model)\n elif isinstance(model, OPTForCausalLM):\n return _convert_opt_causal_lm_to_prefix_lm(model)\n else:\n raise TypeError(f'Cannot convert model to Prefix LM. 
' + f'Model does not belong to set of supported HF models:' + f'\\n{_SUPPORTED_HF_MODELS}')"
},
{
"identifier": "init_empty_weights",
"path": "llava/model/language_model/mpt/meta_init_context.py",
"snippet": "@contextmanager\ndef init_empty_weights(include_buffers: bool=False):\n \"\"\"Meta initialization context manager.\n\n A context manager under which models are initialized with all parameters\n on the meta device, therefore creating an empty model. Useful when just\n initializing the model would blow the available RAM.\n\n Args:\n include_buffers (`bool`, *optional*, defaults to `False`): Whether or\n not to also put all buffers on the meta device while initializing.\n\n Example:\n ```python\n import torch.nn as nn\n\n # Initialize a model with 100 billions parameters in no time and without using any RAM.\n with init_empty_weights():\n tst = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])\n ```\n\n <Tip warning={true}>\n\n Any model created under this context manager has no weights. As such you can't do something like\n `model.to(some_device)` with it. To load weights inside your empty model, see [`load_checkpoint_and_dispatch`].\n\n </Tip>\n \"\"\"\n with init_on_device(torch.device('meta'), include_buffers=include_buffers) as f:\n yield f"
},
{
"identifier": "MODEL_INIT_REGISTRY",
"path": "llava/model/language_model/mpt/param_init_fns.py",
"snippet": "MODEL_INIT_REGISTRY = {'default_': torch_default_param_init_fn_, 'baseline_': baseline_param_init_fn_, 'kaiming_uniform_': kaiming_uniform_param_init_fn_, 'kaiming_normal_': kaiming_normal_param_init_fn_, 'neox_init_': neox_param_init_fn_, 'small_init_': small_param_init_fn_, 'xavier_uniform_': xavier_uniform_param_init_fn_, 'xavier_normal_': xavier_normal_param_init_fn_}"
},
{
"identifier": "generic_param_init_fn_",
"path": "llava/model/language_model/mpt/param_init_fns.py",
"snippet": "def generic_param_init_fn_(module: nn.Module, init_fn_, n_layers: int, d_model: Optional[int]=None, init_div_is_residual: Union[int, float, str, bool]=True, emb_init_std: Optional[float]=None, emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]]=None, verbose: int=0, **kwargs):\n del kwargs\n if verbose > 1:\n warnings.warn(f'If model has bias parameters they are initialized to 0.')\n init_div_is_residual = init_div_is_residual\n if init_div_is_residual is False:\n div_is_residual = 1.0\n elif init_div_is_residual is True:\n div_is_residual = math.sqrt(2 * n_layers)\n elif isinstance(init_div_is_residual, float) or isinstance(init_div_is_residual, int):\n div_is_residual = init_div_is_residual\n elif isinstance(init_div_is_residual, str) and init_div_is_residual.isnumeric():\n div_is_residual = float(init_div_is_residual)\n else:\n div_is_residual = 1.0\n raise ValueError(f'Expected init_div_is_residual to be boolean or numeric, got {init_div_is_residual}')\n if init_div_is_residual is not False:\n if verbose > 1:\n warnings.warn(f'Initializing _is_residual layers then dividing them by {div_is_residual:.3f}. ' + f'Set `init_div_is_residual: false` in init config to disable this.')\n if isinstance(module, nn.Linear):\n if hasattr(module, '_fused'):\n fused_init_helper_(module, init_fn_)\n else:\n init_fn_(module.weight)\n if module.bias is not None:\n torch.nn.init.zeros_(module.bias)\n if init_div_is_residual is not False and getattr(module, '_is_residual', False):\n with torch.no_grad():\n module.weight.div_(div_is_residual)\n elif isinstance(module, nn.Embedding):\n if emb_init_std is not None:\n std = emb_init_std\n if std == 0:\n warnings.warn(f'Embedding layer initialized to 0.')\n emb_init_fn_ = partial(torch.nn.init.normal_, mean=0.0, std=std)\n if verbose > 1:\n warnings.warn(f'Embedding layer initialized using normal distribution with mean=0 and std={std!r}.')\n elif emb_init_uniform_lim is not None:\n lim = emb_init_uniform_lim\n if isinstance(lim, Sequence):\n if len(lim) > 2:\n raise ValueError(f'Uniform init requires a min and a max limit. User input: {lim}.')\n if lim[0] == lim[1]:\n warnings.warn(f'Embedding layer initialized to {lim[0]}.')\n else:\n if lim == 0:\n warnings.warn(f'Embedding layer initialized to 0.')\n lim = [-lim, lim]\n (a, b) = lim\n emb_init_fn_ = partial(torch.nn.init.uniform_, a=a, b=b)\n if verbose > 1:\n warnings.warn(f'Embedding layer initialized using uniform distribution in range {lim}.')\n else:\n emb_init_fn_ = init_fn_\n emb_init_fn_(module.weight)\n elif isinstance(module, tuple(set(NORM_CLASS_REGISTRY.values()))):\n if verbose > 1:\n warnings.warn(f'Norm weights are set to 1. 
If norm layer has a bias it is initialized to 0.')\n if hasattr(module, 'weight') and module.weight is not None:\n torch.nn.init.ones_(module.weight)\n if hasattr(module, 'bias') and module.bias is not None:\n torch.nn.init.zeros_(module.bias)\n elif isinstance(module, nn.MultiheadAttention):\n if module._qkv_same_embed_dim:\n assert module.in_proj_weight is not None\n assert module.q_proj_weight is None and module.k_proj_weight is None and (module.v_proj_weight is None)\n assert d_model is not None\n _d = d_model\n splits = (0, _d, 2 * _d, 3 * _d)\n for (s, e) in zip(splits[:-1], splits[1:]):\n init_fn_(module.in_proj_weight[s:e])\n else:\n assert module.q_proj_weight is not None and module.k_proj_weight is not None and (module.v_proj_weight is not None)\n assert module.in_proj_weight is None\n init_fn_(module.q_proj_weight)\n init_fn_(module.k_proj_weight)\n init_fn_(module.v_proj_weight)\n if module.in_proj_bias is not None:\n torch.nn.init.zeros_(module.in_proj_bias)\n if module.bias_k is not None:\n torch.nn.init.zeros_(module.bias_k)\n if module.bias_v is not None:\n torch.nn.init.zeros_(module.bias_v)\n init_fn_(module.out_proj.weight)\n if init_div_is_residual is not False and getattr(module.out_proj, '_is_residual', False):\n with torch.no_grad():\n module.out_proj.weight.div_(div_is_residual)\n if module.out_proj.bias is not None:\n torch.nn.init.zeros_(module.out_proj.bias)\n else:\n for _ in module.parameters(recurse=False):\n raise NotImplementedError(f'{module.__class__.__name__} parameters are not initialized by param_init_fn.')"
}
] | import math
import warnings
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import List, Optional, Tuple, Union
from transformers import PreTrainedModel, PreTrainedTokenizer, PreTrainedTokenizerFast
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from .attention import attn_bias_shape, build_attn_bias
from .blocks import MPTBlock
from .custom_embedding import SharedEmbedding
from .norm import NORM_CLASS_REGISTRY
from .configuration_mpt import MPTConfig
from .adapt_tokenizer import AutoTokenizerForMOD, adapt_tokenizer_for_denoising
from .hf_prefixlm_converter import add_bidirectional_mask_if_missing, convert_hf_causal_lm_to_prefix_lm
from .meta_init_context import init_empty_weights
from .param_init_fns import MODEL_INIT_REGISTRY, generic_param_init_fn_
from .flash_attn_triton import flash_attn_func | 7,343 | """A simple, flexible implementation of a GPT model.
Inspired by https://github.com/karpathy/minGPT/blob/master/mingpt/model.py
"""
try:
    from .flash_attn_triton import flash_attn_func
except:
pass
Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]
class MPTPreTrainedModel(PreTrainedModel):
config_class = MPTConfig
base_model_prefix = 'model'
_no_split_modules = ['MPTBlock']
class MPTModel(MPTPreTrainedModel):
def __init__(self, config: MPTConfig):
config._validate_config()
super().__init__(config)
self.attn_impl = config.attn_config['attn_impl']
self.prefix_lm = config.attn_config['prefix_lm']
self.attn_uses_sequence_id = config.attn_config['attn_uses_sequence_id']
self.alibi = config.attn_config['alibi']
self.alibi_bias_max = config.attn_config['alibi_bias_max']
if config.init_device == 'mixed':
if dist.get_local_rank() == 0:
config.init_device = 'cpu'
else:
config.init_device = 'meta'
if config.norm_type.lower() not in NORM_CLASS_REGISTRY.keys():
norm_options = ' | '.join(NORM_CLASS_REGISTRY.keys())
raise NotImplementedError(f'Requested norm type ({config.norm_type}) is not implemented within this repo (Options: {norm_options}).')
norm_class = NORM_CLASS_REGISTRY[config.norm_type.lower()]
self.embedding_fraction = config.embedding_fraction
self.wte = SharedEmbedding(config.vocab_size, config.d_model, device=config.init_device)
if not self.alibi:
self.wpe = torch.nn.Embedding(config.max_seq_len, config.d_model, device=config.init_device)
self.emb_drop = nn.Dropout(config.emb_pdrop)
self.blocks = nn.ModuleList([MPTBlock(device=config.init_device, **config.to_dict()) for _ in range(config.n_layers)])
self.norm_f = norm_class(config.d_model, device=config.init_device)
if config.init_device != 'meta':
print(f'You are using config.init_device={config.init_device!r}, but you can also use config.init_device="meta" with Composer + FSDP for fast initialization.')
self.apply(self.param_init_fn)
self.is_causal = not self.prefix_lm
self._attn_bias_initialized = False
self.attn_bias = None
self.attn_bias_shape = attn_bias_shape(self.attn_impl, config.n_heads, config.max_seq_len, self.alibi, prefix_lm=self.prefix_lm, causal=self.is_causal, use_sequence_id=self.attn_uses_sequence_id)
if config.no_bias:
for module in self.modules():
if hasattr(module, 'bias') and isinstance(module.bias, nn.Parameter):
if config.verbose:
warnings.warn(f'Removing bias ({module.bias}) from {module}.')
module.register_parameter('bias', None)
if config.verbose and config.verbose > 2:
print(self)
if 'verbose' not in self.config.init_config:
self.config.init_config['verbose'] = self.config.verbose
if self.config.init_config['verbose'] > 1:
init_fn_name = self.config.init_config['name']
warnings.warn(f'Using {init_fn_name} initialization.')
self.gradient_checkpointing = False
def get_input_embeddings(self):
return self.wte
def set_input_embeddings(self, value):
self.wte = value
@torch.no_grad()
def _attn_bias(self, device, dtype, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None):
if not self._attn_bias_initialized:
if self.attn_bias_shape:
self.attn_bias = torch.zeros(self.attn_bias_shape, device=device, dtype=dtype)
| """A simple, flexible implementation of a GPT model.
Inspired by https://github.com/karpathy/minGPT/blob/master/mingpt/model.py
"""
try:
    from .flash_attn_triton import flash_attn_func
except:
pass
Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]
class MPTPreTrainedModel(PreTrainedModel):
config_class = MPTConfig
base_model_prefix = 'model'
_no_split_modules = ['MPTBlock']
class MPTModel(MPTPreTrainedModel):
def __init__(self, config: MPTConfig):
config._validate_config()
super().__init__(config)
self.attn_impl = config.attn_config['attn_impl']
self.prefix_lm = config.attn_config['prefix_lm']
self.attn_uses_sequence_id = config.attn_config['attn_uses_sequence_id']
self.alibi = config.attn_config['alibi']
self.alibi_bias_max = config.attn_config['alibi_bias_max']
if config.init_device == 'mixed':
if dist.get_local_rank() == 0:
config.init_device = 'cpu'
else:
config.init_device = 'meta'
if config.norm_type.lower() not in NORM_CLASS_REGISTRY.keys():
norm_options = ' | '.join(NORM_CLASS_REGISTRY.keys())
raise NotImplementedError(f'Requested norm type ({config.norm_type}) is not implemented within this repo (Options: {norm_options}).')
norm_class = NORM_CLASS_REGISTRY[config.norm_type.lower()]
self.embedding_fraction = config.embedding_fraction
self.wte = SharedEmbedding(config.vocab_size, config.d_model, device=config.init_device)
if not self.alibi:
self.wpe = torch.nn.Embedding(config.max_seq_len, config.d_model, device=config.init_device)
self.emb_drop = nn.Dropout(config.emb_pdrop)
self.blocks = nn.ModuleList([MPTBlock(device=config.init_device, **config.to_dict()) for _ in range(config.n_layers)])
self.norm_f = norm_class(config.d_model, device=config.init_device)
if config.init_device != 'meta':
print(f'You are using config.init_device={config.init_device!r}, but you can also use config.init_device="meta" with Composer + FSDP for fast initialization.')
self.apply(self.param_init_fn)
self.is_causal = not self.prefix_lm
self._attn_bias_initialized = False
self.attn_bias = None
self.attn_bias_shape = attn_bias_shape(self.attn_impl, config.n_heads, config.max_seq_len, self.alibi, prefix_lm=self.prefix_lm, causal=self.is_causal, use_sequence_id=self.attn_uses_sequence_id)
if config.no_bias:
for module in self.modules():
if hasattr(module, 'bias') and isinstance(module.bias, nn.Parameter):
if config.verbose:
warnings.warn(f'Removing bias ({module.bias}) from {module}.')
module.register_parameter('bias', None)
if config.verbose and config.verbose > 2:
print(self)
if 'verbose' not in self.config.init_config:
self.config.init_config['verbose'] = self.config.verbose
if self.config.init_config['verbose'] > 1:
init_fn_name = self.config.init_config['name']
warnings.warn(f'Using {init_fn_name} initialization.')
self.gradient_checkpointing = False
def get_input_embeddings(self):
return self.wte
def set_input_embeddings(self, value):
self.wte = value
@torch.no_grad()
def _attn_bias(self, device, dtype, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None):
if not self._attn_bias_initialized:
if self.attn_bias_shape:
self.attn_bias = torch.zeros(self.attn_bias_shape, device=device, dtype=dtype) | self.attn_bias = build_attn_bias(self.attn_impl, self.attn_bias, self.config.n_heads, self.config.max_seq_len, causal=self.is_causal, alibi=self.alibi, alibi_bias_max=self.alibi_bias_max) | 1 | 2023-11-07 13:06:02+00:00 | 12k |
TheFunny/ArisuAutoSweeper | module/webui/updater.py | [
{
"identifier": "ExecutionError",
"path": "deploy/Windows/config.py",
"snippet": "class ExecutionError(Exception):\n pass"
},
{
"identifier": "GitManager",
"path": "deploy/Windows/git.py",
"snippet": "class GitManager(DeployConfig):\n @staticmethod\n def remove(file):\n try:\n os.remove(file)\n logger.info(f'Removed file: {file}')\n except FileNotFoundError:\n logger.info(f'File not found: {file}')\n\n @cached_property\n def git_config(self):\n conf = GitConfigParser()\n conf.read('./.git/config')\n return conf\n\n def git_repository_init(\n self, repo, source='origin', branch='master',\n proxy='', ssl_verify=True, keep_changes=False\n ):\n logger.hr('Git Init', 1)\n if not self.execute(f'\"{self.git}\" init', allow_failure=True):\n self.remove('./.git/config')\n self.remove('./.git/index')\n self.remove('./.git/HEAD')\n self.remove('./.git/ORIG_HEAD')\n self.execute(f'\"{self.git}\" init')\n Progress.GitInit()\n\n logger.hr('Set Git Proxy', 1)\n if proxy:\n if not self.git_config.check('http', 'proxy', value=proxy):\n self.execute(f'\"{self.git}\" config --local http.proxy {proxy}')\n if not self.git_config.check('https', 'proxy', value=proxy):\n self.execute(f'\"{self.git}\" config --local https.proxy {proxy}')\n else:\n if not self.git_config.check('http', 'proxy', value=None):\n self.execute(f'\"{self.git}\" config --local --unset http.proxy', allow_failure=True)\n if not self.git_config.check('https', 'proxy', value=None):\n self.execute(f'\"{self.git}\" config --local --unset https.proxy', allow_failure=True)\n\n if ssl_verify:\n if not self.git_config.check('http', 'sslVerify', value='true'):\n self.execute(f'\"{self.git}\" config --local http.sslVerify true', allow_failure=True)\n else:\n if not self.git_config.check('http', 'sslVerify', value='false'):\n self.execute(f'\"{self.git}\" config --local http.sslVerify false', allow_failure=True)\n Progress.GitSetConfig()\n\n logger.hr('Set Git Repository', 1)\n if not self.git_config.check(f'remote \"{source}\"', 'url', value=repo):\n if not self.execute(f'\"{self.git}\" remote set-url {source} {repo}', allow_failure=True):\n self.execute(f'\"{self.git}\" remote add {source} {repo}')\n Progress.GitSetRepo()\n\n logger.hr('Fetch Repository Branch', 1)\n self.execute(f'\"{self.git}\" fetch {source} {branch}')\n Progress.GitFetch()\n\n logger.hr('Pull Repository Branch', 1)\n # Remove git lock\n for lock_file in [\n './.git/index.lock',\n './.git/HEAD.lock',\n './.git/refs/heads/master.lock',\n ]:\n if os.path.exists(lock_file):\n logger.info(f'Lock file {lock_file} exists, removing')\n os.remove(lock_file)\n if keep_changes:\n if self.execute(f'\"{self.git}\" stash', allow_failure=True):\n self.execute(f'\"{self.git}\" pull --ff-only {source} {branch}')\n if self.execute(f'\"{self.git}\" stash pop', allow_failure=True):\n pass\n else:\n # No local changes to existing files, untracked files not included\n logger.info('Stash pop failed, there seems to be no local changes, skip instead')\n else:\n logger.info('Stash failed, this may be the first installation, drop changes instead')\n self.execute(f'\"{self.git}\" reset --hard {source}/{branch}')\n self.execute(f'\"{self.git}\" pull --ff-only {source} {branch}')\n else:\n self.execute(f'\"{self.git}\" reset --hard {source}/{branch}')\n Progress.GitReset()\n # Since `git fetch` is already called, checkout is faster\n if not self.execute(f'\"{self.git}\" checkout {branch}', allow_failure=True):\n self.execute(f'\"{self.git}\" pull --ff-only {source} {branch}')\n Progress.GitCheckout()\n\n logger.hr('Show Version', 1)\n self.execute(f'\"{self.git}\" --no-pager log --no-merges -1')\n Progress.GitShowVersion()\n\n def git_install(self):\n logger.hr('Update Alas', 0)\n\n if not 
self.AutoUpdate:\n logger.info('AutoUpdate is disabled, skip')\n Progress.GitShowVersion()\n return\n\n self.git_repository_init(\n repo=self.Repository,\n source='origin',\n branch=self.Branch,\n proxy=self.GitProxy,\n ssl_verify=self.SSLVerify,\n keep_changes=self.KeepLocalChanges,\n )"
},
{
"identifier": "PipManager",
"path": "deploy/Windows/pip.py",
"snippet": "class PipManager(DeployConfig):\n @cached_property\n def pip(self):\n return f'\"{self.python}\" -m pip'\n\n @cached_property\n def python_site_packages(self):\n return os.path.abspath(os.path.join(self.python, '../Lib/site-packages')) \\\n .replace(r\"\\\\\", \"/\").replace(\"\\\\\", \"/\")\n\n @cached_property\n def set_installed_dependency(self) -> t.Set[DataDependency]:\n data = []\n regex = re.compile(r'(.*)-(.*).dist-info')\n try:\n for name in os.listdir(self.python_site_packages):\n res = regex.search(name)\n if res:\n dep = DataDependency(name=res.group(1), version=res.group(2))\n data.append(dep)\n except FileNotFoundError:\n logger.info(f'Directory not found: {self.python_site_packages}')\n return set(data)\n\n @cached_property\n def set_required_dependency(self) -> t.Set[DataDependency]:\n data = []\n regex = re.compile('(.*)==(.*)[ ]*#')\n file = self.filepath('./requirements.txt')\n try:\n with open(file, 'r', encoding='utf-8') as f:\n for line in f.readlines():\n res = regex.search(line)\n if res:\n dep = DataDependency(name=res.group(1), version=res.group(2))\n data.append(dep)\n except FileNotFoundError:\n logger.info(f'File not found: {file}')\n return set(data)\n\n @cached_property\n def set_dependency_to_install(self) -> t.Set[DataDependency]:\n \"\"\"\n A poor dependency comparison, but much much faster than `pip install` and `pip list`\n \"\"\"\n data = []\n for dep in self.set_required_dependency:\n if dep not in self.set_installed_dependency:\n data.append(dep)\n return set(data)\n\n def pip_install(self):\n logger.hr('Update Dependencies', 0)\n\n if not self.InstallDependencies:\n logger.info('InstallDependencies is disabled, skip')\n Progress.UpdateDependency()\n return\n\n if not len(self.set_dependency_to_install):\n logger.info('All dependencies installed')\n Progress.UpdateDependency()\n return\n else:\n logger.info(f'Dependencies to install: {self.set_dependency_to_install}')\n\n # Install\n logger.hr('Check Python', 1)\n self.execute(f'\"{self.python}\" --version')\n\n arg = []\n if self.PypiMirror:\n mirror = self.PypiMirror\n arg += ['-i', mirror]\n # Trust http mirror or skip ssl verify\n if 'http:' in mirror or not self.SSLVerify:\n arg += ['--trusted-host', urlparse(mirror).hostname]\n elif not self.SSLVerify:\n arg += ['--trusted-host', 'pypi.org']\n arg += ['--trusted-host', 'files.pythonhosted.org']\n\n # Don't update pip, just leave it.\n # logger.hr('Update pip', 1)\n # self.execute(f'\"{self.pip}\" install --upgrade pip{arg}')\n arg += ['--disable-pip-version-check']\n\n logger.hr('Update Dependencies', 1)\n arg = ' ' + ' '.join(arg) if arg else ''\n self.execute(f'{self.pip} install -r {self.requirements_file}{arg}')\n Progress.UpdateDependency()"
},
{
"identifier": "DEPLOY_CONFIG",
"path": "deploy/Windows/utils.py",
"snippet": "DEPLOY_CONFIG = './config/deploy.yaml'"
},
{
"identifier": "retry",
"path": "module/base/retry.py",
"snippet": "def retry(exceptions=Exception, tries=-1, delay=0, max_delay=None, backoff=1, jitter=0, logger=logging_logger):\n \"\"\"Returns a retry decorator.\n\n :param exceptions: an exception or a tuple of exceptions to catch. default: Exception.\n :param tries: the maximum number of attempts. default: -1 (infinite).\n :param delay: initial delay between attempts. default: 0.\n :param max_delay: the maximum value of delay. default: None (no limit).\n :param backoff: multiplier applied to delay between attempts. default: 1 (no backoff).\n :param jitter: extra seconds added to delay between attempts. default: 0.\n fixed if a number, random if a range tuple (min, max)\n :param logger: logger.warning(fmt, error, delay) will be called on failed attempts.\n default: retry.logging_logger. if None, logging is disabled.\n :returns: a retry decorator.\n \"\"\"\n\n @decorator\n def retry_decorator(f, *fargs, **fkwargs):\n args = fargs if fargs else list()\n kwargs = fkwargs if fkwargs else dict()\n return __retry_internal(partial(f, *args, **kwargs), exceptions, tries, delay, max_delay, backoff, jitter,\n logger)\n\n return retry_decorator"
},
{
"identifier": "logger",
"path": "module/logger/logger.py",
"snippet": "def empty_function(*args, **kwargs):\n def __init__(self, *args, func: Callable[[ConsoleRenderable], None] = None, **kwargs):\n def emit(self, record: logging.LogRecord) -> None:\n def handle(self, record: logging.LogRecord) -> bool:\n def options(self) -> ConsoleOptions:\ndef _set_file_logger(name=pyw_name):\ndef set_file_logger(name=pyw_name):\ndef set_func_logger(func):\ndef _get_renderables(\n self: Console, *objects, sep=\" \", end=\"\\n\", justify=None, emoji=None, markup=None, highlight=None,\n) -> List[ConsoleRenderable]:\ndef print(*objects: ConsoleRenderable, **kwargs):\ndef rule(title=\"\", *, characters=\"─\", style=\"rule.line\", end=\"\\n\", align=\"center\"):\ndef hr(title, level=3):\ndef attr(name, text):\ndef attr_align(name, text, front='', align=22):\ndef show():\ndef error_convert(func):\n def error_wrapper(msg, *args, **kwargs):\nclass RichFileHandler(RichHandler):\nclass RichRenderableHandler(RichHandler):\nclass HTMLConsole(Console):\nclass Highlighter(RegexHighlighter):\nWEB_THEME = Theme({\n \"web.brace\": Style(bold=True),\n \"web.bool_true\": Style(color=\"bright_green\", italic=True),\n \"web.bool_false\": Style(color=\"bright_red\", italic=True),\n \"web.none\": Style(color=\"magenta\", italic=True),\n \"web.path\": Style(color=\"magenta\"),\n \"web.filename\": Style(color=\"bright_magenta\"),\n \"web.str\": Style(color=\"green\", italic=False, bold=False),\n \"web.time\": Style(color=\"cyan\"),\n \"rule.text\": Style(bold=True),\n})"
},
{
"identifier": "DeployConfig",
"path": "module/webui/config.py",
"snippet": "class DeployConfig(_DeployConfig):\n def show_config(self):\n pass\n\n def read(self):\n \"\"\"\n Read and update deploy config, copy `self.configs` to properties.\n \"\"\"\n self.config = poor_yaml_read_with_lock(DEPLOY_TEMPLATE)\n self.config.update(poor_yaml_read_with_lock(self.file))\n\n for key, value in self.config.items():\n if hasattr(self, key):\n super().__setattr__(key, value)\n\n def write(self):\n \"\"\"\n Write `self.config` into deploy config.\n \"\"\"\n poor_yaml_write_with_lock(self.config, self.file)\n\n def __setattr__(self, key: str, value):\n \"\"\"\n Catch __setattr__, copy to `self.config`, write deploy config.\n \"\"\"\n super().__setattr__(key, value)\n if key[0].isupper() and key in self.config:\n self.config[key] = value\n self.write()"
},
{
"identifier": "ProcessManager",
"path": "module/webui/process_manager.py",
"snippet": "class ProcessManager:\n _processes: Dict[str, \"ProcessManager\"] = {}\n\n def __init__(self, config_name: str = \"alas\") -> None:\n self.config_name = config_name\n self._renderable_queue: queue.Queue[ConsoleRenderable] = State.manager.Queue()\n self.renderables: List[ConsoleRenderable] = []\n self.renderables_max_length = 400\n self.renderables_reduce_length = 80\n self._process: Process = None\n self.thd_log_queue_handler: threading.Thread = None\n\n def start(self, func, ev: threading.Event = None) -> None:\n if not self.alive:\n if func is None:\n func = get_config_mod(self.config_name)\n self._process = Process(\n target=ProcessManager.run_process,\n args=(\n self.config_name,\n func,\n self._renderable_queue,\n ev,\n ),\n )\n self._process.start()\n self.start_log_queue_handler()\n\n def start_log_queue_handler(self):\n if (\n self.thd_log_queue_handler is not None\n and self.thd_log_queue_handler.is_alive()\n ):\n return\n self.thd_log_queue_handler = threading.Thread(\n target=self._thread_log_queue_handler\n )\n self.thd_log_queue_handler.start()\n\n def stop(self) -> None:\n lock = FileLock(f\"{filepath_config(self.config_name)}.lock\")\n with lock:\n if self.alive:\n self._process.kill()\n self.renderables.append(\n f\"[{self.config_name}] exited. Reason: Manual stop\\n\"\n )\n if self.thd_log_queue_handler is not None:\n self.thd_log_queue_handler.join(timeout=1)\n if self.thd_log_queue_handler.is_alive():\n logger.warning(\n \"Log queue handler thread does not stop within 1 seconds\"\n )\n logger.info(f\"[{self.config_name}] exited\")\n\n def _thread_log_queue_handler(self) -> None:\n while self.alive:\n try:\n log = self._renderable_queue.get(timeout=1)\n except queue.Empty:\n continue\n self.renderables.append(log)\n if len(self.renderables) > self.renderables_max_length:\n self.renderables = self.renderables[self.renderables_reduce_length :]\n logger.info(\"End of log queue handler loop\")\n\n @property\n def alive(self) -> bool:\n if self._process is not None:\n return self._process.is_alive()\n else:\n return False\n\n @property\n def state(self) -> int:\n if self.alive:\n return 1\n elif len(self.renderables) == 0:\n return 2\n else:\n console = Console(no_color=True)\n with console.capture() as capture:\n console.print(self.renderables[-1])\n s = capture.get().strip()\n if s.endswith(\"Reason: Manual stop\"):\n return 2\n elif s.endswith(\"Reason: Finish\"):\n return 2\n elif s.endswith(\"Reason: Update\"):\n return 4\n else:\n return 3\n\n @classmethod\n def get_manager(cls, config_name: str) -> \"ProcessManager\":\n \"\"\"\n Create a new alas if not exists.\n \"\"\"\n if config_name not in cls._processes:\n cls._processes[config_name] = ProcessManager(config_name)\n return cls._processes[config_name]\n\n @staticmethod\n def run_process(\n config_name, func: str, q: queue.Queue, e: threading.Event = None\n ) -> None:\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--electron\", action=\"store_true\", help=\"Runs by electron client.\"\n )\n args, _ = parser.parse_known_args()\n State.electron = args.electron\n\n # Setup logger\n set_file_logger(name=config_name)\n if State.electron:\n # https://github.com/LmeSzinc/AzurLaneAutoScript/issues/2051\n logger.info(\"Electron detected, remove log output to stdout\")\n from module.logger.logger import console_hdlr\n logger.removeHandler(console_hdlr)\n set_func_logger(func=q.put)\n\n from module.config.config import AzurLaneConfig\n\n AzurLaneConfig.stop_event = e\n try:\n # Run alas\n if func == 
\"alas\":\n from module.alas import AzurLaneAutoScript\n from aas import ArisuAutoSweeper\n\n if e is not None:\n AzurLaneAutoScript.stop_event = e\n ArisuAutoSweeper(config_name=config_name).loop()\n else:\n logger.critical(f\"No function matched: {func}\")\n logger.info(f\"[{config_name}] exited. Reason: Finish\\n\")\n except Exception as e:\n logger.exception(e)\n\n @classmethod\n def running_instances(cls) -> List[\"ProcessManager\"]:\n l = []\n for process in cls._processes.values():\n if process.alive:\n l.append(process)\n return l\n\n @staticmethod\n def restart_processes(\n instances: List[Union[\"ProcessManager\", str]] = None, ev: threading.Event = None\n ):\n \"\"\"\n After update and reload, or failed to perform an update,\n restart all alas that running before update\n \"\"\"\n logger.hr(\"Restart alas\")\n\n # Load MOD_CONFIG_DICT\n mod_instance()\n\n if instances is None:\n instances = []\n\n _instances = set()\n\n for instance in instances:\n if isinstance(instance, str):\n _instances.add(ProcessManager.get_manager(instance))\n elif isinstance(instance, ProcessManager):\n _instances.add(instance)\n\n try:\n with open(\"./config/reloadalas\", mode=\"r\") as f:\n for line in f.readlines():\n line = line.strip()\n _instances.add(ProcessManager.get_manager(line))\n except FileNotFoundError:\n pass\n\n for process in _instances:\n logger.info(f\"Starting [{process.config_name}]\")\n process.start(func=get_config_mod(process.config_name), ev=ev)\n\n try:\n os.remove(\"./config/reloadalas\")\n except:\n pass\n logger.info(\"Start alas complete\")"
},
{
"identifier": "State",
"path": "module/webui/setting.py",
"snippet": "class State:\n \"\"\"\n Shared settings\n \"\"\"\n\n _init = False\n _clearup = False\n\n restart_event: threading.Event = None\n manager: SyncManager = None\n electron: bool = False\n theme: str = \"default\"\n\n @classmethod\n def init(cls):\n cls.manager = multiprocessing.Manager()\n cls._init = True\n\n @classmethod\n def clearup(cls):\n cls.manager.shutdown()\n cls._clearup = True\n\n @cached_class_property\n def deploy_config(self) -> \"DeployConfig\":\n \"\"\"\n Returns:\n DeployConfig:\n \"\"\"\n from module.webui.config import DeployConfig\n\n return DeployConfig()\n\n @cached_class_property\n def config_updater(self) -> \"ConfigUpdater\":\n \"\"\"\n Returns:\n ConfigUpdater:\n \"\"\"\n from module.config.config_updater import ConfigUpdater\n\n return ConfigUpdater()"
},
{
"identifier": "TaskHandler",
"path": "module/webui/utils.py",
"snippet": "class TaskHandler:\n def __init__(self) -> None:\n # List of background running task\n self.tasks: List[Task] = []\n # List of task name to be removed\n self.pending_remove_tasks: List[Task] = []\n # Running task\n self._task = None\n # Task running thread\n self._thread: threading.Thread = None\n self._alive = False\n self._lock = threading.Lock()\n\n def add(self, func, delay: float, pending_delete: bool = False) -> None:\n \"\"\"\n Add a task running background.\n Another way of `self.add_task()`.\n func: Callable or Generator\n \"\"\"\n if isinstance(func, Callable):\n g = get_generator(func)\n elif isinstance(func, Generator):\n g = func\n self.add_task(Task(g, delay), pending_delete=pending_delete)\n\n def add_task(self, task: Task, pending_delete: bool = False) -> None:\n \"\"\"\n Add a task running background.\n \"\"\"\n if task in self.tasks:\n logger.warning(f\"Task {task} already in tasks list.\")\n return\n logger.info(f\"Add task {task}\")\n with self._lock:\n self.tasks.append(task)\n if pending_delete:\n self.pending_remove_tasks.append(task)\n\n def _remove_task(self, task: Task) -> None:\n if task in self.tasks:\n self.tasks.remove(task)\n logger.info(f\"Task {task} removed.\")\n else:\n logger.warning(\n f\"Failed to remove task {task}. Current tasks list: {self.tasks}\"\n )\n\n def remove_task(self, task: Task, nowait: bool = False) -> None:\n \"\"\"\n Remove a task in `self.tasks`.\n Args:\n task:\n nowait: if True, remove it right now,\n otherwise remove when call `self.remove_pending_task`\n \"\"\"\n if nowait:\n with self._lock:\n self._remove_task(task)\n else:\n self.pending_remove_tasks.append(task)\n\n def remove_pending_task(self) -> None:\n \"\"\"\n Remove all pending remove tasks.\n \"\"\"\n with self._lock:\n for task in self.pending_remove_tasks:\n self._remove_task(task)\n self.pending_remove_tasks = []\n\n def remove_current_task(self) -> None:\n self.remove_task(self._task, nowait=True)\n\n def get_task(self, name) -> Task:\n with self._lock:\n for task in self.tasks:\n if task.name == name:\n return task\n return None\n\n def loop(self) -> None:\n \"\"\"\n Start task loop.\n You **should** run this function in an individual thread.\n \"\"\"\n self._alive = True\n while self._alive:\n if self.tasks:\n with self._lock:\n self.tasks.sort(key=operator.attrgetter(\"next_run\"))\n task = self.tasks[0]\n if task.next_run < time.time():\n start_time = time.time()\n try:\n self._task = task\n # logger.debug(f'Start task {task.g.__name__}')\n task.send(self)\n # logger.debug(f'End task {task.g.__name__}')\n except Exception as e:\n logger.exception(e)\n self.remove_task(task, nowait=True)\n finally:\n self._task = None\n end_time = time.time()\n task.next_run += task.delay\n with self._lock:\n for task in self.tasks:\n task.next_run += end_time - start_time\n else:\n time.sleep(0.05)\n else:\n time.sleep(0.5)\n logger.info(\"End of task handler loop\")\n\n def _get_thread(self) -> threading.Thread:\n thread = threading.Thread(target=self.loop, daemon=True)\n return thread\n\n def start(self) -> None:\n \"\"\"\n Start task handler.\n \"\"\"\n logger.info(\"Start task handler\")\n if self._thread is not None and self._thread.is_alive():\n logger.warning(\"Task handler already running!\")\n return\n self._thread = self._get_thread()\n self._thread.start()\n\n def stop(self) -> None:\n self.remove_pending_task()\n self._alive = False\n self._thread.join(timeout=2)\n if not self._thread.is_alive():\n logger.info(\"Finish task handler\")\n else:\n 
logger.warning(\"Task handler does not stop within 2 seconds\")"
},
{
"identifier": "get_next_time",
"path": "module/webui/utils.py",
"snippet": "def get_next_time(t: datetime.time):\n now = datetime.datetime.today().time()\n second = (\n (t.hour - now.hour) * 3600\n + (t.minute - now.minute) * 60\n + (t.second - now.second)\n )\n if second < 0:\n second += 86400\n return second"
}
] | import datetime
import subprocess
import threading
import time
import requests
from typing import Generator, List, Tuple
from deploy.Windows.config import ExecutionError
from deploy.Windows.git import GitManager
from deploy.Windows.pip import PipManager
from deploy.Windows.utils import DEPLOY_CONFIG
from module.base.retry import retry
from module.logger import logger
from module.webui.config import DeployConfig
from module.webui.process_manager import ProcessManager
from module.webui.setting import State
from module.webui.utils import TaskHandler, get_next_time
from module.webui.app import clearup | 7,825 | logger.info("No update")
return 0
try:
get_commit = requests.get(
base + f"{owner}/{repo}/commits/" + local_sha,
headers=headers,
params=para,
)
except Exception as e:
logger.exception(e)
logger.warning("Check update failed")
return 0
if get_commit.status_code != 200:
# for develops
logger.info(
f"Cannot find local commit {local_sha[:8]} in upstream, skip update"
)
return 0
logger.info(f"Update {sha[:8]} available")
return 1
def check_update(self):
if self.state in (0, "failed", "finish"):
self.state = self._check_update()
@retry(ExecutionError, tries=3, delay=5, logger=None)
def git_install(self):
return super().git_install()
@retry(ExecutionError, tries=3, delay=5, logger=None)
def pip_install(self):
return super().pip_install()
def update(self):
logger.hr("Run update")
self.set_repo()
try:
self.git_install()
self.pip_install()
except ExecutionError:
return False
return True
def run_update(self):
if self.state not in ("failed", 0, 1):
return
self._start_update()
def _start_update(self):
self.state = "start"
instances = ProcessManager.running_instances()
names = []
for alas in instances:
names.append(alas.config_name + "\n")
logger.info("Waiting all running alas finish.")
self._wait_update(instances, names)
def _wait_update(self, instances: List[ProcessManager], names):
if self.state == "cancel":
self.state = 1
self.state = "wait"
self.event.set()
_instances = instances.copy()
start_time = time.time()
while _instances:
for alas in _instances:
if not alas.alive:
_instances.remove(alas)
logger.info(f"Alas [{alas.config_name}] stopped")
logger.info(f"Remains: {[alas.config_name for alas in _instances]}")
if self.state == "cancel":
self.state = 1
self.event.clear()
ProcessManager.restart_processes(instances, self.event)
return
time.sleep(0.25)
if time.time() - start_time > 60 * 10:
logger.warning("Waiting alas shutdown timeout, force kill")
for alas in _instances:
alas.stop()
break
self._run_update(instances, names)
def _run_update(self, instances, names):
self.state = "run update"
logger.info("All alas stopped, start updating")
if self.update():
if State.restart_event is not None:
self.state = "reload"
with open("./config/reloadalas", mode="w") as f:
f.writelines(names)
self._trigger_reload(2)
clearup()
else:
self.state = "finish"
else:
self.state = "failed"
logger.warning("Update failed")
self.event.clear()
ProcessManager.restart_processes(instances, self.event)
return False
@staticmethod
def _trigger_reload(delay=2):
def trigger():
# with open("./config/reloadflag", mode="w"):
# # app ended here and uvicorn will restart whole app
# pass
State.restart_event.set()
timer = threading.Timer(delay, trigger)
timer.start()
def schedule_update(self) -> Generator:
|
class Updater(DeployConfig, GitManager, PipManager):
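    # Descriptive note (added): Updater drives the web UI's self-update flow — it
    # checks the remote git repository for new commits, retries git/pip installs on
    # failure, stops running Alas instances before updating, and restarts them (or
    # triggers an app reload) afterwards.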
def __init__(self, file=DEPLOY_CONFIG):
super().__init__(file=file)
self.set_repo()
self.state = 0
self.event: threading.Event = None
@property
def delay(self):
self.read()
return int(self.CheckUpdateInterval) * 60
@property
def schedule_time(self):
self.read()
t = self.AutoRestartTime
if t is not None:
return datetime.time.fromisoformat(t)
else:
return None
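    # execute_output() runs a shell command (here: git) and returns its captured
    # stdout; get_commit() builds on it to parse `git log` output into
    # (sha1, author, isotime, message) tuples.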
def execute_output(self, command) -> str:
command = command.replace(r"\\", "/").replace("\\", "/").replace('"', '"')
log = subprocess.run(
command, capture_output=True, text=True, encoding="utf8", shell=True
).stdout
return log
def get_commit(self, revision="", n=1, short_sha1=False) -> Tuple:
"""
Return:
(sha1, author, isotime, message,)
"""
ph = "h" if short_sha1 else "H"
log = self.execute_output(
f'"{self.git}" log {revision} --pretty=format:"%{ph}---%an---%ad---%s" --date=iso -{n}'
)
if not log:
return None, None, None, None
logs = log.split("\n")
logs = list(map(lambda log: tuple(log.split("---")), logs))
if n == 1:
return logs[0]
else:
return logs
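    # Git-based update check: fetch the remote branch (up to 3 attempts), skip the
    # update if the local HEAD has commits unknown to the remote, then inspect
    # `..{source}/{branch}` to see whether new upstream commits exist.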
def _check_update(self) -> bool:
self.state = "checking"
# if State.deploy_config.GitOverCdn:
# status = self.goc_client.get_status()
# if status == "uptodate":
# logger.info(f"No update")
# return False
# elif status == "behind":
# logger.info(f"New update available")
# return True
# else:
# # failed, should fallback to `git pull`
# pass
source = "origin"
for _ in range(3):
if self.execute(
f'"{self.git}" fetch {source} {self.Branch}', allow_failure=True
):
break
else:
logger.warning("Git fetch failed")
return False
log = self.execute_output(
f'"{self.git}" log --not --remotes={source}/* -1 --oneline'
)
if log:
logger.info(
f"Cannot find local commit {log.split()[0]} in upstream, skip update"
)
return False
sha1, _, _, message = self.get_commit(f"..{source}/{self.Branch}")
if sha1:
logger.info(f"New update available")
logger.info(f"{sha1[:8]} - {message}")
return True
else:
logger.info(f"No update")
return False
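    # Deprecated variant of the update check that queries the Gitee/GitHub REST API
    # instead of running git locally.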
def _check_update_(self) -> bool:
"""
Deprecated
"""
self.state = "checking"
r = self.Repository.split("/")
owner = r[3]
repo = r[4]
if "gitee" in r[2]:
base = "https://gitee.com/api/v5/repos/"
headers = {}
token = self.config["ApiToken"]
if token:
para = {"access_token": token}
else:
base = "https://api.github.com/repos/"
headers = {"Accept": "application/vnd.github.v3.sha"}
para = {}
token = self.config["ApiToken"]
if token:
headers["Authorization"] = "token " + token
try:
list_commit = requests.get(
base + f"{owner}/{repo}/branches/{self.Branch}",
headers=headers,
params=para,
)
except Exception as e:
logger.exception(e)
logger.warning("Check update failed")
return 0
if list_commit.status_code != 200:
logger.warning(f"Check update failed, code {list_commit.status_code}")
return 0
try:
sha = list_commit.json()["commit"]["sha"]
except Exception as e:
logger.exception(e)
logger.warning("Check update failed when parsing return json")
return 0
local_sha, _, _, _ = self._get_local_commit()
if sha == local_sha:
logger.info("No update")
return 0
try:
get_commit = requests.get(
base + f"{owner}/{repo}/commits/" + local_sha,
headers=headers,
params=para,
)
except Exception as e:
logger.exception(e)
logger.warning("Check update failed")
return 0
if get_commit.status_code != 200:
# for develops
logger.info(
f"Cannot find local commit {local_sha[:8]} in upstream, skip update"
)
return 0
logger.info(f"Update {sha[:8]} available")
return 1
def check_update(self):
if self.state in (0, "failed", "finish"):
self.state = self._check_update()
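    # git_install()/pip_install() wrap the deploy helpers with retry: up to 3
    # attempts, 5 seconds apart, on ExecutionError.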
@retry(ExecutionError, tries=3, delay=5, logger=None)
def git_install(self):
return super().git_install()
@retry(ExecutionError, tries=3, delay=5, logger=None)
def pip_install(self):
return super().pip_install()
def update(self):
logger.hr("Run update")
self.set_repo()
try:
self.git_install()
self.pip_install()
except ExecutionError:
return False
return True
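    # Update flow: run_update() -> _start_update() collects the running Alas
    # instances, _wait_update() waits (at most 10 minutes) for them to stop, then
    # _run_update() performs the update and either reloads the app or restarts the
    # instances if the update failed.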
def run_update(self):
if self.state not in ("failed", 0, 1):
return
self._start_update()
def _start_update(self):
self.state = "start"
instances = ProcessManager.running_instances()
names = []
for alas in instances:
names.append(alas.config_name + "\n")
logger.info("Waiting all running alas finish.")
self._wait_update(instances, names)
def _wait_update(self, instances: List[ProcessManager], names):
if self.state == "cancel":
self.state = 1
self.state = "wait"
self.event.set()
_instances = instances.copy()
start_time = time.time()
while _instances:
for alas in _instances:
if not alas.alive:
_instances.remove(alas)
logger.info(f"Alas [{alas.config_name}] stopped")
logger.info(f"Remains: {[alas.config_name for alas in _instances]}")
if self.state == "cancel":
self.state = 1
self.event.clear()
ProcessManager.restart_processes(instances, self.event)
return
time.sleep(0.25)
if time.time() - start_time > 60 * 10:
logger.warning("Waiting alas shutdown timeout, force kill")
for alas in _instances:
alas.stop()
break
self._run_update(instances, names)
def _run_update(self, instances, names):
self.state = "run update"
logger.info("All alas stopped, start updating")
if self.update():
if State.restart_event is not None:
self.state = "reload"
with open("./config/reloadalas", mode="w") as f:
f.writelines(names)
self._trigger_reload(2)
clearup()
else:
self.state = "finish"
else:
self.state = "failed"
logger.warning("Update failed")
self.event.clear()
ProcessManager.restart_processes(instances, self.event)
return False
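    # _trigger_reload() sets State.restart_event after `delay` seconds so the
    # surrounding server can restart the whole app (see the commented-out note
    # about the reload flag below).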
@staticmethod
def _trigger_reload(delay=2):
def trigger():
# with open("./config/reloadflag", mode="w"):
# # app ended here and uvicorn will restart whole app
# pass
State.restart_event.set()
timer = threading.Timer(delay, trigger)
timer.start()
def schedule_update(self) -> Generator: | th: TaskHandler | 9 | 2023-11-01 07:09:45+00:00 | 12k |
liuzhao1225/YouDub | main.py | [
{
"identifier": "TTS_Clone",
"path": "youdub/tts_xttsv2.py",
"snippet": "class TTS_Clone:\n def __init__(self, model_path=\"tts_models/multilingual/multi-dataset/xtts_v2\", device='cuda', language='zh-cn'):\n logging.info(f'Loading TTS model {model_path}...')\n self.tts = TTS(model_path).to(device)\n self.language = language\n logging.info('Model TTS loaded.')\n \n def inference(self, text, output_path, speaker_wav) -> np.ndarray:\n wav = self.tts.tts(\n text=text, speaker_wav=speaker_wav, language=self.language)\n wav = np.array(wav)\n save_wav(wav, output_path)\n # wav /= np.max(np.abs(wav))\n return wav"
},
{
"identifier": "audio_process_folder",
"path": "youdub/tts_xttsv2.py",
"snippet": "def audio_process_folder(folder, tts: TTS_Clone, speaker_to_voice_type=None, vocal_only=False):\n logging.info(f'TTS processing folder {folder}...')\n logging.info(f'speaker_to_voice_type: {speaker_to_voice_type}')\n with open(os.path.join(folder, 'zh.json'), 'r', encoding='utf-8') as f:\n transcript = json.load(f)\n full_wav = np.zeros((0,))\n if not os.path.exists(os.path.join(folder, 'temp')):\n os.makedirs(os.path.join(folder, 'temp'))\n\n for i, line in enumerate(transcript):\n text = line['text']\n # start = line['start']\n start = line['start']\n last_end = len(full_wav)/24000\n if start > last_end:\n full_wav = np.concatenate(\n (full_wav, np.zeros((int(24000 * (start - last_end)),))))\n start = len(full_wav)/24000\n line['start'] = start\n end = line['end']\n if os.path.exists(os.path.join(folder, 'temp', f'zh_{str(i).zfill(3)}.wav')):\n wav = librosa.load(os.path.join(\n folder, 'temp', f'zh_{str(i).zfill(3)}.wav'), sr=24000)[0]\n else:\n speaker = line.get('speaker', 'SPEAKER_00')\n speaker_wav = os.path.join(folder, 'SPEAKER', f'{speaker}.wav')\n wav = tts.inference(tts_preprocess_text(text), os.path.join(\n folder, 'temp', f'zh_{str(i).zfill(3)}.wav'), speaker_wav)\n time.sleep(0.1)\n # save_wav(wav, )\n wav_adjusted, adjusted_length = adjust_audio_length(wav, os.path.join(folder, 'temp', f'zh_{str(i).zfill(3)}.wav'), os.path.join(\n folder, 'temp', f'zh_{str(i).zfill(3)}_adjusted.wav'), end - start)\n\n wav_adjusted /= wav_adjusted.max()\n line['end'] = line['start'] + adjusted_length\n full_wav = np.concatenate(\n (full_wav, wav_adjusted))\n # load os.path.join(folder, 'en_Instruments.wav')\n # combine with full_wav (the length of the two audio might not be equal)\n transcript = split_text(transcript, punctuations=[\n ',', ';', ':', '。', '?', '!', '\\n','”'])\n with open(os.path.join(folder, 'transcript.json'), 'w', encoding='utf-8') as f:\n json.dump(transcript, f, ensure_ascii=False, indent=4)\n instruments_wav, sr = librosa.load(\n os.path.join(folder, 'en_Instruments.wav'), sr=24000)\n\n len_full_wav = len(full_wav)\n len_instruments_wav = len(instruments_wav)\n\n if len_full_wav > len_instruments_wav:\n # 如果 full_wav 更长,将 instruments_wav 延伸到相同长度\n instruments_wav = np.pad(\n instruments_wav, (0, len_full_wav - len_instruments_wav), mode='constant')\n elif len_instruments_wav > len_full_wav:\n # 如果 instruments_wav 更长,将 full_wav 延伸到相同长度\n full_wav = np.pad(\n full_wav, (0, len_instruments_wav - len_full_wav), mode='constant')\n # 合并两个音频\n full_wav /= np.max(np.abs(full_wav))\n save_wav(full_wav, os.path.join(folder, f'zh_Vocals.wav'))\n # instruments_wav /= np.max(np.abs(instruments_wav))\n instrument_coefficient = 1\n if vocal_only:\n instrument_coefficient = 0\n combined_wav = full_wav + instruments_wav*instrument_coefficient\n combined_wav /= np.max(np.abs(combined_wav))\n save_wav(combined_wav, os.path.join(folder, f'zh.wav'))"
},
{
"identifier": "TTS_Clone",
"path": "youdub/tts_bytedance.py",
"snippet": "class TTS_Clone:\n def __init__(self):\n self.appid = os.getenv('APPID')\n self.access_token = os.getenv('ACCESS_TOKEN')\n self.cluster = \"volcano_tts\"\n self.host = \"openspeech.bytedance.com\"\n self.api_url = f\"https://{self.host}/api/v1/tts\"\n self.header = {\"Authorization\": f\"Bearer;{self.access_token}\"}\n self.request_json = {\n \"app\": {\n \"appid\": self.appid,\n \"token\": \"access_token\",\n \"cluster\": self.cluster\n },\n \"user\": {\n \"uid\": \"388808087185088\"\n },\n \"audio\": {\n \"voice_type\": '',\n \"encoding\": \"wav\",\n \"speed_ratio\": 1,\n \"volume_ratio\": 1.0,\n \"pitch_ratio\": 1.0,\n },\n \"request\": {\n \"reqid\": str(uuid.uuid4()),\n \"text\": \"字节跳动语音合成\",\n \"text_type\": \"plain\",\n \"operation\": \"query\",\n \"with_frontend\": 1,\n \"frontend_type\": \"unitTson\"\n\n }\n }\n self.output_path = r'.'\n if not os.path.exists(self.output_path):\n os.mkdir(self.output_path)\n\n def inference(self, text, output_wav_path, speaker='SPEAKER_00', speaker_to_voice_type={'SPEAKER_00': 'BV701_streaming'}):\n self.request_json['request']['text'] = text\n self.request_json['request']['reqid'] = str(uuid.uuid4())\n self.request_json['audio']['voice_type'] = speaker_to_voice_type.get(\n speaker, 'BV701_streaming')\n max_retries = 5\n timeout_seconds = 10 # Set your desired timeout in seconds\n\n for attempt in range(max_retries):\n try:\n resp = requests.post(self.api_url, json.dumps(\n self.request_json), headers=self.header, timeout=timeout_seconds)\n if resp.status_code == 200:\n data = resp.json()[\"data\"]\n data = base64.b64decode(data)\n with open(output_wav_path, \"wb\") as f:\n f.write(data)\n print(f'{output_wav_path}: {text}')\n return np.frombuffer(data, dtype=np.int16)\n else:\n print(f\"Request failed with status code: {resp.status_code}\")\n if resp.status_code == 500:\n return None\n raise Exception(f\"Request failed with status code: {resp.status_code}\")\n except Exception as e:\n print(f\"Request failed: {e}, retrying ({attempt+1}/{max_retries})\")\n time.sleep(2) # Wait 2 seconds before retrying\n\n print(\"Max retries reached, request failed\")\n return None"
},
{
"identifier": "audio_process_folder",
"path": "youdub/tts_bytedance.py",
"snippet": "def audio_process_folder(folder, tts: TTS_Clone, speaker_to_voice_type, vocal_only=False):\n logging.info(f'TTS processing folder {folder}...')\n logging.info(f'speaker_to_voice_type: {speaker_to_voice_type}')\n with open(os.path.join(folder, 'zh.json'), 'r', encoding='utf-8') as f:\n transcript = json.load(f)\n full_wav = np.zeros((0,))\n if not os.path.exists(os.path.join(folder, 'temp')):\n os.makedirs(os.path.join(folder, 'temp'))\n\n for i, line in enumerate(transcript):\n text = line['text']\n # start = line['start']\n start = line['start']\n last_end = len(full_wav)/24000\n if start > last_end:\n full_wav = np.concatenate(\n (full_wav, np.zeros((int(24000 * (start - last_end)),))))\n start = len(full_wav)/24000\n line['start'] = start\n end = line['end']\n if os.path.exists(os.path.join(folder, 'temp', f'zh_{str(i).zfill(3)}.wav')):\n wav = librosa.load(os.path.join(\n folder, 'temp', f'zh_{str(i).zfill(3)}.wav'), sr=24000)[0]\n else:\n wav = tts.inference(tts_preprocess_text(text), os.path.join(\n folder, 'temp', f'zh_{str(i).zfill(3)}.wav'), speaker=line.get('speaker', 'SPEAKER_00'), speaker_to_voice_type=speaker_to_voice_type)\n time.sleep(0.1)\n # save_wav(wav, )\n wav_adjusted, adjusted_length = adjust_audio_length(wav, os.path.join(folder, 'temp', f'zh_{str(i).zfill(3)}.wav'), os.path.join(\n folder, 'temp', f'zh_{str(i).zfill(3)}_adjusted.wav'), end - start)\n\n wav_adjusted /= wav_adjusted.max()\n line['end'] = line['start'] + adjusted_length\n full_wav = np.concatenate(\n (full_wav, wav_adjusted))\n # load os.path.join(folder, 'en_Instruments.wav')\n # combine with full_wav (the length of the two audio might not be equal)\n transcript = split_text(transcript, punctuations=[\n ',', ';', ':', '。', '?', '!', '\\n', '”'])\n with open(os.path.join(folder, 'transcript.json'), 'w', encoding='utf-8') as f:\n json.dump(transcript, f, ensure_ascii=False, indent=4)\n instruments_wav, sr = librosa.load(\n os.path.join(folder, 'en_Instruments.wav'), sr=24000)\n\n len_full_wav = len(full_wav)\n len_instruments_wav = len(instruments_wav)\n\n if len_full_wav > len_instruments_wav:\n # 如果 full_wav 更长,将 instruments_wav 延伸到相同长度\n instruments_wav = np.pad(\n instruments_wav, (0, len_full_wav - len_instruments_wav), mode='constant')\n elif len_instruments_wav > len_full_wav:\n # 如果 instruments_wav 更长,将 full_wav 延伸到相同长度\n full_wav = np.pad(\n full_wav, (0, len_instruments_wav - len_full_wav), mode='constant')\n # 合并两个音频\n full_wav /= np.max(np.abs(full_wav))\n save_wav(full_wav, os.path.join(folder, f'zh_Vocals.wav'))\n # instruments_wav /= np.max(np.abs(instruments_wav))\n instrument_coefficient = 1\n if vocal_only:\n instrument_coefficient = 0\n combined_wav = full_wav + instruments_wav*instrument_coefficient\n combined_wav /= np.max(np.abs(combined_wav))\n save_wav(combined_wav, os.path.join(folder, f'zh.wav'))"
},
{
"identifier": "VideoProcessor",
"path": "youdub/asr_whisperX.py",
"snippet": "class VideoProcessor:\n def __init__(self, model='large', download_root='models/ASR/whisper', device='cuda', batch_size=32, diarize=False):\n logging.info(f'Loading model {model} from {download_root}...')\n self.device = device\n self.batch_size = batch_size\n self.model = model\n # self.model = whisperx.load_model(model, download_root=download_root, device=device)\n if model == 'large-v3':\n self.whisper_model = whisper.load_model(model, download_root=download_root, device=device) # whisperx doesn't support large-v3 yet, so use whisper instead\n else:\n self.whisper_model = whisperx.load_model(model, download_root=download_root, device=device)\n self.diarize = diarize\n if self.diarize:\n self.diarize_model = whisperx.DiarizationPipeline(use_auth_token=os.getenv('HF_TOKEN'), device=device)\n self.embedding_model = Model.from_pretrained(\"pyannote/embedding\", use_auth_token=os.getenv('HF_TOKEN'))\n self.embedding_inference = Inference(\n self.embedding_model, window=\"whole\")\n self.voice_type_embedding = dict()\n voice_type_folder = r'voice_type'\n for file in os.listdir(voice_type_folder):\n if file.endswith('.npy'):\n voice_type = file.replace('.npy', '')\n embedding = np.load(os.path.join(voice_type_folder, file))\n self.voice_type_embedding[voice_type] = embedding\n logging.info(f'Loaded {len(self.voice_type_embedding)} voice types.')\n\n self.language_code = 'en'\n self.align_model, self.meta_data = whisperx.load_align_model(language_code=self.language_code, device=device)\n self.vocal_remover = Demucs(model='htdemucs_ft')\n logging.info('Model loaded.')\n\n def transcribe_audio(self, wav_path):\n logging.debug(f'Transcribing audio {wav_path}...')\n if self.model == 'large-v3':\n rec_result = self.whisper_model.transcribe(\n wav_path, verbose=True, condition_on_previous_text=True, max_initial_timestamp=None)\n else:\n rec_result = self.whisper_model.transcribe(\n wav_path, batch_size=self.batch_size, print_progress=True, combined_progress=True)\n \n if rec_result['language'] == 'nn':\n return None\n if rec_result['language'] != self.language_code:\n self.language_code = rec_result['language']\n print(self.language_code)\n self.align_model, self.meta_data = whisperx.load_align_model(language_code=self.language_code, device=self.device)\n \n rec_result = whisperx.align(rec_result['segments'], self.align_model, self.meta_data, wav_path, self.device, return_char_alignments=False, print_progress=True)\n return rec_result\n \n def diarize_transcribed_audio(self, wav_path, transcribe_result):\n logging.info(f'Diarizing audio {wav_path}...')\n diarize_segments = self.diarize_model(wav_path)\n result = whisperx.assign_word_speakers(\n diarize_segments, transcribe_result)\n return result\n \n def get_speaker_embedding(self, json_path):\n with open(json_path, 'r', encoding='utf-8') as f:\n result = json.load(f)\n wav_folder = os.path.dirname(json_path)\n wav_path = os.path.join(wav_folder, 'en_Vocals.wav')\n audio_data, samplerate = sf.read(wav_path)\n speaker_dict = dict()\n length = len(audio_data)\n delay = 0.1\n for segment in result:\n start = max(0, int((segment['start'] - delay) * samplerate))\n end = min(int((segment['end']+delay) * samplerate), length)\n speaker_segment_audio = audio_data[start:end]\n speaker_dict[segment['speaker']] = np.concatenate((speaker_dict.get(\n segment['speaker'], np.zeros((0,2))),speaker_segment_audio))\n speaker_folder = os.path.join(wav_folder, 'SPEAKER')\n if not os.path.exists(speaker_folder):\n os.makedirs(speaker_folder)\n for speaker, 
audio in speaker_dict.items():\n speaker_file_path = os.path.join(\n speaker_folder, f\"{speaker}.wav\")\n sf.write(speaker_file_path, audio, samplerate)\n \n for file in os.listdir(speaker_folder):\n if file.startswith('SPEAKER') and file.endswith('.wav'):\n wav_path = os.path.join(speaker_folder, file)\n embedding = self.embedding_inference(wav_path)\n np.save(wav_path.replace('.wav', '.npy'), embedding)\n \n def find_closest_unique_voice_type(self, speaker_embedding):\n speaker_to_voice_type = {}\n available_speakers = set(speaker_embedding.keys())\n available_voice_types = set(self.voice_type_embedding.keys())\n\n while available_speakers and available_voice_types:\n min_distance = float('inf')\n closest_speaker = None\n closest_voice_type = None\n\n for speaker in available_speakers:\n sp_embedding = speaker_embedding[speaker]\n for voice_type in available_voice_types:\n vt_embedding = self.voice_type_embedding[voice_type]\n distance = cosine(sp_embedding, vt_embedding)\n\n if distance < min_distance:\n min_distance = distance\n closest_speaker = speaker\n closest_voice_type = voice_type\n\n if closest_speaker and closest_voice_type:\n speaker_to_voice_type[closest_speaker] = closest_voice_type\n available_speakers.remove(closest_speaker)\n available_voice_types.remove(closest_voice_type)\n\n return speaker_to_voice_type\n\n def get_speaker_to_voice_type_dict(self, json_path):\n self.get_speaker_embedding(json_path)\n wav_folder = os.path.dirname(json_path)\n speaker_folder = os.path.join(wav_folder, 'SPEAKER')\n speaker_embedding = dict()\n for file in os.listdir(speaker_folder):\n if file.startswith('SPEAKER') and file.endswith('.npy'):\n speaker_name = file.replace('.npy', '')\n embedding = np.load(os.path.join(speaker_folder, file))\n speaker_embedding[speaker_name] = embedding\n\n return self.find_closest_unique_voice_type(speaker_embedding)\n \n def extract_audio_from_video(self, video_path, audio_path):\n logging.info(f'Extracting audio from video {video_path}...')\n video = VideoFileClip(video_path)\n video.audio.write_audiofile(audio_path)\n output_dir = os.path.dirname(audio_path)\n if not os.path.exists(os.path.join(output_dir, 'en_Vocals.wav')) or not os.path.exists(os.path.join(output_dir, 'en_Instruments.wav')):\n self.vocal_remover.inference(\n audio_path, os.path.dirname(audio_path))\n logging.info(f'Audio extracted and saved to {audio_path}.')\n\n def save_transcription_to_json(self, transcription, json_path):\n logging.debug(f'Saving transcription to {json_path}...')\n if transcription is None:\n transcription_with_timestemp = []\n else:\n transcription_with_timestemp = [{'start': round(segment['start'], 3), 'end': round(\n segment['end'], 3), 'text': segment['text'].strip(), 'speaker': segment.get('speaker', 'SPEAKER_00')} for segment in transcription['segments'] if segment['text'] != '']\n\n transcription_with_timestemp = merge_segments(\n transcription_with_timestemp)\n with open(json_path.replace('en.json', 'subtitle.json'), 'w', encoding='utf-8') as f:\n # f.write(transcription_with_timestemp)\n json.dump(\n transcription_with_timestemp, f, ensure_ascii=False, indent=4)\n\n transcription_with_timestemp = merge_segments(\n transcription_with_timestemp, ending='.?!。?!')\n with open(json_path, 'w', encoding='utf-8') as f:\n # f.write(transcription_with_timestemp)\n json.dump(\n transcription_with_timestemp, f, ensure_ascii=False, indent=8)\n\n logging.debug('Transcription saved.')\n\n def process_video(self, video_path, output_folder):\n logging.debug('Processing 
video...')\n if not os.path.exists(output_folder):\n os.makedirs(output_folder)\n if not os.path.exists(os.path.join(output_folder, 'en_Vocals.wav')):\n self.extract_audio_from_video(video_path, os.path.join(output_folder, 'en.wav'))\n if not os.path.exists(os.path.join(output_folder, 'en.json')):\n transcription = self.transcribe_audio(\n os.path.join(output_folder, 'en_Vocals.wav'))\n if self.diarize:\n transcription = self.diarize_transcribed_audio(\n os.path.join(output_folder, 'en.wav'), transcription)\n self.save_transcription_to_json(\n transcription, os.path.join(output_folder, 'en.json'))\n if not os.path.exists(os.path.join(output_folder, 'speaker_to_voice_type.json')):\n if self.diarize:\n speaker_to_voice_type = self.get_speaker_to_voice_type_dict(\n os.path.join(output_folder, 'en.json'))\n with open(os.path.join(output_folder, 'speaker_to_voice_type.json'), 'w', encoding='utf-8') as f:\n json.dump(speaker_to_voice_type, f, ensure_ascii=False, indent=4)\n else:\n speaker_to_voice_type = {'SPEAKER_00': 'BV701_streaming'}\n else:\n with open(os.path.join(output_folder, 'speaker_to_voice_type.json'), 'r', encoding='utf-8') as f:\n speaker_to_voice_type = json.load(f)\n logging.debug('Video processing completed.')\n return speaker_to_voice_type"
},
{
"identifier": "replace_audio_ffmpeg",
"path": "youdub/video_postprocess.py",
"snippet": "def replace_audio_ffmpeg(input_video: str, input_audio: str, input_subtitles: str, output_path: str, fps=30) -> None:\n input_folder = os.path.dirname(input_video)\n dst_folder = os.path.join(input_folder, '0_finished')\n if not os.path.exists(dst_folder):\n os.mkdir(dst_folder)\n \n if os.path.exists(output_path):\n command = f'move \"{input_video}\" \"{dst_folder}\"'\n subprocess.Popen(command, shell=True)\n return\n\n # Extract the video name from the input video path\n video_name = os.path.basename(input_video)\n\n # Replace video file extension with '.srt' for subtitles\n srt_name = video_name.replace('.mp4', '.srt').replace(\n '.mkv', '.srt').replace('.avi', '.srt').replace('.flv', '.srt')\n\n # Construct the path for the subtitles file\n srt_path = os.path.join(os.path.dirname(input_audio), srt_name)\n\n # Convert subtitles from JSON to SRT format\n convert_json_to_srt(input_subtitles, srt_path)\n\n # Determine the output folder and define a temporary file path\n output_folder = os.path.dirname(output_path)\n tmp = os.path.join(output_folder, 'tmp.mp4')\n\n # Prepare a list to hold FFmpeg commands\n commands = []\n\n # FFmpeg command to speed up the video by 1.05 times\n speed_up = 1.05\n \n if speed_up == 1:\n tmp = output_path\n commands.append(f'ffmpeg -i \"{input_video}\" -i \"{input_audio}\" -vf \"subtitles={srt_path}:force_style=\\'FontName=Arial,FontSize=20,PrimaryColour=&HFFFFFF,OutlineColour=&H000000,Outline=2,WrapStyle=2\\'\" -c:v libx264 -r {fps} -c:a aac -map 0:v:0 -map 1:a:0 \"{tmp}\" -y'.replace('\\\\', '/'))\n \n # commands.append(f'ffmpeg -i \"{input_video}\" -i \"{input_audio}\" -c:v libx264 -r {fps} -c:a aac -map 0:v:0 -map 1:a:0 \"{tmp}\" -y'.replace('\\\\', '/'))\n \n if speed_up != 1:\n commands.append(\n f'ffmpeg -i \"{tmp}\" -vf \"setpts={1/speed_up}*PTS\" -af \"atempo={speed_up}\" -c:v libx264 -c:a aac \"{output_path}\" -y'.replace('\\\\', '/'))\n\n # Command to delete the temporary file\n commands.append(f'del \"{tmp}\"')\n \n # move input video to dst folder\n commands.append(f'move \"{input_video}\" \"{dst_folder}\"')\n\n # Add an 'exit' command to close the command prompt window after execution\n commands.append('exit')\n\n # Join the commands with '&&' to ensure sequential execution\n command = ' && '.join(commands)\n\n # Execute the combined FFmpeg command\n print(command)\n subprocess.Popen(command, shell=True)"
},
{
"identifier": "Translator",
"path": "youdub/translation_unsafe.py",
"snippet": "class Translator:\n def __init__(self):\n self.system_message = system_message\n self.messages = []\n\n def translate(self, transcript, original_fname):\n print('总结中...')\n retry = 1\n summary = ''\n while retry >= 0:\n try:\n response = openai.ChatCompletion.create(\n model=model_name,\n messages=[{\"role\": \"system\", \"content\": f'你是一个科普专家。你的目的是总结文本中的主要科学知识。{magic}!'}] + [{\"role\": \"user\", \"content\": f\"。简要概括这个视频的主要内容。\\n标题:{original_fname}\\n内容:{''.join(transcript)}\\n标题:{original_fname}\\n请你用中文给视频写一个“标题”、“主要内容”和“专业名词”,谢谢。\"},], timeout=240)\n summary = response.choices[0].message.content\n print(summary)\n retry = -1\n except Exception as e:\n retry -= 1\n print('总结失败')\n print(e)\n print('重新总结')\n time.sleep(1)\n if retry == 0:\n print('总结失败')\n \n self.fixed_messages = [{'role': 'user', 'content': '请翻译:Hello!'}, {\n 'role': 'assistant', 'content': f'“你好!”'}, {'role': 'user', 'content': '请翻译:Animation videos explaining things with optimistic nihilism since 2,013.'}, {\n 'role': 'assistant', 'content': f'“从2013年开始,我们以乐观的虚无主义制作动画,进行科普。”'}]\n # self.fixed_messages = []\n self.messages = []\n final_result = []\n print('\\n翻译中...')\n for sentence in transcript:\n if not sentence:\n continue\n retry = 20\n retry_message = ''\n\n # print(messages)\n # [{\"role\": \"system\", \"content\": summary + '\\n' + self.system_message}] + self.fixed_messages + \\\n history = \" \".join(final_result[-30:])\n while retry > 0:\n retry -= 1\n messages = [\n {\"role\": \"system\", \"content\": f'请你扮演科普专家的角色。这是一个为视频配音设计的翻译任务,将各种语言精准而优雅地转化为尽量简短的中文。请在翻译时避免生硬的直译,而是追求自然流畅、贴近原文而又不失文学韵味的表达。在这个过程中,请特别注意维护中文特有的语序和句式结构,使翻译文本既忠于原意又符合中文的表达习惯。{magic}'}] + self.fixed_messages + [{\"role\": \"user\", \"content\": f'{summary}\\n{self.system_message}\\n请将Transformer, token等人工智能相关的专业名词保留原文。长句分成几个短句。\\n历史内容:\\n{history}\\n以上为参考的历史内容。\\n{retry_message}\\n深呼吸,请正确翻译这句英文:“{sentence}”翻译成简洁中文。'},]\n try:\n response = openai.ChatCompletion.create(\n model=model_name,\n messages=messages,\n temperature=0.3,\n timeout=60,\n )\n response = response.choices[0].message.content\n result = response.strip()\n if retry != 0:\n if '\\n' in result:\n retry_message += '无视前面的内容,仅仅只翻译下面的英文,请简短翻译,只输出翻译结果。'\n raise Exception('存在换行')\n if '翻译' in result:\n retry_message += '无视前面的内容,请不要出现“翻译”字样,仅仅只翻译下面的英文,请简短翻译,只输出翻译结果。'\n raise Exception('存在\"翻译\"字样')\n if '这句话的意思是' in result:\n retry_message += '无视前面的内容,请不要出现“这句话的意思是”字样,仅仅只翻译下面的英文,请简短翻译,只输出翻译结果。'\n raise Exception('存在\"这句话的意思是\"字样')\n if '这句话的意译是' in result:\n retry_message += '无视前面的内容,请不要出现“这句话的意译是”字样,仅仅只翻译下面的英文,请简短翻译,只输出翻译结果。'\n raise Exception('存在\"这句话的意译是\"字样')\n if '这句' in result:\n retry_message += '无视前面的内容,请不要出现“这句话”字样,仅仅只翻译下面的英文,请简短翻译,只输出翻译结果。'\n raise Exception('存在\"这句\"字样')\n if '深呼吸' in result:\n retry_message += '无视前面的内容,请不要出现“深呼吸”字样,仅仅只翻译下面的英文,请简短翻译,只输出翻译结果。'\n raise Exception('存在\"深呼吸\"字样')\n if (result.startswith('“') and result.endswith('”')) or (result.startswith('\"') and result.endswith('\"')):\n result = result[1:-1]\n if len(sentence) <= 10:\n if len(result) > 20:\n retry_message += '注意:仅仅只翻译下面的内容,请简短翻译,只输出翻译结果。'\n raise Exception('翻译过长')\n elif len(result) > len(sentence)*0.75:\n retry_message += '注意:仅仅只翻译下面的内容,请简短翻译,只输出翻译结果。'\n raise Exception('翻译过长')\n result = translation_postprocess(result)\n \n if result:\n self.messages.append(\n {'role': 'user', 'content': f\"{sentence}\"})\n self.messages.append(\n {'role': 'assistant', 'content': f'{result}'})\n print(sentence)\n print(response)\n print(f'最终结果:{result}')\n print('='*50)\n 
final_result.append(result)\n retry = 0\n except Exception as e:\n print(sentence)\n print(response)\n print(e)\n print('翻译失败')\n retry_message += f''\n time.sleep(0.5)\n return final_result, summary"
},
{
"identifier": "split_text",
"path": "youdub/utils.py",
"snippet": "def split_text(input_data,\n punctuations=['。', '?', '!', '\\n', \"”\"]):\n # Chinese punctuation marks for sentence ending\n\n # Function to check if a character is a Chinese ending punctuation\n def is_punctuation(char):\n return char in punctuations\n\n # Process each item in the input data\n output_data = []\n for item in input_data:\n start = item[\"start\"]\n text = item[\"text\"]\n speaker = item.get(\"speaker\", \"SPEAKER_00\")\n sentence_start = 0\n\n # Calculate the duration for each character\n duration_per_char = (item[\"end\"] - item[\"start\"]) / len(text)\n for i, char in enumerate(text):\n # If the character is a punctuation, split the sentence\n if not is_punctuation(char) and i != len(text) - 1:\n continue\n if i - sentence_start < 5 and i != len(text) - 1:\n continue\n if i < len(text) - 1 and is_punctuation(text[i+1]):\n continue\n sentence = text[sentence_start:i+1]\n sentence_end = start + duration_per_char * len(sentence)\n\n # Append the new item\n output_data.append({\n \"start\": round(start, 3),\n \"end\": round(sentence_end, 3),\n \"text\": sentence,\n \"speaker\": speaker\n })\n\n # Update the start for the next sentence\n start = sentence_end\n sentence_start = i + 1\n\n return output_data"
}
] | import os
import logging
import json
import re
import time
import numpy as np
import re
import argparse
from tqdm import tqdm
from youdub.tts_xttsv2 import TTS_Clone, audio_process_folder
from youdub.tts_bytedance import TTS_Clone as TTS_Clone_bytedance
from youdub.tts_bytedance import audio_process_folder as audio_process_folder_bytedance
from youdub.asr_whisperX import VideoProcessor
from youdub.video_postprocess import replace_audio_ffmpeg
from youdub.translation_unsafe import Translator
from youdub.utils import split_text
from multiprocessing import Process | 9,359 | # from youdub.tts_bytedance import TTS_Clone as TTS_Clone_bytedance, audio_process_folder as audio_process_folder_bytedance
allowed_chars = '[^a-zA-Z0-9_ .]'
def translate_from_folder(folder, translator: Translator, original_fname):
with open(os.path.join(folder, 'en.json'), mode='r', encoding='utf-8') as f:
transcript = json.load(f)
_transcript = [sentence['text'] for sentence in transcript if sentence['text']]
result = ['']
while len(result) != len(_transcript):
result, summary = translator.translate(_transcript, original_fname)
for i, sentence in enumerate(result):
transcript[i]['text'] = sentence
    transcript = split_text(transcript) # With whisperX the transcript is already split into sentences, so manual splitting is no longer needed; this also avoids breaking `“你好。”` into `“你好。` and `”`
with open(os.path.join(folder, 'zh.json'), 'w', encoding='utf-8') as f:
json.dump(transcript, f, ensure_ascii=False, indent=4)
with open(os.path.join(folder, 'summary.txt'), 'w', encoding='utf-8') as f:
f.write(summary)
# def main(input_folder, output_folder, diarize=False):
def main():
parser = argparse.ArgumentParser(description='Process some videos.')
parser.add_argument('--input_folders', type=str, nargs='+', required=True,
help='The list of input folders containing the videos')
parser.add_argument('--output_folders', type=str, nargs='+', required=True, help='The list of output folders where the processed videos will be stored')
parser.add_argument('--vocal_only_folders', type=str, nargs='+', default=[],
help='The list of input folders containing the videos that only need vocal for the final result.')
parser.add_argument('--diarize', action='store_true',
help='Enable diarization')
args = parser.parse_args()
if len(args.input_folders) != len(args.output_folders):
raise ValueError(
"The number of input folders must match the number of output folders.")
print('='*50)
print('Initializing...')
if args.diarize:
print('Diarization enabled.')
print('='*50)
diarize = args.diarize
processor = VideoProcessor(diarize=diarize)
translator = Translator()
tts = TTS_Clone()
tts_bytedance = TTS_Clone_bytedance()
for input_folder, output_folder in zip(args.input_folders, args.output_folders):
if input_folder in args.vocal_only_folders:
vocal_only = True
print(f'Vocal only mode enabled for {input_folder}.')
else:
vocal_only = False
if not os.path.exists(os.path.join(input_folder, '0_finished')):
os.makedirs(os.path.join(input_folder, '0_finished'))
if not os.path.exists(output_folder):
os.makedirs(output_folder)
if not os.path.exists(os.path.join(output_folder, '0_to_upload')):
os.makedirs(os.path.join(output_folder, '0_to_upload'))
if not os.path.exists(os.path.join(output_folder, '0_finished')):
os.makedirs(os.path.join(output_folder, '0_finished'))
print('='*50)
print(
f'Video processing started for {input_folder} to {output_folder}.')
print('='*50)
logging.info('Processing folder...')
files = os.listdir(input_folder)
t = tqdm(files, desc="Processing files")
video_lists = []
for file in t:
print('='*50)
t.set_description(f"Processing {file}")
print('='*50)
if file.endswith('.mp4') or file.endswith('.mkv') or file.endswith('.avi') or file.endswith('.flv'):
original_fname = file[:-4]
new_filename = re.sub(r'[^a-zA-Z0-9_. ]', '', file)
new_filename = re.sub(r'\s+', ' ', new_filename)
new_filename = new_filename.strip()
os.rename(os.path.join(input_folder, file),
os.path.join(input_folder, new_filename))
file = new_filename
video_lists.append(file)
input_path = os.path.join(input_folder, file)
output_path = os.path.join(output_folder, file[:-4]).strip()
if not os.path.exists(output_path):
os.makedirs(output_path)
speaker_to_voice_type = processor.process_video(
input_path, output_path)
else:
continue
if not os.path.exists(os.path.join(output_path, 'zh.json')):
translate_from_folder(output_path, translator, original_fname)
if len(speaker_to_voice_type) == 1:
print('Only one speaker detected. Using TTS.')
| # from youdub.tts_bytedance import TTS_Clone as TTS_Clone_bytedance, audio_process_folder as audio_process_folder_bytedance
allowed_chars = '[^a-zA-Z0-9_ .]'
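# translate_from_folder(): loads `en.json`, translates the sentence list with the
# Translator (retrying until the result count matches the source), re-splits the
# text, then writes `zh.json` and `summary.txt` alongside it.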
def translate_from_folder(folder, translator: Translator, original_fname):
with open(os.path.join(folder, 'en.json'), mode='r', encoding='utf-8') as f:
transcript = json.load(f)
_transcript = [sentence['text'] for sentence in transcript if sentence['text']]
result = ['']
while len(result) != len(_transcript):
result, summary = translator.translate(_transcript, original_fname)
for i, sentence in enumerate(result):
transcript[i]['text'] = sentence
    transcript = split_text(transcript) # With whisperX the transcript is already split into sentences, so manual splitting is no longer needed; this also avoids breaking `“你好。”` into `“你好。` and `”`
with open(os.path.join(folder, 'zh.json'), 'w', encoding='utf-8') as f:
json.dump(transcript, f, ensure_ascii=False, indent=4)
with open(os.path.join(folder, 'summary.txt'), 'w', encoding='utf-8') as f:
f.write(summary)
# def main(input_folder, output_folder, diarize=False):
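# main(): parses the CLI arguments, then for each input/output folder pair runs
# ASR/diarization (VideoProcessor), translation (translate_from_folder) and TTS
# audio generation for every video file it finds.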
def main():
parser = argparse.ArgumentParser(description='Process some videos.')
parser.add_argument('--input_folders', type=str, nargs='+', required=True,
help='The list of input folders containing the videos')
parser.add_argument('--output_folders', type=str, nargs='+', required=True, help='The list of output folders where the processed videos will be stored')
parser.add_argument('--vocal_only_folders', type=str, nargs='+', default=[],
help='The list of input folders containing the videos that only need vocal for the final result.')
parser.add_argument('--diarize', action='store_true',
help='Enable diarization')
args = parser.parse_args()
if len(args.input_folders) != len(args.output_folders):
raise ValueError(
"The number of input folders must match the number of output folders.")
print('='*50)
print('Initializing...')
if args.diarize:
print('Diarization enabled.')
print('='*50)
diarize = args.diarize
processor = VideoProcessor(diarize=diarize)
translator = Translator()
tts = TTS_Clone()
tts_bytedance = TTS_Clone_bytedance()
for input_folder, output_folder in zip(args.input_folders, args.output_folders):
if input_folder in args.vocal_only_folders:
vocal_only = True
print(f'Vocal only mode enabled for {input_folder}.')
else:
vocal_only = False
if not os.path.exists(os.path.join(input_folder, '0_finished')):
os.makedirs(os.path.join(input_folder, '0_finished'))
if not os.path.exists(output_folder):
os.makedirs(output_folder)
if not os.path.exists(os.path.join(output_folder, '0_to_upload')):
os.makedirs(os.path.join(output_folder, '0_to_upload'))
if not os.path.exists(os.path.join(output_folder, '0_finished')):
os.makedirs(os.path.join(output_folder, '0_finished'))
print('='*50)
print(
f'Video processing started for {input_folder} to {output_folder}.')
print('='*50)
logging.info('Processing folder...')
files = os.listdir(input_folder)
t = tqdm(files, desc="Processing files")
video_lists = []
for file in t:
print('='*50)
t.set_description(f"Processing {file}")
print('='*50)
            if file.endswith(('.mp4', '.mkv', '.avi', '.flv')):
original_fname = file[:-4]
new_filename = re.sub(r'[^a-zA-Z0-9_. ]', '', file)
new_filename = re.sub(r'\s+', ' ', new_filename)
new_filename = new_filename.strip()
os.rename(os.path.join(input_folder, file),
os.path.join(input_folder, new_filename))
file = new_filename
video_lists.append(file)
input_path = os.path.join(input_folder, file)
output_path = os.path.join(output_folder, file[:-4]).strip()
if not os.path.exists(output_path):
os.makedirs(output_path)
speaker_to_voice_type = processor.process_video(
input_path, output_path)
else:
continue
if not os.path.exists(os.path.join(output_path, 'zh.json')):
translate_from_folder(output_path, translator, original_fname)
if len(speaker_to_voice_type) == 1:
print('Only one speaker detected. Using TTS.') | audio_process_folder_bytedance( | 3 | 2023-11-02 08:21:31+00:00 | 12k |
BrianPugh/cyclopts | tests/test_group_extractors.py | [
{
"identifier": "App",
"path": "cyclopts/core.py",
"snippet": "class App:\n _name: Optional[Tuple[str, ...]] = field(default=None, alias=\"name\", converter=optional_to_tuple_converter)\n\n _help: Optional[str] = field(default=None, alias=\"help\")\n\n usage: Optional[str] = field(default=None)\n\n # Everything below must be kw_only\n\n default_command: Optional[Callable] = field(default=None, converter=_validate_default_command, kw_only=True)\n default_parameter: Optional[Parameter] = field(default=None, kw_only=True)\n\n version: Union[None, str, Callable] = field(factory=_default_version, kw_only=True)\n version_flags: Tuple[str, ...] = field(\n default=[\"--version\"],\n on_setattr=attrs.setters.frozen,\n converter=to_tuple_converter,\n kw_only=True,\n )\n\n show: bool = field(default=True, kw_only=True)\n\n help_flags: Tuple[str, ...] = field(\n default=[\"--help\", \"-h\"],\n on_setattr=attrs.setters.frozen,\n converter=to_tuple_converter,\n kw_only=True,\n )\n\n # This can ONLY ever be Tuple[Union[Group, str], ...] due to converter.\n # The other types is to make mypy happy for Cyclopts users.\n group: Union[Group, str, Tuple[Union[Group, str], ...]] = field(\n default=None, converter=to_tuple_converter, kw_only=True\n )\n\n group_arguments: Group = field(\n default=None,\n converter=GroupConverter(Group.create_default_arguments()),\n kw_only=True,\n )\n group_parameters: Group = field(\n default=None,\n converter=GroupConverter(Group.create_default_parameters()),\n kw_only=True,\n )\n group_commands: Group = field(\n default=None,\n converter=GroupConverter(Group.create_default_commands()),\n kw_only=True,\n )\n\n converter: Optional[Callable] = field(default=None, kw_only=True)\n validator: List[Callable] = field(default=None, converter=to_list_converter, kw_only=True)\n\n ######################\n # Private Attributes #\n ######################\n # Maps CLI-name of a command to a function handle.\n _commands: Dict[str, \"App\"] = field(init=False, factory=dict)\n\n _parents: List[\"App\"] = field(init=False, factory=list)\n\n _meta: \"App\" = field(init=False, default=None)\n _meta_parent: \"App\" = field(init=False, default=None)\n\n def __attrs_post_init__(self):\n if self.help_flags:\n self.command(\n self.help_print,\n name=self.help_flags,\n help_flags=[],\n version_flags=[],\n help=\"Display this message and exit.\",\n )\n if self.version_flags:\n self.command(\n self.version_print,\n name=self.version_flags,\n help_flags=[],\n version_flags=[],\n help=\"Display application version.\",\n )\n\n ###########\n # Methods #\n ###########\n\n @property\n def name(self) -> Tuple[str, ...]:\n \"\"\"Application name(s). 
Dynamically derived if not previously set.\"\"\"\n if self._name:\n return self._name\n elif self.default_command is None:\n name = Path(sys.argv[0]).name\n if name == \"__main__.py\":\n name = _get_root_module_name()\n return (name,)\n else:\n return (_format_name(self.default_command.__name__),)\n\n @property\n def help(self) -> str:\n if self._help is not None:\n return self._help\n elif self.default_command is None:\n # Try and fallback to a meta-app docstring.\n if self._meta is None:\n return \"\"\n else:\n return self.meta.help\n elif self.default_command.__doc__ is None:\n return \"\"\n else:\n return self.default_command.__doc__\n\n @help.setter\n def help(self, value):\n self._help = value\n\n def version_print(self) -> None:\n \"\"\"Print the application version.\"\"\"\n print(self.version() if callable(self.version) else self.version)\n\n def __getitem__(self, key: str) -> \"App\":\n \"\"\"Get the subapp from a command string.\n\n All commands get registered to Cyclopts as subapps.\n The actual function handler is at ``app[key].default_command``.\n \"\"\"\n if self._meta:\n with suppress(KeyError):\n return self.meta[key]\n return self._commands[key]\n\n def __contains__(self, k: str) -> bool:\n if k in self._commands:\n return True\n if self._meta_parent:\n return k in self._meta_parent\n return False\n\n @property\n def meta(self) -> \"App\":\n if self._meta is None:\n self._meta = type(self)(\n group_commands=copy(self.group_commands),\n group_arguments=copy(self.group_arguments),\n group_parameters=copy(self.group_parameters),\n )\n self._meta._meta_parent = self\n return self._meta\n\n def _parse_command_chain(self, tokens):\n command_chain = []\n app = self\n apps = [app]\n unused_tokens = tokens\n\n command_mapping = _combined_meta_command_mapping(app)\n\n for i, token in enumerate(tokens):\n if token in self.help_flags:\n break\n try:\n app = command_mapping[token]\n apps.append(app)\n unused_tokens = tokens[i + 1 :]\n except KeyError:\n break\n command_chain.append(token)\n command_mapping = _combined_meta_command_mapping(app)\n\n return command_chain, apps, unused_tokens\n\n def command(\n self,\n obj: Optional[Callable] = None,\n name: Union[None, str, Iterable[str]] = None,\n **kwargs,\n ) -> Callable:\n \"\"\"Decorator to register a function as a CLI command.\n\n Parameters\n ----------\n obj: Optional[Callable]\n Function or :class:`App` to be registered as a command.\n name: Union[None, str, Iterable[str]]\n Name(s) to register the ``obj`` to.\n If not provided, defaults to:\n\n * If registering an :class:`App`, then the app's name.\n * If registering a function, then the function's name.\n `**kwargs`\n Any argument that :class:`App` can take.\n \"\"\"\n if obj is None: # Called ``@app.command(...)``\n return partial(self.command, name=name, **kwargs)\n\n if isinstance(obj, App):\n app = obj\n\n if app._name is None and name is None:\n raise ValueError(\"Sub-app MUST have a name specified.\")\n\n if kwargs:\n raise ValueError(\"Cannot supplied additional configuration when registering a sub-App.\")\n else:\n validate_command(obj)\n kwargs.setdefault(\"help_flags\", [])\n kwargs.setdefault(\"version_flags\", [])\n if \"group_commands\" not in kwargs:\n kwargs[\"group_commands\"] = copy(self.group_commands)\n if \"group_parameters\" not in kwargs:\n kwargs[\"group_parameters\"] = copy(self.group_parameters)\n if \"group_arguments\" not in kwargs:\n kwargs[\"group_arguments\"] = copy(self.group_arguments)\n app = App(default_command=obj, **kwargs)\n # app.name is 
handled below\n\n if name is None:\n name = app.name\n else:\n app._name = name\n\n for n in to_tuple_converter(name):\n if n in self:\n raise CommandCollisionError(f'Command \"{n}\" already registered.')\n\n # Warning: app._name may not align with command name\n self._commands[n] = app\n\n app._parents.append(self)\n\n return obj\n\n def default(\n self,\n obj: Optional[Callable] = None,\n *,\n converter=None,\n validator=None,\n ):\n \"\"\"Decorator to register a function as the default action handler.\"\"\"\n if obj is None: # Called ``@app.default_command(...)``\n return partial(self.default, converter=converter, validator=validator)\n\n if isinstance(obj, App): # Registering a sub-App\n raise TypeError(\"Cannot register a sub-App to default.\")\n\n if self.default_command is not None:\n raise CommandCollisionError(f\"Default command previously set to {self.default_command}.\")\n\n validate_command(obj)\n self.default_command = obj\n if converter:\n self.converter = converter\n if validator:\n self.validator = validator\n return obj\n\n def parse_known_args(\n self,\n tokens: Union[None, str, Iterable[str]] = None,\n *,\n console: Optional[Console] = None,\n ) -> Tuple[Callable, inspect.BoundArguments, List[str]]:\n \"\"\"Interpret arguments into a function, :class:`~inspect.BoundArguments`, and any remaining unknown tokens.\n\n Parameters\n ----------\n tokens: Union[None, str, Iterable[str]]\n Either a string, or a list of strings to launch a command.\n Defaults to ``sys.argv[1:]``\n\n Returns\n -------\n command: Callable\n Bare function to execute.\n\n bound: inspect.BoundArguments\n Bound arguments for ``command``.\n\n unused_tokens: List[str]\n Any remaining CLI tokens that didn't get parsed for ``command``.\n \"\"\"\n tokens = normalize_tokens(tokens)\n\n command_chain, apps, unused_tokens = self._parse_command_chain(tokens)\n command_app = apps[-1]\n\n try:\n parent_app = apps[-2]\n except IndexError:\n parent_app = None\n\n try:\n if command_app.default_command:\n command = command_app.default_command\n resolved_command = ResolvedCommand(\n command,\n _resolve_default_parameter(apps),\n command_app.group_arguments,\n command_app.group_parameters,\n parse_docstring=False,\n )\n # We want the resolved group that ``app`` belongs to.\n if parent_app is None:\n command_groups = []\n else:\n command_groups = _get_command_groups(parent_app, command_app)\n\n bound, unused_tokens = create_bound_arguments(resolved_command, unused_tokens)\n try:\n if command_app.converter:\n bound.arguments = command_app.converter(**bound.arguments)\n for command_group in command_groups:\n if command_group.converter:\n bound.arguments = command_group.converter(**bound.arguments)\n for validator in command_app.validator:\n validator(**bound.arguments)\n for command_group in command_groups:\n for validator in command_group.validator:\n validator(**bound.arguments)\n except (AssertionError, ValueError, TypeError) as e:\n new_exception = ValidationError(value=e.args[0])\n raise new_exception from e\n\n return command, bound, unused_tokens\n else:\n if unused_tokens:\n raise InvalidCommandError(unused_tokens=unused_tokens)\n else:\n # Running the application with no arguments and no registered\n # ``default_command`` will default to ``help_print``.\n command = self.help_print\n bound = inspect.signature(command).bind(tokens=tokens, console=console)\n return command, bound, []\n except CycloptsError as e:\n e.app = command_app\n if command_chain:\n e.command_chain = command_chain\n raise\n\n raise 
NotImplementedError(\"Should never get here.\")\n\n def parse_args(\n self,\n tokens: Union[None, str, Iterable[str]] = None,\n *,\n console: Optional[Console] = None,\n print_error: bool = True,\n exit_on_error: bool = True,\n verbose: bool = False,\n ) -> Tuple[Callable, inspect.BoundArguments]:\n \"\"\"Interpret arguments into a function and :class:`~inspect.BoundArguments`.\n\n **Does** handle special flags like \"version\" or \"help\".\n\n Raises\n ------\n UnusedCliTokensError\n If any tokens remain after parsing.\n\n Parameters\n ----------\n tokens: Union[None, str, Iterable[str]]\n Either a string, or a list of strings to launch a command.\n Defaults to ``sys.argv[1:]``.\n print_error: bool\n Print a rich-formatted error on error.\n Defaults to ``True``.\n exit_on_error: bool\n If there is an error parsing the CLI tokens invoke ``sys.exit(1)``.\n Otherwise, continue to raise the exception.\n Defaults to ``True``.\n verbose: bool\n Populate exception strings with more information intended for developers.\n Defaults to ``False``.\n\n Returns\n -------\n command: Callable\n Function associated with command action.\n\n bound: inspect.BoundArguments\n Parsed and converted ``args`` and ``kwargs`` to be used when calling ``command``.\n \"\"\"\n tokens = normalize_tokens(tokens)\n\n meta_parent = self\n\n try:\n # Special flags (help/version) get bubbled up to the root app.\n # The root ``help_print`` will then traverse the meta app linked list.\n\n # The Help Flag is allowed to be anywhere in the token stream.\n help_flag_index = None\n for help_flag in self.help_flags:\n try:\n help_flag_index = tokens.index(help_flag)\n break\n except ValueError:\n pass\n\n if help_flag_index is not None:\n tokens.pop(help_flag_index)\n command = self.help_print\n while meta_parent := meta_parent._meta_parent:\n command = meta_parent.help_print\n bound = inspect.signature(command).bind(tokens, console=console)\n unused_tokens = []\n elif any(flag in tokens for flag in self.version_flags):\n # Version\n command = self.version_print\n while meta_parent := meta_parent._meta_parent:\n command = meta_parent.version_print\n bound = inspect.signature(command).bind()\n unused_tokens = []\n else:\n # Normal parsing\n command, bound, unused_tokens = self.parse_known_args(tokens, console=console)\n if unused_tokens:\n raise UnusedCliTokensError(\n target=command,\n unused_tokens=unused_tokens,\n )\n except CycloptsError as e:\n e.verbose = verbose\n e.root_input_tokens = tokens\n if print_error:\n if console is None:\n console = Console()\n console.print(format_cyclopts_error(e))\n\n if exit_on_error:\n sys.exit(1)\n else:\n raise\n\n return command, bound\n\n def __call__(\n self,\n tokens: Union[None, str, Iterable[str]] = None,\n *,\n console: Optional[Console] = None,\n print_error: bool = True,\n exit_on_error: bool = True,\n verbose: bool = False,\n ):\n \"\"\"Interprets and executes a command.\n\n Parameters\n ----------\n tokens : Union[None, str, Iterable[str]]\n Either a string, or a list of strings to launch a command.\n Defaults to ``sys.argv[1:]``.\n print_error: bool\n Print a rich-formatted error on error.\n Defaults to ``True``.\n exit_on_error: bool\n If there is an error parsing the CLI tokens invoke ``sys.exit(1)``.\n Otherwise, continue to raise the exception.\n Defaults to ``True``.\n verbose: bool\n Populate exception strings with more information intended for developers.\n Defaults to ``False``.\n\n Returns\n -------\n return_value: Any\n The value the parsed command handler returns.\n 
\"\"\"\n tokens = normalize_tokens(tokens)\n command, bound = self.parse_args(\n tokens,\n console=console,\n print_error=print_error,\n exit_on_error=exit_on_error,\n verbose=verbose,\n )\n try:\n return command(*bound.args, **bound.kwargs)\n except Exception as e:\n if PydanticValidationError is not None and isinstance(e, PydanticValidationError):\n if print_error:\n if console is None:\n console = Console()\n console.print(format_cyclopts_error(e))\n\n if exit_on_error:\n sys.exit(1)\n raise\n\n def help_print(\n self,\n tokens: Union[None, str, Iterable[str]] = None,\n *,\n console: Optional[Console] = None,\n ) -> None:\n \"\"\"Print the help page.\n\n Parameters\n ----------\n tokens: Union[None, str, Iterable[str]]\n Tokens to interpret for traversing the application command structure.\n If not provided, defaults to ``sys.argv``.\n \"\"\"\n tokens = normalize_tokens(tokens)\n\n if console is None:\n console = Console()\n\n command_chain, apps, _ = self._parse_command_chain(tokens)\n executing_app = apps[-1]\n\n # Print the:\n # my-app command COMMAND [ARGS] [OPTIONS]\n if executing_app.usage is None:\n console.print(format_usage(self, command_chain))\n elif executing_app.usage: # i.e. skip empty-string.\n console.print(executing_app.usage + \"\\n\")\n\n # Print the App/Command's Doc String.\n console.print(format_doc(self, executing_app))\n\n def walk_apps():\n # Iterates from deepest to shallowest meta-apps\n meta_list = [] # shallowest to deepest\n meta_list.append(executing_app)\n meta = executing_app\n while (meta := meta._meta) and meta.default_command:\n meta_list.append(meta)\n yield from reversed(meta_list)\n\n panels: Dict[str, Tuple[Group, HelpPanel]] = {}\n # Handle commands first; there's an off chance they may be \"upgraded\"\n # to an argument/parameter panel.\n for subapp in walk_apps():\n # Handle Commands\n for group, elements in groups_from_app(subapp):\n if not group.show:\n continue\n\n try:\n _, command_panel = panels[group.name]\n except KeyError:\n command_panel = HelpPanel(\n format=\"command\",\n title=group.name,\n )\n panels[group.name] = (group, command_panel)\n\n if group.help:\n if command_panel.description:\n command_panel.description += \"\\n\" + group.help\n else:\n command_panel.description = group.help\n\n command_panel.entries.extend(format_command_entries(elements))\n\n # Handle Arguments/Parameters\n for subapp in walk_apps():\n if subapp.default_command:\n command = ResolvedCommand(\n subapp.default_command,\n subapp.default_parameter,\n subapp.group_arguments,\n subapp.group_parameters,\n )\n for group, iparams in command.groups_iparams:\n if not group.show:\n continue\n cparams = [command.iparam_to_cparam[x] for x in iparams]\n try:\n _, existing_panel = panels[group.name]\n except KeyError:\n existing_panel = None\n new_panel = create_parameter_help_panel(group, iparams, cparams)\n\n if existing_panel:\n # An imperfect merging process\n existing_panel.format = \"parameter\"\n existing_panel.entries = new_panel.entries + existing_panel.entries # Commands go last\n if new_panel.description:\n if existing_panel.description:\n existing_panel.description += \"\\n\" + new_panel.description\n else:\n existing_panel.description = new_panel.description\n else:\n panels[group.name] = (group, new_panel)\n\n groups = [x[0] for x in panels.values()]\n help_panels = [x[1] for x in panels.values()]\n\n for help_panel in sort_groups(groups, help_panels)[1]:\n help_panel.remove_duplicates()\n if help_panel.format == \"command\":\n # don't sort format == 
\"parameter\" because order may matter there!\n help_panel.sort()\n console.print(help_panel)\n\n def interactive_shell(\n self,\n prompt: str = \"$ \",\n quit: Union[None, str, Iterable[str]] = None,\n dispatcher: Optional[Dispatcher] = None,\n **kwargs,\n ) -> None:\n \"\"\"Create a blocking, interactive shell.\n\n All registered commands can be executed in the shell.\n\n Parameters\n ----------\n prompt: str\n Shell prompt. Defaults to ``\"$ \"``.\n quit: Union[str, Iterable[str]]\n String or list of strings that will cause the shell to exit and this method to return.\n Defaults to ``[\"q\", \"quit\"]``.\n dispatcher: Optional[Dispatcher]\n Optional function that subsequently invokes the command.\n The ``dispatcher`` function must have signature:\n\n .. code-block:: python\n\n def dispatcher(command: Callable, bound: inspect.BoundArguments) -> Any:\n return command(*bound.args, **bound.kwargs)\n\n The above is the default dispatcher implementation.\n `**kwargs`\n Get passed along to :meth:`parse_args`.\n \"\"\"\n if os.name == \"posix\":\n print(\"Interactive shell. Press Ctrl-D to exit.\")\n else: # Windows\n print(\"Interactive shell. Press Ctrl-Z followed by Enter to exit.\")\n\n if quit is None:\n quit = [\"q\", \"quit\"]\n if isinstance(quit, str):\n quit = [quit]\n\n def default_dispatcher(command, bound):\n return command(*bound.args, **bound.kwargs)\n\n if dispatcher is None:\n dispatcher = default_dispatcher\n\n kwargs.setdefault(\"exit_on_error\", False)\n\n while True:\n try:\n user_input = input(prompt)\n except EOFError:\n break\n\n tokens = normalize_tokens(user_input)\n if not tokens:\n continue\n if tokens[0] in quit:\n break\n\n try:\n command, bound = self.parse_args(tokens, **kwargs)\n dispatcher(command, bound)\n except CycloptsError:\n # Upstream ``parse_args`` already printed the error\n pass\n except Exception:\n print(traceback.format_exc())\n\n def __repr__(self):\n \"\"\"Only shows non-default values.\"\"\"\n non_defaults = {}\n for a in self.__attrs_attrs__: # pyright: ignore[reportGeneralTypeIssues]\n if not a.init:\n continue\n v = getattr(self, a.name)\n # Compare types first because of some weird attribute issues.\n if type(v) != type(a.default) or v != a.default: # noqa: E721\n non_defaults[a.alias] = v\n\n signature = \", \".join(f\"{k}={v!r}\" for k, v in non_defaults.items())\n return f\"{type(self).__name__}({signature})\""
},
{
"identifier": "Group",
"path": "cyclopts/group.py",
"snippet": "class Group:\n name: str = \"\"\n\n help: str = \"\"\n\n # All below parameters are keyword-only\n _show: Optional[bool] = field(default=None, alias=\"show\", kw_only=True)\n\n _sort_key: Any = field(\n default=None,\n alias=\"sort_key\",\n converter=lambda x: NO_USER_SORT_KEY if x is None else x,\n )\n\n converter: Optional[Callable] = field(default=None, kw_only=True)\n\n validator: Tuple[Callable, ...] = field(\n default=None,\n converter=lambda x: cast(Tuple[Callable, ...], to_tuple_converter(x)),\n kw_only=True,\n )\n\n default_parameter: Optional[\"Parameter\"] = field(\n default=None,\n validator=_group_default_parameter_must_be_none,\n kw_only=True,\n )\n\n def __str__(self):\n return self.name\n\n @property\n def show(self):\n return bool(self.name) if self._show is None else self._show\n\n @show.setter\n def show(self, value):\n self._show = value\n\n @property\n def sort_key(self):\n return None if self._sort_key is NO_USER_SORT_KEY else self._sort_key\n\n @sort_key.setter\n def sort_key(self, value):\n self._sort_key = value\n\n @classmethod\n def create_default_arguments(cls):\n return cls(\"Arguments\")\n\n @classmethod\n def create_default_parameters(cls):\n return cls(\"Parameters\")\n\n @classmethod\n def create_default_commands(cls):\n return cls(\"Commands\")\n\n @classmethod\n def create_ordered(cls, *args, sort_key=None, **kwargs):\n \"\"\"Create a group with a globally incremented :attr:`~Group.sort_key`.\n\n Used to create a group that will be displayed **after** a previously declared :meth:`Group.create_ordered` group on the help-page.\n\n If a :attr:`~Group.sort_key` is provided, it is **prepended** to the globally incremented counter value (i.e. has priority during sorting).\n \"\"\"\n count = next(_sort_key_counter)\n if sort_key is None:\n sort_key = (NO_USER_SORT_KEY, count)\n elif is_iterable(sort_key):\n sort_key = (tuple(sort_key), count)\n else:\n sort_key = (sort_key, count)\n return cls(*args, sort_key=sort_key, **kwargs)"
},
{
"identifier": "Parameter",
"path": "cyclopts/parameter.py",
"snippet": "class Parameter:\n \"\"\"Cyclopts configuration for individual function parameters.\"\"\"\n\n # All documentation has been moved to ``docs/api.rst`` for greater control with attrs.\n\n name: Tuple[str, ...] = field(\n default=None,\n converter=lambda x: cast(Tuple[str, ...], to_tuple_converter(x)),\n )\n\n converter: Callable = field(default=None, converter=attrs.converters.default_if_none(convert))\n\n validator: Tuple[Callable, ...] = field(\n default=(),\n converter=lambda x: cast(Tuple[Callable, ...], to_tuple_converter(x)),\n )\n\n negative: Union[None, Tuple[str, ...]] = field(default=None, converter=optional_to_tuple_converter)\n\n group: Tuple[Union[Group, str], ...] = field(default=None, converter=to_tuple_converter, hash=False)\n\n parse: bool = field(default=None, converter=attrs.converters.default_if_none(True))\n\n _show: Optional[bool] = field(default=None, alias=\"show\")\n\n show_default: Optional[bool] = field(default=None)\n\n show_choices: bool = field(default=None, converter=attrs.converters.default_if_none(True))\n\n help: Optional[str] = field(default=None)\n\n show_env_var: bool = field(default=None, converter=attrs.converters.default_if_none(True))\n\n env_var: Tuple[str, ...] = field(\n default=None,\n converter=lambda x: cast(Tuple[str, ...], to_tuple_converter(x)),\n )\n\n negative_bool: Tuple[str, ...] = field(\n default=None,\n converter=_negative_converter((\"--no-\",)),\n validator=_double_hyphen_validator,\n )\n\n negative_iterable: Tuple[str, ...] = field(\n default=None,\n converter=_negative_converter((\"--empty-\",)),\n validator=_double_hyphen_validator,\n )\n\n required: Optional[bool] = field(default=None)\n\n allow_leading_hyphen: bool = field(default=False)\n\n # Populated by the record_attrs_init_args decorator.\n _provided_args: Tuple[str] = field(default=(), init=False, eq=False)\n\n @property\n def show(self):\n return self._show if self._show is not None else self.parse\n\n def get_negatives(self, type_, *names: str) -> Tuple[str, ...]:\n type_ = get_origin(type_) or type_\n\n if self.negative is not None:\n return self.negative\n elif type_ not in (bool, list, set):\n return ()\n\n out = []\n for name in names:\n if name.startswith(\"--\"):\n name = name[2:]\n elif name.startswith(\"-\"):\n # Do not support automatic negation for short flags.\n continue\n else:\n # Should never reach here.\n raise NotImplementedError(\"All parameters should have started with '-' or '--'.\")\n\n negative_prefixes = self.negative_bool if type_ is bool else self.negative_iterable\n\n for negative_prefix in negative_prefixes:\n out.append(f\"{negative_prefix}{name}\")\n return tuple(out)\n\n def __repr__(self):\n \"\"\"Only shows non-default values.\"\"\"\n content = \", \".join(\n [\n f\"{a.alias}={getattr(self, a.name)!r}\"\n for a in self.__attrs_attrs__ # pyright: ignore[reportGeneralTypeIssues]\n if a.alias in self._provided_args\n ]\n )\n return f\"{type(self).__name__}({content})\"\n\n @classmethod\n def combine(cls, *parameters: Optional[\"Parameter\"]) -> \"Parameter\":\n \"\"\"Returns a new Parameter with values of ``parameters``.\n\n Parameters\n ----------\n `*parameters`: Optional[Parameter]\n Parameters who's attributes override ``self`` attributes.\n Ordered from least-to-highest attribute priority.\n \"\"\"\n kwargs = {}\n for parameter in parameters:\n if parameter is None:\n continue\n for a in parameter.__attrs_attrs__: # pyright: ignore[reportGeneralTypeIssues]\n if a.init and a.alias in parameter._provided_args:\n kwargs[a.alias] = 
getattr(parameter, a.name)\n\n return cls(**kwargs)\n\n @classmethod\n def default(cls) -> \"Parameter\":\n \"\"\"Create a Parameter with all Cyclopts-default values.\n\n This is different than just :class:`Parameter` because the default\n values will be recorded and override all upstream parameter values.\n \"\"\"\n return cls(\n **{a.alias: a.default for a in cls.__attrs_attrs__ if a.init} # pyright: ignore[reportGeneralTypeIssues]\n )"
},
{
"identifier": "groups_from_app",
"path": "cyclopts/group_extractors.py",
"snippet": "def groups_from_app(app: \"App\") -> List[Tuple[Group, List[\"App\"]]]:\n \"\"\"Extract Group/App association.\"\"\"\n group_mapping: List[Tuple[Group, List[\"App\"]]] = [\n (app.group_commands, []),\n ]\n\n subapps = [subapp for subapp in app._commands.values() if subapp.show]\n\n # 2 iterations need to be performed:\n # 1. Extract out all Group objects as they may have additional configuration.\n # 2. Assign/Create Groups out of the strings, as necessary.\n for subapp in subapps:\n assert isinstance(subapp.group, tuple)\n for group in subapp.group:\n if isinstance(group, Group):\n for mapping in group_mapping:\n if mapping[0] is group:\n break\n elif mapping[0].name == group.name:\n raise ValueError(f'Command Group \"{group.name}\" already exists.')\n else:\n group_mapping.append((group, []))\n\n for subapp in subapps:\n if subapp.group:\n assert isinstance(subapp.group, tuple)\n for group in subapp.group:\n _create_or_append(group_mapping, group, subapp)\n else:\n _create_or_append(group_mapping, app.group_commands, subapp)\n\n # Remove the empty groups\n group_mapping = [x for x in group_mapping if x[1]]\n\n # Sort alphabetically by name\n group_mapping.sort(key=lambda x: x[0].name)\n\n return group_mapping"
}
] | import pytest
from cyclopts import App, Group, Parameter
from cyclopts.group_extractors import groups_from_app | 7,737 |
def test_groups_annotated_invalid_recursive_definition():
"""A default_parameter isn't allowed to have a group set, as it would introduce a paradox."""
default_parameter = Parameter(group="Drink") # pyright: ignore[reportGeneralTypeIssues]
with pytest.raises(ValueError):
Group("Food", default_parameter=default_parameter)
def test_groups_from_app_implicit():
def validator(**kwargs):
pass
|
def test_groups_annotated_invalid_recursive_definition():
"""A default_parameter isn't allowed to have a group set, as it would introduce a paradox."""
default_parameter = Parameter(group="Drink") # pyright: ignore[reportGeneralTypeIssues]
with pytest.raises(ValueError):
Group("Food", default_parameter=default_parameter)
def test_groups_from_app_implicit():
def validator(**kwargs):
pass
| app = App(help_flags=[], version_flags=[]) | 0 | 2023-11-03 02:24:25+00:00 | 12k |
RoboFlamingo/RoboFlamingo | robot_flamingo/data/data.py | [
{
"identifier": "RealDatasetHDF5",
"path": "robot_flamingo/data/real_dataset_hdf5.py",
"snippet": "class RealDatasetHDF5(Dataset):\n def __init__(self,\n data_dir,\n image_fn,\n text_fn,\n seq_len=12,\n mode='train',\n action_mode='ee_rel_pose_local',\n use_data_augmentation=True,\n text_aug=False):\n \"\"\"Constructor.\"\"\"\n super().__init__()\n self.dataset_dir = os.path.join(data_dir, mode)\n self.text_fn = text_fn\n self.image_fn = image_fn\n self.text_aug = text_aug\n with open('enrich_lang_real.json', 'r') as f:\n self.enrich_lang_dict = json.load(f)\n self.seq_len = seq_len\n self.mode = mode\n self.action_mode = action_mode\n self.use_data_augmentation = use_data_augmentation\n\n if self.action_mode == 'ee_rel_pose':\n self.action_dim = 7 # ee xyz (3) + ee euler (3) + gripper (1)\n self.state_dim = 7\n self.ACTION_POS_SCALE = 50\n self.ACTION_ROT_SCALE = 33\n elif self.action_mode == 'ee_rel_pose_local':\n self.action_dim = 7 # ee xyz (3) + ee euler (3) + gripper (1)\n self.state_dim = 7\n self.ACTION_POS_SCALE = 50\n self.ACTION_ROT_SCALE = 33\n else:\n raise NotImplementedError()\n print(f\"ACTION_POS_SCALE: {self.ACTION_POS_SCALE}\")\n print(f\"ACTION_ROT_SCALE: {self.ACTION_ROT_SCALE}\")\n \n # the input to this function is a numpy array\n self.input_size = (224, 224)\n self.clip_mean = (0.485, 0.456, 0.406)\n self.clip_std = (0.229, 0.224, 0.225)\n\n if self.use_data_augmentation:\n self.static_rgb_preprocess_train = T.Compose([\n T.ColorJitter(\n brightness=0.05,\n # contrast=0.05,\n # hue=0.02\n ),\n # CubeRandomShiftsAug(pad=10), # static rgb (300x400)\n RandomShiftsAug(pad=10), # static rgb (300x400)\n T.Resize(self.input_size, interpolation=Image.BICUBIC),\n T.Normalize(self.clip_mean, self.clip_std),\n PatchMask()])\n self.hand_rgb_preprocess_train = T.Compose([\n # CubeRandomShiftsAug(pad=20), # hand rgb (480x640)\n RandomShiftsAug(pad=20), # hand rgb (480x640)\n T.Resize(self.input_size, interpolation=Image.BICUBIC),\n T.Normalize(self.clip_mean, self.clip_std),\n PatchMask()])\n else:\n self.static_rgb_preprocess_train = T.Compose([\n T.ColorJitter(\n brightness=0.05,\n # contrast=0.05,\n # hue=0.02\n ),\n T.Resize(self.input_size, interpolation=Image.BICUBIC),\n T.Normalize(self.clip_mean, self.clip_std)])\n self.hand_rgb_preprocess_train = T.Compose([\n T.ColorJitter(\n brightness=0.05,\n # contrast=0.05,\n # hue=0.02\n ),\n T.Resize(self.input_size, interpolation=Image.BICUBIC),\n T.Normalize(self.clip_mean, self.clip_std)])\n self.static_rgb_preprocess_val = T.Compose([\n T.Resize(self.input_size, interpolation=Image.BICUBIC),\n T.Normalize(self.clip_mean, self.clip_std)])\n self.hand_rgb_preprocess_val = T.Compose([\n T.Resize(self.input_size, interpolation=Image.BICUBIC),\n T.Normalize(self.clip_mean, self.clip_std)])\n\n self.offset_rotm = gamma2rotm(OFFSET_EULER_Z)\n self.offset_pos = np.array(OFFSET_POS)\n self.hdf5 = h5py.File(os.path.join(self.dataset_dir, \"data.hdf5\"))\n self._initialize()\n print(f'{len(self)} trajectories in total')\n\n def _initialize(self):\n \"\"\"Generate the sequence index pair.\"\"\"\n with open(os.path.join(self.dataset_dir, \"meta.json\"), \"r\") as f:\n self.meta = json.load(f)\n n_trajs = self.meta[\"num_trajectories\"]\n # n_trajs = 1000\n print(f\"number of trajectories: {n_trajs}\")\n \n self.seq_tuple = []\n self.robot_states = dict()\n all_texts = []\n for traj_idx in tqdm(range(n_trajs)):\n text = self.meta[str(traj_idx)][0]\n all_texts.append(text)\n n_frames = self.meta[str(traj_idx)][1]\n video_name = self.meta[str(traj_idx)][2]\n hdf5_st = self.meta[str(traj_idx)][3]\n hdf5_ed = 
self.meta[str(traj_idx)][4]\n assert n_frames == hdf5_ed - hdf5_st\n if (hdf5_ed - hdf5_st) < self.seq_len:\n continue\n if video_name in EXCLUDING_VIDEOS:\n continue\n\n # load robot status and xform with offset\n traj_robot_status = self.hdf5[\"robot_status\"][\"robot_status_0\"][hdf5_st:hdf5_ed]\n traj_xyz = traj_robot_status[:, 10:13] # (n, 3)\n traj_xyz = traj_xyz.transpose() # (3, n)\n traj_xyz = (self.offset_rotm @ traj_xyz).transpose() + self.offset_pos\n traj_quat = traj_robot_status[:, 13:17]\n traj_rpy = np.zeros((n_frames, 3))\n for i in range(n_frames):\n traj_rpy[i] = rotm2euler(self.offset_rotm @ quat2rotm(traj_quat[i]))\n traj_state = np.zeros((n_frames, 7)).astype(np.float32)\n traj_state[:, :3] = traj_xyz\n traj_state[:, 3:6] = traj_rpy\n vive_control = self.hdf5[\"vive_control\"][\"vive_control_0\"][hdf5_st:hdf5_ed]\n vive_gripper_cmd = vive_control[:, 1]\n gripper_pos = traj_robot_status[:, 30]\n gripper_states = get_binary_gripper_state_from_gripper_pos_vive_cmd(gripper_pos, vive_gripper_cmd)\n traj_state[:, -1] = gripper_states\n assert not (traj_idx in self.robot_states)\n self.robot_states[traj_idx] = traj_state\n\n # create sequence: the last frame will not be in the sequence\n for st in range(0, n_frames - self.seq_len):\n ed = st + self.seq_len\n self.seq_tuple.append([traj_idx, text, st, ed, hdf5_st])\n \n all_texts = list(set(all_texts))\n print(all_texts)\n # exit(0)\n \n def __len__(self):\n return len(self.seq_tuple)\n\n def __getitem__(self, index):\n curr_tuple = self.seq_tuple[index]\n traj_idx = curr_tuple[0]\n text = curr_tuple[1]\n \n # if (\"on the plate\" in text) and (\"pick\" in text):\n # text = text.replace(\" on the plate\", \"\")\n # if (\"on the desk\" in text) and (\"pick\" in text):\n # text = text.replace(\" on the desk\", \"\")\n\n st = curr_tuple[2]\n ed = curr_tuple[3]\n hdf5_st = curr_tuple[4]\n\n static_rgbs = []\n hand_rgbs = []\n actions = []\n states = []\n\n tlen = ed - st\n assert tlen == self.seq_len\n\n for i in range(st, ed):\n # action\n if self.action_mode == 'ee_rel_pose':\n # delta_xyz + detla_rpy + gripper in absolute world coordinates\n # xyz are scaled up by 50 rpy are scaled up by 20 and both are clipped to [-1, 1]\n xyz_action = (self.robot_states[traj_idx][i+1, :3] - self.robot_states[traj_idx][i, :3]) \n rpy_action = (self.robot_states[traj_idx][i+1, 3:6] - self.robot_states[traj_idx][i, 3:6])\n gripper_action = self.robot_states[traj_idx][i+1, 6]\n elif self.action_mode == 'ee_rel_pose_local':\n # a_trans = rotm_t.T @ (trans_t+1 - trans_t)\n # a_rot = rotm_t.T @ rotm_t+1\n curr_xyz = self.robot_states[traj_idx][i, :3]\n curr_rpy = self.robot_states[traj_idx][i, 3:6]\n curr_rotm = euler2rotm(curr_rpy)\n next_xyz = self.robot_states[traj_idx][i+1, :3]\n next_rpy = self.robot_states[traj_idx][i+1, 3:6]\n next_rotm = euler2rotm(next_rpy)\n xyz_action = np.dot(curr_rotm.T, next_xyz - curr_xyz)\n rel_rotm = curr_rotm.T @ next_rotm\n rpy_action = rotm2euler(rel_rotm)\n for rpy_i in range(len(rpy_action)):\n while rpy_action[rpy_i] > np.pi:\n rpy_action[rpy_i] -= (2 * np.pi)\n while rpy_action[rpy_i] < -np.pi:\n rpy_action[rpy_i] += (2 * np.pi)\n gripper_action = self.robot_states[traj_idx][i+1, 6]\n else:\n raise NotImplementedError()\n action = np.zeros(7)\n action[:3] = xyz_action * self.ACTION_POS_SCALE\n action[3:6] = rpy_action * self.ACTION_ROT_SCALE\n action[6] = gripper_action\n actions.append(action)\n \n # state\n states.append(self.robot_states[traj_idx][i])\n\n # static rgb\n static_rgb = 
self.hdf5[\"rgb\"][\"rgb_1\"][hdf5_st+i]\n static_rgb = static_rgb[190:700, 250:1050] # mode 1\n static_rgb = Image.fromarray(static_rgb)\n static_rgb = T.ToTensor()(static_rgb.convert(\"RGB\"))\n static_rgbs.append(static_rgb)\n\n # hand rgb\n hand_rgb = self.hdf5[\"rgb\"][\"rgb_0\"][hdf5_st+i]\n hand_rgb = Image.fromarray(hand_rgb)\n hand_rgb = T.ToTensor()(hand_rgb.convert(\"RGB\"))\n hand_rgbs.append(hand_rgb)\n \n # Images\n static_rgbs = torch.stack(static_rgbs, dim=0)\n hand_rgbs = torch.stack(hand_rgbs, dim=0)\n if self.mode == 'train':\n static_rgbs = self.static_rgb_preprocess_train(static_rgbs)\n hand_rgbs = self.hand_rgb_preprocess_train(hand_rgbs)\n else:\n static_rgbs = self.static_rgb_preprocess_val(static_rgbs)\n hand_rgbs = self.hand_rgb_preprocess_val(hand_rgbs)\n\n # State\n states = np.array(states)\n states = torch.from_numpy(states)\n\n # Action\n actions = np.array(actions) # (len, act_dim)\n actions = torch.from_numpy(actions)\n\n # RGB\n _, C, H, W = static_rgbs.shape\n padded_static_rgbs = torch.zeros((self.seq_len, C, H, W)).float() # (len, C, H, W)\n padded_hand_rgbs = torch.zeros((self.seq_len, C, H, W)).float() # (len, C, H, W)\n padded_static_rgbs[:tlen] = static_rgbs\n padded_hand_rgbs[:tlen] = hand_rgbs\n rgb_data = padded_static_rgbs\n hand_rgb_data = padded_hand_rgbs\n\n # State\n padded_states = torch.zeros(self.seq_len, self.state_dim).float() # (len, state_dim)\n padded_states[:tlen] = states\n state_data = padded_states\n\n # Action\n padded_actions = torch.zeros(self.seq_len, self.action_dim).float() # (len, action_dim)\n padded_actions[:tlen] = actions\n action_data = padded_actions\n\n # Timestep\n timestep = np.zeros(self.seq_len, dtype=np.int32) # (len)\n timestep[:tlen] = np.arange(st, ed)\n timestep_data = torch.from_numpy(timestep).long()\n\n # Attention mask (should be all 1 for full dataset)\n attention_mask = np.ones(self.seq_len, dtype=np.int32) # (len)\n attention_mask[tlen:] = 0.0\n assert np.sum(attention_mask) == self.seq_len\n attention_mask_data = torch.from_numpy(attention_mask).long()\n\n data = dict()\n data['rgb'] = rgb_data # (len, C, H, W)\n data['hand_rgb'] = hand_rgb_data # (len, C, H, W)\n if self.text_aug:\n if text in self.enrich_lang_dict:\n if random.random() > 0.1: # preserve the original text in 0.1 prob\n text = random.choice(self.enrich_lang_dict[text])\n data['text'] = text\n data['timestep'] = timestep_data # (len,)\n data['state'] = state_data # (len, state_dim)\n data['action'] = action_data # (len, action_dim)\n data['attention_mask'] = attention_mask_data # (len,)\n\n return data\n \n def visualize_action(self):\n \"\"\"Visualize the distribution of actions.\"\"\"\n with open(os.path.join(self.dataset_dir, \"meta.json\"), \"r\") as f:\n self.meta = json.load(f)\n n_trajs = self.meta[\"num_trajectories\"]\n xyz_actions = []\n rpy_actions = []\n xyz_states = []\n rpy_states = []\n for traj_idx in range(n_trajs):\n temp_robot_states = self.robot_states[traj_idx]\n n_frames = self.meta[str(traj_idx)][1]\n for i in range(0, n_frames):\n xyz_states.append(temp_robot_states[i, :3])\n rpy_states.append(temp_robot_states[i, 3:6])\n for i in range(1, n_frames):\n xyz_action = temp_robot_states[i, :3] - temp_robot_states[i-1, :3]\n rpy_action = temp_robot_states[i, 3:6] - temp_robot_states[i-1, 3:6]\n xyz_actions.append(xyz_action)\n rpy_actions.append(rpy_action)\n print(f\"number of actions: {len(xyz_actions)}\")\n xyz_actions = np.array(xyz_actions)\n rpy_actions = np.array(rpy_actions)\n xyz_states = 
np.array(xyz_states)\n rpy_states = np.array(rpy_states)\n a_labels = ['a_x', 'a_y', 'a_z']\n for i in range(len(a_labels)):\n plt.figure()\n plt.hist(xyz_actions[:, i], bins=512, label=a_labels[i], alpha=0.5)\n plt.legend(loc='upper right')\n plt.savefig(f\"./data_stats/{a_labels[i]}.png\")\n a_labels = ['a_roll', 'a_pitch', 'a_yaw']\n for i in range(len(a_labels)):\n plt.figure()\n plt.hist(rpy_actions[:, i], bins=512, label=a_labels[i], alpha=0.5)\n plt.legend(loc='upper right')\n plt.savefig(f\"./data_stats/{a_labels[i]}.png\")\n s_labels = ['s_x', 's_y', 's_z']\n for i in range(len(s_labels)):\n plt.figure()\n plt.hist(xyz_states[:, i], bins=512, label=s_labels[i], alpha=0.5)\n plt.legend(loc='upper right')\n plt.savefig(f\"./data_stats/{s_labels[i]}.png\")\n s_labels = ['s_roll', 's_pitch', 's_yaw']\n for i in range(len(s_labels)):\n plt.figure()\n plt.hist(rpy_states[:, i], bins=512, label=s_labels[i], alpha=0.5)\n plt.legend(loc='upper right')\n plt.savefig(f\"./data_stats/{s_labels[i]}.png\")\n\n abs_xyz_actions = np.abs(xyz_actions)\n abs_rpy_actions = np.abs(rpy_actions)\n x_action_max = np.max(abs_xyz_actions[:, 0])\n y_action_max = np.max(abs_xyz_actions[:, 1])\n z_action_max = np.max(abs_xyz_actions[:, 2])\n x_action_min = np.min(abs_xyz_actions[:, 0])\n y_action_min = np.min(abs_xyz_actions[:, 1])\n z_action_min = np.min(abs_xyz_actions[:, 2])\n x_action_mean = np.mean(abs_xyz_actions[:, 0])\n y_action_mean = np.mean(abs_xyz_actions[:, 1])\n z_action_mean = np.mean(abs_xyz_actions[:, 2])\n\n print(f\"xyz_action max: {x_action_max:.3f}, {y_action_max:.3f}, {z_action_max:.3f}\")\n print(f\"xyz_action min: {x_action_min:.3f}, {y_action_min:.3f}, {z_action_min:.3f}\")\n print(f\"xyz_action mean: {x_action_mean:.3f}, {y_action_mean:.3f}, {z_action_mean:.3f}\")\n\n er_action_max = np.max(abs_rpy_actions[:, 0])\n ep_action_max = np.max(abs_rpy_actions[:, 1])\n ey_action_max = np.max(abs_rpy_actions[:, 2])\n er_action_min = np.min(abs_rpy_actions[:, 0])\n ep_action_min = np.min(abs_rpy_actions[:, 1])\n ey_action_min = np.min(abs_rpy_actions[:, 2])\n er_action_mean = np.mean(abs_rpy_actions[:, 0])\n ep_action_mean = np.mean(abs_rpy_actions[:, 1])\n ey_action_mean = np.mean(abs_rpy_actions[:, 2])\n\n print(f\"rpy_action max: {er_action_max:.3f}, {ep_action_max:.3f}, {ey_action_max:.3f}\")\n print(f\"rpy_action min: {er_action_min:.3f}, {ep_action_min:.3f}, {ey_action_min:.3f}\")\n print(f\"rpy_action mean: {er_action_mean:.3f}, {ep_action_mean:.3f}, {ey_action_mean:.3f}\")\n\n def visualize_episode_len(self):\n \"\"\"Visualize the length distribution of episodes.\"\"\"\n pass\n \n def collator(self, sample):\n image_tensors = torch.stack([s['rgb'] for s in sample], dim=0)\n gripper_tensors = torch.stack([s['hand_rgb'] for s in sample], dim=0)\n action_tensors = torch.stack([s['action'] for s in sample], dim=0)\n state_tensors = torch.stack([s['state'] for s in sample], dim=0)\n robot_obs = state_tensors.clone()\n text = [s['text'] for s in sample]\n # print(text)\n text_tensors, attention_mask = self.text_fn(text)\n # print(text_tensors, attention_mask)\n return image_tensors, (text_tensors, attention_mask), action_tensors, gripper_tensors, state_tensors, robot_obs"
},
{
"identifier": "CaptionDataset",
"path": "robot_flamingo/data/vl_dataset.py",
"snippet": "class CaptionDataset(Dataset):\n def __init__(\n self,\n image_train_dir_path,\n annotations_path,\n tokenizer=None,\n transforms=None,\n seed=123,\n is_train=True,\n dataset_name='coco',\n image_val_dir_path=None,\n ):\n self.image_train_dir_path = image_train_dir_path\n self.image_val_dir_path = image_val_dir_path\n self.annotations = []\n self.is_train = is_train\n self.dataset_name = dataset_name\n self.seed = seed\n random.seed(self.seed)\n full_annotations = json.load(open(annotations_path))\n self.tokenizer = tokenizer\n self.transforms = transforms\n print(len(full_annotations[\"images\"]), len(full_annotations[\"annotations\"]))\n self.id2path = {}\n self.id2caption = {}\n for i in range(len(full_annotations[\"images\"])):\n self.id2path[full_annotations[\"images\"][i][\"id\"]] = os.path.join(\n self.image_train_dir_path, full_annotations[\"images\"][i][\"file_name\"])\n self.image_ids = list(self.id2path.keys())\n for i in range(len(full_annotations[\"annotations\"])):\n image_id = full_annotations[\"annotations\"][i][\"image_id\"]\n if image_id not in self.id2caption:\n self.id2caption[image_id] = [full_annotations[\"annotations\"][i]['caption']]\n else:\n self.id2caption[image_id].append(full_annotations[\"annotations\"][i]['caption'])\n\n def __len__(self):\n return len(self.image_ids)\n\n def __getitem__(self, idx):\n image = Image.open(self.id2path[self.image_ids[idx]])\n image.load()\n caption = random.choice(self.id2caption[self.image_ids[idx]])\n return {\n \"image\": image,\n \"caption\": caption,\n \"image_id\": self.image_ids[idx]\n }\n \n def get_caption_prompt(self, caption=None):\n return f\"A photo of {caption if caption is not None else ''}\"\n \n def collator(self, samples):\n images = torch.stack([self.transforms(s['image']) for s in samples], dim=0)\n text = [self.get_caption_prompt(s['caption']) for s in samples]\n text_tensors, attention_mask = self.tokenizer(text)\n return images, (text_tensors, attention_mask)"
},
{
"identifier": "VQADataset",
"path": "robot_flamingo/data/vl_dataset.py",
"snippet": "class VQADataset(Dataset):\n def __init__(\n self, image_dir_path, question_path, annotations_path, tokenizer=None, transforms=None, seed=123, is_train=True, dataset_name='vqav2'\n ):\n self.questions = json.load(open(question_path, \"r\"))[\"questions\"]\n if annotations_path is not None:\n self.answers = json.load(open(annotations_path, \"r\"))[\"annotations\"]\n else:\n self.answers = None\n self.image_dir_path = image_dir_path\n self.is_train = is_train\n self.dataset_name = dataset_name\n # self.img_coco_split = \"train2014\"\n self.tokenizer = tokenizer\n self.transforms = transforms\n self.seed = seed\n random.seed(self.seed)\n if self.dataset_name in {\"vqav2\", \"ok_vqa\"}:\n self.img_coco_split = self.image_dir_path.strip(\"/\").split(\"/\")[-1]\n assert self.img_coco_split in {\"train2014\", \"val2014\", \"test2015\"}\n\n def __len__(self):\n return len(self.questions)\n\n def get_img_path(self, question):\n if self.dataset_name in {\"vqav2\", \"ok_vqa\"}:\n return os.path.join(\n self.image_dir_path,\n f\"COCO_{self.img_coco_split}_{question['image_id']:012d}.jpg\"\n if self.is_train\n else f\"COCO_{self.img_coco_split}_{question['image_id']:012d}.jpg\",\n )\n elif self.dataset_name == \"vizwiz\":\n return os.path.join(self.image_dir_path, question[\"image_id\"])\n elif self.dataset_name == \"textvqa\":\n return os.path.join(self.image_dir_path, f\"{question['image_id']}.jpg\")\n else:\n raise Exception(f\"Unknown VQA dataset {self.dataset_name}\")\n\n def __getitem__(self, idx):\n question = self.questions[idx]\n img_path = self.get_img_path(question)\n image = Image.open(img_path)\n # image.load()\n results = {\n \"image\": image,\n \"question\": question[\"question\"],\n \"question_id\": question[\"question_id\"],\n }\n if self.answers is not None:\n answers = self.answers[idx]\n results[\"answers\"] = [a[\"answer\"] for a in answers[\"answers\"]]\n return results\n \n def get_vqa_prompt(self, question, answer=None):\n return f\"Question:{question} Short answer:{answer if answer is not None else ''}\"\n \n def get_vqa_ques_prompt(self, question):\n return f\"Question:{question} Short answer:\"\n \n def collator(self, samples):\n images = torch.stack([self.transforms(s['image']) for s in samples], dim=0)\n text = [self.get_vqa_prompt(s['question'], random.choice(s['answers'])) for s in samples]\n text_tensors, attention_mask = self.tokenizer(text)\n B, T = attention_mask.shape\n ques = [self.get_vqa_ques_prompt(s['question']) for s in samples]\n _, ques_mask = self.tokenizer(ques)\n ques_len = ques_mask.sum(dim=1).unsqueeze(-1).expand(B, T)\n answer_mask = torch.ones_like(attention_mask)\n indices = torch.arange(answer_mask.shape[-1]).unsqueeze(0).expand(B, T)\n index_mask = indices < ques_len\n answer_mask.masked_fill_(index_mask, 0)\n answer_mask = answer_mask * attention_mask # both mask for attention and question\n return images, (text_tensors, attention_mask), answer_mask"
}
] | import ast
import functools
import io
import json
import logging
import math
import os
import random
import sys
import tarfile
import zipfile
import braceexpand
import torch
import torchvision
import webdataset as wds
import numpy as np
import pyhash
import horovod.torch as hvd
import pickle
import torch.nn as nn
import torch.nn.functional as F
import copy
from cgitb import text
from dataclasses import dataclass
from multiprocessing import Value
from PIL import Image
from torch.utils.data import DataLoader, IterableDataset, get_worker_info, Dataset
from torch.utils.data.distributed import DistributedSampler
from webdataset.filters import _shuffle
from webdataset.tariterators import (
base_plus_ext,
tar_file_expander,
url_opener,
valid_sample,
)
from calvin_agent.datasets.utils.episode_utils import (
get_state_info_dict,
process_actions,
process_depth,
process_language,
process_rgb,
process_state,
)
from omegaconf import DictConfig
from torch.utils.data import Dataset
from robot_flamingo.data.real_dataset_hdf5 import RealDatasetHDF5
from pathlib import Path
from typing import Dict, Tuple, Union
from calvin_agent.datasets.utils.episode_utils import (
get_state_info_dict,
process_actions,
process_depth,
# process_language,
# process_rgb,
process_state,
)
from omegaconf import DictConfig
from torch.utils.data import Dataset
from robot_flamingo.data.vl_dataset import CaptionDataset, VQADataset
from typing import Any, Dict, List, Tuple, Callable
from itertools import chain
from calvin_agent.datasets.utils.episode_utils import lookup_naming_pattern | 8,811 | coco_dataset,
batch_size=args.batch_size_vl,
pin_memory=False,
num_workers=args.workers,
prefetch_factor=3,
sampler=sampler,
persistent_workers=True,
collate_fn=coco_dataset.collator,
drop_last=True
)
return dataloader
def get_vqa_dataset(args, image_processor, tokenizer, epoch=0):
vqa_data_dir = "path/to/vqav2/train2014"
vqa_questions = "path/to/vqav2/v2_OpenEnded_mscoco_train2014_questions.json"
vqa_ann = "path/to/vqav2/v2_mscoco_train2014_annotations.json"
preprocess_text_fn = functools.partial(preprocess_text_calvin, tokenizer=tokenizer)
vqa_dataset = VQADataset(vqa_data_dir, vqa_questions, vqa_ann, preprocess_text_fn, image_processor)
sampler = DistributedSampler(
vqa_dataset,
num_replicas=args.world_size,
rank=args.rank,
shuffle=True,
seed=args.seed,
drop_last=True,
)
dataloader = DataLoader(
vqa_dataset,
batch_size=args.batch_size_vl,
pin_memory=False,
num_workers=args.workers,
prefetch_factor=3,
sampler=sampler,
persistent_workers=True,
collate_fn=vqa_dataset.collator,
drop_last=True
)
return dataloader
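# Illustrative note (assumption about the surrounding training loop, which is not shown here):
# with a DistributedSampler, per-epoch reshuffling only happens if the training loop calls
#   dataloader.sampler.set_epoch(epoch)
# before iterating each epoch; otherwise every epoch replays the same sample order.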
def get_calvin_dataset(args, image_processor, tokenizer, epoch=0, floor=False):
dataset_path = args.calvin_dataset
# ann is dict including language and info
shared_epoch = SharedEpoch(epoch=epoch)
preprocess_image_fn = functools.partial(
preprocess_image, image_processor=image_processor
)
preprocess_text_fn = functools.partial(preprocess_text_calvin, tokenizer=tokenizer)
calvin_dataset = DiskCalvinDataset(
datasets_dir=Path(dataset_path) / "training",
image_fn=preprocess_image_fn,
text_fn=preprocess_text_fn,
window_size=args.window_size,
rgb_pad=args.rgb_pad,
gripper_pad=args.gripper_pad,
traj_cons=args.traj_cons,
text_aug=args.text_aug,
dif_ws=args.dif_ws,
min_window_size=args.min_window_size,
max_window_size=args.max_window_size,
act_step=args.multi_step_action,
partial_data=args.partial_data
)
round_fn = math.floor if floor else math.ceil
num_samples = len(calvin_dataset)
global_batch_size = args.batch_size_calvin * args.world_size
num_batches = round_fn(num_samples / global_batch_size)
num_workers = max(1, args.workers)
num_worker_batches = round_fn(num_batches / num_workers) # per dataloader worker
num_batches = num_worker_batches * num_workers
num_samples = num_batches * global_batch_size
sampler = DistributedSampler(
calvin_dataset,
num_replicas=args.world_size,
rank=args.rank,
shuffle=True,
seed=args.seed,
drop_last=True,
)
# the batch_size and num_workers are per-GPU !
dataloader = DataLoader(
calvin_dataset,
batch_size=args.batch_size_calvin,
pin_memory=False,
num_workers=num_workers,
prefetch_factor=3,
sampler=sampler,
persistent_workers=True,
collate_fn=calvin_dataset.collater,
drop_last=True
)
# dataloader = DataLoader(calvin_dataset, batch_size=args.batch_size_calvin)
# add meta-data to dataloader instance for convenience
dataloader.num_batches = num_batches
dataloader.num_samples = num_samples
return DataInfo(dataloader=dataloader, shared_epoch=shared_epoch, sampler=sampler, dataset=calvin_dataset)
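# Illustrative consumption sketch (assumptions: DataInfo keeps the sampler it was built with and the
# CALVIN collater yields the same tuple layout as RealDatasetHDF5.collator; neither is shown here):
#   calvin_info = get_calvin_dataset(args, image_processor, tokenizer, epoch=0)
#   for epoch in range(num_epochs):
#       calvin_info.sampler.set_epoch(epoch)   # reshuffle across ranks each epoch
#       for images, (text, attn_mask), actions, grippers, states, robot_obs in calvin_info.dataloader:
#           ...                                # forward/backward pass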
def get_real_dataset(args, image_processor, tokenizer, epoch=0, floor=False):
dataset_path = args.calvin_dataset
# ann is dict including language and info
shared_epoch = SharedEpoch(epoch=epoch)
preprocess_image_fn = functools.partial(
preprocess_image, image_processor=image_processor
)
preprocess_text_fn = functools.partial(preprocess_text_calvin, tokenizer=tokenizer)
|
Image.MAX_IMAGE_PIXELS = 1000000000
MAX_NUM_TOKENS = 256
MAX_NUM_IMAGES = 5
TINY_IMAGE_SIZE_THRESHOLD = 1
N_CHANNELS = 3
INTERLEAVED_IMAGE_SIZE = 224
_SHARD_SHUFFLE_SIZE = 2000
_SHARD_SHUFFLE_INITIAL = 500
_SAMPLE_SHUFFLE_SIZE = 5000
_SAMPLE_SHUFFLE_INITIAL = 1000
MIN_KB = 10
MAX_NUM_IMAGES = 5
try:
    import horovod.torch as hvd
except ImportError:
    hvd = None
hasher = pyhash.fnv1_32()
logger = logging.getLogger(__name__)
obs_config = DictConfig(
{
"rgb_obs": ["rgb_static", "rgb_gripper"],
"depth_obs": [],
"state_obs": ["robot_obs"],
"actions": ["rel_actions"],
"language": ["language"],
}
)
prop_state = DictConfig(
{
"n_state_obs": 15,
"keep_indices": [[0, 15]],
"robot_orientation_idx": [3, 6],
"normalize": True,
"normalize_robot_orientation": True,
}
)
def get_validation_window_size(
idx: int, min_window_size: int, max_window_size: int
) -> int:
"""
    In the validation step, use a hash function instead of random sampling for consistent window sizes across epochs.
Args:
idx: Sequence index.
min_window_size: Minimum window size.
max_window_size: Maximum window size.
Returns:
Window size computed with hash function.
"""
window_range = max_window_size - min_window_size + 1
return min_window_size + hasher(str(idx)) % window_range
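# Illustrative property (not part of the original file): deriving the window size from a hash of the
# sequence index keeps validation windows reproducible across epochs and processes, e.g.
#   assert get_validation_window_size(42, 16, 24) == get_validation_window_size(42, 16, 24)
#   assert 16 <= get_validation_window_size(42, 16, 24) <= 24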
class RandomShiftsAug(nn.Module):
def __init__(self, pad):
super().__init__()
self.pad = pad
def forward(self, x):
n, c, h, w = x.size()
assert h == w
padding = tuple([self.pad] * 4)
x = F.pad(x, padding, 'replicate')
eps = 1.0 / (h + 2 * self.pad)
arange = torch.linspace(-1.0 + eps,
1.0 - eps,
h + 2 * self.pad,
device=x.device,
dtype=x.dtype)[:h]
arange = arange.unsqueeze(0).repeat(h, 1).unsqueeze(2)
base_grid = torch.cat([arange, arange.transpose(1, 0)], dim=2)
base_grid = base_grid.unsqueeze(0).repeat(n, 1, 1, 1)
shift = torch.randint(0,
2 * self.pad + 1,
size=(n, 1, 1, 2),
device=x.device,
dtype=x.dtype)
shift *= 2.0 / (h + 2 * self.pad)
grid = base_grid + shift
return F.grid_sample(x, grid, padding_mode='zeros', align_corners=False)
def forward_traj(self, x):
n, t, c, h, w = x.size()
x = x.view(n*t, *x.shape[2:])
assert h == w
padding = tuple([self.pad] * 4)
x = F.pad(x, padding, 'replicate')
eps = 1.0 / (h + 2 * self.pad)
arange = torch.linspace(-1.0 + eps,
1.0 - eps,
h + 2 * self.pad,
device=x.device,
dtype=x.dtype)[:h]
arange = arange.unsqueeze(0).repeat(h, 1).unsqueeze(2)
base_grid = torch.cat([arange, arange.transpose(1, 0)], dim=2)
base_grid = base_grid.unsqueeze(0).repeat(n, 1, 1, 1)
base_grid = base_grid.unsqueeze(1).repeat(1, t, 1, 1, 1)
base_grid = base_grid.view(n*t, *base_grid.shape[2:])
shift = torch.randint(1,
2 * self.pad + 1,
size=(n*t, 1, 1, 2),
device=x.device,
dtype=x.dtype)
shift *= 2.0 / (h + 2 * self.pad)
grid = base_grid + shift
x = F.grid_sample(x, grid, padding_mode='zeros', align_corners=False)
x = x.view(n, t, *x.shape[1:])
return x
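# Illustrative usage sketch (not part of the original file): RandomShiftsAug replicate-pads each image
# by `pad` pixels and re-samples it on a randomly shifted grid, i.e. a small random translation.
#   aug = RandomShiftsAug(pad=10)
#   frames = torch.rand(8, 3, 224, 224)       # (B, C, H, W); H must equal W
#   shifted = aug(frames)                     # same shape, one random shift per image
#   clip = torch.rand(8, 12, 3, 224, 224)     # (B, T, C, H, W)
#   shifted_clip = aug.forward_traj(clip)     # same shape; shifts drawn per flattened (B*T) frame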
class BaseCalvinDataset(Dataset):
"""
Abstract dataset base class.
Args:
datasets_dir: Path of folder containing episode files (string must contain 'validation' or 'training').
obs_space: DictConfig of observation space.
        proprio_state: DictConfig with shape of proprioceptive state.
key: 'vis' or 'lang'.
lang_folder: Name of the subdirectory of the dataset containing the language annotations.
num_workers: Number of dataloading workers for this dataset.
transforms: Dict with pytorch data transforms.
batch_size: Batch size.
min_window_size: Minimum window length of loaded sequences.
max_window_size: Maximum window length of loaded sequences.
pad: If True, repeat last frame such that all sequences have length 'max_window_size'.
aux_lang_loss_window: How many sliding windows to consider for auxiliary language losses, counted from the end
of an annotated language episode.
"""
def __init__(
self,
datasets_dir: Path,
proprio_state: DictConfig = prop_state,
lang_folder: str = "lang_annotations",
num_workers: int = 0,
key: str = "lang",
obs_space: DictConfig = obs_config,
transforms: Dict = {},
batch_size: int = 32,
window_size: int = 16,
min_window_size: int = 16,
max_window_size: int = 16,
pad: bool = True,
aux_lang_loss_window: int = 1,
rgb_pad=-1,
gripper_pad=-1,
traj_cons=False,
text_aug=False,
dif_ws=False,
act_step=1
):
self.observation_space = obs_space
self.proprio_state = proprio_state
self.transforms = transforms
self.with_lang = key == "lang"
self.relative_actions = "rel_actions" in self.observation_space["actions"]
self.pad = pad
self.batch_size = batch_size
self.num_workers = num_workers
self.window_size = window_size
if not dif_ws:
self.min_window_size = window_size + act_step - 1
self.max_window_size = window_size + act_step - 1
else:
self.min_window_size = min_window_size
self.max_window_size = max_window_size
self.act_step = act_step
# print('ws {}, min_ws {}, max_ws {}'.format(self.window_size, self.max_window_size, self.min_window_size))
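        # Illustrative arithmetic (values assumed): with window_size=12, act_step=3 and
        # dif_ws=False, min_window_size and max_window_size both become 12 + 3 - 1 = 14,
        # so every loaded sequence has enough frames to build a 3-step action chunk for
        # each of the 12 observation frames (see the collater further below).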
self.abs_datasets_dir = datasets_dir
self.lang_folder = lang_folder # if self.with_lang else None
self.aux_lang_loss_window = aux_lang_loss_window
self.traj_cons = traj_cons
with open('/mnt/bn/robotics/lxh/robot-flamingo/enrich_lang_annotations.json', 'r') as f:
self.enrich_lang = json.load(f)
self.text_aug = text_aug
self.rgb_pad = rgb_pad
if self.rgb_pad != -1:
self.rgb_shift = RandomShiftsAug(rgb_pad)
self.gripper_pad = gripper_pad
if self.gripper_pad != -1:
self.gripper_shift = RandomShiftsAug(gripper_pad)
assert (
"validation" in self.abs_datasets_dir.as_posix()
or "training" in self.abs_datasets_dir.as_posix()
)
self.validation = "validation" in self.abs_datasets_dir.as_posix()
assert self.abs_datasets_dir.is_dir()
logger.info(f"loading dataset at {self.abs_datasets_dir}")
logger.info("finished loading dataset")
def process_rgb(
self,
episode: Dict[str, np.ndarray],
observation_space: DictConfig,
transforms: Dict,
seq_idx: int = 0,
window_size: int = 0,
) -> Dict[str, Dict[str, torch.Tensor]]:
rgb_obs_keys = observation_space["rgb_obs"]
seq_rgb_obs_dict = {}
for _, rgb_obs_key in enumerate(rgb_obs_keys):
rgb_obs = episode[rgb_obs_key]
# expand dims for single environment obs
if len(rgb_obs.shape) != 4:
rgb_obs = np.expand_dims(rgb_obs, axis=0)
assert len(rgb_obs.shape) == 4
if window_size == 0 and seq_idx == 0: # single file loader
# To Square image
seq_rgb_obs_ = torch.from_numpy(rgb_obs).byte()
else: # episode loader
seq_rgb_obs_ = torch.from_numpy(
rgb_obs[seq_idx : seq_idx + window_size]
).byte()
if rgb_obs_key in transforms:
seq_rgb_obs_ = transforms[rgb_obs_key](seq_rgb_obs_)
seq_rgb_obs_dict[rgb_obs_key] = seq_rgb_obs_
# shape: N_rgb_obs x (BxHxWxC)
return {"rgb_obs": seq_rgb_obs_dict}
def process_language(
self, episode: Dict[str, np.ndarray], transforms: Dict, with_lang: bool
):
return {"lang": episode["language"]}
def __getitem__(self, idx: Union[int, Tuple[int, int]], fixed_seed=False) -> Dict:
"""
Get sequence of dataset.
Args:
idx: Index of the sequence.
Returns:
Loaded sequence.
"""
if isinstance(idx, int):
# When max_ws_size and min_ws_size are equal, avoid unnecessary padding
# acts like Constant dataset. Currently, used for language data
if self.min_window_size == self.max_window_size:
window_size = self.max_window_size
elif self.min_window_size < self.max_window_size:
window_size = self._get_window_size(idx)
else:
logger.error(
f"min_window_size {self.min_window_size} > max_window_size {self.max_window_size}"
)
raise ValueError
else:
idx, window_size = idx
head = False
sequence = self._get_sequences(idx, window_size, head=head)
if self.pad:
pad_size = self._get_pad_size(sequence)
sequence = self._pad_sequence(sequence, pad_size, head=head)
new_list = []
np_rgb = copy.deepcopy(sequence["rgb_obs"]["rgb_static"].numpy())
for i in range(np_rgb.shape[0]):
new_list.append(Image.fromarray(np_rgb[i, :, :, :].astype(np.uint8)))
sequence["rgb_obs"]["rgb_static"] = new_list
new_list = []
np_gripper = copy.deepcopy(sequence["rgb_obs"]["rgb_gripper"].numpy())
for i in range(np_gripper.shape[0]):
new_list.append(Image.fromarray(np_gripper[i, :, :, :].astype(np.uint8)))
sequence["rgb_obs"]["rgb_gripper"] = new_list
# print(pad_size, len(new_list))
return sequence
def _get_sequences(self, idx: int, window_size: int, head: bool=False) -> Dict:
"""
Load sequence of length window_size.
Args:
idx: Index of starting frame.
window_size: Length of sampled episode.
Returns:
dict: Dictionary of tensors of loaded sequence with different input modalities and actions.
"""
episode = self._load_episode(idx, window_size)
seq_state_obs = process_state(
episode, self.observation_space, self.transforms, self.proprio_state
)
seq_rgb_obs = self.process_rgb(episode, self.observation_space, self.transforms)
seq_depth_obs = process_depth(episode, self.observation_space, self.transforms)
seq_acts = process_actions(episode, self.observation_space, self.transforms)
info = get_state_info_dict(episode)
seq_lang = self.process_language(episode, self.transforms, self.with_lang)
info = self._add_language_info(info, idx)
seq_dict = {
**seq_state_obs,
**seq_rgb_obs,
**seq_depth_obs,
**seq_acts,
**info,
**seq_lang,
} # type:ignore
seq_dict["idx"] = idx # type:ignore
return seq_dict
def _load_episode(self, idx: int, window_size: int) -> Dict[str, np.ndarray]:
raise NotImplementedError
def _get_window_size(self, idx: int) -> int:
"""
Sample a window size taking into account the episode limits.
Args:
idx: Index of the sequence to load.
Returns:
Window size.
"""
window_diff = self.max_window_size - self.min_window_size
if len(self.episode_lookup) <= idx + window_diff:
# last episode
max_window = self.min_window_size + len(self.episode_lookup) - idx - 1
elif (
self.episode_lookup[idx + window_diff]
!= self.episode_lookup[idx] + window_diff
):
# less than max_episode steps until next episode
steps_to_next_episode = int(
np.nonzero(
self.episode_lookup[idx : idx + window_diff + 1]
- (self.episode_lookup[idx] + np.arange(window_diff + 1))
)[0][0]
)
max_window = min(
self.max_window_size, (self.min_window_size + steps_to_next_episode - 1)
)
else:
max_window = self.max_window_size
if self.validation:
# in validation step, repeat the window sizes for each epoch.
return get_validation_window_size(idx, self.min_window_size, max_window)
else:
return np.random.randint(self.min_window_size, max_window + 1)
def __len__(self) -> int:
"""
Returns:
Size of the dataset.
"""
return len(self.episode_lookup)
def _get_pad_size(self, sequence: Dict) -> int:
"""
        Determine how many frames to append to the end of the sequence.
Args:
sequence: Loaded sequence.
Returns:
Number of frames to pad.
"""
return self.max_window_size - len(sequence["actions"])
def _pad_sequence(self, seq: Dict, pad_size: int, head: bool=False) -> Dict:
"""
Pad a sequence by repeating the last frame.
Args:
seq: Sequence to pad.
pad_size: Number of frames to pad.
Returns:
Padded sequence.
"""
seq.update({"robot_obs": self._pad_with_repetition(seq["robot_obs"], pad_size)})
seq.update(
{
"rgb_obs": {
k: self._pad_with_repetition(v, pad_size, head)
for k, v in seq["rgb_obs"].items()
}
}
)
seq.update(
{
"depth_obs": {
k: self._pad_with_repetition(v, pad_size, head)
for k, v in seq["depth_obs"].items()
}
}
)
# todo: find better way of distinguishing rk and play action spaces
if not self.relative_actions:
if head:
seq_acts = self._pad_with_zeros(seq["actions"], pad_size, head)
else:
# repeat action for world coordinates action space
seq.update({"actions": self._pad_with_repetition(seq["actions"], pad_size, head)})
else:
# for relative actions zero pad all but the last action dims and repeat last action dim (gripper action)
if head:
seq_acts = self._pad_with_zeros(seq["actions"], pad_size, head)
else:
seq_acts = torch.cat(
[
self._pad_with_zeros(seq["actions"][..., :-1], pad_size, head),
self._pad_with_repetition(seq["actions"][..., -1:], pad_size, head),
],
dim=-1,
)
seq.update({"actions": seq_acts})
seq.update(
{
"state_info": {
k: self._pad_with_repetition(v, pad_size, head)
for k, v in seq["state_info"].items()
}
}
)
return seq
@staticmethod
def _pad_with_repetition(input_tensor: torch.Tensor, pad_size: int, head: bool = False) -> torch.Tensor:
"""
Pad a sequence Tensor by repeating last element pad_size times.
Args:
input_tensor: Sequence to pad.
pad_size: Number of frames to pad.
Returns:
Padded Tensor.
"""
if head:
last_repeated = torch.repeat_interleave(
torch.unsqueeze(input_tensor[0], dim=0), repeats=pad_size, dim=0
)
padded = torch.vstack((last_repeated, input_tensor))
else:
last_repeated = torch.repeat_interleave(
torch.unsqueeze(input_tensor[-1], dim=0), repeats=pad_size, dim=0
)
padded = torch.vstack((input_tensor, last_repeated))
return padded
@staticmethod
def _pad_with_zeros(input_tensor: torch.Tensor, pad_size: int, head: bool = False) -> torch.Tensor:
"""
Pad a Tensor with zeros.
Args:
input_tensor: Sequence to pad.
pad_size: Number of frames to pad.
Returns:
Padded Tensor.
"""
zeros_repeated = torch.repeat_interleave(
torch.unsqueeze(torch.zeros(input_tensor.shape[-1]), dim=0),
repeats=pad_size,
dim=0,
)
if head:
padded = torch.vstack((zeros_repeated, input_tensor))
else:
padded = torch.vstack((input_tensor, zeros_repeated))
return padded
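    # Illustrative sketch (tensor values assumed): for a (T, D) tensor, both helpers grow
    # the time dimension by pad_size frames.
    #
    #   acts = torch.arange(6.).reshape(3, 2)                     # T=3, D=2
    #   BaseCalvinDataset._pad_with_repetition(acts, 2).shape     # torch.Size([5, 2])
    #   BaseCalvinDataset._pad_with_zeros(acts, 2)[-1]            # tensor([0., 0.])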
def _add_language_info(self, info: Dict, idx: int) -> Dict:
"""
If dataset contains language, add info to determine if this sequence will be used for the auxiliary losses.
Args:
info: Info dictionary.
idx: Sequence index.
Returns:
Info dictionary with updated information.
"""
if not self.with_lang:
return info
use_for_aux_lang_loss = (
idx + self.aux_lang_loss_window >= len(self.lang_lookup)
or self.lang_lookup[idx] < self.lang_lookup[idx + self.aux_lang_loss_window]
)
info["use_for_aux_lang_loss"] = use_for_aux_lang_loss
return info
class DebugDataset(Dataset):
def __init__(self, **kwargs: Any,):
super().__init__()
def __len__(self) -> int:
return 10000
def __getitem__(self, index):
window_size = 8
rgb = torch.randn(window_size, 3, 200, 200)
gripper = torch.randn(window_size, 84, 84)
state = torch.randn(window_size, 15)
class DiskCalvinDataset(BaseCalvinDataset):
"""
Dataset that loads episodes as individual files from disk.
Args:
skip_frames: Skip this amount of windows for language dataset.
save_format: File format in datasets_dir (pkl or npz).
pretrain: Set to True when pretraining.
"""
def __init__(
self,
image_fn: Callable,
text_fn: Callable,
*args: Any,
skip_frames: int = 1,
save_format: str = "npz",
pretrain: bool = False,
partial_data=False,
**kwargs: Any,
):
super().__init__(*args, **kwargs)
self.save_format = save_format
self.image_fn = image_fn
self.text_fn = text_fn
self.partial_data = partial_data
if self.save_format == "pkl":
self.load_file = load_pkl
elif self.save_format == "npz":
self.load_file = load_npz
else:
raise NotImplementedError
self.pretrain = pretrain
self.skip_frames = skip_frames
if self.with_lang:
(
self.episode_lookup,
self.lang_lookup,
self.lang_ann,
self.lang_task
) = self._build_file_indices_lang(self.abs_datasets_dir)
else:
self.episode_lookup = self._build_file_indices(self.abs_datasets_dir)
self.naming_pattern, self.n_digits = lookup_naming_pattern(
self.abs_datasets_dir, self.save_format
)
def _get_episode_name(self, file_idx: int) -> Path:
"""
Convert file idx to file path.
Args:
file_idx: index of starting frame.
Returns:
Path to file.
"""
return Path(
f"{self.naming_pattern[0]}{file_idx:0{self.n_digits}d}{self.naming_pattern[1]}"
)
def _load_episode(self, idx: int, window_size: int) -> Dict[str, np.ndarray]:
"""
Load consecutive frames saved as individual files on disk and combine to episode dict.
Args:
idx: Index of first frame.
window_size: Length of sampled episode.
Returns:
episode: Dict of numpy arrays containing the episode where keys are the names of modalities.
"""
start_idx = self.episode_lookup[idx]
end_idx = start_idx + window_size
keys = list(chain(*self.observation_space.values()))
keys.remove("language")
keys.append("scene_obs")
episodes = [
self.load_file(self._get_episode_name(file_idx))
for file_idx in range(start_idx, end_idx)
]
episode = {key: np.stack([ep[key] for ep in episodes]) for key in keys}
if self.with_lang:
episode["language"] = self.lang_ann[self.lang_lookup[idx]]
if self.text_aug:
task = self.lang_task[self.lang_lookup[idx]]
enrich_lang = random.choice(self.enrich_lang[task] + [episode["language"]])
episode["language"] = enrich_lang
return episode
def _build_file_indices_lang(
self, abs_datasets_dir: Path
):
"""
This method builds the mapping from index to file_name used for loading the episodes of the language dataset.
Args:
abs_datasets_dir: Absolute path of the directory containing the dataset.
Returns:
episode_lookup: Mapping from training example index to episode (file) index.
lang_lookup: Mapping from training example to index of language instruction.
lang_ann: Language embeddings.
"""
assert abs_datasets_dir.is_dir()
episode_lookup = []
try:
print(
"trying to load lang data from: ",
abs_datasets_dir / self.lang_folder / "auto_lang_ann.npy",
)
lang_data = np.load(
abs_datasets_dir / self.lang_folder / "auto_lang_ann.npy",
allow_pickle=True,
).item()
except Exception:
print(
"Exception, trying to load lang data from: ",
abs_datasets_dir / "auto_lang_ann.npy",
)
lang_data = np.load(
abs_datasets_dir / "auto_lang_ann.npy", allow_pickle=True
).item()
ep_start_end_ids = lang_data["info"]["indx"] # each of them are 64
lang_ann = lang_data["language"]["ann"] # length total number of annotations
lang_task = lang_data["language"]["task"]
lang_lookup = []
partial_st_ed_list = load_partial_traj_data()
for i, (start_idx, end_idx) in enumerate(ep_start_end_ids):
if self.partial_data:
if (start_idx, end_idx) not in partial_st_ed_list:
continue
if self.pretrain:
start_idx = max(
start_idx,
end_idx + 1 - self.min_window_size - self.aux_lang_loss_window,
)
assert end_idx >= self.max_window_size
cnt = 0
for idx in range(start_idx, end_idx + 1 - self.min_window_size):
if cnt % self.skip_frames == 0:
lang_lookup.append(i)
episode_lookup.append(idx)
cnt += 1
return np.array(episode_lookup), lang_lookup, lang_ann, lang_task
def _build_file_indices(self, abs_datasets_dir: Path) -> np.ndarray:
"""
This method builds the mapping from index to file_name used for loading the episodes of the non language
dataset.
Args:
abs_datasets_dir: Absolute path of the directory containing the dataset.
Returns:
episode_lookup: Mapping from training example index to episode (file) index.
"""
assert abs_datasets_dir.is_dir()
episode_lookup = []
ep_start_end_ids = np.load(abs_datasets_dir / "ep_start_end_ids.npy")
logger.info(
f'Found "ep_start_end_ids.npy" with {len(ep_start_end_ids)} episodes.'
)
for start_idx, end_idx in ep_start_end_ids:
assert end_idx > self.max_window_size
for idx in range(start_idx, end_idx + 1 - self.min_window_size):
episode_lookup.append(idx)
return np.array(episode_lookup)
def collater(self, sample):
action_tensors = torch.from_numpy(np.array([np.stack(s["actions"]) for s in sample]))
state_tensors = torch.from_numpy(np.array([np.stack(s["robot_obs"]) for s in sample]))
image_tensors = torch.stack([self.image_fn(s["rgb_obs"]["rgb_static"]) for s in sample])
gripper_tensors = torch.stack([self.image_fn(s["rgb_obs"]["rgb_gripper"]) for s in sample])
stacked_language = [s["lang"] for s in sample]
text_tensors, attention_mask = self.text_fn(stacked_language)
if self.rgb_pad != -1:
bs, seq_len = image_tensors.shape[:2]
if self.traj_cons:
image_tensors = self.rgb_shift.forward_traj(image_tensors)
else:
image_tensors = image_tensors.view(bs*seq_len, *image_tensors.shape[2:])
image_tensors = self.rgb_shift(image_tensors)
image_tensors = image_tensors.view(bs, seq_len, *image_tensors.shape[1:])
if self.gripper_pad != -1:
bs, seq_len = gripper_tensors.shape[:2]
if self.traj_cons:
gripper_tensors = self.gripper_shift.forward_traj(gripper_tensors)
else:
gripper_tensors = gripper_tensors.view(bs * seq_len, *gripper_tensors.shape[2:])
gripper_tensors = self.gripper_shift(gripper_tensors)
gripper_tensors = gripper_tensors.view(bs, seq_len, *gripper_tensors.shape[1:])
robot_obs = torch.zeros(1)
if self.act_step != 1:
actions = torch.zeros((action_tensors.shape[0], self.window_size, self.act_step, action_tensors.shape[-1]))
for b in range(action_tensors.shape[0]):
for ix in range(self.window_size):
actions[b, ix] = action_tensors[b, ix:ix+self.act_step]
robot_obs = torch.zeros((action_tensors.shape[0], self.window_size, self.act_step, state_tensors.shape[-1]))
for b in range(action_tensors.shape[0]):
for ix in range(self.window_size):
robot_obs[b, ix] = state_tensors[b, ix:ix+self.act_step]
robot_obs = torch.cat([robot_obs[..., :6], robot_obs[..., [-1]]], dim=-1)
action_tensors = actions
image_tensors = image_tensors[:, :-(self.act_step-1)]
gripper_tensors = gripper_tensors[:, :-(self.act_step-1)]
state_tensors = state_tensors[:, :-(self.act_step-1)]
return image_tensors, (text_tensors, attention_mask), action_tensors, gripper_tensors, state_tensors, robot_obs
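    # Hedged shape sketch (sizes assumed): with batch size B, window_size W, act_step K > 1
    # and 7-dim relative actions, collater() returns image/gripper tensors cropped to W
    # frames, action_tensors of shape (B, W, K, 7) built from sliding K-step chunks, and
    # robot_obs of shape (B, W, K, 7) (first 6 proprioceptive dims plus the gripper dim).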
class CalvinDataset(Dataset):
"""Naive implementation of dataset to store
calvin debug dataset, may be changed to WDS for the full dataset
"""
def __init__(self, image_fn, text_fn, dataset_path, is_train=True) -> None:
super().__init__()
self.dataset_path = dataset_path
self.image_fn = image_fn
self.text_fn = text_fn
tag = "training" if is_train else "validation"
self.file_prefix = f"{self.dataset_path}/{tag}"
self.anns = np.load(
f"{self.file_prefix}/lang_annotations/auto_lang_ann.npy", allow_pickle=True
).item()
def __len__(self):
return len(self.anns["info"]["indx"])
def __getitem__(self, index):
task = self.anns["language"]["task"][index]
text = self.anns["language"]["ann"][index]
st, ed = self.anns["info"]["indx"][index]
# CJ: randomly sample a datapoint in the episode
frame = random.randint(st, ed)
frame = np.load(
f"{self.file_prefix}/episode_{frame:07d}.npz"
) # , allow_pickle=True (lazy load)
rgb_static = Image.fromarray(frame["rgb_static"])
rgb_gripper = Image.fromarray(frame["rgb_gripper"])
actions = np.array(frame["rel_actions"])
actions[..., 6:] = (actions[..., 6:] + 1) // 2
return rgb_static, text, actions
def collater(self, sample):
images = [s[0] for s in sample]
texts = [s[1] for s in sample]
actions = [s[2] for s in sample]
image_tensors = self.image_fn(images)
text_tensors = self.text_fn(texts)
action_tensors = torch.FloatTensor(np.stack(actions))
return image_tensors, text_tensors, action_tensors
def load_pkl(filename: Path) -> Dict[str, np.ndarray]:
with open(filename, "rb") as f:
return pickle.load(f)
def load_npz(filename: Path) -> Dict[str, np.ndarray]:
return np.load(filename.as_posix())
class SharedEpoch:
def __init__(self, epoch: int = 0):
self.shared_epoch = Value("i", epoch)
def set_value(self, epoch):
self.shared_epoch.value = epoch
def get_value(self):
return self.shared_epoch.value
@dataclass
class DataInfo:
dataloader: DataLoader
sampler: DistributedSampler = None
shared_epoch: SharedEpoch = None
dataset: Dataset = None
def set_epoch(self, epoch):
if self.shared_epoch is not None:
self.shared_epoch.set_value(epoch)
if self.sampler is not None and isinstance(self.sampler, DistributedSampler):
self.sampler.set_epoch(epoch)
def preprocess_image(sample, image_processor):
image = [image_processor(s).unsqueeze(0) for s in sample]
image = torch.cat(image, dim=0)
# apply random horizontal flip and color jitter
return image
def preprocess_text_calvin(sample, tokenizer):
tokenizer.padding_side = "right"
sample = [
# (f"{s.strip()}{tokenizer.eos_token}")
# for s in sample
(f"<image>{s.strip()}<|endofchunk|>{tokenizer.eos_token}") for s in sample
]
text = tokenizer(
sample,
max_length=32,
padding="longest",
truncation="only_first",
return_tensors="pt",
)
return text["input_ids"], text["attention_mask"]
def preprocess_interleaved(sample, tokenizer, clip_processor, sim_threshold):
info = json.loads(sample[0])
tar_file_obj = io.BytesIO(sample[1])
image_tar = tarfile.open(fileobj=tar_file_obj)
sentences = info["text_list"]
images, image_idxs = [], []
for image_path, sim in zip(info["image_info"], info["similarity_matrix"]):
# pick one image per sentence
if info["image_info"][image_path]["matched_text_index"] in image_idxs:
continue
rawbytes = image_tar.extractfile(
os.path.join(image_tar.getnames()[0], image_path)
).read()
# filter to images >= 10KB
if len(rawbytes) // 1000 <= MIN_KB:
continue
if sim[info["image_info"][image_path]["matched_text_index"]] < sim_threshold:
continue
image = Image.open(io.BytesIO(rawbytes)).convert("RGB")
images.append(image)
image_idxs.append(info["image_info"][image_path]["matched_text_index"])
if len(images) == 0:
raise ValueError("No images in sample")
# filter out images that are exact duplicates
images_tensors = preprocess_image(images, clip_processor)
keep_ixs = range(min(len(images_tensors), MAX_NUM_IMAGES))
images_tensors = images_tensors[keep_ixs]
image_idxs = [image_idxs[ix] for ix in keep_ixs]
# pad to 5 images
if len(images_tensors) < MAX_NUM_IMAGES:
zero_padding = torch.zeros(
(MAX_NUM_IMAGES - len(images_tensors), 3, 224, 224), dtype=torch.float
)
images_tensors = torch.cat((images_tensors, zero_padding), dim=0)
# add in <image> and <eoc> tokens
# eoc after sentence = "sentence loss"
for ix in image_idxs:
sentences[ix] = f"<|endofchunk|><image>{sentences[ix]}"
text = " ".join(sentences)
text = text.replace("<|endofchunk|>", "", 1) # but remove first eoc
# whitespace cleanup
text = (
text.replace(" <|endofchunk|>", "<|endofchunk|>")
.replace("<image> ", "<image>")
.replace(" <image>", "<image>")
)
text = f"{text}<|endofchunk|>{tokenizer.eos_token}"
tokenizer.padding_side = "right"
text_tensor = tokenizer(
text, max_length=256, truncation=True, padding="max_length", return_tensors="pt"
)
# reject sequences with too few images (after truncation)
num_images = torch.count_nonzero(
text_tensor["input_ids"]
== tokenizer.additional_special_tokens_ids[
tokenizer.additional_special_tokens.index("<image>")
]
)
if num_images == 0:
raise ValueError("No images in sample")
elif (
num_images == 1 and random.random() <= 0.5
): # 50% chance of keeping single image samples
raise ValueError("Only one image in sample")
return (
images_tensors,
(text_tensor["input_ids"], text_tensor["attention_mask"]),
)
def get_coco_dataset(args, image_processor, tokenizer, epoch=0):
coco_data_dir = "path/to/coco/train2014"
coco_ann = "path/to/coco/annotations/captions_train2014.json"
preprocess_text_fn = functools.partial(preprocess_text_calvin, tokenizer=tokenizer)
coco_dataset = CaptionDataset(coco_data_dir, coco_ann, preprocess_text_fn, image_processor)
sampler = DistributedSampler(
coco_dataset,
num_replicas=args.world_size,
rank=args.rank,
shuffle=True,
seed=args.seed,
drop_last=True,
)
dataloader = DataLoader(
coco_dataset,
batch_size=args.batch_size_vl,
pin_memory=False,
num_workers=args.workers,
prefetch_factor=3,
sampler=sampler,
persistent_workers=True,
collate_fn=coco_dataset.collator,
drop_last=True
)
return dataloader
def get_vqa_dataset(args, image_processor, tokenizer, epoch=0):
vqa_data_dir = "path/to/vqav2/train2014"
vqa_questions = "path/to/vqav2/v2_OpenEnded_mscoco_train2014_questions.json"
vqa_ann = "path/to/vqav2/v2_mscoco_train2014_annotations.json"
preprocess_text_fn = functools.partial(preprocess_text_calvin, tokenizer=tokenizer)
vqa_dataset = VQADataset(vqa_data_dir, vqa_questions, vqa_ann, preprocess_text_fn, image_processor)
sampler = DistributedSampler(
vqa_dataset,
num_replicas=args.world_size,
rank=args.rank,
shuffle=True,
seed=args.seed,
drop_last=True,
)
dataloader = DataLoader(
vqa_dataset,
batch_size=args.batch_size_vl,
pin_memory=False,
num_workers=args.workers,
prefetch_factor=3,
sampler=sampler,
persistent_workers=True,
collate_fn=vqa_dataset.collator,
drop_last=True
)
return dataloader
def get_calvin_dataset(args, image_processor, tokenizer, epoch=0, floor=False):
dataset_path = args.calvin_dataset
# ann is dict including language and info
shared_epoch = SharedEpoch(epoch=epoch)
preprocess_image_fn = functools.partial(
preprocess_image, image_processor=image_processor
)
preprocess_text_fn = functools.partial(preprocess_text_calvin, tokenizer=tokenizer)
calvin_dataset = DiskCalvinDataset(
datasets_dir=Path(dataset_path) / "training",
image_fn=preprocess_image_fn,
text_fn=preprocess_text_fn,
window_size=args.window_size,
rgb_pad=args.rgb_pad,
gripper_pad=args.gripper_pad,
traj_cons=args.traj_cons,
text_aug=args.text_aug,
dif_ws=args.dif_ws,
min_window_size=args.min_window_size,
max_window_size=args.max_window_size,
act_step=args.multi_step_action,
partial_data=args.partial_data
)
round_fn = math.floor if floor else math.ceil
num_samples = len(calvin_dataset)
global_batch_size = args.batch_size_calvin * args.world_size
num_batches = round_fn(num_samples / global_batch_size)
num_workers = max(1, args.workers)
num_worker_batches = round_fn(num_batches / num_workers) # per dataloader worker
num_batches = num_worker_batches * num_workers
num_samples = num_batches * global_batch_size
sampler = DistributedSampler(
calvin_dataset,
num_replicas=args.world_size,
rank=args.rank,
shuffle=True,
seed=args.seed,
drop_last=True,
)
# the batch_size and num_workers are per-GPU !
dataloader = DataLoader(
calvin_dataset,
batch_size=args.batch_size_calvin,
pin_memory=False,
num_workers=num_workers,
prefetch_factor=3,
sampler=sampler,
persistent_workers=True,
collate_fn=calvin_dataset.collater,
drop_last=True
)
# dataloader = DataLoader(calvin_dataset, batch_size=args.batch_size_calvin)
# add meta-data to dataloader instance for convenience
dataloader.num_batches = num_batches
dataloader.num_samples = num_samples
return DataInfo(dataloader=dataloader, shared_epoch=shared_epoch, sampler=sampler, dataset=calvin_dataset)
def get_real_dataset(args, image_processor, tokenizer, epoch=0, floor=False):
dataset_path = args.calvin_dataset
# ann is dict including language and info
shared_epoch = SharedEpoch(epoch=epoch)
preprocess_image_fn = functools.partial(
preprocess_image, image_processor=image_processor
)
preprocess_text_fn = functools.partial(preprocess_text_calvin, tokenizer=tokenizer)
| calvin_dataset = RealDatasetHDF5( | 0 | 2023-11-02 01:36:23+00:00 | 12k |
microsoft/monitors4codegen | src/monitors4codegen/multilspy/lsp_protocol_handler/server.py | [
{
"identifier": "LspNotification",
"path": "src/monitors4codegen/multilspy/lsp_protocol_handler/lsp_requests.py",
"snippet": "class LspNotification:\n def __init__(self, send_notification):\n self.send_notification = send_notification\n\n def did_change_workspace_folders(\n self, params: lsp_types.DidChangeWorkspaceFoldersParams\n ) -> None:\n \"\"\"The `workspace/didChangeWorkspaceFolders` notification is sent from the client to the server when the workspace\n folder configuration changes.\"\"\"\n return self.send_notification(\"workspace/didChangeWorkspaceFolders\", params)\n\n def cancel_work_done_progress(\n self, params: lsp_types.WorkDoneProgressCancelParams\n ) -> None:\n \"\"\"The `window/workDoneProgress/cancel` notification is sent from the client to the server to cancel a progress\n initiated on the server side.\"\"\"\n return self.send_notification(\"window/workDoneProgress/cancel\", params)\n\n def did_create_files(self, params: lsp_types.CreateFilesParams) -> None:\n \"\"\"The did create files notification is sent from the client to the server when\n files were created from within the client.\n\n @since 3.16.0\"\"\"\n return self.send_notification(\"workspace/didCreateFiles\", params)\n\n def did_rename_files(self, params: lsp_types.RenameFilesParams) -> None:\n \"\"\"The did rename files notification is sent from the client to the server when\n files were renamed from within the client.\n\n @since 3.16.0\"\"\"\n return self.send_notification(\"workspace/didRenameFiles\", params)\n\n def did_delete_files(self, params: lsp_types.DeleteFilesParams) -> None:\n \"\"\"The will delete files request is sent from the client to the server before files are actually\n deleted as long as the deletion is triggered from within the client.\n\n @since 3.16.0\"\"\"\n return self.send_notification(\"workspace/didDeleteFiles\", params)\n\n def did_open_notebook_document(\n self, params: lsp_types.DidOpenNotebookDocumentParams\n ) -> None:\n \"\"\"A notification sent when a notebook opens.\n\n @since 3.17.0\"\"\"\n return self.send_notification(\"notebookDocument/didOpen\", params)\n\n def did_change_notebook_document(\n self, params: lsp_types.DidChangeNotebookDocumentParams\n ) -> None:\n return self.send_notification(\"notebookDocument/didChange\", params)\n\n def did_save_notebook_document(\n self, params: lsp_types.DidSaveNotebookDocumentParams\n ) -> None:\n \"\"\"A notification sent when a notebook document is saved.\n\n @since 3.17.0\"\"\"\n return self.send_notification(\"notebookDocument/didSave\", params)\n\n def did_close_notebook_document(\n self, params: lsp_types.DidCloseNotebookDocumentParams\n ) -> None:\n \"\"\"A notification sent when a notebook closes.\n\n @since 3.17.0\"\"\"\n return self.send_notification(\"notebookDocument/didClose\", params)\n\n def initialized(self, params: lsp_types.InitializedParams) -> None:\n \"\"\"The initialized notification is sent from the client to the\n server after the client is fully initialized and the server\n is allowed to send requests from the server to the client.\"\"\"\n return self.send_notification(\"initialized\", params)\n\n def exit(self) -> None:\n \"\"\"The exit event is sent from the client to the server to\n ask the server to exit its process.\"\"\"\n return self.send_notification(\"exit\")\n\n def workspace_did_change_configuration(\n self, params: lsp_types.DidChangeConfigurationParams\n ) -> None:\n \"\"\"The configuration change notification is sent from the client to the server\n when the client's configuration has changed. 
The notification contains\n the changed configuration as defined by the language client.\"\"\"\n return self.send_notification(\"workspace/didChangeConfiguration\", params)\n\n def did_open_text_document(\n self, params: lsp_types.DidOpenTextDocumentParams\n ) -> None:\n \"\"\"The document open notification is sent from the client to the server to signal\n newly opened text documents. The document's truth is now managed by the client\n and the server must not try to read the document's truth using the document's\n uri. Open in this sense means it is managed by the client. It doesn't necessarily\n mean that its content is presented in an editor. An open notification must not\n be sent more than once without a corresponding close notification send before.\n This means open and close notification must be balanced and the max open count\n is one.\"\"\"\n return self.send_notification(\"textDocument/didOpen\", params)\n\n def did_change_text_document(\n self, params: lsp_types.DidChangeTextDocumentParams\n ) -> None:\n \"\"\"The document change notification is sent from the client to the server to signal\n changes to a text document.\"\"\"\n return self.send_notification(\"textDocument/didChange\", params)\n\n def did_close_text_document(\n self, params: lsp_types.DidCloseTextDocumentParams\n ) -> None:\n \"\"\"The document close notification is sent from the client to the server when\n the document got closed in the client. The document's truth now exists where\n the document's uri points to (e.g. if the document's uri is a file uri the\n truth now exists on disk). As with the open notification the close notification\n is about managing the document's content. Receiving a close notification\n doesn't mean that the document was open in an editor before. A close\n notification requires a previous open notification to be sent.\"\"\"\n return self.send_notification(\"textDocument/didClose\", params)\n\n def did_save_text_document(\n self, params: lsp_types.DidSaveTextDocumentParams\n ) -> None:\n \"\"\"The document save notification is sent from the client to the server when\n the document got saved in the client.\"\"\"\n return self.send_notification(\"textDocument/didSave\", params)\n\n def will_save_text_document(\n self, params: lsp_types.WillSaveTextDocumentParams\n ) -> None:\n \"\"\"A document will save notification is sent from the client to the server before\n the document is actually saved.\"\"\"\n return self.send_notification(\"textDocument/willSave\", params)\n\n def did_change_watched_files(\n self, params: lsp_types.DidChangeWatchedFilesParams\n ) -> None:\n \"\"\"The watched files notification is sent from the client to the server when\n the client detects changes to file watched by the language client.\"\"\"\n return self.send_notification(\"workspace/didChangeWatchedFiles\", params)\n\n def set_trace(self, params: lsp_types.SetTraceParams) -> None:\n return self.send_notification(\"$/setTrace\", params)\n\n def cancel_request(self, params: lsp_types.CancelParams) -> None:\n return self.send_notification(\"$/cancelRequest\", params)\n\n def progress(self, params: lsp_types.ProgressParams) -> None:\n return self.send_notification(\"$/progress\", params)"
},
{
"identifier": "LspRequest",
"path": "src/monitors4codegen/multilspy/lsp_protocol_handler/lsp_requests.py",
"snippet": "class LspRequest:\n def __init__(self, send_request):\n self.send_request = send_request\n\n async def implementation(\n self, params: lsp_types.ImplementationParams\n ) -> Union[\"lsp_types.Definition\", List[\"lsp_types.LocationLink\"], None]:\n \"\"\"A request to resolve the implementation locations of a symbol at a given text\n document position. The request's parameter is of type [TextDocumentPositionParams]\n (#TextDocumentPositionParams) the response is of type {@link Definition} or a\n Thenable that resolves to such.\"\"\"\n return await self.send_request(\"textDocument/implementation\", params)\n\n async def type_definition(\n self, params: lsp_types.TypeDefinitionParams\n ) -> Union[\"lsp_types.Definition\", List[\"lsp_types.LocationLink\"], None]:\n \"\"\"A request to resolve the type definition locations of a symbol at a given text\n document position. The request's parameter is of type [TextDocumentPositionParams]\n (#TextDocumentPositionParams) the response is of type {@link Definition} or a\n Thenable that resolves to such.\"\"\"\n return await self.send_request(\"textDocument/typeDefinition\", params)\n\n async def document_color(\n self, params: lsp_types.DocumentColorParams\n ) -> List[\"lsp_types.ColorInformation\"]:\n \"\"\"A request to list all color symbols found in a given text document. The request's\n parameter is of type {@link DocumentColorParams} the\n response is of type {@link ColorInformation ColorInformation[]} or a Thenable\n that resolves to such.\"\"\"\n return await self.send_request(\"textDocument/documentColor\", params)\n\n async def color_presentation(\n self, params: lsp_types.ColorPresentationParams\n ) -> List[\"lsp_types.ColorPresentation\"]:\n \"\"\"A request to list all presentation for a color. The request's\n parameter is of type {@link ColorPresentationParams} the\n response is of type {@link ColorInformation ColorInformation[]} or a Thenable\n that resolves to such.\"\"\"\n return await self.send_request(\"textDocument/colorPresentation\", params)\n\n async def folding_range(\n self, params: lsp_types.FoldingRangeParams\n ) -> Union[List[\"lsp_types.FoldingRange\"], None]:\n \"\"\"A request to provide folding ranges in a document. The request's\n parameter is of type {@link FoldingRangeParams}, the\n response is of type {@link FoldingRangeList} or a Thenable\n that resolves to such.\"\"\"\n return await self.send_request(\"textDocument/foldingRange\", params)\n\n async def declaration(\n self, params: lsp_types.DeclarationParams\n ) -> Union[\"lsp_types.Declaration\", List[\"lsp_types.LocationLink\"], None]:\n \"\"\"A request to resolve the type definition locations of a symbol at a given text\n document position. The request's parameter is of type [TextDocumentPositionParams]\n (#TextDocumentPositionParams) the response is of type {@link Declaration}\n or a typed array of {@link DeclarationLink} or a Thenable that resolves\n to such.\"\"\"\n return await self.send_request(\"textDocument/declaration\", params)\n\n async def selection_range(\n self, params: lsp_types.SelectionRangeParams\n ) -> Union[List[\"lsp_types.SelectionRange\"], None]:\n \"\"\"A request to provide selection ranges in a document. 
The request's\n parameter is of type {@link SelectionRangeParams}, the\n response is of type {@link SelectionRange SelectionRange[]} or a Thenable\n that resolves to such.\"\"\"\n return await self.send_request(\"textDocument/selectionRange\", params)\n\n async def prepare_call_hierarchy(\n self, params: lsp_types.CallHierarchyPrepareParams\n ) -> Union[List[\"lsp_types.CallHierarchyItem\"], None]:\n \"\"\"A request to result a `CallHierarchyItem` in a document at a given position.\n Can be used as an input to an incoming or outgoing call hierarchy.\n\n @since 3.16.0\"\"\"\n return await self.send_request(\"textDocument/prepareCallHierarchy\", params)\n\n async def incoming_calls(\n self, params: lsp_types.CallHierarchyIncomingCallsParams\n ) -> Union[List[\"lsp_types.CallHierarchyIncomingCall\"], None]:\n \"\"\"A request to resolve the incoming calls for a given `CallHierarchyItem`.\n\n @since 3.16.0\"\"\"\n return await self.send_request(\"callHierarchy/incomingCalls\", params)\n\n async def outgoing_calls(\n self, params: lsp_types.CallHierarchyOutgoingCallsParams\n ) -> Union[List[\"lsp_types.CallHierarchyOutgoingCall\"], None]:\n \"\"\"A request to resolve the outgoing calls for a given `CallHierarchyItem`.\n\n @since 3.16.0\"\"\"\n return await self.send_request(\"callHierarchy/outgoingCalls\", params)\n\n async def semantic_tokens_full(\n self, params: lsp_types.SemanticTokensParams\n ) -> Union[\"lsp_types.SemanticTokens\", None]:\n \"\"\"@since 3.16.0\"\"\"\n return await self.send_request(\"textDocument/semanticTokens/full\", params)\n\n async def semantic_tokens_delta(\n self, params: lsp_types.SemanticTokensDeltaParams\n ) -> Union[\"lsp_types.SemanticTokens\", \"lsp_types.SemanticTokensDelta\", None]:\n \"\"\"@since 3.16.0\"\"\"\n return await self.send_request(\"textDocument/semanticTokens/full/delta\", params)\n\n async def semantic_tokens_range(\n self, params: lsp_types.SemanticTokensRangeParams\n ) -> Union[\"lsp_types.SemanticTokens\", None]:\n \"\"\"@since 3.16.0\"\"\"\n return await self.send_request(\"textDocument/semanticTokens/range\", params)\n\n async def linked_editing_range(\n self, params: lsp_types.LinkedEditingRangeParams\n ) -> Union[\"lsp_types.LinkedEditingRanges\", None]:\n \"\"\"A request to provide ranges that can be edited together.\n\n @since 3.16.0\"\"\"\n return await self.send_request(\"textDocument/linkedEditingRange\", params)\n\n async def will_create_files(\n self, params: lsp_types.CreateFilesParams\n ) -> Union[\"lsp_types.WorkspaceEdit\", None]:\n \"\"\"The will create files request is sent from the client to the server before files are actually\n created as long as the creation is triggered from within the client.\n\n @since 3.16.0\"\"\"\n return await self.send_request(\"workspace/willCreateFiles\", params)\n\n async def will_rename_files(\n self, params: lsp_types.RenameFilesParams\n ) -> Union[\"lsp_types.WorkspaceEdit\", None]:\n \"\"\"The will rename files request is sent from the client to the server before files are actually\n renamed as long as the rename is triggered from within the client.\n\n @since 3.16.0\"\"\"\n return await self.send_request(\"workspace/willRenameFiles\", params)\n\n async def will_delete_files(\n self, params: lsp_types.DeleteFilesParams\n ) -> Union[\"lsp_types.WorkspaceEdit\", None]:\n \"\"\"The did delete files notification is sent from the client to the server when\n files were deleted from within the client.\n\n @since 3.16.0\"\"\"\n return await self.send_request(\"workspace/willDeleteFiles\", 
params)\n\n async def moniker(\n self, params: lsp_types.MonikerParams\n ) -> Union[List[\"lsp_types.Moniker\"], None]:\n \"\"\"A request to get the moniker of a symbol at a given text document position.\n The request parameter is of type {@link TextDocumentPositionParams}.\n The response is of type {@link Moniker Moniker[]} or `null`.\"\"\"\n return await self.send_request(\"textDocument/moniker\", params)\n\n async def prepare_type_hierarchy(\n self, params: lsp_types.TypeHierarchyPrepareParams\n ) -> Union[List[\"lsp_types.TypeHierarchyItem\"], None]:\n \"\"\"A request to result a `TypeHierarchyItem` in a document at a given position.\n Can be used as an input to a subtypes or supertypes type hierarchy.\n\n @since 3.17.0\"\"\"\n return await self.send_request(\"textDocument/prepareTypeHierarchy\", params)\n\n async def type_hierarchy_supertypes(\n self, params: lsp_types.TypeHierarchySupertypesParams\n ) -> Union[List[\"lsp_types.TypeHierarchyItem\"], None]:\n \"\"\"A request to resolve the supertypes for a given `TypeHierarchyItem`.\n\n @since 3.17.0\"\"\"\n return await self.send_request(\"typeHierarchy/supertypes\", params)\n\n async def type_hierarchy_subtypes(\n self, params: lsp_types.TypeHierarchySubtypesParams\n ) -> Union[List[\"lsp_types.TypeHierarchyItem\"], None]:\n \"\"\"A request to resolve the subtypes for a given `TypeHierarchyItem`.\n\n @since 3.17.0\"\"\"\n return await self.send_request(\"typeHierarchy/subtypes\", params)\n\n async def inline_value(\n self, params: lsp_types.InlineValueParams\n ) -> Union[List[\"lsp_types.InlineValue\"], None]:\n \"\"\"A request to provide inline values in a document. The request's parameter is of\n type {@link InlineValueParams}, the response is of type\n {@link InlineValue InlineValue[]} or a Thenable that resolves to such.\n\n @since 3.17.0\"\"\"\n return await self.send_request(\"textDocument/inlineValue\", params)\n\n async def inlay_hint(\n self, params: lsp_types.InlayHintParams\n ) -> Union[List[\"lsp_types.InlayHint\"], None]:\n \"\"\"A request to provide inlay hints in a document. 
The request's parameter is of\n type {@link InlayHintsParams}, the response is of type\n {@link InlayHint InlayHint[]} or a Thenable that resolves to such.\n\n @since 3.17.0\"\"\"\n return await self.send_request(\"textDocument/inlayHint\", params)\n\n async def resolve_inlay_hint(\n self, params: lsp_types.InlayHint\n ) -> \"lsp_types.InlayHint\":\n \"\"\"A request to resolve additional properties for an inlay hint.\n The request's parameter is of type {@link InlayHint}, the response is\n of type {@link InlayHint} or a Thenable that resolves to such.\n\n @since 3.17.0\"\"\"\n return await self.send_request(\"inlayHint/resolve\", params)\n\n async def text_document_diagnostic(\n self, params: lsp_types.DocumentDiagnosticParams\n ) -> \"lsp_types.DocumentDiagnosticReport\":\n \"\"\"The document diagnostic request definition.\n\n @since 3.17.0\"\"\"\n return await self.send_request(\"textDocument/diagnostic\", params)\n\n async def workspace_diagnostic(\n self, params: lsp_types.WorkspaceDiagnosticParams\n ) -> \"lsp_types.WorkspaceDiagnosticReport\":\n \"\"\"The workspace diagnostic request definition.\n\n @since 3.17.0\"\"\"\n return await self.send_request(\"workspace/diagnostic\", params)\n\n async def initialize(\n self, params: lsp_types.InitializeParams\n ) -> \"lsp_types.InitializeResult\":\n \"\"\"The initialize request is sent from the client to the server.\n It is sent once as the request after starting up the server.\n The requests parameter is of type {@link InitializeParams}\n the response if of type {@link InitializeResult} of a Thenable that\n resolves to such.\"\"\"\n return await self.send_request(\"initialize\", params)\n\n async def shutdown(self) -> None:\n \"\"\"A shutdown request is sent from the client to the server.\n It is sent once when the client decides to shutdown the\n server. The only notification that is sent after a shutdown request\n is the exit event.\"\"\"\n return await self.send_request(\"shutdown\")\n\n async def will_save_wait_until(\n self, params: lsp_types.WillSaveTextDocumentParams\n ) -> Union[List[\"lsp_types.TextEdit\"], None]:\n \"\"\"A document will save request is sent from the client to the server before\n the document is actually saved. The request can return an array of TextEdits\n which will be applied to the text document before it is saved. Please note that\n clients might drop results if computing the text edits took too long or if a\n server constantly fails on this request. This is done to keep the save fast and\n reliable.\"\"\"\n return await self.send_request(\"textDocument/willSaveWaitUntil\", params)\n\n async def completion(\n self, params: lsp_types.CompletionParams\n ) -> Union[List[\"lsp_types.CompletionItem\"], \"lsp_types.CompletionList\", None]:\n \"\"\"Request to request completion at a given text document position. The request's\n parameter is of type {@link TextDocumentPosition} the response\n is of type {@link CompletionItem CompletionItem[]} or {@link CompletionList}\n or a Thenable that resolves to such.\n\n The request can delay the computation of the {@link CompletionItem.detail `detail`}\n and {@link CompletionItem.documentation `documentation`} properties to the `completionItem/resolve`\n request. 
However, properties that are needed for the initial sorting and filtering, like `sortText`,\n `filterText`, `insertText`, and `textEdit`, must not be changed during resolve.\n \"\"\"\n return await self.send_request(\"textDocument/completion\", params)\n\n async def resolve_completion_item(\n self, params: lsp_types.CompletionItem\n ) -> \"lsp_types.CompletionItem\":\n \"\"\"Request to resolve additional information for a given completion item.The request's\n parameter is of type {@link CompletionItem} the response\n is of type {@link CompletionItem} or a Thenable that resolves to such.\"\"\"\n return await self.send_request(\"completionItem/resolve\", params)\n\n async def hover(\n self, params: lsp_types.HoverParams\n ) -> Union[\"lsp_types.Hover\", None]:\n \"\"\"Request to request hover information at a given text document position. The request's\n parameter is of type {@link TextDocumentPosition} the response is of\n type {@link Hover} or a Thenable that resolves to such.\"\"\"\n return await self.send_request(\"textDocument/hover\", params)\n\n async def signature_help(\n self, params: lsp_types.SignatureHelpParams\n ) -> Union[\"lsp_types.SignatureHelp\", None]:\n return await self.send_request(\"textDocument/signatureHelp\", params)\n\n async def definition(\n self, params: lsp_types.DefinitionParams\n ) -> Union[\"lsp_types.Definition\", List[\"lsp_types.LocationLink\"], None]:\n \"\"\"A request to resolve the definition location of a symbol at a given text\n document position. The request's parameter is of type [TextDocumentPosition]\n (#TextDocumentPosition) the response is of either type {@link Definition}\n or a typed array of {@link DefinitionLink} or a Thenable that resolves\n to such.\"\"\"\n return await self.send_request(\"textDocument/definition\", params)\n\n async def references(\n self, params: lsp_types.ReferenceParams\n ) -> Union[List[\"lsp_types.Location\"], None]:\n \"\"\"A request to resolve project-wide references for the symbol denoted\n by the given text document position. The request's parameter is of\n type {@link ReferenceParams} the response is of type\n {@link Location Location[]} or a Thenable that resolves to such.\"\"\"\n return await self.send_request(\"textDocument/references\", params)\n\n async def document_highlight(\n self, params: lsp_types.DocumentHighlightParams\n ) -> Union[List[\"lsp_types.DocumentHighlight\"], None]:\n \"\"\"Request to resolve a {@link DocumentHighlight} for a given\n text document position. The request's parameter is of type [TextDocumentPosition]\n (#TextDocumentPosition) the request response is of type [DocumentHighlight[]]\n (#DocumentHighlight) or a Thenable that resolves to such.\"\"\"\n return await self.send_request(\"textDocument/documentHighlight\", params)\n\n async def document_symbol(\n self, params: lsp_types.DocumentSymbolParams\n ) -> Union[\n List[\"lsp_types.SymbolInformation\"], List[\"lsp_types.DocumentSymbol\"], None\n ]:\n \"\"\"A request to list all symbols found in a given text document. 
The request's\n parameter is of type {@link TextDocumentIdentifier} the\n response is of type {@link SymbolInformation SymbolInformation[]} or a Thenable\n that resolves to such.\"\"\"\n return await self.send_request(\"textDocument/documentSymbol\", params)\n\n async def code_action(\n self, params: lsp_types.CodeActionParams\n ) -> Union[List[Union[\"lsp_types.Command\", \"lsp_types.CodeAction\"]], None]:\n \"\"\"A request to provide commands for the given text document and range.\"\"\"\n return await self.send_request(\"textDocument/codeAction\", params)\n\n async def resolve_code_action(\n self, params: lsp_types.CodeAction\n ) -> \"lsp_types.CodeAction\":\n \"\"\"Request to resolve additional information for a given code action.The request's\n parameter is of type {@link CodeAction} the response\n is of type {@link CodeAction} or a Thenable that resolves to such.\"\"\"\n return await self.send_request(\"codeAction/resolve\", params)\n\n async def workspace_symbol(\n self, params: lsp_types.WorkspaceSymbolParams\n ) -> Union[\n List[\"lsp_types.SymbolInformation\"], List[\"lsp_types.WorkspaceSymbol\"], None\n ]:\n \"\"\"A request to list project-wide symbols matching the query string given\n by the {@link WorkspaceSymbolParams}. The response is\n of type {@link SymbolInformation SymbolInformation[]} or a Thenable that\n resolves to such.\n\n @since 3.17.0 - support for WorkspaceSymbol in the returned data. Clients\n need to advertise support for WorkspaceSymbols via the client capability\n `workspace.symbol.resolveSupport`.\n \"\"\"\n return await self.send_request(\"workspace/symbol\", params)\n\n async def resolve_workspace_symbol(\n self, params: lsp_types.WorkspaceSymbol\n ) -> \"lsp_types.WorkspaceSymbol\":\n \"\"\"A request to resolve the range inside the workspace\n symbol's location.\n\n @since 3.17.0\"\"\"\n return await self.send_request(\"workspaceSymbol/resolve\", params)\n\n async def code_lens(\n self, params: lsp_types.CodeLensParams\n ) -> Union[List[\"lsp_types.CodeLens\"], None]:\n \"\"\"A request to provide code lens for the given text document.\"\"\"\n return await self.send_request(\"textDocument/codeLens\", params)\n\n async def resolve_code_lens(\n self, params: lsp_types.CodeLens\n ) -> \"lsp_types.CodeLens\":\n \"\"\"A request to resolve a command for a given code lens.\"\"\"\n return await self.send_request(\"codeLens/resolve\", params)\n\n async def document_link(\n self, params: lsp_types.DocumentLinkParams\n ) -> Union[List[\"lsp_types.DocumentLink\"], None]:\n \"\"\"A request to provide document links\"\"\"\n return await self.send_request(\"textDocument/documentLink\", params)\n\n async def resolve_document_link(\n self, params: lsp_types.DocumentLink\n ) -> \"lsp_types.DocumentLink\":\n \"\"\"Request to resolve additional information for a given document link. 
The request's\n parameter is of type {@link DocumentLink} the response\n is of type {@link DocumentLink} or a Thenable that resolves to such.\"\"\"\n return await self.send_request(\"documentLink/resolve\", params)\n\n async def formatting(\n self, params: lsp_types.DocumentFormattingParams\n ) -> Union[List[\"lsp_types.TextEdit\"], None]:\n \"\"\"A request to to format a whole document.\"\"\"\n return await self.send_request(\"textDocument/formatting\", params)\n\n async def range_formatting(\n self, params: lsp_types.DocumentRangeFormattingParams\n ) -> Union[List[\"lsp_types.TextEdit\"], None]:\n \"\"\"A request to to format a range in a document.\"\"\"\n return await self.send_request(\"textDocument/rangeFormatting\", params)\n\n async def on_type_formatting(\n self, params: lsp_types.DocumentOnTypeFormattingParams\n ) -> Union[List[\"lsp_types.TextEdit\"], None]:\n \"\"\"A request to format a document on type.\"\"\"\n return await self.send_request(\"textDocument/onTypeFormatting\", params)\n\n async def rename(\n self, params: lsp_types.RenameParams\n ) -> Union[\"lsp_types.WorkspaceEdit\", None]:\n \"\"\"A request to rename a symbol.\"\"\"\n return await self.send_request(\"textDocument/rename\", params)\n\n async def prepare_rename(\n self, params: lsp_types.PrepareRenameParams\n ) -> Union[\"lsp_types.PrepareRenameResult\", None]:\n \"\"\"A request to test and perform the setup necessary for a rename.\n\n @since 3.16 - support for default behavior\"\"\"\n return await self.send_request(\"textDocument/prepareRename\", params)\n\n async def execute_command(\n self, params: lsp_types.ExecuteCommandParams\n ) -> Union[\"lsp_types.LSPAny\", None]:\n \"\"\"A request send from the client to the server to execute a command. The request might return\n a workspace edit which the client will apply to the workspace.\"\"\"\n return await self.send_request(\"workspace/executeCommand\", params)"
},
{
"identifier": "ErrorCodes",
"path": "src/monitors4codegen/multilspy/lsp_protocol_handler/lsp_types.py",
"snippet": "class ErrorCodes(IntEnum):\n \"\"\"Predefined error codes.\"\"\"\n\n ParseError = -32700\n InvalidRequest = -32600\n MethodNotFound = -32601\n InvalidParams = -32602\n InternalError = -32603\n ServerNotInitialized = -32002\n \"\"\" Error code indicating that a server received a notification or\n request before the server has received the `initialize` request. \"\"\"\n UnknownErrorCode = -32001"
}
] | import asyncio
import dataclasses
import json
import os
from typing import Any, Dict, List, Optional, Union
from .lsp_requests import LspNotification, LspRequest
from .lsp_types import ErrorCodes | 8,381 | def __init__(self, code: ErrorCodes, message: str) -> None:
super().__init__(message)
self.code = code
def to_lsp(self) -> StringDict:
return {"code": self.code, "message": super().__str__()}
@classmethod
def from_lsp(cls, d: StringDict) -> "Error":
return Error(d["code"], d["message"])
def __str__(self) -> str:
return f"{super().__str__()} ({self.code})"
def make_response(request_id: Any, params: PayloadLike) -> StringDict:
return {"jsonrpc": "2.0", "id": request_id, "result": params}
def make_error_response(request_id: Any, err: Error) -> StringDict:
return {"jsonrpc": "2.0", "id": request_id, "error": err.to_lsp()}
def make_notification(method: str, params: PayloadLike) -> StringDict:
return {"jsonrpc": "2.0", "method": method, "params": params}
def make_request(method: str, request_id: Any, params: PayloadLike) -> StringDict:
return {"jsonrpc": "2.0", "method": method, "id": request_id, "params": params}
class StopLoopException(Exception):
pass
def create_message(payload: PayloadLike):
body = json.dumps(payload, check_circular=False, ensure_ascii=False, separators=(",", ":")).encode(ENCODING)
return (
f"Content-Length: {len(body)}\r\n".encode(ENCODING),
"Content-Type: application/vscode-jsonrpc; charset=utf-8\r\n\r\n".encode(ENCODING),
body,
)
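# Illustrative wire format (payload assumed): the three byte strings returned above are
# written back-to-back, giving the LSP base-protocol framing, e.g.
#
#   header, content_type, body = create_message({"jsonrpc": "2.0", "method": "exit"})
#   # header       == b"Content-Length: 33\r\n"
#   # content_type == b"Content-Type: application/vscode-jsonrpc; charset=utf-8\r\n\r\n"
#   # body         == b'{"jsonrpc":"2.0","method":"exit"}'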
class MessageType:
error = 1
warning = 2
info = 3
log = 4
class Request:
def __init__(self) -> None:
self.cv = asyncio.Condition()
self.result: Optional[PayloadLike] = None
self.error: Optional[Error] = None
async def on_result(self, params: PayloadLike) -> None:
self.result = params
async with self.cv:
self.cv.notify()
async def on_error(self, err: Error) -> None:
self.error = err
async with self.cv:
self.cv.notify()
def content_length(line: bytes) -> Optional[int]:
if line.startswith(b"Content-Length: "):
_, value = line.split(b"Content-Length: ")
value = value.strip()
try:
return int(value)
except ValueError:
raise ValueError("Invalid Content-Length header: {}".format(value))
return None
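# Illustrative parse (header bytes assumed):
#
#   content_length(b"Content-Length: 123\r\n")   # -> 123
#   content_length(b"Content-Type: foo\r\n")     # -> None (not a Content-Length header)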
class LanguageServerHandler:
"""
    This class provides the implementation of a Python client for the Language Server Protocol.
A class that launches the language server and communicates with it
using the Language Server Protocol (LSP).
It provides methods for sending requests, responses, and notifications to the server
and for registering handlers for requests and notifications from the server.
Uses JSON-RPC 2.0 for communication with the server over stdin/stdout.
Attributes:
send: A LspRequest object that can be used to send requests to the server and
await the responses.
notify: A LspNotification object that can be used to send notifications to the server.
cmd: A string that represents the command to launch the language server process.
process: A subprocess.Popen object that represents the language server process.
_received_shutdown: A boolean flag that indicates whether the client has received
a shutdown request from the server.
request_id: An integer that represents the next available request id for the client.
_response_handlers: A dictionary that maps request ids to Request objects that
store the results or errors of the requests.
on_request_handlers: A dictionary that maps method names to callback functions
that handle requests from the server.
on_notification_handlers: A dictionary that maps method names to callback functions
that handle notifications from the server.
logger: An optional function that takes two strings (source and destination) and
a payload dictionary, and logs the communication between the client and the server.
tasks: A dictionary that maps task ids to asyncio.Task objects that represent
the asynchronous tasks created by the handler.
task_counter: An integer that represents the next available task id for the handler.
loop: An asyncio.AbstractEventLoop object that represents the event loop used by the handler.
"""
def __init__(self, process_launch_info: ProcessLaunchInfo, logger=None) -> None:
"""
Params:
process_launch_info: A ProcessLaunchInfo object that describes the command, environment and working directory used to launch the language server process.
logger: An optional function that takes two strings (source and destination) and
a payload dictionary, and logs the communication between the client and the server.
"""
| """
This file provides the implementation of the JSON-RPC client, which launches and
communicates with the language server.
The initial implementation of this file was obtained from
https://github.com/predragnikolic/OLSP under the MIT License with the following terms:
MIT License
Copyright (c) 2023 Предраг Николић
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
StringDict = Dict[str, Any]
PayloadLike = Union[List[StringDict], StringDict, None]
CONTENT_LENGTH = "Content-Length: "
ENCODING = "utf-8"
@dataclasses.dataclass
class ProcessLaunchInfo:
"""
This class is used to store the information required to launch a process.
"""
# The command to launch the process
cmd: str
# The environment variables to set for the process
env: Dict[str, str] = dataclasses.field(default_factory=dict)
# The working directory for the process
cwd: str = os.getcwd()
class Error(Exception):
def __init__(self, code: ErrorCodes, message: str) -> None:
super().__init__(message)
self.code = code
def to_lsp(self) -> StringDict:
return {"code": self.code, "message": super().__str__()}
@classmethod
def from_lsp(cls, d: StringDict) -> "Error":
return Error(d["code"], d["message"])
def __str__(self) -> str:
return f"{super().__str__()} ({self.code})"
def make_response(request_id: Any, params: PayloadLike) -> StringDict:
return {"jsonrpc": "2.0", "id": request_id, "result": params}
def make_error_response(request_id: Any, err: Error) -> StringDict:
return {"jsonrpc": "2.0", "id": request_id, "error": err.to_lsp()}
def make_notification(method: str, params: PayloadLike) -> StringDict:
return {"jsonrpc": "2.0", "method": method, "params": params}
def make_request(method: str, request_id: Any, params: PayloadLike) -> StringDict:
return {"jsonrpc": "2.0", "method": method, "id": request_id, "params": params}
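# For reference, the payload shapes produced by the helpers above (argument values are illustrative):
#   make_request("initialize", 1, {...})  -> {"jsonrpc": "2.0", "method": "initialize", "id": 1, "params": {...}}
#   make_response(1, {...})               -> {"jsonrpc": "2.0", "id": 1, "result": {...}}
#   make_error_response(1, err)           -> {"jsonrpc": "2.0", "id": 1, "error": {"code": ..., "message": ...}}
#   make_notification("initialized", {})  -> {"jsonrpc": "2.0", "method": "initialized", "params": {}}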
class StopLoopException(Exception):
pass
def create_message(payload: PayloadLike):
body = json.dumps(payload, check_circular=False, ensure_ascii=False, separators=(",", ":")).encode(ENCODING)
return (
f"Content-Length: {len(body)}\r\n".encode(ENCODING),
"Content-Type: application/vscode-jsonrpc; charset=utf-8\r\n\r\n".encode(ENCODING),
body,
)
class MessageType:
error = 1
warning = 2
info = 3
log = 4
class Request:
def __init__(self) -> None:
self.cv = asyncio.Condition()
self.result: Optional[PayloadLike] = None
self.error: Optional[Error] = None
async def on_result(self, params: PayloadLike) -> None:
self.result = params
async with self.cv:
self.cv.notify()
async def on_error(self, err: Error) -> None:
self.error = err
async with self.cv:
self.cv.notify()
def content_length(line: bytes) -> Optional[int]:
if line.startswith(b"Content-Length: "):
_, value = line.split(b"Content-Length: ")
value = value.strip()
try:
return int(value)
except ValueError:
raise ValueError("Invalid Content-Length header: {}".format(value))
return None
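# Illustrative usage while parsing the header section of a server message:
#   content_length(b"Content-Length: 123\r\n")  -> 123
#   content_length(b"Content-Type: application/vscode-jsonrpc; charset=utf-8\r\n")  -> None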
class LanguageServerHandler:
"""
This class implements a Python client for the Language Server Protocol (LSP).
It launches the language server process and communicates with it
using LSP.
It provides methods for sending requests, responses, and notifications to the server
and for registering handlers for requests and notifications from the server.
Uses JSON-RPC 2.0 for communication with the server over stdin/stdout.
Attributes:
send: A LspRequest object that can be used to send requests to the server and
await the responses.
notify: A LspNotification object that can be used to send notifications to the server.
cmd: A string that represents the command to launch the language server process.
process: A subprocess.Popen object that represents the language server process.
_received_shutdown: A boolean flag that indicates whether the client has received
a shutdown request from the server.
request_id: An integer that represents the next available request id for the client.
_response_handlers: A dictionary that maps request ids to Request objects that
store the results or errors of the requests.
on_request_handlers: A dictionary that maps method names to callback functions
that handle requests from the server.
on_notification_handlers: A dictionary that maps method names to callback functions
that handle notifications from the server.
logger: An optional function that takes two strings (source and destination) and
a payload dictionary, and logs the communication between the client and the server.
tasks: A dictionary that maps task ids to asyncio.Task objects that represent
the asynchronous tasks created by the handler.
task_counter: An integer that represents the next available task id for the handler.
loop: An asyncio.AbstractEventLoop object that represents the event loop used by the handler.
"""
def __init__(self, process_launch_info: ProcessLaunchInfo, logger=None) -> None:
"""
Params:
process_launch_info: A ProcessLaunchInfo object that describes the command, environment and working directory used to launch the language server process.
logger: An optional function that takes two strings (source and destination) and
a payload dictionary, and logs the communication between the client and the server.
""" | self.send = LspRequest(self.send_request) | 1 | 2023-11-04 21:49:04+00:00 | 12k |
bigai-nlco/langsuite | langsuite/envs/teach/teach_world.py | [
{
"identifier": "CSS4_COLORS",
"path": "langsuite/constants.py",
"snippet": "CSS4_COLORS = {\n \"aliceblue\": \"#F0F8FF\",\n \"antiquewhite\": \"#FAEBD7\",\n \"aqua\": \"#00FFFF\",\n \"aquamarine\": \"#7FFFD4\",\n \"azure\": \"#F0FFFF\",\n \"beige\": \"#F5F5DC\",\n \"bisque\": \"#FFE4C4\",\n \"black\": \"#000000\",\n \"blanchedalmond\": \"#FFEBCD\",\n \"blue\": \"#0000FF\",\n \"blueviolet\": \"#8A2BE2\",\n \"brown\": \"#A52A2A\",\n \"burlywood\": \"#DEB887\",\n \"cadetblue\": \"#5F9EA0\",\n \"chartreuse\": \"#7FFF00\",\n \"chocolate\": \"#D2691E\",\n \"coral\": \"#FF7F50\",\n \"cornflowerblue\": \"#6495ED\",\n \"cornsilk\": \"#FFF8DC\",\n \"crimson\": \"#DC143C\",\n \"cyan\": \"#00FFFF\",\n \"darkblue\": \"#00008B\",\n \"darkcyan\": \"#008B8B\",\n \"darkgoldenrod\": \"#B8860B\",\n \"darkgray\": \"#A9A9A9\",\n \"darkgreen\": \"#006400\",\n \"darkgrey\": \"#A9A9A9\",\n \"darkkhaki\": \"#BDB76B\",\n \"darkmagenta\": \"#8B008B\",\n \"darkolivegreen\": \"#556B2F\",\n \"darkorange\": \"#FF8C00\",\n \"darkorchid\": \"#9932CC\",\n \"darkred\": \"#8B0000\",\n \"darksalmon\": \"#E9967A\",\n \"darkseagreen\": \"#8FBC8F\",\n \"darkslateblue\": \"#483D8B\",\n \"darkslategray\": \"#2F4F4F\",\n \"darkslategrey\": \"#2F4F4F\",\n \"darkturquoise\": \"#00CED1\",\n \"darkviolet\": \"#9400D3\",\n \"deeppink\": \"#FF1493\",\n \"deepskyblue\": \"#00BFFF\",\n \"dimgray\": \"#696969\",\n \"dimgrey\": \"#696969\",\n \"dodgerblue\": \"#1E90FF\",\n \"firebrick\": \"#B22222\",\n \"floralwhite\": \"#FFFAF0\",\n \"forestgreen\": \"#228B22\",\n \"fuchsia\": \"#FF00FF\",\n \"gainsboro\": \"#DCDCDC\",\n \"ghostwhite\": \"#F8F8FF\",\n \"gold\": \"#FFD700\",\n \"goldenrod\": \"#DAA520\",\n \"gray\": \"#808080\",\n \"green\": \"#008000\",\n \"greenyellow\": \"#ADFF2F\",\n \"grey\": \"#808080\",\n \"honeydew\": \"#F0FFF0\",\n \"hotpink\": \"#FF69B4\",\n \"indianred\": \"#CD5C5C\",\n \"indigo\": \"#4B0082\",\n \"ivory\": \"#FFFFF0\",\n \"khaki\": \"#F0E68C\",\n \"lavender\": \"#E6E6FA\",\n \"lavenderblush\": \"#FFF0F5\",\n \"lawngreen\": \"#7CFC00\",\n \"lemonchiffon\": \"#FFFACD\",\n \"lightblue\": \"#ADD8E6\",\n \"lightcoral\": \"#F08080\",\n \"lightcyan\": \"#E0FFFF\",\n \"lightgoldenrodyellow\": \"#FAFAD2\",\n \"lightgray\": \"#D3D3D3\",\n \"lightgreen\": \"#90EE90\",\n \"lightgrey\": \"#D3D3D3\",\n \"lightpink\": \"#FFB6C1\",\n \"lightsalmon\": \"#FFA07A\",\n \"lightseagreen\": \"#20B2AA\",\n \"lightskyblue\": \"#87CEFA\",\n \"lightslategray\": \"#778899\",\n \"lightslategrey\": \"#778899\",\n \"lightsteelblue\": \"#B0C4DE\",\n \"lightyellow\": \"#FFFFE0\",\n \"lime\": \"#00FF00\",\n \"limegreen\": \"#32CD32\",\n \"linen\": \"#FAF0E6\",\n \"magenta\": \"#FF00FF\",\n \"maroon\": \"#800000\",\n \"mediumaquamarine\": \"#66CDAA\",\n \"mediumblue\": \"#0000CD\",\n \"mediumorchid\": \"#BA55D3\",\n \"mediumpurple\": \"#9370DB\",\n \"mediumseagreen\": \"#3CB371\",\n \"mediumslateblue\": \"#7B68EE\",\n \"mediumspringgreen\": \"#00FA9A\",\n \"mediumturquoise\": \"#48D1CC\",\n \"mediumvioletred\": \"#C71585\",\n \"midnightblue\": \"#191970\",\n \"mintcream\": \"#F5FFFA\",\n \"mistyrose\": \"#FFE4E1\",\n \"moccasin\": \"#FFE4B5\",\n \"navajowhite\": \"#FFDEAD\",\n \"navy\": \"#000080\",\n \"oldlace\": \"#FDF5E6\",\n \"olive\": \"#808000\",\n \"olivedrab\": \"#6B8E23\",\n \"orange\": \"#FFA500\",\n \"orangered\": \"#FF4500\",\n \"orchid\": \"#DA70D6\",\n \"palegoldenrod\": \"#EEE8AA\",\n \"palegreen\": \"#98FB98\",\n \"paleturquoise\": \"#AFEEEE\",\n \"palevioletred\": \"#DB7093\",\n \"papayawhip\": \"#FFEFD5\",\n \"peachpuff\": \"#FFDAB9\",\n \"peru\": \"#CD853F\",\n \"pink\": \"#FFC0CB\",\n 
\"plum\": \"#DDA0DD\",\n \"powderblue\": \"#B0E0E6\",\n \"purple\": \"#800080\",\n \"rebeccapurple\": \"#663399\",\n \"red\": \"#FF0000\",\n \"rosybrown\": \"#BC8F8F\",\n \"royalblue\": \"#4169E1\",\n \"saddlebrown\": \"#8B4513\",\n \"salmon\": \"#FA8072\",\n \"sandybrown\": \"#F4A460\",\n \"seagreen\": \"#2E8B57\",\n \"seashell\": \"#FFF5EE\",\n \"sienna\": \"#A0522D\",\n \"silver\": \"#C0C0C0\",\n \"skyblue\": \"#87CEEB\",\n \"slateblue\": \"#6A5ACD\",\n \"slategray\": \"#708090\",\n \"slategrey\": \"#708090\",\n \"snow\": \"#FFFAFA\",\n \"springgreen\": \"#00FF7F\",\n \"steelblue\": \"#4682B4\",\n \"tan\": \"#D2B48C\",\n \"teal\": \"#008080\",\n \"thistle\": \"#D8BFD8\",\n \"tomato\": \"#FF6347\",\n \"turquoise\": \"#40E0D0\",\n \"violet\": \"#EE82EE\",\n \"wheat\": \"#F5DEB3\",\n \"white\": \"#FFFFFF\",\n \"whitesmoke\": \"#F5F5F5\",\n \"yellow\": \"#FFFF00\",\n \"yellowgreen\": \"#9ACD32\",\n}"
},
{
"identifier": "Geometry",
"path": "langsuite/shapes.py",
"snippet": "class Geometry:\n def __init__(self) -> None:\n self.shapey_geo = None\n\n def __repr__(self) -> str:\n return \"\""
},
{
"identifier": "Point2D",
"path": "langsuite/shapes.py",
"snippet": "class Point2D(Geometry):\n def __init__(self, *args) -> None:\n if len(args) > 2:\n raise TypeError(f\"Point2D takes at most 2 arguements ({len(args)} given)\")\n elif len(args) == 2:\n self.x, self.y = float(args[0]), float(args[1])\n elif len(args) == 1:\n if isinstance(args[0], Point2D) or isinstance(args[0], Point):\n self.x, self.y = args[0].x, args[0].y\n elif type(args[0]) in [list, tuple, np.ndarray] and len(args[0]) == 2:\n self.x, self.y = args[0][:2]\n else:\n raise TypeError(\n f\"Unsupport argument type for Point2D ({type(args[0])} given)\"\n )\n else:\n raise TypeError(\"Point2D takes at least 1 argument\")\n self.shapely_geo = Point(self.x, self.y)\n\n @property\n def modulus(self) -> float:\n return math.sqrt(self.x**2 + self.y**2)\n\n def __add__(self, other):\n return Point2D(self.x + other.x, self.y + other.y)\n\n def __sub__(self, other):\n return Point2D(self.x - other.x, self.y - other.y)\n\n def __mul__(self, other: float):\n return Point2D(self.x * other, self.y * other)\n\n def __truediv__(self, other: float):\n if other == 0.0:\n raise RuntimeError(\"Div Zero in Point2D\")\n return Point2D(self.x / other, self.y / other)\n\n def __eq__(self, other: object) -> bool:\n if not isinstance(other, Point2D):\n return False\n return self.x == other.x and self.y == other.y\n\n def __str__(self) -> str:\n return f\"({self.x}, {self.y})\"\n\n def to_wkt(self) -> str:\n return self.shapely_geo.wkt\n\n def to_numpy(self) -> np.ndarray:\n return np.array([self.x, self.y], dtype=np.float32)\n\n def rotate(self, angle, center, use_radians=False):\n \"\"\"Rotation of Polygon2D geometry\n Refers to https://shapely.readthedocs.io/en/stable/manual.html#shapely.affinity.rotate\n\n Args:\n angle: degrees or radians by setting `use_radians=True`\n origin: (x0, y0)\n\n \"\"\"\n if isinstance(center, Point2D):\n center = (center.x, center.y)\n # TODO\n self.shapely_geo = shapely.affinity.rotate(\n self.shapely_geo, angle, center, use_radians\n )\n self.x = self.shapely_geo.x\n self.y = self.shapely_geo.y"
},
{
"identifier": "Polygon2D",
"path": "langsuite/shapes.py",
"snippet": "class Polygon2D(Geometry):\n def __init__(\n self,\n coords: List[Union[Point2D, Tuple[float, float]]],\n holes: Optional[List[Union[Point2D, Tuple[float, float]]]] = None,\n ) -> None:\n self.coords = [Point2D(c) for c in coords]\n self.holes = [] if holes is None else [Point2D(c) for c in holes]\n self.shapely_geo = Polygon(\n shell=[c.shapely_geo for c in self.coords],\n holes=[c.shapely_geo for c in self.holes],\n )\n\n def __repr__(self) -> str:\n return \"{\" + \", \".join([str(c) for c in self.coords]) + \"}\"\n\n @property\n def area(self) -> float:\n return self.shapely_geo.area\n\n @property\n def is_closed(self) -> bool:\n return len(self.coords) > 1 and self.coords[-1] == self.coords[0]\n\n @property\n def length(self) -> float:\n return self.shapely_geo.length\n\n @property\n def centroid(self) -> Point2D:\n return Point2D(self.shapely_geo.centroid)\n\n @property\n def x_min(self) -> float:\n return np.min([c.x for c in self.coords])\n\n @property\n def x_max(self) -> float:\n return np.max([c.x for c in self.coords])\n\n @property\n def y_min(self) -> float:\n return np.min([c.y for c in self.coords])\n\n @property\n def y_max(self) -> float:\n return np.max([c.y for c in self.coords])\n\n @property\n def xy(self):\n return self.shapely_geo.exterior.xy\n\n def intersects(self, other) -> bool:\n return self.shapely_geo.intersects(other.shapely_geo)\n\n def rotate(self, angle, origin=\"center\", use_radians=False):\n \"\"\"Rotation of Polygon2D geometry\n Refers to https://shapely.readthedocs.io/en/stable/manual.html#shapely.affinity.rotate\n\n Args:\n angle: degrees or radians by setting `use_radians=True`\n origin: ['center', 'centroid', (x0, y0)]\n\n \"\"\"\n if isinstance(origin, Point2D):\n origin = (origin.x, origin.y)\n self.shapely_geo = shapely.affinity.rotate(\n self.shapely_geo, angle, origin, use_radians\n )\n self.coords = [Point2D(c) for c in self.shapely_geo.exterior.coords]\n\n def to_wkt(self) -> str:\n \"\"\"Well-known text representation of geometry\n https://en.wikipedia.org/wiki/Well-known_text_representation_of_geometry\n\n Examples:\n POLYGON ((30 10, 40 40, 20 40, 10 20, 30 10))\n POLYGON ((35 10, 45 45, 15 40, 10 20, 35 10), (20 30, 35 35, 30 20, 20 30))\n\n \"\"\"\n return self.shapely_geo.wkt\n\n def to_numpy(self) -> np.array:\n return (\n np.array([p.to_numpy() for p in self.coords[:-1]])\n if self.is_closed\n else np.array([p.to_numpy() for p in self.coords])\n )\n\n def contains(self, other) -> bool:\n \"\"\"Returns True if a Point or a Polygon is contained by the current Polygon\n Args:\n other: Point2D or Polygon2D\n\n Returns:\n a boolean value\n \"\"\"\n if not isinstance(other, Polygon2D) and not isinstance(other, Point2D):\n raise TypeError(\n f\"contains only support Polygon2D or Point2D ({type(other)} given)\"\n )\n return self.shapely_geo.contains(other.shapely_geo)"
},
{
"identifier": "logger",
"path": "langsuite/utils/logging.py",
"snippet": "class Logger:\n def __init__(\n self,\n log_level: int = logging.DEBUG,\n log_file: str = \"\",\n use_cmd: bool = False,\n console_logging=True,\n ) -> None:\n def has_cmdline_interface(self):\n def setLevel(self, level):\n def set_cmd_client(self, cmd_cli: CMDClient, disable_console_logging=True):\n def set_log_file(self, log_file):\n def close(self):\n def info(self, msg):\n def debug(self, msg):\n def error(self, msg):\n def warn(self, msg):\n def user_input(self):\n def emit(self, message):\n def robot_emit(self, message_or_streamer, name=\"Robot\", action=\"chat\"):"
},
{
"identifier": "WORLD_REGISTRY",
"path": "langsuite/world.py",
"snippet": "WORLD_REGISTRY = Registry(\"world\")"
},
{
"identifier": "Object2D",
"path": "langsuite/world.py",
"snippet": "class Object2D:\n def __init__(\n self,\n obj_type: ObjectType,\n id: str,\n *,\n alias: Optional[str] = None,\n geometry: Optional[Geometry] = None,\n asset_id: Optional[str] = None,\n **kwargs,\n ) -> None:\n self.id = id\n self.asset_id = asset_id\n self.alias = alias\n self.obj_type = obj_type\n self.geometry = geometry\n self.props = dict()\n for k, val in kwargs.items():\n self.props[k] = val\n\n self.walls = defaultdict()\n self.doors = defaultdict()\n self.windows = defaultdict()\n if \"children\" in self.props:\n self.children = self.props[\"children\"]\n else:\n self.children = defaultdict()\n self.chilren_types = [ObjectType.OBJECT]\n\n @classmethod\n def create(cls, obj_data):\n return NotImplementedError()\n\n def __repr__(self) -> str:\n obj_string = f\"asset_id: {self.asset_id}\"\n return obj_string\n\n def contains(self, other) -> bool:\n \"\"\"Returns True is another object is in current object\n\n Args:\n other: Object2D: an object instance\n \"\"\"\n if not isinstance(other, Object2D):\n return ValueError(\n f\"Invalid input: other has to be of type Object ({type(other)} given)\"\n )\n if other.obj_type not in self.chilren_types:\n return False\n if other.obj_type == ObjectType.WALL:\n return other.id in self.walls.keys()\n elif other.obj_type == ObjectType.DOOR:\n return other.id in self.doors.keys()\n elif other.obj_type == ObjectType.WINDOW:\n return other.id in self.windows.keys()\n elif other.obj_type == ObjectType.OBJECT:\n return other.id in self.children.keys()\n else:\n raise ValueError(f\"Invalid input: {type(other)}.\")\n\n def add_wall(self, wall) -> Optional[str]:\n if ObjectType.WALL not in self.chilren_types:\n raise ValueError(f\"Unable to add type {wall.obj_type}\")\n if wall.id in self.wall:\n return wall.id\n self.walls[wall.id] = wall\n return wall.id\n\n def add_door(self, door) -> Optional[str]:\n if ObjectType.DOOR not in self.chilren_types:\n raise ValueError(f\"Unable to add type {door.obj_type}\")\n if door.id in self.doors:\n return door.id\n self.doors[door.id] = door\n return door.id\n\n def add_window(self, window) -> Optional[str]:\n if ObjectType.WINDOW not in self.chilren_types:\n raise ValueError(f\"Unable to add type {window.obj_type}\")\n\n if window.id in self.windows:\n return window.id\n self.windows[window.id] = window\n return window.id\n\n def add_object(self, object) -> Optional[str]:\n if ObjectType.OBJECT not in self.chilren_types:\n raise ValueError(f\"Unable to add type {object.obj_type}\")\n\n if object.id in self.children:\n return object.id\n self.children[object.id] = object\n return object.id\n\n def update_position(self, position):\n diff = position - self.position\n coords = []\n for i in range(len(self.geometry.coords)):\n coords.append(self.geometry.coords[i] + diff)\n self.geometry = Polygon2D(coords)\n self.position = position"
},
{
"identifier": "ObjectType",
"path": "langsuite/world.py",
"snippet": "class ObjectType(Enum):\n OBJECT = 1\n ROOM = 2\n WALL = 3\n WINDOW = 4\n DOOR = 5"
},
{
"identifier": "Room",
"path": "langsuite/world.py",
"snippet": "class Room(Object2D):\n def __init__(\n self,\n room_id: str,\n *,\n alias: Optional[str] = None,\n geometry: Optional[Polygon2D] = None,\n asset_id: Optional[str] = None,\n **kwargs,\n ):\n super().__init__(\n ObjectType.ROOM,\n room_id,\n alias=alias,\n geometry=geometry,\n asset_id=asset_id,\n **kwargs,\n )\n self.chilren_types = [\n ObjectType.OBJECT,\n ObjectType.DOOR,\n ObjectType.WINDOW,\n ObjectType.WALL,\n ]"
},
{
"identifier": "Wall",
"path": "langsuite/world.py",
"snippet": "class Wall(Object2D):\n def __init__(\n self,\n wall_id: str,\n *,\n alias: Optional[str],\n geometry: Optional[Geometry],\n asset_id: Optional[str],\n room2room: Union[Tuple[str], str] = [],\n **kwargs,\n ):\n super().__init__(\n ObjectType.WALL,\n wall_id,\n alias=alias,\n geometry=geometry,\n asset_id=asset_id,\n **kwargs,\n )\n self.chilren_types = [ObjectType.OBJECT, ObjectType.DOOR, ObjectType.WINDOW]\n self.room2room = [room2room] if type(room2room) == str else room2room"
},
{
"identifier": "World",
"path": "langsuite/world.py",
"snippet": "class World:\n def __init__(self, world_id: str):\n self.world_id = world_id\n self.rooms: Dict[str, Room] = dict()\n self.walls: Dict[str, Wall] = dict()\n self.doors: Dict[str, Door] = dict()\n self.windows: Dict[str, Window] = dict()\n self.objects: Dict[str, Object2D] = dict()\n self.grid_size = None\n self.room_polygons = None\n self.id2object = {}\n\n @classmethod\n def create(cls, world_cfg):\n world_type = world_cfg.get(\"type\")\n if world_type is None or len(world_type) == 0:\n raise ValueError(\"World type must be provided to create a world.\")\n\n if WORLD_REGISTRY.hasRegistered(world_type):\n return WORLD_REGISTRY.get(world_type).create(world_cfg)\n else:\n raise NotImplementedError(f\"World type {world_type} not found.\")\n\n def add_room(self, room: Room) -> Optional[str]:\n return NotImplementedError()"
}
] | import json
import random
import numpy as np
import plotly.graph_objects as go
from collections import defaultdict
from copy import deepcopy
from pathlib import Path
from typing import Any, Dict, Optional, Tuple, Union
from langsuite.constants import CSS4_COLORS
from langsuite.shapes import Geometry, Point2D, Polygon2D
from langsuite.utils.logging import logger
from langsuite.world import WORLD_REGISTRY, Object2D, ObjectType, Room, Wall, World | 7,288 | # bbox = get_bbox(bbox_cornerpoints)
bbox = get_bbox(center, size)
rotation = obj_data["rotation"]["y"]
polys_2d = None
center = Point2D(center["x"], center["z"])
position = Point2D(
obj_data.get("position").get("x"), obj_data.get("position").get("z")
)
if bbox:
# ul = center - Point2D(bbox["x"], bbox["z"]) * 0.5
# br = center + Point2D(bbox["x"], bbox["z"]) * 0.5
# polys_2d = Box2D(ul, br)
polys_2d = Polygon2D(bbox)
# TODO Box2D rotate ISSUE
# polys_2d.rotate(360 - rotation, origin=(center.x, center.y))
children = {}
if (
"receptacleObjectIds" in obj_data
and obj_data["receptacleObjectIds"] is not None
and len(obj_data["receptacleObjectIds"]) > 0
):
children_id = deepcopy(obj_data["receptacleObjectIds"])
while len(children_id) > 0:
c_id = children_id.pop(0)
for object_c in objs_data:
if object_c["objectId"] == c_id:
children[c_id] = TeachObject.create(object_c, objs_data)
break
return cls(
object_id,
geometry=polys_2d,
asset_id=asset_id,
position=center,
rotation=rotation,
children=children,
props=props,
)
def plot(self, axes=None):
if self.geometry is None:
return
x, y = self.geometry.shapely_geo.exterior.xy
axes.fill(x, y)
if len(self.children) > 0:
for c in self.children:
self.children[c].plot(axes=axes)
def render(self, fig):
if self.geometry is None:
return
x, y = self.geometry.shapely_geo.exterior.xy
fig.add_trace(
go.Scatter(
mode="lines",
x=np.array(x),
y=np.array(y),
fill="toself",
name=self.id,
fillcolor=self.color,
line=dict(width=0),
)
)
if len(self.children) > 0:
for c in self.children:
self.children[c].render(fig)
def find_all_children(self):
children = []
if len(self.children) > 0:
for child in self.children.values():
children.append(child)
children.extend(child.find_all_children())
return children
def del_child(self, child_id):
if child_id in self.children:
del self.children[child_id]
return True
else:
for child in self.children.values():
if child.del_child(child_id):
return True
return False
def get_obj_pose_info(self):
if "openable" in self.props and self.props["openable"]:
openness = self.props["openness"]
openable = True
else:
openness = None
openable = False
if "pickupable" in self.props:
pickupable = self.props["pickupable"]
else:
pickupable = False
if "isBroken" in self.props:
isBroken = self.props["isBroken"]
else:
isBroken = False
return {
"type": self.id.split("|")[0],
"position": self.position,
"rotation": self.rotation,
"openable": openable,
"openness": openness,
"pickupable": pickupable,
"broken": isBroken,
"objectId": self.id,
"name": self.id,
"parentReceptacles": [],
"bounding_box": None,
}
@WORLD_REGISTRY.register()
| # Copyright (c) BIGAI Research. All rights reserved.
# Licensed under the MIT license.
from __future__ import annotations
TeachPath = Path(__file__).parent
class TeachWall(Wall):
def __init__(
self,
wall_id: str,
*,
alias: Optional[str] = None,
geometry: Optional[Geometry] = None,
asset_id: Optional[str] = None,
room2room: Union[Tuple[str], str] = list(),
empty: bool,
**kwargs,
):
super().__init__(
wall_id,
alias=alias,
geometry=geometry,
asset_id=asset_id,
room2room=room2room,
**kwargs,
)
self.empty = empty
@classmethod
def create(cls, id, polys_2d):
empty = False
polys_2d = Polygon2D(polys_2d)
return cls(id, geometry=polys_2d, empty=empty)
def plot(self, axes=None):
if self.geometry is None:
return
x, y = self.geometry.shapely_geo.exterior.xy
if self.empty:
axes.plot(x, y, color="black", linestyle="-.", linewidth=0.5)
else:
axes.plot(x, y, color="black", linewidth=0.5)
axes.fill(x, y, color="gray")
def render(self):
if not self.geometry:
return
class TeachRoom(Room):
@classmethod
def create(cls, room_data):
polys_3d = room_data["floorPolygon"]
polys_2d = []
for p in polys_3d:
polys_2d.append((p[0], p[1]))
polys_2d = Polygon2D(polys_2d)
return cls(room_data["id"], geometry=polys_2d, asset_id=room_data["roomType"])
def plot(self, axes=None, color="aliceblue"):
if self.geometry is None:
return
x, y = self.geometry.shapely_geo.exterior.xy
axes.fill(x, y, color=color)
def render(self, fig=None):
if self.geometry is None:
return
if not fig:
fig = go.Figure()
x, y = self.geometry.shapely_geo.exterior.xy
fig.add_trace(
go.Scatter(
x=np.array(x),
y=np.array(y),
fill="toself",
fillcolor="aliceblue",
name=self.id,
line=dict(color="gray"),
)
)
class TeachObject(Object2D):
colorscales = list(CSS4_COLORS.keys())
color_registry = defaultdict()
def __init__(
self,
obj_id: str,
*,
alias: Optional[str] = None,
geometry: Optional[Polygon2D] = None,
asset_id: Optional[str] = None,
position: Point2D = None,
rotation: float = 0,
props: Dict[str, Any] = defaultdict(),
**kwargs,
) -> None:
super().__init__(
ObjectType.OBJECT,
obj_id,
alias=alias,
geometry=geometry,
asset_id=asset_id,
**kwargs,
)
self.chilren_types = [ObjectType.OBJECT]
if props is not None:
self.props.update(props)
self.category = self.id.split("|")[0]
if self.category not in TeachObject.color_registry:
select_color = random.choice(TeachObject.colorscales)
TeachObject.color_registry.update({self.category: select_color})
TeachObject.colorscales.remove(select_color)
self.color = TeachObject.color_registry.get(self.category)
# self.position = self.geometry.centroid
self.position = position
self.rotation = rotation
@classmethod
def create(cls, obj_data, objs_data):
if "Floor" in obj_data["objectId"]:
obj_data["axisAlignedBoundingBox"]["size"] = {"x": 0, "y": 0, "z": 0}
asset_id = obj_data["objectType"]
object_id = obj_data["objectId"]
props = obj_data
size = obj_data.get("axisAlignedBoundingBox").get("size")
center = obj_data.get("axisAlignedBoundingBox").get("center")
def get_bbox(center, size):
minx = center["x"] - (1 / 2) * size["x"]
maxx = center["x"] + (1 / 2) * size["x"]
minz = center["z"] - (1 / 2) * size["z"]
maxz = center["z"] + (1 / 2) * size["z"]
return [[minx, minz], [minx, maxz], [maxx, maxz], [maxx, minz]]
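# Worked example with illustrative numbers: center = {"x": 1.0, "z": 2.0} and size = {"x": 0.4, "z": 0.2}
# give the corners [[0.8, 1.9], [0.8, 2.1], [1.2, 2.1], [1.2, 1.9]], i.e. the axis-aligned
# footprint of the object in the x-z plane.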
# bbox = get_bbox(bbox_cornerpoints)
bbox = get_bbox(center, size)
rotation = obj_data["rotation"]["y"]
polys_2d = None
center = Point2D(center["x"], center["z"])
position = Point2D(
obj_data.get("position").get("x"), obj_data.get("position").get("z")
)
if bbox:
# ul = center - Point2D(bbox["x"], bbox["z"]) * 0.5
# br = center + Point2D(bbox["x"], bbox["z"]) * 0.5
# polys_2d = Box2D(ul, br)
polys_2d = Polygon2D(bbox)
# TODO Box2D rotate ISSUE
# polys_2d.rotate(360 - rotation, origin=(center.x, center.y))
children = {}
if (
"receptacleObjectIds" in obj_data
and obj_data["receptacleObjectIds"] is not None
and len(obj_data["receptacleObjectIds"]) > 0
):
children_id = deepcopy(obj_data["receptacleObjectIds"])
while len(children_id) > 0:
c_id = children_id.pop(0)
for object_c in objs_data:
if object_c["objectId"] == c_id:
children[c_id] = TeachObject.create(object_c, objs_data)
break
return cls(
object_id,
geometry=polys_2d,
asset_id=asset_id,
position=center,
rotation=rotation,
children=children,
props=props,
)
def plot(self, axes=None):
if self.geometry is None:
return
x, y = self.geometry.shapely_geo.exterior.xy
axes.fill(x, y)
if len(self.children) > 0:
for c in self.children:
self.children[c].plot(axes=axes)
def render(self, fig):
if self.geometry is None:
return
x, y = self.geometry.shapely_geo.exterior.xy
fig.add_trace(
go.Scatter(
mode="lines",
x=np.array(x),
y=np.array(y),
fill="toself",
name=self.id,
fillcolor=self.color,
line=dict(width=0),
)
)
if len(self.children) > 0:
for c in self.children:
self.children[c].render(fig)
def find_all_children(self):
children = []
if len(self.children) > 0:
for child in self.children.values():
children.append(child)
children.extend(child.find_all_children())
return children
def del_child(self, child_id):
if child_id in self.children:
del self.children[child_id]
return True
else:
for child in self.children.values():
if child.del_child(child_id):
return True
return False
def get_obj_pose_info(self):
if "openable" in self.props and self.props["openable"]:
openness = self.props["openness"]
openable = True
else:
openness = None
openable = False
if "pickupable" in self.props:
pickupable = self.props["pickupable"]
else:
pickupable = False
if "isBroken" in self.props:
isBroken = self.props["isBroken"]
else:
isBroken = False
return {
"type": self.id.split("|")[0],
"position": self.position,
"rotation": self.rotation,
"openable": openable,
"openness": openness,
"pickupable": pickupable,
"broken": isBroken,
"objectId": self.id,
"name": self.id,
"parentReceptacles": [],
"bounding_box": None,
}
@WORLD_REGISTRY.register() | class TeachWorld(World): | 10 | 2023-11-01 01:47:00+00:00 | 12k |
radekd91/inferno | inferno/models/EmoSwinModule.py | [
{
"identifier": "class_from_str",
"path": "inferno/utils/other.py",
"snippet": "def class_from_str(str, module=None, none_on_fail = False) -> type:\n if module is None:\n module = sys.modules[__name__]\n if hasattr(module, str):\n cl = getattr(module, str)\n return cl\n elif str.lower() == 'none' or none_on_fail:\n return None\n raise RuntimeError(f\"Class '{str}' not found.\")"
},
{
"identifier": "AffectNetExpressions",
"path": "inferno/datasets/AffectNetDataModule.py",
"snippet": "class AffectNetExpressions(Enum):\n Neutral = 0\n Happy = 1\n Sad = 2\n Surprise = 3\n Fear = 4\n Disgust = 5\n Anger = 6\n Contempt = 7\n None_ = 8\n Uncertain = 9\n Occluded = 10\n xxx = 11\n\n\n @staticmethod\n def from_str(string : str):\n string = string[0].upper() + string[1:]\n return AffectNetExpressions[string]\n\n # _expressions = {0: 'neutral', 1:'happy', 2:'sad', 3:'surprise', 4:'fear', 5:'disgust', 6:'anger', 7:'contempt', 8:'none'}"
},
{
"identifier": "Expression7",
"path": "inferno/datasets/AffWild2Dataset.py",
"snippet": "class Expression7(Enum):\n Neutral = 0\n Anger = 1\n Disgust = 2\n Fear = 3\n Happiness = 4\n Sadness = 5\n Surprise = 6\n None_ = 7"
},
{
"identifier": "_log_array_image",
"path": "inferno/utils/lightning_logging.py",
"snippet": "def _log_array_image(path, image, caption=None):\n image = _fix_image(image)\n if path is not None:\n imsave(path, image)\n return image"
},
{
"identifier": "_log_wandb_image",
"path": "inferno/utils/lightning_logging.py",
"snippet": "def _log_wandb_image(path, image, caption=None):\n path.parent.mkdir(parents=True, exist_ok=True)\n image = _fix_image(image)\n imsave(path, image)\n if caption is not None:\n caption_file = Path(path).parent / (Path(path).stem + \".txt\")\n with open(caption_file, \"w\") as f:\n f.write(caption)\n wandb_image = Image(str(path), caption=caption)\n return wandb_image"
},
{
"identifier": "_torch_image2np",
"path": "inferno/utils/lightning_logging.py",
"snippet": "def _torch_image2np(torch_image):\n image = torch_image.detach().cpu().numpy()\n if len(image.shape) == 4:\n image = image.transpose([0, 2, 3, 1])\n elif len(image.shape) == 3:\n image = image.transpose([1, 2, 0])\n return image"
},
{
"identifier": "EmotionRecognitionBaseModule",
"path": "inferno/models/EmotionRecognitionModuleBase.py",
"snippet": "class EmotionRecognitionBaseModule(pl.LightningModule):\n \"\"\"\n EmotionRecognitionBaseModule is a base class for emotion prediction (valence and arousal, expression classification and/or action units)\n \"\"\"\n\n def __init__(self, config):\n \"\"\"\n\n \"\"\"\n super().__init__()\n self.config = config\n\n if 'v_activation' in config.model.keys():\n self.v_activation = class_from_str(self.config.model.v_activation, sys.modules[__name__])\n else:\n self.v_activation = None\n\n if 'a_activation' in config.model.keys():\n self.a_activation = class_from_str(self.config.model.a_activation, sys.modules[__name__])\n else:\n self.a_activation = None\n\n if 'exp_activation' in config.model.keys():\n self.exp_activation = class_from_str(self.config.model.exp_activation, sys.modules[__name__])\n else:\n self.exp_activation = F.log_softmax\n\n if 'AU_activation' in config.model.keys():\n self.AU_activation = class_from_str(self.config.model.AU_activation, sys.modules[__name__])\n else:\n self.AU_activation = None\n\n self.va_loss = loss_from_cfg(config.model, 'va_loss')\n self.v_loss = loss_from_cfg(config.model, 'v_loss')\n self.a_loss = loss_from_cfg(config.model, 'a_loss')\n self.exp_loss = loss_from_cfg(config.model, 'exp_loss')\n self.AU_loss = loss_from_cfg(config.model, 'AU_loss')\n\n # self.val_conf_mat = pl.metrics.ConfusionMatrix(self.num_classes, 'true')\n # self.val_conf_mat = pl.metrics.ConfusionMatrix(self.num_classes, 'true')\n\n # @property\n def predicts_valence(self):\n return self.config.model.predict_valence\n\n def trains_valence(self):\n return self.config.model.v_loss not in [None, 'None', 'none', 0, False] \\\n and bool(self.config.model.v_loss)\n\n # @property\n def predicts_arousal(self):\n return self.config.model.predict_arousal\n\n def trains_arousal(self):\n return self.config.model.a_loss not in [None, 'None', 'none', 0, False] \\\n and bool(self.config.model.a_loss)\n\n # @property\n def predicts_expression(self):\n return self.config.model.predict_expression\n\n def trains_expression(self):\n return self.config.model.exp_loss not in [None, 'None', 'none', 0, False] \\\n and bool(self.config.model.exp_loss)\n\n # @property\n def predicts_AUs(self):\n if 'predict_AUs' in self.config.model.keys() and self.config.model.predict_AUs:\n return self.config.model.predict_AUs\n return 0\n\n def trains_AUs(self):\n return self.config.model.AU_loss not in [None, 'None', 'none', 0, False] \\\n and bool(self.config.model.AU_loss)\n\n def forward(self, image):\n raise NotImplementedError()\n\n def _get_trainable_parameters(self):\n raise NotImplementedError()\n\n def configure_optimizers(self):\n trainable_params = []\n trainable_params += list(self._get_trainable_parameters())\n\n if self.config.learning.optimizer == 'Adam':\n opt = torch.optim.Adam(\n trainable_params,\n lr=self.config.learning.learning_rate,\n amsgrad=False)\n elif self.config.learning.optimizer == 'AdaBound':\n import adabound\n opt = adabound.AdaBound(\n trainable_params,\n lr=self.config.learning.learning_rate,\n final_lr=self.config.learning.final_learning_rate\n )\n\n elif self.config.learning.optimizer == 'SGD':\n opt = torch.optim.SGD(\n trainable_params,\n lr=self.config.learning.learning_rate)\n else:\n raise ValueError(f\"Unsupported optimizer: '{self.config.learning.optimizer}'\")\n\n optimizers = [opt]\n schedulers = []\n\n opt_dict = {}\n opt_dict['optimizer'] = opt\n if 'learning_rate_patience' in self.config.learning.keys():\n scheduler = 
torch.optim.lr_scheduler.ReduceLROnPlateau(opt,\n patience=self.config.learning.learning_rate_patience,\n factor=self.config.learning.learning_rate_decay,\n mode=self.config.learning.lr_sched_mode)\n schedulers += [scheduler]\n opt_dict['lr_scheduler'] = scheduler\n opt_dict['monitor'] = 'val_loss_total'\n elif 'learning_rate_decay' in self.config.learning.keys():\n scheduler = torch.optim.lr_scheduler.ExponentialLR(opt, gamma=self.config.learning.learning_rate_decay)\n opt_dict['lr_scheduler'] = scheduler\n schedulers += [scheduler]\n return opt_dict\n #\n # if len(schedulers) == 0:\n # return opt\n #\n # return optimizers, schedulers\n\n def _get_step_loss_weights(self, training):\n va_loss_weights = {}\n for key in self.v_loss:\n va_loss_weights[key] = self.v_loss[key]\n\n for key in self.a_loss:\n va_loss_weights[key] = self.a_loss[key]\n\n for key in self.va_loss:\n va_loss_weights[key] = self.va_loss[key]\n\n # if training:\n # return va_loss_weights\n\n n_terms = len(va_loss_weights)\n\n if 'va_loss_scheme' in self.config.model.keys():\n if not training and self.config.model.va_loss_scheme == 'shake':\n for key in va_loss_weights:\n va_loss_weights[key] = np.random.rand(1)[0]\n total_w = 0.\n for key in va_loss_weights:\n total_w += va_loss_weights[key]\n for key in va_loss_weights:\n va_loss_weights[key] /= total_w\n elif self.config.model.va_loss_scheme == 'norm':\n total_w = 0.\n for key in va_loss_weights:\n total_w += va_loss_weights[key]\n\n for key in va_loss_weights:\n va_loss_weights[key] /= total_w\n return va_loss_weights\n\n\n # def validation_epoch_end(self, outputs) -> None:\n # if isinstance(self.logger, WandbLogger):\n # import wandb\n #\n # {\"conf_mat\": wandb.plot.confusion_matrix(probs=None,\n # y_true = ground_truth, preds = predictions,\n # class_names = class_names}\n #\n # self.wandb_logger\n\n\n def _compute_loss(self,\n pred,\n gt,\n class_weight,\n training=True,\n pred_prefix=\"\",\n valence_sample_weight=None,\n arousal_sample_weight=None,\n va_sample_weight=None,\n expression_sample_weight=None,\n au_positive_weights=None\n ):\n losses = {}\n metrics = {}\n\n scheme = None if 'va_loss_scheme' not in self.config.model.keys() else self.config.model.va_loss_scheme\n if self.v_loss is not None and self.a_loss is not None and self.va_loss is not None:\n loss_term_weights = _get_step_loss_weights(self.v_loss, self.a_loss, self.va_loss, scheme, training)\n\n if 'continuous_va_balancing' in self.config.model.keys() and self.config.model.continuous_va_balancing != \"none\":\n if self.config.model.continuous_va_balancing == '1d':\n v_weight = valence_sample_weight\n a_weight = arousal_sample_weight\n elif self.config.model.continuous_va_balancing == '2d':\n v_weight = va_sample_weight\n a_weight = va_sample_weight\n elif self.config.model.continuous_va_balancing == 'expr':\n v_weight = expression_sample_weight\n a_weight = expression_sample_weight\n else:\n raise RuntimeError(f\"Invalid continuous affect balancing\"\n f\" '{self.config.model.continuous_va_balancing}'\")\n else:\n v_weight = None\n a_weight = None\n\n if self.predicts_valence() and self.trains_valence():\n losses, metrics = v_or_a_loss(self.v_loss, pred, gt, loss_term_weights, metrics, losses, \"valence\",\n pred_prefix=pred_prefix, permit_dropping_corr=not training,\n sample_weights=v_weight)\n if self.predicts_arousal() and self.trains_arousal():\n losses, metrics = v_or_a_loss(self.a_loss, pred, gt, loss_term_weights, metrics, losses, \"arousal\",\n pred_prefix=pred_prefix, 
permit_dropping_corr=not training,\n sample_weights=a_weight)\n if self.predicts_arousal() and self.predicts_valence() and self.trains_arousal() and self.trains_valence():\n losses, metrics = va_loss(self.va_loss, pred, gt, loss_term_weights, metrics, losses, pred_prefix=pred_prefix,\n permit_dropping_corr=not training, sample_weights=v_weight)\n\n if self.predicts_expression() and self.trains_expression():\n losses, metrics = exp_loss(self.exp_loss, pred, gt, class_weight, metrics, losses,\n self.config.model.expression_balancing, self.num_classes, pred_prefix=pred_prefix)\n\n\n if self.predicts_AUs():\n if self.predicts_AUs() == 12:\n au_type = ActionUnitTypes.EMOTIONET12\n else:\n raise ValueError(f\"Predicting {self.predicts_AUs()} is not supported.\")\n losses, metrics = AU_loss(self.AU_loss, pred, gt, metrics, losses, au_type,\n class_weights=au_positive_weights, pred_prefix=pred_prefix)\n\n # if not training:\n # self.val_conf_mat(pred[pred_prefix + \"expr_classification\"], gt[\"expr_classification\"][:, 0])\n\n # if pred[pred_prefix + \"valence\"] is not None:\n # metrics[pred_prefix + \"v_mae\"] = F.l1_loss(pred[pred_prefix + \"valence\"], gt[\"valence\"])\n # metrics[pred_prefix + \"v_mse\"] = F.mse_loss(pred[pred_prefix + \"valence\"], gt[\"valence\"])\n # metrics[pred_prefix + \"v_rmse\"] = torch.sqrt(metrics[pred_prefix + \"v_mse\"])\n # metrics[pred_prefix + \"v_pcc\"] = PCC_torch(pred[pred_prefix + \"valence\"], gt[\"valence\"], batch_first=False)\n # metrics[pred_prefix + \"v_ccc\"] = CCC_torch(pred[pred_prefix + \"valence\"], gt[\"valence\"], batch_first=False)\n # metrics[pred_prefix + \"v_sagr\"] = SAGR_torch(pred[pred_prefix + \"valence\"], gt[\"valence\"])\n # # metrics[\"v_icc\"] = ICC_torch(pred[\"valence\"], gt[\"valence\"])\n # if self.v_loss is not None:\n # if callable(self.v_loss):\n # losses[\"v\"] = self.v_loss(pred[pred_prefix + \"valence\"], gt[\"valence\"])\n # elif isinstance(self.v_loss, dict):\n # for name, weight in self.v_loss.items():\n # # losses[name] = metrics[name]*weight\n # losses[name] = metrics[pred_prefix + name]*weights[name]\n # else:\n # raise RuntimeError(f\"Uknown expression loss '{self.v_loss}'\")\n #\n # if pred[pred_prefix + \"arousal\"] is not None:\n # metrics[pred_prefix + \"a_mae\"] = F.l1_loss(pred[pred_prefix + \"arousal\"], gt[\"arousal\"])\n # metrics[pred_prefix + \"a_mse\"] = F.mse_loss(pred[pred_prefix + \"arousal\"], gt[\"arousal\"])\n # metrics[pred_prefix + \"a_rmse\"] = torch.sqrt( metrics[pred_prefix + \"a_mse\"])\n # metrics[pred_prefix + \"a_pcc\"] = PCC_torch(pred[pred_prefix + \"arousal\"], gt[\"arousal\"], batch_first=False)\n # metrics[pred_prefix + \"a_ccc\"] = CCC_torch(pred[pred_prefix + \"arousal\"], gt[\"arousal\"], batch_first=False)\n # metrics[pred_prefix + \"a_sagr\"] = SAGR_torch(pred[pred_prefix + \"arousal\"], gt[\"arousal\"])\n # # metrics[\"a_icc\"] = ICC_torch(pred[\"arousal\"], gt[\"arousal\"])\n # if self.a_loss is not None:\n # if callable(self.a_loss):\n # losses[pred_prefix + \"a\"] = self.a_loss(pred[pred_prefix + \"arousal\"], gt[\"arousal\"])\n # elif isinstance(self.a_loss, dict):\n # for name, weight in self.a_loss.items():\n # # losses[name] = metrics[name]*weight\n # losses[pred_prefix + name] = metrics[pred_prefix + name]*weights[name]\n # else:\n # raise RuntimeError(f\"Uknown expression loss '{self.a_loss}'\")\n #\n # if pred[pred_prefix + \"valence\"] is not None and pred[pred_prefix + \"arousal\"] is not None:\n # va_pred = torch.cat([pred[pred_prefix + \"valence\"], 
pred[pred_prefix + \"arousal\"]], dim=1)\n # va_gt = torch.cat([gt[\"valence\"], gt[\"arousal\"]], dim=1)\n # metrics[pred_prefix + \"va_mae\"] = F.l1_loss(va_pred, va_gt)\n # metrics[pred_prefix + \"va_mse\"] = F.mse_loss(va_pred, va_gt)\n # metrics[pred_prefix + \"va_rmse\"] = torch.sqrt(metrics[pred_prefix + \"va_mse\"])\n # metrics[pred_prefix + \"va_lpcc\"] = (1. - 0.5*(metrics[pred_prefix + \"a_pcc\"] + metrics[pred_prefix + \"v_pcc\"]))[0][0]\n # metrics[pred_prefix + \"va_lccc\"] = (1. - 0.5*(metrics[pred_prefix + \"a_ccc\"] + metrics[pred_prefix + \"v_ccc\"]))[0][0]\n # if self.va_loss is not None:\n # if callable(self.va_loss):\n # losses[pred_prefix + \"va\"] = self.va_loss(va_pred, va_gt)\n # elif isinstance(self.va_loss, dict):\n # for name, weight in self.va_loss.items():\n # # losses[name] = metrics[name]*weight\n # losses[pred_prefix + name] = metrics[pred_prefix + name] * weights[name]\n # else:\n # raise RuntimeError(f\"Uknown expression loss '{self.va_loss}'\")\n #\n # if pred[pred_prefix + \"expr_classification\"] is not None:\n # if self.config.model.expression_balancing:\n # weight = class_weight\n # else:\n # weight = torch.ones_like(class_weight)\n #\n # # metrics[\"expr_cross_entropy\"] = F.cross_entropy(pred[\"expr_classification\"], gt[\"expr_classification\"][:, 0], torch.ones_like(class_weight))\n # # metrics[\"expr_weighted_cross_entropy\"] = F.cross_entropy(pred[\"expr_classification\"], gt[\"expr_classification\"][:, 0], class_weight)\n # metrics[pred_prefix + \"expr_nll\"] = F.nll_loss(pred[pred_prefix + \"expr_classification\"],\n # gt[\"expr_classification\"][:, 0],\n # torch.ones_like(class_weight))\n # metrics[pred_prefix + \"expr_weighted_nll\"] = F.nll_loss(pred[pred_prefix + \"expr_classification\"],\n # gt[\"expr_classification\"][:, 0],\n # class_weight)\n # metrics[pred_prefix + \"expr_acc\"] = ACC_torch( torch.argmax(pred[pred_prefix + \"expr_classification\"], dim=1),\n # gt[\"expr_classification\"][:, 0])\n #\n #\n # if self.exp_loss is not None:\n # if callable(self.exp_loss):\n # losses[pred_prefix + \"expr\"] = self.exp_loss(pred[pred_prefix + \"expr_classification\"], gt[\"expr_classification\"][:, 0], weight)\n # elif isinstance(self.exp_loss, dict):\n # for name, weight in self.exp_loss.items():\n # losses[pred_prefix + name] = metrics[pred_prefix + name]*weight\n # else:\n # raise RuntimeError(f\"Uknown expression loss '{self.exp_loss}'\")\n\n return losses, metrics\n\n def compute_loss(self,\n pred,\n gt,\n class_weight,\n valence_sample_weight=None,\n arousal_sample_weight=None,\n va_sample_weight=None,\n expression_sample_weight=None,\n au_positive_weights=None,\n training=True):\n losses, metrics = self._compute_loss(pred, gt, class_weight, training,\n valence_sample_weight=valence_sample_weight,\n arousal_sample_weight=arousal_sample_weight,\n va_sample_weight=va_sample_weight,\n expression_sample_weight=expression_sample_weight,\n au_positive_weights=au_positive_weights\n )\n loss = 0.\n for key, value in losses.items():\n if value.ndim == 0:\n loss += value\n elif value.ndim == 1:\n loss += value[0]\n else:\n raise RuntimeError(f\"Invalid loss shape for term '{key}': {value.shape}\")\n losses[\"total\"] = loss\n return losses, metrics\n\n def training_step(self, batch, batch_idx, *args, **kwargs):\n values = self.forward(batch)\n # valence_pred = values[\"valence\"]\n # arousal_pred = values[\"arousal\"]\n # expr_classification_pred = values[\"expr_classification\"]\n\n valence_sample_weight = batch[\"valence_sample_weight\"] if 
\"valence_sample_weight\" in batch.keys() else None\n arousal_sample_weight = batch[\"arousal_sample_weight\"] if \"arousal_sample_weight\" in batch.keys() else None\n va_sample_weight = batch[\"va_sample_weight\"] if \"va_sample_weight\" in batch.keys() else None\n expression_sample_weight = batch[\"expression_sample_weight\"] if \"expression_sample_weight\" in batch.keys() else None\n\n gt = {}\n if self.predicts_valence() and self.trains_valence():\n valence_gt = batch[\"va\"][:, 0:1]\n gt[\"valence\"] = valence_gt\n if self.predicts_arousal() and self.trains_arousal():\n arousal_gt = batch[\"va\"][:, 1:2]\n gt[\"arousal\"] = arousal_gt\n if self.predicts_expression() and self.trains_expression():\n expr_classification_gt = batch[\"affectnetexp\"]\n gt[\"expr_classification\"] = expr_classification_gt\n if \"expression_weight\" in batch.keys():\n class_weight = batch[\"expression_weight\"][0]\n else:\n class_weight = None\n else:\n class_weight = None\n\n if self.predicts_AUs():\n gt[\"AUs\"] = batch[\"au\"]\n if \"au_pos_weights\" in batch.keys():\n au_positive_weights = batch[\"au_pos_weights\"][0]\n else:\n au_positive_weights = None\n else:\n au_positive_weights = None\n\n pred = values\n losses, metrics = self.compute_loss(pred, gt, class_weight, training=True,\n valence_sample_weight=valence_sample_weight,\n arousal_sample_weight=arousal_sample_weight,\n va_sample_weight=va_sample_weight,\n expression_sample_weight=expression_sample_weight,\n au_positive_weights=au_positive_weights,\n )\n\n self._log_losses_and_metrics(losses, metrics, \"train\")\n total_loss = losses[\"total\"]\n # print(\"total_loss: \", total_loss.item())\n return total_loss\n\n def validation_step(self, batch, batch_idx, dataloader_idx=None):\n values = self.forward(batch)\n # valence_pred = values[\"valence\"]\n # arousal_pred = values[\"arousal\"]\n # expr_classification_pred = values[\"expr_classification\"]\n\n gt = {}\n if self.predicts_valence():\n valence_gt = batch[\"va\"][:, 0:1]\n gt[\"valence\"] = valence_gt\n if self.predicts_arousal():\n arousal_gt = batch[\"va\"][:, 1:2]\n gt[\"arousal\"] = arousal_gt\n if self.predicts_expression():\n if \"affectnetexp\" in batch.keys():\n expr_classification_gt = batch[\"affectnetexp\"]\n if \"expression_weight\" in batch.keys():\n class_weight = batch[\"expression_weight\"][0]\n else:\n class_weight = None\n gt[\"expr_classification\"] = expr_classification_gt\n else:\n class_weight = None\n else:\n class_weight = None\n\n if self.predicts_AUs():\n gt[\"AUs\"] = batch[\"au\"]\n\n if \"au_pos_weights\" in batch.keys():\n au_positive_weights = batch[\"au_pos_weights\"][0]\n else:\n au_positive_weights = None\n else:\n au_positive_weights = None\n\n pred = values\n # pred = {}\n # pred[\"valence\"] = valence_pred\n # pred[\"arousal\"] = arousal_pred\n # pred[\"expr_classification\"] = expr_classification_pred\n\n valence_sample_weight = batch[\"valence_sample_weight\"] if \"valence_sample_weight\" in batch.keys() else None\n arousal_sample_weight = batch[\"arousal_sample_weight\"] if \"arousal_sample_weight\" in batch.keys() else None\n va_sample_weight = batch[\"va_sample_weight\"] if \"va_sample_weight\" in batch.keys() else None\n expression_sample_weight = batch[\n \"expression_sample_weight\"] if \"expression_sample_weight\" in batch.keys() else None\n\n losses, metrics = self.compute_loss(pred, gt, class_weight, training=False,\n valence_sample_weight=valence_sample_weight,\n arousal_sample_weight=arousal_sample_weight,\n 
va_sample_weight=va_sample_weight,\n expression_sample_weight=expression_sample_weight,\n au_positive_weights=au_positive_weights,\n )\n\n self._log_losses_and_metrics(losses, metrics, \"val\")\n # visdict = self._test_visualization(values, batch, batch_idx, dataloader_idx=dataloader_idx)\n total_loss = losses[\"total\"]\n # print(\"total_loss: \", total_loss.item())\n return total_loss\n\n def _test_visualization(self, output_values, input_batch, batch_idx, dataloader_idx=None):\n raise NotImplementedError()\n\n def test_step(self, batch, batch_idx, dataloader_idx=None):\n values = self.forward(batch)\n # valence_pred = values[\"valence\"]\n # arousal_pred = values[\"arousal\"]\n # expr_classification_pred = values[\"expr_classification\"]\n if \"expression_weight\" in batch.keys():\n class_weight = batch[\"expression_weight\"][0]\n else:\n class_weight = None\n\n gt = {}\n if \"va\" in batch.keys():\n valence_gt = batch[\"va\"][:, 0:1]\n arousal_gt = batch[\"va\"][:, 1:2]\n if self.predicts_valence():\n gt[\"valence\"] = valence_gt\n if self.predicts_arousal():\n gt[\"arousal\"] = arousal_gt\n if \"affectnetexp\" in batch.keys():\n expr_classification_gt = batch[\"affectnetexp\"]\n if self.predicts_expression():\n gt[\"expr_classification\"] = expr_classification_gt\n\n au_positive_weights = None\n if \"AUs\" in batch.keys():\n if self.predicts_AUs():\n gt[\"AUs\"] = batch[\"AUs\"]\n if 'au_pos_weights' in batch.keys():\n au_positive_weights = batch['au_pos_weights'][0]\n\n pred = values\n losses, metrics = self.compute_loss(pred, gt, class_weight,\n au_positive_weights=au_positive_weights,\n training=False)\n\n if self.config.learning.test_vis_frequency > 0:\n if batch_idx % self.config.learning.test_vis_frequency == 0:\n self._test_visualization(values, batch, batch_idx, dataloader_idx=dataloader_idx)\n\n self._log_losses_and_metrics(losses, metrics, \"test\")\n self.logger.log_metrics({f\"test_step\": batch_idx})\n\n def _log_losses_and_metrics(self, losses, metrics, stage):\n if stage in [\"train\", \"val\"]:\n on_epoch = True\n on_step = False\n self.log_dict({f\"{stage}_loss_\" + key: value for key, value in losses.items()}, on_epoch=on_epoch,\n on_step=on_step, sync_dist=True)\n self.log_dict({f\"{stage}_metric_\" + key: value for key, value in metrics.items()}, on_epoch=on_epoch,\n on_step=on_step, sync_dist=True)\n else:\n # on_epoch = False\n on_epoch = True\n on_step = True\n # self.logger.log_metrics({f\"{stage}_loss_\" + key: value.detach().cpu() for key, value in\n # losses.items()}) # , on_epoch=on_epoch, on_step=on_step)\n # #\n # self.logger.log_metrics({f\"{stage}_metric_\" + key: value.detach().cpu() for key, value in\n # metrics.items()}) # , on_epoch=on_epoch, on_step=on_step)\n self.log_dict({f\"{stage}_loss_\" + key: value.detach().cpu() for key, value in\n losses.items()}, on_epoch=on_epoch, on_step=on_step)\n #\n self.log_dict({f\"{stage}_metric_\" + key: value.detach().cpu() for key, value in\n metrics.items()}, on_epoch=on_epoch, on_step=on_step)"
},
{
"identifier": "create_swin_backbone",
"path": "inferno/models/Swin.py",
"snippet": "def create_swin_backbone(swin_cfg, num_classes, img_size, load_pretrained_swin=False, pretrained_model=None):\n \"\"\"\n Returns a SWIN backbone with a head of size num_classes.\n \"\"\"\n\n with open_dict(swin_cfg):\n swin_cfg.MODEL.NUM_CLASSES = num_classes\n swin_cfg.MODEL.SWIN.PATCH_SIZE = 4\n swin_cfg.MODEL.SWIN.IN_CHANS = 3\n swin_cfg.MODEL.SWIN.MLP_RATIO = 4.\n swin_cfg.MODEL.SWIN.QKV_BIAS = True\n swin_cfg.MODEL.SWIN.QK_SCALE = None\n swin_cfg.MODEL.SWIN.APE = False\n swin_cfg.MODEL.SWIN.PATCH_NORM = True\n\n # Dropout rate\n if 'DROP_RATE' not in swin_cfg.MODEL.keys():\n swin_cfg.MODEL.DROP_RATE = 0.0\n # Drop path rate\n if 'DROP_PATH_RATE' not in swin_cfg.MODEL.keys():\n swin_cfg.MODEL.DROP_PATH_RATE = 0.1\n # Label Smoothing\n\n if 'DROP_PATH_RATE' not in swin_cfg.MODEL.keys():\n swin_cfg.MODEL.LABEL_SMOOTHING = 0.1\n\n swin_cfg.DATA = {}\n swin_cfg.DATA.IMG_SIZE = img_size\n\n swin_cfg.TRAIN = {}\n swin_cfg.TRAIN.USE_CHECKPOINT = False\n\n # # Swin Transformer parameters\n # _C.MODEL.SWIN = CN()\n # _C.MODEL.SWIN.PATCH_SIZE = 4\n # _C.MODEL.SWIN.IN_CHANS = 3\n # _C.MODEL.SWIN.EMBED_DIM = 96\n # _C.MODEL.SWIN.DEPTHS = [2, 2, 6, 2]\n # _C.MODEL.SWIN.NUM_HEADS = [3, 6, 12, 24]\n # _C.MODEL.SWIN.WINDOW_SIZE = 7\n # _C.MODEL.SWIN.MLP_RATIO = 4.\n # _C.MODEL.SWIN.QKV_BIAS = True\n # _C.MODEL.SWIN.QK_SCALE = None\n # _C.MODEL.SWIN.APE = False\n # _C.MODEL.SWIN.PATCH_NORM = True\n\n swin = build_model(swin_cfg)\n\n if load_pretrained_swin:\n # load the pretrained model from the official repo\n path_to_model = swin_path / \"pretrained_models\" / (\n pretrained_model + \".pth\")\n state_dict = torch.load(path_to_model)\n # delete the head of the model from the state_dict - we have a different number of outputs\n del state_dict['model']['head.weight']\n del state_dict['model']['head.bias']\n swin.load_state_dict(state_dict['model'], strict=False)\n print(f\"Loading pretrained model from '{path_to_model}'\")\n\n return swin"
}
] | import sys
import torch
import pytorch_lightning as pl
import numpy as np
import torch.nn.functional as F
import pytorch_lightning.plugins.environments.lightning_environment as le
from inferno.utils.other import class_from_str
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning.loggers import WandbLogger
from inferno.datasets.AffectNetDataModule import AffectNetExpressions
from inferno.datasets.AffWild2Dataset import Expression7
from pathlib import Path
from inferno.utils.lightning_logging import _log_array_image, _log_wandb_image, _torch_image2np
from inferno.models.EmotionRecognitionModuleBase import EmotionRecognitionBaseModule
from omegaconf import open_dict
from .Swin import create_swin_backbone | 9,122 | self.config.model.swin_type )
self.num_classes = self.n_expression
def get_last_feature_size(self):
return self.swin.num_features
def _forward(self, images):
output, emo_feat_2 = self.swin(images, include_features=True)
out_idx = 0
if self.predicts_expression():
expr_classification = output[:, out_idx:(out_idx + self.n_expression)]
if self.exp_activation is not None:
expr_classification = self.exp_activation(expr_classification, dim=1)
out_idx += self.n_expression
else:
expr_classification = None
if self.predicts_valence():
valence = output[:, out_idx:(out_idx + 1)]
if self.v_activation is not None:
valence = self.v_activation(valence)
out_idx += 1
else:
valence = None
if self.predicts_arousal():
arousal = output[:, out_idx:(out_idx + 1)]
if self.a_activation is not None:
arousal = self.a_activation(arousal)
out_idx += 1
else:
arousal = None
if self.predicts_AUs():
num_AUs = self.config.model.predict_AUs
AUs = output[:, out_idx:(out_idx + num_AUs)]
if self.AU_activation is not None:
AUs = self.AU_activation(AUs)
out_idx += num_AUs
else:
AUs = None
assert out_idx == output.shape[1]
values = {}
values["emo_feat_2"] = emo_feat_2
values["valence"] = valence
values["arousal"] = arousal
values["expr_classification"] = expr_classification
values["AUs"] = AUs
return values
def forward(self, batch):
images = batch['image']
if len(images.shape) == 5:
K = images.shape[1]
elif len(images.shape) == 4:
K = 1
else:
raise RuntimeError("Invalid image batch dimensions.")
# print("Batch size!")
# print(images.shape)
images = images.view(-1, images.shape[-3], images.shape[-2], images.shape[-1])
emotion = self._forward(images)
valence = emotion['valence']
arousal = emotion['arousal']
# emotion['expression'] = emotion['expression']
# classes_probs = F.softmax(emotion['expression'])
# expression = self.exp_activation(emotion['expr_classification'], dim=1)
values = {}
if self.predicts_valence():
values['valence'] = valence.view(-1,1)
if self.predicts_arousal():
values['arousal'] = arousal.view(-1,1)
# values['expr_classification'] = expression
values['expr_classification'] = emotion['expr_classification']
if self.predicts_AUs():
values['AUs'] = emotion['AUs']
values['emo_feat_2'] = emotion['emo_feat_2']
# TODO: WARNING: HACK
if 'n_expression' not in self.config.data.keys():
if self.n_expression == 8:
raise NotImplementedError("This here should not be called")
values['expr_classification'] = torch.cat([
values['expr_classification'], torch.zeros_like(values['expr_classification'][:, 0:1])
+ 2*values['expr_classification'].min()],
dim=1)
return values
def _get_trainable_parameters(self):
return list(self.swin.parameters())
## we can leave the default implementation
# def train(self, mode=True):
# pass
def _vae_2_str(self, valence=None, arousal=None, affnet_expr=None, expr7=None, prefix=""):
caption = ""
if len(prefix) > 0:
prefix += "_"
if valence is not None and not np.isnan(valence).any():
caption += prefix + "valence= %.03f\n" % valence
if arousal is not None and not np.isnan(arousal).any():
caption += prefix + "arousal= %.03f\n" % arousal
if affnet_expr is not None and not np.isnan(affnet_expr).any():
caption += prefix + "expression= %s \n" % AffectNetExpressions(affnet_expr).name
if expr7 is not None and not np.isnan(expr7).any():
| """
Author: Radek Danecek
Copyright (c) 2022, Radek Danecek
All rights reserved.
# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# Using this computer program means that you agree to the terms
# in the LICENSE file included with this software distribution.
# Any use not explicitly granted by the LICENSE is prohibited.
#
# Copyright©2022 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# For comments or questions, please email us at [email protected]
# For commercial licensing contact, please contact [email protected]
"""
class EmoSwinModule(EmotionRecognitionBaseModule):
"""
    Emotion recognition module which uses a Swin Transformer as its backbone.
"""
def __init__(self, config):
super().__init__(config)
# self.n_expression = 9 # we use all affectnet classes (included none) for now
self.n_expression = self.config.data.n_expression if 'n_expression' in self.config.data.keys() else 9 # we use all affectnet classes (included none) for now
self.num_outputs = 0
if self.config.model.predict_expression:
self.num_outputs += self.n_expression
self.num_classes = self.n_expression
if self.config.model.predict_valence:
self.num_outputs += 1
if self.config.model.predict_arousal:
self.num_outputs += 1
if 'predict_AUs' in self.config.model.keys() and self.config.model.predict_AUs:
self.num_outputs += self.config.model.predict_AUs
with open_dict(config.model.swin_cfg):
self.swin = create_swin_backbone(config.model.swin_cfg,
self.num_outputs,
config.data.image_size,
config.model.load_pretrained_swin,
self.config.model.swin_type )
self.num_classes = self.n_expression
def get_last_feature_size(self):
return self.swin.num_features
def _forward(self, images):
output, emo_feat_2 = self.swin(images, include_features=True)
out_idx = 0
if self.predicts_expression():
expr_classification = output[:, out_idx:(out_idx + self.n_expression)]
if self.exp_activation is not None:
expr_classification = self.exp_activation(expr_classification, dim=1)
out_idx += self.n_expression
else:
expr_classification = None
if self.predicts_valence():
valence = output[:, out_idx:(out_idx + 1)]
if self.v_activation is not None:
valence = self.v_activation(valence)
out_idx += 1
else:
valence = None
if self.predicts_arousal():
arousal = output[:, out_idx:(out_idx + 1)]
if self.a_activation is not None:
arousal = self.a_activation(arousal)
out_idx += 1
else:
arousal = None
if self.predicts_AUs():
num_AUs = self.config.model.predict_AUs
AUs = output[:, out_idx:(out_idx + num_AUs)]
if self.AU_activation is not None:
AUs = self.AU_activation(AUs)
out_idx += num_AUs
else:
AUs = None
assert out_idx == output.shape[1]
values = {}
values["emo_feat_2"] = emo_feat_2
values["valence"] = valence
values["arousal"] = arousal
values["expr_classification"] = expr_classification
values["AUs"] = AUs
return values
def forward(self, batch):
images = batch['image']
if len(images.shape) == 5:
K = images.shape[1]
elif len(images.shape) == 4:
K = 1
else:
raise RuntimeError("Invalid image batch dimensions.")
# print("Batch size!")
# print(images.shape)
images = images.view(-1, images.shape[-3], images.shape[-2], images.shape[-1])
emotion = self._forward(images)
valence = emotion['valence']
arousal = emotion['arousal']
# emotion['expression'] = emotion['expression']
# classes_probs = F.softmax(emotion['expression'])
# expression = self.exp_activation(emotion['expr_classification'], dim=1)
values = {}
if self.predicts_valence():
values['valence'] = valence.view(-1,1)
if self.predicts_arousal():
values['arousal'] = arousal.view(-1,1)
# values['expr_classification'] = expression
values['expr_classification'] = emotion['expr_classification']
if self.predicts_AUs():
values['AUs'] = emotion['AUs']
values['emo_feat_2'] = emotion['emo_feat_2']
# TODO: WARNING: HACK
if 'n_expression' not in self.config.data.keys():
if self.n_expression == 8:
raise NotImplementedError("This here should not be called")
values['expr_classification'] = torch.cat([
values['expr_classification'], torch.zeros_like(values['expr_classification'][:, 0:1])
+ 2*values['expr_classification'].min()],
dim=1)
return values
def _get_trainable_parameters(self):
return list(self.swin.parameters())
## we can leave the default implementation
# def train(self, mode=True):
# pass
def _vae_2_str(self, valence=None, arousal=None, affnet_expr=None, expr7=None, prefix=""):
caption = ""
if len(prefix) > 0:
prefix += "_"
if valence is not None and not np.isnan(valence).any():
caption += prefix + "valence= %.03f\n" % valence
if arousal is not None and not np.isnan(arousal).any():
caption += prefix + "arousal= %.03f\n" % arousal
if affnet_expr is not None and not np.isnan(affnet_expr).any():
caption += prefix + "expression= %s \n" % AffectNetExpressions(affnet_expr).name
if expr7 is not None and not np.isnan(expr7).any(): | caption += prefix +"expression= %s \n" % Expression7(expr7).name | 2 | 2023-11-07 20:13:32+00:00 | 12k |
hxz393/ConfigCenterComparer | ui/dialog_settings_main.py | [
{
"identifier": "LANG_DICTS",
"path": "config/lang_dict_all.py",
"snippet": "LANG_DICTS = {\n 'English': {\n 'main_1': 'Ready',\n 'main_2': ' &Start ',\n 'main_3': ' &Edit ',\n 'main_4': ' &Options',\n 'main_5': ' &Help ',\n 'label_status_error': 'Error occurred',\n 'ui.action_exit_1': 'Quit',\n 'ui.action_exit_2': 'Quit the application',\n 'ui.action_debug_1': 'Debug',\n 'ui.action_debug_2': 'Debugging in progress...',\n 'ui.action_compare_1': 'Compare',\n 'ui.action_compare_2': 'Find duplicate configurations within the same environment',\n 'ui.action_compare_3': 'Value',\n 'ui.action_compare_4': 'Please run data retrieval first',\n 'ui.action_save_1': 'Export',\n 'ui.action_save_2': 'Export table data to file',\n 'ui.action_save_3': 'Choose a file to save',\n 'ui.action_save_5': 'File saved successfully.',\n 'ui.action_save_7': 'Failed to save file.',\n 'ui.action_save_8': 'Failed to extract data from the table.',\n 'ui.action_update_1': 'Check Updates',\n 'ui.action_update_2': 'Check for Updates Online',\n 'ui.action_update_3': 'Failed to Check for Updates!',\n 'ui.action_update_4': 'New Version Available',\n 'ui.action_update_5': 'No Update Needed',\n 'ui.action_update_6': 'Current Version: ',\n 'ui.action_update_7': 'Latest Version: ',\n 'ui.action_update_8': 'Checking for Updates...',\n 'ui.action_update_9': 'Update Check Complete',\n 'ui.action_unskip_1': 'Unskip',\n 'ui.action_unskip_2': 'Remove selected configuration items from the skip list',\n 'ui.action_unskip_3': 'Unskip Completed',\n 'ui.action_skip_1': 'Skip',\n 'ui.action_skip_2': 'Mark selected configuration items as skipped',\n 'ui.action_skip_3': 'Added to Skip List Completed',\n 'ui.action_about_1': 'About',\n 'ui.action_about_2': 'Information about the Software',\n 'ui.action_copy_1': 'Copy',\n 'ui.action_copy_2': 'Copy Selected Content',\n 'ui.action_logs_1': 'View Logs',\n 'ui.action_logs_2': 'Open Log Viewer',\n 'ui.action_setting_1': 'Program Settings',\n 'ui.action_setting_2': 'Open Program Settings Dialog',\n 'ui.action_setting_connection_1': 'Connection Settings',\n 'ui.action_setting_connection_2': 'Open Database Connection Settings Dialog',\n 'ui.action_test_1': 'Test',\n 'ui.action_test_2': 'Test Database Connection',\n 'ui.action_test_3': 'Test failed, an exception occurred.',\n 'ui.action_test_4': 'Environment: ',\n 'ui.action_test_5': 'Connection OK!',\n 'ui.action_test_6': 'Connection Failed!',\n 'ui.action_test_7': 'Testing database connection...',\n 'ui.action_test_8': 'Test connection completed',\n 'ui.action_test_9': 'Test Skipped!',\n 'ui.action_test_10': 'Test Result: ',\n 'ui.action_start_1': 'Run',\n 'ui.action_start_2': 'Start execution, fetch configurations from database',\n 'ui.action_start_3': 'Querying database...',\n 'ui.action_start_4': 'No configurations fetched, please check database connection settings or view the error log.',\n 'ui.action_start_5': 'Error occurred while inserting table into data, please check the log for details.',\n 'ui.action_start_6': 'Error occurred while prepare data, please check the log for details.',\n 'ui.action_start_7': 'A critical error occurred, please view the log in help.',\n 'ui.action_start_8': 'Inconsistent',\n 'ui.action_start_9': 'Fully',\n 'ui.action_start_10': 'Partially',\n 'ui.action_start_11': 'No',\n 'ui.action_start_12': 'Yes',\n 'ui.action_start_13': 'Unknown',\n 'ui.table_main_1': 'Name',\n 'ui.table_main_2': 'Group',\n 'ui.table_main_3': 'Key',\n 'ui.table_main_4': 'modified time',\n 'ui.table_main_5': 'Consistency',\n 'ui.table_main_6': 'Skip',\n 'ui.filter_bar_1': 'Filter Name:',\n 'ui.filter_bar_2': 
'Quick Filter:',\n 'ui.filter_bar_3': '--Show All--',\n 'ui.filter_bar_4': 'Fully Equal',\n 'ui.filter_bar_5': 'Partially Equal',\n 'ui.filter_bar_6': 'Skipped',\n 'ui.filter_bar_7': 'Invert',\n 'ui.filter_bar_8': 'Global Search:',\n 'ui.filter_bar_9': 'Search',\n 'ui.filter_bar_10': 'Reset',\n 'ui.filter_bar_11': 'Entries',\n 'ui.dialog_logs_1': 'View Logs',\n 'ui.dialog_logs_2': 'Log Level:',\n 'ui.dialog_logs_4': 'Feedback',\n 'ui.dialog_logs_5': 'Clear',\n 'ui.dialog_logs_6': 'Close',\n 'ui.dialog_logs_7': 'Refresh',\n 'ui.dialog_comparison_1': 'Comparison Results',\n 'ui.dialog_comparison_2': 'Public Namespace:',\n 'ui.dialog_comparison_3': 'Set',\n 'ui.dialog_comparison_4': 'Search Configuration:',\n 'ui.dialog_comparison_5': 'Public namespaces styled red to highlight global properties',\n 'ui.dialog_comparison_6': 'Search configuration keys and values',\n 'ui.dialog_settings_main_1': 'Settings',\n 'ui.dialog_settings_main_2': 'Select Language:',\n 'ui.dialog_settings_main_3': 'Select Configuration Center Type:',\n 'ui.dialog_settings_main_4': 'Select Apollo Service Name Field:',\n 'ui.dialog_settings_main_5': 'Main Settings',\n 'ui.dialog_settings_main_6': 'Replace Service Name (Original), split by space:',\n 'ui.dialog_settings_main_7': 'Replace Service Name (New), split by space:',\n 'ui.dialog_settings_main_8': 'Trim Service Name (Prefix), split by space:',\n 'ui.dialog_settings_main_9': 'Trim Service Name (Suffix), split by space:',\n 'ui.dialog_settings_main_10': 'Service Name Alignment',\n 'ui.dialog_settings_main_11': 'Confirm',\n 'ui.dialog_settings_main_12': 'Cancel',\n 'ui.dialog_settings_main_13': 'Configuration Saved Successfully!',\n 'ui.dialog_settings_main_15': 'Changing Language Requires Restart to Take Effect, Restart Now?',\n 'ui.dialog_settings_main_16': 'Table Color Switch, turning off can enhance performance:',\n 'ui.dialog_settings_connection_1': 'Database Configuration',\n 'ui.dialog_settings_connection_2': 'Production',\n 'ui.dialog_settings_connection_3': 'Preview',\n 'ui.dialog_settings_connection_4': 'Testing',\n 'ui.dialog_settings_connection_5': 'Development',\n 'ui.dialog_settings_connection_6': 'SSH Tunnel',\n 'ui.dialog_settings_connection_7': 'Enable:',\n 'ui.dialog_settings_connection_8': 'Address:',\n 'ui.dialog_settings_connection_9': 'Port:',\n 'ui.dialog_settings_connection_10': 'Username:',\n 'ui.dialog_settings_connection_11': 'Password:',\n 'ui.dialog_settings_connection_12': 'Database:',\n 'ui.dialog_settings_connection_13': 'MySQL Connection',\n 'ui.dialog_about_1': 'About',\n 'ui.dialog_about_2': 'Configuration Center Comparison Tool',\n 'ui.dialog_about_3': 'Version: ',\n 'ui.dialog_about_4': 'Author :',\n 'ui.dialog_about_5': 'Website :',\n 'ui.dialog_about_6': 'Homepage:',\n 'ui.dialog_about_7': \"\"\"\n <p style=\"text-align: center; font-size: 16px; font-weight: bold;\">Introduction</p>\n <p style=\"text-align: justify;\">\n This is a specialized comparison tool for configuration centers, capable of comparing configurations across different clusters, with a basic filtering and search functionality.\n </p>\n <p style=\"text-align: justify;\">\n Currently supported configuration centers and versions:\n </p>\n <p><b> Apollo:</b>2.0.0, 2.1.0</p>\n <p><b> Nacos:</b>v2.1.0</p>\n \n <p style=\"text-align: center; font-size: 16px; font-weight: bold;\">Getting Help</p>\n <p style=\"text-align: justify;\">\n Please refer to the project homepage or website for usage instructions before running. 
If you encounter errors during use, first check the logs for troubleshooting. For bugs and suggestions, please submit an issue on the project homepage.<br>\n </p>\n \n <p style=\"text-align: center; font-size: 16px; font-weight: bold;\">Build Tools</p>\n <p style=\"text-align: justify;\">\n ConfigCenterComparer was built using the following tools:\n </p>\n <p><b> Program:</b>Python 3.10.4</p>\n <p><b> Interface:</b>PyQT 5.15.10</p>\n <p><b> Icons:</b><a href='https://icons8.com/'>icons8.com</a></p>\n \"\"\"\n },\n '中文简体': {\n 'main_1': '准备就绪',\n 'main_2': '开始(&S)',\n 'main_3': '编辑(&E)',\n 'main_4': '选项(&O)',\n 'main_5': '帮助(&H)',\n 'label_status_error': '发生错误!',\n 'ui.action_exit_1': '退出程序',\n 'ui.action_exit_2': '立即退出程序',\n 'ui.action_debug_1': '调试程序',\n 'ui.action_debug_2': '调试程序...',\n 'ui.action_compare_1': '配置查重',\n 'ui.action_compare_2': '查找相同环境内重复配置',\n 'ui.action_compare_3': '值',\n 'ui.action_compare_4': '请先运行获取数据',\n 'ui.action_save_1': '导出列表',\n 'ui.action_save_2': '导出当前列表到文件',\n 'ui.action_save_3': '保存到文件',\n 'ui.action_save_5': '文件保存成功',\n 'ui.action_save_7': '文件保存失败。',\n 'ui.action_save_8': '获取表格数据失败。',\n 'ui.action_update_1': '检查更新',\n 'ui.action_update_2': '在线检查更新',\n 'ui.action_update_3': '检查更新失败!',\n 'ui.action_update_4': '有新版发布',\n 'ui.action_update_5': '不需要更新',\n 'ui.action_update_6': '当前版本:',\n 'ui.action_update_7': '最新版本:',\n 'ui.action_update_8': '检查更新中...',\n 'ui.action_update_9': '检查更新完成',\n 'ui.action_unskip_1': '取消忽略',\n 'ui.action_unskip_2': '选中配置项从忽略列表中移除',\n 'ui.action_unskip_3': '取消忽略完成',\n 'ui.action_skip_1': '加入忽略',\n 'ui.action_skip_2': '将选中配置项标记为忽略',\n 'ui.action_skip_3': '已加入忽略列表',\n 'ui.action_about_1': '关于软件',\n 'ui.action_about_2': '软件相关信息',\n 'ui.action_copy_1': '复制内容',\n 'ui.action_copy_2': '复制选择的内容',\n 'ui.action_logs_1': '查看日志',\n 'ui.action_logs_2': '打开日志文件',\n 'ui.action_setting_1': '程序设置',\n 'ui.action_setting_2': '打开程序设置对话框',\n 'ui.action_setting_connection_1': '连接配置',\n 'ui.action_setting_connection_2': '打开数据库连接配置对话框',\n 'ui.action_test_1': '测试连接',\n 'ui.action_test_2': '测试数据库连接',\n 'ui.action_test_3': '测试失败,发生异常。',\n 'ui.action_test_4': '环境:',\n 'ui.action_test_5': '连接成功!',\n 'ui.action_test_6': '连接失败!',\n 'ui.action_test_7': '测试数据库连接中...',\n 'ui.action_test_8': '测试连接完成',\n 'ui.action_test_9': '跳过测试!',\n 'ui.action_test_10': '测试结果:',\n 'ui.action_start_1': '开始运行',\n 'ui.action_start_2': '开始运行,从数据库中获取配置',\n 'ui.action_start_3': '正在查询数据库...',\n 'ui.action_start_4': '没有获取到任何配置,请检查数据库连接设置或查看错误日志。',\n 'ui.action_start_5': '把列表插入数据时出现错误,请在帮助中查看日志。',\n 'ui.action_start_6': '合成表格数据失败,请在帮助中查看日志。',\n 'ui.action_start_7': '运行发生严重错误,请在帮助中查看日志。',\n 'ui.action_start_8': '不一致',\n 'ui.action_start_9': '完全一致',\n 'ui.action_start_10': '部分一致',\n 'ui.action_start_11': '否',\n 'ui.action_start_12': '是',\n 'ui.action_start_13': '未知状态',\n 'ui.table_main_1': '服务',\n 'ui.table_main_2': '分组',\n 'ui.table_main_3': '配置键',\n 'ui.table_main_4': '修改时间',\n 'ui.table_main_5': '一致性',\n 'ui.table_main_6': '忽略',\n 'ui.filter_bar_1': '过滤服务:',\n 'ui.filter_bar_2': '快速过滤:',\n 'ui.filter_bar_3': '--显示所有--',\n 'ui.filter_bar_4': '完全一致',\n 'ui.filter_bar_5': '部分一致',\n 'ui.filter_bar_6': '已忽略',\n 'ui.filter_bar_7': '反选',\n 'ui.filter_bar_8': '全局搜索:',\n 'ui.filter_bar_9': '搜索',\n 'ui.filter_bar_10': '重置',\n 'ui.filter_bar_11': '条配置',\n 'ui.dialog_logs_1': '查看日志',\n 'ui.dialog_logs_2': '日志等级:',\n 'ui.dialog_logs_4': '提交反馈',\n 'ui.dialog_logs_5': '清空',\n 'ui.dialog_logs_6': '关闭',\n 'ui.dialog_logs_7': '刷新',\n 'ui.dialog_comparison_1': '配置环境比较结果',\n 'ui.dialog_comparison_2': '公共配置:',\n 
'ui.dialog_comparison_3': '设置',\n 'ui.dialog_comparison_4': '搜索配置:',\n 'ui.dialog_comparison_5': '设置公共配置字体为红色,以方便查看',\n 'ui.dialog_comparison_6': '搜索配置键和值',\n 'ui.dialog_settings_main_1': '设置页面',\n 'ui.dialog_settings_main_2': '选择语言:',\n 'ui.dialog_settings_main_3': '选择配置中心类型:',\n 'ui.dialog_settings_main_4': '选择 Apollo 服务名字段:',\n 'ui.dialog_settings_main_5': '主设置',\n 'ui.dialog_settings_main_6': '替换服务名(原名),按空格分词:',\n 'ui.dialog_settings_main_7': '替换服务名(新名),按空格分词:',\n 'ui.dialog_settings_main_8': '裁剪服务名(前缀),按空格分词:',\n 'ui.dialog_settings_main_9': '裁剪服务名(后缀),按空格分词:',\n 'ui.dialog_settings_main_10': '服务名对齐',\n 'ui.dialog_settings_main_11': '确认',\n 'ui.dialog_settings_main_12': '取消',\n 'ui.dialog_settings_main_13': '配置保存成功',\n 'ui.dialog_settings_main_15': '修改语言需要重启软件才生效,立即重启吗?',\n 'ui.dialog_settings_main_16': '表格颜色开关,关闭可提升运行速度:',\n 'ui.dialog_settings_connection_1': '数据库配置',\n 'ui.dialog_settings_connection_2': '生产环境',\n 'ui.dialog_settings_connection_3': '预览环境',\n 'ui.dialog_settings_connection_4': '测试环境',\n 'ui.dialog_settings_connection_5': '开发环境',\n 'ui.dialog_settings_connection_6': 'SSH 隧道配置',\n 'ui.dialog_settings_connection_7': '启用:',\n 'ui.dialog_settings_connection_8': '地址:',\n 'ui.dialog_settings_connection_9': '端口:',\n 'ui.dialog_settings_connection_10': '用户名:',\n 'ui.dialog_settings_connection_11': '密码:',\n 'ui.dialog_settings_connection_12': '库名:',\n 'ui.dialog_settings_connection_13': 'MySQL 连接配置',\n 'ui.dialog_about_1': '关于',\n 'ui.dialog_about_2': '配置中心对比工具',\n 'ui.dialog_about_3': '版本:',\n 'ui.dialog_about_4': '作者:',\n 'ui.dialog_about_5': '网站:',\n 'ui.dialog_about_6': '主页:',\n 'ui.dialog_about_7': \"\"\"\n <p style=\"text-align: center; font-size: 16px; font-weight: bold;\">程序简介</p>\n <p style=\"text-align: justify;\">\n 这是一个配置中心专用对比工具,可支持不同集群中配置对比,并带有简单过滤搜索功能。\n </p>\n <p style=\"text-align: justify;\">\n 目前支持配置中心和版本:\n </p>\n <p><b> Apollo:</b>2.0.0、2.1.0</p>\n <p><b> Nacos:</b>v2.1.0</p>\n\n <p style=\"text-align: center; font-size: 16px; font-weight: bold;\">获取帮助</p>\n <p style=\"text-align: justify;\">\n 运行前请查阅项目主页或网站中使用说明。如使用中遇到错误,可先查看日志自行排错。Bug 和建议请到项目主页提交 issue。<br>\n </p>\n\n <p style=\"text-align: center; font-size: 16px; font-weight: bold;\">构建工具</p>\n <p style=\"text-align: justify;\">\n ConfigCenterComparer 构建用到以下工具:\n </p>\n <p><b> 程序:</b>Python 3.10.4</p>\n <p><b> 界面:</b>PyQT 5.15.10</p>\n <p><b> 图标:</b><a href='https://icons8.com/'>icons8.com</a></p>\n \"\"\",\n },\n}"
},
{
"identifier": "CONFIG_CENTER_LIST",
"path": "config/settings.py",
"snippet": "CONFIG_CENTER_LIST = ['Apollo', 'Nacos', ]"
},
{
"identifier": "APOLLO_NAME_LIST",
"path": "config/settings.py",
"snippet": "APOLLO_NAME_LIST = ['AppId', 'Name', ]"
},
{
"identifier": "COLOR_SET_LIST",
"path": "config/settings.py",
"snippet": "COLOR_SET_LIST = ['ON', 'OFF', ]"
},
{
"identifier": "get_resource_path",
"path": "lib/get_resource_path.py",
"snippet": "def get_resource_path(relative_path: Union[str, os.PathLike]) -> Optional[str]:\n \"\"\"\n 获取资源的绝对路径。这个函数适用于 PyInstaller 打包后的可执行文件。\n\n :type relative_path: Union[str, os.PathLike]\n :param relative_path: 相对路径,可以是字符串或 os.PathLike 对象。\n :rtype: Optional[str]\n :return: 资源的绝对路径,如果发生错误则返回 None。\n \"\"\"\n\n try:\n base_path = getattr(sys, '_MEIPASS', os.path.abspath(\".\"))\n return os.path.join(base_path, os.path.normpath(relative_path))\n except Exception:\n logger.exception(\"An error occurred while retrieving resource path\")\n return None"
},
{
"identifier": "ConfigManager",
"path": "ui/config_manager.py",
"snippet": "class ConfigManager(QObject):\n \"\"\"\n 配置管理器类,负责管理和更新应用程序的配置信息。\n\n 该类包括获取和设置主配置、连接配置和跳过列表的方法,同时提供信号以通知配置更新。\n\n :ivar config_main_updated: 当主配置更新时发出的信号。\n :ivar config_connection_updated: 当连接配置更新时发出的信号。\n :ivar skip_list_updated: 当跳过列表更新时发出的信号。\n \"\"\"\n config_main_updated = pyqtSignal()\n config_connection_updated = pyqtSignal()\n skip_list_updated = pyqtSignal()\n\n def __init__(self):\n super().__init__()\n self._config_main, self._config_apollo, self._config_nacos = read_config_all()\n self._skip_list = read_file_to_list(CONFIG_SKIP_PATH) or []\n\n def get_config_main(self) -> Optional[Dict[str, str]]:\n \"\"\"\n 获取主配置的副本。\n\n :return: 包含主配置的字典,如果出现错误则返回 None。\n :rtype: Optional[Dict[str, str]]\n \"\"\"\n try:\n return copy.deepcopy(self._config_main)\n except Exception:\n logger.exception(\"Failed to get config_main.\")\n return None\n\n def get_config_connection(self) -> Optional[Dict[str, Dict[str, Union[Dict[str, str], bool]]]]:\n \"\"\"\n 根据当前配置中心获取连接配置的副本。\n\n :return: 包含连接配置的字典,如果出现错误则返回 None。\n :rtype: Optional[Dict[str, Dict[str, Union[Dict[str, str], bool]]]]\n \"\"\"\n try:\n if self._config_main['config_center'] == 'Apollo':\n return copy.deepcopy(self._config_apollo)\n else:\n return copy.deepcopy(self._config_nacos)\n except Exception:\n logger.exception(\"Failed to get config_connection.\")\n return None\n\n def get_skip_list(self) -> Optional[List[str]]:\n \"\"\"\n 获取忽略列表的副本。\n\n :return: 包含跳过项的列表,如果出现错误则返回 None。\n :rtype: Optional[List[str]]\n \"\"\"\n try:\n return copy.deepcopy(self._skip_list)\n except Exception:\n logger.exception(\"Failed to get skip_list.\")\n return None\n\n def update_config_main(self, new_config: Dict[str, str]) -> None:\n \"\"\"\n 更新主配置。\n\n :param new_config: 新的主配置。\n :type new_config: Dict[str, str]\n \"\"\"\n try:\n self._config_main = new_config\n self.config_main_updated.emit()\n write_dict_to_json(CONFIG_MAIN_PATH, new_config)\n logger.info(\"Config updated: config_main\")\n except Exception:\n logger.exception(\"Failed to update config: config_main\")\n\n def update_config_connection(self, new_config: Dict[str, Dict[str, Union[Dict[str, str], bool]]]) -> None:\n \"\"\"\n 更新连接配置。\n\n :param new_config: 新的连接配置。\n :type new_config: Dict[str, Dict[str, Union[Dict[str, str], bool]]]\n \"\"\"\n try:\n if self._config_main['config_center'] == 'Apollo':\n self._config_apollo = new_config\n write_dict_to_json(CONFIG_APOLLO_PATH, new_config)\n else:\n self._config_nacos = new_config\n write_dict_to_json(CONFIG_NACOS_PATH, new_config)\n self.config_connection_updated.emit()\n logger.info(\"Config updated: config_connection\")\n except Exception:\n logger.exception(\"Failed to update config: config_connection\")\n\n def update_skip_list(self, new_config: List[str]) -> None:\n \"\"\"\n 更新忽略列表。\n\n :param new_config: 新忽略列表。\n :type new_config: List[str]\n \"\"\"\n try:\n self._skip_list = new_config\n # 写入到配置文件\n self.skip_list_updated.emit()\n write_list_to_file(CONFIG_SKIP_PATH, new_config)\n logger.info(\"Config updated: skip_list\")\n except Exception:\n logger.exception(\"Failed to update config: skip_list\")"
},
{
"identifier": "LangManager",
"path": "ui/lang_manager.py",
"snippet": "class LangManager(QObject):\n \"\"\"\n 语言管理类,用于管理和更新应用程序的语言字典。\n\n 此类继承自 QObject,可发出语言更新的信号。它通过 `get_lang_dict` 函数获取当前语言字典,并提供了更新语言的功能。\n\n :ivar _lang_dict: 当前使用的语言字典。\n :vartype _lang_dict: dict\n \"\"\"\n lang_updated = pyqtSignal()\n\n def __init__(self):\n super().__init__()\n self._lang_dict = get_lang_dict()\n\n def get_lang(self) -> Optional[Dict[str, str]]:\n \"\"\"\n 获取当前使用的语言字典的副本。\n\n :return: 当前语言字典的深拷贝。\n :rtype: Optional[Dict[str, str]]\n \"\"\"\n try:\n return copy.deepcopy(self._lang_dict)\n except Exception:\n logger.exception(\"Failed to retrieve language dictionary.\")\n return None\n\n def update_lang(self, new_lang: str) -> None:\n \"\"\"\n 更新当前使用的语言字典。\n\n :param new_lang: 新语言的标识符。\n :type new_lang: str\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n try:\n self._lang_dict = LANG_DICTS.get(new_lang, \"English\")\n self.lang_updated.emit()\n logger.info(f\"Language changed to {new_lang}\")\n except Exception:\n logger.exception(f\"Failed to changed language to {new_lang}\")"
}
] | import logging
from typing import List, Tuple
from PyQt5.QtCore import Qt, pyqtSignal
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QDialog, QLineEdit, QDialogButtonBox, QHBoxLayout, QVBoxLayout, QGroupBox, QLabel, QComboBox
from config.lang_dict_all import LANG_DICTS
from config.settings import CONFIG_CENTER_LIST, APOLLO_NAME_LIST, COLOR_SET_LIST
from lib.get_resource_path import get_resource_path
from ui.config_manager import ConfigManager
from ui.lang_manager import LangManager | 7,562 | """
This module provides a dialog interface for handling the application's main settings.
It covers language settings, configuration center type selection, service name replacement rules, and more. Users can modify these settings through this dialog and save them to the configuration file.
:author: assassing
:contact: https://github.com/hxz393
:copyright: Copyright 2023, hxz393. All rights reserved.
"""
logger = logging.getLogger(__name__)
class DialogSettingsMain(QDialog):
"""
    Main settings dialog class.
    This class provides a dialog interface for users to modify the application's main settings, such as the language, configuration center type, and service name replacement rules.
    It allows users to change these settings and save the changes by pressing the confirm button.
    :param lang_manager: Language manager instance.
    :type lang_manager: LangManager
    :param config_manager: Configuration manager instance.
    :type config_manager: ConfigManager
    :ivar status_updated: pyqtSignal instance used to emit status update signals.
:vartype status_updated: pyqtSignal
"""
status_updated = pyqtSignal(str)
def __init__(self,
lang_manager: LangManager,
config_manager: ConfigManager):
super().__init__(flags=Qt.Dialog | Qt.WindowCloseButtonHint)
        # Initialize the two managers
self.lang_manager = lang_manager
self.config_manager = config_manager
        # Get the configuration from the manager
self.config_main = self.config_manager.get_config_main()
        # Get the language dictionary
self.lang = self.lang_manager.get_lang()
self.initUI()
def initUI(self) -> None:
"""
        Initialize the user interface.
        This method sets the dialog's title, icon, style, and size, and creates the main layout. Into the main layout it adds the main settings group and the extra settings group, with stretchable space between them, and finally the button layout.
        The main settings group includes language selection, configuration center type selection, and so on, while the extra settings group covers the service name replacement rules. This method uses the private methods `_create_main_group` and `_create_extra_group` to build these groups.
        :return: No return value.
:rtype: None
"""
        # Main window
self.setWindowTitle(self.lang['ui.dialog_settings_main_1'])
self.setWindowIcon(QIcon(get_resource_path('media/icons8-setting-26')))
self.setStyleSheet("font-size: 14px;")
self.setMinimumSize(370, 490)
        # Main layout
layout = QVBoxLayout()
        # Upper layout
layout.addWidget(self._create_main_group())
        # Lower layout
layout.addWidget(self._create_extra_group())
        # Add stretchable space between the two groups
layout.addStretch()
        # Button layout
layout.addLayout(self._create_buttons())
self.setLayout(layout)
def _create_main_group(self) -> QGroupBox:
"""
        Create and return the layout of the main settings group.
        This private method builds the main settings group of the dialog, including language selection and configuration center type selection.
        :return: The configured main settings group.
:rtype: QGroupBox
"""
main_layout = QVBoxLayout()
        # Combo box: select language
| """
This module provides a dialog interface for handling the application's main settings.
It covers language settings, configuration center type selection, service name replacement rules, and more. Users can modify these settings through this dialog and save them to the configuration file.
:author: assassing
:contact: https://github.com/hxz393
:copyright: Copyright 2023, hxz393. All rights reserved.
"""
logger = logging.getLogger(__name__)
class DialogSettingsMain(QDialog):
"""
    Main settings dialog class.
    This class provides a dialog interface for users to modify the application's main settings, such as the language, configuration center type, and service name replacement rules.
    It allows users to change these settings and save the changes by pressing the confirm button.
    :param lang_manager: Language manager instance.
    :type lang_manager: LangManager
    :param config_manager: Configuration manager instance.
    :type config_manager: ConfigManager
    :ivar status_updated: pyqtSignal instance used to emit status update signals.
:vartype status_updated: pyqtSignal
"""
status_updated = pyqtSignal(str)
def __init__(self,
lang_manager: LangManager,
config_manager: ConfigManager):
super().__init__(flags=Qt.Dialog | Qt.WindowCloseButtonHint)
        # Initialize the two managers
self.lang_manager = lang_manager
self.config_manager = config_manager
        # Get the configuration from the manager
self.config_main = self.config_manager.get_config_main()
        # Get the language dictionary
self.lang = self.lang_manager.get_lang()
self.initUI()
def initUI(self) -> None:
"""
        Initialize the user interface.
        This method sets the dialog's title, icon, style, and size, and creates the main layout. Into the main layout it adds the main settings group and the extra settings group, with stretchable space between them, and finally the button layout.
        The main settings group includes language selection, configuration center type selection, and so on, while the extra settings group covers the service name replacement rules. This method uses the private methods `_create_main_group` and `_create_extra_group` to build these groups.
        :return: No return value.
:rtype: None
"""
        # Main window
self.setWindowTitle(self.lang['ui.dialog_settings_main_1'])
self.setWindowIcon(QIcon(get_resource_path('media/icons8-setting-26')))
self.setStyleSheet("font-size: 14px;")
self.setMinimumSize(370, 490)
        # Main layout
layout = QVBoxLayout()
        # Upper layout
layout.addWidget(self._create_main_group())
        # Lower layout
layout.addWidget(self._create_extra_group())
        # Add stretchable space between the two groups
layout.addStretch()
        # Button layout
layout.addLayout(self._create_buttons())
self.setLayout(layout)
def _create_main_group(self) -> QGroupBox:
"""
        Create and return the layout of the main settings group.
        This private method builds the main settings group of the dialog, including language selection and configuration center type selection.
        :return: The configured main settings group.
:rtype: QGroupBox
"""
main_layout = QVBoxLayout()
        # Combo box: select language | self.language_combo_box = self._create_combo_box(main_layout, LANG_DICTS.keys(), self.lang['ui.dialog_settings_main_2'], self.config_main.get('lang', 'English')) | 0 | 2023-11-07 01:02:38+00:00 | 12k |
pytorch-labs/ao | test/test.py | [
{
"identifier": "DynamicallyPerAxisQuantizedLinear",
"path": "torchao/quantization/dynamic_quant.py",
"snippet": "class DynamicallyPerAxisQuantizedLinear(torch.nn.Linear):\n \"\"\"\n This class is a replacement for `torch.nn.Linear`. It implements a\n quantized matmul using int8 dynamic symmetric per-token activation,\n and int8 symmetric per-channel weight quantization\n \"\"\"\n\n def __init__(\n self,\n in_features: int,\n out_features: int,\n bias: bool = True,\n ) -> None:\n super().__init__(in_features, out_features, bias)\n\n def forward(self, X: torch.Tensor, *args, **kwargs) -> torch.Tensor:\n \"\"\"\n Performs the forward pass of the quantized linear layer which consists\n of int8 dynamic symmetric per-token activation and int8 symmetric per-channel weight\n quantization\n\n Args:\n X (torch.Tensor): The input floating point tensor to the quantized linear layer.\n\n Returns:\n torch.Tensor: The output floating point tensor after the quantized matmul and rescale.\n\n \"\"\"\n\n Y = quant_int8_dynamic_per_token_linear(\n X, self.W_int_repr_t, self.W_scales, self.bias, X.dtype\n )\n return Y\n\n @classmethod\n def from_float(\n cls, mod: torch.nn.Linear\n ) -> \"DynamicallyPerAxisQuantizedLinear\":\n \"\"\"\n Converts a `mod` of class `torch.nn.Linear` to the\n `DynamicallyPerAxisQuantizedLinear` class\n\n Args:\n mod (torch.nn.Linear): The original `torch.nn.Linear` module to convert.\n\n Returns:\n DynamicallyPerAxisQuantizedLinear: The converted quantized linear module.\n\n \"\"\"\n\n # create the new module with a toy size to ensure initialization is fast\n fake_in_features, fake_out_features = 8, 8\n new_mod = cls(\n fake_in_features,\n fake_out_features,\n bias=mod.bias is not None,\n )\n new_mod.in_features = mod.in_features\n new_mod.out_features = mod.out_features\n W_int_repr, W_scales, _W_zps = dynamically_quantize_per_channel(\n mod.weight, -128, 127, torch.int8\n )\n new_mod.register_buffer(\"W_int_repr_t\", W_int_repr.contiguous().t())\n new_mod.W_scales = nn.Parameter(W_scales)\n new_mod.bias = mod.bias\n del new_mod.weight\n\n device_to_use = next(mod.parameters()).device\n new_mod.to(device_to_use)\n return new_mod"
},
{
"identifier": "apply_dynamic_quant",
"path": "torchao/quantization/quant_api.py",
"snippet": "def apply_dynamic_quant(model, filter_fn=None):\n \"\"\"\n Applies dynamic symmetric per-token activation and per-channel weight\n quantization to all linear layers in the given model using\n module swaps.\n \"\"\"\n _replace_with_custom_fn_if_matches_filter(\n model,\n lambda mod: DynamicallyPerAxisQuantizedLinear.from_float(mod),\n _is_linear if filter_fn is None else filter_fn,\n )"
},
{
"identifier": "apply_weight_only_int8_quant",
"path": "torchao/quantization/quant_api.py",
"snippet": "def apply_weight_only_int8_quant(model, filter_fn=None):\n \"\"\"\n Applies weight-only symmetric per-channel int8 quantization to all linear layers\n in the given model using module swaps.\n \"\"\"\n _replace_with_custom_fn_if_matches_filter(\n model,\n WeightOnlyInt8QuantLinear.from_float,\n _is_linear if filter_fn is None else filter_fn,\n )"
},
{
"identifier": "change_linear_weights_to_int8_dqtensors",
"path": "torchao/quantization/quant_api.py",
"snippet": "def change_linear_weights_to_int8_dqtensors(model, filter_fn=None):\n \"\"\"\n Converts all linear weight tensors to the `Int8DynamicallyQuantizedLinearWeight`\n Tensor subclass, effectively applying the same form of quantization\n as apply_dynamic_quant while not modifying the linear modules.\n \"\"\"\n if filter_fn is None:\n filter_fn = (\n lambda *args:\n _is_linear(*args) and\n _in_features_greater_than_16(*args)\n )\n\n _replace_with_custom_fn_if_matches_filter(\n model,\n _get_subclass_inserter(Int8DynamicallyQuantizedLinearWeight),\n filter_fn\n )"
},
{
"identifier": "change_linear_weights_to_int8_woqtensors",
"path": "torchao/quantization/quant_api.py",
"snippet": "def change_linear_weights_to_int8_woqtensors(model, filter_fn=None):\n \"\"\"\n Converts all linear weight tensors to the\n `Int8WeightOnlyQuantizedLinearWeight` tensor subclass,\n effectively applying the same form of quantization\n as apply_dynamic_quant while not modifying the linear modules.\n \"\"\"\n _replace_with_custom_fn_if_matches_filter(\n model,\n _get_subclass_inserter(Int8WeightOnlyQuantizedLinearWeight),\n _is_linear if filter_fn is None else filter_fn,\n )"
},
{
"identifier": "change_linear_weights_to_int4_woqtensors",
"path": "torchao/quantization/quant_api.py",
"snippet": "def change_linear_weights_to_int4_woqtensors(model, **kwargs):\n \"\"\"\n Converts all linear weight tensors to the\n `Int4WeightOnlyQuantizedLinearWeight` tensor subclass,\n effectively applying the same form of quantization\n as apply_dynamic_quant while not modifying the linear modules.\n \"\"\"\n filter_fn = kwargs.pop(\"filter_fn\", _is_linear)\n\n _replace_with_custom_fn_if_matches_filter(\n model,\n _get_subclass_inserter(Int4WeightOnlyQuantizedLinearWeight, **kwargs),\n filter_fn,\n )"
},
{
"identifier": "_replace_with_custom_fn_if_matches_filter",
"path": "torchao/quantization/quant_api.py",
"snippet": "def _replace_with_custom_fn_if_matches_filter(\n model, replacement_fn, filter_fn, cur_fqn=\"\"\n) -> None:\n \"\"\"\n For each `child` in `model`, replaces it with `replacement_fn(child)`\n if `filter_fn(child)` is `True`\n \"\"\"\n if filter_fn(model, cur_fqn[:-1]):\n model = replacement_fn(model)\n return model\n else:\n for name, child in model.named_children():\n new_child = _replace_with_custom_fn_if_matches_filter(\n child, replacement_fn, filter_fn, f\"{cur_fqn}{name}.\"\n )\n if new_child is not child:\n setattr(model, name, new_child)\n return model"
},
{
"identifier": "dequantize_per_channel",
"path": "torchao/quantization/quant_primitives.py",
"snippet": "def dequantize_per_channel(int_repr, scales, zero_points, out_dtype=torch.float32):\n # assumes axis is 0\n y = int_repr.transpose(0, 1)\n y = y.to(out_dtype)\n y = y - zero_points\n y = y * scales\n y = y.transpose(0, 1)\n return y"
},
{
"identifier": "dequantize_per_tensor",
"path": "torchao/quantization/quant_primitives.py",
"snippet": "def dequantize_per_tensor(int_repr, scale, zero_point, out_dtype=torch.float32):\n y = int_repr.to(out_dtype)\n if zero_point is not None:\n y -= zero_point\n return y * scale"
},
{
"identifier": "dynamically_quantize_per_channel",
"path": "torchao/quantization/quant_primitives.py",
"snippet": "def dynamically_quantize_per_channel(x, quant_min, quant_max, target_dtype):\n # assumes symmetric quantization\n # assumes axis == 0\n # assumes dense memory format\n # TODO(future): relax ^ as needed\n\n # default setup for affine quantization of activations\n eps = torch.finfo(torch.float32).eps\n\n # get min and max\n min_val, max_val = torch.aminmax(x, dim=1)\n\n # calculate scale and zero point based on min and max\n # reference: https://fburl.com/code/srbiybme\n min_val_neg = torch.min(min_val, torch.zeros_like(min_val))\n max_val_pos = torch.max(max_val, torch.zeros_like(max_val))\n device = min_val_neg.device\n\n # reference: https://fburl.com/code/4wll53rk\n max_val_pos = torch.max(-min_val_neg, max_val_pos)\n scale = max_val_pos / (float(quant_max - quant_min) / 2)\n # ensure scale is the same dtype as the original tensor\n scale = torch.clamp(scale, min=eps).to(x.dtype)\n zero_point = torch.zeros(min_val_neg.size(), dtype=torch.int64, device=device)\n\n # quantize based on qmin/qmax/scale/zp\n # reference: torch/ao/quantization/fx/_decomposed.py?lines=63\n x_div = x.transpose(0, 1) / scale\n x_round = torch.round(x_div)\n x_zp = x_round + zero_point\n x_zp = x_zp.transpose(0, 1)\n quant = torch.clamp(x_zp, quant_min, quant_max).to(target_dtype)\n\n return quant, scale, zero_point"
},
{
"identifier": "dynamically_quantize_per_tensor",
"path": "torchao/quantization/quant_primitives.py",
"snippet": "def dynamically_quantize_per_tensor(\n x,\n quant_min,\n quant_max,\n target_dtype,\n qscheme=torch.per_tensor_affine, # for now, reuse existing qscheme enum\n):\n # assumes affine quantization\n\n # default setup for affine quantization of activations\n eps = torch.finfo(torch.float32).eps\n\n if qscheme == torch.per_tensor_affine:\n # get min and max\n # TODO(future): make torch.aminmax work on cpu-half\n # min_val, max_val = torch.aminmax(x)\n min_val = torch.min(x)\n max_val = torch.max(x)\n\n # calculate scale and zero point based on min and max\n # reference: https://fburl.com/code/srbiybme\n min_val_neg = torch.min(min_val, torch.zeros_like(min_val))\n max_val_pos = torch.max(max_val, torch.zeros_like(max_val))\n device = min_val_neg.device\n\n scale = (max_val_pos - min_val_neg) / float(quant_max - quant_min)\n # TODO(future): make torch.clamp with scalar work on cpu-half\n scale = torch.clamp(scale, min=eps).reshape(1)\n zero_point = quant_min - torch.round(min_val_neg / scale).to(torch.int)\n zero_point = torch.clamp(zero_point, quant_min, quant_max)\n\n # quantize based on qmin/qmax/scale/zp\n # reference: torch/ao/quantization/fx/_decomposed.py?lines=63\n quant = torch.clamp(\n torch.round(x / scale) + zero_point, quant_min, quant_max\n ).to(target_dtype)\n\n else:\n assert qscheme == torch.per_tensor_symmetric, f\"unsupported qscheme {qscheme}\"\n # assert quant_min == -1 * quant_max, \"unsupported quant_min/quant_max\"\n amax = torch.max(torch.abs(x))\n scale = amax / (float(quant_max - quant_min) / 2)\n scale = torch.clamp(scale, min=eps).reshape(1)\n quant = torch.clamp(torch.round(x / scale), quant_min, quant_max).to(\n target_dtype\n )\n # do not create a tensor for zero_point as this is expensive\n zero_point = None\n\n return quant, scale, zero_point"
},
{
"identifier": "quant_int8_dynamic_linear",
"path": "torchao/quantization/quant_primitives.py",
"snippet": "def quant_int8_dynamic_linear(\n x,\n x_quant_min,\n x_quant_max,\n x_q_dtype,\n w_vals_int8_t,\n w_scales,\n w_vals_int8_t_sums_int64,\n bias,\n out_dtype=torch.float32,\n):\n # like F.linear, but with int8 dynamic quantization of activation,\n # and a quantized weight\n x_vals_int8, x_scale, x_zp = dynamically_quantize_per_tensor(\n x, x_quant_min, x_quant_max, x_q_dtype\n )\n # w_vals_int8_t_sums_int64 = w_vals_int8_t.sum(dim=0)\n mm_out = quant_int8_matmul(\n x_vals_int8,\n x_scale,\n x_zp,\n w_vals_int8_t,\n w_vals_int8_t_sums_int64,\n w_scales,\n out_dtype,\n )\n if bias is not None:\n mm_out += bias\n return mm_out"
},
{
"identifier": "quant_int8_dynamic_per_token_linear",
"path": "torchao/quantization/quant_primitives.py",
"snippet": "def quant_int8_dynamic_per_token_linear(\n x,\n w_vals_int8_t,\n w_scales,\n bias,\n out_dtype,\n):\n # like F.linear, but with int8 dynamic quantization of activation,\n # and a quantized weight\n x_vals_int8, x_scales = quantize_activation_per_token_absmax(x)\n mm_out = quant_int8_per_token_matmul(\n x_vals_int8, x_scales, w_vals_int8_t, w_scales, out_dtype\n )\n if bias is not None:\n mm_out += bias\n return mm_out"
},
{
"identifier": "quantize_activation_per_token_absmax",
"path": "torchao/quantization/quant_primitives.py",
"snippet": "def quantize_activation_per_token_absmax(t):\n n_bits = 8\n # if the shape of t is [B, N, K], the shape of scales will be [B, N, 1]\n\n scales = t.abs().amax(dim=-1, keepdim=True)\n if scales.dtype == torch.float16:\n scales = (\n scales.float()\n ) # want float scales to avoid overflows for fp16, (bf16 has wide enough range)\n q_max = 2 ** (n_bits - 1) - 1\n scales = scales.clamp(min=1e-5).div(q_max)\n # Note: the original smoothquant does not clamp to qmin/qmax here,\n # but some of the tests with bfloat16 ended up with a flipped sign\n # if we don't clamp. TODO(future) look into this further.\n t = torch.round(t / scales).clamp(-127, 127).to(torch.int8)\n return t, scales"
},
{
"identifier": "safe_int_mm",
"path": "torchao/quantization/quant_primitives.py",
"snippet": "def safe_int_mm(input: torch.Tensor, mat2: torch.Tensor) -> torch.Tensor:\n r\"\"\"\n This function wraps torch._int_mm and avoids several undesirable behaviors of the function for certain inputs while still\n returning correct results and being torch.compiled in a performant way.\n\n Assumes both tensors have dimension of 2.\n\n Note: no error checking for torch.compiled path, if input.shape = [i, j] and j<=16 then the triton kernel\n will error.\n\n Args:\n input (Tensor, int8): the first tensor to be multiplied\n mat2 (Tensor, int8): the second tensor to be multiplied\n\n Return:\n out (Tensor, int32): the result of the matmul with device matching that of the inputs\n \"\"\"\n\n # torch.compile path\n if dynamo_is_compiling() or \"FakeTensor\" in input.__repr__():\n return out_dtype(torch.ops.aten.mm.default, torch.int32, input, mat2)\n\n # error checking for cublas path\n assert (\n mat2.device == input.device\n ), f\"need both tensors to be on the same device but got {mat2.device} and {input.device}\"\n device_cpu = \"cpu\" in [mat2.device.type, input.device.type]\n # with input.shape = [i,j] and mat2.shape = [j,k]\n i_is_strictly_greater_than_16 = input.shape[0] > 16\n j_is_nonzero_multiple_of_8 = (input.shape[1] % 8 == 0) and (input.shape[1] > 0)\n k_is_nonzero_multiple_of_8 = (mat2.shape[1] % 8 == 0) and (mat2.shape[1] > 0)\n bad_dimensions_for_cublas = not (\n i_is_strictly_greater_than_16\n and j_is_nonzero_multiple_of_8\n and k_is_nonzero_multiple_of_8\n )\n\n if device_cpu or bad_dimensions_for_cublas:\n # fallback path\n return torch.matmul(input.cpu().to(torch.int32), mat2.cpu().to(torch.int32)).to(\n input.device.type\n )\n\n # cublas paths\n if not mat2.is_contiguous(): # silently gives incorrect result without this\n mat2 = mat2.contiguous()\n if (not input.is_contiguous()) and (\n input.shape[0] % 8 != 0\n ): # gives cryptic error without this\n input = (\n input.contiguous()\n ) # (it seems the transpose makes cublas check the above j constraint on i)\n return out_dtype(torch.ops.aten.mm.default, torch.int32, input, mat2)"
},
{
"identifier": "get_scale",
"path": "torchao/quantization/smoothquant.py",
"snippet": "def get_scale(X_absmax, W_absmax, alpha=0.5):\n \"\"\"\n Calculate the scale based on abs(max(X)), abs(max(W)) and alpha\n If X is of dimension `b*n*k` and W is dimension `k*m`, the returned\n scale is of dimension `k`.\n Note: X_absmax is calculated outside of this function because we\n need to keep a running version of it during calibration. W_absmax\n is calculated outside of this function for consistency with X_absmax.\n \"\"\"\n X_pow = torch.pow(X_absmax, alpha)\n W_pow = torch.pow(W_absmax, 1.0 - alpha)\n div = X_pow / W_pow\n return div.reshape(-1)"
},
{
"identifier": "smooth_fq_linear_to_inference",
"path": "torchao/quantization/smoothquant.py",
"snippet": "def smooth_fq_linear_to_inference(model, debug_skip_calibration=False) -> None:\n for _, mod in model.named_modules():\n if isinstance(mod, tuple(source_cls_to_target_cls.values())):\n if debug_skip_calibration:\n mod.set_debug_x_absmax()\n mod.to_inference()"
},
{
"identifier": "SmoothFakeDynamicallyQuantizedLinear",
"path": "torchao/quantization/smoothquant.py",
"snippet": "class SmoothFakeDynamicallyQuantizedLinear(SmoothFakeDynQuantMixin, torch.nn.Linear):\n \"\"\"\n This is a replacement for `torch.nn.Linear` which implements dynamic per-token\n activation quantization and dynamic per-channel weight quantization based on\n Smoothquant scaling.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n alpha = kwargs.pop(\"alpha\")\n super().__init__(*args, **kwargs)\n self.init_smoothquant_variables(alpha)\n\n def forward(self, X, *args, **kwargs):\n if self.calibrating:\n self.update_x_running_abs_max(X)\n Y = F.linear(X, self.weight, self.bias)\n else:\n if not self.debug_skip_scaling:\n # Ideally this would be fused into preceding layers\n # but in practice torch.compile fuses it with other\n # ops so the slowdown is minimal\n X = X / self.smooth_scale\n W_int_repr_t = (\n self.W_int_repr if self.store_w_int_repr_t else self.W_int_repr.t()\n )\n Y = quant_int8_dynamic_per_token_linear(\n X, W_int_repr_t, self.W_scales, self.bias, X.dtype\n )\n return Y\n\n @classmethod\n def from_float(cls, mod, alpha=0.5):\n \"\"\"\n Converts a `mod` of class `torch.nn.Linear` to the smooth fake quantized\n version of it. Note: requires calibration.\n \"\"\"\n # create the new module with a toy size to ensure initialization is fast\n fake_in_features, fake_out_features = 8, 8\n new_mod = cls(\n fake_in_features, fake_out_features, bias=mod.bias is not None, alpha=alpha\n )\n new_mod.in_features = mod.in_features\n new_mod.out_features = mod.out_features\n new_mod.weight = mod.weight\n new_mod.bias = mod.bias\n # TODO: test when creation is on cuda\n device_to_use = next(mod.parameters()).device\n new_mod.to(device_to_use)\n return new_mod\n\n def to_inference(self):\n \"\"\"\n Calculates the smoothquant scale based on calibration\n in preparation for inference\n \"\"\"\n assert self.x_running_abs_max is not None, \"no calibration data found\"\n self.calibrating = False\n self.smooth_scale = get_scale(\n self.x_running_abs_max,\n torch.max(torch.abs(self.weight.transpose(0, 1)), dim=1).values,\n alpha=self.alpha,\n )\n self.fold_weight()\n\n def set_debug_x_absmax(self):\n w_absmax = torch.max(torch.abs(self.weight.transpose(0, 1)), dim=1).values\n self.x_running_abs_max = w_absmax"
},
{
"identifier": "swap_linear_with_smooth_fq_linear",
"path": "torchao/quantization/smoothquant.py",
"snippet": "def swap_linear_with_smooth_fq_linear(\n model, skip_fqn_list=None, cur_fqn=\"\", alpha=0.5\n) -> None:\n\n name_to_child = dict(model.named_children())\n for name, child in name_to_child.items():\n if cur_fqn == \"\":\n new_fqn = name\n else:\n new_fqn = f\"{cur_fqn}.{name}\"\n if ((skip_fqn_list is None) or (new_fqn not in skip_fqn_list)) and (\n type(child) in source_cls_to_target_cls.keys()\n ):\n target_cls = source_cls_to_target_cls[type(child)]\n new_child = target_cls.from_float(child, alpha=alpha)\n setattr(model, name, new_child)\n else:\n swap_linear_with_smooth_fq_linear(child, skip_fqn_list, new_fqn, alpha)"
},
{
"identifier": "Int8DynamicallyQuantizedLinearWeight",
"path": "torchao/quantization/subclass.py",
"snippet": "class Int8DynamicallyQuantizedLinearWeight(QuantizedLinearWeightBase):\n \"\"\"\n A Tensor subclass that when applied to a weight used in a linear op/module, changes the\n linear op to a dynamically quantized linear op with symmetric per-token and per-channel\n quantization on the activation and weight respectively.\n \"\"\"\n\n @staticmethod\n def __new__(cls, int_data, q_scales, transposed, shape, **kwargs):\n kwargs[\"dtype\"] = kwargs.get(\"dtype\", q_scales.dtype)\n return super().__new__(cls, int_data, transposed, shape, **kwargs) # type: ignore[attr-defined]\n\n def __init__(self, int_data, q_scales, transposed, shape, **kwargs):\n self.q_scales = q_scales\n super().__init__(int_data, transposed)\n\n @staticmethod\n def _quantized_op(act_mat, w_qtensor, bias):\n return quant_int8_dynamic_per_token_linear(\n act_mat, w_qtensor.int_data, w_qtensor.q_scales, bias, act_mat.dtype\n )\n\n def dequantize(self, dtype=None):\n \"\"\"\n Obtain the dequantized version of the quantized tensor subclass\n \"\"\"\n dq_t = dequantize_per_channel(\n self.int_data.t(), self.q_scales, 0, self.dtype if dtype is None else dtype\n ).to(self.dtype)\n # data was transposed to dequantize so make sure shape is correct\n return dq_t if not self.transposed else dq_t.t()\n\n def int_repr(self):\n \"\"\"\n Get the internal integer representation of the quantized tensor\n \"\"\"\n return self.int_data if self.transposed else self.int_data.t()\n\n def q_params(self):\n \"\"\"\n Get the quantization scales for the quantized tensor\n \"\"\"\n return {\"q_scales\": self.q_scales}\n\n def to(self, *args, **kwargs):\n kwargs = self._get_to_kwargs(*args, **kwargs)\n return self.__class__(\n self.int_data.to(kwargs[\"device\"]),\n self.q_scales.to(kwargs[\"device\"]),\n self.transposed,\n self.shape,\n **kwargs,\n )\n\n def _apply_fn_to_data(self, fn):\n return self.__class__(\n fn(self.int_data), fn(self.q_scales), self.transposed, self.shape, dtype=self.dtype\n )\n\n def _change_shape(self, shape):\n return self.__class__(\n self.int_data, self.q_scales, self.transposed, shape, dtype=self.dtype\n )\n\n def __tensor_flatten__(self):\n return [\"int_data\", \"q_scales\"], [self.transposed, self.dtype, self.shape]\n\n @classmethod\n def __tensor_unflatten__(cls, tensor_data_dict, tensor_attributes, outer_size=None, outer_stride=None):\n int_data, q_scales = tensor_data_dict[\"int_data\"], tensor_data_dict[\"q_scales\"]\n transposed, dtype, shape = tensor_attributes\n return cls(int_data, q_scales, transposed, shape if outer_size is None else outer_size, dtype=dtype, strides=outer_stride)\n\n @classmethod\n def from_float(cls, input_float, qmin=-128, qmax=127):\n \"\"\"\n Method used to convert a linear weight tensor to an instance of the\n Int8DynamicallyQuantizedLinearWeight subclass.\n\n Example usage::\n\n model.lin_mod.weight = (\n Int8DynamicallyQuantizedLinearWeight.from_float(model.lin_mod.weight)\n )\n \"\"\"\n w_int_repr, w_scales, _ = dynamically_quantize_per_channel(\n input_float, qmin, qmax, torch.int8\n )\n # the desired representation shape for fast quantized matmul is\n # transposed compared to how it's stored as a linear weight,\n # i.e. 
we want in_channels as dim=0 and out_channels (and quantized axis) as dim=1\n # however the external representation of our tensor will maintain the correct\n # shape attribute which needs to be tracked directly.\n int_data = w_int_repr.contiguous().t()\n if cls is not Int8DynamicallyQuantizedLinearWeight:\n int_data = int_data.contiguous()\n return cls(\n int_data, w_scales, False, input_float.shape, dtype=input_float.dtype\n )"
},
{
"identifier": "Int8WeightOnlyQuantizedLinearWeight",
"path": "torchao/quantization/subclass.py",
"snippet": "class Int8WeightOnlyQuantizedLinearWeight(Int8DynamicallyQuantizedLinearWeight):\n \"\"\"\n A Tensor subclass that when applied to a weight used in a linear op/module,\n changes the linear op to a weight-only quantized linear op with symmetric\n per-channel quantization on the weight.\n \"\"\"\n\n @staticmethod\n def _quantized_op(act_mat, w_qtensor, bias):\n orig_dtype = act_mat.dtype\n y = torch.mm(act_mat.reshape(-1, act_mat.shape[-1]), w_qtensor.int_data.to(act_mat.dtype)) * w_qtensor.q_scales\n y = y.reshape(*act_mat.shape[:-1], y.shape[-1])\n if bias is not None:\n y += bias\n return y.to(orig_dtype)"
},
{
"identifier": "Int4WeightOnlyQuantizedLinearWeight",
"path": "torchao/quantization/subclass.py",
"snippet": "class Int4WeightOnlyQuantizedLinearWeight(QuantizedLinearWeightBase):\n \"\"\"\n A Tensor subclass that when applied to a weight used in a linear op/module,\n changes that linear op to a weight-only int4 quantized linear op with groupwise\n affine quantization on the weight.\n \"\"\"\n\n @staticmethod\n def __new__(\n cls,\n int_data,\n scales_and_zeros,\n transposed,\n shape,\n groupsize=128,\n inner_k_tiles=8,\n **kwargs,\n ):\n kwargs[\"dtype\"] = kwargs.get(\"dtype\", scales_and_zeros.dtype)\n return super().__new__(cls, int_data, transposed, shape, **kwargs) # type: ignore[attr-defined]\n\n def __init__(\n self,\n int_data,\n scales_and_zeros,\n transposed,\n shape,\n groupsize,\n inner_k_tiles,\n **kwargs,\n ):\n # the transposed flag tracks whether the tensor subclass has been transposed relative\n # to how a weight is normally stored in a linear i.e. [out_features, in_features].\n # tracking both transposed and shape is slightly redundant but corner cases like\n # square matrices can cause issues otherwise\n self.scales_and_zeros = scales_and_zeros\n self.groupsize = groupsize\n self.inner_k_tiles = inner_k_tiles\n super().__init__(int_data, transposed)\n\n @staticmethod\n def _quantized_op(act_mat, w_qtensor, bias):\n orig_act_size = act_mat.size()\n orig_dtype = act_mat.dtype\n\n # reshape and pad activation\n act_mat = act_mat.reshape(-1, act_mat.shape[-1]).to(torch.bfloat16)\n pad_size = find_multiple(act_mat.shape[-1], 1024)\n act_mat = torch.nn.functional.pad(act_mat, (0, pad_size - act_mat.shape[-1]))\n\n # matmul\n y = aten._weight_int4pack_mm(\n act_mat.contiguous(), w_qtensor.int_data, w_qtensor.groupsize, w_qtensor.scales_and_zeros\n )\n\n # remove out_feature padding\n orig_out_features = w_qtensor.shape[-1] if w_qtensor.transposed else w_qtensor.shape[-2]\n y = y[:, :orig_out_features]\n\n y = y.reshape(*orig_act_size[:-1], orig_out_features)\n if bias is not None:\n y += bias\n return y.to(orig_dtype)\n\n def dequantize(self):\n eye_shape = self.shape[1] if not self.transposed else self.shape[0]\n w_dq = self._quantized_op(\n torch.eye(eye_shape, device=self.device, dtype=self.dtype), self, None\n )\n # we dequantized using linear with the identity matrix, output has shape [in_channels, out_channels]\n # so we need to transpose back to get the original shape unless self.transposed is set.\n w_dq = w_dq if self.transposed else w_dq.t()\n return w_dq.to(self.dtype)\n\n def int_repr(self):\n return self.int_data\n\n def q_params(self):\n scales, zero_points = unpack_tinygemm_scales_and_zeros(\n self.scales_and_zeros,\n )\n return {\"q_scales\": scales, \"q_zero_points\": zero_points}\n\n def to(self, *args, **kwargs):\n kwargs = self._get_to_kwargs(*args, **kwargs)\n return self.__class__(\n self.int_data.to(kwargs[\"device\"]),\n self.scales_and_zeros.to(kwargs[\"device\"]),\n self.transposed,\n self.shape,\n self.groupsize,\n self.inner_k_tiles,\n **kwargs,\n )\n\n def _apply_fn_to_data(self, fn):\n return self.__class__(\n fn(self.int_data),\n fn(self.scales_and_zeros),\n self.transposed,\n self.shape,\n self.groupsize,\n self.inner_k_tiles,\n dtype=self.dtype,\n )\n\n def _change_shape(self, shape):\n return self.__class__(\n self.int_data,\n self.scales_and_zeros,\n self.transposed,\n shape,\n self.groupsize,\n self.inner_k_tiles,\n dtype=self.dtype\n )\n\n def __tensor_flatten__(self):\n return [\"int_data\", \"scales_and_zeros\"], (\n self.transposed,\n self.groupsize,\n self.inner_k_tiles,\n self.dtype,\n self.shape\n )\n\n @classmethod\n def 
__tensor_unflatten__(cls, tensor_data_dict, attributes, outer_size=None, outer_stride=None):\n int_data, scales_and_zeros = (\n tensor_data_dict[\"int_data\"],\n tensor_data_dict[\"scales_and_zeros\"],\n )\n transposed, groupsize, inner_k_tiles, dtype, shape = attributes\n return cls(\n int_data,\n scales_and_zeros,\n transposed,\n shape if outer_size is None else outer_size,\n groupsize,\n inner_k_tiles,\n dtype=dtype,\n strides=outer_stride,\n )\n\n @classmethod\n def from_float(cls, input_float, groupsize=128, inner_k_tiles=8):\n \"\"\"\n Method used to convert a linear weight tensor to an instance of the\n Int4WeightOnlyQuantizedLinearWeight subclass.\n\n Example usage::\n\n model.lin_mod.weight = (\n Int4WeightOnlyQuantizedLinearWeight.from_float(model.lin_mod.weight)\n )\n \"\"\"\n assert groupsize in [256, 128, 64, 32]\n assert inner_k_tiles in [8, 4, 2]\n orig_shape = input_float.shape\n orig_out_features, orig_in_features = input_float.shape\n\n # padding\n in_features = find_multiple(orig_in_features, 1024)\n out_features = find_multiple(orig_out_features, 8)\n input_float = torch.nn.functional.pad(\n input_float, (0, in_features - orig_in_features, 0, out_features - orig_out_features)\n )\n\n # quantization and packing\n input_int4x8, scales_and_zeros = groupwise_affine_quantize_tensor(\n input_float, 4, groupsize\n )\n int_data = aten._convert_weight_to_int4pack(\n input_int4x8, inner_k_tiles\n )\n\n return cls(\n int_data,\n scales_and_zeros,\n False,\n orig_shape,\n groupsize,\n inner_k_tiles,\n dtype=input_float.dtype,\n )"
},
{
"identifier": "_apply_logging_hook",
"path": "torchao/quantization/utils.py",
"snippet": "def find_multiple(n: int, k: int) -> int:\ndef compute_error(x, y):\ndef _get_logging_hook(fqn):\n def forward_hook(module, input):\ndef _apply_logging_hook(model):\n def __torch_dispatch__(self, func, types, args=(), kwargs=None):\ndef get_model_size_in_bytes(model):\nclass LoggingTensorMode(TorchDispatchMode):"
}
] | import copy
import unittest
import torch
import torch.nn as nn
import os
from torch._inductor.utils import run_and_get_code
from torch._dynamo import config
from torch.ao.quantization import MinMaxObserver, QConfigMapping
from torchao.quantization.dynamic_quant import (
DynamicallyPerAxisQuantizedLinear,
)
from torchao.quantization.quant_api import (
apply_dynamic_quant,
apply_weight_only_int8_quant,
change_linear_weights_to_int8_dqtensors,
change_linear_weights_to_int8_woqtensors,
change_linear_weights_to_int4_woqtensors,
_replace_with_custom_fn_if_matches_filter,
)
from torchao.quantization.quant_primitives import (
dequantize_per_channel,
dequantize_per_tensor,
dynamically_quantize_per_channel,
dynamically_quantize_per_tensor,
quant_int8_dynamic_linear,
quant_int8_dynamic_per_token_linear,
quantize_activation_per_token_absmax,
safe_int_mm,
)
from torchao.quantization.smoothquant import (
get_scale,
smooth_fq_linear_to_inference,
SmoothFakeDynamicallyQuantizedLinear,
swap_linear_with_smooth_fq_linear,
)
from torchao.quantization.subclass import (
Int8DynamicallyQuantizedLinearWeight,
Int8WeightOnlyQuantizedLinearWeight,
Int4WeightOnlyQuantizedLinearWeight
)
from torchao.quantization.utils import (
_apply_logging_hook,
compute_error,
compute_error as SQNR,
_fqn_to_op_to_shape_to_count,
LoggingTensorMode,
)
from torch.ao.quantization.quantize_fx import convert_to_reference_fx, prepare_fx
from transformers import ( # type: ignore[import-untyped]
DistilBertModel,
DistilBertTokenizer,
) | 9,994 | w_shape = (7, 9)
for i in range(3):
X = torch.randn(x_shape) * 10
W = torch.randn(w_shape)
s = get_scale(
torch.amax(torch.abs(X), dim=(0, 1)),
torch.amax(torch.abs(W), dim=1),
alpha=0.5,
)
Y = torch.matmul(X, W)
Y_ref = torch.matmul(
X / s.reshape(1, 1, -1),
torch.matmul(torch.diag(s), W),
)
assert torch.allclose(Y, Y_ref, atol=1e-3, rtol=1e-3), "not close!"
def _test_smooth_linear_impl(self, x_shape, lin_shape, device):
# so we can use the full range
torch.backends.quantized.engine = "qnnpack"
x = torch.randn(*x_shape, device=device) * 9 + 10
lin_fp32 = nn.Linear(*lin_shape, device=device) # misc: ignore
lin_smooth = SmoothFakeDynamicallyQuantizedLinear.from_float(
copy.deepcopy(lin_fp32), alpha=0.25
)
lin_smooth_skip_scaling = SmoothFakeDynamicallyQuantizedLinear.from_float(
copy.deepcopy(lin_fp32), alpha=0.25
)
lin_fp32_copy = copy.deepcopy(lin_fp32) # assignment: ignore
lin_fp32_copy.qconfig = torch.ao.quantization.QConfig( # assignment: ignore
activation=None,
weight=torch.ao.quantization.default_per_channel_weight_observer,
)
lin_dynamic_q = torch.ao.nn.quantized.dynamic.Linear.from_float(
lin_fp32_copy.cpu()
)
y_ref = lin_fp32(x)
# calibrate the smoothquant versions
y_smooth_nocalib = lin_smooth(x)
_ = lin_smooth_skip_scaling(x)
lin_smooth.to_inference()
lin_smooth_skip_scaling.debug_skip_scaling = True
lin_smooth_skip_scaling.to_inference()
# verify that with scaling turned off, numerics match quantized version
y_smooth_fq_only = lin_smooth_skip_scaling(x)
y_smooth_fq = lin_smooth(x)
y_dynamic_q = lin_dynamic_q(x.cpu()).to(device)
# print('y_ref', y_ref)
# print('y_smooth_nocalib', y_smooth_nocalib)
# print('y_smooth_fq', y_smooth_fq)
# print('y_smooth_fq_only', y_smooth_fq_only)
# print('y_dynamic_q', y_dynamic_q)
sqnr_smooth_fq = compute_error(y_ref, y_smooth_fq)
sqnr_dynamic_q = compute_error(y_ref, y_dynamic_q)
sqnr_fq = compute_error(y_smooth_fq_only, y_dynamic_q)
# print('sqnr_smooth', sqnr_smooth_fq, 'sqnr_dynamic', sqnr_dynamic_q, 'sqnr_fq', sqnr_fq)
assert torch.allclose(
y_ref, y_smooth_nocalib
), "y_ref not close to y_smooth_nocalib"
# after https://github.com/pytorch-labs/ao_benchmarks/pull/32,
# numerics do not match exactly between production c++ code
# and this Python code
# assert torch.allclose(
# y_smooth_fq_only, y_dynamic_q,
# atol=torch.max(y_smooth_fq_only).item()*0.01,
# rtol=0.00001), \
# 'y_smooth_fq_only not close to y_dynamic_q'
self.assertTrue(sqnr_smooth_fq.item() >= 40.0)
self.assertTrue(sqnr_dynamic_q.item() >= 40.0)
self.assertTrue(sqnr_fq.item() >= 40.0)
def test_smooth_linear_cpu(self):
self._test_smooth_linear_impl((1, 5, 3), (3, 4), "cpu")
def test_smooth_linear_cuda(self):
if not torch.cuda.is_available():
print("no cuda, skip")
return
self._test_smooth_linear_impl((1, 32, 32), (32, 16), "cuda")
def test_smooth_linear_edge_cases(self):
# so we can use the full range
torch.backends.quantized.engine = "qnnpack"
lin_fp32 = nn.Linear(3, 4)
lin_smooth = SmoothFakeDynamicallyQuantizedLinear.from_float(
lin_fp32, alpha=0.25
)
# test different ranks
x0 = torch.randn(4, 5, 3)
x1 = torch.randn(1, 8, 5, 3)
x2 = torch.randn(2, 3, 7, 5, 3)
# calibrate
_ = lin_smooth(x0)
_ = lin_smooth(x1)
_ = lin_smooth(x2)
# inference
lin_smooth.to_inference()
_ = lin_smooth(x0)
_ = lin_smooth(x1)
_ = lin_smooth(x2)
def test_swap(self):
m = nn.Sequential(
nn.Sequential(nn.Linear(4, 4), nn.ReLU(), nn.Linear(4, 4)),
nn.Linear(4, 4),
)
m_copy = copy.deepcopy(m)
| # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# mypy: ignore-errors
torch.manual_seed(0)
config.cache_size_limit = 100
class SmoothquantUnitTest(unittest.TestCase):
# first, let's reproduce the graphic from the paper, Figure 4, to ensure
# we are calculating the scales correctly
def test_figure_4(self):
X = torch.FloatTensor([1, -16, 2, 6, -2, 8, -1, -9]).reshape(1, 2, 4)
W = torch.FloatTensor([2, 1, -2, 1, -1, -1, 2, -1, -2, -1, -1, 1]).reshape(4, 3)
X_mul_W = torch.matmul(X, W)
smoothquant_scale = get_scale(
torch.amax(torch.abs(X), dim=(0, 1)),
torch.amax(torch.abs(W), dim=1),
alpha=0.5,
)
# reproduce scaled calculation
X_scaled = X / smoothquant_scale.reshape(1, 1, -1)
W_scaled = torch.matmul(torch.diag(smoothquant_scale), W)
X_scaled_mul_scaled_W = torch.matmul(X_scaled, W_scaled)
assert torch.allclose(X_mul_W, X_scaled_mul_scaled_W), "not close!"
assert X_mul_W.shape == X_scaled_mul_scaled_W.shape
# next, run the above test on a sample of representative inputs
def test_tensors(self):
x_shape = (1, 5, 7)
w_shape = (7, 9)
for i in range(3):
X = torch.randn(x_shape) * 10
W = torch.randn(w_shape)
s = get_scale(
torch.amax(torch.abs(X), dim=(0, 1)),
torch.amax(torch.abs(W), dim=1),
alpha=0.5,
)
Y = torch.matmul(X, W)
Y_ref = torch.matmul(
X / s.reshape(1, 1, -1),
torch.matmul(torch.diag(s), W),
)
assert torch.allclose(Y, Y_ref, atol=1e-3, rtol=1e-3), "not close!"
def _test_smooth_linear_impl(self, x_shape, lin_shape, device):
# so we can use the full range
torch.backends.quantized.engine = "qnnpack"
x = torch.randn(*x_shape, device=device) * 9 + 10
lin_fp32 = nn.Linear(*lin_shape, device=device) # misc: ignore
lin_smooth = SmoothFakeDynamicallyQuantizedLinear.from_float(
copy.deepcopy(lin_fp32), alpha=0.25
)
lin_smooth_skip_scaling = SmoothFakeDynamicallyQuantizedLinear.from_float(
copy.deepcopy(lin_fp32), alpha=0.25
)
lin_fp32_copy = copy.deepcopy(lin_fp32) # assignment: ignore
lin_fp32_copy.qconfig = torch.ao.quantization.QConfig( # assignment: ignore
activation=None,
weight=torch.ao.quantization.default_per_channel_weight_observer,
)
lin_dynamic_q = torch.ao.nn.quantized.dynamic.Linear.from_float(
lin_fp32_copy.cpu()
)
y_ref = lin_fp32(x)
# calibrate the smoothquant versions
y_smooth_nocalib = lin_smooth(x)
_ = lin_smooth_skip_scaling(x)
lin_smooth.to_inference()
lin_smooth_skip_scaling.debug_skip_scaling = True
lin_smooth_skip_scaling.to_inference()
# verify that with scaling turned off, numerics match quantized version
y_smooth_fq_only = lin_smooth_skip_scaling(x)
y_smooth_fq = lin_smooth(x)
y_dynamic_q = lin_dynamic_q(x.cpu()).to(device)
# print('y_ref', y_ref)
# print('y_smooth_nocalib', y_smooth_nocalib)
# print('y_smooth_fq', y_smooth_fq)
# print('y_smooth_fq_only', y_smooth_fq_only)
# print('y_dynamic_q', y_dynamic_q)
sqnr_smooth_fq = compute_error(y_ref, y_smooth_fq)
sqnr_dynamic_q = compute_error(y_ref, y_dynamic_q)
sqnr_fq = compute_error(y_smooth_fq_only, y_dynamic_q)
# print('sqnr_smooth', sqnr_smooth_fq, 'sqnr_dynamic', sqnr_dynamic_q, 'sqnr_fq', sqnr_fq)
assert torch.allclose(
y_ref, y_smooth_nocalib
), "y_ref not close to y_smooth_nocalib"
# after https://github.com/pytorch-labs/ao_benchmarks/pull/32,
# numerics do not match exactly between production c++ code
# and this Python code
# assert torch.allclose(
# y_smooth_fq_only, y_dynamic_q,
# atol=torch.max(y_smooth_fq_only).item()*0.01,
# rtol=0.00001), \
# 'y_smooth_fq_only not close to y_dynamic_q'
self.assertTrue(sqnr_smooth_fq.item() >= 40.0)
self.assertTrue(sqnr_dynamic_q.item() >= 40.0)
self.assertTrue(sqnr_fq.item() >= 40.0)
def test_smooth_linear_cpu(self):
self._test_smooth_linear_impl((1, 5, 3), (3, 4), "cpu")
def test_smooth_linear_cuda(self):
if not torch.cuda.is_available():
print("no cuda, skip")
return
self._test_smooth_linear_impl((1, 32, 32), (32, 16), "cuda")
def test_smooth_linear_edge_cases(self):
# so we can use the full range
torch.backends.quantized.engine = "qnnpack"
lin_fp32 = nn.Linear(3, 4)
lin_smooth = SmoothFakeDynamicallyQuantizedLinear.from_float(
lin_fp32, alpha=0.25
)
# test different ranks
x0 = torch.randn(4, 5, 3)
x1 = torch.randn(1, 8, 5, 3)
x2 = torch.randn(2, 3, 7, 5, 3)
# calibrate
_ = lin_smooth(x0)
_ = lin_smooth(x1)
_ = lin_smooth(x2)
# inference
lin_smooth.to_inference()
_ = lin_smooth(x0)
_ = lin_smooth(x1)
_ = lin_smooth(x2)
def test_swap(self):
m = nn.Sequential(
nn.Sequential(nn.Linear(4, 4), nn.ReLU(), nn.Linear(4, 4)),
nn.Linear(4, 4),
)
m_copy = copy.deepcopy(m) | swap_linear_with_smooth_fq_linear(m_copy, skip_fqn_list=["0.2"]) | 18 | 2023-11-03 21:27:36+00:00 | 12k |
google-research/semivl | semivl.py | [
{
"identifier": "get_palette",
"path": "datasets/palettes.py",
"snippet": "def get_palette(dataset):\n if dataset == 'pascal':\n return VOC_PALETTE\n elif dataset == 'cityscapes':\n return CITYSCAPES_PALETTE\n elif dataset == 'coco':\n return COCO_PALETTE\n elif dataset == 'ade':\n return ADE_PALETTE\n else:\n raise ValueError(dataset)"
},
{
"identifier": "get_git_revision",
"path": "experiments.py",
"snippet": "def get_git_revision() -> str:\n try:\n return subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode('ascii').strip()\n except subprocess.CalledProcessError:\n return ''"
},
{
"identifier": "build_model",
"path": "model/builder.py",
"snippet": "def build_model(cfg):\n model_type = cfg['model']\n if model_type == 'deeplabv3plus':\n model = DeepLabV3Plus(cfg)\n elif 'mmseg.' in model_type:\n model_type = model_type.replace('mmseg.', '')\n model_cfg_file = f'configs/_base_/models/{model_type}.py'\n mmseg_cfg = Config.fromfile(model_cfg_file)\n mmseg_cfg['model']['decode_head']['num_classes'] = cfg['nclass']\n if 'zegclip' in model_type or 'vlm' in model_type:\n if mmseg_cfg['img_size'] != cfg['crop_size']:\n print('Modify model image_size to match crop_size', cfg['crop_size'])\n nested_set(mmseg_cfg, 'img_size', cfg['crop_size'])\n nested_set(mmseg_cfg, 'model.backbone.img_size', (cfg['crop_size'], cfg['crop_size']))\n nested_set(mmseg_cfg, 'model.decode_head.img_size', cfg['crop_size'])\n emb_dataset_prefix = {\n 'pascal': 'voc12_wbg',\n 'cityscapes': 'cityscapes',\n 'coco': 'coco',\n 'ade': 'ade',\n }[cfg['dataset']]\n text_embedding_variant = cfg['text_embedding_variant']\n text_embedding = f'configs/_base_/datasets/text_embedding/{emb_dataset_prefix}_{text_embedding_variant}.npy'\n nested_set(mmseg_cfg, 'model.load_text_embedding', text_embedding)\n mcc_text_embedding_variant = cfg['mcc_text']\n mcc_text_embedding = f'configs/_base_/datasets/text_embedding/{emb_dataset_prefix}_{mcc_text_embedding_variant}.npy'\n nested_set(mmseg_cfg, 'model.load_mcc_text_embedding', mcc_text_embedding)\n pl_text_embedding_variant = cfg['pl_text']\n pl_text_embedding = f'configs/_base_/datasets/text_embedding/{emb_dataset_prefix}_{pl_text_embedding_variant}.npy'\n nested_set(mmseg_cfg, 'model.load_pl_text_embedding', pl_text_embedding)\n if mmseg_cfg['model']['decode_head']['type'] == 'ATMSingleHeadSeg':\n mmseg_cfg['model']['decode_head']['seen_idx'] = list(range(cfg['nclass']))\n mmseg_cfg['model']['decode_head']['all_idx'] = list(range(cfg['nclass']))\n if mmseg_cfg['model']['decode_head'].get('loss_decode') is not None and \\\n mmseg_cfg['model']['decode_head']['loss_decode']['type'] == 'SegLossPlus':\n mmseg_cfg['model']['decode_head']['loss_decode']['num_classes'] = cfg['nclass']\n if cfg['clip_encoder'] is not None:\n clip_encoder_cfg = Config.fromfile(f'configs/_base_/models/{cfg[\"clip_encoder\"]}.py')\n clip_encoder_cfg['img_size'] = mmseg_cfg['img_size']\n if cfg.get('mcc_fix_resize_pos'):\n clip_encoder_cfg['backbone']['img_size'] = mmseg_cfg['img_size']\n mmseg_cfg['model']['clip_encoder'] = clip_encoder_cfg['backbone']\n if 'model_args' in cfg:\n mmseg_cfg['model'].update(cfg['model_args'])\n model = build_segmentor(\n mmseg_cfg.model,\n train_cfg=mmseg_cfg.get('train_cfg'),\n test_cfg=mmseg_cfg.get('test_cfg'))\n model.disable_dropout = cfg['disable_dropout']\n model.fp_rate = cfg['fp_rate']\n model.forward = types.MethodType(forward_wrapper, model)\n model.init_weights()\n else:\n raise ValueError(model_type)\n \n return model"
},
{
"identifier": "evaluate",
"path": "third_party/unimatch/supervised.py",
"snippet": "def evaluate(model, loader, mode, cfg):\n model.eval()\n assert mode in ['original', 'center_crop', 'padded_sliding_window', 'zegclip_sliding_window', 'sliding_window']\n intersection_meter = AverageMeter()\n union_meter = AverageMeter()\n\n with torch.no_grad():\n for img, mask, id in tqdm(loader, total=len(loader)):\n \n img = img.cuda()\n pred = predict(model, img, mask, mode, cfg)\n\n intersection, union, target = \\\n intersectionAndUnion(pred.cpu().numpy(), mask.numpy(), cfg['nclass'], 255)\n\n reduced_intersection = torch.from_numpy(intersection).cuda()\n reduced_union = torch.from_numpy(union).cuda()\n reduced_target = torch.from_numpy(target).cuda()\n\n dist.all_reduce(reduced_intersection)\n dist.all_reduce(reduced_union)\n dist.all_reduce(reduced_target)\n\n intersection_meter.update(reduced_intersection.cpu().numpy())\n union_meter.update(reduced_union.cpu().numpy())\n\n iou_class = intersection_meter.sum / (union_meter.sum + 1e-10) * 100.0\n mIOU = np.mean(iou_class)\n\n return mIOU, iou_class"
},
{
"identifier": "SemiDataset",
"path": "third_party/unimatch/dataset/semi.py",
"snippet": "class SemiDataset(Dataset):\n def __init__(self, cfg, mode, id_path=None, nsample=None):\n self.name = cfg['dataset']\n self.root = os.path.expandvars(os.path.expanduser(cfg['data_root']))\n self.mode = mode\n self.size = cfg['crop_size']\n self.img_scale = cfg['img_scale']\n self.scale_ratio_range = cfg.get('scale_ratio_range', (0.5, 2.0))\n self.reduce_zero_label = cfg.get('reduce_zero_label', False)\n\n if isinstance(self.img_scale, list):\n self.img_scale = tuple(self.img_scale)\n self.labeled_photometric_distortion = cfg['labeled_photometric_distortion']\n\n if mode == 'train_l' or mode == 'train_u':\n with open(id_path, 'r') as f:\n self.ids = f.read().splitlines()\n if mode == 'train_l' and nsample is not None:\n self.ids *= math.ceil(nsample / len(self.ids))\n self.ids = self.ids[:nsample]\n else:\n if id_path is None:\n id_path = 'splits/%s/val.txt' % self.name\n with open(id_path, 'r') as f:\n self.ids = f.read().splitlines()\n\n def __getitem__(self, item):\n id = self.ids[item]\n img = Image.open(os.path.join(self.root, id.split(' ')[0])).convert('RGB')\n mask = Image.fromarray(np.array(Image.open(os.path.join(self.root, id.split(' ')[1]))))\n if self.reduce_zero_label:\n mask = np.array(mask)\n mask[mask == 0] = 255\n mask = mask - 1\n mask[mask == 254] = 255\n mask = Image.fromarray(mask)\n\n if self.mode == 'val':\n if self.img_scale is not None:\n res = Resize(img_scale=self.img_scale, min_size=512)(dict(\n img=np.array(img),\n ))\n img = Image.fromarray(res['img'])\n img, mask = normalize(img, mask)\n return img, mask, id\n\n if self.img_scale is not None:\n # print('Size before', img.size)\n res = Resize(img_scale=self.img_scale, ratio_range=self.scale_ratio_range)(dict(\n img=np.array(img),\n mask=np.array(mask),\n seg_fields=['mask']\n ))\n img = Image.fromarray(res['img'])\n mask = Image.fromarray(res['mask'])\n # print('Size after', mask.size)\n else:\n img, mask = resize(img, mask, self.scale_ratio_range)\n ignore_value = 254 if self.mode == 'train_u' else 255\n img, mask = crop(img, mask, self.size, ignore_value)\n img, mask = hflip(img, mask, p=0.5)\n\n if self.mode == 'train_l':\n if self.labeled_photometric_distortion:\n img = Image.fromarray(\n PhotoMetricDistortion()({'img': np.array(img)[..., ::-1]})['img'][..., ::-1]\n )\n return normalize(img, mask)\n\n img_w, img_s1, img_s2 = deepcopy(img), deepcopy(img), deepcopy(img)\n\n if random.random() < 0.8:\n img_s1 = transforms.ColorJitter(0.5, 0.5, 0.5, 0.25)(img_s1)\n img_s1 = transforms.RandomGrayscale(p=0.2)(img_s1)\n img_s1 = blur(img_s1, p=0.5)\n cutmix_box1 = obtain_cutmix_box(img_s1.size[0], p=0.5)\n\n if random.random() < 0.8:\n img_s2 = transforms.ColorJitter(0.5, 0.5, 0.5, 0.25)(img_s2)\n img_s2 = transforms.RandomGrayscale(p=0.2)(img_s2)\n img_s2 = blur(img_s2, p=0.5)\n cutmix_box2 = obtain_cutmix_box(img_s2.size[0], p=0.5)\n\n ignore_mask = Image.fromarray(np.zeros((mask.size[1], mask.size[0])))\n\n img_s1, ignore_mask = normalize(img_s1, ignore_mask)\n img_s2 = normalize(img_s2)\n\n mask = torch.from_numpy(np.array(mask)).long()\n ignore_mask[mask == 254] = 255\n\n return normalize(img_w), img_s1, img_s2, ignore_mask, cutmix_box1, cutmix_box2\n\n def __len__(self):\n return len(self.ids)"
},
{
"identifier": "CLASSES",
"path": "datasets/classes.py",
"snippet": "CLASSES = {'pascal': ['background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', \n 'car', 'cat', 'chair', 'cow', 'dining table', 'dog', 'horse', 'motorbike', \n 'person', 'potted plant', 'sheep', 'sofa', 'train', 'tv/monitor'],\n \n 'cityscapes': ['road', 'sidewalk', 'building', 'wall', 'fence', 'pole', 'traffic light',\n 'traffic sign', 'vegetation', 'terrain', 'sky', 'person', 'rider', 'car',\n 'truck', 'bus', 'train', 'motorcycle', 'bicycle'],\n \n 'coco': ['void', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', \n 'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign', 'parking meter', \n 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra',\n 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', \n 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard',\n 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',\n 'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', \n 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'dining table', \n 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',\n 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', \n 'teddy bear', 'hair drier', 'toothbrush', 'banner', 'blanket', 'branch', 'bridge', \n 'building-other', 'bush', 'cabinet', 'cage', 'cardboard', 'carpet', 'ceiling-other', \n 'ceiling-tile', 'cloth', 'clothes', 'clouds', 'counter', 'cupboard', 'curtain',\n 'desk-stuff', 'dirt', 'door-stuff', 'fence', 'floor-marble', 'floor-other', 'floor-stone', \n 'floor-tile', 'floor-wood', 'flower', 'fog', 'food-other', 'fruit', 'furniture-other', \n 'grass', 'gravel', 'ground-other', 'hill', 'house', 'leaves', 'light', 'mat', 'metal', \n 'mirror-stuff', 'moss', 'mountain', 'mud', 'napkin', 'net', 'paper', 'pavement', 'pillow', \n 'plant-other', 'plastic', 'platform', 'playingfield', 'railing', 'railroad', 'river', \n 'road', 'rock', 'roof', 'rug', 'salad', 'sand', 'sea', 'shelf', 'sky-other', 'skyscraper',\n 'snow', 'solid-other', 'stairs', 'stone', 'straw', 'structural-other', 'table', 'tent',\n 'textile-other', 'towel', 'tree', 'vegetable', 'wall-brick', 'wall-concrete', 'wall-other', \n 'wall-panel', 'wall-stone', 'wall-tile', 'wall-wood', 'water-other', 'waterdrops',\n 'window-blind', 'window-other', 'wood'],\n\n 'ade': ['wall', 'building', 'sky', 'floor', 'tree', 'ceiling', 'road', 'bed ', 'windowpane', 'grass', 'cabinet',\n 'sidewalk', 'person', 'earth', 'door', 'table', 'mountain', 'plant', 'curtain', 'chair', 'car', 'water',\n 'painting', 'sofa', 'shelf', 'house', 'sea', 'mirror', 'rug', 'field', 'armchair', 'seat', 'fence', 'desk',\n 'rock', 'wardrobe', 'lamp', 'bathtub', 'railing', 'cushion', 'base', 'box', 'column', 'signboard',\n 'chest of drawers','counter', 'sand', 'sink', 'skyscraper', 'fireplace', 'refrigerator', 'grandstand',\n 'path', 'stairs', 'runway', 'case', 'pool table', 'pillow', 'screen door', 'stairway', 'river', 'bridge',\n 'bookcase', 'blind', 'coffee table', 'toilet', 'flower', 'book', 'hill', 'bench', 'countertop', 'stove',\n 'palm', 'kitchen island', 'computer', 'swivel chair', 'boat', 'bar', 'arcade machine', 'hovel', 'bus',\n 'towel', 'light', 'truck', 'tower', 'chandelier', 'awning', 'streetlight', 'booth', 'television receiver',\n 'airplane', 'dirt track', 'apparel', 'pole', 'land', 'bannister', 'escalator', 'ottoman', 'bottle',\n 'buffet', 'poster', 
'stage', 'van', 'ship', 'fountain', 'conveyer belt', 'canopy', 'washer', 'plaything',\n 'swimming pool', 'stool', 'barrel', 'basket', 'waterfall', 'tent', 'bag', 'minibike', 'cradle', 'oven',\n 'ball', 'food', 'step', 'tank', 'trade name', 'microwave', 'pot', 'animal', 'bicycle', 'lake', 'dishwasher',\n 'screen', 'blanket', 'sculpture', 'hood', 'sconce', 'vase', 'traffic light', 'tray', 'ashcan', 'fan', 'pier',\n 'crt screen', 'plate', 'monitor', 'bulletin board', 'shower', 'radiator', 'glass', 'clock', 'flag'],\n }"
},
{
"identifier": "ProbOhemCrossEntropy2d",
"path": "third_party/unimatch/util/ohem.py",
"snippet": "class ProbOhemCrossEntropy2d(nn.Module):\n def __init__(self, ignore_index, reduction='mean', thresh=0.7, min_kept=256,\n down_ratio=1, use_weight=False):\n super(ProbOhemCrossEntropy2d, self).__init__()\n self.ignore_index = ignore_index\n self.thresh = float(thresh)\n self.min_kept = int(min_kept)\n self.down_ratio = down_ratio\n if use_weight:\n weight = torch.FloatTensor(\n [0.8373, 0.918, 0.866, 1.0345, 1.0166, 0.9969, 0.9754, 1.0489,\n 0.8786, 1.0023, 0.9539, 0.9843, 1.1116, 0.9037, 1.0865, 1.0955,\n 1.0865, 1.1529, 1.0507])\n self.criterion = torch.nn.CrossEntropyLoss(reduction=reduction,\n weight=weight,\n ignore_index=ignore_index)\n else:\n self.criterion = torch.nn.CrossEntropyLoss(reduction=reduction,\n ignore_index=ignore_index)\n\n def forward(self, pred, target):\n b, c, h, w = pred.size()\n target = target.view(-1)\n valid_mask = target.ne(self.ignore_index)\n target = target * valid_mask.long()\n num_valid = valid_mask.sum()\n\n prob = F.softmax(pred, dim=1)\n prob = (prob.transpose(0, 1)).reshape(c, -1)\n\n if self.min_kept > num_valid:\n pass\n elif num_valid > 0:\n prob = prob.masked_fill_(~valid_mask, 1)\n mask_prob = prob[\n target, torch.arange(len(target), dtype=torch.long)]\n threshold = self.thresh\n if self.min_kept > 0:\n index = mask_prob.argsort()\n threshold_index = index[min(len(index), self.min_kept) - 1]\n if mask_prob[threshold_index] > self.thresh:\n threshold = mask_prob[threshold_index]\n kept_mask = mask_prob.le(threshold)\n target = target * kept_mask.long()\n valid_mask = valid_mask * kept_mask\n\n target = target.masked_fill_(~valid_mask, self.ignore_index)\n target = target.view(b, h, w)\n\n return self.criterion(pred, target)"
},
{
"identifier": "setup_distributed",
"path": "third_party/unimatch/util/dist_helper.py",
"snippet": "def setup_distributed(backend=\"nccl\", port=None):\n \"\"\"AdaHessian Optimizer\n Lifted from https://github.com/BIGBALLON/distribuuuu/blob/master/distribuuuu/utils.py\n Originally licensed MIT, Copyright (c) 2020 Wei Li\n \"\"\"\n num_gpus = torch.cuda.device_count()\n\n rank = int(os.environ[\"RANK\"])\n world_size = int(os.environ[\"WORLD_SIZE\"])\n\n torch.cuda.set_device(rank % num_gpus)\n\n dist.init_process_group(\n backend=backend,\n world_size=world_size,\n rank=rank,\n )\n return rank, world_size"
},
{
"identifier": "count_params",
"path": "third_party/unimatch/util/utils.py",
"snippet": "def count_params(model):\n param_num = sum(p.numel() for p in model.parameters())\n return param_num / 1e6"
},
{
"identifier": "count_training_params",
"path": "third_party/unimatch/util/utils.py",
"snippet": "def count_training_params(model):\n param_num = sum(p.numel() for p in model.parameters() if p.requires_grad)\n return param_num / 1e6"
},
{
"identifier": "init_log",
"path": "third_party/unimatch/util/utils.py",
"snippet": "def init_log(name, level=logging.INFO):\n if (name, level) in logs:\n return\n logs.add((name, level))\n logger = logging.getLogger(name)\n logger.setLevel(level)\n ch = logging.StreamHandler()\n ch.setLevel(level)\n if \"SLURM_PROCID\" in os.environ:\n rank = int(os.environ[\"SLURM_PROCID\"])\n logger.addFilter(lambda record: rank == 0)\n else:\n rank = 0\n format_str = \"[%(asctime)s][%(levelname)8s] %(message)s\"\n formatter = logging.Formatter(format_str)\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n return logger"
},
{
"identifier": "gen_code_archive",
"path": "utils/gen_code_archive.py",
"snippet": "def gen_code_archive(out_dir, file='code.tar.gz'):\n archive = os.path.join(out_dir, file)\n os.makedirs(os.path.dirname(archive), exist_ok=True)\n with tarfile.open(archive, mode='w:gz') as tar:\n tar.add('.', filter=is_source_file)\n return archive"
},
{
"identifier": "plot_data",
"path": "utils/plot_utils.py",
"snippet": "def plot_data(ax, title, type, data, palette=None):\n data = data.cpu()\n if type == 'image':\n mean = torch.tensor([0.485, 0.456, 0.406])\n std = torch.tensor([0.229, 0.224, 0.225])\n data = data.permute([1, 2, 0]).mul(std).add(mean)\n ax.imshow(data)\n elif type == 'label':\n out = colorize_label(data.squeeze(0), palette)\n ax.imshow(out)\n elif type == 'prediction':\n data = data.squeeze(0).argmax(dim=0)\n out = colorize_label(data, palette)\n ax.imshow(out)\n elif type == 'heatmap':\n if data.shape[0] == 1:\n data = data.squeeze(0)\n ax.imshow(data)\n if title is not None:\n ax.set_title(title)\n ax.axis('off')"
},
{
"identifier": "DictAverageMeter",
"path": "utils/train_utils.py",
"snippet": "class DictAverageMeter(object):\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.avgs = {}\n self.sums = {}\n self.counts = {}\n\n def update(self, vals):\n for k, v in vals.items():\n if torch.is_tensor(v):\n v = v.detach()\n if k not in self.sums:\n self.sums[k] = 0\n self.counts[k] = 0\n self.sums[k] += v\n self.counts[k] += 1\n self.avgs[k] = torch.true_divide(self.sums[k], self.counts[k])\n\n def __str__(self):\n s = []\n for k, v in self.avgs.items():\n s.append(f'{k}: {v:.3f}')\n return ', '.join(s)"
},
{
"identifier": "confidence_weighted_loss",
"path": "utils/train_utils.py",
"snippet": "def confidence_weighted_loss(loss, conf_map, ignore_mask, cfg):\n assert loss.dim() == 3\n assert conf_map.dim() == 3\n assert ignore_mask.dim() == 3\n valid_mask = (ignore_mask != 255)\n sum_pixels = dict(dim=(1, 2), keepdim=True)\n if cfg['conf_mode'] == 'pixelwise':\n loss = loss * ((conf_map >= cfg['conf_thresh']) & valid_mask)\n loss = loss.sum() / valid_mask.sum().item()\n elif cfg['conf_mode'] == 'pixelratio':\n ratio_high_conf = ((conf_map >= cfg['conf_thresh']) & valid_mask).sum(**sum_pixels) / valid_mask.sum(**sum_pixels)\n loss = loss * ratio_high_conf\n loss = loss.sum() / valid_mask.sum().item()\n elif cfg['conf_mode'] == 'pixelavg':\n avg_conf = (conf_map * valid_mask).sum(**sum_pixels) / valid_mask.sum(**sum_pixels)\n loss = loss.sum() * avg_conf\n loss = loss.sum() / valid_mask.sum().item()\n else:\n raise ValueError(cfg['conf_mode'])\n return loss"
},
{
"identifier": "cutmix_img_",
"path": "utils/train_utils.py",
"snippet": "def cutmix_img_(img, img_mix, cutmix_box):\n img[cutmix_box.unsqueeze(1).expand(img.shape) == 1] = \\\n img_mix[cutmix_box.unsqueeze(1).expand(img.shape) == 1]"
},
{
"identifier": "cutmix_mask",
"path": "utils/train_utils.py",
"snippet": "def cutmix_mask(mask, mask_mix, cutmix_box):\n cutmixed = mask.clone()\n cutmixed[cutmix_box == 1] = mask_mix[cutmix_box == 1]\n return cutmixed"
},
{
"identifier": "__version__",
"path": "version.py",
"snippet": ""
}
] | import argparse
import logging
import math
import os
import pprint
import shutil
import uuid
import time
import mmcv
import torch
import torch.backends.cudnn as cudnn
import yaml
from datetime import datetime
from matplotlib import pyplot as plt
from mmseg.core import build_optimizer
from torch import nn
from torch.optim import SGD
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from datasets.palettes import get_palette
from experiments import get_git_revision
from model.builder import build_model
from third_party.unimatch.supervised import evaluate
from third_party.unimatch.dataset.semi import SemiDataset
from datasets.classes import CLASSES
from third_party.unimatch.util.ohem import ProbOhemCrossEntropy2d
from third_party.unimatch.util.dist_helper import setup_distributed
from third_party.unimatch.util.utils import count_params, count_training_params, init_log
from utils.gen_code_archive import gen_code_archive
from utils.plot_utils import plot_data
from utils.train_utils import (DictAverageMeter, confidence_weighted_loss,
cutmix_img_, cutmix_mask)
from version import __version__ | 7,944 | rank, world_size = setup_distributed(port=args.port)
if cfg['nccl_p2p_disable']:
os.environ["NCCL_P2P_DISABLE"] = str(1)
if rank == 0:
timestr = datetime.now().strftime("%y%m%d-%H%M")
uid = str(uuid.uuid4())[:5]
run_name = f'{timestr}_{cfg["name"]}_v{__version__}_{uid}'.replace('.', '-')
save_path = f'exp/exp-{cfg["exp"]}/{run_name}'
os.makedirs(save_path, exist_ok=True)
formatter = logging.Formatter(fmt='[%(asctime)s] [%(levelname)-8s] %(message)s')
fileHandler = logging.FileHandler(f'{save_path}/debug.log')
fileHandler.setFormatter(formatter)
logger.addHandler(fileHandler)
all_args = {**cfg, **vars(args),
'labeled_id_path': labeled_id_path, 'unlabeled_id_path': unlabeled_id_path,
'ngpus': world_size, 'run_name': run_name, 'save_path': save_path,
'exec_git_rev': get_git_revision(), 'exec_version': __version__}
logger.info('{}\n'.format(pprint.pformat(all_args)))
writer = SummaryWriter(save_path)
shutil.copyfile(args.config, os.path.join(save_path, 'config.yaml'))
with open(os.path.join(save_path, 'all_args.yaml'), 'w') as f:
yaml.dump(all_args, f, default_flow_style=None, sort_keys=False, indent=2)
gen_code_archive(save_path)
cudnn.enabled = True
cudnn.benchmark = True
maskclip_consistency_lambda = cfg['maskclip_consistency_lambda']
mcc_conf_thresh = cfg['mcc_conf_thresh']
mcc_loss_reduce = cfg['mcc_loss_reduce']
assert mcc_loss_reduce in ['mean', 'mean_valid', 'mean_all']
assert cfg['use_fp']
assert cfg['pleval']
model = build_model(cfg)
if 'optimizer' not in cfg:
optimizer = SGD([{'params': model.backbone.parameters(), 'lr': cfg['lr']},
{'params': [param for name, param in model.named_parameters() if 'backbone' not in name],
'lr': cfg['lr'] * cfg['lr_multi']}], lr=cfg['lr'], momentum=0.9, weight_decay=1e-4)
else:
optimizer = build_optimizer(model, cfg['optimizer'])
for group in optimizer.param_groups:
group.setdefault('initial_lr', group['lr'])
if rank == 0:
logger.info(model)
logger.info(f'Total params: {count_params(model):.1f}M\n')
if hasattr(model, 'backbone'):
logger.info(f'Backbone params (training/total): {count_training_params(model.backbone):.1f}M/{count_params(model.backbone):.1f}M\n')
if hasattr(model, 'decode_head'):
logger.info(f'Decoder params (training/total): {count_training_params(model.decode_head):.1f}M/{count_params(model.decode_head):.1f}M\n')
local_rank = int(os.environ["LOCAL_RANK"])
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
model.cuda()
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank], broadcast_buffers=False,
output_device=local_rank, find_unused_parameters=True)
if cfg['criterion']['name'] == 'CELoss':
criterion_l = nn.CrossEntropyLoss(**cfg['criterion']['kwargs']).cuda(local_rank)
elif cfg['criterion']['name'] == 'OHEM':
criterion_l = ProbOhemCrossEntropy2d(**cfg['criterion']['kwargs']).cuda(local_rank)
elif cfg['criterion']['name'] == 'mmseg':
criterion_l = None
else:
        raise ValueError(cfg['criterion']['name'])
if cfg['criterion_u'] == 'CELoss':
criterion_u = nn.CrossEntropyLoss(reduction='none').cuda(local_rank)
elif cfg['criterion_u'] == 'mmseg':
criterion_u = None
else:
raise ValueError(cfg['criterion_u'])
if maskclip_consistency_lambda != 0:
if mcc_loss_reduce == 'mean':
criterion_mc = nn.CrossEntropyLoss(ignore_index=255).cuda(local_rank)
elif mcc_loss_reduce in ['mean_valid', 'mean_all']:
criterion_mc = nn.CrossEntropyLoss(ignore_index=255, reduction='none').cuda(local_rank)
else:
raise ValueError(mcc_loss_reduce)
trainset_u = SemiDataset(cfg, 'train_u', id_path=unlabeled_id_path)
trainset_l = SemiDataset(cfg, 'train_l', id_path=labeled_id_path, nsample=len(trainset_u.ids))
valset = SemiDataset(cfg, 'val')
trainsampler_l = torch.utils.data.distributed.DistributedSampler(trainset_l)
trainloader_l = DataLoader(trainset_l, batch_size=cfg['batch_size'],
pin_memory=True, num_workers=1, drop_last=True, sampler=trainsampler_l)
trainsampler_u = torch.utils.data.distributed.DistributedSampler(trainset_u)
trainloader_u = DataLoader(trainset_u, batch_size=cfg['batch_size'],
pin_memory=True, num_workers=1, drop_last=True, sampler=trainsampler_u)
valsampler = torch.utils.data.distributed.DistributedSampler(valset)
valloader = DataLoader(valset, batch_size=1, pin_memory=True, num_workers=1,
drop_last=False, sampler=valsampler)
palette = get_palette(cfg['dataset'])
if cfg['iters'] is not None:
assert cfg['epochs'] is None
cfg['epochs'] = math.ceil(cfg['iters'] / len(trainloader_u))
total_iters = len(trainloader_u) * cfg['epochs']
scheduler_max_iters = cfg.get('scheduler_max_iters', total_iters)
assert scheduler_max_iters >= total_iters
if rank == 0:
logger.info(f'Train for {cfg["epochs"]} epochs / {total_iters} iterations.')
previous_best = 0.0
epoch = -1
for epoch in range(epoch + 1, cfg['epochs']):
if rank == 0:
logger.info('===========> Epoch: {:}, LR: {:.5f}, Previous best: {:.2f}'.format(
epoch, optimizer.param_groups[0]['lr'], previous_best))
| # Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def compute_mc_loss(pred, mask, ign):
l_mc = criterion_mc(pred, mask)
if mcc_loss_reduce == 'mean_valid':
l_mc = l_mc.sum() / (ign != 255).sum()
if mcc_loss_reduce == 'mean_all':
l_mc = l_mc.sum() / ign.numel()
return l_mc
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--config', type=str, required=True)
parser.add_argument('--local_rank', default=0, type=int)
parser.add_argument('--port', default=None, type=int)
args = parser.parse_args()
with open(args.config, "r") as fp:
cfg = yaml.load(fp, Loader=yaml.Loader)
labeled_id_path = f'splits/{cfg["dataset"]}/{cfg["split"]}/labeled.txt'
unlabeled_id_path = f'splits/{cfg["dataset"]}/{cfg["split"]}/unlabeled.txt'
logger = init_log('global', logging.INFO)
logger.propagate = 0
mmcv.utils.get_logger('mmcv').setLevel('WARNING')
rank, world_size = setup_distributed(port=args.port)
if cfg['nccl_p2p_disable']:
os.environ["NCCL_P2P_DISABLE"] = str(1)
if rank == 0:
timestr = datetime.now().strftime("%y%m%d-%H%M")
uid = str(uuid.uuid4())[:5]
run_name = f'{timestr}_{cfg["name"]}_v{__version__}_{uid}'.replace('.', '-')
save_path = f'exp/exp-{cfg["exp"]}/{run_name}'
os.makedirs(save_path, exist_ok=True)
formatter = logging.Formatter(fmt='[%(asctime)s] [%(levelname)-8s] %(message)s')
fileHandler = logging.FileHandler(f'{save_path}/debug.log')
fileHandler.setFormatter(formatter)
logger.addHandler(fileHandler)
all_args = {**cfg, **vars(args),
'labeled_id_path': labeled_id_path, 'unlabeled_id_path': unlabeled_id_path,
'ngpus': world_size, 'run_name': run_name, 'save_path': save_path,
'exec_git_rev': get_git_revision(), 'exec_version': __version__}
logger.info('{}\n'.format(pprint.pformat(all_args)))
writer = SummaryWriter(save_path)
shutil.copyfile(args.config, os.path.join(save_path, 'config.yaml'))
with open(os.path.join(save_path, 'all_args.yaml'), 'w') as f:
yaml.dump(all_args, f, default_flow_style=None, sort_keys=False, indent=2)
gen_code_archive(save_path)
cudnn.enabled = True
cudnn.benchmark = True
maskclip_consistency_lambda = cfg['maskclip_consistency_lambda']
mcc_conf_thresh = cfg['mcc_conf_thresh']
mcc_loss_reduce = cfg['mcc_loss_reduce']
assert mcc_loss_reduce in ['mean', 'mean_valid', 'mean_all']
assert cfg['use_fp']
assert cfg['pleval']
model = build_model(cfg)
if 'optimizer' not in cfg:
optimizer = SGD([{'params': model.backbone.parameters(), 'lr': cfg['lr']},
{'params': [param for name, param in model.named_parameters() if 'backbone' not in name],
'lr': cfg['lr'] * cfg['lr_multi']}], lr=cfg['lr'], momentum=0.9, weight_decay=1e-4)
else:
optimizer = build_optimizer(model, cfg['optimizer'])
for group in optimizer.param_groups:
group.setdefault('initial_lr', group['lr'])
if rank == 0:
logger.info(model)
logger.info(f'Total params: {count_params(model):.1f}M\n')
if hasattr(model, 'backbone'):
logger.info(f'Backbone params (training/total): {count_training_params(model.backbone):.1f}M/{count_params(model.backbone):.1f}M\n')
if hasattr(model, 'decode_head'):
logger.info(f'Decoder params (training/total): {count_training_params(model.decode_head):.1f}M/{count_params(model.decode_head):.1f}M\n')
local_rank = int(os.environ["LOCAL_RANK"])
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
model.cuda()
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank], broadcast_buffers=False,
output_device=local_rank, find_unused_parameters=True)
if cfg['criterion']['name'] == 'CELoss':
criterion_l = nn.CrossEntropyLoss(**cfg['criterion']['kwargs']).cuda(local_rank)
elif cfg['criterion']['name'] == 'OHEM':
criterion_l = ProbOhemCrossEntropy2d(**cfg['criterion']['kwargs']).cuda(local_rank)
elif cfg['criterion']['name'] == 'mmseg':
criterion_l = None
else:
        raise ValueError(cfg['criterion']['name'])
if cfg['criterion_u'] == 'CELoss':
criterion_u = nn.CrossEntropyLoss(reduction='none').cuda(local_rank)
elif cfg['criterion_u'] == 'mmseg':
criterion_u = None
else:
raise ValueError(cfg['criterion_u'])
if maskclip_consistency_lambda != 0:
if mcc_loss_reduce == 'mean':
criterion_mc = nn.CrossEntropyLoss(ignore_index=255).cuda(local_rank)
elif mcc_loss_reduce in ['mean_valid', 'mean_all']:
criterion_mc = nn.CrossEntropyLoss(ignore_index=255, reduction='none').cuda(local_rank)
else:
raise ValueError(mcc_loss_reduce)
trainset_u = SemiDataset(cfg, 'train_u', id_path=unlabeled_id_path)
trainset_l = SemiDataset(cfg, 'train_l', id_path=labeled_id_path, nsample=len(trainset_u.ids))
valset = SemiDataset(cfg, 'val')
trainsampler_l = torch.utils.data.distributed.DistributedSampler(trainset_l)
trainloader_l = DataLoader(trainset_l, batch_size=cfg['batch_size'],
pin_memory=True, num_workers=1, drop_last=True, sampler=trainsampler_l)
trainsampler_u = torch.utils.data.distributed.DistributedSampler(trainset_u)
trainloader_u = DataLoader(trainset_u, batch_size=cfg['batch_size'],
pin_memory=True, num_workers=1, drop_last=True, sampler=trainsampler_u)
valsampler = torch.utils.data.distributed.DistributedSampler(valset)
valloader = DataLoader(valset, batch_size=1, pin_memory=True, num_workers=1,
drop_last=False, sampler=valsampler)
palette = get_palette(cfg['dataset'])
if cfg['iters'] is not None:
assert cfg['epochs'] is None
cfg['epochs'] = math.ceil(cfg['iters'] / len(trainloader_u))
total_iters = len(trainloader_u) * cfg['epochs']
scheduler_max_iters = cfg.get('scheduler_max_iters', total_iters)
assert scheduler_max_iters >= total_iters
if rank == 0:
logger.info(f'Train for {cfg["epochs"]} epochs / {total_iters} iterations.')
previous_best = 0.0
epoch = -1
for epoch in range(epoch + 1, cfg['epochs']):
if rank == 0:
logger.info('===========> Epoch: {:}, LR: {:.5f}, Previous best: {:.2f}'.format(
epoch, optimizer.param_groups[0]['lr'], previous_best))
| log_avg = DictAverageMeter() | 13 | 2023-11-02 14:49:38+00:00 | 12k |
intellerce/controlanimate | animatediff/models/unet.py | [
{
"identifier": "CrossAttnDownBlock3D",
"path": "animatediff/models/unet_blocks.py",
"snippet": "class CrossAttnDownBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n attn_num_head_channels=1,\n cross_attention_dim=1280,\n output_scale_factor=1.0,\n downsample_padding=1,\n add_downsample=True,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n\n unet_use_cross_frame_attention=None,\n unet_use_temporal_attention=None,\n use_inflated_groupnorm=None,\n \n use_motion_module=None,\n\n motion_module_type=None,\n motion_module_kwargs=None,\n ):\n super().__init__()\n resnets = []\n attentions = []\n motion_modules = []\n\n self.has_cross_attention = True\n self.attn_num_head_channels = attn_num_head_channels\n\n for i in range(num_layers):\n in_channels = in_channels if i == 0 else out_channels\n resnets.append(\n ResnetBlock3D(\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n\n use_inflated_groupnorm=use_inflated_groupnorm,\n )\n )\n if dual_cross_attention:\n raise NotImplementedError\n attentions.append(\n Transformer3DModel(\n attn_num_head_channels,\n out_channels // attn_num_head_channels,\n in_channels=out_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n )\n )\n motion_modules.append(\n get_motion_module(\n in_channels=out_channels,\n motion_module_type=motion_module_type, \n motion_module_kwargs=motion_module_kwargs,\n ) if use_motion_module else None\n )\n \n self.attentions = nn.ModuleList(attentions)\n self.resnets = nn.ModuleList(resnets)\n self.motion_modules = nn.ModuleList(motion_modules)\n\n if add_downsample:\n self.downsamplers = nn.ModuleList(\n [\n Downsample3D(\n out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name=\"op\"\n )\n ]\n )\n else:\n self.downsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(self, hidden_states, temb=None, encoder_hidden_states=None, attention_mask=None):\n output_states = ()\n\n for resnet, attn, motion_module in zip(self.resnets, self.attentions, self.motion_modules):\n if self.training and self.gradient_checkpointing:\n\n def create_custom_forward(module, return_dict=None):\n def custom_forward(*inputs):\n if return_dict is not None:\n return module(*inputs, return_dict=return_dict)\n else:\n return module(*inputs)\n\n return custom_forward\n\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)\n hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(attn, return_dict=False),\n hidden_states,\n encoder_hidden_states,\n )[0]\n if motion_module is not None:\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(motion_module), hidden_states.requires_grad_(), temb, encoder_hidden_states)\n \n else:\n 
hidden_states = resnet(hidden_states, temb)\n hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states).sample\n \n # add motion module\n hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states\n\n output_states += (hidden_states,)\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states += (hidden_states,)\n\n return hidden_states, output_states"
},
{
"identifier": "CrossAttnUpBlock3D",
"path": "animatediff/models/unet_blocks.py",
"snippet": "class CrossAttnUpBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n prev_output_channel: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n attn_num_head_channels=1,\n cross_attention_dim=1280,\n output_scale_factor=1.0,\n add_upsample=True,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n\n unet_use_cross_frame_attention=None,\n unet_use_temporal_attention=None,\n use_inflated_groupnorm=None,\n \n use_motion_module=None,\n\n motion_module_type=None,\n motion_module_kwargs=None,\n ):\n super().__init__()\n resnets = []\n attentions = []\n motion_modules = []\n\n self.has_cross_attention = True\n self.attn_num_head_channels = attn_num_head_channels\n\n for i in range(num_layers):\n res_skip_channels = in_channels if (i == num_layers - 1) else out_channels\n resnet_in_channels = prev_output_channel if i == 0 else out_channels\n\n resnets.append(\n ResnetBlock3D(\n in_channels=resnet_in_channels + res_skip_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n\n use_inflated_groupnorm=use_inflated_groupnorm,\n )\n )\n if dual_cross_attention:\n raise NotImplementedError\n attentions.append(\n Transformer3DModel(\n attn_num_head_channels,\n out_channels // attn_num_head_channels,\n in_channels=out_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n )\n )\n motion_modules.append(\n get_motion_module(\n in_channels=out_channels,\n motion_module_type=motion_module_type, \n motion_module_kwargs=motion_module_kwargs,\n ) if use_motion_module else None\n )\n \n self.attentions = nn.ModuleList(attentions)\n self.resnets = nn.ModuleList(resnets)\n self.motion_modules = nn.ModuleList(motion_modules)\n\n if add_upsample:\n self.upsamplers = nn.ModuleList([Upsample3D(out_channels, use_conv=True, out_channels=out_channels)])\n else:\n self.upsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(\n self,\n hidden_states,\n res_hidden_states_tuple,\n temb=None,\n encoder_hidden_states=None,\n upsample_size=None,\n attention_mask=None,\n ):\n for resnet, attn, motion_module in zip(self.resnets, self.attentions, self.motion_modules):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n\n if self.training and self.gradient_checkpointing:\n\n def create_custom_forward(module, return_dict=None):\n def custom_forward(*inputs):\n if return_dict is not None:\n return module(*inputs, return_dict=return_dict)\n else:\n return module(*inputs)\n\n return custom_forward\n\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)\n hidden_states = 
torch.utils.checkpoint.checkpoint(\n create_custom_forward(attn, return_dict=False),\n hidden_states,\n encoder_hidden_states,\n )[0]\n if motion_module is not None:\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(motion_module), hidden_states.requires_grad_(), temb, encoder_hidden_states)\n \n else:\n hidden_states = resnet(hidden_states, temb)\n hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states).sample\n \n # add motion module\n hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states"
},
{
"identifier": "DownBlock3D",
"path": "animatediff/models/unet_blocks.py",
"snippet": "class DownBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n output_scale_factor=1.0,\n add_downsample=True,\n downsample_padding=1,\n\n use_inflated_groupnorm=None,\n \n use_motion_module=None,\n motion_module_type=None,\n motion_module_kwargs=None,\n ):\n super().__init__()\n resnets = []\n motion_modules = []\n\n for i in range(num_layers):\n in_channels = in_channels if i == 0 else out_channels\n resnets.append(\n ResnetBlock3D(\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n\n use_inflated_groupnorm=use_inflated_groupnorm,\n )\n )\n motion_modules.append(\n get_motion_module(\n in_channels=out_channels,\n motion_module_type=motion_module_type, \n motion_module_kwargs=motion_module_kwargs,\n ) if use_motion_module else None\n )\n \n self.resnets = nn.ModuleList(resnets)\n self.motion_modules = nn.ModuleList(motion_modules)\n\n if add_downsample:\n self.downsamplers = nn.ModuleList(\n [\n Downsample3D(\n out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name=\"op\"\n )\n ]\n )\n else:\n self.downsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(self, hidden_states, temb=None, encoder_hidden_states=None):\n output_states = ()\n\n for resnet, motion_module in zip(self.resnets, self.motion_modules):\n if self.training and self.gradient_checkpointing:\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs)\n\n return custom_forward\n\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)\n if motion_module is not None:\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(motion_module), hidden_states.requires_grad_(), temb, encoder_hidden_states)\n else:\n hidden_states = resnet(hidden_states, temb)\n\n # add motion module\n hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states\n\n output_states += (hidden_states,)\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states += (hidden_states,)\n\n return hidden_states, output_states"
},
{
"identifier": "UNetMidBlock3DCrossAttn",
"path": "animatediff/models/unet_blocks.py",
"snippet": "class UNetMidBlock3DCrossAttn(nn.Module):\n def __init__(\n self,\n in_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n attn_num_head_channels=1,\n output_scale_factor=1.0,\n cross_attention_dim=1280,\n dual_cross_attention=False,\n use_linear_projection=False,\n upcast_attention=False,\n\n unet_use_cross_frame_attention=None,\n unet_use_temporal_attention=None,\n use_inflated_groupnorm=None,\n\n use_motion_module=None,\n \n motion_module_type=None,\n motion_module_kwargs=None,\n ):\n super().__init__()\n\n self.has_cross_attention = True\n self.attn_num_head_channels = attn_num_head_channels\n resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32)\n\n # there is always at least one resnet\n resnets = [\n ResnetBlock3D(\n in_channels=in_channels,\n out_channels=in_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n\n use_inflated_groupnorm=use_inflated_groupnorm,\n )\n ]\n attentions = []\n motion_modules = []\n\n for _ in range(num_layers):\n if dual_cross_attention:\n raise NotImplementedError\n attentions.append(\n Transformer3DModel(\n attn_num_head_channels,\n in_channels // attn_num_head_channels,\n in_channels=in_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n )\n )\n motion_modules.append(\n get_motion_module(\n in_channels=in_channels,\n motion_module_type=motion_module_type, \n motion_module_kwargs=motion_module_kwargs,\n ) if use_motion_module else None\n )\n resnets.append(\n ResnetBlock3D(\n in_channels=in_channels,\n out_channels=in_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n\n use_inflated_groupnorm=use_inflated_groupnorm,\n )\n )\n\n self.attentions = nn.ModuleList(attentions)\n self.resnets = nn.ModuleList(resnets)\n self.motion_modules = nn.ModuleList(motion_modules)\n\n def forward(self, hidden_states, temb=None, encoder_hidden_states=None, attention_mask=None):\n hidden_states = self.resnets[0](hidden_states, temb)\n for attn, resnet, motion_module in zip(self.attentions, self.resnets[1:], self.motion_modules):\n hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states).sample\n hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states\n hidden_states = resnet(hidden_states, temb)\n\n return hidden_states"
},
{
"identifier": "UpBlock3D",
"path": "animatediff/models/unet_blocks.py",
"snippet": "class UpBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n prev_output_channel: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n output_scale_factor=1.0,\n add_upsample=True,\n\n use_inflated_groupnorm=None,\n\n use_motion_module=None,\n motion_module_type=None,\n motion_module_kwargs=None,\n ):\n super().__init__()\n resnets = []\n motion_modules = []\n\n for i in range(num_layers):\n res_skip_channels = in_channels if (i == num_layers - 1) else out_channels\n resnet_in_channels = prev_output_channel if i == 0 else out_channels\n\n resnets.append(\n ResnetBlock3D(\n in_channels=resnet_in_channels + res_skip_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n\n use_inflated_groupnorm=use_inflated_groupnorm,\n )\n )\n motion_modules.append(\n get_motion_module(\n in_channels=out_channels,\n motion_module_type=motion_module_type, \n motion_module_kwargs=motion_module_kwargs,\n ) if use_motion_module else None\n )\n\n self.resnets = nn.ModuleList(resnets)\n self.motion_modules = nn.ModuleList(motion_modules)\n\n if add_upsample:\n self.upsamplers = nn.ModuleList([Upsample3D(out_channels, use_conv=True, out_channels=out_channels)])\n else:\n self.upsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(self, hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None, encoder_hidden_states=None,):\n for resnet, motion_module in zip(self.resnets, self.motion_modules):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n\n if self.training and self.gradient_checkpointing:\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs)\n\n return custom_forward\n\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)\n if motion_module is not None:\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(motion_module), hidden_states.requires_grad_(), temb, encoder_hidden_states)\n else:\n hidden_states = resnet(hidden_states, temb)\n hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states"
},
{
"identifier": "get_down_block",
"path": "animatediff/models/unet_blocks.py",
"snippet": "def get_down_block(\n down_block_type,\n num_layers,\n in_channels,\n out_channels,\n temb_channels,\n add_downsample,\n resnet_eps,\n resnet_act_fn,\n attn_num_head_channels,\n resnet_groups=None,\n cross_attention_dim=None,\n downsample_padding=None,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n resnet_time_scale_shift=\"default\",\n \n unet_use_cross_frame_attention=None,\n unet_use_temporal_attention=None,\n use_inflated_groupnorm=None,\n\n use_motion_module=None,\n \n motion_module_type=None,\n motion_module_kwargs=None,\n):\n down_block_type = down_block_type[7:] if down_block_type.startswith(\"UNetRes\") else down_block_type\n if down_block_type == \"DownBlock3D\":\n return DownBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n use_inflated_groupnorm=use_inflated_groupnorm,\n\n use_motion_module=use_motion_module,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n elif down_block_type == \"CrossAttnDownBlock3D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for CrossAttnDownBlock3D\")\n return CrossAttnDownBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attn_num_head_channels,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n use_inflated_groupnorm=use_inflated_groupnorm,\n \n use_motion_module=use_motion_module,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n raise ValueError(f\"{down_block_type} does not exist.\")"
},
{
"identifier": "get_up_block",
"path": "animatediff/models/unet_blocks.py",
"snippet": "def get_up_block(\n up_block_type,\n num_layers,\n in_channels,\n out_channels,\n prev_output_channel,\n temb_channels,\n add_upsample,\n resnet_eps,\n resnet_act_fn,\n attn_num_head_channels,\n resnet_groups=None,\n cross_attention_dim=None,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n resnet_time_scale_shift=\"default\",\n\n unet_use_cross_frame_attention=None,\n unet_use_temporal_attention=None,\n use_inflated_groupnorm=None,\n \n use_motion_module=None,\n motion_module_type=None,\n motion_module_kwargs=None,\n):\n up_block_type = up_block_type[7:] if up_block_type.startswith(\"UNetRes\") else up_block_type\n if up_block_type == \"UpBlock3D\":\n return UpBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n use_inflated_groupnorm=use_inflated_groupnorm,\n\n use_motion_module=use_motion_module,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n elif up_block_type == \"CrossAttnUpBlock3D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for CrossAttnUpBlock3D\")\n return CrossAttnUpBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attn_num_head_channels,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n use_inflated_groupnorm=use_inflated_groupnorm,\n\n use_motion_module=use_motion_module,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n raise ValueError(f\"{up_block_type} does not exist.\")"
},
{
"identifier": "InflatedConv3d",
"path": "animatediff/models/resnet.py",
"snippet": "class InflatedConv3d(LoRACompatibleConv):\n def forward(self, x):\n video_length = x.shape[2]\n\n x = rearrange(x, \"b c f h w -> (b f) c h w\")\n x = super().forward(x)\n x = rearrange(x, \"(b f) c h w -> b c f h w\", f=video_length)\n\n return x"
},
{
"identifier": "InflatedGroupNorm",
"path": "animatediff/models/resnet.py",
"snippet": "class InflatedGroupNorm(nn.GroupNorm):\n def forward(self, x):\n video_length = x.shape[2]\n\n x = rearrange(x, \"b c f h w -> (b f) c h w\")\n x = super().forward(x)\n x = rearrange(x, \"(b f) c h w -> b c f h w\", f=video_length)\n\n return x"
}
] | from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.models.modeling_utils import ModelMixin
from diffusers.loaders import UNet2DConditionLoadersMixin
from diffusers.utils import BaseOutput, logging
from diffusers.models.embeddings import TimestepEmbedding, Timesteps
from .unet_blocks import (
CrossAttnDownBlock3D,
CrossAttnUpBlock3D,
DownBlock3D,
UNetMidBlock3DCrossAttn,
UpBlock3D,
get_down_block,
get_up_block,
)
from .resnet import InflatedConv3d, InflatedGroupNorm
from typing import Any, Dict, List, Optional, Tuple, Union
from diffusers.models.attention_processor import (
ADDED_KV_ATTENTION_PROCESSORS,
CROSS_ATTENTION_PROCESSORS,
AttentionProcessor,
AttnAddedKVProcessor,
AttnProcessor,
)
from diffusers.utils import WEIGHTS_NAME, SAFETENSORS_WEIGHTS_NAME
import os
import json
import pdb
import torch
import torch.nn as nn
import torch.utils.checkpoint
import safetensors | 8,470 | time_embedding_type: str = "positional",
time_embedding_dim: Optional[int] = None,
time_embedding_act_fn: Optional[str] = None,
timestep_post_act: Optional[str] = None,
time_cond_proj_dim: Optional[int] = None,
# Additional
use_motion_module = False,
motion_module_resolutions = ( 1,2,4,8 ),
motion_module_mid_block = False,
motion_module_decoder_only = False,
motion_module_type = None,
motion_module_kwargs = {},
unet_use_cross_frame_attention = None,
unet_use_temporal_attention = None,
):
super().__init__()
self.sample_size = sample_size
time_embed_dim = block_out_channels[0] * 4
# input
self.conv_in = InflatedConv3d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1))
# time
self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)
timestep_input_dim = block_out_channels[0]
# self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
self.time_embedding = TimestepEmbedding(
timestep_input_dim,
time_embed_dim,
act_fn=act_fn,
post_act_fn=timestep_post_act,
cond_proj_dim=time_cond_proj_dim,
)
# class embedding
if class_embed_type is None and num_class_embeds is not None:
self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)
elif class_embed_type == "timestep":
self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
elif class_embed_type == "identity":
self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)
else:
self.class_embedding = None
self.down_blocks = nn.ModuleList([])
self.mid_block = None
self.up_blocks = nn.ModuleList([])
if isinstance(only_cross_attention, bool):
only_cross_attention = [only_cross_attention] * len(down_block_types)
if isinstance(attention_head_dim, int):
attention_head_dim = (attention_head_dim,) * len(down_block_types)
if class_embeddings_concat:
# The time embeddings are concatenated with the class embeddings. The dimension of the
# time embeddings passed to the down, middle, and up blocks is twice the dimension of the
# regular time embeddings
blocks_time_embed_dim = time_embed_dim * 2
else:
blocks_time_embed_dim = time_embed_dim
# self.time_embedding = TimestepEmbedding(
# timestep_input_dim,
# time_embed_dim,
# act_fn=act_fn,
# post_act_fn=timestep_post_act,
# cond_proj_dim=time_cond_proj_dim,
# )
# down
output_channel = block_out_channels[0]
for i, down_block_type in enumerate(down_block_types):
res = 2 ** i
input_channel = output_channel
output_channel = block_out_channels[i]
is_final_block = i == len(block_out_channels) - 1
down_block = get_down_block(
down_block_type,
num_layers=layers_per_block,
in_channels=input_channel,
out_channels=output_channel,
temb_channels=time_embed_dim,
add_downsample=not is_final_block,
resnet_eps=norm_eps,
resnet_act_fn=act_fn,
resnet_groups=norm_num_groups,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=attention_head_dim[i],
downsample_padding=downsample_padding,
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention[i],
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
use_inflated_groupnorm=use_inflated_groupnorm,
use_motion_module=use_motion_module and (res in motion_module_resolutions) and (not motion_module_decoder_only),
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
self.down_blocks.append(down_block)
# mid
if mid_block_type == "UNetMidBlock3DCrossAttn":
| # Adapted from https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/unet_2d_condition.py
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
@dataclass
class UNet3DConditionOutput(BaseOutput):
sample: torch.FloatTensor
class UNet3DConditionModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin):
_supports_gradient_checkpointing = True
@register_to_config
def __init__(
self,
sample_size: Optional[int] = None,
in_channels: int = 4,
out_channels: int = 4,
center_input_sample: bool = False,
flip_sin_to_cos: bool = True,
freq_shift: int = 0,
down_block_types: Tuple[str] = (
"CrossAttnDownBlock3D",
"CrossAttnDownBlock3D",
"CrossAttnDownBlock3D",
"DownBlock3D",
),
mid_block_type: str = "UNetMidBlock3DCrossAttn",
up_block_types: Tuple[str] = (
"UpBlock3D",
"CrossAttnUpBlock3D",
"CrossAttnUpBlock3D",
"CrossAttnUpBlock3D"
),
only_cross_attention: Union[bool, Tuple[bool]] = False,
block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
layers_per_block: int = 2,
downsample_padding: int = 1,
mid_block_scale_factor: float = 1,
act_fn: str = "silu",
norm_num_groups: int = 32,
norm_eps: float = 1e-5,
cross_attention_dim: int = 1280,
attention_head_dim: Union[int, Tuple[int]] = 8,
dual_cross_attention: bool = False,
use_linear_projection: bool = False,
class_embed_type: Optional[str] = None,
num_class_embeds: Optional[int] = None,
upcast_attention: bool = False,
resnet_time_scale_shift: str = "default",
use_inflated_groupnorm=False,
addition_embed_type: Optional[str] = None,
addition_time_embed_dim: Optional[int] = None,
dropout: float = 0.0,
encoder_hid_dim: Optional[int] = None,
encoder_hid_dim_type: Optional[str] = None,
conv_in_kernel: int = 3,
conv_out_kernel: int = 3,
attention_type: str = "default",
class_embeddings_concat: bool = False,
mid_block_only_cross_attention: Optional[bool] = None,
cross_attention_norm: Optional[str] = None,
addition_embed_type_num_heads=64,
transformer_layers_per_block: Union[int, Tuple[int], Tuple[Tuple]] = 1,
time_embedding_type: str = "positional",
time_embedding_dim: Optional[int] = None,
time_embedding_act_fn: Optional[str] = None,
timestep_post_act: Optional[str] = None,
time_cond_proj_dim: Optional[int] = None,
# Additional
use_motion_module = False,
motion_module_resolutions = ( 1,2,4,8 ),
motion_module_mid_block = False,
motion_module_decoder_only = False,
motion_module_type = None,
motion_module_kwargs = {},
unet_use_cross_frame_attention = None,
unet_use_temporal_attention = None,
):
super().__init__()
self.sample_size = sample_size
time_embed_dim = block_out_channels[0] * 4
# input
self.conv_in = InflatedConv3d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1))
# time
self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)
timestep_input_dim = block_out_channels[0]
# self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
self.time_embedding = TimestepEmbedding(
timestep_input_dim,
time_embed_dim,
act_fn=act_fn,
post_act_fn=timestep_post_act,
cond_proj_dim=time_cond_proj_dim,
)
# class embedding
if class_embed_type is None and num_class_embeds is not None:
self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)
elif class_embed_type == "timestep":
self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
elif class_embed_type == "identity":
self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)
else:
self.class_embedding = None
self.down_blocks = nn.ModuleList([])
self.mid_block = None
self.up_blocks = nn.ModuleList([])
if isinstance(only_cross_attention, bool):
only_cross_attention = [only_cross_attention] * len(down_block_types)
if isinstance(attention_head_dim, int):
attention_head_dim = (attention_head_dim,) * len(down_block_types)
if class_embeddings_concat:
# The time embeddings are concatenated with the class embeddings. The dimension of the
# time embeddings passed to the down, middle, and up blocks is twice the dimension of the
# regular time embeddings
blocks_time_embed_dim = time_embed_dim * 2
else:
blocks_time_embed_dim = time_embed_dim
# self.time_embedding = TimestepEmbedding(
# timestep_input_dim,
# time_embed_dim,
# act_fn=act_fn,
# post_act_fn=timestep_post_act,
# cond_proj_dim=time_cond_proj_dim,
# )
# down
output_channel = block_out_channels[0]
for i, down_block_type in enumerate(down_block_types):
res = 2 ** i
input_channel = output_channel
output_channel = block_out_channels[i]
is_final_block = i == len(block_out_channels) - 1
down_block = get_down_block(
down_block_type,
num_layers=layers_per_block,
in_channels=input_channel,
out_channels=output_channel,
temb_channels=time_embed_dim,
add_downsample=not is_final_block,
resnet_eps=norm_eps,
resnet_act_fn=act_fn,
resnet_groups=norm_num_groups,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=attention_head_dim[i],
downsample_padding=downsample_padding,
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention[i],
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
use_inflated_groupnorm=use_inflated_groupnorm,
use_motion_module=use_motion_module and (res in motion_module_resolutions) and (not motion_module_decoder_only),
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
self.down_blocks.append(down_block)
# mid
if mid_block_type == "UNetMidBlock3DCrossAttn": | self.mid_block = UNetMidBlock3DCrossAttn( | 3 | 2023-11-04 01:35:44+00:00 | 12k |
Zaczero/openstreetmap-ng | src/services/user_signup_service.py | [
{
"identifier": "DB",
"path": "src/db.py",
"snippet": "DB = async_sessionmaker(\n DB_ENGINE,\n expire_on_commit=False,\n)"
},
{
"identifier": "auth_user",
"path": "src/lib/auth.py",
"snippet": "def auth_user() -> User | None:\n \"\"\"\n Get the authenticated user.\n \"\"\"\n\n return _context.get()[0]"
},
{
"identifier": "manual_auth_context",
"path": "src/lib/auth.py",
"snippet": "@contextmanager\ndef manual_auth_context(user: User):\n \"\"\"\n Context manager for manually authenticating the user.\n \"\"\"\n\n token = _context.set((user, ()))\n try:\n yield\n finally:\n _context.reset(token)"
},
{
"identifier": "Email",
"path": "src/lib/email.py",
"snippet": "class Email:\n @staticmethod\n def validate(email: str) -> str:\n \"\"\"\n Validate and normalize email address.\n\n Raises ValueError on error.\n\n >>> Email.normalize('example@ツ.ⓁⒾⒻⒺ')\n 'example@ツ.life'\n \"\"\"\n\n try:\n info = validate_email(email, check_deliverability=False, test_environment=TEST_ENV)\n except EmailNotValidError as e:\n logging.debug('Received invalid email address %r', email)\n raise ValueError('Invalid email address') from e\n\n return info.normalized\n\n @staticmethod\n async def validate_dns(email: str) -> None:\n \"\"\"\n Validate deliverability of email address.\n\n Raises ValueError on error.\n \"\"\"\n\n fn = partial(validate_email, check_deliverability=True, test_environment=TEST_ENV)\n\n try:\n await anyio.to_thread.run_sync(fn, email)\n except EmailNotValidError as e:\n logging.debug('Received invalid email address (dns check) %r', email)\n raise ValueError('Invalid email address') from e"
},
{
"identifier": "MessageCollector",
"path": "src/lib/message_collector.py",
"snippet": "class MessageCollector:\n def __init__(self) -> None:\n self._messages: dict[str | None, list[tuple[MessageSeverity, str]]] = defaultdict(list)\n\n def success(self, field: str | None, message: str) -> None:\n \"\"\"\n Collect a success message for a field.\n \"\"\"\n\n self._messages[field].append((MessageSeverity.success, message))\n\n def info(self, field: str | None, message: str) -> None:\n \"\"\"\n Collect an info message for a field.\n \"\"\"\n\n self._messages[field].append((MessageSeverity.info, message))\n\n def raise_error(self, field: str | None, message: str) -> NoReturn:\n \"\"\"\n Collect an error message for a field and raise a HTTPException.\n \"\"\"\n\n raise HTTPException(\n status.HTTP_400_BAD_REQUEST,\n detail=[\n {\n 'type': _MESSAGE_TYPE[MessageSeverity.error],\n 'loc': [None, field],\n 'msg': message,\n }\n ],\n )\n\n @property\n def result(self) -> dict:\n \"\"\"\n Return the collected messages as a dict.\n \"\"\"\n\n return {\n 'detail': [\n {\n 'type': _MESSAGE_TYPE[severity],\n 'loc': [None, field],\n 'msg': message,\n }\n for field, messages in self._messages.items()\n for severity, message in messages\n ]\n }"
},
{
"identifier": "PasswordHash",
"path": "src/lib/password_hash.py",
"snippet": "class PasswordHash:\n rehash_needed: bool | None = None\n\n def __init__(self, hasher: PasswordHasher):\n self._hasher = hasher\n\n def verify(self, password_hashed: str, salt: str | None, password: str) -> bool:\n \"\"\"\n Verify a password against a hash and optional salt.\n\n Returns `True` if the password matches, `False` otherwise.\n\n If the password matches but the hash needs to be rehashed, `rehash_needed` will be set to `True`.\n \"\"\"\n\n if self.rehash_needed is not None:\n raise RuntimeError(f'{self.verify.__qualname__} was reused')\n\n # argon2\n if password_hashed.startswith('$argon2'):\n if salt is not None:\n logging.warning('Unexpected salt for Argon2 hash')\n\n if self._hasher.verify(password_hashed, password):\n self.rehash_needed = self._hasher.check_needs_rehash(password_hashed)\n return True\n else:\n self.rehash_needed = False\n return False\n\n # rehash deprecated methods\n self.rehash_needed = True\n\n # md5 (deprecated)\n if len(password_hashed) == 32:\n valid_hash = md5(((salt or '') + password).encode()).hexdigest() # noqa: S324\n return compare_digest(password_hashed, valid_hash)\n\n # pbkdf2 (deprecated)\n if salt and '!' in salt:\n password_hashed_b = base64.b64decode(password_hashed)\n algorithm, iterations_, salt = salt.split('!')\n iterations = int(iterations_)\n valid_hash = pbkdf2_hmac(algorithm, password.encode(), salt.encode(), iterations, len(password_hashed_b))\n return compare_digest(password_hashed_b, valid_hash)\n\n hash_len = len(password_hashed)\n salt_len = len(salt or '')\n raise ValueError(f'Unknown password hash format: {hash_len=}, {salt_len=}')\n\n def hash(self, password: str) -> str: # noqa: A003\n \"\"\"\n Hash a password using latest recommended algorithm.\n \"\"\"\n\n return self._hasher.hash(password)\n\n @classmethod\n def default(cls) -> Self:\n \"\"\"\n Get a default password hasher.\n \"\"\"\n\n return cls(UserRole.get_password_hasher(()))"
},
{
"identifier": "t",
"path": "src/lib/translation.py",
"snippet": "def t(message: str, **kwargs) -> str:\n \"\"\"\n Get the translation for the given message.\n \"\"\"\n\n trans: GNUTranslations = _context_trans.get()\n return trans.gettext(message).format(**kwargs)"
},
{
"identifier": "translation_languages",
"path": "src/lib/translation.py",
"snippet": "def translation_languages() -> Sequence[str]:\n \"\"\"\n Get the languages from the translation context.\n \"\"\"\n\n return _context_langs.get()"
},
{
"identifier": "User",
"path": "src/models/db/user.py",
"snippet": "class User(Base.Sequential, CreatedAtMixin, RichTextMixin):\n __tablename__ = 'user'\n __rich_text_fields__ = (('description', TextFormat.markdown),)\n\n email: Mapped[str] = mapped_column(Unicode(EMAIL_MAX_LENGTH), nullable=False)\n display_name: Mapped[str] = mapped_column(Unicode, nullable=False)\n password_hashed: Mapped[str] = mapped_column(Unicode, nullable=False)\n created_ip: Mapped[IPv4Address | IPv6Address] = mapped_column(INET, nullable=False)\n\n status: Mapped[UserStatus] = mapped_column(Enum(UserStatus), nullable=False)\n\n auth_provider: Mapped[AuthProvider | None] = mapped_column(Enum(AuthProvider), nullable=True)\n auth_uid: Mapped[str | None] = mapped_column(Unicode, nullable=True)\n\n languages: Mapped[list[str]] = mapped_column(ARRAY(Unicode(LANGUAGE_CODE_MAX_LENGTH)), nullable=False)\n\n # defaults\n password_changed_at: Mapped[datetime | None] = mapped_column(DateTime, nullable=True, default=func.now())\n password_salt: Mapped[str | None] = mapped_column(Unicode, nullable=True, default=None)\n consider_public_domain: Mapped[bool] = mapped_column(Boolean, nullable=False)\n roles: Mapped[list[UserRole]] = mapped_column(ARRAY(Enum(UserRole)), nullable=False, default=())\n description: Mapped[str] = mapped_column(UnicodeText, nullable=False, default='')\n description_rich_hash: Mapped[bytes | None] = mapped_column(LargeBinary(HASH_SIZE), nullable=True, default=None)\n description_rich: Mapped[CacheEntry | None] = relationship(\n CacheEntry,\n primaryjoin=CacheEntry.id == description_rich_hash,\n viewonly=True,\n default=None,\n lazy='raise',\n )\n editor: Mapped[Editor | None] = mapped_column(Enum(Editor), nullable=True, default=None)\n avatar_type: Mapped[AvatarType] = mapped_column(Enum(AvatarType), nullable=False, default=AvatarType.default)\n avatar_id: Mapped[str | None] = mapped_column(Unicode(STORAGE_KEY_MAX_LENGTH), nullable=True, default=None)\n home_point: Mapped[Point | None] = mapped_column(PointType, nullable=True, default=None)\n home_zoom: Mapped[int | None] = mapped_column(SmallInteger, nullable=True, default=None)\n\n # relationships (avoid circular imports)\n if TYPE_CHECKING:\n from src.models.db.oauth1_application import OAuth1Application\n from src.models.db.oauth2_application import OAuth2Application\n from src.models.db.user_block import UserBlock\n\n oauth1_applications: Mapped[list['OAuth1Application']] = relationship(\n back_populates='user',\n order_by='OAuth1Application.id.asc()',\n lazy='raise',\n )\n oauth2_applications: Mapped[list['OAuth2Application']] = relationship(\n back_populates='user',\n order_by='OAuth2Application.id.asc()',\n lazy='raise',\n )\n user_blocks_given: Mapped[list['UserBlock']] = relationship(\n back_populates='from_user',\n order_by='UserBlock.id.desc()',\n lazy='raise',\n )\n user_blocks_received: Mapped[list['UserBlock']] = relationship(\n back_populates='to_user',\n order_by='UserBlock.id.desc()',\n lazy='raise',\n )\n active_user_blocks_received: Mapped[list['UserBlock']] = relationship(\n back_populates='to_user',\n order_by='UserBlock.id.desc()',\n lazy='raise',\n primaryjoin='and_(UserBlock.to_user_id == User.id, UserBlock.expired == false())',\n viewonly=True,\n )\n\n __table_args__ = (\n UniqueConstraint(email),\n UniqueConstraint(display_name),\n )\n\n @validates('languages')\n def validate_languages(self, _: str, value: Sequence[str]):\n if len(value) > USER_LANGUAGES_LIMIT:\n raise ValueError('Too many languages')\n return value\n\n @validates('description')\n def validate_description(self, _: 
str, value: str):\n if len(value) > USER_DESCRIPTION_MAX_LENGTH:\n raise ValueError('Description is too long')\n return value\n\n @property\n def is_administrator(self) -> bool:\n \"\"\"\n Check if the user is an administrator.\n \"\"\"\n\n return UserRole.administrator in self.roles\n\n @property\n def is_moderator(self) -> bool:\n \"\"\"\n Check if the user is a moderator.\n \"\"\"\n\n return UserRole.moderator in self.roles or self.is_administrator\n\n @property\n def extended_scopes(self) -> Sequence[ExtendedScope]:\n \"\"\"\n Get the user's extended scopes.\n \"\"\"\n\n result = []\n\n # role-specific scopes\n if self.is_administrator:\n result.append(ExtendedScope.role_administrator)\n if self.is_moderator:\n result.append(ExtendedScope.role_moderator)\n\n return result\n\n @property\n def permalink(self) -> str:\n \"\"\"\n Get the user's permalink.\n\n >>> user.permalink\n 'https://www.openstreetmap.org/user/permalink/123456'\n \"\"\"\n\n return f'{APP_URL}/user/permalink/{self.id}'\n\n @property\n def languages_str(self) -> str:\n return ' '.join(self.languages)\n\n @languages_str.setter\n def languages_str(self, s: str) -> None:\n languages = s.split()\n languages = (t.strip()[:LANGUAGE_CODE_MAX_LENGTH].strip() for t in languages)\n languages = (normalize_language_case(t) for t in languages)\n languages = (t for t in languages if t)\n self.languages = tuple(set(languages))\n\n @property\n def preferred_diary_language(self) -> LanguageInfo:\n \"\"\"\n Get the user's preferred diary language.\n \"\"\"\n\n # return the first valid language\n for code in self.languages:\n if lang := get_language_info(code):\n return lang\n\n # fallback to default\n return get_language_info(DEFAULT_LANGUAGE)\n\n @property\n def changeset_max_size(self) -> int:\n \"\"\"\n Get the maximum changeset size for this user.\n \"\"\"\n\n return UserRole.get_changeset_max_size(self.roles)\n\n @property\n def password_hasher(self) -> PasswordHash:\n \"\"\"\n Get the password hash class for this user.\n \"\"\"\n\n return PasswordHash(UserRole.get_password_hasher(self.roles))\n\n @property\n def avatar_url(self) -> str:\n \"\"\"\n Get the url for the user's avatar image.\n \"\"\"\n\n # when using gravatar, use user id as the avatar id\n if self.avatar_type == AvatarType.gravatar:\n return Avatar.get_url(self.avatar_type, self.id)\n else:\n return Avatar.get_url(self.avatar_type, self.avatar_id)\n\n async def home_distance_to(self, point: Point | None) -> float | None:\n return haversine_distance(self.home_point, point) if self.home_point and point else None"
},
{
"identifier": "MailFromType",
"path": "src/models/mail_from_type.py",
"snippet": "class MailFromType(BaseEnum):\n system = 'system'\n message = 'message'\n diary_comment = 'diary_comment'"
},
{
"identifier": "UserTokenStruct",
"path": "src/models/msgspec/user_token_struct.py",
"snippet": "class UserTokenStruct(msgspec.Struct, omit_defaults=True, forbid_unknown_fields=True, array_like=True):\n version: int = 1\n id: int | UUID\n token: bytes\n\n def __init__(self, id: int | UUID, token: bytes):\n self.id = id\n self.token = token\n\n def __str__(self) -> str:\n \"\"\"\n Return a string representation of the user token struct.\n \"\"\"\n\n return urlsafe_b64encode(MSGSPEC_MSGPACK_ENCODER.encode(self)).decode()\n\n @classmethod\n def from_str(cls, s: str) -> Self:\n \"\"\"\n Parse the given string into a user token struct.\n \"\"\"\n\n buff = urlsafe_b64decode(s)\n\n try:\n obj: Self = MSGSPEC_MSGPACK_DECODER.decode(buff, type=cls)\n except Exception:\n raise_for().bad_user_token_struct()\n\n return obj"
},
{
"identifier": "EmailStr",
"path": "src/models/str.py",
"snippet": ""
},
{
"identifier": "UserStatus",
"path": "src/models/user_status.py",
"snippet": "class UserStatus(BaseEnum):\n pending = 'pending'\n active = 'active'"
},
{
"identifier": "UserRepository",
"path": "src/repositories/user_repository.py",
"snippet": "class UserRepository:\n @staticmethod\n async def find_one_by_id(user_id: int) -> User | None:\n \"\"\"\n Find a user by id.\n \"\"\"\n\n async with DB() as session:\n return await session.get(User, user_id, options=[get_joinedload()])\n\n @staticmethod\n async def find_one_by_display_name(display_name: str) -> User | None:\n \"\"\"\n Find a user by display name.\n \"\"\"\n\n async with DB() as session:\n stmt = (\n select(User)\n .options(get_joinedload())\n .where(\n User.display_name == display_name,\n )\n )\n\n return await session.scalar(stmt)\n\n @staticmethod\n async def find_one_by_email(email: str) -> User | None:\n \"\"\"\n Find a user by email.\n \"\"\"\n\n async with DB() as session:\n stmt = (\n select(User)\n .options(get_joinedload())\n .where(\n User.email == email,\n )\n )\n\n return await session.scalar(stmt)\n\n @staticmethod\n async def find_many_by_ids(user_ids: Sequence[int]) -> Sequence[User]:\n \"\"\"\n Find users by ids.\n \"\"\"\n\n async with DB() as session:\n stmt = (\n select(User)\n .options(get_joinedload())\n .where(\n User.id.in_(user_ids),\n )\n )\n\n return (await session.scalars(stmt)).all()\n\n @staticmethod\n async def find_many_nearby(\n point: Point,\n *,\n max_distance: float = NEARBY_USERS_RADIUS_METERS,\n limit: int | None = NEARBY_USERS_LIMIT,\n ) -> Sequence[User]:\n \"\"\"\n Find nearby users.\n\n Users position is determined by their home point.\n \"\"\"\n\n point_wkt = point.wkt\n\n async with DB() as session:\n stmt = (\n select(User)\n .options(get_joinedload())\n .where(\n User.home_point != null(),\n func.ST_DWithin(User.home_point, point_wkt, max_distance),\n )\n .order_by(func.ST_Distance(User.home_point, point_wkt))\n )\n\n if limit is not None:\n stmt = stmt.limit(limit)\n\n return (await session.scalars(stmt)).all()\n\n @staticmethod\n async def check_display_name_available(display_name: str) -> bool:\n \"\"\"\n Check if a display name is available.\n \"\"\"\n\n user = auth_user()\n\n if user:\n # check if the name is unchanged\n if user.display_name == display_name:\n return True\n\n # check if the name is available\n other_user = await UserRepository.find_one_by_display_name(display_name)\n return other_user is None or other_user.id == user.id\n\n else:\n # check if the name is available\n other_user = await UserRepository.find_one_by_display_name(display_name)\n return other_user is None\n\n @staticmethod\n async def check_email_available(email: str) -> bool:\n \"\"\"\n Check if an email is available.\n \"\"\"\n\n user = auth_user()\n\n if user:\n # check if the email is unchanged\n if user.email == email:\n return True\n\n # check if the email is available\n other_user = await UserRepository.find_one_by_email(email)\n return other_user is None or other_user.id == user.id\n\n else:\n # check if the email is available\n other_user = await UserRepository.find_one_by_email(email)\n return other_user is None"
},
{
"identifier": "AuthService",
"path": "src/services/auth_service.py",
"snippet": "class AuthService:\n @staticmethod\n async def authenticate(\n display_name_or_email: str,\n password: str,\n *,\n basic_request: Request | None,\n ) -> User | None:\n \"\"\"\n Authenticate a user by (display name or email) and password.\n\n If `basic_request` is provided, the password will be cached for a short time.\n\n Returns `None` if the user is not found or the password is incorrect.\n \"\"\"\n\n # TODO: normalize unicode & strip\n\n # dot in string indicates email, display name can't have a dot\n if '.' in display_name_or_email:\n try:\n email = display_name_or_email\n email = Email.validate(email)\n user = await UserRepository.find_one_by_email(email)\n except ValueError:\n user = None\n else:\n display_name = display_name_or_email\n user = await UserRepository.find_one_by_display_name(display_name)\n\n if not user:\n logging.debug('User not found %r', display_name_or_email)\n return None\n\n # fast password cache with extra entropy\n # used primarily for api basic auth user:pass which is a hot spot\n if basic_request:\n key = '\\0'.join(\n (\n SECRET,\n user.password_hashed,\n basic_request.client.host,\n basic_request.headers.get('user-agent', ''),\n password,\n )\n )\n\n async def factory() -> str:\n logging.debug('Fast password cache miss for user %r', user.id)\n ph = user.password_hasher\n ph_valid = ph.verify(user.password_hashed, user.password_salt, password)\n return 'OK' if ph_valid else ''\n\n # TODO: FAST_PASSWORD_CACHE_EXPIRE\n # TODO: expire on pass change\n cache = await CacheService.get_one_by_key(key, _CACHE_CONTEXT, factory)\n else:\n cache = None\n\n if cache:\n ph = None\n ph_valid = cache.value == 'OK'\n else:\n ph = user.password_hasher\n ph_valid = ph.verify(user.password_hashed, user.password_salt, password)\n\n if not ph_valid:\n logging.debug('Password mismatch for user %r', user.id)\n return None\n\n if ph and ph.rehash_needed:\n new_hash = ph.hash(password)\n\n async with DB() as session:\n stmt = (\n update(User)\n .where(User.id == user.id, User.password_hashed == user.password_hashed)\n .values({User.password_hashed: new_hash, User.password_salt: None})\n )\n\n await session.execute(stmt)\n\n user.password_hashed = new_hash\n user.password_salt = None\n logging.debug('Rehashed password for user %r', user.id)\n\n return user\n\n @staticmethod\n async def create_session(user_id: int) -> UserTokenStruct:\n \"\"\"\n Create a new user session token.\n \"\"\"\n\n token_b = secrets.token_bytes(32)\n token_hashed = hash_b(token_b, context=None)\n\n async with DB() as session:\n token = UserTokenSession(\n user_id=user_id,\n token_hashed=token_hashed,\n expires_at=utcnow() + USER_TOKEN_SESSION_EXPIRE,\n )\n\n session.add(token)\n\n return UserTokenStruct(token.id, token_b)\n\n @staticmethod\n async def authenticate_session(token_struct: UserTokenStruct) -> User | None:\n \"\"\"\n Authenticate a user by user session token.\n\n Returns `None` if the session is not found or the session key is incorrect.\n \"\"\"\n\n token = await UserTokenSessionRepository.find_one_by_token_struct(token_struct)\n\n if not token:\n logging.debug('Session not found %r', token_struct.id)\n return None\n\n return token.user\n\n @staticmethod\n async def authenticate_oauth(request: Request) -> tuple[User, Sequence[Scope]] | None:\n \"\"\"\n Authenticate a user by OAuth1.0 or OAuth2.0.\n\n Returns `None` if the request is not an OAuth request.\n\n Raises `OAuthError` if the request is an invalid OAuth request.\n \"\"\"\n\n authorization = 
request.headers.get('authorization')\n\n if not authorization:\n # oauth1 requests may use query params or body params\n oauth_version = 1\n else:\n scheme, _ = get_authorization_scheme_param(authorization)\n scheme = scheme.lower()\n\n if scheme == 'oauth':\n oauth_version = 1\n elif scheme == 'bearer':\n oauth_version = 2\n else:\n # not an OAuth request\n return None\n\n if oauth_version == 1:\n request_ = await OAuth1.convert_request(request)\n\n if not request_.signature:\n # not an OAuth request\n return None\n\n nonce = request_.oauth_params.get('oauth_nonce')\n timestamp = request_.timestamp\n await OAuth1NonceService.spend(nonce, timestamp)\n\n token = await OAuth1.parse_and_validate(request_)\n elif oauth_version == 2:\n token = await OAuth2.parse_and_validate(request)\n else:\n raise NotImplementedError(f'Unsupported OAuth version {oauth_version}')\n\n if not token.authorized_at:\n raise_for().oauth_bad_user_token()\n\n return token.user, token.scopes"
},
{
"identifier": "MailService",
"path": "src/services/mail_service.py",
"snippet": "class MailService:\n @staticmethod\n async def schedule(\n from_user: User | None,\n from_type: MailFromType,\n to_user: User,\n subject: str,\n template_name: str,\n template_data: dict,\n ref: str | None = None,\n priority: int = 0,\n ) -> None:\n \"\"\"\n Schedule a mail for later processing.\n \"\"\"\n\n # use destination user's preferred language\n with translation_context(to_user.languages):\n body = render(template_name, **template_data)\n\n async with DB() as session:\n mail = Mail(\n from_user_id=from_user.id if from_user else None,\n from_type=from_type,\n to_user_id=to_user.id,\n subject=subject,\n body=body,\n ref=ref,\n priority=priority,\n )\n\n logging.debug('Scheduling mail %r to %d with subject %r', mail.id, to_user.id, subject)\n session.add(mail)\n\n @staticmethod\n async def process_scheduled() -> None:\n \"\"\"\n Process the next scheduled mail.\n \"\"\"\n\n async with DB() as session, session.begin():\n now = utcnow()\n stmt = (\n select(Mail)\n .where(\n or_(\n Mail.processing_at == null(),\n Mail.processing_at <= now,\n )\n )\n .order_by(\n Mail.processing_counter,\n Mail.priority.desc(),\n Mail.created_at,\n )\n .with_for_update(skip_locked=True)\n .limit(1)\n )\n\n mail = await session.scalar(stmt)\n\n # nothing to do\n if not mail:\n return\n\n try:\n logging.info('Processing mail %r to %d with subject %r', mail.id, mail.to_user.id, mail.subject)\n\n with anyio.fail_after(MAIL_PROCESSING_TIMEOUT.total_seconds() - 5):\n await _send_smtp(mail)\n\n await session.delete(mail)\n\n except Exception:\n expires_at = mail.created_at + MAIL_UNPROCESSED_EXPIRE\n processing_at = now + timedelta(minutes=mail.processing_counter**MAIL_UNPROCESSED_EXPONENT)\n\n if expires_at < processing_at:\n logging.warning(\n 'Expiring unprocessed mail %r, created at: %r',\n mail.id,\n mail.created_at,\n exc_info=True,\n )\n await session.delete(mail)\n return\n\n logging.info('Requeuing unprocessed mail %r', mail.id, exc_info=True)\n mail.processing_counter += 1\n mail.processing_at = processing_at\n # TODO: redundant? await session.commit()"
},
{
"identifier": "UserTokenAccountConfirmService",
"path": "src/services/user_token_account_confirm_service.py",
"snippet": "class UserTokenAccountConfirmService:\n @staticmethod\n async def create() -> UserTokenStruct:\n \"\"\"\n Create a new user account confirmation token.\n \"\"\"\n\n token_b = secrets.token_bytes(32)\n token_hashed = hash_b(token_b, context=None)\n\n async with DB() as session:\n token = UserTokenAccountConfirm(\n user_id=auth_user().id,\n token_hashed=token_hashed,\n expires_at=utcnow() + USER_TOKEN_ACCOUNT_CONFIRM_EXPIRE,\n )\n\n session.add(token)\n\n return UserTokenStruct(token.id, token_b)\n\n @staticmethod\n async def confirm(token_struct: UserTokenStruct) -> None:\n \"\"\"\n Confirm a user account.\n \"\"\"\n\n token = await UserTokenAccountConfirmRepository.find_one_by_token_struct(token_struct)\n\n if not token:\n raise_for().bad_user_token_struct()\n\n # NOTE: potential timing attack, but the impact is negligible\n async with DB() as session, session.begin():\n stmt = (\n update(User)\n .where(\n User.id == token.user_id,\n User.status == UserStatus.pending,\n )\n .values({User.status: UserStatus.active})\n )\n\n await session.execute(stmt)\n\n stmt = delete(UserTokenAccountConfirm).where(\n UserTokenAccountConfirm.id == token_struct.id,\n )\n\n await session.execute(stmt)"
},
{
"identifier": "parse_request_ip",
"path": "src/utils.py",
"snippet": "def parse_request_ip(request: Request) -> IPv4Address | IPv6Address:\n \"\"\"\n Parse the client IP address from a `Request`.\n \"\"\"\n\n return ip_address(request.client.host)"
}
] | from fastapi import Request
from src.db import DB
from src.lib.auth import auth_user, manual_auth_context
from src.lib.email import Email
from src.lib.message_collector import MessageCollector
from src.lib.password_hash import PasswordHash
from src.lib.translation import t, translation_languages
from src.models.db.user import User
from src.models.mail_from_type import MailFromType
from src.models.msgspec.user_token_struct import UserTokenStruct
from src.models.str import EmailStr, PasswordStr, UserNameStr
from src.models.user_status import UserStatus
from src.repositories.user_repository import UserRepository
from src.services.auth_service import AuthService
from src.services.mail_service import MailService
from src.services.user_token_account_confirm_service import UserTokenAccountConfirmService
from src.utils import parse_request_ip | 7,364 |
class UserSignupService:
@staticmethod
async def signup(
request: Request,
collector: MessageCollector,
*,
display_name: UserNameStr,
email: EmailStr,
password: PasswordStr,
) -> UserTokenStruct:
"""
Create a new user.
Returns a new user session token.
"""
# some early validation
if not await UserRepository.check_display_name_available(display_name):
collector.raise_error('display_name', t('user.display_name_already_taken'))
if not await UserRepository.check_email_available(email):
collector.raise_error('email', t('user.email_already_taken'))
if not await Email.validate_dns(email):
collector.raise_error('email', t('user.invalid_email'))
# precompute values to reduce transaction time
password_hashed = PasswordHash.default().hash(password)
created_ip = parse_request_ip(request)
languages = translation_languages()
# create user
async with DB() as session:
user = User(
email=email,
display_name=display_name,
password_hashed=password_hashed,
created_ip=created_ip,
status=UserStatus.pending,
auth_provider=None, # TODO: support
auth_uid=None,
languages=languages,
)
session.add(user)
with manual_auth_context(user):
await UserSignupService.send_confirm_email()
return await AuthService.create_session(user.id)
@staticmethod
async def send_confirm_email() -> None:
"""
Send a confirmation email for the account.
"""
token = await UserTokenAccountConfirmService.create()
await MailService.schedule(
from_user=None,
|
class UserSignupService:
@staticmethod
async def signup(
request: Request,
collector: MessageCollector,
*,
display_name: UserNameStr,
email: EmailStr,
password: PasswordStr,
) -> UserTokenStruct:
"""
Create a new user.
Returns a new user session token.
"""
# some early validation
if not await UserRepository.check_display_name_available(display_name):
collector.raise_error('display_name', t('user.display_name_already_taken'))
if not await UserRepository.check_email_available(email):
collector.raise_error('email', t('user.email_already_taken'))
if not await Email.validate_dns(email):
collector.raise_error('email', t('user.invalid_email'))
# precompute values to reduce transaction time
password_hashed = PasswordHash.default().hash(password)
created_ip = parse_request_ip(request)
languages = translation_languages()
# create user
async with DB() as session:
user = User(
email=email,
display_name=display_name,
password_hashed=password_hashed,
created_ip=created_ip,
status=UserStatus.pending,
auth_provider=None, # TODO: support
auth_uid=None,
languages=languages,
)
session.add(user)
with manual_auth_context(user):
await UserSignupService.send_confirm_email()
return await AuthService.create_session(user.id)
@staticmethod
async def send_confirm_email() -> None:
"""
Send a confirmation email for the account.
"""
token = await UserTokenAccountConfirmService.create()
await MailService.schedule(
from_user=None, | from_type=MailFromType.system, | 9 | 2023-11-04 01:12:13+00:00 | 12k |
codefuse-ai/Collinear-Constrained-Attention | train/trainer/atorch_trainer.py | [
{
"identifier": "print_rank_0",
"path": "utils/common_utils.py",
"snippet": "TASK2ID = {}\nID2TASK = {}\n L = args.num_hidden_layers\n V = args.vocab_size\ndef get_rank():\ndef get_local_rank():\ndef is_main_process():\ndef is_local_main_process():\ndef print_rank_0(*message):\ndef get_world_size():\ndef wait_for_everyone():\ndef atorch_init_distributed(backend=\"nccl\"):\ndef atorch_reset_distributed():\ndef _goes_first(is_main):\ndef get_model_params_num(model):\ndef main_process_first():\ndef unwrap_model(model):\ndef honor_type(obj, generator):\ndef recursively_apply(\n func,\n data,\n *args,\n test_type=lambda t: isinstance(t, torch.Tensor),\n error_on_other_type=False,\n **kwargs,\n):\ndef gather(tensor):\n def _gpu_gather_one(tensor):\ndef save_ckpt(model, optimizer, lr_scheduler, epoch, steps, save_path, logger):\ndef scheduler_and_resume(args, train_dataloader, model, optimizer, checkpoint):\ndef get_computation_speed(batch_size_per_device, seq_len, step_time):\ndef human_readable_flops(num):\ndef get_tflops_new(args, batch_size, seq_len, step_time):\ndef get_tflops_megatron(total_model_param, hidden_size, num_hidden_layers, \n batch_size_per_device, seq_len, step_time):\ndef is_old_version(path):\ndef generate_task_id(data_paths, train_mode):\n def __init__(self, patience=7, verbose=False, delta=0):\n def __call__(self, val_loss, model):\n def save_checkpoint(self, val_loss, model):\nclass EarlyStopping:"
},
{
"identifier": "FAMO",
"path": "utils/auto_accelerate_utils.py",
"snippet": "class FAMO:\n \"\"\"\n Fast Adaptive Multitask Optimization.\n \"\"\"\n def __init__(\n self,\n n_tasks: int,\n device: torch.device,\n mode: str = 'famo_valid',\n gamma: float = 0.001, # the regularization coefficient, default: 0.001\n w_lr: float = 0.025, # the learning rate of the task logits, default: 0.025\n max_norm: float = 1.0, # the maximum gradient norm\n ):\n self.min_losses = torch.zeros(n_tasks).to(device)\n self.w = torch.tensor([0.0] * n_tasks, device=device, requires_grad=True)\n self.w_opt = torch.optim.Adam([self.w], lr=w_lr, weight_decay=gamma)\n self.max_norm = max_norm\n self.n_tasks = n_tasks\n self.device = device\n self.first_train_step = True\n self.first_valid_step = True\n self.print_loss = None\n self.mode = mode\n self.prev_train_loss = None\n self.prev_valid_loss = None\n self.ratio_valid_task_loss_prev = torch.zeros(len(ID2TASK)).to(device)\n self.global_steps = 0\n self.z = None\n \n def set_min_losses(self, losses):\n self.min_losses = losses\n\n def get_weighted_loss(self, losses):\n self.prev_train_loss = losses\n self.z = F.softmax(self.w * 1, -1)\n # if is_main_process() and (self.global_steps % 10 == 0):\n # logger.info(f\"complete_steps: {self.global_steps}, per_task_weight: {self.z}\")\n if -1e20 in self.ratio_valid_task_loss_prev and self.mode == 'famo_valid_ema':\n self.z = F.softmax(torch.where(self.ratio_valid_task_loss_prev == -1e20, -1e20, self.z), -1)\n if self.global_steps % 10 == 0:\n print_rank_0(f'ratio_valid_task_loss_prev is {self.ratio_valid_task_loss_prev}, after, z is {self.z}')\n D = losses - self.min_losses + 1e-8\n if self.mode.startswith('famo_train'):\n c = (self.z / D).sum().detach()\n loss = (D.log() * self.z / c).sum()\n else:\n loss = (D * self.z).sum()\n return loss\n\n def update(self, curr_loss):\n if self.mode.startswith('famo_valid') and self.first_valid_step:\n self.first_valid_step = False\n self.prev_valid_loss = curr_loss\n return\n if self.mode.startswith('famo_train'):\n prev_loss = self.prev_train_loss\n else:\n prev_loss = self.prev_valid_loss\n self.prev_valid_loss = curr_loss\n delta = (prev_loss - self.min_losses + 1e-8).log() - \\\n (curr_loss - self.min_losses + 1e-8).log()\n with torch.enable_grad():\n d = torch.autograd.grad(F.softmax(self.w, -1),\n self.w,\n grad_outputs=delta.detach())[0]\n self.w_opt.zero_grad()\n self.w.grad = d\n self.w_opt.step()\n\n def backward(\n self,\n losses: torch.Tensor,\n shared_parameters: Union[\n List[torch.nn.parameter.Parameter], torch.Tensor\n ] = None,\n ):\n \"\"\"\n Parameters\n ----------\n losses :\n shared_parameters :\n task_specific_parameters :\n last_shared_parameters : parameters of last shared layer/block\n Returns\n -------\n Loss, extra outputs\n \"\"\"\n loss = self.get_weighted_loss(losses=losses)\n # if self.max_norm > 0 and shared_parameters is not None:\n # torch.nn.utils.clip_grad_norm_(shared_parameters, self.max_norm)\n # loss.backward()\n return loss"
},
{
"identifier": "get_ltor_masks_and_position_ids",
"path": "utils/auto_accelerate_utils.py",
"snippet": "def get_ltor_masks_and_position_ids(data):\n \"\"\"Build masks and position id for left to right model.\"\"\"\n\n # Extract batch size and sequence length.\n batch_size, seq_length = data.size()\n\n # Attention mask (lower triangular).\n # attention_mask = get_attn_mask(\n # seq_length=seq_length,\n # device=data.device,\n # )\n attention_mask = torch.ones((batch_size, seq_length), device=data.device)\n\n # Position ids.\n position_ids = torch.arange(seq_length, dtype=torch.long, device=data.device)\n position_ids = position_ids.unsqueeze(0).expand_as(data).clone()\n\n return attention_mask, position_ids"
},
{
"identifier": "SelfPacedStatus",
"path": "utils/auto_accelerate_utils.py",
"snippet": "class SelfPacedStatus:\n def __init__(self, interval=20):\n super(SelfPacedStatus, self).__init__()\n self.complete_steps = None\n self.current_epoch = None\n self.mode = None\n self.task_loss_prev = None\n self.w = None\n self.interval = interval\n \n def update(self, complete_steps, current_epoch, mode, task_loss_prev):\n self.complete_steps = complete_steps\n self.current_epoch = current_epoch\n self.mode = mode\n self.task_loss_prev = task_loss_prev"
},
{
"identifier": "GPTNeoXLayer",
"path": "model/gpt_neox/modeling_gpt_neox.py",
"snippet": "class GPTNeoXLayer(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.use_parallel_residual = config.use_parallel_residual\n self.input_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.attention = GPTNeoXAttention(config)\n self.mlp = GPTNeoXMLP(config)\n\n def forward(\n self,\n hidden_states: Optional[torch.FloatTensor],\n attention_mask: Optional[torch.FloatTensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n use_cache: Optional[bool] = False,\n layer_past: Optional[Tuple[torch.Tensor]] = None,\n output_attentions: Optional[bool] = False,\n ):\n\n attention_layer_outputs = self.attention(\n self.input_layernorm(hidden_states),\n attention_mask=attention_mask,\n position_ids=position_ids,\n layer_past=layer_past,\n head_mask=head_mask,\n use_cache=use_cache,\n output_attentions=output_attentions,\n )\n attn_output = attention_layer_outputs[0] # output_attn: attn_output, present, (attn_weights)\n outputs = attention_layer_outputs[1:]\n\n if self.use_parallel_residual:\n # pseudocode:\n # x = x + attn(ln1(x)) + mlp(ln2(x))\n mlp_output = self.mlp(self.post_attention_layernorm(hidden_states))\n hidden_states = mlp_output + attn_output + hidden_states\n else:\n # pseudocode:\n # x = x + attn(ln1(x))\n # x = x + mlp(ln2(x))\n attn_output = attn_output + hidden_states\n mlp_output = self.mlp(self.post_attention_layernorm(attn_output))\n hidden_states = mlp_output + attn_output\n\n if use_cache:\n outputs = (hidden_states,) + outputs # hidden_states, present, (attn_weights)\n else:\n outputs = (hidden_states,) + outputs[1:] # hidden_states, (attn_weights)\n\n return outputs"
},
{
"identifier": "GPTNeoXAttention",
"path": "model/gpt_neox/modeling_gpt_neox.py",
"snippet": "class GPTNeoXAttention(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.config = config\n self.num_attention_heads = config.num_attention_heads\n self.hidden_size = config.hidden_size\n if self.hidden_size % self.num_attention_heads != 0:\n raise ValueError(\n \"The hidden size is not divisble by the number of attention heads! Make sure to update them\"\n )\n self.head_size = self.hidden_size // self.num_attention_heads\n self.rotary_ndims = int(self.head_size * config.rotary_pct)\n self._init_bias(config.max_position_embeddings)\n self.register_buffer(\"masked_bias\", torch.tensor(-1e9), persistent=False)\n self._init_rope()\n self.register_buffer(\n \"norm_factor\",\n torch.sqrt(torch.tensor(self.head_size, dtype=torch.float32)).to(torch.get_default_dtype()),\n persistent=False,\n )\n self.query_key_value = nn.Linear(config.hidden_size, 3 * config.hidden_size)\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n\n def _init_bias(self, max_positions, device=None):\n self.register_buffer(\n \"bias\",\n torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool)).view(\n 1, 1, max_positions, max_positions\n ),\n persistent=False,\n )\n if device is not None:\n self.bias = self.bias.to(device)\n\n def _init_rope(self):\n if self.config.rope_scaling is None:\n self.rotary_emb = GPTNeoXRotaryEmbedding(\n self.rotary_ndims, self.config.max_position_embeddings, base=self.config.rotary_emb_base\n )\n else:\n scaling_type = self.config.rope_scaling[\"type\"]\n scaling_factor = self.config.rope_scaling[\"factor\"]\n if scaling_type == \"linear\":\n self.rotary_emb = GPTNeoXLinearScalingRotaryEmbedding(\n self.rotary_ndims,\n self.config.max_position_embeddings,\n base=self.config.rotary_emb_base,\n scaling_factor=scaling_factor,\n )\n elif scaling_type == \"dynamic\":\n self.rotary_emb = GPTNeoXDynamicNTKScalingRotaryEmbedding(\n self.rotary_ndims,\n self.config.max_position_embeddings,\n base=self.config.rotary_emb_base,\n scaling_factor=scaling_factor,\n )\n else:\n raise ValueError(f\"Unknown RoPE scaling type {scaling_type}\")\n\n def forward(\n self,\n hidden_states: torch.FloatTensor,\n attention_mask: torch.FloatTensor,\n position_ids: torch.LongTensor,\n head_mask: Optional[torch.FloatTensor] = None,\n layer_past: Optional[Tuple[torch.Tensor]] = None,\n use_cache: Optional[bool] = False,\n output_attentions: Optional[bool] = False,\n ):\n has_layer_past = layer_past is not None\n\n # Compute QKV\n # Attention heads [batch, seq_len, hidden_size]\n # --> [batch, seq_len, (np * 3 * head_size)]\n qkv = self.query_key_value(hidden_states)\n\n # [batch, seq_len, (num_heads * 3 * head_size)]\n # --> [batch, seq_len, num_heads, 3 * head_size]\n new_qkv_shape = qkv.size()[:-1] + (self.num_attention_heads, 3 * self.head_size)\n qkv = qkv.view(*new_qkv_shape)\n\n # [batch, seq_len, num_attention_heads, 3 * head_size] --> 3 [batch, num_attention_heads, seq_len, head_size]\n query = qkv[..., : self.head_size].permute(0, 2, 1, 3)\n t_layer = qkv[..., self.head_size : 2 * self.head_size].permute(0, 2, 1, 3)\n value = qkv[..., 2 * self.head_size :].permute(0, 2, 1, 3)\n\n t_layer_1 = t_layer[..., : t_layer.shape[-1] // 2]\n t_layer_2 = t_layer[..., t_layer.shape[-1] // 2 :]\n t_layer = (t_layer_1+t_layer_2)/2\n\n t_layer = F.relu(t_layer)\n\n t_layer = torch.cat((t_layer, t_layer), dim=-1)\n\n # Compute rotary embeddings on rotary_ndims\n query_rot = query[..., : self.rotary_ndims]\n query_pass = query[..., self.rotary_ndims :]\n t_rot = 
t_layer[..., : self.rotary_ndims]\n t_pass = t_layer[..., self.rotary_ndims :]\n\n # Compute token offset for rotary embeddings (when decoding)\n seq_len = t_layer.shape[-2]\n if has_layer_past:\n seq_len += layer_past[0].shape[-2]\n cos, sin = self.rotary_emb(value, seq_len=seq_len)\n query_rot, t_layer = apply_rotary_pos_emb(query_rot, t_rot, cos, sin, position_ids)\n query_rot = torch.cat((query_rot, query_pass), dim=-1)\n t_layer = torch.cat((t_layer, t_pass), dim=-1)\n\n # Cache QKV values\n if has_layer_past:\n past_t = layer_past[0]\n past_value = layer_past[1]\n t_layer = torch.cat((past_t, t_layer), dim=-2)\n value = torch.cat((past_value, value), dim=-2)\n present = (t_layer, value) if use_cache else None\n\n # Compute attention\n attn_output, attn_weights = self._attn(query, t_layer, query_rot, value, attention_mask, head_mask)\n\n # Reshape outputs\n attn_output = self._merge_heads(attn_output, self.num_attention_heads, self.head_size)\n attn_output = self.dense(attn_output)\n\n outputs = (attn_output, present)\n if output_attentions:\n outputs += (attn_weights,)\n\n return outputs\n\n @classmethod\n def _split_heads(cls, tensor, num_attention_heads, attn_head_size):\n \"\"\"\n Splits hidden dim into attn_head_size and num_attention_heads\n \"\"\"\n # tensor: [bs, seq_len, hidden_size]\n new_shape = tensor.size()[:-1] + (num_attention_heads, attn_head_size)\n # -> [bs, seq_len, num_attention_heads, attn_head_size]\n tensor = tensor.view(new_shape)\n # -> [bs, num_attention_heads, seq_len, attn_head_size]\n tensor = tensor.permute(0, 2, 1, 3)\n return tensor\n\n @classmethod\n def _merge_heads(cls, tensor, num_attention_heads, attn_head_size):\n \"\"\"\n Merges attn_head_size dim and num_attn_heads dim into hidden dim\n \"\"\"\n # tensor [bs, num_attention_heads, seq_len, attn_head_size]\n tensor = tensor.permute(0, 2, 1, 3).contiguous()\n # -> [bs, seq_len, num_attention_heads, attn_head_size]\n tensor = tensor.view(tensor.size(0), tensor.size(1), num_attention_heads * attn_head_size)\n # -> [bs, seq_len, hidden_size]\n return tensor\n\n def _attn(self, query, t_layer, query_rot, value, attention_mask=None, head_mask=None):\n # q, k, v: [bs, num_attention_heads, seq_len, attn_head_size]\n # compute causal mask from causal mask buffer\n batch_size, num_attention_heads, query_length, attn_head_size = query.size()\n key_length = t_layer.size(-2)\n\n # dynamically increase the causal mask with the key length, if needed.\n if key_length > self.bias.shape[-1]:\n self._init_bias(key_length, device=t_layer.device)\n causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length]\n\n # query = query.view(batch_size * num_attention_heads, query_length, attn_head_size)\n # key = key.view(batch_size * num_attention_heads, key_length, attn_head_size)\n # attn_scores = torch.zeros(\n # batch_size * num_attention_heads,\n # query_length,\n # key_length,\n # dtype=query.dtype,\n # device=key.device,\n # )\n # attn_scores = torch.baddbmm(\n # attn_scores,\n # query,\n # key.transpose(1, 2),\n # beta=1.0,\n # alpha=(torch.tensor(1.0, dtype=self.norm_factor.dtype, device=self.norm_factor.device) / self.norm_factor),\n # )\n # attn_scores = attn_scores.view(batch_size, num_attention_heads, query_length, key_length)\n\n # print(query.shape)\n # print(t_layer.shape)\n # print(query_rot.shape)\n\n attn_scores = contract(\n # 'nbpd,sbpd,nbpd->bpns',\n 'bpnd,bpsd,bpnd->bpns',\n query, # [sq, b, np, hn] [b,np,sq,hn]\n t_layer, #[sk, b, np, hn] [b,np,sk,hn]\n query_rot, # [sq, b, np, 
hn] [b,np,sq,hn]\n backend='torch'\n ) / self.norm_factor\n\n mask_value = torch.finfo(attn_scores.dtype).min\n # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`.\n # Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device`\n mask_value = torch.tensor(mask_value, dtype=attn_scores.dtype).to(attn_scores.device)\n attn_scores = torch.where(causal_mask, attn_scores, mask_value)\n\n if attention_mask is not None:\n # Apply the attention mask\n attn_scores = attn_scores + attention_mask\n\n attn_weights = nn.functional.softmax(attn_scores, dim=-1)\n attn_weights = attn_weights.to(value.dtype)\n\n # Mask heads if we want to\n if head_mask is not None:\n attn_weights = attn_weights * head_mask\n\n attn_output = torch.matmul(attn_weights, value)\n return attn_output, attn_weights"
},
{
"identifier": "GPTNeoXMLP",
"path": "model/gpt_neox/modeling_gpt_neox.py",
"snippet": "class GPTNeoXMLP(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense_h_to_4h = nn.Linear(config.hidden_size, config.intermediate_size)\n self.dense_4h_to_h = nn.Linear(config.intermediate_size, config.hidden_size)\n self.act = ACT2FN[config.hidden_act]\n\n def forward(self, hidden_states):\n hidden_states = self.dense_h_to_4h(hidden_states)\n hidden_states = self.act(hidden_states)\n hidden_states = self.dense_4h_to_h(hidden_states)\n return hidden_states"
},
{
"identifier": "LlamaDecoderLayer",
"path": "model/llama/modeling_llama.py",
"snippet": "class LlamaDecoderLayer(nn.Module):\n def __init__(self, config: LlamaConfig):\n super().__init__()\n self.hidden_size = config.hidden_size\n self.self_attn = LlamaAttention(config=config)\n self.mlp = LlamaMLP(config)\n self.input_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)\n self.post_attention_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)\n\n def forward(\n self,\n hidden_states: torch.Tensor,\n attention_mask: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n past_key_value: Optional[Tuple[torch.Tensor]] = None,\n output_attentions: Optional[bool] = False,\n use_cache: Optional[bool] = False,\n ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:\n \"\"\"\n Args:\n hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`\n attention_mask (`torch.FloatTensor`, *optional*): attention mask of size\n `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under\n returned tensors for more detail.\n use_cache (`bool`, *optional*):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding\n (see `past_key_values`).\n past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states\n \"\"\"\n\n residual = hidden_states\n\n hidden_states = self.input_layernorm(hidden_states)\n\n # Self Attention\n hidden_states, self_attn_weights, present_key_value = self.self_attn(\n hidden_states=hidden_states,\n attention_mask=attention_mask,\n position_ids=position_ids,\n past_key_value=past_key_value,\n output_attentions=output_attentions,\n use_cache=use_cache,\n )\n hidden_states = residual + hidden_states\n\n # Fully Connected\n residual = hidden_states\n hidden_states = self.post_attention_layernorm(hidden_states)\n hidden_states = self.mlp(hidden_states)\n hidden_states = residual + hidden_states\n\n outputs = (hidden_states,)\n\n if output_attentions:\n outputs += (self_attn_weights,)\n\n if use_cache:\n outputs += (present_key_value,)\n\n return outputs"
},
{
"identifier": "LlamaAttention",
"path": "model/llama/modeling_llama.py",
"snippet": "class LlamaAttention(nn.Module):\n \"\"\"Multi-headed attention from 'Attention Is All You Need' paper\"\"\"\n\n def __init__(self, config: LlamaConfig):\n super().__init__()\n self.config = config\n self.hidden_size = config.hidden_size\n self.num_heads = config.num_attention_heads\n self.head_dim = self.hidden_size // self.num_heads\n self.num_key_value_heads = config.num_key_value_heads\n self.num_key_value_groups = self.num_heads // self.num_key_value_heads\n self.max_position_embeddings = config.max_position_embeddings\n\n #20230803 T需要保持非负\n self.relu = ACT2FN['relu']\n\n if (self.head_dim * self.num_heads) != self.hidden_size:\n raise ValueError(\n f\"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}\"\n f\" and `num_heads`: {self.num_heads}).\"\n )\n self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)\n #20230803 K改为T\n self.t_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)\n # self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)\n self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)\n self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)\n self._init_rope()\n\n def _init_rope(self):\n if self.config.rope_scaling is None:\n self.rotary_emb = LlamaRotaryEmbedding(self.head_dim, max_position_embeddings=self.max_position_embeddings)\n else:\n scaling_type = self.config.rope_scaling[\"type\"]\n scaling_factor = self.config.rope_scaling[\"factor\"]\n if scaling_type == \"linear\":\n self.rotary_emb = LlamaLinearScalingRotaryEmbedding(\n self.head_dim, max_position_embeddings=self.max_position_embeddings, scaling_factor=scaling_factor\n )\n elif scaling_type == \"dynamic\":\n self.rotary_emb = LlamaDynamicNTKScalingRotaryEmbedding(\n self.head_dim, max_position_embeddings=self.max_position_embeddings, scaling_factor=scaling_factor\n )\n else:\n raise ValueError(f\"Unknown RoPE scaling type {scaling_type}\")\n\n def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):\n return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()\n\n def forward(\n self,\n hidden_states: torch.Tensor,\n attention_mask: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n past_key_value: Optional[Tuple[torch.Tensor]] = None,\n output_attentions: bool = False,\n use_cache: bool = False,\n ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:\n bsz, q_len, _ = hidden_states.size()\n\n # todo tp>1\n if self.config.pretraining_tp > 1:\n key_value_slicing = (self.num_key_value_heads * self.head_dim) // self.config.pretraining_tp\n query_slices = self.q_proj.weight.split(\n (self.num_heads * self.head_dim) // self.config.pretraining_tp, dim=0\n )\n key_slices = self.k_proj.weight.split(key_value_slicing, dim=0)\n value_slices = self.v_proj.weight.split(key_value_slicing, dim=0)\n\n query_states = [F.linear(hidden_states, query_slices[i]) for i in range(self.config.pretraining_tp)]\n query_states = torch.cat(query_states, dim=-1)\n\n key_states = [F.linear(hidden_states, key_slices[i]) for i in range(self.config.pretraining_tp)]\n key_states = torch.cat(key_states, dim=-1)\n\n value_states = [F.linear(hidden_states, value_slices[i]) for i in range(self.config.pretraining_tp)]\n value_states = torch.cat(value_states, dim=-1)\n\n else:\n query_states = self.q_proj(hidden_states)\n 
#20230803 K改为T\n t_states = self.t_proj(hidden_states)\n # key_states = self.k_proj(hidden_states)\n value_states = self.v_proj(hidden_states)\n\n query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)\n\n #20230803 T的定义\n t_states = t_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)\n t_states_1 = t_states[..., : t_states.shape[-1] // 2]\n t_states_2 = t_states[..., t_states.shape[-1] // 2 :]\n t_states = (t_states_1+t_states_2)/2\n t_states = F.relu(t_states)\n t_states = torch.cat((t_states, t_states), dim=-1)\n\n # key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)\n value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)\n\n kv_seq_len = t_states.shape[-2]\n if past_key_value is not None:\n kv_seq_len += past_key_value[0].shape[-2]\n cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)\n query_rot, t_states = apply_rotary_pos_emb(query_states, t_states, cos, sin, position_ids)\n\n if past_key_value is not None:\n # reuse k, v, self_attention\n t_states = torch.cat([past_key_value[0], t_states], dim=2)\n value_states = torch.cat([past_key_value[1], value_states], dim=2)\n\n past_key_value = (t_states, value_states) if use_cache else None\n\n # repeat k/v heads if n_kv_heads < n_heads\n t_states = repeat_kv(t_states, self.num_key_value_groups)\n value_states = repeat_kv(value_states, self.num_key_value_groups)\n\n attn_weights = contract(\n 'bpnd,bpsd,bpnd->bpns',\n query_states, # [b,p,sq,d]\n t_states, # [b,p,sk,d]\n query_rot, # [b,p,sq,d]\n backend='torch'\n ) / math.sqrt(self.head_dim)\n # attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)\n\n if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):\n raise ValueError(\n f\"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is\"\n f\" {attn_weights.size()}\"\n )\n\n if attention_mask is not None:\n if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):\n raise ValueError(\n f\"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}\"\n )\n attn_weights = attn_weights + attention_mask\n\n # upcast attention to fp32\n attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)\n attn_output = torch.matmul(attn_weights, value_states)\n\n if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):\n raise ValueError(\n f\"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is\"\n f\" {attn_output.size()}\"\n )\n\n attn_output = attn_output.transpose(1, 2).contiguous()\n attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)\n\n if self.config.pretraining_tp > 1:\n attn_output = attn_output.split(self.hidden_size // self.config.pretraining_tp, dim=2)\n o_proj_slices = self.o_proj.weight.split(self.hidden_size // self.config.pretraining_tp, dim=1)\n attn_output = sum([F.linear(attn_output[i], o_proj_slices[i]) for i in range(self.config.pretraining_tp)])\n else:\n attn_output = self.o_proj(attn_output)\n\n if not output_attentions:\n attn_weights = None\n\n return attn_output, attn_weights, past_key_value"
},
{
"identifier": "LlamaMLP",
"path": "model/llama/modeling_llama.py",
"snippet": "class LlamaMLP(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.config = config\n self.hidden_size = config.hidden_size\n self.intermediate_size = config.intermediate_size\n self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)\n self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)\n self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)\n self.act_fn = ACT2FN[config.hidden_act]\n\n def forward(self, x):\n if self.config.pretraining_tp > 1:\n slice = self.intermediate_size // self.config.pretraining_tp\n gate_proj_slices = self.gate_proj.weight.split(slice, dim=0)\n up_proj_slices = self.up_proj.weight.split(slice, dim=0)\n down_proj_slices = self.down_proj.weight.split(slice, dim=1)\n\n gate_proj = torch.cat(\n [F.linear(x, gate_proj_slices[i]) for i in range(self.config.pretraining_tp)], dim=-1\n )\n up_proj = torch.cat([F.linear(x, up_proj_slices[i]) for i in range(self.config.pretraining_tp)], dim=-1)\n\n intermediate_states = (self.act_fn(gate_proj) * up_proj).split(slice, dim=2)\n down_proj = [\n F.linear(intermediate_states[i], down_proj_slices[i]) for i in range(self.config.pretraining_tp)\n ]\n down_proj = sum(down_proj)\n else:\n down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))\n\n return down_proj"
},
{
"identifier": "PeftModel",
"path": "model/peft/modeling_peft.py",
"snippet": "class AntPeftForCausalLM(PeftModelForCausalLM):\nclass AntPeftForEmbedding(PeftModel):\n def __init__(self, model, peft_config: PeftConfig, adapter_name: str = \"default\"):\n def set_route_id(self, route_id: int):\n def expand_external_router(self, path: str):\n def forward(\n self,\n input_ids=None,\n position_ids=None,\n attention_mask=None,\n inputs_embeds=None,\n labels=None,\n route_id: int = 0,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n **kwargs,\n ):\n def save_pretrained(self, save_directory, **kwargs):\n def load_adapter(self, model_id, adapter_name, is_trainable=False, **kwargs):\n def from_pretrained(\n cls,\n model,\n model_id: str,\n adapter_name: str = \"default\",\n is_trainable: bool = False,\n resume_from_checkpoint: bool = False,\n **kwargs\n ):\n def __init__(self, model, peft_config: PeftConfig, adapter_name: str = \"default\"):\n def set_route_id(self, route_id: int):\n def expand_external_router(self, path: str):\n def forward(\n self,\n query_ids: torch.Tensor,\n query_position_ids: torch.Tensor = None,\n query_attention_mask: torch.Tensor = None,\n query_mask: torch.Tensor = None,\n passage_ids: torch.Tensor = None,\n passage_position_ids: torch.Tensor = None,\n passage_attention_mask: torch.Tensor = None,\n passage_mask: torch.Tensor = None,\n route_id: int = 0,\n inputs_embeds=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n **kwargs,\n ):\n def save_pretrained(self, save_directory, **kwargs):\n def load_adapter(self, model_id, adapter_name, is_trainable=False, **kwargs):"
}
] | import datetime
import json
import logging
import math
import os
import random
import re
import shutil
import time
import warnings
import gc
import numpy as np
import atorch
import torch
from functools import partial
from pathlib import Path
from deepspeed.ops.adam import DeepSpeedCPUAdam
from torch.distributed.fsdp import FullStateDictConfig
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp import StateDictType
from torch.optim.lr_scheduler import LambdaLR, CosineAnnealingLR, CosineAnnealingWarmRestarts
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from torch.utils.tensorboard import SummaryWriter
from tqdm.auto import tqdm
from transformers import get_scheduler as get_scheduler_trans
from transformers.modeling_utils import PreTrainedModel, unwrap_model
from transformers.trainer import (
OPTIMIZER_NAME,
SCHEDULER_NAME,
TRAINER_STATE_NAME,
TRAINING_ARGS_NAME
)
from transformers.trainer_pt_utils import reissue_pt_warnings
from transformers.trainer_utils import (
PREFIX_CHECKPOINT_DIR,
)
from transformers.utils import WEIGHTS_NAME
from torch.nn import CrossEntropyLoss
from utils.common_utils import print_rank_0, get_tflops_megatron, get_computation_speed, TASK2ID, ID2TASK, EarlyStopping, logger
from utils.auto_accelerate_utils import FAMO, get_ltor_masks_and_position_ids, SelfPacedStatus
from atorch.auto import auto_accelerate
from atorch.utils.version import torch_version
from model.gpt_neox.modeling_gpt_neox import GPTNeoXLayer, GPTNeoXAttention, GPTNeoXMLP
from model.llama.modeling_llama import LlamaDecoderLayer, LlamaAttention, LlamaMLP
from model.glm.modeling_glm import GLMBlock
from torch.cuda.amp import GradScaler
from apex.optimizers import FusedSGD
from model.peft.modeling_peft import PeftModel | 9,788 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
HYPER_PARAMETER_NAME = 'hyper_parameters.json'
ATORCH_CHECKPOINT_NAME = 'atorch_checkpoint.bin'
EPOCH_CHECKPOINT_NAME = 'epoch'
FAMO_CHECKPOINT_NAME = 'famo_checkpoint'
EMA_CHECKPOINT_NAME = 'ema_checkpoint'
# logger = logging.getLogger(__name__)
def is_local_main_process():
return atorch.local_rank() == 0
def is_global_main_process():
return atorch.rank() == 0
def has_inf_or_nan(x):
try:
        # If x is half precision, the .float() incurs an additional deep copy, but it's
        # necessary if PyTorch's .sum() creates a one-element tensor of the same type as x
        # (which is true for some recent versions of PyTorch).
cpu_sum = float(x.float().sum())
# More efficient version that can be used if .sum() returns a Python scalar
# cpu_sum = float(x.sum())
except RuntimeError as instance:
        # We want to check whether `instance` is actually an overflow exception;
        # a RuntimeError could also come from a different error.
        # If so, we still want the exception to propagate.
if "value cannot be converted" not in instance.args[0]:
raise
return True
else:
if cpu_sum == float('inf') or cpu_sum == -float('inf') or cpu_sum != cpu_sum:
return True
return False
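# Illustrative sketch (hypothetical helper, assuming `model`/`optimizer` are a
# torch.nn.Module and a torch.optim.Optimizer created elsewhere): one way
# `has_inf_or_nan` can be used is to skip an optimizer step when a half-precision
# gradient overflows.
def _example_skip_step_on_overflow(model, optimizer):
    # Check every available gradient for inf/NaN before applying the update.
    found_overflow = any(p.grad is not None and has_inf_or_nan(p.grad) for p in model.parameters())
    if found_overflow:
        optimizer.zero_grad(set_to_none=True)  # drop this update entirely
    else:
        optimizer.step()
    return found_overflow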
def count_model_params(model):
trainable_params = 0
all_params = 0
for param in model.parameters():
num_params = param.numel()
all_params += num_params
if param.requires_grad:
trainable_params += num_params
return all_params, trainable_params
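# Illustrative usage (minimal sketch, assuming `model` is a torch.nn.Module):
#   all_params, trainable_params = count_model_params(model)
#   print_rank_0(f"trainable params: {trainable_params:,} / {all_params:,} "
#                f"({trainable_params / max(all_params, 1):.2%})")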
class AtorchArguments:
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
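# AtorchArguments is a plain attribute bag: every keyword becomes an attribute, e.g.
# (hypothetical fields) `AtorchArguments(lr=1e-4, weight_decay=0.0).lr == 1e-4`.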
def get_linear_schedule_with_log_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
def lr_lambda(current_step: int):
inverse_log_warm_up = 1.0 / math.log(num_warmup_steps)
if current_step == 0:
return 0.0
if current_step < num_warmup_steps:
return inverse_log_warm_up * math.log(current_step)
return max(
0.0, float(num_training_steps - current_step) /
float(max(1, num_training_steps - num_warmup_steps))
)
return LambdaLR(optimizer, lr_lambda, last_epoch)
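# Note: the multiplier above grows as log(current_step) / log(num_warmup_steps) during
# warmup (reaching 1.0 exactly at `num_warmup_steps`), then decays linearly to 0.0 at
# `num_training_steps`, i.e. a linear-decay schedule with a logarithmic warmup.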
def get_scheduler(name, optimizer, num_warmup_steps, num_training_steps):
scheduler_map = {
'log_warmup_linear_decay': get_linear_schedule_with_log_warmup}
try:
lr_scheduler = get_scheduler_trans(
name, optimizer, num_warmup_steps, num_training_steps)
return lr_scheduler
except Exception:
schedule_func = scheduler_map[name]
return schedule_func(optimizer, num_warmup_steps, num_training_steps)
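# Illustrative sketch (hypothetical helper): `get_scheduler` first tries
# `transformers.get_scheduler`, so standard names such as "linear" or "cosine" work,
# while custom names like "log_warmup_linear_decay" fall back to `scheduler_map`.
def _example_build_optimizer_and_scheduler(model):
    optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
    lr_scheduler = get_scheduler("log_warmup_linear_decay", optimizer,
                                 num_warmup_steps=1_000, num_training_steps=100_000)
    return optimizer, lr_scheduler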
class AtorchTrainer:
def __init__(self,
model,
args,
train_dataset,
valid_dataset,
tokenizer=None,
callbacks=None,
no_save_atorch_checkpoint=None,
save_pytorch_model_bin_checkpoint=True,
train_peft=False,
rank=0,
max_shard_size='10GB',
files_to_save=None,
args_to_save=None,
data_collator=None,
my_loss_func=None,
**kwargs,
):
self.args = args
self.TASK2ID = TASK2ID
| #!/usr/bin/env python
# coding=utf-8
# Copyright (c) 2023 Ant Group. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
HYPER_PARAMETER_NAME = 'hyper_parameters.json'
ATORCH_CHECKPOINT_NAME = 'atorch_checkpoint.bin'
EPOCH_CHECKPOINT_NAME = 'epoch'
FAMO_CHECKPOINT_NAME = 'famo_checkpoint'
EMA_CHECKPOINT_NAME = 'ema_checkpoint'
# logger = logging.getLogger(__name__)
def is_local_main_process():
return atorch.local_rank() == 0
def is_global_main_process():
return atorch.rank() == 0
def has_inf_or_nan(x):
try:
        # If x is half precision, the .float() incurs an additional deep copy, but it's
        # necessary if PyTorch's .sum() creates a one-element tensor of the same type as x
        # (which is true for some recent versions of PyTorch).
cpu_sum = float(x.float().sum())
# More efficient version that can be used if .sum() returns a Python scalar
# cpu_sum = float(x.sum())
except RuntimeError as instance:
        # We want to check whether `instance` is actually an overflow exception;
        # a RuntimeError could also come from a different error.
        # If so, we still want the exception to propagate.
if "value cannot be converted" not in instance.args[0]:
raise
return True
else:
if cpu_sum == float('inf') or cpu_sum == -float('inf') or cpu_sum != cpu_sum:
return True
return False
def count_model_params(model):
trainable_params = 0
all_params = 0
for param in model.parameters():
num_params = param.numel()
all_params += num_params
if param.requires_grad:
trainable_params += num_params
return all_params, trainable_params
class AtorchArguments:
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
def get_linear_schedule_with_log_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
def lr_lambda(current_step: int):
inverse_log_warm_up = 1.0 / math.log(num_warmup_steps)
if current_step == 0:
return 0.0
if current_step < num_warmup_steps:
return inverse_log_warm_up * math.log(current_step)
return max(
0.0, float(num_training_steps - current_step) /
float(max(1, num_training_steps - num_warmup_steps))
)
return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_scheduler(name, optimizer, num_warmup_steps, num_training_steps):
scheduler_map = {
'log_warmup_linear_decay': get_linear_schedule_with_log_warmup}
try:
lr_scheduler = get_scheduler_trans(
name, optimizer, num_warmup_steps, num_training_steps)
return lr_scheduler
except Exception:
schedule_func = scheduler_map[name]
return schedule_func(optimizer, num_warmup_steps, num_training_steps)
class AtorchTrainer:
def __init__(self,
model,
args,
train_dataset,
valid_dataset,
tokenizer=None,
callbacks=None,
no_save_atorch_checkpoint=None,
save_pytorch_model_bin_checkpoint=True,
train_peft=False,
rank=0,
max_shard_size='10GB',
files_to_save=None,
args_to_save=None,
data_collator=None,
my_loss_func=None,
**kwargs,
):
self.args = args
self.TASK2ID = TASK2ID | self.ID2TASK = ID2TASK | 0 | 2023-11-02 01:37:01+00:00 | 12k |
bytedance/cryostar | projects/star/train_density.py | [
{
"identifier": "StarfileDataSet",
"path": "cryostar/utils/dataio.py",
"snippet": "class StarfileDataSet(Dataset):\n\n def __init__(self, cfg: StarfileDatasetConfig):\n super().__init__()\n self.cfg = cfg\n self.df = starfile.read(Path(cfg.starfile_path))\n\n if \"optics\" in self.df:\n optics_df = self.df[\"optics\"]\n particles_df = self.df[\"particles\"]\n else:\n optics_df = None\n particles_df = self.df\n self.particles_df = particles_df\n\n if cfg.apix is None:\n if optics_df is not None and \"rlnImagePixelSize\" in optics_df:\n self.apix = float(optics_df[\"rlnImagePixelSize\"][0])\n print(f\"Infer dataset apix={self.apix} from first optic group.\")\n elif \"rlnDetectorPixelSize\" in particles_df and \"rlnMagnification\" in particles_df:\n self.apix = float(particles_df[\"rlnDetectorPixelSize\"][0] / particles_df[\"rlnMagnification\"][0] * 1e4)\n print(f\"Infer dataset apix={self.apix} from first particle meta data.\")\n else:\n raise AttributeError(\"Cannot parse apix from starfile, please set it in config by hand.\")\n else:\n self.apix = cfg.apix\n\n if cfg.side_shape is None:\n tmp_mrc_path = osp.join(cfg.dataset_dir, particles_df[\"rlnImageName\"][0].split('@')[-1])\n with mrcfile.mmap(tmp_mrc_path, mode=\"r\", permissive=True) as m:\n self.side_shape = m.data.shape[-1]\n print(f\"Infer dataset side_shape={self.side_shape} from the 1st particle.\")\n else:\n self.side_shape = cfg.side_shape\n\n self.num_proj = len(particles_df)\n\n self.down_side_shape = self.side_shape\n if cfg.down_side_shape is not None:\n self.down_side_shape = cfg.down_side_shape\n\n if cfg.mask_rad is not None:\n self.mask = Mask(self.down_side_shape, cfg.mask_rad)\n\n self.f_mu = None\n self.f_std = None\n\n def __len__(self):\n return self.num_proj\n\n def estimate_normalization(self):\n if self.f_mu is None and self.f_std is None:\n f_sub_data = []\n # I have checked that the standard deviation of 10/100/1000 particles is similar\n for i in range(0, len(self), len(self) // 100):\n f_sub_data.append(self[i][\"fproj\"])\n f_sub_data = torch.cat(f_sub_data, dim=0)\n # self.f_mu = torch.mean(f_sub_data)\n self.f_mu = 0.0 # just follow cryodrgn\n self.f_std = torch.std(f_sub_data).item()\n else:\n raise Exception(\"The normalization factor has been estimated!\")\n\n def __getitem__(self, idx):\n item_row = self.particles_df.iloc[idx]\n try:\n img_name_raw = item_row[\"rlnImageName\"]\n in_mrc_idx, img_name = item_row[\"rlnImageName\"].split(\"@\")\n in_mrc_idx = int(in_mrc_idx) - 1\n mrc_path = osp.join(self.cfg.dataset_dir, img_name)\n with mrcfile.mmap(mrc_path, mode=\"r\", permissive=True) as mrc:\n if mrc.data.ndim > 2:\n proj = torch.from_numpy(np.array(mrc.data[in_mrc_idx])).float() * self.cfg.scale_images\n else:\n # the mrcs file can contain only one particle\n proj = torch.from_numpy(np.array(mrc.data)).float() * self.cfg.scale_images\n\n # get (1, side_shape, side_shape) proj\n if len(proj.shape) == 2:\n proj = proj[None, :, :] # add a dummy channel (for consistency w/ img fmt)\n else:\n assert len(proj.shape) == 3 and proj.shape[0] == 1 # some starfile already have a dummy channel\n\n # down-sample\n if self.down_side_shape != self.side_shape:\n if self.cfg.down_method == \"interp\":\n proj = tvf.resize(proj, [self.down_side_shape, ] * 2, antialias=True)\n elif self.cfg.down_method == \"fft\":\n proj = downsample_2d(proj[0, :, :], self.down_side_shape)[None, :, :]\n else:\n raise NotImplementedError\n\n if self.cfg.mask_rad is not None:\n proj = self.mask(proj)\n\n except Exception as e:\n print(f\"WARNING: Particle image {img_name_raw} invalid! 
Setting to zeros.\")\n print(e)\n proj = torch.zeros(1, self.down_side_shape, self.down_side_shape)\n\n if self.cfg.power_images != 1.0:\n proj *= self.cfg.power_images\n\n # Generate CTF from CTF paramaters\n defocusU = torch.from_numpy(np.array(item_row[\"rlnDefocusU\"] / 1e4, ndmin=2)).float()\n defocusV = torch.from_numpy(np.array(item_row[\"rlnDefocusV\"] / 1e4, ndmin=2)).float()\n angleAstigmatism = torch.from_numpy(np.radians(np.array(item_row[\"rlnDefocusAngle\"], ndmin=2))).float()\n\n # Read \"GT\" orientations\n if self.cfg.ignore_rots:\n rotmat = torch.eye(3).float()\n else:\n # yapf: disable\n rotmat = torch.from_numpy(euler_angles2matrix(\n np.radians(-item_row[\"rlnAngleRot\"]),\n # np.radians(particle[\"rlnAngleTilt\"]) * (-1 if self.cfg.invert_hand else 1),\n np.radians(-item_row[\"rlnAngleTilt\"]),\n np.radians(-item_row[\"rlnAnglePsi\"]))\n ).float()\n # yapf: enable\n\n # Read \"GT\" shifts\n if self.cfg.ignore_trans:\n shiftX = torch.tensor([0.])\n shiftY = torch.tensor([0.])\n else:\n # support early starfile formats\n # Particle translations used to be in pixels (rlnOriginX and rlnOriginY) but this changed to Angstroms\n # (rlnOriginXAngstrom and rlnOriginYAngstrom) in relion 3.1.\n # https://relion.readthedocs.io/en/release-3.1/Reference/Conventions.html\n if \"rlnOriginXAngst\" in item_row:\n shiftX = torch.from_numpy(np.array(item_row[\"rlnOriginXAngst\"], dtype=np.float32))\n shiftY = torch.from_numpy(np.array(item_row[\"rlnOriginYAngst\"], dtype=np.float32))\n else:\n shiftX = torch.from_numpy(np.array(item_row[\"rlnOriginX\"] * self.apix, dtype=np.float32))\n shiftY = torch.from_numpy(np.array(item_row[\"rlnOriginY\"] * self.apix, dtype=np.float32))\n\n fproj = primal_to_fourier_2d(proj)\n\n if self.f_mu is not None:\n fproj = (fproj - self.f_mu) / self.f_std\n proj = fourier_to_primal_2d(fproj).real\n\n in_dict = {\n \"proj\": proj,\n \"rotmat\": rotmat,\n \"defocusU\": defocusU,\n \"defocusV\": defocusV,\n \"shiftX\": shiftX,\n \"shiftY\": shiftY,\n \"angleAstigmatism\": angleAstigmatism,\n \"idx\": torch.tensor(idx, dtype=torch.long),\n \"fproj\": fproj,\n \"imgname_raw\": img_name_raw\n }\n\n if \"rlnClassNumber\" in item_row:\n in_dict[\"class_id\"] = item_row[\"rlnClassNumber\"]\n\n return in_dict"
},
{
"identifier": "StarfileDatasetConfig",
"path": "cryostar/utils/dataio.py",
"snippet": "class StarfileDatasetConfig:\n dataset_dir: str\n starfile_path: str\n # if is not specified, the following apix, and side_shape will be inferred from starfile\n apix: float = None\n side_shape: int = None\n # down-sample the original image or not\n down_side_shape: int = None\n down_method: str = \"interp\"\n # apply a circular mask on input image or not\n mask_rad: float = None\n # change image values\n scale_images: float = 1.0\n power_images: float = field(\n default=1.0,\n metadata={\"help\": \"Change the power of the signal by multiplying a constant number.\"})\n # ignore pose from starfile or not\n ignore_trans: bool = False\n ignore_rots: bool = False\n # invert_hand: bool = field(\n # default=False,\n # metadata={\"help\": \"Invert handedness when reading relion data.\"})"
},
{
"identifier": "ImplicitFourierVolume",
"path": "cryostar/nerf/volume_utils.py",
"snippet": "class ImplicitFourierVolume(nn.Module):\n\n def __init__(self, z_dim, img_sz, mask_rad, params_implicit):\n \"\"\"\n Initialization of an implicit representation of the volume in Fourier space.\n\n Parameters\n ----------\n img_sz: int\n params_implicit: dictionary\n \"\"\"\n super().__init__()\n self.img_sz = img_sz\n self.z_dim = z_dim\n\n lincoords = torch.linspace(-1., 1., self.img_sz)\n [X, Y] = torch.meshgrid([lincoords, lincoords], indexing=\"ij\")\n coords = torch.stack([Y, X, torch.zeros_like(X)], dim=-1)\n coords = shift_coords(coords, 1., 1., 0, img_sz, img_sz, 1)\n self.register_buffer('plane_coords', coords.reshape(-1, 3))\n\n self.mask_rad = mask_rad\n if self.mask_rad != 1:\n mask = create_circular_mask(img_sz, img_sz, None, self.mask_rad / 2 * img_sz)\n plane_window_mask = torch.from_numpy(mask).reshape(-1)\n self.register_buffer('plane_window_mask', plane_window_mask)\n sphere_mask = torch.from_numpy(\n create_sphere_mask(self.img_sz, self.img_sz, self.img_sz, radius=self.mask_rad / 2 * self.img_sz)\n )\n self.register_buffer(\"sphere_mask\", sphere_mask)\n\n lincoords = torch.linspace(-1., 1., self.img_sz)\n [X, Y, Z] = torch.meshgrid([lincoords, lincoords, lincoords], indexing=\"ij\")\n coords = torch.stack([Z, Y, X], dim=-1)\n coords = shift_coords(coords, 1., 1., 1., img_sz, img_sz, img_sz)\n self.register_buffer('coords_3d', coords.reshape(-1, 3))\n\n self.fvol = FourierNet(net_type=params_implicit[\"net_type\"],\n z_dim=z_dim,\n pe_dim=params_implicit[\"pe_dim\"],\n pe_type=params_implicit[\"pe_type\"],\n D=params_implicit[\"D\"],\n hidden_dim=params_implicit[\"hidden\"],\n force_symmetry=params_implicit['force_symmetry'])\n\n def forward(self, z, rotmat):\n \"\"\"\n Generates a slice in Fourier space from a rotation matrix.\n\n Parameters\n ----------\n rotmat: torch.Tensor (B, 3, 3)\n\n Returns\n -------\n fplane: torch.Tensor (B, 1, img_sz, img_sz) (complex)\n \"\"\"\n if self.z_dim == 0:\n assert z is None\n batch_sz = rotmat.shape[0]\n\n with torch.autocast(\"cuda\", enabled=False):\n assert self.plane_coords.dtype == torch.float32\n assert rotmat.dtype == torch.float32\n rot_plane_coords = torch.bmm(self.plane_coords.repeat(batch_sz, 1, 1), rotmat) # B, img_sz^2, 3\n\n if self.mask_rad != 1:\n coords_mask = einops.repeat(self.plane_window_mask, \"num_coords -> bsz num_coords c3\", bsz=batch_sz, c3=3)\n rot_plane_coords = rot_plane_coords[coords_mask].reshape(batch_sz, -1, 3) # B, mask_num, 3\n\n fplane = self.fvol(z, rot_plane_coords) # B, _, 1/2\n\n if self.mask_rad != 1:\n unmask_fplane = fplane.new_zeros(batch_sz, self.img_sz * self.img_sz, self.fvol.out_features)\n value_mask = einops.repeat(self.plane_window_mask, \"num_coords -> bsz num_coords c\", bsz=batch_sz, c=self.fvol.out_features)\n unmask_fplane[value_mask] = fplane.reshape(-1)\n fplane = unmask_fplane.reshape(batch_sz, self.img_sz, self.img_sz, self.fvol.out_features)\n else:\n fplane = fplane.reshape(batch_sz, self.img_sz, self.img_sz, self.fvol.out_features)\n\n if self.fvol.out_features == 2:\n fplane = torch.view_as_complex(fplane) # B, img_sz, img_sz\n else:\n fplane = batch_hartley_to_fourier_2d(fplane.squeeze(-1)) # B, img_sz, img_sz\n\n fplane = fplane[:, None, :, :]\n return fplane\n\n def make_volume(self, z):\n with torch.no_grad():\n with torch.autocast(\"cuda\", enabled=False):\n coords = self.coords_3d.unsqueeze(0)\n num_coords = coords.shape[1]\n chunk_size = 128**2 * 32\n exp_fvol = []\n for sid in range(0, num_coords, chunk_size):\n eid = sid + chunk_size\n 
exp_fvol.append(self.fvol(z, coords[:, sid:eid]))\n exp_fvol = torch.cat(exp_fvol, dim=1)\n if self.fvol.out_features == 2:\n exp_fvol = exp_fvol.reshape(self.img_sz, self.img_sz, self.img_sz, 2)\n exp_fvol = torch.view_as_complex(exp_fvol)\n else:\n exp_fvol = exp_fvol.reshape(self.img_sz, self.img_sz, self.img_sz)\n exp_fvol = hartley_to_fourier_3d(exp_fvol)\n\n exp_fvol[~self.sphere_mask] = 0.0\n exp_vol = fourier_to_primal_3d(exp_fvol).real\n return exp_vol"
},
{
"identifier": "SpatialGridTranslate",
"path": "cryostar/utils/transforms.py",
"snippet": "class SpatialGridTranslate(torch.nn.Module):\n\n def __init__(self, D, device=None) -> None:\n super().__init__()\n self.D = D\n # yapf: disable\n coords = torch.stack(torch.meshgrid([\n torch.linspace(-1.0, 1.0, self.D, device=device),\n torch.linspace(-1.0, 1.0, self.D, device=device)],\n indexing=\"ij\"), dim=-1).reshape(-1, 2)\n # yapf: enable\n self.register_buffer(\"coords\", coords)\n\n def transform(self, images: torch.Tensor, trans: torch.Tensor):\n \"\"\"\n The `images` are stored in `YX` mode, so the `trans` is also `YX`!\n\n Supposing that D is 96, a point is at 0.0:\n - adding 48 should move it to the right corner which is 1.0\n 1.0 = 0.0 + 48 / (96 / 2)\n - adding 96(>48) should leave it at 0.0\n 0.0 = 0.0 + 96 / (96 / 2) - 2.0\n - adding -96(<48) should leave it at 0.0\n 0.0 = 0.0 - 96 / (96 / 2) + 2.0\n\n Input:\n images: (B, NY, NX)\n trans: (B, T, 2)\n\n Returns:\n images: (B, T, NY, NX)\n \"\"\"\n B, NY, NX = images.shape\n assert self.D == NY == NX\n assert images.shape[0] == trans.shape[0]\n\n grid = einops.rearrange(self.coords, \"N C2 -> 1 1 N C2\") - \\\n einops.rearrange(trans, \"B T C2 -> B T 1 C2\") * 2 / self.D\n grid = grid.flip(-1) # convert the first axis from slow-axis to fast-axis\n grid[grid >= 1] -= 2\n grid[grid <= -1] += 2\n grid.clamp_(-1.0, 1.0)\n\n sampled = F.grid_sample(einops.rearrange(images, \"B NY NX -> B 1 NY NX\"), grid, align_corners=True)\n\n sampled = einops.rearrange(sampled, \"B 1 T (NY NX) -> B T NY NX\", NX=NX, NY=NY)\n return sampled"
},
{
"identifier": "FourierGridTranslate",
"path": "cryostar/utils/transforms.py",
"snippet": "class FourierGridTranslate(torch.nn.Module):\n \"\"\"\n DFT's translation is:\n `f(x - x0, y - y0) <=> F(u, v) exp(-2 j \\pi (x0 u + y0 v) / N )`\n where `x, y, u, v` all have a range of `N`, so `(x0 u + y0 v) / N \\in (0, N)`\n\n Here we initialize the `u, v` coordinates between `(-0.5, 0.5)` so that the \n range is 1, where the `1/N` term can be ignored.\n\n See also: https://dsp.stackexchange.com/questions/40228/translation-property-of-2-d-discrete-fourier-transform\n\n Important notes:\n If `N=4`, the coordinates u will be `[-0.5, -0.17, 0.17, 0.5]`, but the \n `fft`ed image's frequency is `[-0.50, -0.25, 0.00, 0.25]`, so we have to \n add some corrections:\n - right-shift `u` to be `[-0.50, -0.25, 0.00, 0.25]`\n - perform multiplication\n\n \"\"\"\n\n def __init__(self, D, device=None) -> None:\n super().__init__()\n self.D = D\n # yapf: disable\n coords = torch.stack(torch.meshgrid([\n torch.linspace(-1.0, 1.0, self.D, device=device),\n torch.linspace(-1.0, 1.0, self.D, device=device)],\n indexing=\"ij\"), dim=-1).reshape(-1, 2) / 2\n # yapf: enable\n coords = shift_coords(coords, 0.5, 0.5, None, self.D, self.D, None, False)\n self.register_buffer(\"coords\", coords)\n\n def transform(self, images: torch.Tensor, trans: torch.Tensor):\n \"\"\"\n The `images` are stored in `YX` mode, so the `trans` is also `YX`!\n\n Input:\n images: (B, NY, NX)\n trans: (B, T, 2)\n\n Returns:\n images: (B, T, NY, NX)\n \"\"\"\n B, NY, NX = images.shape\n assert self.D == NY == NX\n assert images.shape[0] == trans.shape[0]\n images = einops.rearrange(images, \"B NY NX -> B 1 (NY NX)\")\n delta = trans @ self.coords.t() * -2j * torch.pi\n images_trans = torch.exp(delta) * images\n images_trans = einops.rearrange(images_trans, \"B T (NY NX) -> B T NY NX\", NY=self.D, NX=self.D)\n return images_trans"
},
{
"identifier": "CTFRelion",
"path": "cryostar/utils/ctf_utils.py",
"snippet": "class CTFRelion(CTFBase):\n \"\"\"\n BUG: There are two bugs in this file:\n 1. `self.angleFrequency` has some error for even-sized grid.\n 2. `local_defocus` in `get_ctf()` has some error, `angleAstigmatism` should be\n replaced with `defocusU - defocusV`.\n\n The bugs will not affect real-world data too much. But you may encounter some issues\n on simulated datasets. Use CTFCryoDRGN instead.\n \"\"\"\n\n def __init__(self,\n size=257,\n resolution=0.8,\n kV=300.0,\n valueNyquist=1.,\n defocusU=1.,\n defocusV=1.,\n angleAstigmatism=0.,\n cs=2.7,\n phasePlate=0.,\n amplitudeContrast=.1,\n bFactor=0.,\n num_particles=500,\n requires_grad=False,\n precompute=False,\n flip_images=False):\n super(CTFRelion, self).__init__(resolution, num_particles, requires_grad)\n self.requires_grad = requires_grad\n self.flip_images = flip_images\n\n self.size = size # in pixel\n self.resolution = resolution # in angstrom\n self.kV = kV # in kilovolt\n\n self.valueNyquist = valueNyquist\n self.phasePlate = phasePlate / 180. * np.pi # in radians (converted from degrees)\n self.amplitudeContrast = amplitudeContrast\n self.bFactor = bFactor\n\n self.frequency = 1. / self.resolution\n\n self.wavelength = self._get_ewavelength(self.kV * 1e3) # input in V (so we convert kv*1e3)\n\n angleAstigmatism = angleAstigmatism / 180. * np.pi # input in degree converted in radian\n cs = cs * 1e7 # input in mm converted in angstrom\n # the angleAstigmatism, defocusU, defocusV and cs are nn.Parameter of size (N, 1, 1)\n self.angleAstigmatism = nn.Parameter(angleAstigmatism * torch.ones((num_particles, 1, 1), dtype=torch.float32),\n requires_grad=requires_grad)\n self.cs = nn.Parameter(cs * torch.ones((num_particles, 1, 1), dtype=torch.float32), requires_grad=requires_grad)\n self.defocusU = nn.Parameter(defocusU * torch.ones((num_particles, 1, 1), dtype=torch.float32),\n requires_grad=requires_grad)\n self.defocusV = nn.Parameter(defocusV * torch.ones((num_particles, 1, 1), dtype=torch.float32),\n requires_grad=requires_grad)\n\n self.precomputed_filters = precompute\n\n ax = torch.linspace(-1. / (2. * resolution), 1 / (2. * resolution), self.size)\n mx, my = torch.meshgrid(ax, ax, indexing=\"ij\")\n self.register_buffer(\"r2\", mx**2 + my**2)\n self.register_buffer(\"r\", torch.sqrt(self.r2))\n self.register_buffer(\"angleFrequency\", torch.atan2(my, mx))\n\n if not self.requires_grad and self.precomputed_filters:\n print(\"Precomputing hFourier in CTF\")\n self.register_buffer('hFourier', self.get_ctf(torch.arange(num_particles), num_particles))\n\n def _get_ewavelength(self, U):\n # assumes V as input, returns wavelength in angstrom\n h = scipy.constants.h\n e = scipy.constants.e\n c = scipy.constants.c\n m0 = scipy.constants.m_e\n\n return h / math.sqrt(2. * m0 * e * U) / math.sqrt(1 + e * U / (2 * m0 * c**2)) * 1e10\n\n def get_ctf(self, idcs, B, cpu_params={}, frequency_marcher=None):\n defocusU = self.defocusU[idcs, :, :]\n defocusV = self.defocusV[idcs, :, :]\n angleAstigmatism = self.angleAstigmatism[idcs, :, :]\n cs = self.cs[idcs, :, :]\n\n ac = self.amplitudeContrast\n pc = math.sqrt(1. - ac**2)\n K1 = np.pi / 2. 
* cs * self.wavelength**3\n K2 = np.pi * self.wavelength\n\n # Cut-off from frequency marcher\n if frequency_marcher is not None:\n self.size_after_fm = 2 * frequency_marcher.f + 1\n if self.size_after_fm > self.size:\n self.size_after_fm = self.size\n angleFrequency = frequency_marcher.cut_coords_plane(self.angleFrequency.reshape(\n self.size, self.size, 1)).reshape(self.size_after_fm, self.size_after_fm)\n r2 = frequency_marcher.cut_coords_plane(self.r2.reshape(self.size, self.size,\n 1)).reshape(self.size_after_fm, self.size_after_fm)\n else:\n self.size_after_fm = self.size\n angleFrequency = self.angleFrequency\n r2 = self.r2\n\n angle = angleFrequency - angleAstigmatism\n local_defocus = 1e4 * (defocusU + defocusV) / 2. + angleAstigmatism * torch.cos(2. * angle)\n\n gamma = K1 * r2**2 - K2 * r2 * local_defocus - self.phasePlate\n hFourier = -pc * torch.sin(gamma) + ac * torch.cos(gamma)\n\n if self.valueNyquist != 1:\n decay = np.sqrt(-np.log(self.valueNyquist)) * 2. * self.resolution\n envelope = torch.exp(-self.frequency * decay**2 * r2)\n hFourier *= envelope\n\n return hFourier\n\n def oversample_multiply_crop(self, x_fourier, hFourier):\n # we assume that the shape of the CTF is always going to be bigger\n # than the size of the input image\n input_sz = x_fourier.shape[-1]\n if input_sz != self.size_after_fm:\n x_primal = fourier_to_primal_2d(x_fourier)\n\n pad_len = (self.size_after_fm - x_fourier.shape[-1]) // 2 # here we assume even lengths\n p2d = (pad_len, pad_len, pad_len, pad_len)\n x_primal_padded = F.pad(x_primal, p2d, 'constant', 0)\n\n x_fourier_padded = primal_to_fourier_2d(x_primal_padded)\n\n x_fourier_padded_filtered = x_fourier_padded * hFourier[:, None, :, :]\n return x_fourier_padded_filtered[..., pad_len:-pad_len, pad_len:-pad_len]\n else:\n return x_fourier * hFourier[:, None, :, :]\n\n def get_cpu_params(self, idcs, ctf_params, flip=False):\n batch_size = idcs.shape[0]\n self.defocusU[idcs, :, :] = ctf_params['defocusU'][:batch_size] if not flip else\\\n ctf_params['defocusU'][batch_size:]\n self.defocusV[idcs, :, :] = ctf_params['defocusV'][:batch_size] if not flip else\\\n ctf_params['defocusV'][batch_size:]\n self.angleAstigmatism[idcs, :, :] = ctf_params['angleAstigmatism'][:batch_size] if not flip else\\\n ctf_params['angleAstigmatism'][batch_size:]\n cpu_params = {}\n return cpu_params\n\n def forward(self, x_fourier, idcs=0, ctf_params={}, mode='gt', frequency_marcher=None):\n # This is when we want to prescribe parameters for the CTF\n if x_fourier.dim() == 3:\n x_fourier = x_fourier[None, ...]\n # x_fourier: B, 1, S, S\n batch_size = len(idcs)\n cpu_params = {}\n if ctf_params:\n cpu_params = self.get_cpu_params(idcs, ctf_params, flip=False)\n\n # if new params for the CTF have been prescribed or we are optimizing it\n # then request the evaluation of the CTF\n if not ctf_params and self.precomputed_filters and not self.requires_grad:\n hFourier = self.hFourier[idcs, :, :]\n else:\n hFourier = self.get_ctf(idcs, batch_size, cpu_params=cpu_params, frequency_marcher=frequency_marcher)\n\n if self.flip_images:\n flipped_hFourier = torch.flip(hFourier, [1, 2])\n\n hFourier = torch.cat([hFourier, flipped_hFourier], dim=0)\n\n return self.oversample_multiply_crop(x_fourier, hFourier)"
},
{
"identifier": "CTFCryoDRGN",
"path": "cryostar/utils/ctf_utils.py",
"snippet": "class CTFCryoDRGN(CTFBase):\n\n def __init__(self,\n size,\n resolution,\n num_particles=None,\n kV=300,\n cs=2.0,\n amplitudeContrast=0.1,\n requires_grad=False):\n super(CTFBase, self).__init__()\n self.size = size\n self.resolution = resolution\n self.requires_grad = requires_grad\n self.kV = kV\n self.cs = cs\n self.ac = amplitudeContrast\n # ax = torch.linspace(-1. / (2. * resolution), 1 / (2. * resolution), self.size)\n # mx, my = torch.meshgrid(ax, ax, indexing=\"ij\")\n ax = torch.fft.fftshift(torch.fft.fftfreq(self.size, self.resolution))\n mx, my = torch.meshgrid(ax, ax, indexing=\"xy\")\n freqs = torch.stack([mx.flatten(), my.flatten()], 1)\n self.register_buffer(\"freqs\", freqs)\n\n def get_ctf(self, ctf_params={}):\n bsz = len(ctf_params[\"defocusU\"])\n device = self.freqs.device\n hFourier = compute_ctf(freqs=self.freqs.repeat(bsz, 1, 1),\n dfu=(ctf_params[\"defocusU\"] * 1e4).squeeze(1),\n dfv=(ctf_params[\"defocusV\"] * 1e4).squeeze(1),\n dfang=torch.rad2deg(ctf_params[\"angleAstigmatism\"]).squeeze(1),\n volt=torch.tensor(self.kV, device=device).repeat(bsz, 1),\n cs=torch.tensor(self.cs, device=device).repeat(bsz, 1),\n w=torch.tensor(self.ac, device=device).repeat(bsz,\n 1)).reshape(bsz, self.size, self.size)\n return hFourier\n\n def forward(self, x_fourier, idcs=0, ctf_params={}, mode='gt', frequency_marcher=None):\n hFourier = -self.get_ctf(ctf_params)\n return x_fourier * hFourier[:, None, :, :]"
},
{
"identifier": "fourier_to_primal_2d",
"path": "cryostar/utils/fft_utils.py",
"snippet": "def fourier_to_primal_2d(f: torch.Tensor) -> torch.Tensor:\n f = torch.fft.ifftshift(f, dim=(-2, -1))\n return torch.fft.fftshift(torch.fft.ifftn(f, s=(f.shape[-2], f.shape[-1]), dim=(-2, -1)), dim=(-2, -1))"
},
{
"identifier": "primal_to_fourier_2d",
"path": "cryostar/utils/fft_utils.py",
"snippet": "@torch.autocast(\"cuda\")\ndef primal_to_fourier_2d(r: torch.Tensor) -> torch.Tensor:\n with torch.autocast(\"cuda\", enabled=False):\n r = torch.fft.ifftshift(r.float(), dim=(-2, -1))\n f = torch.fft.fftshift(torch.fft.fftn(r, s=(r.shape[-2], r.shape[-1]), dim=(-2, -1)), dim=(-2, -1))\n return f"
},
{
"identifier": "sample_along_pca",
"path": "cryostar/utils/latent_space_utils.py",
"snippet": "def sample_along_pca(z: np.ndarray, pca_dim=1, num=5) -> np.ndarray:\n assert isinstance(z, np.ndarray)\n pc, pca = run_pca(z)\n start = np.percentile(pc[:, pca_dim - 1], 5)\n stop = np.percentile(pc[:, pca_dim - 1], 95)\n z_pc_traj = get_pc_traj(pca, z.shape[1], num, pca_dim, start, stop)\n point, point_id = get_nearest_point(z, z_pc_traj)\n return point, point_id"
},
{
"identifier": "get_nearest_point",
"path": "cryostar/utils/latent_space_utils.py",
"snippet": "def get_nearest_point(data: np.ndarray, query: np.ndarray) -> Tuple[npt.NDArray[np.float32], np.ndarray]:\n \"\"\"\n Find closest point in @data to @query\n Return datapoint, index\n \"\"\"\n ind = cdist(query, data).argmin(axis=1)\n return data[ind], ind"
},
{
"identifier": "cluster_kmeans",
"path": "cryostar/utils/latent_space_utils.py",
"snippet": "def cluster_kmeans(z: np.ndarray, K: int, on_data: bool = True, reorder: bool = True) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Cluster z by K means clustering\n Returns cluster labels, cluster centers\n If reorder=True, reorders clusters according to agglomerative clustering of cluster centers\n \"\"\"\n kmeans = KMeans(n_clusters=K, n_init=10, random_state=0, max_iter=10)\n labels = kmeans.fit_predict(z)\n centers = kmeans.cluster_centers_\n\n centers_ind = None\n if on_data:\n centers, centers_ind = get_nearest_point(z, centers)\n\n if reorder:\n # BUG from seaborn or scipy:\n # sns.clustermap only supports data with at least 2 dim\n if z.shape[1] == 1:\n centers = np.hstack([centers, np.zeros_like(centers)])\n g = sns.clustermap(centers)\n reordered = g.dendrogram_row.reordered_ind\n centers = centers[reordered]\n if centers_ind is not None:\n centers_ind = centers_ind[reordered]\n tmp = {k: i for i, k in enumerate(reordered)}\n labels = np.array([tmp[k] for k in labels])\n if z.shape[1] == 1:\n centers = centers[:, :1]\n return labels, centers"
},
{
"identifier": "pl_init_exp",
"path": "cryostar/utils/misc.py",
"snippet": "def set_seed(seed: int = 42):\ndef chain(arg, *funcs):\ndef convert_to_numpy(*args):\ndef CHECK_SHAPE(tensor, expected_shape):\ndef ASSERT_SHAPE(tensor, expected_shape):\ndef parse_mmengine_args(override_mode=\"default\"):\ndef flatten_nested_dict(nested: Union[dict, Config]) -> dict:\ndef warmup(warmup_step, lower=0.0, upper=1.0):\n def run(cur_step):\ndef init_mmengine_config(args):\ndef init_mmengine_exp(args,\n exp_prefix='',\n backup_list=None,\n inplace=True,\n work_dir_name=\"work_dirs\",\n project_name=\"cryostar\",\n tensorboard=False):\ndef _get_next_version(root_dir, dir_name_prefix):\ndef pl_init_exp(override_mode=\"default\",\n exp_prefix='',\n backup_list=None,\n inplace=False,\n work_dir_name=\"work_dirs\",\n project_name=\"cryostar\"):\ndef save_pdb(CAs, path, ref_pdb_path):\ndef load_CAs_from_pdb(file):\ndef load_NCaC_from_pdb(file):\ndef load_chain_A(pdb_path):\ndef points_to_pdb(path_to_save, points: np.ndarray):\ndef point_stack_to_pdb(path_to_save, point_stack: np.ndarray):\ndef find_rigid_alignment(A, B):\ndef batch_find_rigid_alignment(A, B):\ndef pretty_dict(x, precision=3):\ndef create_sphere_mask(d, h, w, center=None, radius=None) -> np.ndarray:\ndef create_circular_mask(h, w, center=None, radius=None) -> np.ndarray:\n H = A_c.T.mm(B_c)\n U, S, V = torch.svd(H)\n R = V.mm(U.T)\n H = einops.einsum(A_c, B_c, \"b n c1, b n c2 -> b c1 c2\")\n V = VmT.mT\n R = einops.einsum(V, U.transpose(2, 1), \"b c1 c2, b c2 c3 -> b c1 c3\")"
},
{
"identifier": "calc_kl_loss",
"path": "cryostar/utils/losses.py",
"snippet": "def calc_kl_loss(mu, log_var, free_bits, reduction=\"mean\"):\n kld_loss = -0.5 * (1 + log_var - mu.pow(2) - log_var.exp())\n # free bits\n kld_loss = torch.clamp(kld_loss, free_bits) # (bsz, z-dim)\n kld_loss = torch.mean(kld_loss, dim=1) # (bsz, )\n if reduction == \"mean\":\n kld_loss = torch.mean(kld_loss) # averaged over bsz x z-dim\n elif reduction == \"none\":\n kld_loss = kld_loss\n else:\n raise NotImplementedError\n return kld_loss"
},
{
"identifier": "VAEEncoder",
"path": "cryostar/utils/ml_modules.py",
"snippet": "class VAEEncoder(nn.Module):\n\n def __init__(self, in_dim: int, hidden_dim: Union[int, List[int]], out_dim: int, num_hidden_layers=3):\n super().__init__()\n self.in_dim = in_dim\n if isinstance(hidden_dim, int):\n self.hidden_dim = (hidden_dim, ) * num_hidden_layers\n elif isinstance(hidden_dim, (list, tuple)):\n assert len(hidden_dim) == num_hidden_layers\n self.hidden_dim = hidden_dim\n else:\n raise NotImplementedError\n self.out_dim = out_dim\n self.num_hidden_layers = num_hidden_layers\n\n self.input_layer = nn.Sequential(\n ResLinear(in_dim, self.hidden_dim[0]) if in_dim == self.hidden_dim[0] else Linear(\n in_dim, self.hidden_dim[0]), nn.ReLU(inplace=True))\n self.mlp = MLP(self.hidden_dim[:-1], self.hidden_dim[1:])\n\n self.mean_layer = Linear(self.hidden_dim[-1], out_dim)\n self.var_layer = Linear(self.hidden_dim[-1], out_dim)\n\n def forward(self, x):\n x = self.mlp(self.input_layer(x))\n mean = self.mean_layer(x)\n log_var = self.var_layer(x)\n return mean, log_var"
},
{
"identifier": "reparameterize",
"path": "cryostar/utils/ml_modules.py",
"snippet": "def reparameterize(mu, log_var):\n std = torch.exp(0.5 * log_var)\n eps = torch.randn_like(std)\n return mu + eps * std"
},
{
"identifier": "save_mrc",
"path": "cryostar/utils/mrc_tools.py",
"snippet": "def save_mrc(vol,\n path,\n voxel_size: Union[int, float, Tuple, np.recarray] = None,\n origin: Union[int, float, Tuple, np.recarray] = None):\n \"\"\"\n Save volumetric data to mrc file, set voxel_size, origin.\n See Also: https://mrcfile.readthedocs.io/en/stable/source/mrcfile.html#mrcfile.mrcobject.MrcObject.voxel_size\n Args:\n vol: density volume\n path: save path\n voxel_size: a single number, a 3-tuple (x, y ,z) or a modified version of the voxel_size array, default 1.\n origin: a single number, a 3-tuple (x, y ,z) or a modified version of the origin array, default 0.\n\n \"\"\"\n with mrcfile.new(path, overwrite=True) as m:\n m.set_data(vol)\n\n if voxel_size is not None:\n m.voxel_size = voxel_size\n\n if origin is not None:\n m.header.origin = origin"
}
] | import os
import os.path as osp
import einops
import lightning.pytorch as pl
import numpy as np
import torch
from lightning.pytorch.strategies import DDPStrategy
from lightning.pytorch.utilities import rank_zero_only
from torch.utils.data import DataLoader
from tqdm import tqdm
from mmengine import mkdir_or_exist
from cryostar.utils.dataio import StarfileDataSet, StarfileDatasetConfig
from cryostar.nerf.volume_utils import ImplicitFourierVolume
from cryostar.utils.transforms import SpatialGridTranslate, FourierGridTranslate
from cryostar.utils.ctf_utils import CTFRelion, CTFCryoDRGN
from cryostar.utils.fft_utils import (fourier_to_primal_2d, primal_to_fourier_2d)
from cryostar.utils.latent_space_utils import sample_along_pca, get_nearest_point, cluster_kmeans
from cryostar.utils.misc import (pl_init_exp, create_circular_mask, log_to_current, pretty_dict)
from cryostar.utils.losses import calc_kl_loss
from cryostar.utils.ml_modules import VAEEncoder, reparameterize
from cryostar.utils.mrc_tools import save_mrc
from miscs import infer_ctf_params_from_config | 10,366 |
log_to_current = rank_zero_only(log_to_current)
TASK_NAME = "density"
class CryoModel(pl.LightningModule):
def __init__(self, cfg, dataset):
super().__init__()
self.cfg = cfg
self.dataset = dataset
self.z_dim = cfg.model.z_dim
self.history_saved_dirs = []
if cfg.extra_input_data_attr.given_z is None and self.z_dim != 0:
if cfg.model.enc_space == "real":
self.encoder = VAEEncoder(self.cfg.data_process.down_side_shape**2,
cfg.model.hidden,
self.z_dim,
num_hidden_layers=4)
elif cfg.model.enc_space == "fourier":
self.encoder = VAEEncoder(2 * self.cfg.data_process.down_side_shape**2,
cfg.model.hidden,
self.z_dim,
num_hidden_layers=4)
else:
raise NotImplementedError
if cfg.model.shift_method == "interp":
self.translate = SpatialGridTranslate(self.cfg.data_process.down_side_shape, )
log_to_current("We will deprecate `model.shift_method=interp` in a future version, use `model.shift_method=fft` instead.")
elif cfg.model.shift_method == "fft":
self.f_translate = FourierGridTranslate(self.cfg.data_process.down_side_shape, )
else:
raise NotImplementedError
ctf_params = infer_ctf_params_from_config(cfg)
if cfg.model.ctf == "v1":
self.ctf = CTFRelion(**ctf_params, num_particles=len(dataset))
log_to_current("We will deprecate `model.ctf=v1` in a future version, use `model.ctf=v2` instead.")
elif cfg.model.ctf == "v2":
self.ctf = CTFCryoDRGN(**ctf_params, num_particles=len(dataset))
else:
raise NotImplementedError
log_to_current(ctf_params)
self.vol = ImplicitFourierVolume(
self.z_dim, self.cfg.data_process.down_side_shape, self.cfg.loss.mask_rad_for_image_loss, {
"net_type": cfg.model.net_type,
"pe_dim": self.cfg.data_process.down_side_shape,
"D": self.cfg.data_process.down_side_shape,
"pe_type": cfg.model.pe_type,
"force_symmetry": False,
"hidden": cfg.model.hidden,
})
|
log_to_current = rank_zero_only(log_to_current)
TASK_NAME = "density"
class CryoModel(pl.LightningModule):
def __init__(self, cfg, dataset):
super().__init__()
self.cfg = cfg
self.dataset = dataset
self.z_dim = cfg.model.z_dim
self.history_saved_dirs = []
if cfg.extra_input_data_attr.given_z is None and self.z_dim != 0:
if cfg.model.enc_space == "real":
self.encoder = VAEEncoder(self.cfg.data_process.down_side_shape**2,
cfg.model.hidden,
self.z_dim,
num_hidden_layers=4)
elif cfg.model.enc_space == "fourier":
self.encoder = VAEEncoder(2 * self.cfg.data_process.down_side_shape**2,
cfg.model.hidden,
self.z_dim,
num_hidden_layers=4)
else:
raise NotImplementedError
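            # Input width of the encoder above: in "real" space an image is flattened to
            # down_side_shape**2 values, while the "fourier" branch uses twice that, presumably
            # because the real and imaginary parts of the transformed image are concatenated.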
if cfg.model.shift_method == "interp":
self.translate = SpatialGridTranslate(self.cfg.data_process.down_side_shape, )
log_to_current("We will deprecate `model.shift_method=interp` in a future version, use `model.shift_method=fft` instead.")
elif cfg.model.shift_method == "fft":
self.f_translate = FourierGridTranslate(self.cfg.data_process.down_side_shape, )
else:
raise NotImplementedError
ctf_params = infer_ctf_params_from_config(cfg)
if cfg.model.ctf == "v1":
self.ctf = CTFRelion(**ctf_params, num_particles=len(dataset))
log_to_current("We will deprecate `model.ctf=v1` in a future version, use `model.ctf=v2` instead.")
elif cfg.model.ctf == "v2":
self.ctf = CTFCryoDRGN(**ctf_params, num_particles=len(dataset))
else:
raise NotImplementedError
log_to_current(ctf_params)
self.vol = ImplicitFourierVolume(
self.z_dim, self.cfg.data_process.down_side_shape, self.cfg.loss.mask_rad_for_image_loss, {
"net_type": cfg.model.net_type,
"pe_dim": self.cfg.data_process.down_side_shape,
"D": self.cfg.data_process.down_side_shape,
"pe_type": cfg.model.pe_type,
"force_symmetry": False,
"hidden": cfg.model.hidden,
}) | mask = create_circular_mask(self.cfg.data_process.down_side_shape, self.cfg.data_process.down_side_shape, None, | 12 | 2023-11-06 07:15:26+00:00 | 12k |
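A minimal, self-contained sketch of how an encoder such as the VAEEncoder above is typically paired with the reparameterize helper and a KL term during training (the shapes and names here are illustrative assumptions, not values taken from cryostar):
import torch

def toy_reparameterize_with_kl(mean: torch.Tensor, log_var: torch.Tensor):
    # Reparameterization trick, as in the reparameterize snippet above: z = mu + sigma * eps
    std = torch.exp(0.5 * log_var)
    z = mean + torch.randn_like(std) * std
    # Closed-form KL(N(mu, sigma^2) || N(0, 1)), summed over latent dims, averaged over the batch.
    kl = -0.5 * torch.mean(torch.sum(1 + log_var - mean.pow(2) - log_var.exp(), dim=1))
    return z, kl

# Made-up shapes: batch of 8 particles, latent dimension 8 (cfg.model.z_dim plays this role above).
z, kl = toy_reparameterize_with_kl(torch.zeros(8, 8), torch.zeros(8, 8))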
xyongLu/SBCFormer | main.py | [
{
"identifier": "Mixup",
"path": "mixup.py",
"snippet": "class Mixup:\n \"\"\" Mixup/Cutmix that applies different params to each element or whole batch\n\n Args:\n mixup_alpha (float): mixup alpha value, mixup is active if > 0.\n cutmix_alpha (float): cutmix alpha value, cutmix is active if > 0.\n cutmix_minmax (List[float]): cutmix min/max image ratio, cutmix is active and uses this vs alpha if not None.\n prob (float): probability of applying mixup or cutmix per batch or element\n switch_prob (float): probability of switching to cutmix instead of mixup when both are active\n mode (str): how to apply mixup/cutmix params (per 'batch', 'pair' (pair of elements), 'elem' (element)\n correct_lam (bool): apply lambda correction when cutmix bbox clipped by image borders\n label_smoothing (float): apply label smoothing to the mixed target tensor\n num_classes (int): number of classes for target\n \"\"\"\n def __init__(self, mixup_alpha=1., cutmix_alpha=0., cutmix_minmax=None, prob=1.0, switch_prob=0.5,\n mode='batch', correct_lam=True, label_smoothing=0.1, num_classes=1000):\n self.mixup_alpha = mixup_alpha\n self.cutmix_alpha = cutmix_alpha\n self.cutmix_minmax = cutmix_minmax\n if self.cutmix_minmax is not None:\n assert len(self.cutmix_minmax) == 2\n # force cutmix alpha == 1.0 when minmax active to keep logic simple & safe\n self.cutmix_alpha = 1.0\n self.mix_prob = prob\n self.switch_prob = switch_prob\n self.label_smoothing = label_smoothing\n self.num_classes = num_classes\n self.mode = mode\n self.correct_lam = correct_lam # correct lambda based on clipped area for cutmix\n self.mixup_enabled = True # set to false to disable mixing (intended tp be set by train loop)\n\n def _params_per_elem(self, batch_size):\n lam = np.ones(batch_size, dtype=np.float32)\n use_cutmix = np.zeros(batch_size, dtype=np.bool)\n if self.mixup_enabled:\n if self.mixup_alpha > 0. and self.cutmix_alpha > 0.:\n use_cutmix = np.random.rand(batch_size) < self.switch_prob\n lam_mix = np.where(\n use_cutmix,\n np.random.beta(self.cutmix_alpha, self.cutmix_alpha, size=batch_size),\n np.random.beta(self.mixup_alpha, self.mixup_alpha, size=batch_size))\n elif self.mixup_alpha > 0.:\n lam_mix = np.random.beta(self.mixup_alpha, self.mixup_alpha, size=batch_size)\n elif self.cutmix_alpha > 0.:\n use_cutmix = np.ones(batch_size, dtype=np.bool)\n lam_mix = np.random.beta(self.cutmix_alpha, self.cutmix_alpha, size=batch_size)\n else:\n assert False, \"One of mixup_alpha > 0., cutmix_alpha > 0., cutmix_minmax not None should be true.\"\n lam = np.where(np.random.rand(batch_size) < self.mix_prob, lam_mix.astype(np.float32), lam)\n return lam, use_cutmix\n\n def _params_per_batch(self):\n lam = 1.\n use_cutmix = False\n if self.mixup_enabled and np.random.rand() < self.mix_prob:\n if self.mixup_alpha > 0. 
and self.cutmix_alpha > 0.:\n use_cutmix = np.random.rand() < self.switch_prob\n lam_mix = np.random.beta(self.cutmix_alpha, self.cutmix_alpha) if use_cutmix else \\\n np.random.beta(self.mixup_alpha, self.mixup_alpha)\n elif self.mixup_alpha > 0.:\n lam_mix = np.random.beta(self.mixup_alpha, self.mixup_alpha)\n elif self.cutmix_alpha > 0.:\n use_cutmix = True\n lam_mix = np.random.beta(self.cutmix_alpha, self.cutmix_alpha)\n else:\n assert False, \"One of mixup_alpha > 0., cutmix_alpha > 0., cutmix_minmax not None should be true.\"\n lam = float(lam_mix)\n return lam, use_cutmix\n\n def _mix_elem(self, x):\n batch_size = len(x)\n lam_batch, use_cutmix = self._params_per_elem(batch_size)\n x_orig = x.clone() # need to keep an unmodified original for mixing source\n for i in range(batch_size):\n j = batch_size - i - 1\n lam = lam_batch[i]\n if lam != 1.:\n if use_cutmix[i]:\n (yl, yh, xl, xh), lam = cutmix_bbox_and_lam(\n x[i].shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam)\n x[i][:, yl:yh, xl:xh] = x_orig[j][:, yl:yh, xl:xh]\n lam_batch[i] = lam\n else:\n x[i] = x[i] * lam + x_orig[j] * (1 - lam)\n return torch.tensor(lam_batch, device=x.device, dtype=x.dtype).unsqueeze(1)\n\n def _mix_pair(self, x):\n batch_size = len(x)\n lam_batch, use_cutmix = self._params_per_elem(batch_size // 2)\n x_orig = x.clone() # need to keep an unmodified original for mixing source\n for i in range(batch_size // 2):\n j = batch_size - i - 1\n lam = lam_batch[i]\n if lam != 1.:\n if use_cutmix[i]:\n (yl, yh, xl, xh), lam = cutmix_bbox_and_lam(\n x[i].shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam)\n x[i][:, yl:yh, xl:xh] = x_orig[j][:, yl:yh, xl:xh]\n x[j][:, yl:yh, xl:xh] = x_orig[i][:, yl:yh, xl:xh]\n lam_batch[i] = lam\n else:\n x[i] = x[i] * lam + x_orig[j] * (1 - lam)\n x[j] = x[j] * lam + x_orig[i] * (1 - lam)\n lam_batch = np.concatenate((lam_batch, lam_batch[::-1]))\n return torch.tensor(lam_batch, device=x.device, dtype=x.dtype).unsqueeze(1)\n\n def _mix_batch(self, x):\n lam, use_cutmix = self._params_per_batch()\n if lam == 1.:\n return 1.\n if use_cutmix:\n (yl, yh, xl, xh), lam = cutmix_bbox_and_lam(\n x.shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam)\n x[:, :, yl:yh, xl:xh] = x.flip(0)[:, :, yl:yh, xl:xh]\n else:\n x_flipped = x.flip(0).mul_(1. - lam)\n x.mul_(lam).add_(x_flipped)\n return lam\n\n def __call__(self, x, target):\n assert len(x) % 2 == 0, 'Batch size should be even when using this'\n if self.mode == 'elem':\n lam = self._mix_elem(x)\n elif self.mode == 'pair':\n lam = self._mix_pair(x)\n else:\n lam = self._mix_batch(x)\n target = mixup_target(target, self.num_classes, lam, self.label_smoothing, x.device)\n return x, target"
},
{
"identifier": "build_dataset",
"path": "datasets.py",
"snippet": "def build_dataset(is_train, args):\n \n if args.data_set == 'CIFAR10':\n if is_train:\n transform = transforms.Compose([\n transforms.Resize(args.input_size),\n transforms.RandomCrop(args.input_size),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize(CIFAR10_DEFAULT_MEAN, CIFAR10_DEFAULT_STD)\n ])\n else:\n transform = transforms.Compose([\n transforms.Resize(args.input_size),\n transforms.ToTensor(),\n transforms.Normalize(CIFAR10_DEFAULT_MEAN, CIFAR10_DEFAULT_STD)\n ])\n \n dataset = datasets.CIFAR10(args.data_path, train=is_train, download=True, transform=transform)\n nb_classes = 10\n elif args.data_set == 'CIFAR100':\n if is_train:\n transform = transforms.Compose([\n transforms.Resize(args.input_size),\n transforms.RandomCrop(args.input_size),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize(CIFAR100_DEFAULT_MEAN, CIFAR100_DEFAULT_STD)\n ])\n else:\n transform = transforms.Compose([\n transforms.Resize(args.input_size),\n transforms.ToTensor(),\n transforms.Normalize(CIFAR100_DEFAULT_MEAN, CIFAR100_DEFAULT_STD)\n ])\n\n dataset = datasets.CIFAR100(args.data_path, train=is_train, download=True, transform=transform)\n nb_classes = 100\n elif args.data_set == 'IMNET':\n transform = build_transform(is_train, args)\n \n root = os.path.join(args.data_path, 'train' if is_train else 'val')\n dataset = datasets.ImageFolder(root, transform=transform)\n nb_classes = 1000\n elif args.data_set == 'INAT':\n transform = build_transform(is_train, args)\n\n dataset = INatDataset(args.data_path, train=is_train, year=2018,\n category=args.inat_category, transform=transform)\n nb_classes = dataset.nb_classes\n elif args.data_set == 'INAT19':\n transform = build_transform(is_train, args)\n\n dataset = INatDataset(args.data_path, train=is_train, year=2019,\n category=args.inat_category, transform=transform)\n nb_classes = dataset.nb_classes\n\n return dataset, nb_classes"
},
{
"identifier": "train_one_epoch",
"path": "engine.py",
"snippet": "def train_one_epoch(model: torch.nn.Module, criterion: DistillationLoss,\n data_loader: Iterable, optimizer: torch.optim.Optimizer,\n device: torch.device, epoch: int, loss_scaler, max_norm: float = 0,\n model_ema: Optional[ModelEma] = None, mixup_fn: Optional[Mixup] = None,\n set_training_mode=True):\n model.train(set_training_mode)\n metric_logger = utils.MetricLogger(delimiter=\" \")\n metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))\n header = 'Epoch: [{}]'.format(epoch)\n print_freq = 10\n\n for samples, targets in metric_logger.log_every(data_loader, print_freq, header):\n samples = samples.to(device, non_blocking=True)\n targets = targets.to(device, non_blocking=True)\n\n if mixup_fn is not None:\n samples, targets = mixup_fn(samples, targets)\n\n with torch.cuda.amp.autocast():\n outputs = model(samples)\n loss = criterion(samples, outputs, targets)\n\n loss_value = loss.item()\n\n if not math.isfinite(loss_value):\n print(\"Loss is {}, stopping training\".format(loss_value))\n sys.exit(1)\n\n optimizer.zero_grad()\n\n # this attribute is added by timm on one optimizer (adahessian)\n is_second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order\n loss_scaler(loss, optimizer, clip_grad=max_norm,\n parameters=model.parameters(), create_graph=is_second_order)\n\n torch.cuda.synchronize()\n if model_ema is not None:\n model_ema.update(model)\n\n metric_logger.update(loss=loss_value)\n metric_logger.update(lr=optimizer.param_groups[0][\"lr\"])\n # gather the stats from all processes\n metric_logger.synchronize_between_processes()\n print(\"Averaged stats:\", metric_logger)\n return {k: meter.global_avg for k, meter in metric_logger.meters.items()}"
},
{
"identifier": "evaluate",
"path": "engine.py",
"snippet": "@torch.no_grad()\ndef evaluate(data_loader, model, device):\n criterion = torch.nn.CrossEntropyLoss()\n\n metric_logger = utils.MetricLogger(delimiter=\" \")\n header = 'Test:'\n print_freq = 10\n\n # switch to evaluation mode\n model.eval()\n\n for images, target in metric_logger.log_every(data_loader, print_freq, header):\n images = images.to(device, non_blocking=True)\n target = target.to(device, non_blocking=True)\n\n # compute output\n with torch.cuda.amp.autocast():\n output = model(images)\n loss = criterion(output, target)\n\n acc1, acc5 = accuracy(output, target, topk=(1, 5))\n\n batch_size = images.shape[0]\n metric_logger.update(loss=loss.item())\n metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)\n metric_logger.meters['acc5'].update(acc5.item(), n=batch_size)\n # gather the stats from all processes\n metric_logger.synchronize_between_processes()\n print('* Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f} loss {losses.global_avg:.3f}'\n .format(top1=metric_logger.acc1, top5=metric_logger.acc5, losses=metric_logger.loss))\n\n return {k: meter.global_avg for k, meter in metric_logger.meters.items()}"
},
{
"identifier": "DistillationLoss",
"path": "losses.py",
"snippet": "class DistillationLoss(torch.nn.Module):\n \"\"\"\n This module wraps a standard criterion and adds an extra knowledge distillation loss by\n taking a teacher model prediction and using it as additional supervision.\n \"\"\"\n def __init__(self, base_criterion: torch.nn.Module, teacher_model: torch.nn.Module,\n distillation_type: str, alpha: float, tau: float):\n super().__init__()\n self.base_criterion = base_criterion\n self.teacher_model = teacher_model\n assert distillation_type in ['none', 'soft', 'hard']\n self.distillation_type = distillation_type\n self.alpha = alpha\n self.tau = tau\n\n def forward(self, inputs, outputs, labels):\n \"\"\"\n Args:\n inputs: The original inputs that are feed to the teacher model\n outputs: the outputs of the model to be trained. It is expected to be\n either a Tensor, or a Tuple[Tensor, Tensor], with the original output\n in the first position and the distillation predictions as the second output\n labels: the labels for the base criterion\n \"\"\"\n outputs_kd = None\n if not isinstance(outputs, torch.Tensor):\n # assume that the model outputs a tuple of [outputs, outputs_kd]\n outputs, outputs_kd = outputs\n base_loss = self.base_criterion(outputs, labels)\n if self.distillation_type == 'none':\n return base_loss\n\n if outputs_kd is None:\n raise ValueError(\"When knowledge distillation is enabled, the model is \"\n \"expected to return a Tuple[Tensor, Tensor] with the output of the \"\n \"class_token and the dist_token\")\n # don't backprop throught the teacher\n with torch.no_grad():\n teacher_outputs = self.teacher_model(inputs)\n\n if self.distillation_type == 'soft':\n T = self.tau\n # taken from https://github.com/peterliht/knowledge-distillation-pytorch/blob/master/model/net.py#L100\n # with slight modifications\n distillation_loss = F.kl_div(\n F.log_softmax(outputs_kd / T, dim=1),\n #We provide the teacher's targets in log probability because we use log_target=True \n #(as recommended in pytorch https://github.com/pytorch/pytorch/blob/9324181d0ac7b4f7949a574dbc3e8be30abe7041/torch/nn/functional.py#L2719)\n #but it is possible to give just the probabilities and set log_target=False. In our experiments we tried both.\n F.log_softmax(teacher_outputs / T, dim=1),\n reduction='sum',\n log_target=True\n ) * (T * T) / outputs_kd.numel()\n #We divide by outputs_kd.numel() to have the legacy PyTorch behavior. \n #But we also experiments output_kd.size(0) \n #see issue 61(https://github.com/facebookresearch/deit/issues/61) for more details\n elif self.distillation_type == 'hard':\n distillation_loss = F.cross_entropy(outputs_kd, teacher_outputs.argmax(dim=1))\n\n loss = base_loss * (1 - self.alpha) + distillation_loss * self.alpha\n return loss"
},
{
"identifier": "RASampler",
"path": "samplers.py",
"snippet": "class RASampler(torch.utils.data.Sampler):\n \"\"\"Sampler that restricts data loading to a subset of the dataset for distributed,\n with repeated augmentation.\n It ensures that different each augmented version of a sample will be visible to a\n different process (GPU)\n Heavily based on torch.utils.data.DistributedSampler\n \"\"\"\n\n def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):\n if num_replicas is None:\n if not dist.is_available():\n raise RuntimeError(\"Requires distributed package to be available\")\n num_replicas = dist.get_world_size()\n if rank is None:\n if not dist.is_available():\n raise RuntimeError(\"Requires distributed package to be available\")\n rank = dist.get_rank()\n self.dataset = dataset\n self.num_replicas = num_replicas\n self.rank = rank\n self.epoch = 0\n self.num_samples = int(math.ceil(len(self.dataset) * 3.0 / self.num_replicas))\n self.total_size = self.num_samples * self.num_replicas\n # self.num_selected_samples = int(math.ceil(len(self.dataset) / self.num_replicas))\n self.num_selected_samples = int(math.floor(len(self.dataset) // 256 * 256 / self.num_replicas))\n self.shuffle = shuffle\n\n def __iter__(self):\n # deterministically shuffle based on epoch\n g = torch.Generator()\n g.manual_seed(self.epoch)\n if self.shuffle:\n indices = torch.randperm(len(self.dataset), generator=g).tolist()\n else:\n indices = list(range(len(self.dataset)))\n\n # add extra samples to make it evenly divisible\n indices = [ele for ele in indices for i in range(3)]\n indices += indices[:(self.total_size - len(indices))]\n assert len(indices) == self.total_size\n\n # subsample\n indices = indices[self.rank:self.total_size:self.num_replicas]\n assert len(indices) == self.num_samples\n\n return iter(indices[:self.num_selected_samples])\n\n def __len__(self):\n return self.num_selected_samples\n\n def set_epoch(self, epoch):\n self.epoch = epoch"
}
] | import argparse
import datetime
import numpy as np
import time
import torch
import torch.backends.cudnn as cudnn
import json
import utils
from pathlib import Path
from mixup import Mixup
from timm.models import create_model
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from timm.scheduler import create_scheduler
from timm.optim import create_optimizer
from timm.utils import NativeScaler, get_state_dict, ModelEma
from datasets import build_dataset
from engine import train_one_epoch, evaluate
from losses import DistillationLoss
from samplers import RASampler
from models import * | 8,424 |
data_loader_val = torch.utils.data.DataLoader(
dataset_val, sampler=sampler_val,
batch_size=args.batch_size,
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=False
)
mixup_fn = None
mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None
if mixup_active:
mixup_fn = Mixup(
mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax,
prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode,
label_smoothing=args.smoothing, num_classes=args.nb_classes)
print(f"Creating model: {args.model}")
model = create_model(
args.model,
pretrained=False,
num_classes=args.nb_classes,
drop_rate=args.drop,
drop_path_rate=args.drop_path,
drop_block_rate=None,)
if args.finetune:
if args.finetune.startswith('https'):
checkpoint = torch.hub.load_state_dict_from_url(
args.finetune, map_location='cpu', check_hash=True)
else:
checkpoint = torch.load(args.finetune, map_location='cpu')
checkpoint_model = checkpoint['model']
state_dict = model.state_dict()
for k in ['head.weight', 'head.bias', 'head_dist.weight', 'head_dist.bias']:
if k in checkpoint_model and checkpoint_model[k].shape != state_dict[k].shape:
print(f"Removing key {k} from pretrained checkpoint")
del checkpoint_model[k]
# interpolate position embedding
pos_embed_checkpoint = checkpoint_model['pos_embed']
embedding_size = pos_embed_checkpoint.shape[-1]
num_patches = model.patch_embed.num_patches
num_extra_tokens = model.pos_embed.shape[-2] - num_patches
# height (== width) for the checkpoint position embedding
orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
# height (== width) for the new position embedding
new_size = int(num_patches ** 0.5)
# class_token and dist_token are kept unchanged
extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
# only the position tokens are interpolated
pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)
pos_tokens = torch.nn.functional.interpolate(
pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)
pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
checkpoint_model['pos_embed'] = new_pos_embed
model.load_state_dict(checkpoint_model, strict=False)
model.to(device)
# flops, params = get_model_complexity_info(model, (3,args.input_size, args.input_size),as_strings=True,print_per_layer_stat=False)
# print("flops: %s |params: %s" % (flops,params))
model_ema = None
if args.model_ema:
# Important to create EMA model after cuda(), DP wrapper, and AMP but before SyncBN and DDP wrapper
model_ema = ModelEma(
model,
decay=args.model_ema_decay,
device='cpu' if args.model_ema_force_cpu else '',
resume='')
model_without_ddp = model
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=True)
model_without_ddp = model.module
n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
print('number of params:', n_parameters)
linear_scaled_lr = args.lr * args.batch_size * utils.get_world_size() / 512.0
args.lr = linear_scaled_lr
optimizer = create_optimizer(args, model_without_ddp)
loss_scaler = NativeScaler()
lr_scheduler, _ = create_scheduler(args, optimizer)
criterion = LabelSmoothingCrossEntropy()
if args.mixup > 0.:
# smoothing is handled with mixup label transform
criterion = SoftTargetCrossEntropy()
elif args.smoothing:
criterion = LabelSmoothingCrossEntropy(smoothing=args.smoothing)
else:
criterion = torch.nn.CrossEntropyLoss()
teacher_model = None
# if args.distillation_type != 'none':
# assert args.teacher_path, 'need to specify teacher-path when using distillation'
# print(f"Creating teacher model: {args.teacher_model}")
# teacher_model = create_model(
# args.teacher_model,
# pretrained=False,
# num_classes=args.nb_classes,
# global_pool='avg',
# )
# teacher_model = RegNetY_200MF()
# if args.teacher_path.startswith('https'):
# checkpoint = torch.hub.load_state_dict_from_url(
# args.teacher_path, map_location='cpu', check_hash=True)
# else:
# checkpoint = torch.load(args.teacher_path, map_location='cpu')
# teacher_model.load_state_dict(checkpoint['model'])
# teacher_model.to(device)
# teacher_model.eval()
# wrap the criterion in our custom DistillationLoss, which
# just dispatches to the original criterion if args.distillation_type is 'none'
| # Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
# from ptflops import get_model_complexity_info
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("running on {} device.".format(device))
def get_args_parser():
parser = argparse.ArgumentParser('SlenderViT training and evaluation script', add_help=False)
# Model parameters
parser.add_argument('--uni-note', default='', type=str, help='unique note on the name of model to train')
parser.add_argument('--model', default='SBCFormer_B', type=str, metavar='MODEL',
help='Name of model to train.')
parser.add_argument('--epochs', default=300, type=int)
parser.add_argument('--input-size', default=224, type=int, help='images input size')
parser.add_argument('--in-chans', type=int, default=3, help='the channel of inputs ')
parser.add_argument('--batch-size', default=30, type=int)
parser.add_argument('--drop', type=float, default=0., metavar='PCT', help='Dropout rate (default: 0.)')
parser.add_argument('--drop-path', type=float, default=0.1, metavar='PCT', help='Drop path rate (default: 0.1)')
parser.add_argument('--model-ema', action='store_true')
parser.add_argument('--no-model-ema', action='store_false', dest='model_ema')
parser.set_defaults(model_ema=False)
parser.add_argument('--model-ema-decay', type=float, default=0.99996, help='')
parser.add_argument('--model-ema-force-cpu', action='store_true', default=False, help='')
# Optimizer parameters
parser.add_argument('--opt', default='adamw', type=str, metavar='OPTIMIZER', help='Optimizer (default: "adamw"')
    parser.add_argument('--opt-eps', default=1e-8, type=float, metavar='EPSILON', help='Optimizer Epsilon (default: 1e-8)')
parser.add_argument('--clip-grad', type=float, default=5, metavar='NORM', help='Clip gradient norm (default: None, no clipping)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M', help='SGD momentum (default: 0.9)')
parser.add_argument('--weight-decay', type=float, default=0.05, help='weight decay (default: 0.05)')
# Learning rate schedule parameters
parser.add_argument('--sched', default='cosine', type=str, metavar='SCHEDULER', help='LR scheduler (default: "cosine"')
parser.add_argument('--lr', type=float, default=2.5e-4, metavar='LR', help='learning rate (default: 5e-4)')
parser.add_argument('--lr-noise', type=float, nargs='+', default=None, metavar='pct, pct', help='learning rate noise on/off epoch percentages')
parser.add_argument('--lr-noise-pct', type=float, default=0.67, metavar='PERCENT', help='learning rate noise limit percent (default: 0.67)')
parser.add_argument('--lr-noise-std', type=float, default=1.0, metavar='STDDEV', help='learning rate noise std-dev (default: 1.0)')
parser.add_argument('--warmup-lr', type=float, default=1e-6, metavar='LR', help='warmup learning rate (default: 1e-6)')
parser.add_argument('--min-lr', type=float, default=1e-5, metavar='LR', help='lower lr bound for cyclic schedulers that hit 0 (1e-5)')
parser.add_argument('--decay-epochs', type=float, default=30, metavar='N', help='epoch interval to decay LR')
parser.add_argument('--warmup-epochs', type=int, default=5, metavar='N', help='epochs to warmup LR, if scheduler supports')
parser.add_argument('--cooldown-epochs', type=int, default=10, metavar='N', help='epochs to cooldown LR at min_lr, after cyclic schedule ends')
parser.add_argument('--patience-epochs', type=int, default=10, metavar='N', help='patience epochs for Plateau LR scheduler (default: 10')
parser.add_argument('--decay-rate', '--dr', type=float, default=0.1, metavar='RATE', help='LR decay rate (default: 0.1)')
# Augmentation parameters
parser.add_argument('--color-jitter', type=float, default=0.4, metavar='PCT', help='Color jitter factor (default: 0.4)')
parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME',
help='Use AutoAugment policy. "v0" or "original". " + "(default: rand-m9-mstd0.5-inc1)'),
parser.add_argument('--smoothing', type=float, default=0.1, help='Label smoothing (default: 0.1)')
parser.add_argument('--train-interpolation', type=str, default='bicubic', help='Training interpolation (random, bilinear, bicubic default: "bicubic")')
parser.add_argument('--repeated-aug', action='store_true')
parser.add_argument('--no-repeated-aug', action='store_false', dest='repeated_aug')
parser.set_defaults(repeated_aug=False)
# * Random Erase params
parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT',
help='Random erase prob (default: 0.25)')
parser.add_argument('--remode', type=str, default='pixel',
help='Random erase mode (default: "pixel")')
parser.add_argument('--recount', type=int, default=1,
help='Random erase count (default: 1)')
parser.add_argument('--resplit', action='store_true', default=False,
help='Do not random erase first (clean) augmentation split')
# * Mixup params
parser.add_argument('--mixup', type=float, default=0.8,
help='mixup alpha, mixup enabled if > 0. (default: 0.8)')
parser.add_argument('--cutmix', type=float, default=1.0,
help='cutmix alpha, cutmix enabled if > 0. (default: 1.0)')
parser.add_argument('--cutmix-minmax', type=float, nargs='+', default=None,
help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
parser.add_argument('--mixup-prob', type=float, default=1.0,
help='Probability of performing mixup or cutmix when either/both is enabled')
parser.add_argument('--mixup-switch-prob', type=float, default=0.5,
help='Probability of switching to cutmix when both mixup and cutmix enabled')
parser.add_argument('--mixup-mode', type=str, default='batch',
help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')
# Distillation parameters distilled
parser.add_argument('--distilled', action='store_true', default=False, help='Perform distilled ')
parser.add_argument('--teacher-model', default='regnety_200mf', type=str, metavar='MODEL',
help='Name of teacher model to train (default: "regnety_160"')
parser.add_argument('--teacher-path', type=str, default='')
parser.add_argument('--distillation-type', default='none', choices=['none', 'soft', 'hard'], type=str, help="")
parser.add_argument('--distillation-alpha', default=0.5, type=float, help="")
parser.add_argument('--distillation-tau', default=1.0, type=float, help="")
# Finetuning params
parser.add_argument('--finetune', default='', help='finetune from checkpoint')
# Dataset parameters
parser.add_argument('--data-path', default= '../../PythonWork_E/Data/ImageNet_2012',#'./data', type=str,
help='dataset path')
parser.add_argument('--data-set', default='IMNET', choices=['CIFAR10', 'CIFAR100' , 'IMNET'],
type=str, help='Image Net dataset path')
parser.add_argument('--inat-category', default='name',
choices=['kingdom', 'phylum', 'class', 'order', 'supercategory', 'family', 'genus', 'name'],
type=str, help='semantic granularity')
parser.add_argument('--output_dir', default='./outputs', help='path where to save, empty for no saving')
parser.add_argument('--device', default='cuda', help='device to use for training / testing')
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--resume', default= '', help='resume from checkpoint')
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
help='start epoch')
parser.add_argument('--eval', action='store_true', default=False, help='Perform evaluation only')
parser.add_argument('--dist-eval', action='store_true', default=False, help='Enabling distributed evaluation')
parser.add_argument('--num_workers', default=10, type=int)
parser.add_argument('--pin-mem', action='store_true',
help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
parser.add_argument('--no-pin-mem', action='store_false', dest='pin_mem',
help='')
parser.set_defaults(pin_mem=True)
# distributed training parameters
parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes')
parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
# test throught
    parser.add_argument('--throughout', action='store_true', help='Perform throughput test only')
return parser
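# Usage sketch (an assumption, not part of the original script): the parser above can be built and
# inspected without launching training, e.g.
#     args = get_args_parser().parse_args([])
#     print(args.model, args.batch_size, args.lr)  # -> SBCFormer_B 30 0.00025
# Note that main() later replaces args.lr with the linearly scaled value.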
@torch.no_grad()
def throughput(data_loader, model, logger):
model.eval()
for _, (images, _) in enumerate(data_loader):
images = images.cuda(non_blocking=True)
batch_size = images.shape[0]
for i in range(50):
model(images)
torch.cuda.synchronize()
logger.info(f"throughput averaged with 30 times")
tic1 = time.time()
for i in range(30):
model(images)
torch.cuda.synchronize()
tic2 = time.time()
logger.info(f"batch_size {batch_size} throughput {30 * batch_size / (tic2 - tic1)}")
return
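# The loop above warms the GPU up with 50 untimed forward passes, then times 30 more between
# torch.cuda.synchronize() calls so that asynchronous CUDA kernels have finished before the clock
# is read; the reported number is images/second, i.e. 30 * batch_size / (tic2 - tic1). A typical
# call (assuming a logger object is available) would be throughput(data_loader_val, model, logger).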
def main(args):
utils.init_distributed_mode(args)
print('------------ Options -------------')
for key, value in sorted(vars(args).items()):
print('%16.16s: %16.16s' % (str(key), str(value)))
print('-------------- End ----------------')
if args.distillation_type != 'none' and args.finetune and not args.eval:
raise NotImplementedError("Finetuning with distillation not yet supported")
# fix the seed for reproducibility
seed = args.seed + utils.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
cudnn.benchmark = True
dataset_train, args.nb_classes = build_dataset(is_train=True, args=args)
dataset_val, args.nb_classes = build_dataset(is_train=False, args=args)
if args.distributed:
num_tasks = utils.get_world_size()
global_rank = utils.get_rank()
if args.repeated_aug:
sampler_train = RASampler(
dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True
)
else:
sampler_train = torch.utils.data.DistributedSampler(
dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True
)
if args.dist_eval:
if len(dataset_val) % num_tasks != 0:
print('Warning: Enabling distributed evaluation with an eval dataset not divisible by process number. '
'This will slightly alter validation results as extra duplicate entries are added to achieve '
'equal num of samples per-process.')
sampler_val = torch.utils.data.DistributedSampler(
dataset_val, num_replicas=num_tasks, rank=global_rank, shuffle=False)
else:
sampler_val = torch.utils.data.SequentialSampler(dataset_val)
else:
sampler_train = torch.utils.data.RandomSampler(dataset_train)
sampler_val = torch.utils.data.SequentialSampler(dataset_val)
data_loader_train = torch.utils.data.DataLoader(
dataset_train, sampler=sampler_train,
batch_size=args.batch_size,
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=True,
)
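    # When --repeated-aug is set, RASampler (see its snippet above) repeats every index three
    # times and interleaves the copies across processes, so each GPU sees a differently augmented
    # copy of the same image within an epoch; drop_last=True keeps the per-step batch size
    # constant for training, while the validation loader below keeps every sample.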
data_loader_val = torch.utils.data.DataLoader(
dataset_val, sampler=sampler_val,
batch_size=args.batch_size,
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=False
)
mixup_fn = None
mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None
if mixup_active:
mixup_fn = Mixup(
mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax,
prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode,
label_smoothing=args.smoothing, num_classes=args.nb_classes)
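        # mixup_fn is applied batch-wise inside train_one_epoch: it returns mixed images together
        # with soft targets of shape [batch_size, nb_classes]. The Mixup implementation asserts an
        # even batch size, since each sample is mixed with its counterpart from the reversed batch.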
print(f"Creating model: {args.model}")
model = create_model(
args.model,
pretrained=False,
num_classes=args.nb_classes,
drop_rate=args.drop,
drop_path_rate=args.drop_path,
drop_block_rate=None,)
if args.finetune:
if args.finetune.startswith('https'):
checkpoint = torch.hub.load_state_dict_from_url(
args.finetune, map_location='cpu', check_hash=True)
else:
checkpoint = torch.load(args.finetune, map_location='cpu')
checkpoint_model = checkpoint['model']
state_dict = model.state_dict()
for k in ['head.weight', 'head.bias', 'head_dist.weight', 'head_dist.bias']:
if k in checkpoint_model and checkpoint_model[k].shape != state_dict[k].shape:
print(f"Removing key {k} from pretrained checkpoint")
del checkpoint_model[k]
# interpolate position embedding
pos_embed_checkpoint = checkpoint_model['pos_embed']
embedding_size = pos_embed_checkpoint.shape[-1]
num_patches = model.patch_embed.num_patches
num_extra_tokens = model.pos_embed.shape[-2] - num_patches
# height (== width) for the checkpoint position embedding
orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
# height (== width) for the new position embedding
new_size = int(num_patches ** 0.5)
# class_token and dist_token are kept unchanged
extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
# only the position tokens are interpolated
pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)
pos_tokens = torch.nn.functional.interpolate(
pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)
pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
checkpoint_model['pos_embed'] = new_pos_embed
model.load_state_dict(checkpoint_model, strict=False)
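    # Worked example for the interpolation above (illustrative numbers, assuming a ViT-style
    # checkpoint with 16-pixel patches): a 224x224 checkpoint carries 14*14 = 196 position tokens
    # plus the extra class/dist tokens, while fine-tuning at 384x384 needs 24*24 = 576; the 196
    # grid tokens are reshaped to 14x14, bicubically resized to 24x24, flattened back to 576
    # tokens and re-concatenated with the untouched extra tokens.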
model.to(device)
# flops, params = get_model_complexity_info(model, (3,args.input_size, args.input_size),as_strings=True,print_per_layer_stat=False)
# print("flops: %s |params: %s" % (flops,params))
model_ema = None
if args.model_ema:
# Important to create EMA model after cuda(), DP wrapper, and AMP but before SyncBN and DDP wrapper
model_ema = ModelEma(
model,
decay=args.model_ema_decay,
device='cpu' if args.model_ema_force_cpu else '',
resume='')
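        # ModelEma keeps a shadow copy of the weights, updated after every optimizer step in
        # train_one_epoch roughly as ema_w = decay * ema_w + (1 - decay) * w; with the default
        # decay of 0.99996 that averages over on the order of 1 / (1 - decay) = 25,000 updates.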
model_without_ddp = model
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=True)
model_without_ddp = model.module
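        # find_unused_parameters=True lets DDP tolerate parameters that receive no gradient in a
        # given step (e.g. when a distillation branch is inactive), at the cost of an extra graph
        # traversal per iteration; it can be turned off if every parameter is always used.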
n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
print('number of params:', n_parameters)
linear_scaled_lr = args.lr * args.batch_size * utils.get_world_size() / 512.0
args.lr = linear_scaled_lr
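    # Linear scaling rule worked through with the defaults (illustrative): base lr 2.5e-4 with a
    # per-process batch size of 30 on one GPU gives 2.5e-4 * 30 * 1 / 512 ~ 1.46e-5, and with 8
    # processes (effective batch 240) it becomes ~ 1.17e-4, i.e. the learning rate grows in
    # proportion to the total batch size.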
optimizer = create_optimizer(args, model_without_ddp)
loss_scaler = NativeScaler()
lr_scheduler, _ = create_scheduler(args, optimizer)
criterion = LabelSmoothingCrossEntropy()
if args.mixup > 0.:
# smoothing is handled with mixup label transform
criterion = SoftTargetCrossEntropy()
elif args.smoothing:
criterion = LabelSmoothingCrossEntropy(smoothing=args.smoothing)
else:
criterion = torch.nn.CrossEntropyLoss()
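    # Criterion cascade above: with mixup active the targets are already soft probability vectors
    # (label smoothing is folded into them by mixup_target), so SoftTargetCrossEntropy is the
    # right loss; without mixup but with --smoothing > 0, LabelSmoothingCrossEntropy smooths the
    # hard labels itself, and plain CrossEntropyLoss is only the final fallback. The first
    # LabelSmoothingCrossEntropy() assignment is immediately overwritten by one of these branches.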
teacher_model = None
# if args.distillation_type != 'none':
# assert args.teacher_path, 'need to specify teacher-path when using distillation'
# print(f"Creating teacher model: {args.teacher_model}")
# teacher_model = create_model(
# args.teacher_model,
# pretrained=False,
# num_classes=args.nb_classes,
# global_pool='avg',
# )
# teacher_model = RegNetY_200MF()
# if args.teacher_path.startswith('https'):
# checkpoint = torch.hub.load_state_dict_from_url(
# args.teacher_path, map_location='cpu', check_hash=True)
# else:
# checkpoint = torch.load(args.teacher_path, map_location='cpu')
# teacher_model.load_state_dict(checkpoint['model'])
# teacher_model.to(device)
# teacher_model.eval()
# wrap the criterion in our custom DistillationLoss, which
# just dispatches to the original criterion if args.distillation_type is 'none' | criterion = DistillationLoss( | 4 | 2023-11-06 03:31:47+00:00 | 12k |
zamaniamin/fastapi-shop | apps/accounts/tests/test_user.py | [
{
"identifier": "FakeUser",
"path": "apps/accounts/faker/data.py",
"snippet": "class FakeUser(BaseFakeAccount):\n\n @classmethod\n def populate_members(cls):\n \"\"\"\n Create an admin and a user.\n \"\"\"\n\n # --- admin ---\n user, access_token = FakeAccount.verified_registration()\n user_data = {\n 'email': '[email protected]',\n 'first_name': cls.fake.first_name(),\n 'last_name': cls.fake.last_name(),\n 'is_superuser': True,\n 'role': 'admin'\n }\n\n UserManager.update_user(user.id, **user_data)\n\n # --- user ---\n user, access_token = FakeAccount.verified_registration()\n user_data = {\n 'email': '[email protected]',\n 'first_name': cls.fake.first_name(),\n 'last_name': cls.fake.last_name()\n }\n\n UserManager.update_user(user.id, **user_data)\n\n @classmethod\n def populate_admin(cls):\n \"\"\"\n Create an admin and generate an access token too.\n \"\"\"\n\n user, access_token = FakeAccount.verified_registration()\n user_data = {\n 'first_name': cls.fake.first_name(),\n 'last_name': cls.fake.last_name(),\n 'is_superuser': True,\n 'role': 'admin'\n }\n\n user = UserManager.update_user(user.id, **user_data)\n return user, access_token\n\n @classmethod\n def populate_user(cls):\n \"\"\"\n Create a new user and generate an access token too.\n \"\"\"\n\n user, access_token = FakeAccount.verified_registration()\n user_data = {\n 'first_name': cls.fake.first_name(),\n 'last_name': cls.fake.last_name()\n }\n\n user = UserManager.update_user(user.id, **user_data)\n return user, access_token"
},
{
"identifier": "UserVerification",
"path": "apps/accounts/models.py",
"snippet": "class UserVerification(FastModel):\n \"\"\"\n UserVerification represents change requests initiated by users, such as email or phone number changes,\n that require OTP verification.\n\n Attributes:\n id (int): Unique identifier for the verification request.\n user_id (int): ID of the user who initiated the verification request.\n request_type (str): Indicates the type of verification request (register /reset-password /change-email\n /change-phone).\n new_email (str): New email address requested by the user.\n new_phone (str): New phone number requested by the user.\n active_access_token (str, optional): Last valid access token used for JWT authentication. Default is None.\n created_at (datetime): Timestamp indicating when the verification request was created.\n updated_at (datetime): Timestamp indicating when the verification request was last updated.\n \"\"\"\n\n __tablename__ = \"users_verifications\"\n\n id = Column(Integer, primary_key=True)\n user_id = Column(Integer, ForeignKey(\"users.id\"), unique=True)\n\n request_type = Column(String, nullable=True)\n new_email = Column(String(256), nullable=True)\n new_phone = Column(String(256), nullable=True)\n active_access_token = Column(String, nullable=True)\n\n created_at = Column(DateTime, server_default=func.now())\n updated_at = Column(DateTime, server_default=func.now(), onupdate=func.now())\n user = relationship(\"User\", back_populates=\"change\")"
},
{
"identifier": "AccountService",
"path": "apps/accounts/services/authenticate.py",
"snippet": "class AccountService:\n\n @classmethod\n async def current_user(cls, token: str = Depends(OAuth2PasswordBearer(tokenUrl=\"accounts/login\"))) -> User:\n user = await TokenService.fetch_user(token)\n return user\n\n # ----------------\n # --- Register ---\n # ----------------\n\n @classmethod\n def register(cls, email: str, password: str):\n \"\"\"\n Create a new user and send an email with OTP code.\n \"\"\"\n\n # check if user with the given email is exist or not.\n if UserManager.get_user(email=email):\n raise HTTPException(\n status_code=status.HTTP_400_BAD_REQUEST,\n detail=\"This email has already been taken.\"\n )\n\n new_user = UserManager.create_user(email=email, password=password)\n TokenService(new_user.id).request_is_register()\n EmailService.register_send_verification_email(new_user.email)\n\n return {'email': new_user.email,\n 'message': 'Please check your email for an OTP code to confirm your email address.'}\n\n @classmethod\n def verify_registration(cls, email: str, otp: str):\n \"\"\"\n Verifies user registration by validating the provided OTP code.\n\n For first-time users, they must validate their email address before being able to \"login\".\n After validation, their account will be activated, allowing them to \"login\" and use the app.\n\n During verification, the email address is validated using an OTP code. Upon successful verification,\n the user's data is updated, the account is activated, and the user can \"login\" to the app.\n Additionally, an `access_token` is sent to allow the user to \"login\" without being redirected to the login form.\n\n Args:\n email (str): User's email address.\n otp (str): One-Time Password (OTP) code for email verification.\n\n Raises:\n HTTPException: If the user is not found, the email is already verified, or an invalid OTP is provided.\n\n Returns:\n dict: Dictionary containing an authentication token and a success message.\n \"\"\"\n\n # --- get user by email ---\n user = UserManager.get_user(email=email)\n if not user:\n raise HTTPException(\n status_code=status.HTTP_404_NOT_FOUND,\n detail=\"User not found.\"\n )\n\n # --- check email verified or not ---\n if user.is_verified_email:\n raise HTTPException(\n status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,\n detail=\"This email is already verified.\"\n )\n\n # --- validate otp_token for this user ---\n token = TokenService(user=user)\n\n if not token.validate_otp_token(otp):\n raise HTTPException(\n status_code=status.HTTP_406_NOT_ACCEPTABLE,\n detail=\"Invalid OTP code. Please double-check and try again.\"\n )\n\n # --- Update user data and activate the account ---\n UserManager.update_user(user.id, is_verified_email=True, is_active=True, last_login=DateTime.now())\n\n token.reset_otp_token_type()\n\n return {'access_token': token.create_access_token(),\n 'message': 'Your email address has been confirmed. 
Account activated successfully.'}\n\n # -------------\n # --- Login ---\n # -------------\n\n @classmethod\n def login(cls, email: str, password: str):\n \"\"\"\n Login with given email and password.\n \"\"\"\n\n user = cls.authenticate_user(email, password)\n token: TokenService = TokenService(user)\n\n if not user:\n raise HTTPException(\n status_code=status.HTTP_401_UNAUTHORIZED,\n detail=\"Incorrect username or password.\",\n headers={\"WWW-Authenticate\": \"Bearer\"},\n )\n\n if not user.is_active:\n raise HTTPException(\n status_code=status.HTTP_403_FORBIDDEN,\n detail=\"Inactive account.\",\n headers={\"WWW-Authenticate\": \"Bearer\"},\n )\n\n if not user.is_verified_email:\n raise HTTPException(\n status_code=status.HTTP_403_FORBIDDEN,\n detail=\"Unverified email address.\",\n headers={\"WWW-Authenticate\": \"Bearer\"},\n )\n\n UserManager.update_last_login(user.id)\n return {\"access_token\": token.create_access_token(), \"token_type\": \"bearer\"}\n\n @classmethod\n def authenticate_user(cls, email: str, password: str):\n user = UserManager.get_user(email=email)\n if not user:\n return False\n if not PasswordManager.verify_password(password, user.password):\n return False\n return user\n\n # ----------------------\n # --- Reset Password ---\n # ----------------------\n\n @classmethod\n def reset_password(cls, email: str):\n \"\"\"\n Reset password by user email address.\n \"\"\"\n # TODO stop resend email until current otp not expired\n user: User | None\n\n user = UserManager.get_user_or_404(email=email)\n UserManager.is_active(user)\n UserManager.is_verified_email(user)\n\n token = TokenService(user.id)\n token.reset_is_reset_password()\n\n EmailService.reset_password_send_verification_email(user.email)\n\n return {'message': 'Please check your email for an OTP code to confirm the password reset request.'}\n\n @classmethod\n def verify_reset_password(cls, email: str, password: str, otp: str):\n \"\"\"\n Verify the request for reset password and if otp is valid then current access-token will expire.\n \"\"\"\n\n user = UserManager.get_user_or_404(email=email)\n token = TokenService(user.id)\n\n if not token.validate_otp_token(otp):\n raise HTTPException(\n status_code=status.HTTP_406_NOT_ACCEPTABLE,\n detail=\"Invalid OTP code.Please double-check and try again.\"\n )\n\n UserManager.update_user(user.id, password=password)\n token.reset_otp_token_type()\n token.reset_access_token()\n # TODO send an email and notice user the password is changed.\n\n return {'message': 'Your password has been changed.'}\n\n @classmethod\n def change_password(cls, user: User, current_password: str, password: str):\n \"\"\"\n Change password for current user, and then current access-token will be expired.\n \"\"\"\n\n if not PasswordManager.verify_password(current_password, user.password):\n raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail=\"Incorrect password.\")\n\n UserManager.update_user(user.id, password=password)\n TokenService(user.id).reset_access_token()\n\n return {'message': 'Your password has been changed.'}\n\n @classmethod\n def change_email(cls, user, new_email):\n \"\"\"\n Change password for current user.\n \"\"\"\n\n # Check if the new email address is not already associated with another user\n if UserManager.get_user(email=new_email) is None:\n\n TokenService(user.id).request_is_change_email(new_email)\n EmailService.change_email_send_verification_email(new_email)\n\n else:\n raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=\"This email has 
already been taken.\")\n\n return {\n 'message': f'Please check your email \"{new_email}\" for an OTP code to confirm the change email request.'}\n\n @classmethod\n def verify_change_email(cls, user, otp):\n \"\"\"\n Verify change password for current user.\n \"\"\"\n\n token = TokenService(user.id)\n\n if token.validate_otp_token(otp):\n new_email = token.get_new_email()\n\n if new_email:\n UserManager.update_user(user.id, email=new_email)\n token.reset_is_change_email()\n else:\n raise HTTPException(\n status_code=status.HTTP_400_BAD_REQUEST,\n detail=\"Invalid request for email verification.\")\n else:\n raise HTTPException(\n status_code=status.HTTP_406_NOT_ACCEPTABLE,\n detail=\"Invalid OTP code. Please double-check and try again.\")\n\n return {'message': 'Your email is changed.'}\n\n @classmethod\n def resend_otp(cls, request_type: str, email: str):\n \"\"\"\n Resend OTP for registration, password reset, or email change verification.\n \"\"\"\n\n user = UserManager.get_user_or_404(email=email)\n token = TokenService(user.id)\n\n # --- validate current request type ---\n current_request_type = token.get_otp_request_type()\n if current_request_type != request_type:\n raise HTTPException(\n status_code=status.HTTP_400_BAD_REQUEST,\n detail=f\"Current requested type is invalid.\")\n\n if current_request_type == 'change_email':\n email = token.get_new_email()\n\n # --- resend new OTP ---\n token.check_time_remaining()\n match request_type:\n case 'register':\n EmailService.register_send_verification_email(email)\n case 'change-email':\n EmailService.change_email_send_verification_email(email)\n case 'reset-password':\n EmailService.reset_password_send_verification_email(email)\n\n @classmethod\n def logout(cls, current_user):\n TokenService(current_user).reset_access_token()"
},
{
"identifier": "PasswordManager",
"path": "apps/accounts/services/password.py",
"snippet": "class PasswordManager:\n password_context = CryptContext(schemes=[\"bcrypt\"], deprecated=\"auto\")\n min_length: int = 8\n max_length: int = 24\n\n @classmethod\n def validate_password_strength(cls, password: str, has_number: bool = True, has_lowercase: bool = True,\n has_uppercase: bool = True, has_special_char: bool = True) -> str:\n \"\"\"\n Validate a password based on the given constraints.\n\n Args:\n password: The password to validate.\n has_number: Use numbers (0-9) in the password.\n has_lowercase: Use lowercase characters (a-z) in the password.\n has_uppercase: Use uppercase characters (A-Z) in the password.\n has_special_char: Use special characters (!@#$%^&*()_+{}[]:;\"\\'<>.,.?/|) in the password.\n\n Returns:\n The validated password, or raises a HTTPException if the password is invalid.\n \"\"\"\n\n cls.__validate_length(password)\n\n if has_number:\n cls.__validate_pattern(password,\n r'[0-9]', 'Invalid password. Must contain at least one number (0-9).')\n\n if has_uppercase:\n cls.__validate_pattern(password, r'[A-Z]',\n 'Invalid password. Must contain at least one uppercase letter (A-Z).')\n\n if has_lowercase:\n cls.__validate_pattern(password, r'[a-z]',\n 'Invalid password. Must contain at least one lowercase letter (a-z).')\n\n if has_special_char:\n cls.__validate_pattern(password, r'[!@#$%^&*()_+{}\\[\\]:;\"\\'<>,.?/\\\\|]',\n 'Invalid password. Must contain at least one special character.')\n\n return password\n\n @classmethod\n def __validate_length(cls, password: str):\n if len(password) < cls.min_length or len(password) > cls.max_length:\n raise HTTPException(\n status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,\n detail=f'Invalid password length. Must be between {cls.min_length} and {cls.max_length} characters.'\n )\n\n @classmethod\n def __validate_pattern(cls, password: str, pattern: str, message: str):\n if not re.search(pattern, password):\n raise HTTPException(\n status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,\n detail=message\n )\n\n # ---------------------\n # --- Hash Password ---\n # ---------------------\n\n @classmethod\n def hash_password(cls, password: str):\n return cls.password_context.hash(password)\n\n @classmethod\n def verify_password(cls, plain_password: str, hashed_password: str):\n return cls.password_context.verify(plain_password, hashed_password)"
},
{
"identifier": "TokenService",
"path": "apps/accounts/services/token.py",
"snippet": "class TokenService:\n \"\"\"\n Manage \"jwt-token\" or \"otp-token\" that used for authentication.\n \"\"\"\n\n user: User | None\n user_id: int\n\n app_config = AppConfig.get_config()\n\n ALGORITHM = \"HS256\"\n oauth2_scheme = OAuth2PasswordBearer(tokenUrl=\"accounts/login\")\n credentials_exception = HTTPException(status_code=status.HTTP_401_UNAUTHORIZED,\n detail=\"Could not validate credentials.\",\n headers={\"WWW-Authenticate\": \"Bearer\"})\n\n def __init__(self, user: int | User | None = None):\n if user is not None:\n if isinstance(user, User):\n self.user = user\n self.user_id = user.id\n else:\n self.user = None\n self.user_id = user\n\n # --------------------\n # --- Access Token ---\n # --------------------\n\n \"\"\"\n Utility class for handling JWT authentication and access tokens.\n\n A user's access token will be expired due to actions such as \"resetting the password,\" \"changing the password,\" or\n even \"logging out\" (logout mechanism).\n\n The `access-token` stored in the database serves as a flag for the logout mechanism, ensuring that when a user\n wants to log out of the system, the current token will no longer be valid.\n \"\"\"\n\n def create_access_token(self) -> str:\n \"\"\"\n Create a new access token for the provided user.\n\n Returns:\n str: Access token string.\n \"\"\"\n\n # --- set data to encode ---\n to_encode = {'user_id': self.user_id}\n\n # --- set expire date ---\n to_encode.update({\"exp\": datetime.utcnow() + timedelta(self.app_config.access_token_expire_minutes)})\n\n # --- generate access token ---\n access_token = jwt.encode(to_encode, self.app_config.secret_key, algorithm=self.ALGORITHM)\n\n self.update_access_token(access_token)\n return access_token\n\n def update_access_token(self, token: str):\n UserVerification.update(UserVerification.filter(UserVerification.user_id == self.user_id).first().id,\n active_access_token=token)\n\n def reset_access_token(self):\n UserVerification.update(UserVerification.filter(UserVerification.user_id == self.user_id).first().id,\n active_access_token=None)\n\n @classmethod\n async def fetch_user(cls, token: str) -> User:\n \"\"\"\n Retrieve the user associated with the provided JWT token.\n\n Args:\n token (str): JWT token.\n\n Returns:\n User: User object if the token is valid, raises HTTPException if not.\n \"\"\"\n\n # --- validate token ---\n try:\n payload = jwt.decode(token, cls.app_config.secret_key, algorithms=[cls.ALGORITHM])\n except JWTError as e:\n raise cls.credentials_exception\n\n # --- validate payloads in token ---\n user_id = payload.get(\"user_id\")\n if user_id is None:\n raise cls.credentials_exception\n\n # --- get user ---\n # TODO move user data to token and dont fetch them from database\n user = UserManager.get_user(user_id)\n if user is None:\n raise cls.credentials_exception\n\n UserManager.is_active(user)\n\n # --- validate access token ---\n active_access_token = UserVerification.filter(UserVerification.user_id == user_id).first().active_access_token\n if token != active_access_token:\n raise cls.credentials_exception\n\n UserManager.is_active(user)\n return user\n\n # -----------------\n # --- OTP Token ---\n # -----------------\n\n @classmethod\n def create_otp_token(cls):\n totp = TOTP(cls.app_config.otp_secret_key, interval=cls.app_config.otp_expire_seconds)\n return totp.now()\n\n def request_is_register(self):\n \"\"\"\n Will be used just when a new user is registered.\n \"\"\"\n\n UserVerification.create(user_id=self.user_id, request_type='register')\n\n def 
get_new_email(self):\n _change: UserVerification = UserVerification.filter(UserVerification.user_id == self.user_id).first()\n if _change.request_type == 'change-email':\n return _change.new_email\n return False\n\n def request_is_change_email(self, new_email: str):\n _change = UserVerification.filter(UserVerification.user_id == self.user_id).first().id\n UserVerification.update(_change, new_email=new_email, request_type='change-email')\n\n def reset_is_change_email(self):\n _change = UserVerification.filter(UserVerification.user_id == self.user_id).first().id\n UserVerification.update(_change, new_email=None, request_type=None)\n\n def reset_is_reset_password(self):\n _change = UserVerification.filter(UserVerification.user_id == self.user_id).first().id\n UserVerification.update(_change, request_type='reset-password')\n\n def reset_otp_token_type(self):\n \"\"\"\n Remove the request_type for otp token by set it to None.\n \"\"\"\n\n _change = UserVerification.filter(UserVerification.user_id == self.user_id).first().id\n UserVerification.update(_change, request_type=None)\n\n def get_otp_request_type(self):\n return UserVerification.filter(UserVerification.user_id == self.user_id).first().request_type\n\n @classmethod\n def validate_otp_token(cls, token: str):\n totp = TOTP(cls.app_config.otp_secret_key, interval=cls.app_config.otp_expire_seconds)\n return totp.verify(token)\n\n @classmethod\n def check_time_remaining(cls):\n totp = TOTP(cls.app_config.otp_secret_key, interval=cls.app_config.otp_expire_seconds)\n time_remaining = int(totp.interval - datetime.now().timestamp() % totp.interval)\n if time_remaining != 0:\n # OTP has not expired, do not resend\n raise HTTPException(\n status_code=status.HTTP_400_BAD_REQUEST,\n detail=f\"OTP not expired. Resend available in {time_remaining} seconds.\")"
},
{
"identifier": "UserManager",
"path": "apps/accounts/services/user.py",
"snippet": "class UserManager:\n\n @classmethod\n def create_user(cls, email: str, password: str, first_name: str | None = None, last_name: str | None = None,\n is_verified_email: bool = False, is_active: bool = False, is_superuser: bool = False,\n role: str = 'user', updated_at: DateTime = None, last_login: DateTime = None):\n user_data = {\n \"email\": email,\n \"password\": PasswordManager.hash_password(password),\n \"first_name\": first_name,\n \"last_name\": last_name,\n \"is_verified_email\": is_verified_email,\n \"is_active\": is_active,\n \"is_superuser\": is_superuser,\n \"role\": role,\n \"updated_at\": updated_at,\n \"last_login\": last_login\n }\n user = User.create(**user_data)\n return user\n\n @staticmethod\n def get_user(user_id: int | None = None, email: str = None) -> User | None:\n \"\"\"\n Retrieve a user based on their ID or email address.\n\n Args:\n user_id (int | None): The ID of the user to retrieve. Defaults to None.\n email (str | None): The email address of the user to retrieve. Defaults to None.\n\n Returns:\n User | None: A User object if a user is found based on the provided ID or email,\n or None if no user is found.\n \"\"\"\n if user_id:\n user = User.get(user_id)\n elif email:\n user = User.filter(User.email == email).first()\n else:\n return None\n\n if user is None:\n return None\n\n return user\n\n @staticmethod\n def get_user_or_404(user_id: int | None = None, email: str = None):\n user: User | None = None\n if user_id:\n user = User.get_or_404(user_id)\n elif email:\n user = User.filter(User.email == email).first()\n if not user:\n raise HTTPException(status_code=404, detail=\"User not found.\")\n\n return user\n\n @classmethod\n def update_user(cls, user_id: int, email: str | None = None, password: str | None = None,\n first_name: str | None = None, last_name: str | None = None, is_verified_email: bool | None = None,\n is_active: bool | None = None, is_superuser: bool | None = None, role: str | None = None,\n last_login: DateTime | None = None):\n \"\"\"\n Update a user by their ID.\n \"\"\"\n\n user_data = {}\n\n if first_name is not None:\n user_data[\"first_name\"] = first_name\n\n if last_name is not None:\n user_data[\"last_name\"] = last_name\n\n if email is not None:\n user_data[\"email\"] = email\n\n if password is not None:\n user_data[\"password\"] = PasswordManager.hash_password(password)\n\n if is_verified_email is not None:\n user_data[\"is_verified_email\"] = is_verified_email\n\n if is_active is not None:\n user_data[\"is_active\"] = is_active\n\n if is_superuser is not None:\n user_data[\"is_superuser\"] = is_superuser\n\n if role is not None:\n user_data[\"role\"] = role\n\n if last_login is not None:\n user_data[\"last_login\"] = last_login\n\n return User.update(user_id, **user_data)\n\n @classmethod\n def update_last_login(cls, user_id: int):\n \"\"\"\n Update user's last login.\n \"\"\"\n User.update(user_id, last_login=DateTime.now())\n\n @staticmethod\n def to_dict(user: User):\n \"\"\"\n Convert a User object to a dictionary.\n \"\"\"\n _dict = {\n 'user_id': user.id,\n 'email': user.email,\n 'first_name': user.first_name,\n 'last_name': user.last_name,\n 'is_verified_email': user.is_verified_email,\n 'date_joined': DateTime.string(user.date_joined),\n 'updated_at': DateTime.string(user.updated_at),\n 'last_login': DateTime.string(user.last_login)\n }\n return _dict\n\n @classmethod\n def new_user(cls, **user_data):\n return User.create(**user_data)\n\n @staticmethod\n def is_active(user: User):\n if not user.is_active:\n 
raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail=\"Inactive user.\")\n\n @staticmethod\n def is_verified_email(user: User):\n if not user.is_verified_email:\n raise HTTPException(status_code=status.HTTP_403_FORBIDDEN,\n detail=\"Pleas verify your email address to continue.\")\n # TODO guide user to follow the steps need to verify email address."
},
{
"identifier": "BaseTestCase",
"path": "apps/core/base_test_case.py",
"snippet": "class BaseTestCase:\n\n @staticmethod\n def assert_datetime_format(date: str | datetime):\n if isinstance(date, datetime):\n date = DateTime.string(date)\n\n formatted_date = datetime.strptime(date, '%Y-%m-%d %H:%M:%S').strftime('%Y-%m-%d %H:%M:%S')\n assert date == formatted_date\n\n @staticmethod\n def convert_datetime_to_string(date):\n return DateTime.string(date)"
},
{
"identifier": "app",
"path": "apps/main.py",
"snippet": ""
},
{
"identifier": "DatabaseManager",
"path": "config/database.py",
"snippet": "class DatabaseManager:\n \"\"\"\n A utility class for managing database operations using SQLAlchemy.\n\n The DatabaseManager simplifies the process of initializing and managing database connections, creating database\n tables based on SQLAlchemy models, and providing a session for performing database operations.\n\n Attributes:\n engine (Engine): The SQLAlchemy engine for the configured database.\n session (Session): The SQLAlchemy session for database interactions.\n\n Methods:\n __init__():\n Initializes the DatabaseManager by creating an SQLAlchemy engine and a session based on the\n specified database configuration from the 'settings' module.\n\n create_database_tables():\n Detects 'models.py' files in subdirectories of the 'apps' directory and creates corresponding\n database tables based on SQLAlchemy models.\n\n Example Usage:\n db_manager = DatabaseManager()\n\n # Create database tables for all detected models\n db_manager.create_database_tables()\n\n Example Usage2:\n DatabaseManager().create_database_tables()\n \"\"\"\n engine: create_engine = None\n session: Session = None\n\n @classmethod\n def __init__(cls):\n \"\"\"\n Initializes the DatabaseManager.\n\n This method creates an SQLAlchemy engine and a session based on the specified database configuration\n from the 'settings' module.\n \"\"\"\n global testing # Access the global testing flag\n db_config = settings.DATABASES.copy()\n if testing:\n db_config[\"database\"] = \"test_\" + db_config[\"database\"]\n\n if db_config[\"drivername\"] == \"sqlite\":\n project_root = Path(__file__).parent.parent # Assuming this is where your models are located\n db_config[\"database\"] = os.path.join(project_root, db_config[\"database\"])\n\n url = URL.create(**db_config)\n cls.engine = create_engine(url, connect_args={\"check_same_thread\": False})\n else:\n # for postgres\n cls.engine = create_engine(URL.create(**db_config))\n\n session = sessionmaker(autocommit=False, autoflush=False, bind=cls.engine)\n cls.session = session()\n\n @classmethod\n def create_test_database(cls):\n \"\"\"\n Create and configure a test database for use in tests.\n \"\"\"\n\n # Set the testing flag to True\n global testing\n testing = True\n\n # Reinitialize the DatabaseManager for testing\n cls.__init__()\n DatabaseManager.create_database_tables()\n\n @classmethod\n def drop_all_tables(cls):\n \"\"\"\n Drop all tables in the current database.\n \"\"\"\n # TODO drop tables for postgres too\n if cls.engine:\n metadata = MetaData()\n metadata.reflect(bind=cls.engine)\n for table_name, table in metadata.tables.items():\n table.drop(cls.engine)\n\n @classmethod\n def create_database_tables(cls):\n \"\"\"\n Create database tables based on SQLAlchemy models.\n\n This method detects 'models.py' files in subdirectories of the 'apps'\n directory and creates corresponding database tables based on SQLAlchemy\n models defined within those files.\n\n Returns:\n None\n \"\"\"\n script_directory = os.path.dirname(os.path.abspath(__file__))\n project_root = Path(script_directory).parent\n apps_directory = project_root / \"apps\"\n\n for app_dir in apps_directory.iterdir():\n if app_dir.is_dir():\n models_file = app_dir / \"models.py\"\n if models_file.exists():\n module_name = f\"apps.{app_dir.name}.models\"\n try:\n module = importlib.import_module(module_name)\n if hasattr(module, \"FastModel\") and hasattr(module.FastModel, \"metadata\"):\n module.FastModel.metadata.create_all(bind=cls.engine)\n except ImportError:\n pass\n\n @classmethod\n def 
get_testing_mode(cls):\n return testing"
}
] | from fastapi import status
from fastapi.testclient import TestClient
from apps.accounts.faker.data import FakeUser
from apps.accounts.models import UserVerification
from apps.accounts.services.authenticate import AccountService
from apps.accounts.services.password import PasswordManager
from apps.accounts.services.token import TokenService
from apps.accounts.services.user import UserManager
from apps.core.base_test_case import BaseTestCase
from apps.main import app
from config.database import DatabaseManager | 8,397 | assert expected_user['user_id'] == user.id
assert expected_user['email'] == user.email
assert expected_user['first_name'] == user.first_name
assert expected_user['last_name'] == user.last_name
assert expected_user['is_verified_email'] == user.is_verified_email
self.assert_datetime_format(expected_user['date_joined'])
self.assert_datetime_format(expected_user['updated_at'])
self.assert_datetime_format(expected_user['last_login'])
assert 'password' not in expected_user
assert 'is_active' not in expected_user
assert 'otp_key' not in expected_user
def test_retrieve_me_protected(self):
"""
Test endpoint is protected.
"""
response = self.client.get(self.current_user_endpoint)
assert response.status_code == status.HTTP_401_UNAUTHORIZED
def test_retrieve_single_user(self):
"""
        Test retrieving a single user by ID with the admin role. Only 'admin' can access it.
"""
# --- create an admin with access-token ---
admin, access_token = FakeUser.populate_admin()
user, _ = FakeUser.populate_user()
headers = {
"Authorization": f"Bearer {access_token}"
}
# --- request to fetch user data from token ---
response = self.client.get(f"{self.accounts_endpoint}{user.id}", headers=headers)
assert response.status_code == status.HTTP_200_OK
def test_retrieve_single_user_403(self):
"""
        Test retrieving a single user by ID with the user role. Only 'admin' can access it.
"""
# --- create user with access-token ---
user_1, access_token = FakeUser.populate_user()
user_2, _ = FakeUser.populate_user()
headers = {
"Authorization": f"Bearer {access_token}"
}
# --- request to fetch user data from token ---
response = self.client.get(f"{self.accounts_endpoint}{user_2.id}", headers=headers)
assert response.status_code == status.HTTP_403_FORBIDDEN
# ---------------------
# --- Test Payloads ---
# ---------------------
class TestUpdateUser(UserTestBase):
def test_update_current_user(self):
"""
        Test updating the current user with the "user" role.
"""
# --- create user ---
user, access_token = FakeUser.populate_user()
headers = {
"Authorization": f"Bearer {access_token}",
"Content-Type": "application/json"
}
payload = {
'first_name': FakeUser.fake.first_name(),
'last_name': FakeUser.fake.last_name()
}
# --- request ---
response = self.client.put(self.current_user_endpoint, headers=headers, json=payload)
assert response.status_code == status.HTTP_200_OK
# --- expected ---
expected_user = response.json().get('user')
assert expected_user['first_name'] == payload['first_name']
assert expected_user['last_name'] == payload['last_name']
self.assert_datetime_format(expected_user['updated_at'])
# TODO update current admin
# ---------------------
# --- Test Payloads ---
# ---------------------
class TestChanges(UserTestBase):
def test_change_password(self):
"""
        Test changing the password as the current user.
"""
# --- create a user ---
user, access_token = FakeUser.populate_user()
header = {
"Authorization": f"Bearer {access_token}",
"Content-Type": "application/json"
}
# --- request ---
payload = {
'current_password': FakeUser.password,
'password': FakeUser.password + "test",
'password_confirm': FakeUser.password + "test"
}
response = self.client.patch(self.change_password_endpoint, headers=header, json=payload)
assert response.status_code == status.HTTP_200_OK
# --- expected ---
expected = response.json()
assert expected['message'] == 'Your password has been changed.'
# --- expected user data, ensure other info wasn't changed ---
|
class UserTestBase(BaseTestCase):
current_user_endpoint = "/accounts/me/"
change_password_endpoint = "/accounts/me/password/"
change_email_endpoint = "/accounts/me/email/"
verify_change_email_endpoint = "/accounts/me/email/verify/"
accounts_endpoint = "/accounts/"
@classmethod
def setup_class(cls):
cls.client = TestClient(app)
DatabaseManager.create_test_database()
@classmethod
def teardown_class(cls):
DatabaseManager.drop_all_tables()
class TestRetrieveUser(UserTestBase):
def test_successful_retrieve_me(self):
"""
        Test retrieving the current user.
"""
# --- create user and generate token ---
user, access_token = FakeUser.populate_user()
headers = {
"Authorization": f"Bearer {access_token}"
}
# --- request to fetch user data from token ---
response = self.client.get(self.current_user_endpoint, headers=headers)
assert response.status_code == status.HTTP_200_OK
# --- expected user ---
expected_user = response.json().get('user')
assert expected_user['user_id'] == user.id
assert expected_user['email'] == user.email
assert expected_user['first_name'] == user.first_name
assert expected_user['last_name'] == user.last_name
assert expected_user['is_verified_email'] == user.is_verified_email
self.assert_datetime_format(expected_user['date_joined'])
self.assert_datetime_format(expected_user['updated_at'])
self.assert_datetime_format(expected_user['last_login'])
assert 'password' not in expected_user
assert 'is_active' not in expected_user
assert 'otp_key' not in expected_user
def test_retrieve_me_protected(self):
"""
Test endpoint is protected.
"""
response = self.client.get(self.current_user_endpoint)
assert response.status_code == status.HTTP_401_UNAUTHORIZED
def test_retrieve_single_user(self):
"""
        Test retrieving a single user by ID with the admin role. Only 'admin' can access it.
"""
# --- create an admin with access-token ---
admin, access_token = FakeUser.populate_admin()
user, _ = FakeUser.populate_user()
headers = {
"Authorization": f"Bearer {access_token}"
}
# --- request to fetch user data from token ---
response = self.client.get(f"{self.accounts_endpoint}{user.id}", headers=headers)
assert response.status_code == status.HTTP_200_OK
def test_retrieve_single_user_403(self):
"""
        Test retrieving a single user by ID with the user role. Only 'admin' can access it.
"""
# --- create user with access-token ---
user_1, access_token = FakeUser.populate_user()
user_2, _ = FakeUser.populate_user()
headers = {
"Authorization": f"Bearer {access_token}"
}
# --- request to fetch user data from token ---
response = self.client.get(f"{self.accounts_endpoint}{user_2.id}", headers=headers)
assert response.status_code == status.HTTP_403_FORBIDDEN
# ---------------------
# --- Test Payloads ---
# ---------------------
class TestUpdateUser(UserTestBase):
def test_update_current_user(self):
"""
        Test updating the current user with the "user" role.
"""
# --- create user ---
user, access_token = FakeUser.populate_user()
headers = {
"Authorization": f"Bearer {access_token}",
"Content-Type": "application/json"
}
payload = {
'first_name': FakeUser.fake.first_name(),
'last_name': FakeUser.fake.last_name()
}
# --- request ---
response = self.client.put(self.current_user_endpoint, headers=headers, json=payload)
assert response.status_code == status.HTTP_200_OK
# --- expected ---
expected_user = response.json().get('user')
assert expected_user['first_name'] == payload['first_name']
assert expected_user['last_name'] == payload['last_name']
self.assert_datetime_format(expected_user['updated_at'])
# TODO update current admin
# ---------------------
# --- Test Payloads ---
# ---------------------
class TestChanges(UserTestBase):
def test_change_password(self):
"""
        Test changing the password as the current user.
"""
# --- create a user ---
user, access_token = FakeUser.populate_user()
header = {
"Authorization": f"Bearer {access_token}",
"Content-Type": "application/json"
}
# --- request ---
payload = {
'current_password': FakeUser.password,
'password': FakeUser.password + "test",
'password_confirm': FakeUser.password + "test"
}
response = self.client.patch(self.change_password_endpoint, headers=header, json=payload)
assert response.status_code == status.HTTP_200_OK
# --- expected ---
expected = response.json()
assert expected['message'] == 'Your password has been changed.'
# --- expected user data, ensure other info wasn't changed --- | expected_user = UserManager.get_user(user.id) | 5 | 2023-11-06 04:46:03+00:00 | 12k |
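A minimal sketch of how this test presumably continues after the record's gold next_line (`expected_user = UserManager.get_user(user.id)`); the follow-up assertions are illustrative assumptions, not part of the record:

        expected_user = UserManager.get_user(user.id)
        # Assumed checks: profile fields should be untouched by a password change
        assert expected_user.email == user.email
        assert expected_user.first_name == user.first_name
        assert expected_user.last_name == user.last_name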
lukas-clarke/eight_sleep | custom_components/eight_sleep/pyEight/eight.py | [
{
"identifier": "NotAuthenticatedError",
"path": "custom_components/eight_sleep/pyEight/exceptions.py",
"snippet": "class NotAuthenticatedError(BaseEightSleepError):\n \"\"\"Exception for eight sleep authentication errors..\"\"\""
},
{
"identifier": "RequestError",
"path": "custom_components/eight_sleep/pyEight/exceptions.py",
"snippet": "class RequestError(BaseEightSleepError):\n \"\"\"Exception for eight sleep request failures.\"\"\""
},
{
"identifier": "EightUser",
"path": "custom_components/eight_sleep/pyEight/user.py",
"snippet": "class EightUser: # pylint: disable=too-many-public-methods\n \"\"\"Class for handling data of each eight user.\"\"\"\n\n def __init__(self, device: \"EightSleep\", user_id: str, side: str):\n \"\"\"Initialize user class.\"\"\"\n self.device = device\n self.user_id = user_id\n self.side = side\n self._user_profile: dict[str, Any] = {}\n self.trends: list[dict[str, Any]] = []\n self.intervals: list[dict[str, Any]] = []\n self.next_alarm = None\n self.bed_state_type = None\n\n # Variables to do dynamic presence\n self.presence: bool = False\n self.observed_low: int = 0\n\n def _get_trend(self, trend_num: int, keys: str | tuple[str, ...]) -> Any:\n \"\"\"Get trend value for specified key.\"\"\"\n if len(self.trends) < trend_num + 1:\n return None\n data = self.trends[-(trend_num + 1)]\n if isinstance(keys, str):\n return data.get(keys)\n if self.trends:\n for key in keys[:-1]:\n data = data.get(key, {})\n return data.get(keys[-1])\n\n def _get_fitness_score(self, trend_num: int, key: str) -> Any:\n \"\"\"Get fitness score for specified key.\"\"\"\n return self._get_trend(trend_num, (\"sleepFitnessScore\", key, \"score\"))\n\n def _get_sleep_score(self, interval_num: int) -> int | None:\n \"\"\"Return sleep score for a given interval.\"\"\"\n if len(self.intervals) < interval_num + 1:\n return None\n return self.intervals[interval_num].get(\"score\")\n\n def _interval_timeseries(self, interval_num: int) -> dict[str, Any] | None:\n \"\"\"Return timeseries interval if it exists.\"\"\"\n if len(self.intervals) < interval_num + 1:\n return None\n return self.intervals[interval_num].get(\"timeseries\", {})\n\n def _get_current_interval_property_value(self, key: str) -> int | float | None:\n \"\"\"Get current property from intervals.\"\"\"\n if (\n not (timeseries_data := self._interval_timeseries(0))\n or timeseries_data.get(key) is None\n ):\n return None\n return timeseries_data[key][-1][1]\n\n def _calculate_interval_data(\n self, interval_num: int, key: str, average_data: bool = True\n ) -> int | float | None:\n \"\"\"Calculate interval data.\"\"\"\n\n if (timeseries := self._interval_timeseries(interval_num)) is None or (\n data_list := timeseries.get(key)\n ) is None:\n return None\n total = 0\n for entry in data_list:\n total += entry[1]\n if not average_data:\n return total\n return total / len(data_list)\n\n def _session_date(self, interval_num: int) -> datetime | None:\n \"\"\"Get session date for given interval.\"\"\"\n if (\n len(self.intervals) < interval_num + 1\n or (session_date := self.intervals[interval_num].get(\"ts\")) is None\n ):\n return None\n date = datetime.strptime(session_date, DATE_TIME_ISO_FORMAT)\n return date.replace(tzinfo=ZoneInfo(\"UTC\"))\n\n def _sleep_breakdown(self, interval_num: int) -> dict[str, Any] | None:\n \"\"\"Return durations of sleep stages for given session.\"\"\"\n if len(self.intervals) < (interval_num + 1) or not (\n stages := self.intervals[interval_num].get(\"stages\")\n ):\n return None\n breakdown = {}\n for stage in stages:\n if stage[\"stage\"] in (\"out\"):\n continue\n if stage[\"stage\"] not in breakdown:\n breakdown[stage[\"stage\"]] = 0\n breakdown[stage[\"stage\"]] += stage[\"duration\"]\n\n return breakdown\n\n def _session_processing(self, interval_num: int) -> bool | None:\n \"\"\"Return processing state of given session.\"\"\"\n if len(self.intervals) < interval_num + 1:\n return None\n return self.intervals[interval_num].get(\"incomplete\", False)\n\n @property\n def user_profile(self) -> dict[str, Any] | None:\n 
\"\"\"Return userdata.\"\"\"\n return self._user_profile\n\n @property\n def bed_presence(self) -> bool:\n \"\"\"Return true/false for bed presence.\"\"\"\n return self.presence\n\n @property\n def target_heating_level(self) -> int | None:\n \"\"\"Return target heating/cooling level.\"\"\"\n return self.device.device_data.get(f\"{self.side}TargetHeatingLevel\")\n\n @property\n def heating_level(self) -> int | None:\n \"\"\"Return heating/cooling level.\"\"\"\n level = self.device.device_data.get(f\"{self.side}HeatingLevel\")\n # Update observed low\n if level is not None and level < self.observed_low:\n self.observed_low = level\n return level\n\n def past_heating_level(self, num) -> int:\n \"\"\"Return a heating level from the past.\"\"\"\n if num > 9 or len(self.device.device_data_history) < num + 1:\n return 0\n\n return self.device.device_data_history[num].get(f\"{self.side}HeatingLevel\", 0)\n\n def _now_heating_or_cooling(self, target_heating_level_check: bool) -> bool | None:\n \"\"\"Return true/false if heating or cooling is currently happening.\"\"\"\n key = f\"{self.side}NowHeating\"\n if (\n self.target_heating_level is None\n or (target := self.device.device_data.get(key)) is None\n ):\n return None\n return target and target_heating_level_check\n\n @property\n def now_heating(self) -> bool | None:\n \"\"\"Return current heating state.\"\"\"\n level = self.target_heating_level\n return self._now_heating_or_cooling(level is not None and level > 0)\n\n @property\n def now_cooling(self) -> bool | None:\n \"\"\"Return current cooling state.\"\"\"\n level = self.target_heating_level\n return self._now_heating_or_cooling(level is not None and level < 0)\n\n @property\n def heating_remaining(self) -> int | None:\n \"\"\"Return seconds of heat/cool time remaining.\"\"\"\n return self.device.device_data.get(f\"{self.side}HeatingDuration\")\n\n @property\n def last_seen(self) -> str | None:\n \"\"\"Return mattress last seen time.\n\n These values seem to be rarely updated correctly in the API.\n Don't expect accurate results from this property.\n \"\"\"\n if not (last_seen := self.device.device_data.get(f\"{self.side}PresenceEnd\")):\n return None\n return datetime.fromtimestamp(int(last_seen)).strftime(DATE_TIME_ISO_FORMAT)\n\n @property\n def heating_values(self) -> dict[str, Any]:\n \"\"\"Return a dict of all the current heating values.\"\"\"\n return {\n \"level\": self.heating_level,\n \"target\": self.target_heating_level,\n \"active\": self.now_heating,\n \"remaining\": self.heating_remaining,\n \"last_seen\": self.last_seen,\n }\n\n @property\n def current_session_date(self) -> datetime | None:\n \"\"\"Return date/time for start of last session data.\"\"\"\n return self._session_date(0)\n\n @property\n def current_session_processing(self) -> bool | None:\n \"\"\"Return processing state of current session.\"\"\"\n return self._session_processing(0)\n\n @property\n def current_sleep_stage(self) -> str | None:\n \"\"\"Return sleep stage for in-progress session.\"\"\"\n if (\n not self.intervals\n or not (stages := self.intervals[0].get(\"stages\"))\n or len(stages) < 2\n ):\n return None\n # API now always has an awake state last in the dict\n # so always pull the second to last stage while we are\n # in a processing state\n if self.current_session_processing:\n stage = stages[-2].get(\"stage\")\n else:\n stage = stages[-1].get(\"stage\")\n\n # UNRELIABLE... 
Removing for now.\n # Check sleep stage against last_seen time to make\n # sure we don't get stuck in a non-awake state.\n # delta_elap = datetime.fromtimestamp(time.time()) \\\n # - datetime.strptime(self.last_seen, 'DATE_TIME_ISO_FORMAT')\n # _LOGGER.debug('User elap: %s', delta_elap.total_seconds())\n # if stage != 'awake' and delta_elap.total_seconds() > 1800:\n # Bed hasn't seen us for 30min so set awake.\n # stage = 'awake'\n\n return stage\n\n @property\n def current_sleep_score(self) -> int | None:\n \"\"\"Return sleep score for in-progress session.\"\"\"\n return self._get_sleep_score(0)\n\n @property\n def current_sleep_fitness_score(self) -> int | None:\n \"\"\"Return sleep fitness score for latest session.\"\"\"\n return self._get_trend(0, (\"sleepFitnessScore\", \"total\"))\n\n @property\n def current_sleep_duration_score(self) -> int | None:\n \"\"\"Return sleep duration score for latest session.\"\"\"\n return self._get_fitness_score(0, \"sleepDurationSeconds\")\n\n @property\n def current_latency_asleep_score(self) -> int | None:\n \"\"\"Return latency asleep score for latest session.\"\"\"\n return self._get_fitness_score(0, \"latencyAsleepSeconds\")\n\n @property\n def current_latency_out_score(self) -> int | None:\n \"\"\"Return latency out score for latest session.\"\"\"\n return self._get_fitness_score(0, \"latencyOutSeconds\")\n\n @property\n def current_wakeup_consistency_score(self) -> int | None:\n \"\"\"Return wakeup consistency score for latest session.\"\"\"\n return self._get_fitness_score(0, \"wakeupConsistency\")\n\n @property\n def current_fitness_session_date(self) -> str | None:\n \"\"\"Return date/time for start of last session data.\"\"\"\n return self._get_trend(0, \"day\")\n\n @property\n def current_sleep_breakdown(self) -> dict[str, Any] | None:\n \"\"\"Return durations of sleep stages for in-progress session.\"\"\"\n return self._sleep_breakdown(0)\n\n @property\n def current_bed_temp(self) -> int | float | None:\n \"\"\"Return current bed temperature for in-progress session.\"\"\"\n return self._get_current_interval_property_value(\"tempBedC\")\n\n @property\n def current_room_temp(self) -> int | float | None:\n \"\"\"Return current room temperature for in-progress session.\"\"\"\n return self._get_current_interval_property_value(\"tempRoomC\")\n\n @property\n def current_tnt(self) -> int | None:\n \"\"\"Return current toss & turns for in-progress session.\"\"\"\n return cast(\n Optional[int], self._calculate_interval_data(0, \"tnt\", average_data=False)\n )\n\n @property\n def current_resp_rate(self) -> int | float | None:\n \"\"\"Return current respiratory rate for in-progress session.\"\"\"\n return self._get_current_interval_property_value(\"respiratoryRate\")\n\n @property\n def current_heart_rate(self) -> int | float | None:\n \"\"\"Return current heart rate for in-progress session.\"\"\"\n return self._get_current_interval_property_value(\"heartRate\")\n\n @property\n def current_values(self) -> dict[str, Any]:\n \"\"\"Return a dict of all the 'current' parameters.\"\"\"\n return {\n \"date\": self.current_session_date,\n \"score\": self.current_sleep_score,\n \"stage\": self.current_sleep_stage,\n \"breakdown\": self.current_sleep_breakdown,\n \"tnt\": self.current_tnt,\n \"bed_temp\": self.current_bed_temp,\n \"room_temp\": self.current_room_temp,\n \"resp_rate\": self.current_resp_rate,\n \"heart_rate\": self.current_heart_rate,\n \"processing\": self.current_session_processing,\n }\n\n @property\n def current_fitness_values(self) -> 
dict[str, Any]:\n \"\"\"Return a dict of all the 'current' fitness score parameters.\"\"\"\n return {\n \"date\": self.current_fitness_session_date,\n \"score\": self.current_sleep_fitness_score,\n \"duration\": self.current_sleep_duration_score,\n \"asleep\": self.current_latency_asleep_score,\n \"out\": self.current_latency_out_score,\n \"wakeup\": self.current_wakeup_consistency_score,\n }\n\n @property\n def last_session_date(self) -> datetime | None:\n \"\"\"Return date/time for start of last session data.\"\"\"\n return self._session_date(1)\n\n @property\n def last_session_processing(self) -> bool | None:\n \"\"\"Return processing state of current session.\"\"\"\n return self._session_processing(1)\n\n @property\n def last_sleep_score(self) -> int | None:\n \"\"\"Return sleep score from last complete sleep session.\"\"\"\n return self._get_sleep_score(1)\n\n @property\n def last_sleep_fitness_score(self) -> int | None:\n \"\"\"Return sleep fitness score for previous sleep session.\"\"\"\n return self._get_trend(1, (\"sleepFitnessScore\", \"total\"))\n\n @property\n def last_sleep_duration_score(self) -> int | None:\n \"\"\"Return sleep duration score for previous session.\"\"\"\n return self._get_fitness_score(1, \"sleepDurationSeconds\")\n\n @property\n def last_latency_asleep_score(self) -> int | None:\n \"\"\"Return latency asleep score for previous session.\"\"\"\n return self._get_fitness_score(1, \"latencyAsleepSeconds\")\n\n @property\n def last_latency_out_score(self) -> int | None:\n \"\"\"Return latency out score for previous session.\"\"\"\n return self._get_fitness_score(1, \"latencyOutSeconds\")\n\n @property\n def last_wakeup_consistency_score(self) -> int | None:\n \"\"\"Return wakeup consistency score for previous session.\"\"\"\n return self._get_fitness_score(1, \"wakeupConsistency\")\n\n @property\n def last_fitness_session_date(self) -> str | None:\n \"\"\"Return date/time for start of previous session data.\"\"\"\n return self._get_trend(1, \"day\")\n\n @property\n def last_sleep_breakdown(self) -> dict[str, Any] | None:\n \"\"\"Return durations of sleep stages for last complete session.\"\"\"\n return self._sleep_breakdown(1)\n\n @property\n def last_bed_temp(self) -> int | float | None:\n \"\"\"Return avg bed temperature for last session.\"\"\"\n return self._calculate_interval_data(1, \"tempBedC\")\n\n @property\n def last_room_temp(self) -> int | float | None:\n \"\"\"Return avg room temperature for last session.\"\"\"\n return self._calculate_interval_data(1, \"tempRoomC\")\n\n @property\n def last_tnt(self) -> int | None:\n \"\"\"Return toss & turns for last session.\"\"\"\n return cast(\n Optional[int], self._calculate_interval_data(1, \"tnt\", average_data=False)\n )\n\n @property\n def last_resp_rate(self) -> int | float | None:\n \"\"\"Return avg respiratory rate for last session.\"\"\"\n return self._calculate_interval_data(1, \"respiratoryRate\")\n\n @property\n def last_heart_rate(self) -> int | float | None:\n \"\"\"Return avg heart rate for last session.\"\"\"\n return self._calculate_interval_data(1, \"heartRate\")\n\n @property\n def last_values(self) -> dict[str, Any]:\n \"\"\"Return a dict of all the 'last' parameters.\"\"\"\n return {\n \"date\": self.last_session_date,\n \"score\": self.last_sleep_score,\n \"breakdown\": self.last_sleep_breakdown,\n \"tnt\": self.last_tnt,\n \"bed_temp\": self.last_bed_temp,\n \"room_temp\": self.last_room_temp,\n \"resp_rate\": self.last_resp_rate,\n \"heart_rate\": self.last_heart_rate,\n \"processing\": 
self.last_session_processing,\n }\n\n @property\n def last_fitness_values(self) -> dict[str, Any]:\n \"\"\"Return a dict of all the 'last' fitness score parameters.\"\"\"\n return {\n \"date\": self.last_fitness_session_date,\n \"score\": self.last_sleep_fitness_score,\n \"duration\": self.last_sleep_duration_score,\n \"asleep\": self.last_latency_asleep_score,\n \"out\": self.last_latency_out_score,\n \"wakeup\": self.last_wakeup_consistency_score,\n }\n\n def trend_sleep_score(self, date: str) -> int | None:\n \"\"\"Return trend sleep score for specified date.\"\"\"\n return next(\n (day.get(\"score\") for day in self.trends if day.get(\"day\") == date),\n None,\n )\n\n def sleep_fitness_score(self, date: str) -> int | None:\n \"\"\"Return sleep fitness score for specified date.\"\"\"\n return next(\n (\n day.get(\"sleepFitnessScore\", {}).get(\"total\")\n for day in self.trends\n if day.get(\"day\") == date\n ),\n None,\n )\n\n def heating_stats(self) -> None:\n \"\"\"Calculate some heating data stats.\"\"\"\n local_5 = []\n local_10 = []\n\n for i in range(0, 10):\n if (level := self.past_heating_level(i)) is None:\n continue\n if level == 0:\n _LOGGER.debug(\"Cant calculate stats yet...\")\n return\n if i < 5:\n local_5.append(level)\n local_10.append(level)\n\n _LOGGER.debug(\"%s Heating History: %s\", self.side, local_10)\n\n try:\n # Average of 5min on the history dict.\n fiveminavg = statistics.mean(local_5)\n tenminavg = statistics.mean(local_10)\n _LOGGER.debug(\"%s Heating 5 min avg: %s\", self.side, fiveminavg)\n _LOGGER.debug(\"%s Heating 10 min avg: %s\", self.side, tenminavg)\n\n # Standard deviation\n fivestdev = statistics.stdev(local_5)\n tenstdev = statistics.stdev(local_10)\n _LOGGER.debug(\"%s Heating 5 min stdev: %s\", self.side, fivestdev)\n _LOGGER.debug(\"%s Heating 10 min stdev: %s\", self.side, tenstdev)\n\n # Variance\n fivevar = statistics.variance(local_5)\n tenvar = statistics.variance(local_10)\n _LOGGER.debug(\"%s Heating 5 min variance: %s\", self.side, fivevar)\n _LOGGER.debug(\"%s Heating 10 min variance: %s\", self.side, tenvar)\n except statistics.StatisticsError:\n _LOGGER.debug(\"Cant calculate stats yet...\")\n\n # Other possible options for exploration....\n # Pearson correlation coefficient\n # Spearman rank correlation\n # Kendalls Tau\n\n def dynamic_presence(self) -> None:\n \"\"\"\n Determine presence based on bed heating level and end presence\n time reported by the api.\n\n Idea originated from Alex Lee Yuk Cheung SmartThings Code.\n \"\"\"\n\n # self.heating_stats()\n\n # Method needs to be different for pod since it doesn't rest at 0\n # - Working idea is to track the low and adjust the scale so that low is 0\n # - Buffer changes while cooling/heating is active\n if self.target_heating_level is None or self.heating_level is None:\n return\n level_zero = self.observed_low * (-1)\n working_level = self.heating_level + level_zero\n if self.device.is_pod:\n if not self.presence:\n if working_level > 50:\n if not self.now_cooling and not self.now_heating:\n self.presence = True\n elif self.target_heating_level > 0:\n # Heating\n if working_level - self.target_heating_level >= 8:\n self.presence = True\n elif self.target_heating_level < 0:\n # Cooling\n if self.heating_level + self.target_heating_level >= 8:\n self.presence = True\n elif working_level > 25:\n # Catch rising edge\n if (\n self.past_heating_level(0) - self.past_heating_level(1) >= 2\n and self.past_heating_level(1) - self.past_heating_level(2) >= 2\n and 
self.past_heating_level(2) - self.past_heating_level(3) >= 2\n ):\n # Values are increasing so we are likely in bed\n if not self.now_heating:\n self.presence = True\n elif working_level - self.target_heating_level >= 8:\n self.presence = True\n\n elif self.presence:\n if working_level <= 15:\n # Failsafe, very slow\n self.presence = False\n elif working_level < 35: # Threshold is expiremental for now\n if (\n self.past_heating_level(0) - self.past_heating_level(1) < 0\n and self.past_heating_level(1) - self.past_heating_level(2) < 0\n and self.past_heating_level(2) - self.past_heating_level(3) < 0\n ):\n # Values are decreasing so we are likely out of bed\n self.presence = False\n else:\n # Method for 0 resting state\n if not self.presence:\n if self.heating_level > 50:\n # Can likely make this better\n if not self.now_heating:\n self.presence = True\n elif self.heating_level - self.target_heating_level >= 8:\n self.presence = True\n elif self.heating_level > 25:\n # Catch rising edge\n if (\n self.past_heating_level(0) - self.past_heating_level(1) >= 2\n and self.past_heating_level(1) - self.past_heating_level(2) >= 2\n and self.past_heating_level(2) - self.past_heating_level(3) >= 2\n ):\n # Values are increasing so we are likely in bed\n if not self.now_heating:\n self.presence = True\n elif self.heating_level - self.target_heating_level >= 8:\n self.presence = True\n\n elif self.presence:\n if self.heating_level <= 15:\n # Failsafe, very slow\n self.presence = False\n elif self.heating_level < 50:\n if (\n self.past_heating_level(0) - self.past_heating_level(1) < 0\n and self.past_heating_level(1) - self.past_heating_level(2) < 0\n and self.past_heating_level(2) - self.past_heating_level(3) < 0\n ):\n # Values are decreasing so we are likely out of bed\n self.presence = False\n\n # Last seen can lag real-time by up to 35min so this is\n # mostly a backup to using the heat values.\n # seen_delta = datetime.fromtimestamp(time.time()) \\\n # - datetime.strptime(self.last_seen, 'DATE_TIME_ISO_FORMAT')\n # _LOGGER.debug('%s Last seen time delta: %s', self.side,\n # seen_delta.total_seconds())\n # if self.presence and seen_delta.total_seconds() > 2100:\n # self.presence = False\n\n _LOGGER.debug(\"%s Presence Results: %s\", self.side, self.presence)\n\n async def update_user(self) -> None:\n \"\"\"Update all user data.\"\"\"\n await self.update_intervals_data()\n\n now = datetime.today()\n start = now - timedelta(days=2)\n end = now + timedelta(days=2)\n\n await self.update_trend_data(\n start.strftime(DATE_FORMAT), end.strftime(DATE_FORMAT)\n )\n await self.update_routines_data()\n\n self.bed_state_type = await self.get_bed_state_type()\n\n async def get_bed_state_type(self) -> str:\n \"\"\"Gets the bed state.\"\"\"\n url = APP_API_URL + f\"v1/users/{self.user_id}/temperature\"\n data = await self.device.api_request(\"GET\", url)\n return data[\"currentState\"][\"type\"]\n\n async def set_heating_level(self, level: int, duration: int = 0) -> None:\n \"\"\"Update heating data json.\"\"\"\n url = APP_API_URL + f\"v1/users/{self.user_id}/temperature\"\n data_for_duration = {\"timeBased\": {\"level\": level, \"durationSeconds\": duration}}\n data_for_level = {\"currentLevel\": level}\n # Catch bad low inputs\n level = max(-100, level)\n # Catch bad high inputs\n level = min(100, level)\n\n await self.turn_on_side() # Turn on side before setting temperature\n await self.device.api_request(\n \"PUT\", url, data=data_for_level\n ) # Set heating level before duration\n await 
self.device.api_request(\"PUT\", url, data=data_for_duration)\n\n async def set_smart_heating_level(self, level: int, sleep_stage: str) -> None:\n \"\"\"Will set the temperature level at a smart sleep stage\"\"\"\n if sleep_stage not in POSSIBLE_SLEEP_STAGES:\n raise Exception(\n f\"Invalid sleep stage {sleep_stage}. Should be one of {POSSIBLE_SLEEP_STAGES}\"\n )\n url = APP_API_URL + f\"v1/users/{self.user_id}/temperature\"\n data = await self.device.api_request(\"GET\", url)\n sleep_stages_levels = data[\"smart\"]\n # Catch bad low inputs\n level = max(-100, level)\n # Catch bad high inputs\n level = min(100, level)\n sleep_stages_levels[sleep_stage] = level\n data = {\"smart\": sleep_stages_levels}\n await self.device.api_request(\"PUT\", url, data=data)\n\n async def increment_heating_level(self, offset: int) -> None:\n \"\"\"Increment heating level with offset\"\"\"\n url = APP_API_URL + f\"v1/users/{self.user_id}/temperature\"\n current_level = await self.get_current_heating_level()\n new_level = current_level + offset\n # Catch bad low inputs\n new_level = max(-100, new_level)\n # Catch bad high inputs\n new_level = min(100, new_level)\n\n data_for_level = {\"currentLevel\": new_level}\n\n await self.device.api_request(\"PUT\", url, data=data_for_level)\n\n async def get_current_heating_level(self) -> int:\n url = APP_API_URL + f\"v1/users/{self.user_id}/temperature\"\n resp = await self.device.api_request(\"GET\", url)\n return int(resp[\"currentLevel\"])\n\n async def turn_on_side(self):\n \"\"\"Turns on the side of the user\"\"\"\n url = APP_API_URL + f\"v1/users/{self.user_id}/temperature\"\n data = {\"currentState\": {\"type\": \"smart\"}}\n await self.device.api_request(\"PUT\", url, data=data)\n\n async def turn_off_side(self):\n \"\"\"Turns on the side of the user\"\"\"\n url = APP_API_URL + f\"v1/users/{self.user_id}/temperature\"\n data = {\"currentState\": {\"type\": \"off\"}}\n await self.device.api_request(\"PUT\", url, data=data)\n\n async def set_away_mode(self, action: str):\n \"\"\"Sets the away mode. 
The action can either be 'start' or 'stop'\"\"\"\n url = APP_API_URL + f\"v1/users/{self.user_id}/away-mode\"\n # Setting time to UTC of 24 hours ago to get API to trigger immediately\n now = str(\n (datetime.utcnow() - timedelta(days=1)).strftime(\"%Y-%m-%dT%H:%M:%S.%f\")[\n :-3\n ]\n + \"Z\"\n )\n if action != \"start\" and action != \"end\":\n raise Exception(f\"Invalid action: {action}\")\n data = {\"awayPeriod\": {action: now}}\n await self.device.api_request(\"PUT\", url, data=data)\n\n async def update_user_profile(self) -> None:\n \"\"\"Update user profile data.\"\"\"\n url = f\"{CLIENT_API_URL}/users/{self.user_id}\"\n profile_data = await self.device.api_request(\"get\", url)\n if profile_data is None:\n _LOGGER.error(\"Unable to fetch user profile data for %s\", self.user_id)\n else:\n self._user_profile = profile_data[\"user\"]\n\n async def update_trend_data(self, start_date: str, end_date: str) -> None:\n \"\"\"Update trends data json for specified time period.\"\"\"\n url = f\"{CLIENT_API_URL}/users/{self.user_id}/trends\"\n params = {\n \"tz\": self.device.timezone,\n \"from\": start_date,\n \"to\": end_date,\n # 'include-main': 'true'\n }\n trend_data = await self.device.api_request(\"get\", url, params=params)\n self.trends = trend_data.get(\"days\", [])\n\n async def update_intervals_data(self) -> None:\n \"\"\"Update intervals data json for specified time period.\"\"\"\n url = f\"{CLIENT_API_URL}/users/{self.user_id}/intervals\"\n\n intervals = await self.device.api_request(\"get\", url)\n self.intervals = intervals.get(\"intervals\", [])\n\n async def update_routines_data(self) -> None:\n url = APP_API_URL + f\"v2/users/{self.user_id}/routines\"\n resp = await self.device.api_request(\"GET\", url)\n\n try:\n nextTimestamp = resp[\"state\"][\"nextAlarm\"][\"nextTimestamp\"]\n except KeyError:\n nextTimestamp = None\n\n if not nextTimestamp:\n self.next_alarm = None\n return\n\n date_format = \"%Y-%m-%dT%H:%M:%SZ\"\n # Convert string to datetime object\n datetime_object = datetime.strptime(nextTimestamp, date_format)\n # Set the timezone to UTC\n utc_timezone = pytz.UTC\n datetime_object_utc = datetime_object.replace(tzinfo=utc_timezone)\n # Set the timezone to a specific timezone\n timezone = pytz.timezone(self.device.timezone)\n self.next_alarm = datetime_object_utc.astimezone(timezone)"
},
{
"identifier": "Token",
"path": "custom_components/eight_sleep/pyEight/structs.py",
"snippet": "class Token:\n bearer_token: str\n expiration: float\n main_id: str"
}
] | import asyncio
import atexit
import logging
import time
import httpx
from datetime import datetime
from typing import Any
from aiohttp.client import ClientError, ClientSession, ClientTimeout
from .constants import *
from .exceptions import NotAuthenticatedError, RequestError
from .user import EightUser
from .structs import Token | 8,450 | """
from __future__ import annotations
_LOGGER = logging.getLogger(__name__)
CLIENT_TIMEOUT = ClientTimeout(total=DEFAULT_TIMEOUT)
class EightSleep:
"""Eight sleep API object."""
def __init__(
self,
email: str,
password: str,
timezone: str,
client_id: str = None,
client_secret: str = None,
client_session: ClientSession | None = None,
check_auth: bool = False,
) -> None:
"""Initialize eight sleep class."""
self._email = email
self._password = password
# If client_id isn't set, use the default value
if not client_id:
client_id = "0894c7f33bb94800a03f1f4df13a4f38"
self._client_id = client_id
        # client_secret isn't required for the current Eight Sleep API auth,
        # but it can't be an empty value, so set a placeholder string if not provided
if not client_secret:
client_secret = "ASDF"
self._client_secret = client_secret
self.timezone = timezone
self.users: dict[str, EightUser] = {}
self._user_id: str | None = None
self._token: str | None = None
self._token_expiration: datetime | None = None
self._device_ids: list[str] = []
self._is_pod: bool = False
        # Rolling list holding up to 10 raw device_data snapshots
self._device_json_list: list[dict] = []
self._api_session = client_session
self._internal_session: bool = False
if check_auth:
self._get_auth()
# Stop on exit
atexit.register(self.at_exit)
def at_exit(self) -> None:
"""Run at exit."""
try:
loop = asyncio.get_running_loop()
asyncio.run_coroutine_threadsafe(self.stop(), loop).result()
except RuntimeError:
asyncio.run(self.stop())
@property
def token(self) -> str | None:
"""Return session token."""
return self._token
@property
def user_id(self) -> str | None:
"""Return user ID of the logged in user."""
return self._user_id
@property
def device_id(self) -> str | None:
"""Return devices id."""
return self._device_ids[0]
@property
def device_data(self) -> dict:
"""Return current raw device_data json."""
return self._device_json_list[0]
@property
def device_data_history(self) -> list[dict]:
"""Return full raw device_data json list."""
return self._device_json_list
@property
def is_pod(self) -> bool:
"""Return if device is a POD."""
return self._is_pod
async def _get_auth(self) -> Token:
data = {
"client_id": self._client_id,
"client_secret": self._client_secret,
"grant_type": "password",
"username": self._email,
"password": self._password,
}
async with httpx.AsyncClient() as client:
response = await client.post(
AUTH_URL,
headers=DEFAULT_AUTH_HEADERS,
json=data,
timeout=DEFAULT_TIMEOUT,
)
if response.status_code == 200:
access_token_str = response.json()["access_token"]
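                # "expires_in" is a relative lifetime in seconds; store it as an absolute epoch timestamp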
expiration_seconds_int = (
float(response.json()["expires_in"]) + time.time()
)
main_id = response.json()["userId"]
return Token(access_token_str, expiration_seconds_int, main_id)
else:
| """
pyeight.eight
~~~~~~~~~~~~~~~~~~~~
Provides api for Eight Sleep
Copyright (c) 2022-2023 <https://github.com/lukas-clarke/pyEight>
Licensed under the MIT license.
"""
from __future__ import annotations
_LOGGER = logging.getLogger(__name__)
CLIENT_TIMEOUT = ClientTimeout(total=DEFAULT_TIMEOUT)
class EightSleep:
"""Eight sleep API object."""
def __init__(
self,
email: str,
password: str,
timezone: str,
client_id: str = None,
client_secret: str = None,
client_session: ClientSession | None = None,
check_auth: bool = False,
) -> None:
"""Initialize eight sleep class."""
self._email = email
self._password = password
# If client_id isn't set, use the default value
if not client_id:
client_id = "0894c7f33bb94800a03f1f4df13a4f38"
self._client_id = client_id
        # client_secret isn't required for the current Eight Sleep API auth,
        # but it can't be an empty value, so set a placeholder string if not provided
if not client_secret:
client_secret = "ASDF"
self._client_secret = client_secret
self.timezone = timezone
self.users: dict[str, EightUser] = {}
self._user_id: str | None = None
self._token: str | None = None
self._token_expiration: datetime | None = None
self._device_ids: list[str] = []
self._is_pod: bool = False
        # Rolling list holding up to 10 raw device_data snapshots
self._device_json_list: list[dict] = []
self._api_session = client_session
self._internal_session: bool = False
if check_auth:
self._get_auth()
# Stop on exit
atexit.register(self.at_exit)
def at_exit(self) -> None:
"""Run at exit."""
try:
loop = asyncio.get_running_loop()
asyncio.run_coroutine_threadsafe(self.stop(), loop).result()
except RuntimeError:
asyncio.run(self.stop())
@property
def token(self) -> str | None:
"""Return session token."""
return self._token
@property
def user_id(self) -> str | None:
"""Return user ID of the logged in user."""
return self._user_id
@property
def device_id(self) -> str | None:
"""Return devices id."""
return self._device_ids[0]
@property
def device_data(self) -> dict:
"""Return current raw device_data json."""
return self._device_json_list[0]
@property
def device_data_history(self) -> list[dict]:
"""Return full raw device_data json list."""
return self._device_json_list
@property
def is_pod(self) -> bool:
"""Return if device is a POD."""
return self._is_pod
async def _get_auth(self) -> Token:
data = {
"client_id": self._client_id,
"client_secret": self._client_secret,
"grant_type": "password",
"username": self._email,
"password": self._password,
}
async with httpx.AsyncClient() as client:
response = await client.post(
AUTH_URL,
headers=DEFAULT_AUTH_HEADERS,
json=data,
timeout=DEFAULT_TIMEOUT,
)
if response.status_code == 200:
access_token_str = response.json()["access_token"]
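                # "expires_in" is a relative lifetime in seconds; store it as an absolute epoch timestamp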
expiration_seconds_int = (
float(response.json()["expires_in"]) + time.time()
)
main_id = response.json()["userId"]
return Token(access_token_str, expiration_seconds_int, main_id)
else: | raise RequestError( | 1 | 2023-11-01 16:15:52+00:00 | 12k |
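A minimal sketch of how the non-200 branch presumably continues from this record's gold next_line (`raise RequestError(`); the exact message text is an illustrative assumption:

                raise RequestError(
                    f"Authentication failed with status {response.status_code}: {response.text}"
                )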
gickowtf/pixoo-homeassistant | custom_components/divoom_pixoo/pixoo64/_pixoo.py | [
{
"identifier": "Palette",
"path": "custom_components/divoom_pixoo/pixoo64/_colors.py",
"snippet": "class Palette:\n BLACK = COLOR_BLACK\n WHITE = COLOR_WHITE"
},
{
"identifier": "retrieve_glyph",
"path": "custom_components/divoom_pixoo/pixoo64/_font.py",
"snippet": "def retrieve_glyph(character, font):\n if character in font:\n return font[character]\n\n return None"
},
{
"identifier": "FONT_GICKO",
"path": "custom_components/divoom_pixoo/pixoo64/_font.py",
"snippet": "FONT_GICKO = {'0': [0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 6],\n '1': [0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 4],\n '2': [0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 6],\n '3': [1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 6],\n '4': [0, 1, 1, 1, 1, 0,\n 1, 1, 0, 1, 1, 0,\n 1, 0, 0, 1, 1, 0,\n 1, 0, 0, 1, 1, 0,\n 1, 1, 1, 1, 1, 1,\n 0, 0, 0, 1, 1, 0, 6],\n '5': [1, 1, 1, 1, 1, 0,\n 1, 0, 0, 0, 0, 0,\n 1, 1, 1, 1, 1, 0,\n 0, 0, 0, 1, 1, 1,\n 1, 0, 0, 1, 1, 1,\n 0, 1, 1, 1, 1, 0, 6],\n '6': [0, 1, 1, 1, 1, 0,\n 1, 1, 0, 0, 0, 0,\n 1, 1, 1, 1, 1, 0,\n 1, 1, 0, 0, 1, 1,\n 1, 1, 0, 0, 1, 1,\n 0, 1, 1, 1, 1, 0, 6],\n '7': [1, 1, 1, 1, 1, 1,\n 0, 0, 0, 0, 1, 1,\n 0, 0, 0, 1, 1, 0,\n 0, 0, 1, 1, 0, 0,\n 0, 1, 1, 1, 0, 0,\n 0, 1, 1, 1, 0, 0, 6],\n '8': [0, 1, 1, 1, 1, 0,\n 1, 0, 0, 1, 1, 1,\n 0, 1, 1, 1, 1, 0,\n 1, 0, 0, 1, 1, 1,\n 1, 0, 0, 1, 1, 1,\n 0, 1, 1, 1, 1, 0, 6],\n '9': [0, 1, 1, 1, 1, 0,\n 1, 0, 0, 1, 1, 1,\n 1, 0, 0, 1, 1, 1,\n 0, 1, 1, 1, 1, 1,\n 0, 0, 0, 1, 1, 1,\n 0, 1, 1, 1, 1, 0, 6],\n ' ': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2],\n 'A': [0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 6],\n 'B': [1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 6],\n 'C': [0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 6],\n 'D': [1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 6],\n 'E': [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 5],\n 'F': [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 5],\n 'G': [0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 6],\n 'H': [1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 5],\n 'I': [1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 4],\n 'J': [0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 6],\n 'K': [1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 6],\n 'L': [1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 4],\n 'M': [1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 6],\n 'N': [1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 6],\n 'O': [0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 6],\n 'P': [1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 6],\n 'Q': [0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 6],\n 'R': [1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 6],\n 'S': [0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 6],\n 'T': [1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 
1, 1, 0, 0, 6],\n 'U': [1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 6],\n 'V': [1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 6],\n 'W': [1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 6],\n 'X': [1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 6],\n 'Y': [1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 6],\n 'Z': [1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 6],\n ':': [0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 2],\n '?': [0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 6],\n '!': [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 2],\n '.': [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2],\n '-': [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 3]\n }"
},
{
"identifier": "FONT_PICO_8",
"path": "custom_components/divoom_pixoo/pixoo64/_font.py",
"snippet": "FONT_PICO_8 = {'0': [1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 3], '1': [1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 3],\n '2': [1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 3], '3': [1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 3],\n '4': [1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 3], '5': [1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 3],\n '6': [1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 3], '7': [1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 3],\n '8': [1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 3], '9': [1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 3],\n 'a': [0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 3], 'b': [0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 3],\n 'c': [0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 3], 'd': [0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 3],\n 'e': [0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 3], 'f': [0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 3],\n 'g': [0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 3], 'h': [0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 3],\n 'i': [0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 3], 'j': [0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 3],\n 'k': [0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 3], 'l': [0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 3],\n 'm': [0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 3], 'n': [0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 3],\n 'o': [0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 3], 'p': [0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 3],\n 'q': [0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 3], 'r': [0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 3],\n 's': [0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 3], 't': [0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 3],\n 'u': [0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 3], 'v': [0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 3],\n 'w': [0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 3], 'x': [0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 3],\n 'y': [0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 3], 'z': [0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 3],\n 'A': [1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 3], 'B': [1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 3],\n 'C': [0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 3], 'D': [1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 3],\n 'E': [1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 3], 'F': [1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 3],\n 'G': [0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 3], 'H': [1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 3],\n 'I': [1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 3], 'J': [1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 3],\n 'K': [1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 3], 'L': [1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 3],\n 'M': [1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 3], 'N': [1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 3],\n 'O': [0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 3], 'P': [1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 3],\n 'Q': [0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 3], 'R': [1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 3],\n 'S': [0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 3], 'T': [1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 3],\n 'U': [1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 3], 'V': [1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 3],\n 'W': [1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 5], 'X': [1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 3],\n 'Y': [1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 3], 'Z': [1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 3],\n '!': [0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 3], \"'\": [0, 1, 0, 1, 3],\n '(': 
[0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 3], ')': [0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 3],\n '+': [0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 3], ',': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 3],\n '-': [0, 0, 0, 0, 0, 0, 1, 1, 1, 3], '<': [0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 3],\n '=': [0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 3], '>': [1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 3],\n '?': [1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 3], '[': [1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 3],\n ']': [0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 3], '^': [0, 1, 0, 1, 0, 1, 3],\n '_': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 3], ':': [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 3],\n ';': [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 3], '.': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 3],\n '/': [0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 3], '{': [0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 3],\n '|': [0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 3], '}': [1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 3],\n '~': [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 3], '$': [1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 3],\n '@': [0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 3], '%': [1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 3],\n ' ': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3]\n }"
}
] | import base64
import json
import requests
from enum import IntEnum
from PIL import Image, ImageOps
from ._colors import Palette
from ._font import retrieve_glyph, FONT_GICKO, FONT_PICO_8 | 10,780 |
def draw_filled_rectangle_from_top_left_to_bottom_right_rgb(self,
top_left_x=0,
top_left_y=0,
bottom_right_x=1,
bottom_right_y=1,
r=0, g=0, b=0):
self.draw_filled_rectangle((top_left_x, top_left_y),
(bottom_right_x, bottom_right_y), (r, g, b))
def draw_image(self, image_path_or_object, xy=(0, 0),
image_resample_mode=ImageResampleMode.PIXEL_ART,
pad_resample=False):
image = image_path_or_object if isinstance(image_path_or_object,
Image.Image) else Image.open(
image_path_or_object)
size = image.size
width = size[0]
height = size[1]
# See if it needs to be scaled/resized to fit the display
if width > self.size or height > self.size:
if pad_resample:
image = ImageOps.pad(image, (self.size, self.size),
image_resample_mode)
else:
image.thumbnail((self.size, self.size), image_resample_mode)
if self.debug:
print(
                    f'[.] Resized image to fit on screen (preserving aspect ratio): "{image_path_or_object}" ({width}, {height}) '
f'-> ({image.size[0]}, {image.size[1]})')
# Convert the loaded image to RGB
rgb_image = image.convert('RGB')
# Iterate over all pixels in the image that are left and buffer them
for y in range(image.size[1]):
for x in range(image.size[0]):
location = (x, y)
placed_x = x + xy[0]
if self.size - 1 < placed_x or placed_x < 0:
continue
placed_y = y + xy[1]
if self.size - 1 < placed_y or placed_y < 0:
continue
self.draw_pixel((placed_x, placed_y),
rgb_image.getpixel(location))
def draw_image_at_location(self, image_path_or_object, x, y,
image_resample_mode=ImageResampleMode.PIXEL_ART):
self.draw_image(image_path_or_object, (x, y), image_resample_mode)
def draw_line(self, start_xy, stop_xy, rgb=Palette.WHITE):
line = set()
# Calculate the amount of steps needed between the points to draw a nice line
amount_of_steps = minimum_amount_of_steps(start_xy, stop_xy)
# Iterate over them and create a nice set of pixels
for step in range(amount_of_steps):
if amount_of_steps == 0:
interpolant = 0
else:
interpolant = step / amount_of_steps
# Add a pixel as a rounded location
line.add(
round_location(lerp_location(start_xy, stop_xy, interpolant)))
# Draw the actual pixel line
for pixel in line:
self.draw_pixel(pixel, rgb)
def draw_line_from_start_to_stop_rgb(self, start_x, start_y, stop_x, stop_y,
r=255, g=255, b=255):
self.draw_line((start_x, start_y), (stop_x, stop_y), (r, g, b))
def draw_pixel(self, xy, rgb):
# If it's not on the screen, we're not going to bother
if xy[0] < 0 or xy[0] >= self.size or xy[1] < 0 or xy[1] >= self.size:
if self.debug:
limit = self.size - 1
print(
                    f'[!] Invalid coordinates given: ({xy[0]}, {xy[1]}) (maximum coordinates are ({limit}, {limit}))')
return
# Calculate the index
index = xy[0] + (xy[1] * self.size)
# Color it
self.draw_pixel_at_index(index, rgb)
def draw_pixel_at_index(self, index, rgb):
# Validate the index
if index < 0 or index >= self.pixel_count:
if self.debug:
print(f'[!] Invalid index given: {index} (maximum index is {self.pixel_count - 1})')
return
# Clamp the color, just to be safe
rgb = clamp_color(rgb)
# Move to place in array
index = index * 3
self.__buffer[index] = rgb[0]
self.__buffer[index + 1] = rgb[1]
self.__buffer[index + 2] = rgb[2]
def draw_pixel_at_index_rgb(self, index, r, g, b):
self.draw_pixel_at_index(index, (r, g, b))
def draw_pixel_at_location_rgb(self, x, y, r, g, b):
self.draw_pixel((x, y), (r, g, b))
def draw_character(self, character, xy=(0, 0), rgb=Palette.WHITE, font=None):
if font is None:
|
def clamp(value, minimum=0, maximum=255):
if value > maximum:
return maximum
if value < minimum:
return minimum
return value
def clamp_color(rgb):
return clamp(rgb[0]), clamp(rgb[1]), clamp(rgb[2])
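# The helpers below do simple linear interpolation; draw_line uses them to
# rasterize a line segment into a set of discrete (rounded) pixel locations.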
def lerp(start, end, interpolant):
return start + interpolant * (end - start)
def lerp_location(xy1, xy2, interpolant):
return lerp(xy1[0], xy2[0], interpolant), lerp(xy1[1], xy2[1], interpolant)
def minimum_amount_of_steps(xy1, xy2):
return max(abs(xy1[0] - xy2[0]), abs(xy1[1] - xy2[1]))
def rgb_to_hex_color(rgb):
return f'#{rgb[0]:0>2X}{rgb[1]:0>2X}{rgb[2]:0>2X}'
def round_location(xy):
return round(xy[0]), round(xy[1])
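# Device-facing enums: Channel appears to select the Pixoo's display channel,
# while ImageResampleMode maps directly onto a PIL resampling filter.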
class Channel(IntEnum):
FACES = 0
CLOUD = 1
VISUALIZER = 2
CUSTOM = 3
class ImageResampleMode(IntEnum):
PIXEL_ART = Image.NEAREST
class TextScrollDirection(IntEnum):
LEFT = 0
RIGHT = 1
class Pixoo:
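    # __buffer holds the flat RGB framebuffer (three bytes per pixel); __counter is
    # compared against __refresh_counter_limit in __init__ to decide when the
    # connection to the device should be refreshed.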
__buffer = []
__buffers_send = 0
__counter = 0
__refresh_counter_limit = 32
def __init__(self, address, size=64, debug=False, refresh_connection_automatically=True):
assert size in [16, 32, 64], \
'Invalid screen size in pixels given. ' \
'Valid options are 16, 32, and 64'
self.refresh_connection_automatically = refresh_connection_automatically
self.address = address
self.debug = debug
self.size = size
# Total number of pixels
self.pixel_count = self.size * self.size
# Generate URL
self.__url = 'http://{0}/post'.format(address)
# Prefill the buffer
self.fill()
# Retrieve the counter
self.__load_counter()
# Resetting if needed
if self.refresh_connection_automatically and self.__counter > self.__refresh_counter_limit:
self.__reset_counter()
def clear(self, rgb: object = Palette.BLACK) -> object:
self.fill(rgb)
def clear_rgb(self, r, g, b):
self.fill_rgb(r, g, b)
def draw_character_at_location_rgb(self, character, x=0, y=0, r=255, g=255,
b=255):
self.draw_character(character, (x, y), (r, g, b))
def draw_filled_rectangle(self, top_left_xy=(0, 0), bottom_right_xy=(1, 1),
rgb=Palette.BLACK):
for y in range(top_left_xy[1], bottom_right_xy[1] + 1):
for x in range(top_left_xy[0], bottom_right_xy[0] + 1):
self.draw_pixel((x, y), rgb)
def draw_filled_rectangle_from_top_left_to_bottom_right_rgb(self,
top_left_x=0,
top_left_y=0,
bottom_right_x=1,
bottom_right_y=1,
r=0, g=0, b=0):
self.draw_filled_rectangle((top_left_x, top_left_y),
(bottom_right_x, bottom_right_y), (r, g, b))
def draw_image(self, image_path_or_object, xy=(0, 0),
image_resample_mode=ImageResampleMode.PIXEL_ART,
pad_resample=False):
image = image_path_or_object if isinstance(image_path_or_object,
Image.Image) else Image.open(
image_path_or_object)
size = image.size
width = size[0]
height = size[1]
# See if it needs to be scaled/resized to fit the display
if width > self.size or height > self.size:
if pad_resample:
image = ImageOps.pad(image, (self.size, self.size),
image_resample_mode)
else:
image.thumbnail((self.size, self.size), image_resample_mode)
if self.debug:
print(
                    f'[.] Resized image to fit on screen (preserving aspect ratio): "{image_path_or_object}" ({width}, {height}) '
f'-> ({image.size[0]}, {image.size[1]})')
# Convert the loaded image to RGB
rgb_image = image.convert('RGB')
# Iterate over all pixels in the image that are left and buffer them
for y in range(image.size[1]):
for x in range(image.size[0]):
location = (x, y)
placed_x = x + xy[0]
if self.size - 1 < placed_x or placed_x < 0:
continue
placed_y = y + xy[1]
if self.size - 1 < placed_y or placed_y < 0:
continue
self.draw_pixel((placed_x, placed_y),
rgb_image.getpixel(location))
def draw_image_at_location(self, image_path_or_object, x, y,
image_resample_mode=ImageResampleMode.PIXEL_ART):
self.draw_image(image_path_or_object, (x, y), image_resample_mode)
def draw_line(self, start_xy, stop_xy, rgb=Palette.WHITE):
line = set()
# Calculate the amount of steps needed between the points to draw a nice line
amount_of_steps = minimum_amount_of_steps(start_xy, stop_xy)
# Iterate over them and create a nice set of pixels
for step in range(amount_of_steps):
if amount_of_steps == 0:
interpolant = 0
else:
interpolant = step / amount_of_steps
# Add a pixel as a rounded location
line.add(
round_location(lerp_location(start_xy, stop_xy, interpolant)))
# Draw the actual pixel line
for pixel in line:
self.draw_pixel(pixel, rgb)
def draw_line_from_start_to_stop_rgb(self, start_x, start_y, stop_x, stop_y,
r=255, g=255, b=255):
self.draw_line((start_x, start_y), (stop_x, stop_y), (r, g, b))
def draw_pixel(self, xy, rgb):
# If it's not on the screen, we're not going to bother
if xy[0] < 0 or xy[0] >= self.size or xy[1] < 0 or xy[1] >= self.size:
if self.debug:
limit = self.size - 1
print(
                    f'[!] Invalid coordinates given: ({xy[0]}, {xy[1]}) (maximum coordinates are ({limit}, {limit}))')
return
# Calculate the index
index = xy[0] + (xy[1] * self.size)
# Color it
self.draw_pixel_at_index(index, rgb)
def draw_pixel_at_index(self, index, rgb):
# Validate the index
if index < 0 or index >= self.pixel_count:
if self.debug:
print(f'[!] Invalid index given: {index} (maximum index is {self.pixel_count - 1})')
return
# Clamp the color, just to be safe
rgb = clamp_color(rgb)
# Move to place in array
index = index * 3
self.__buffer[index] = rgb[0]
self.__buffer[index + 1] = rgb[1]
self.__buffer[index + 2] = rgb[2]
def draw_pixel_at_index_rgb(self, index, r, g, b):
self.draw_pixel_at_index(index, (r, g, b))
def draw_pixel_at_location_rgb(self, x, y, r, g, b):
self.draw_pixel((x, y), (r, g, b))
def draw_character(self, character, xy=(0, 0), rgb=Palette.WHITE, font=None):
if font is None: | font = FONT_PICO_8 | 3 | 2023-11-05 19:16:34+00:00 | 12k |
jkulhanek/nerfbaselines | nerfbaselines/datasets/colmap.py | [
{
"identifier": "Dataset",
"path": "nerfbaselines/types.py",
"snippet": "NB_PREFIX = os.path.expanduser(os.environ.get(\"NB_PREFIX\", \"~/.cache/nerfbaselines\"))\nclass Dataset:\nclass CurrentProgress:\n class RenderOutput(TypedDict):\nclass MethodInfo:\nclass Method(Protocol):\nclass RayMethod(Method):\n def __post_init__(self):\n def __len__(self):\n def __getitem__(self, i) -> \"Dataset\":\n def index(obj):\n def load_features(self, required_features, supported_camera_models=None):\n def expected_scene_scale(self):\ndef batched(array, batch_size):\n def install(cls):\n def get_info(self) -> MethodInfo:\n def render(self, cameras: Cameras, progress_callback: Optional[ProgressCallback] = None) -> Iterable[RenderOutput]: # [h w c]\n def setup_train(self, train_dataset: Dataset, *, num_iterations: int):\n def train_iteration(self, step: int):\n def save(self, path: Path):\n def __init__(self, batch_size, seed: int = 42, xnp=np):\n def render_rays(self, origins: np.ndarray, directions: np.ndarray, nears_fars: Optional[np.ndarray]) -> RenderOutput: # batch 3 # batch 3 # batch 3\n def train_iteration_rays(self, step: int, origins: np.ndarray, directions: np.ndarray, nears_fars: Optional[np.ndarray], colors: np.ndarray): # [batch 3] # [batch 3] # [batch 2] # [batch c]\n def setup_train(self, train_dataset: Dataset, *, num_iterations: int):\n def train_iteration(self, step: int):\n def render(self, cameras: Cameras, progress_callback: Optional[ProgressCallback] = None) -> Iterable[RenderOutput]:"
},
{
"identifier": "Indices",
"path": "nerfbaselines/utils.py",
"snippet": "class Indices:\n def __init__(self, steps):\n self._steps = steps\n self.total: Optional[int] = None\n\n def __contains__(self, x):\n if isinstance(self._steps, list):\n steps = self._steps\n if any(x < 0 for x in self._steps):\n assert self.total is not None, \"total must be specified for negative steps\"\n steps = set(x if x >= 0 else self.total + x for x in self._steps)\n return x in steps\n elif isinstance(self._steps, slice):\n start: int = self._steps.start or 0\n if start < 0:\n assert self.total is not None, \"total must be specified for negative start\"\n start = self.total - start\n stop: Optional[int] = self._steps.stop or self.total\n if stop is not None and stop < 0:\n assert self.total is not None, \"total must be specified for negative stop\"\n stop = self.total - stop\n step: int = self._steps.step or 1\n return x >= start and (stop is None or x < stop) and (x - start) % step == 0\n\n @classmethod\n def every_iters(cls, iters: int, zero: bool = False):\n start = iters if zero else 0\n return cls(slice(start, None, iters))\n\n def __repr__(self):\n if isinstance(self._steps, list):\n return \",\".join(map(str, self._steps))\n elif isinstance(self._steps, slice):\n out = f\"{self._steps.start or ''}:{self._steps.stop or ''}\"\n if self._steps.step is not None:\n out += f\":{self._steps.step}\"\n return out\n else:\n return repr(self._steps)\n\n def __str__(self):\n return repr(self)"
},
{
"identifier": "CameraModel",
"path": "nerfbaselines/cameras.py",
"snippet": "class CameraModel(Enum):\n PINHOLE = 0\n OPENCV = 1\n OPENCV_FISHEYE = 2\n FULL_OPENCV = 3"
},
{
"identifier": "Cameras",
"path": "nerfbaselines/cameras.py",
"snippet": "class Cameras:\n poses: np.ndarray # [N, (R, t)]\n normalized_intrinsics: np.ndarray # [N, (fx,fy,cx,cy)]\n\n # Distortions\n camera_types: np.ndarray # [N]\n distortion_parameters: np.ndarray # [N, num_params]\n\n image_sizes: Optional[np.ndarray] # [N, 2]\n nears_fars: Optional[np.ndarray] # [N, 2]\n\n @cached_property\n def intrinsics(self):\n assert self.image_sizes is not None\n assert self.normalized_intrinsics.shape[:-1] == self.image_sizes.shape[:-1], \"normalized_intrinsics and image_sizes must be broadcastable\"\n return self.normalized_intrinsics * self.image_sizes[..., :1]\n\n def __len__(self):\n if len(self.poses.shape) == 2:\n return 1\n return len(self.poses)\n\n def item(self):\n assert len(self) == 1, \"Cameras must have exactly one element to be converted to a single camera\"\n if len(self.poses.shape) == 2:\n return self\n return self[0]\n\n def __getitem__(self, index):\n return type(self)(\n poses=self.poses[index],\n normalized_intrinsics=self.normalized_intrinsics[index],\n camera_types=self.camera_types[index],\n distortion_parameters=self.distortion_parameters[index],\n image_sizes=self.image_sizes[index] if self.image_sizes is not None else None,\n nears_fars=self.nears_fars[index] if self.nears_fars is not None else None,\n )\n\n def __setitem__(self, index, value):\n assert (self.image_sizes is None) == (value.image_sizes is None), \"Either both or none of the cameras must have image sizes\"\n assert (self.nears_fars is None) == (value.nears_fars is None), \"Either both or none of the cameras must have nears and fars\"\n self.poses[index] = value.poses\n self.normalized_intrinsics[index] = value.normalized_intrinsics\n self.camera_types[index] = value.camera_types\n self.distortion_parameters[index] = value.distortion_parameters\n if self.image_sizes is not None:\n self.image_sizes[index] = value.image_sizes\n if self.nears_fars is not None:\n self.nears_fars[index] = value.nears_fars\n\n def __iter__(self):\n for i in range(len(self)):\n yield self[i]\n\n def get_rays(self, xy: np.ndarray, xnp=np) -> Tuple[np.ndarray, np.ndarray]:\n assert xy.shape[-1] == 2\n assert xy.shape[0] == len(self)\n assert xy.dtype.kind in {\"i\", \"u\"}, \"xy must be integer\"\n\n xy = xy.astype(xnp.float32) + 0.5\n return self.unproject(xy, xnp=xnp)\n\n def unproject(self, xy: np.ndarray, xnp=np) -> Tuple[np.ndarray, np.ndarray]:\n assert xy.shape[-1] == 2\n assert is_broadcastable(xy.shape[:-1], self.poses.shape[:-2]), \"xy must be broadcastable with poses, shapes: {}, {}\".format(xy.shape[:-1], self.poses.shape[:-2])\n assert xy.dtype.kind == \"f\"\n fx, fy, cx, cy = xnp.moveaxis(self.intrinsics, -1, 0)\n x = xy[..., 0]\n y = xy[..., 1]\n u = (x - cx) / fx\n v = (y - cy) / fy\n\n uv = xnp.stack((u, v), -1)\n uv = _undistort(self.camera_types, self.distortion_parameters, uv, xnp=xnp)\n directions = xnp.concatenate((uv, xnp.ones_like(uv[..., :1])), -1)\n\n # Switch from OpenCV to OpenGL coordinate system\n directions[..., 1:] *= -1\n\n rotation = self.poses[..., :3, :3] # (..., 3, 3)\n directions = (directions[..., None, :] * rotation).sum(-1)\n origins = xnp.broadcast_to(self.poses[..., :3, 3], directions.shape)\n return origins, directions\n\n def project(self, xyz: np.ndarray, xnp=np) -> np.ndarray:\n eps = xnp.finfo(xyz.dtype).eps\n assert xyz.shape[-1] == 3\n assert is_broadcastable(xyz.shape[:-1], self.poses.shape[:-2]), \"xyz must be broadcastable with poses, shapes: {}, {}\".format(xyz.shape[:-1], self.poses.shape[:-2])\n\n # World -> Camera\n origins = 
self.poses[..., :3, 3]\n rotation = self.poses[..., :3, :3]\n # Rotation and translation\n uvw = xyz - origins\n uvw = (rotation * uvw[..., :, None]).sum(-2)\n # Switch from OpenGL to OpenCV coordinate system\n uvw[..., 1:] *= -1\n\n # Camera -> Camera distorted\n uv = xnp.divide(uvw[..., :2], uvw[..., 2:], out=xnp.zeros_like(uvw[..., :2]), where=xnp.abs(uvw[..., 2:]) > eps)\n\n uv = _distort(self.camera_types, self.distortion_parameters, uv, xnp=xnp)\n x, y = xnp.moveaxis(uv, -1, 0)\n\n # Transform to image coordinates\n # Camera distorted -> Image\n fx, fy, cx, cy = xnp.moveaxis(self.intrinsics, -1, 0)\n x = fx * x + cx\n y = fy * y + cy\n return xnp.stack((x, y), -1)\n\n @classmethod\n def cat(cls, values: Sequence[\"Cameras\"]) -> \"Cameras\":\n return cls(\n poses=np.concatenate([v.poses for v in values]),\n normalized_intrinsics=np.concatenate([v.normalized_intrinsics for v in values]),\n camera_types=np.concatenate([v.camera_types for v in values]),\n distortion_parameters=np.concatenate([v.distortion_parameters for v in values]),\n image_sizes=np.concatenate([v.image_sizes for v in values]) if any(v.image_sizes is not None for v in values) else None,\n nears_fars=np.concatenate([v.nears_fars for v in values]) if any(v.nears_fars is not None for v in values) else None,\n )\n\n def with_image_sizes(self, image_sizes: np.ndarray) -> \"Cameras\":\n return dataclasses.replace(self, image_sizes=image_sizes)"
},
{
"identifier": "read_cameras_binary",
"path": "nerfbaselines/datasets/_colmap_utils.py",
"snippet": "def read_cameras_binary(path_to_model_file):\n \"\"\"\n see: src/base/reconstruction.cc\n void Reconstruction::WriteCamerasBinary(const std::string& path)\n void Reconstruction::ReadCamerasBinary(const std::string& path)\n \"\"\"\n cameras = {}\n with open(path_to_model_file, \"rb\") as fid:\n num_cameras = read_next_bytes(fid, 8, \"Q\")[0]\n for _ in range(num_cameras):\n camera_properties = read_next_bytes(fid, num_bytes=24, format_char_sequence=\"iiQQ\")\n camera_id = camera_properties[0]\n model_id = camera_properties[1]\n model_name = CAMERA_MODEL_IDS[camera_properties[1]].model_name\n width = camera_properties[2]\n height = camera_properties[3]\n num_params = CAMERA_MODEL_IDS[model_id].num_params\n params = read_next_bytes(fid, num_bytes=8 * num_params, format_char_sequence=\"d\" * num_params)\n cameras[camera_id] = Camera(\n id=camera_id,\n model=model_name,\n width=width,\n height=height,\n params=np.array(params),\n )\n assert len(cameras) == num_cameras\n return cameras"
},
{
"identifier": "read_images_binary",
"path": "nerfbaselines/datasets/_colmap_utils.py",
"snippet": "def read_images_binary(path_to_model_file):\n \"\"\"\n see: src/base/reconstruction.cc\n void Reconstruction::ReadImagesBinary(const std::string& path)\n void Reconstruction::WriteImagesBinary(const std::string& path)\n \"\"\"\n images = {}\n with open(path_to_model_file, \"rb\") as fid:\n num_reg_images = read_next_bytes(fid, 8, \"Q\")[0]\n for _ in range(num_reg_images):\n binary_image_properties = read_next_bytes(fid, num_bytes=64, format_char_sequence=\"idddddddi\")\n image_id = binary_image_properties[0]\n qvec = np.array(binary_image_properties[1:5])\n tvec = np.array(binary_image_properties[5:8])\n camera_id = binary_image_properties[8]\n image_name = \"\"\n current_char = read_next_bytes(fid, 1, \"c\")[0]\n while current_char != b\"\\x00\": # look for the ASCII 0 entry\n image_name += current_char.decode(\"utf-8\")\n current_char = read_next_bytes(fid, 1, \"c\")[0]\n num_points2D = read_next_bytes(fid, num_bytes=8, format_char_sequence=\"Q\")[0]\n x_y_id_s = read_next_bytes(\n fid,\n num_bytes=24 * num_points2D,\n format_char_sequence=\"ddq\" * num_points2D,\n )\n xys = np.column_stack([tuple(map(float, x_y_id_s[0::3])), tuple(map(float, x_y_id_s[1::3]))])\n point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3])))\n images[image_id] = Image(\n id=image_id,\n qvec=qvec,\n tvec=tvec,\n camera_id=camera_id,\n name=image_name,\n xys=xys,\n point3D_ids=point3D_ids,\n )\n return images"
},
{
"identifier": "read_points3D_binary",
"path": "nerfbaselines/datasets/_colmap_utils.py",
"snippet": "def read_points3D_binary(path_to_model_file):\n \"\"\"\n see: src/base/reconstruction.cc\n void Reconstruction::ReadPoints3DBinary(const std::string& path)\n void Reconstruction::WritePoints3DBinary(const std::string& path)\n \"\"\"\n points3D = {}\n with open(path_to_model_file, \"rb\") as fid:\n num_points = read_next_bytes(fid, 8, \"Q\")[0]\n for _ in range(num_points):\n binary_point_line_properties = read_next_bytes(fid, num_bytes=43, format_char_sequence=\"QdddBBBd\")\n point3D_id = binary_point_line_properties[0]\n xyz = np.array(binary_point_line_properties[1:4])\n rgb = np.array(binary_point_line_properties[4:7])\n error = np.array(binary_point_line_properties[7])\n track_length = read_next_bytes(fid, num_bytes=8, format_char_sequence=\"Q\")[0]\n track_elems = read_next_bytes(\n fid,\n num_bytes=8 * track_length,\n format_char_sequence=\"ii\" * track_length,\n )\n image_ids = np.array(tuple(map(int, track_elems[0::2])))\n point2D_idxs = np.array(tuple(map(int, track_elems[1::2])))\n points3D[point3D_id] = Point3D(\n id=point3D_id,\n xyz=xyz,\n rgb=rgb,\n error=error,\n image_ids=image_ids,\n point2D_idxs=point2D_idxs,\n )\n return points3D"
},
{
"identifier": "qvec2rotmat",
"path": "nerfbaselines/datasets/_colmap_utils.py",
"snippet": "def qvec2rotmat(self):\n return qvec2rotmat(self.qvec)"
},
{
"identifier": "read_cameras_text",
"path": "nerfbaselines/datasets/_colmap_utils.py",
"snippet": "class Image(BaseImage):\n def qvec2rotmat(self):\ndef read_next_bytes(fid, num_bytes, format_char_sequence, endian_character=\"<\"):\ndef write_next_bytes(fid, data, format_char_sequence, endian_character=\"<\"):\ndef read_cameras_text(path):\ndef read_cameras_binary(path_to_model_file):\ndef write_cameras_text(cameras, path):\ndef write_cameras_binary(cameras, path_to_model_file):\ndef read_images_text(path):\ndef read_images_binary(path_to_model_file):\ndef write_images_text(images, path):\ndef write_images_binary(images, path_to_model_file):\ndef read_points3D_text(path):\ndef read_points3D_binary(path_to_model_file):\ndef write_points3D_text(points3D, path):\ndef write_points3D_binary(points3D, path_to_model_file):\ndef detect_model_format(path, ext):\ndef read_model(path, ext=\"\"):\ndef write_model(cameras, images, points3D, path, ext=\".bin\"):\ndef qvec2rotmat(qvec):\ndef rotmat2qvec(R):\nCAMERA_MODELS = {\n CameraModel(model_id=0, model_name=\"SIMPLE_PINHOLE\", num_params=3),\n CameraModel(model_id=1, model_name=\"PINHOLE\", num_params=4),\n CameraModel(model_id=2, model_name=\"SIMPLE_RADIAL\", num_params=4),\n CameraModel(model_id=3, model_name=\"RADIAL\", num_params=5),\n CameraModel(model_id=4, model_name=\"OPENCV\", num_params=8),\n CameraModel(model_id=5, model_name=\"OPENCV_FISHEYE\", num_params=8),\n CameraModel(model_id=6, model_name=\"FULL_OPENCV\", num_params=12),\n CameraModel(model_id=7, model_name=\"FOV\", num_params=5),\n CameraModel(model_id=8, model_name=\"SIMPLE_RADIAL_FISHEYE\", num_params=4),\n CameraModel(model_id=9, model_name=\"RADIAL_FISHEYE\", num_params=5),\n CameraModel(model_id=10, model_name=\"THIN_PRISM_FISHEYE\", num_params=12),\n}\nCAMERA_MODEL_IDS = dict([(camera_model.model_id, camera_model) for camera_model in CAMERA_MODELS])\nCAMERA_MODEL_NAMES = dict([(camera_model.model_name, camera_model) for camera_model in CAMERA_MODELS])\n HEADER = \"# Camera list with one line of data per camera:\\n\" + \"# CAMERA_ID, MODEL, WIDTH, HEIGHT, PARAMS[]\\n\" + \"# Number of cameras: {}\\n\".format(len(cameras))\n HEADER = (\n \"# Image list with two lines of data per image:\\n\"\n + \"# IMAGE_ID, QW, QX, QY, QZ, TX, TY, TZ, CAMERA_ID, NAME\\n\"\n + \"# POINTS2D[] as (X, Y, POINT3D_ID)\\n\"\n + \"# Number of images: {}, mean observations per image: {}\\n\".format(len(images), mean_observations)\n )\n HEADER = (\n \"# 3D point list with one line of data per point:\\n\"\n + \"# POINT3D_ID, X, Y, Z, R, G, B, ERROR, TRACK[] as (IMAGE_ID, POINT2D_IDX)\\n\"\n + \"# Number of points: {}, mean track length: {}\\n\".format(len(points3D), mean_track_length)\n )\n K = (\n np.array(\n [\n [Rxx - Ryy - Rzz, 0, 0, 0],\n [Ryx + Rxy, Ryy - Rxx - Rzz, 0, 0],\n [Rzx + Rxz, Rzy + Ryz, Rzz - Rxx - Ryy, 0],\n [Ryz - Rzy, Rzx - Rxz, Rxy - Ryx, Rxx + Ryy + Rzz],\n ]\n )\n / 3.0\n )"
},
{
"identifier": "DatasetNotFoundError",
"path": "nerfbaselines/datasets/_common.py",
"snippet": "def single(xs):\ndef _dataset_undistort_unsupported(dataset: Dataset, supported_camera_models):\ndef dataset_load_features(dataset: Dataset, required_features, supported_camera_models=None):\n def __init__(self, errors, message):\n def write_to_logger(self, color=True, terminal_width=None):\nclass DatasetNotFoundError(Exception):\nclass MultiDatasetError(DatasetNotFoundError):"
}
] | import typing
import logging
import numpy as np
from collections import OrderedDict
from pathlib import Path
from typing import Tuple, Optional, Dict
from ..types import Dataset, DatasetFeature, FrozenSet
from ..utils import Indices
from ..cameras import CameraModel, Cameras
from ._colmap_utils import read_cameras_binary, read_images_binary, read_points3D_binary, qvec2rotmat
from ._colmap_utils import read_cameras_text, read_images_text, read_points3D_text, Image, Camera, Point3D
from ._common import DatasetNotFoundError, padded_stack | 7,486 | elif camera.model == "FULL_OPENCV":
# fx, fy, cx, cy, k1, k2, p1, p2, k3, k4, k5, k6
# u2 = u ** 2
# uv = u * v
# v2 = v ** 2
# r2 = u2 + v2
# r4 = r2 * r2
# r6 = r4 * r2
# radial = (1 + k1 * r2 + k2 * r4 + k3 * r6) /
# (1 + k4 * r2 + k5 * r4 + k6 * r6)
# du = u * radial + 2 * p1 * uv + p2 * (r2 + 2 * u2) - u
# dv = v * radial + 2 * p2 * uv + p1 * (r2 + 2 * v2) - v
fl_x = float(camera_params[0])
fl_y = float(camera_params[1])
cx = float(camera_params[2])
cy = float(camera_params[3])
out["k1"] = float(camera_params[4])
out["k2"] = float(camera_params[5])
out["p1"] = float(camera_params[6])
out["p2"] = float(camera_params[7])
out["k3"] = float(camera_params[8])
out["k4"] = float(camera_params[9])
out["k5"] = float(camera_params[10])
out["k6"] = float(camera_params[11])
raise NotImplementedError(f"{camera.model} camera model is not supported yet!")
elif camera.model == "FOV":
# fx, fy, cx, cy, omega
fl_x = float(camera_params[0])
fl_y = float(camera_params[1])
cx = float(camera_params[2])
cy = float(camera_params[3])
out["omega"] = float(camera_params[4])
raise NotImplementedError(f"{camera.model} camera model is not supported yet!")
elif camera.model == "SIMPLE_RADIAL_FISHEYE":
# f, cx, cy, k
# r = sqrt(u ** 2 + v ** 2)
# if r > eps:
# theta = atan(r)
# theta2 = theta ** 2
# thetad = theta * (1 + k * theta2)
# du = u * thetad / r - u;
# dv = v * thetad / r - v;
# else:
# du = dv = 0
fl_x = float(camera_params[0])
fl_y = float(camera_params[0])
cx = float(camera_params[1])
cy = float(camera_params[2])
out["k1"] = float(camera_params[3])
camera_model = CameraModel.OPENCV_FISHEYE
elif camera.model == "RADIAL_FISHEYE":
# f, cx, cy, k1, k2
# r = sqrt(u ** 2 + v ** 2)
# if r > eps:
# theta = atan(r)
# theta2 = theta ** 2
# theta4 = theta2 ** 2
# thetad = theta * (1 + k * theta2)
        #    thetad = theta * (1 + k1 * theta2 + k2 * theta4)
# dv = v * thetad / r - v;
# else:
# du = dv = 0
fl_x = float(camera_params[0])
fl_y = float(camera_params[0])
cx = float(camera_params[1])
cy = float(camera_params[2])
out["k1"] = float(camera_params[3])
out["k2"] = float(camera_params[4])
out["k3"] = 0.0
out["k4"] = 0.0
camera_model = CameraModel.OPENCV_FISHEYE
else:
# THIN_PRISM_FISHEYE not supported!
raise NotImplementedError(f"{camera.model} camera model is not supported yet!")
image_width: int = camera.width
image_height: int = camera.height
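    # Intrinsics are stored normalized by the image width; the Cameras class
    # rescales them by the stored image size when its `intrinsics` property is used.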
intrinsics = np.array([fl_x, fl_y, cx, cy], dtype=np.float32) / float(image_width)
distortion_params = np.array([out.get(k, 0.0) for k in ("k1", "k2", "p1", "p2", "k3", "k4")], dtype=np.float32)
return intrinsics, camera_model.value, distortion_params, (image_width, image_height)
def load_colmap_dataset(path: Path, images_path: Optional[Path] = None, split: Optional[str] = None, test_indices: Optional[Indices] = None, features: Optional[FrozenSet[DatasetFeature]] = None):
if features is None:
features = typing.cast(FrozenSet[DatasetFeature], {})
load_points = "points3D_xyz" in features or "points3D_rgb" in features
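    # Sparse 3D points are only loaded when the caller requested point-cloud features.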
if split:
assert split in {"train", "test"}
# Load COLMAP dataset
colmap_path = path / "sparse" / "0"
if images_path is None:
images_path = Path("images")
images_path = path / images_path
if not colmap_path.exists():
raise DatasetNotFoundError("Missing 'sparse/0' folder in COLMAP dataset")
if not (colmap_path / "cameras.bin").exists() and not (colmap_path / "cameras.txt").exists():
raise DatasetNotFoundError("Missing 'sparse/0/cameras.{bin,txt}' file in COLMAP dataset")
if not images_path.exists():
raise DatasetNotFoundError("Missing 'images' folder in COLMAP dataset")
if (colmap_path / "cameras.bin").exists():
cameras = read_cameras_binary(colmap_path / "cameras.bin")
elif (colmap_path / "cameras.txt").exists():
cameras = read_cameras_text(colmap_path / "cameras.txt")
else:
raise DatasetNotFoundError("Missing 'sparse/0/cameras.{bin,txt}' file in COLMAP dataset")
if not (colmap_path / "images.bin").exists() and not (colmap_path / "images.txt").exists():
raise DatasetNotFoundError("Missing 'sparse/0/images.{bin,txt}' file in COLMAP dataset")
if (colmap_path / "images.bin").exists():
images = read_images_binary(colmap_path / "images.bin")
elif (colmap_path / "images.txt").exists():
images = read_images_text(colmap_path / "images.txt")
else:
raise DatasetNotFoundError("Missing 'sparse/0/images.{bin,txt}' file in COLMAP dataset")
|
def _parse_colmap_camera_params(camera: Camera) -> Tuple[np.ndarray, int, np.ndarray, Tuple[int, int]]:
"""
Parses all currently supported COLMAP cameras into the transforms.json metadata
Args:
camera: COLMAP camera
Returns:
transforms.json metadata containing camera's intrinsics and distortion parameters
"""
# Parameters match https://github.com/colmap/colmap/blob/dev/src/base/camera_models.h
out = OrderedDict() # Default in Python 3.7+
camera_params = camera.params
camera_model: CameraModel
if camera.model == "SIMPLE_PINHOLE":
# du = 0
# dv = 0
fl_x = float(camera_params[0])
fl_y = float(camera_params[0])
cx = float(camera_params[1])
cy = float(camera_params[2])
camera_model = CameraModel.PINHOLE
elif camera.model == "PINHOLE":
        # fx, fy, cx, cy
# du = 0
# dv = 0
fl_x = float(camera_params[0])
fl_y = float(camera_params[1])
cx = float(camera_params[2])
cy = float(camera_params[3])
camera_model = CameraModel.PINHOLE
elif camera.model == "SIMPLE_RADIAL":
# f, cx, cy, k
# r2 = u**2 + v**2;
# radial = k * r2
# du = u * radial
        # dv = v * radial
fl_x = float(camera_params[0])
fl_y = float(camera_params[0])
cx = float(camera_params[1])
cy = float(camera_params[2])
out["k1"] = float(camera_params[3])
camera_model = CameraModel.OPENCV
elif camera.model == "RADIAL":
# f, cx, cy, k1, k2
# r2 = u**2 + v**2;
# radial = k1 * r2 + k2 * r2 ** 2
# du = u * radial
# dv = v * radial
fl_x = float(camera_params[0])
fl_y = float(camera_params[0])
cx = float(camera_params[1])
cy = float(camera_params[2])
out["k1"] = float(camera_params[3])
out["k2"] = float(camera_params[4])
camera_model = CameraModel.OPENCV
elif camera.model == "OPENCV":
# fx, fy, cx, cy, k1, k2, p1, p2
# uv = u * v;
# r2 = u**2 + v**2
# radial = k1 * r2 + k2 * r2 ** 2
# du = u * radial + 2 * p1 * u*v + p2 * (r2 + 2 * u**2)
# dv = v * radial + 2 * p2 * u*v + p1 * (r2 + 2 * v**2)
fl_x = float(camera_params[0])
fl_y = float(camera_params[1])
cx = float(camera_params[2])
cy = float(camera_params[3])
out["k1"] = float(camera_params[4])
out["k2"] = float(camera_params[5])
out["p1"] = float(camera_params[6])
out["p2"] = float(camera_params[7])
camera_model = CameraModel.OPENCV
elif camera.model == "OPENCV_FISHEYE":
# fx, fy, cx, cy, k1, k2, k3, k4
# r = sqrt(u**2 + v**2)
# if r > eps:
# theta = atan(r)
# theta2 = theta ** 2
# theta4 = theta2 ** 2
# theta6 = theta4 * theta2
# theta8 = theta4 ** 2
# thetad = theta * (1 + k1 * theta2 + k2 * theta4 + k3 * theta6 + k4 * theta8)
# du = u * thetad / r - u;
# dv = v * thetad / r - v;
# else:
# du = dv = 0
fl_x = float(camera_params[0])
fl_y = float(camera_params[1])
cx = float(camera_params[2])
cy = float(camera_params[3])
out["k1"] = float(camera_params[4])
out["k2"] = float(camera_params[5])
out["k3"] = float(camera_params[6])
out["k4"] = float(camera_params[7])
camera_model = CameraModel.OPENCV_FISHEYE
elif camera.model == "FULL_OPENCV":
# fx, fy, cx, cy, k1, k2, p1, p2, k3, k4, k5, k6
# u2 = u ** 2
# uv = u * v
# v2 = v ** 2
# r2 = u2 + v2
# r4 = r2 * r2
# r6 = r4 * r2
# radial = (1 + k1 * r2 + k2 * r4 + k3 * r6) /
# (1 + k4 * r2 + k5 * r4 + k6 * r6)
# du = u * radial + 2 * p1 * uv + p2 * (r2 + 2 * u2) - u
# dv = v * radial + 2 * p2 * uv + p1 * (r2 + 2 * v2) - v
fl_x = float(camera_params[0])
fl_y = float(camera_params[1])
cx = float(camera_params[2])
cy = float(camera_params[3])
out["k1"] = float(camera_params[4])
out["k2"] = float(camera_params[5])
out["p1"] = float(camera_params[6])
out["p2"] = float(camera_params[7])
out["k3"] = float(camera_params[8])
out["k4"] = float(camera_params[9])
out["k5"] = float(camera_params[10])
out["k6"] = float(camera_params[11])
raise NotImplementedError(f"{camera.model} camera model is not supported yet!")
elif camera.model == "FOV":
# fx, fy, cx, cy, omega
fl_x = float(camera_params[0])
fl_y = float(camera_params[1])
cx = float(camera_params[2])
cy = float(camera_params[3])
out["omega"] = float(camera_params[4])
raise NotImplementedError(f"{camera.model} camera model is not supported yet!")
elif camera.model == "SIMPLE_RADIAL_FISHEYE":
# f, cx, cy, k
# r = sqrt(u ** 2 + v ** 2)
# if r > eps:
# theta = atan(r)
# theta2 = theta ** 2
# thetad = theta * (1 + k * theta2)
# du = u * thetad / r - u;
# dv = v * thetad / r - v;
# else:
# du = dv = 0
fl_x = float(camera_params[0])
fl_y = float(camera_params[0])
cx = float(camera_params[1])
cy = float(camera_params[2])
out["k1"] = float(camera_params[3])
camera_model = CameraModel.OPENCV_FISHEYE
elif camera.model == "RADIAL_FISHEYE":
# f, cx, cy, k1, k2
# r = sqrt(u ** 2 + v ** 2)
# if r > eps:
# theta = atan(r)
# theta2 = theta ** 2
# theta4 = theta2 ** 2
        #    thetad = theta * (1 + k1 * theta2 + k2 * theta4)
# du = u * thetad / r - u;
# dv = v * thetad / r - v;
# else:
# du = dv = 0
fl_x = float(camera_params[0])
fl_y = float(camera_params[0])
cx = float(camera_params[1])
cy = float(camera_params[2])
out["k1"] = float(camera_params[3])
out["k2"] = float(camera_params[4])
out["k3"] = 0.0
out["k4"] = 0.0
camera_model = CameraModel.OPENCV_FISHEYE
else:
# THIN_PRISM_FISHEYE not supported!
raise NotImplementedError(f"{camera.model} camera model is not supported yet!")
image_width: int = camera.width
image_height: int = camera.height
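    # Intrinsics are stored normalized by the image width; the Cameras class
    # rescales them by the stored image size when its `intrinsics` property is used.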
intrinsics = np.array([fl_x, fl_y, cx, cy], dtype=np.float32) / float(image_width)
distortion_params = np.array([out.get(k, 0.0) for k in ("k1", "k2", "p1", "p2", "k3", "k4")], dtype=np.float32)
return intrinsics, camera_model.value, distortion_params, (image_width, image_height)
def load_colmap_dataset(path: Path, images_path: Optional[Path] = None, split: Optional[str] = None, test_indices: Optional[Indices] = None, features: Optional[FrozenSet[DatasetFeature]] = None):
if features is None:
features = typing.cast(FrozenSet[DatasetFeature], {})
load_points = "points3D_xyz" in features or "points3D_rgb" in features
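    # Sparse 3D points are only loaded when the caller requested point-cloud features.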
if split:
assert split in {"train", "test"}
# Load COLMAP dataset
colmap_path = path / "sparse" / "0"
if images_path is None:
images_path = Path("images")
images_path = path / images_path
if not colmap_path.exists():
raise DatasetNotFoundError("Missing 'sparse/0' folder in COLMAP dataset")
if not (colmap_path / "cameras.bin").exists() and not (colmap_path / "cameras.txt").exists():
raise DatasetNotFoundError("Missing 'sparse/0/cameras.{bin,txt}' file in COLMAP dataset")
if not images_path.exists():
raise DatasetNotFoundError("Missing 'images' folder in COLMAP dataset")
if (colmap_path / "cameras.bin").exists():
cameras = read_cameras_binary(colmap_path / "cameras.bin")
elif (colmap_path / "cameras.txt").exists():
cameras = read_cameras_text(colmap_path / "cameras.txt")
else:
raise DatasetNotFoundError("Missing 'sparse/0/cameras.{bin,txt}' file in COLMAP dataset")
if not (colmap_path / "images.bin").exists() and not (colmap_path / "images.txt").exists():
raise DatasetNotFoundError("Missing 'sparse/0/images.{bin,txt}' file in COLMAP dataset")
if (colmap_path / "images.bin").exists():
images = read_images_binary(colmap_path / "images.bin")
elif (colmap_path / "images.txt").exists():
images = read_images_text(colmap_path / "images.txt")
else:
raise DatasetNotFoundError("Missing 'sparse/0/images.{bin,txt}' file in COLMAP dataset")
| points3D: Optional[Dict[str, Point3D]] = None | 8 | 2023-11-07 20:22:35+00:00 | 12k |
microsoft/Everything-of-Thoughts-XoT | xot_all_in_one/xot/controller/controller.py | [
{
"identifier": "IO_Solver",
"path": "xot_all_in_one/xot/controller/solver/io_solver.py",
"snippet": "class IO_Solver:\n def __init__(self, args, gpt, game, prompter, parser, to_print=False):\n self.args = args\n self.gpt = gpt\n self.game = game\n self.prompter = prompter\n self.parser = parser\n self.to_print = to_print\n\n # comment: this is the main function of the io solver\n def solve(self, idx):\n \"\"\"_summary_\n Parameters:\n idx: index of the test board\n \n Return:\n ys: a list of solutions\n info: a dictionary of information\n \"\"\"\n x = self.game.getOneTestBoard(idx)\n y = ''\n if not self.args.multi_solution:\n prompt = self.prompter.standard_prompt_wrap(x, y)\n else:\n prompt = self.prompter.standard_prompt_wrap_multi(x, y)\n\n instruct = self.prompter.get_instruction_prompt()\n samples = self.gpt(prompt, instruct, n=self.args.param.n_generate_sample, stop=self.args.gpt.stop)\n\n ys= [y + _ for _ in samples]\n if self.to_print:\n print('io_solve -- input: ', x)\n print('io_solve -- prompt: ', prompt)\n print('io_solve -- output: ', samples)\n \n info = {}\n\n return ys, info"
},
{
"identifier": "CoT_Solver",
"path": "xot_all_in_one/xot/controller/solver/cot_solver.py",
"snippet": "class CoT_Solver:\n def __init__(self, args, gpt, game, prompter, parser, to_print=False):\n self.args = args\n self.gpt = gpt\n self.game = game\n self.prompter = prompter\n self.parser = parser\n self.to_print = to_print\n\n\n def solve(self, idx):\n '''_summary_\n Parameters:\n idx: index of the test board\n \n Return:\n ys: a list of solutions\n info: a dictionary of information\n '''\n x = self.game.getOneTestBoard(idx)\n y = ''\n\n if not self.args.multi_solution:\n prompt = self.prompter.cot_prompt_wrap(x, y)\n else:\n prompt = self.prompter.cot_prompt_wrap_multi(x, y)\n instruct = self.prompter.get_instruction_prompt()\n samples = self.gpt(prompt, instruct, n=self.args.param.n_generate_sample, stop=self.args.gpt.stop)\n\n ys= [y + _ for _ in samples]\n if self.to_print:\n print('cot_solve -- input: ', x)\n print('cot_solve -- prompt: ', prompt)\n print('cot_solve -- output: ', samples)\n\n info = {}\n \n return ys, info"
},
{
"identifier": "ToT_Solver",
"path": "xot_all_in_one/xot/controller/solver/tot_solver.py",
"snippet": "class ToT_Solver:\n def __init__(self, args, gpt, game, prompter, parser, to_print=False):\n \"\"\"\n Initialize the ToT_Solver with the necessary components.\n\n Args:\n args: The arguments for the solver.\n gpt: The GPT model to be used in the solver.\n game: The game or simulation to be solved.\n prompter: The prompter for generating prompts for the GPT model.\n parser: The parser for parsing the output of the GPT model.\n to_print (bool, optional): A flag indicating whether to print debug information. Defaults to False.\n \"\"\"\n self.args = args\n self.gpt = gpt\n self.game = game\n self.prompter = prompter\n self.parser = parser\n self.to_print = to_print\n \n def get_max_index(self, lst):\n \"\"\"\n This function returns the index of the maximum value in a list.\n\n Args:\n lst (list): The list in which the maximum value is to be found.\n \n Returns:\n max_indices: The index of the maximum value in the list.\n \"\"\"\n max_value = max(lst)\n max_indices = [index for index, value in enumerate(lst) if value == max_value]\n return random.choice(max_indices)\n\n def get_proposals(self, args, gpt, prompter, x, y, isFinished): \n \"\"\"\n This function generates proposals for a partial output 'y' using the 'get_proposals' method.\n\n Args:\n args (gpt): The arguments to be passed to the 'get_proposals' method.\n gpt: The GPT model to be used in the 'get_proposals' method.\n prompter: The prompter to be used in the 'get_proposals' method.\n x: The input to be passed to the 'get_proposals' method.\n y: The partial output for which proposals are to be generated.\n isFinished (bool): A flag indicating whether the partial output 'y' is a complete output.\n \n Returns:\n proposals: A list of generated proposals.\n current_state: The current state of the game after the partial output 'y' is applied.\n \"\"\"\n instruct = prompter.get_instruction_prompt()\n propose_prompt, current_state = prompter.propose_prompt_wrap(x, y, isFinished)\n \n proposals = gpt(propose_prompt, instruct, n=args.param.n_generate_sample, stop=args.gpt.stop)[0]\n if self.to_print:\n print('propose_prompt', propose_prompt)\n print('proposals', proposals)\n\n if isFinished:\n return [proposals + '\\n'], current_state\n else:\n if args.env == 'game24':\n proposals_precheck = proposals.strip().split('\\n')\n proposals_aftercheck = []\n for idx, p in enumerate(proposals_precheck):\n try:\n exp = p.lower().split(\"=\")[0].strip()\n terms = exp.split(' ')\n num1, _, num2 = terms[0], terms[1], terms[2]\n if num1 in current_state and num2 in current_state:\n res = eval(exp)\n proposals_aftercheck.append(p)\n except:\n continue\n else:\n proposals_aftercheck = proposals.replace('\\n', '').strip().split(', ')\n \n return [y + _ + '\\n' for _ in proposals_aftercheck], current_state\n \n\n def get_value_tot(self, args, gpt, prompter, parser, x, y, cache_value=True):\n \"\"\"\n This function calculates the total value for a partial output 'y' using the 'get_value' method.\n It uses a local cache to avoid duplicate calculations.\n\n Args:\n args (gpt): The arguments to be passed to the 'get_value' method.\n gpt: The GPT model to be used in the 'get_value' method.\n prompter: The prompter to be used in the 'get_value' method.\n parser: The parser to be used in the 'get_value' method.\n x: The input to be passed to the 'get_value' method.\n y: The partial output for which the total value is to be calculated.\n cache_value (bool, optional): A flag indicating whether to cache the calculated values. 
Defaults to True.\n \n Returns:\n value: The calculated value for the partial output 'y'.\n \"\"\"\n instruct = prompter.get_instruction_prompt()\n value_prompt = prompter.value_prompt_wrap(x, y)\n \n if cache_value and value_prompt in prompter.value_cache:\n return prompter.value_cache[value_prompt]\n value_outputs = gpt(value_prompt, instruct, n=args.param.n_generate_sample, stop=args.gpt.stop)\n value = parser.value_outputs_unwrap(x, y, value_outputs)\n \n if self.to_print:\n print('value_prompt', value_prompt)\n print('value_outputs', value_outputs)\n \n if cache_value:\n prompter.value_cache[value_prompt] = value\n return value\n\n\n def get_values_tot(self, args, gpt, prompter, parser, x, ys, cache_value=True):\n \"\"\"\n This function calculates the total value for each partial output in 'ys' using the 'get_value_tot' method.\n It uses a local cache to avoid duplicate calculations.\n\n Args:\n args (gpt): The arguments to be passed to the 'get_value_tot' method.\n gpt: The GPT model to be used in the 'get_value_tot' method.\n prompter: The prompter to be used in the 'get_value_tot' method.\n parser: The parser to be used in the 'get_value_tot' method.\n x: The input to be passed to the 'get_value_tot' method.\n ys (list): A list of partial outputs for which the total value is to be calculated.\n cache_value (bool, optional): A flag indicating whether to cache the calculated values. Defaults to True.\n\n Returns:\n values: A list of calculated values for each partial output in 'ys'.\n \"\"\"\n values = []\n local_value_cache = {}\n for y in ys: # each partial output\n if y in local_value_cache: # avoid duplicate calculations\n value = 0\n else: \n value = self.get_value_tot(args, gpt, prompter, parser, x, y, cache_value=cache_value)\n local_value_cache[y] = value\n values.append(value)\n return values\n \n\n def solve(self, idx):\n \"\"\"_summary_\n Parameters:\n idx: index of the test board\n\n Return:\n ys: a list of solutions\n info: a dictionary of information\n \"\"\"\n x = self.game.getOneTestBoard(idx)\n\n\n if self.args.multi_solution and self.args.env == 'cube':\n total_game_step = self.args.task.total_game_step + 3\n elif self.args.multi_solution and self.args.env == 'npuzzle':\n total_game_step = self.args.task.total_game_step + 3\n elif not self.args.param.last_step and self.args.env == 'game24':\n total_game_step = self.args.task.total_game_step - 1\n else: \n total_game_step = self.args.task.total_game_step\n\n\n ys = [''] # current output candidates\n infos = []\n isFinished = False\n for step in range(total_game_step+1):\n if self.to_print:\n print('Current Step: %s'%(step+1))\n # generation\n new_ys = []\n for y in ys:\n ys_, current_state = self.get_proposals(self.args, self.gpt, self.prompter, x, y, isFinished)\n new_ys.append(ys_)\n \n new_ys = list(itertools.chain(*new_ys))\n\n if self.to_print:\n print('new_ys', new_ys)\n\n if isFinished:\n infos.append({'step': step, 'x': str(x), 'thoughts': ys, 'answer': new_ys})\n ys = new_ys\n break\n \n # evaluation\n values = self.get_values_tot(self.args, self.gpt, self.prompter, self.parser, x, new_ys)\n if len(values) == 0:\n isFinished = True\n continue\n\n # selection\n if self.args.param.n_select_sample == 1: # b=1\n max_values = max(values)\n select_ids = [self.get_max_index(values)]\n select_new_ys = [new_ys[select_id] for select_id in select_ids]\n else:\n ids = list(range(len(new_ys)))\n max_values = max(values)\n select_ids = sorted(ids, key=lambda x: values[x], reverse=True)[:self.args.param.n_select_sample]\n 
select_new_ys = [new_ys[select_id] for select_id in select_ids]\n\n # log\n if self.to_print: \n sorted_new_ys, sorted_values = zip(*sorted(zip(new_ys, values), key=lambda x: x[1], reverse=True))\n print(f'-- new_ys --: {sorted_new_ys}\\n-- sol values --: {sorted_values}\\n-- choices --: {select_new_ys}\\n')\n \n infos.append({'step': step, 'x': str(x), 'ys': ys, 'new_ys': new_ys, 'values': values, 'select_new_ys': select_new_ys})\n ys = select_new_ys\n\n if self.args.env == 'game24':\n isFinished = step == total_game_step - 1 # or float(max_values) == 0.001\n else:\n moves = self.parser.extract_top_select(select_new_ys)\n # Condition to Stop: 1. One of the candiates Reach the Correct Answer; 2. Reach the maximum step; 3. only left impossible answer\n success = False\n for m in moves:\n success = success or self.game.isFinishing(x, m) \n isFinished = success or step == total_game_step - 1 # or float(max_values) == 0.001 \n\n if self.to_print: \n print(ys)\n\n info = {'steps': infos}\n\n return ys, info"
},
{
"identifier": "GoT_Solver",
"path": "xot_all_in_one/xot/controller/solver/got_solver.py",
"snippet": "class GoT_Solver:\n \"\"\"\n A class used to solve a game using a GPT model.\n\n ...\n\n Attributes\n ----------\n args : object\n a configuration object with various parameters\n gpt : object\n a GPT model used for generating proposals and selections\n game : object\n a game object representing the current state of the game\n prompter : object\n an object used to generate prompts for the GPT model\n parser : object\n an object used to parse the outputs of the GPT model\n\n Methods\n -------\n get_proposals(args, gpt, prompter, x, y, isFinished)\n Generates a set of proposals or possible solutions to the game.\n get_select_got(args, gpt, prompter, parser, x, ys, cache_value=True)\n Selects the best proposal from a set of proposals.\n solve(idx)\n Solves the game by generating proposals and selecting the best ones until the game is finished.\n \"\"\"\n def __init__(self, args, gpt, game, prompter, parser, to_print=False):\n \"\"\"Initializes the GoT_Solver with the given arguments.\"\"\"\n self.args = args\n self.gpt = gpt\n self.game = game\n self.prompter = prompter\n self.parser = parser\n self.to_print = to_print\n \n \n def get_proposals(self, args, gpt, prompter, x, y, isFinished):\n \"\"\"\n Generates a set of proposals or possible solutions to the game.\n\n Parameters:\n args (object): a configuration object with various parameters\n gpt (object): a GPT model used for generating proposals\n prompter (object): an object used to generate prompts for the GPT model\n x (object): the current state of the game\n y (object): the current output candidates\n isFinished (bool): a flag indicating whether the game is finished or not\n\n Returns:\n list: a list of proposals\n object: the current state of the game\n \"\"\"\n # Get instruction prompt\n instruct = prompter.get_instruction_prompt()\n # Get propose prompt and current state\n propose_prompt, current_state = prompter.propose_prompt_wrap(x, y, isFinished)\n # Generate proposals\n proposals = gpt(propose_prompt, instruct, n=args.param.n_generate_sample, stop=args.gpt.stop)[0]\n\n # Print propose prompt and proposals if in debug mode\n if self.to_print:\n print('propose_prompt', propose_prompt)\n print('proposals', proposals)\n\n # If game is finished, return proposals\n if isFinished:\n return [proposals + '\\n'], current_state\n else:\n # If game environment is 'game24', perform precheck on proposals\n if args.env == 'game24':\n proposals_precheck = proposals.strip().split('\\n')\n proposals_aftercheck = []\n for idx, p in enumerate(proposals_precheck):\n try:\n exp = p.lower().split(\"=\")[0].strip()\n terms = exp.split(' ')\n num1, _, num2 = terms[0], terms[1], terms[2]\n if num1 in current_state and num2 in current_state:\n res = eval(exp)\n proposals_aftercheck.append(p)\n except:\n continue\n else:\n # If game environment is not 'game24', split proposals\n proposals_aftercheck = proposals.replace('\\n', '').strip().split(', ')\n \n # Return proposals after check\n return [y + _ + '\\n' for _ in proposals_aftercheck], current_state\n\n\n def get_select_got(self, args, gpt, prompter, parser, x, ys, cache_value=True):\n \"\"\"\n Selects the best proposal from a set of proposals.\n\n Parameters:\n args (object): a configuration object with various parameters\n gpt (object): a GPT model used for generating selections\n prompter (object): an object used to generate prompts for the GPT model\n parser (object): an object used to parse the outputs of the GPT model\n x (object): the current state of the game\n ys (list): a list 
of proposals\n cache_value (bool): a flag indicating whether to cache the value or not\n\n Returns:\n list: a list of selected proposals\n \"\"\"\n # Get instruction prompt\n instruct = prompter.get_instruction_prompt()\n # Get select prompt\n select_prompt = prompter.select_prompt_wrap(x, ys, args.param.n_select_sample)\n # Generate select outputs\n select_outputs = gpt(select_prompt, instruct, n=args.param.n_generate_sample, stop=args.gpt.stop)\n # Unwrap select outputs and select the best one\n select = parser.select_outputs_unwrap(x, ys, select_outputs, args.multi_solution)\n\n # Print select prompt and select outputs if in debug mode\n if self.to_print:\n print('select_prompt', select_prompt)\n print('select_outputs', select_outputs)\n \n # Return selected proposals\n return select\n\n\n def solve(self, idx):\n \"\"\"\n Solves the game by generating proposals and selecting the best ones until the game is finished.\n\n Parameters:\n idx (int): the index of the game to be solved\n\n Returns:\n list: a list of final proposals\n dict: a dictionary of steps taken to solve the game\n \"\"\"\n # Get one test board from the game\n x = self.game.getOneTestBoard(idx)\n\n # Set total game step based on the game environment and whether multiple solutions are allowed\n if self.args.multi_solution and self.args.env == 'cube':\n total_game_step = self.args.task.total_game_step + 3\n elif self.args.multi_solution and self.args.env == 'npuzzle':\n total_game_step = self.args.task.total_game_step + 3\n elif not self.args.param.last_step and self.args.env == 'game24':\n total_game_step = self.args.task.total_game_step - 1\n else: \n total_game_step = self.args.task.total_game_step\n\n # Initialize current output candidates and other variables\n ys = [''] \n infos = []\n isFinished = False\n\n # Start solving the game\n for step in range(total_game_step+1):\n # Generation phase\n new_ys = []\n\n for y in ys:\n # Generate proposals\n ys_, current_state = self.get_proposals(self.args, self.gpt, self.prompter, x, y, isFinished)\n new_ys.append(ys_)\n \n new_ys = list(itertools.chain(*new_ys))\n\n # If game is finished, log information and break the loop\n if isFinished:\n infos.append({'step': step, 'x': str(x), 'thoughts': ys, 'answer': new_ys})\n ys = new_ys\n break\n\n # If there's no new candidates, it's impossible to reach the answer: early stop\n if len(new_ys) == 0:\n isFinished = True\n select_new_ys = ys[:min(self.args.param.n_select_sample, len(ys))]\n ys = select_new_ys\n infos.append({'step': step, 'x': str(x), 'ys': ys, 'new_ys': new_ys, 'select': [], 'select_new_ys': select_new_ys})\n continue\n \n # Evaluation phase\n # Select the best proposals\n select = self.get_select_got(self.args, self.gpt, self.prompter, self.parser, x, new_ys)\n\n # Print select and new proposals if in debug mode\n if self.to_print:\n print('select', select)\n print('ys', ys)\n print('new_ys', new_ys)\n\n # Preselect new proposals\n select_new_ys_pre = []\n for m in select:\n idx = m - 1\n select_new_ys_pre.append(new_ys[idx])\n \n # Select new proposals\n if len(select_new_ys_pre) > 0:\n select_new_ys = select_new_ys_pre[:min(self.args.param.n_select_sample, len(select_new_ys_pre))]\n else:\n select_new_ys = ys[:min(self.args.param.n_select_sample, len(ys))]\n\n # Log information\n if self.to_print: \n print('select_new_ys_pre', select_new_ys_pre)\n print('select_new_ys', select_new_ys)\n print(f'select --: {select}\\n-- choices --: {select_new_ys}\\n')\n \n infos.append({'step': step, 'x': str(x), 'ys': ys, 
'new_ys': new_ys, 'select': select, 'select_new_ys': select_new_ys})\n ys = select_new_ys\n\n # Check if game is finished\n if self.args.env == 'game24':\n isFinished = step == total_game_step - 1\n else:\n moves = self.parser.extract_top_select(select_new_ys)\n # Condition to Stop: 1. One of the candiates Reach the Correct Answer; 2. Reach the maximum step; 3. only left impossible answer\n success = False\n for m in moves:\n success = success or self.game.isFinishing(x, m) \n isFinished = success or step == total_game_step - 1\n\n # Print final proposals if in debug mode\n if self.to_print: \n print(ys)\n # Return final proposals and steps taken to solve the game\n return ys, {'steps': infos}"
},
{
"identifier": "XoT_Solver",
"path": "xot_all_in_one/xot/controller/solver/xot_solver.py",
"snippet": "class XoT_Solver:\n \"\"\"\n The XoT_Solver class is designed to solve a variety of games using a combination of Monte Carlo Tree Search (MCTS), \n Neural Networks (NN), and a coaching mechanism. It supports both single and multiple solutions, and can revise its \n solutions based on feedback.\n\n Attributes:\n args: A configuration object containing various parameters.\n gpt: An instance of a GPT model for generating prompts.\n game: An instance of the game to be solved.\n prompter: An instance of a class for generating prompts.\n parser: An instance of a class for parsing actions and thoughts.\n nmcts: An instance of MCTS.\n c: An instance of a Coach.\n to_print: A boolean indicating whether to print debug information.\n \"\"\"\n def __init__(self, args, gpt, game, prompter, parser, to_print=False):\n \"\"\"\n Initializes the XoT_Solver with the given arguments, GPT model, game, prompter, parser, and print option.\n \"\"\"\n self.args = args\n self.gpt = gpt\n self.game = game\n self.prompter = prompter\n self.parser = parser\n self.nmcts, self.c = self.initial_xot(args)\n\n self.to_print = to_print\n \n def initial_xot(self, args):\n \"\"\"\n Initializes the Neural Network and MCTS based on the game environment specified in the arguments.\n \"\"\"\n if args.env.lower() == 'game24':\n from .pytorch_game24.NNet import NNetWrapper as nn\n elif args.env.lower() == 'cube':\n from .pytorch_cube.NNet import NNetWrapper as nn\n elif args.env.lower() == 'npuzzle':\n from .pytorch_npuzzle.NNet import NNetWrapper as nn\n else:\n raise ValueError\n \n nnet = nn(self.game)\n nnet.load_checkpoint(folder=self.args.model.checkpoint, filename=self.args.model.filename)\n nmcts = MCTS(self.game, nnet, args)\n c = Coach(self.game, nnet, args)\n return nmcts, c\n \n def multi_solve_before_revision(self, x):\n \"\"\"\n Solves the game for multiple solutions before any revisions are made.\n \"\"\"\n nmcts_modelcall_before = self.nmcts.getModelCall()\n player = lambda x: np.argmax(self.nmcts.getActionProb(x, temp=0, step=0))\n problem_state, getGameEnded, actions_idx, actions = self.c.generate_thoughts(x, player)\n actions_list, actions_candicates_list = [], []\n for i in range(self.args.xot.multi_solution_exploration):\n selected_ac_seq, _ = self.nmcts.inferSinglePlayer(problem_state, step=0, seed=i)\n if selected_ac_seq is not None:\n actions_candicates_list.append(str(selected_ac_seq))\n \n count = Counter(actions_candicates_list) \n actions_list = [ast.literal_eval(item) for item, _ in count.most_common(3)] \n\n nmcts_modelcall_after = self.nmcts.getModelCall()\n model_call_phase1 = nmcts_modelcall_after - nmcts_modelcall_before\n \n thoughts_list = []\n for actions in actions_list:\n try:\n thoughts_list.append(self.parser.action_to_thoughs(actions, x))\n except:\n continue\n if self.to_print:\n print('xot_solve -- thoughts: ', thoughts_list)\n \n prompt, _ = self.prompter.xot_prompt_multi_wrap(x, thoughts_list)\n instruct = self.prompter.get_instruction_prompt()\n samples = self.gpt(prompt, instruct, n=self.args.param.n_generate_sample, stop=self.args.gpt.stop)\n return samples, thoughts_list, actions, model_call_phase1\n \n\n def single_solve_before_revision(self, x):\n \"\"\"\n Solves the game for a single solution before any revisions are made.\n \"\"\"\n player = lambda x: np.argmax(self.nmcts.getActionProb(x, temp=0, step=0))\n nmcts_modelcall_before = self.nmcts.getModelCall()\n problem_state, getGameEnded, actions_idx, actions = self.c.generate_thoughts(x, player)\n 
nmcts_modelcall_after = self.nmcts.getModelCall()\n model_call_phase1 = nmcts_modelcall_after - nmcts_modelcall_before\n thoughts = self.parser.action_to_thoughs(actions, x)\n\n if self.to_print:\n print('xot_solve -- thoughts: ', thoughts)\n prompt, _ = self.prompter.xot_prompt_wrap(x, thoughts)\n\n instruct = self.prompter.get_instruction_prompt()\n samples = self.gpt(prompt, instruct, n=self.args.param.n_generate_sample, stop=self.args.gpt.stop)\n \n return samples, thoughts, actions, model_call_phase1\n\n\n def solve_single_revision(self, x, samples, thoughts, actions, model_call_phase1, model_call_phase2):\n \"\"\"\n Revises a single solution based on feedback.\n \"\"\"\n instruct = self.prompter.get_instruction_prompt()\n for revise_count in range(self.args.xot.revise_times):\n infos = [self.parser.test_output(x, y, None) for y in samples]\n isCorrect = infos[0]['r']\n \n # Terminal Condition\n if isCorrect:\n model_call = model_call_phase1 + model_call_phase2\n if revise_count == 0:\n return samples, {}, False, None, [model_call, model_call_phase1, model_call_phase2]\n else:\n return samples, {}, revise_count, revised_state, [model_call, model_call_phase1, model_call_phase2]\n\n if not isCorrect:\n revised_prompt, _ = self.prompter.xot_prompt_revised_wrap(x, thoughts)\n revised_samples = self.gpt(revised_prompt, instruct, n=self.args.param.n_generate_sample, stop=self.args.gpt.stop)\n \n if self.to_print:\n print('revised_prompt', revised_prompt)\n print('revised_samples', revised_samples)\n \n if 'wrong' in revised_samples[0].lower() or 'incorrect' in revised_samples[0].lower():\n try:\n if 'all steps are wrong' in revised_samples[0].lower():\n incorrect_step = 1\n if self.to_print:\n print('all the steps are wrong')\n else:\n incorrect_step = int(revised_samples[0].split('is wrong')[0].strip().split(']')[0][-1])\n \n revised_state = self.parser.get_revised_state(x, thoughts, incorrect_step)\n \n if self.to_print:\n print('incorrect_step', incorrect_step)\n print('revised_state', revised_state)\n \n if self.args.env == 'game24':\n if incorrect_step > 1:\n ac_seq = actions_idx[:incorrect_step-1]\n state = x\n for i in range(len(ac_seq)):\n state, _ = self.game.getNextState(state, actions_idx[i])\n revised_state = state\n\n nmcts_modelcall_before = self.nmcts.getModelCall()\n player = lambda x: np.argmax(self.nmcts.getActionProb(x, temp=0, step=0))\n\n problem_state, getGameEnded, actions_idx, actions_revised = self.c.generate_thoughts(revised_state, player)\n nmcts_modelcall_after = self.nmcts.getModelCall()\n model_call_phase2 += nmcts_modelcall_after - nmcts_modelcall_before\n\n actions_after_revised = actions[:incorrect_step-1]\n actions_after_revised.extend(actions_revised)\n \n thoughts_revised = self.parser.action_to_thoughs(actions_after_revised, x)\n \n if self.to_print:\n print('actions_revised', actions_revised)\n print('actions_after_revised', actions_after_revised)\n print('thoughts_revised', thoughts_revised)\n print('xot_solve -- revised thoughts: ', thoughts_revised)\n\n prompt, _ = self.prompter.xot_prompt_wrap(x, thoughts_revised)\n instruct = self.prompter.get_instruction_prompt()\n samples = self.gpt(prompt, instruct, n=self.args.param.n_generate_sample, stop=self.args.gpt.stop)\n except:\n continue\n \n # after N revised times\n model_call = model_call_phase1 + model_call_phase2\n return samples, {}, revise_count+1, None, [model_call, model_call_phase1, model_call_phase2] \n\n\n def solve_multi_revision(self, x, samples, thoughts_list, actions, model_call_phase1, 
model_call_phase2_total):\n \"\"\"\n Revises multiple solutions based on feedback.\n \"\"\"\n instruct = self.prompter.get_instruction_prompt()\n infos = [self.parser.test_output_multi(x, y, [None, None, None]) for y in samples]\n isCorrect_list = infos[0]['r']\n \n if self.to_print:\n print('x', x)\n print('infos', infos)\n print('thoughts_list', thoughts_list)\n print('samples', samples)\n print('isCorrect_list', isCorrect_list)\n\n thoughts_revised_list = thoughts_list[:]\n revise_flags = [False] * len(isCorrect_list)\n\n for idx, r_ in enumerate(isCorrect_list):\n if idx >= len(thoughts_list): # It is posssible that gpt provides more ans than given thoughts\n break\n isCorrect = isCorrect_list[r_]['r'] \n if self.to_print:\n print('isCorrect', isCorrect)\n \n if not isCorrect:\n revise_flags[idx] = True\n thoughts = thoughts_list[idx]\n \n revised_prompt, _ = self.prompter.xot_prompt_revised_wrap(x, thoughts)\n revised_samples = self.gpt(revised_prompt, instruct, n=self.args.param.n_generate_sample, stop=self.args.gpt.stop)\n \n if self.to_print:\n print('thoughts', thoughts)\n print('revised_prompt', revised_prompt)\n print('revised_samples', revised_samples)\n \n if 'wrong' in revised_samples[0].lower()or 'incorrect' in revised_samples[0].lower():\n try:\n if 'all steps are wrong' in revised_samples[0].lower():\n incorrect_step = 1\n if self.to_print:\n print('all steps are wrong')\n else:\n incorrect_step = int(revised_samples[0].split('is wrong')[0].strip().split(']')[0][-1])\n \n revised_state = self.parser.get_revised_state(x, thoughts, incorrect_step)\n if self.to_print:\n print('incorrect_step', incorrect_step)\n print('revised_state', revised_state)\n\n if self.args.env == 'game24':\n if incorrect_step > 1:\n ac_seq = actions_idx[:incorrect_step-1]\n state = x\n for i in range(len(ac_seq)):\n state, _ = self.game.getNextState(state, actions_idx[i])\n revised_state = state\n \n nmcts_modelcall_before = self.nmcts.getModelCall()\n player = lambda x: np.argmax(self.nmcts.getActionProb(x, temp=0, step=0))\n problem_state, getGameEnded, actions_idx, actions_revised = self.c.generate_thoughts(revised_state, player)\n nmcts_modelcall_after = self.nmcts.getModelCall()\n model_call_phase2_total += nmcts_modelcall_after - nmcts_modelcall_before\n \n actions_after_revised = actions[:incorrect_step-1]\n actions_after_revised.extend(actions_revised)\n \n thoughts_revised = self.parser.action_to_thoughs(actions_after_revised, x)\n thoughts_revised_list[idx] = thoughts_revised\n\n if self.to_print:\n print('actions_revised', actions_revised)\n print('actions_after_revised', actions_after_revised)\n print('thoughts_revised', thoughts_revised)\n \n except:\n model_call_phase2_total += 0\n else:\n model_call_phase2_total += 0\n\n prompt, _ = self.prompter.xot_prompt_multi_wrap(x, thoughts_revised_list)\n instruct = self.prompter.get_instruction_prompt()\n samples_revised = self.gpt(prompt, instruct, n=self.args.param.n_generate_sample, stop=self.args.gpt.stop)\n model_call = model_call_phase1 + model_call_phase2_total\n return samples_revised, {}, revise_flags, None, [model_call, model_call_phase1, model_call_phase2_total]\n \n\n def solve(self, idx):\n \"\"\"\n The main method that solves the game. 
It first generates solutions, then revises them if necessary.\n \"\"\"\n x = self.game.getOneTestBoard(idx)\n self.nmcts.reset()\n model_call_phase1, model_call_phase2 = 0, 0\n\n # Load Config\n self.c.game.total_game_step = self.args.task.total_game_step\n if self.args.multi_solution:\n self.nmcts.args.numMCTSSims = self.args.xot.multi_numMCTSSims\n else:\n self.nmcts.args.numMCTSSims = self.args.xot.numMCTSSims\n \n if not self.args.multi_solution:\n samples, thoughts, actions, model_call_phase1 = self.single_solve_before_revision(x)\n else:\n samples, thoughts_list, actions, model_call_phase1 = self.multi_solve_before_revision(x)\n \n if not self.args.xot.revised:\n model_call = model_call_phase1 + model_call_phase2\n return samples, {}, False, None, [model_call, model_call_phase1, model_call_phase2]\n else: \n # Update Config For Revision\n self.c.game.total_game_step = self.args.xot.revise_total_game_step\n self.nmcts.args.numMCTSSims = self.args.xot.revise_numMCTSSims\n if self.args.xot.revised and not self.args.multi_solution:\n return self.solve_single_revision(x, samples, thoughts, actions, model_call_phase1, model_call_phase2)\n \n if self.args.xot.revised and self.args.multi_solution:\n return self.solve_multi_revision(x, samples, thoughts_list, actions, model_call_phase1, model_call_phase2)"
}
] | import os
import json
import itertools
import random
import ast
import re
import numpy as np
import pandas as pd
from collections import Counter
from .utils import *
from .solver.io_solver import IO_Solver
from .solver.cot_solver import CoT_Solver
from .solver.tot_solver import ToT_Solver
from .solver.got_solver import GoT_Solver
from .solver.xot_solver import XoT_Solver | 9,463 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
class Controller:
"""
    Controller class to manage the execution flow.
    It coordinates the language models, operations, prompting, and parsing.
"""
def __init__(self, config, gpt, game, prompter, parser):
self.config = config
self.gpt = gpt
self.game = game
self.prompter = prompter
self.parser = parser
def initial_logs(self, config):
if config.method == 'io' or config.method == 'cot':
file = f'logs/{config.env}/{config.gpt.backend}_{config.gpt.temperature}_{config.method}_sample{config.param.n_generate_sample}_multi{config.multi_solution}_start{config.task.task_start_index}_end{config.task.task_end_index}.json'
elif config.method == 'tot':
file = f'logs/{config.env}/{config.gpt.backend}_{config.gpt.temperature}_{config.method}_propose{config.param.n_generate_sample}_value{config.param.n_evaluate_sample}_greedy{config.param.n_select_sample}_start{config.task.task_start_index}_end{config.task.task_end_index}_laststep{config.param.last_step}.json'
elif config.method == 'got':
file = f'logs/{config.env}/{config.gpt.backend}_{config.gpt.temperature}_{config.method}_multi{config.multi_solution}_propose{config.param.n_generate_sample}_value{config.param.n_evaluate_sample}_greedy{config.param.n_select_sample}_start{config.task.task_start_index}_end{config.task.task_end_index}_laststep{config.param.last_step}.json'
elif config.method == 'xot':
file = f'logs/{config.env}/{config.gpt.backend}_{config.gpt.temperature}_{config.method}_multi{config.multi_solution}_revised{config.xot.revised}_reviseTimes{config.xot.revise_times}_start{config.task.task_start_index}_end{config.task.task_end_index}_laststep{config.param.last_step}.json'
else:
raise ValueError("invalid method")
os.makedirs(os.path.dirname(file), exist_ok=True)
return file
def initial_solver(self, config):
if config.method == 'io':
return IO_Solver(config, self.gpt, self.game, self.prompter, self.parser)
elif config.method == 'cot':
| # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
class Controller:
"""
    Controller class to manage the execution flow.
    It coordinates the language models, operations, prompting, and parsing.
"""
def __init__(self, config, gpt, game, prompter, parser):
self.config = config
self.gpt = gpt
self.game = game
self.prompter = prompter
self.parser = parser
def initial_logs(self, config):
if config.method == 'io' or config.method == 'cot':
file = f'logs/{config.env}/{config.gpt.backend}_{config.gpt.temperature}_{config.method}_sample{config.param.n_generate_sample}_multi{config.multi_solution}_start{config.task.task_start_index}_end{config.task.task_end_index}.json'
elif config.method == 'tot':
file = f'logs/{config.env}/{config.gpt.backend}_{config.gpt.temperature}_{config.method}_propose{config.param.n_generate_sample}_value{config.param.n_evaluate_sample}_greedy{config.param.n_select_sample}_start{config.task.task_start_index}_end{config.task.task_end_index}_laststep{config.param.last_step}.json'
elif config.method == 'got':
file = f'logs/{config.env}/{config.gpt.backend}_{config.gpt.temperature}_{config.method}_multi{config.multi_solution}_propose{config.param.n_generate_sample}_value{config.param.n_evaluate_sample}_greedy{config.param.n_select_sample}_start{config.task.task_start_index}_end{config.task.task_end_index}_laststep{config.param.last_step}.json'
elif config.method == 'xot':
file = f'logs/{config.env}/{config.gpt.backend}_{config.gpt.temperature}_{config.method}_multi{config.multi_solution}_revised{config.xot.revised}_reviseTimes{config.xot.revise_times}_start{config.task.task_start_index}_end{config.task.task_end_index}_laststep{config.param.last_step}.json'
else:
raise ValueError("invalid method")
os.makedirs(os.path.dirname(file), exist_ok=True)
return file
def initial_solver(self, config):
if config.method == 'io':
return IO_Solver(config, self.gpt, self.game, self.prompter, self.parser)
elif config.method == 'cot': | return CoT_Solver(config, self.gpt, self.game, self.prompter, self.parser) | 1 | 2023-11-08 09:48:34+00:00 | 12k |
UMass-Foundation-Model/CoVLM | open_flamingo/eval/evaluate_llava.py | [
{
"identifier": "create_model_and_transforms",
"path": "open_flamingo/src/factory.py",
"snippet": "def create_model_and_transforms(\n clip_vision_encoder_path: str,\n clip_vision_encoder_pretrained: str,\n lang_encoder_path: str,\n tokenizer_path: str,\n use_local_files: bool = False,\n decoder_layers_attr_name: str = None,\n checkpoint_activations: bool = False,\n freeze_vision_encoder: bool = False,\n load_detection_head_weight: str = None,\n **flamingo_kwargs,\n):\n is_llava = \"llava\" in lang_encoder_path\n if is_llava:\n from llava.model.builder import load_pretrained_model\n from llava.mm_utils import get_model_name_from_path\n model_path = lang_encoder_path\n model_path = os.path.expanduser(model_path)\n model_name = get_model_name_from_path(model_path)\n text_tokenizer, llava_model, image_processor, context_len = load_pretrained_model(model_path, None, model_name)\n mm_projector = llava_model.model.mm_projector\n vision_encoder = llava_model.model.vision_tower\n del llava_model.model.layers\n del llava_model.lm_head\n else:\n mm_projector = None\n vision_encoder, _, image_processor = open_clip.create_model_and_transforms(\n clip_vision_encoder_path, pretrained=clip_vision_encoder_pretrained\n )\n # set the vision encoder to output the visual features\n vision_encoder.visual.output_tokens = True\n # delete text encoder part\n del vision_encoder.transformer\n del vision_encoder.text_projection\n del vision_encoder.token_embedding\n del vision_encoder.ln_final\n del vision_encoder.positional_embedding\n del vision_encoder.logit_scale\n vision_encoder.visual.proj = None\n vision_encoder.visual.ln_post = torch.nn.Identity()\n text_tokenizer = AutoTokenizer.from_pretrained(\n tokenizer_path, local_files_only=use_local_files\n )\n\n # add Flamingo special tokens to the tokenizer\n additional_special_tokens = [\"<|#image#|>\", \"<|#endofimage#|>\", \"<|#visual#|>\", \"<|#object#|>\", \"<|#box#|>\", \"<|#endofobject#|>\", \"<|#attr#|>\", \"<|#endofattr#|>\", \"<|#previsual#|>\", \"<|#prebox#|>\"]\n text_tokenizer.add_special_tokens(\n {\"additional_special_tokens\": additional_special_tokens}\n )\n if text_tokenizer.pad_token is None:\n # Issue: GPT models don't have a pad token, which we use to\n # modify labels for the loss.\n text_tokenizer.add_special_tokens({\"pad_token\": \"<PAD>\"})\n\n if is_llava:\n vicuna_path = LLAVA_TO_VICUNA[lang_encoder_path]\n lang_encoder = AutoModelForCausalLM.from_pretrained(\n vicuna_path, local_files_only=use_local_files\n )\n else:\n lang_encoder = AutoModelForCausalLM.from_pretrained(\n lang_encoder_path, local_files_only=use_local_files\n )\n extend_instance(lang_encoder, FlamingoLMMixin)\n\n if decoder_layers_attr_name is None:\n decoder_layers_attr_name = _infer_decoder_layers_attr_name(lang_encoder)\n lang_encoder.set_decoder_layers_attr_name(decoder_layers_attr_name)\n lang_encoder.resize_token_embeddings(len(text_tokenizer))\n lang_encoder_name = lang_encoder.__class__.__name__.lower()\n if checkpoint_activations:\n from fairscale.nn.checkpoint import checkpoint_wrapper\n vision_encoder_layers = vision_encoder.vision_tower.vision_model.encoder.layers if is_llava else vision_encoder.visual.transformer.resblocks\n for i in range(len(vision_encoder_layers)):\n vision_encoder_layers[i] = checkpoint_wrapper(\n vision_encoder_layers[i],\n offload_to_cpu=False,\n )\n if \"opt\" in lang_encoder_name:\n lang_encoder_layers = lang_encoder.model.decoder.layers\n elif \"codegen\" in lang_encoder_name:\n lang_encoder_layers = lang_encoder.transformer.h\n elif \"llama\" in lang_encoder_name:\n lang_encoder_layers = lang_encoder.model.layers\n 
elif \"gptneo\" in lang_encoder_name:\n lang_encoder_layers = lang_encoder.gpt_neox.layers\n else:\n raise ValueError(f\"unknown model {lang_encoder_name}\")\n for i in range(len(lang_encoder_layers)):\n lang_encoder_layers[i] = checkpoint_wrapper(\n lang_encoder_layers[i],\n offload_to_cpu=False,\n )\n if is_llava:\n vis_dim = vision_encoder.config.hidden_size\n image_size = vision_encoder.config.image_size\n patch_size = vision_encoder.config.patch_size\n else:\n vis_dim = open_clip.get_model_config(clip_vision_encoder_path)[\"vision_cfg\"][\"width\"]\n image_size = open_clip.get_model_config(clip_vision_encoder_path)[\"vision_cfg\"][\"image_size\"]\n patch_size = open_clip.get_model_config(clip_vision_encoder_path)[\"vision_cfg\"][\"patch_size\"]\n assert image_size % patch_size == 0\n vis_embed_size = (image_size // patch_size) ** 2\n\n lang_dim = int(lang_encoder.config.hidden_size)\n if hasattr(lang_encoder.config, \"word_embed_proj_dim\"):\n hidden_state_dim = lang_encoder.config.word_embed_proj_dim\n else:\n hidden_state_dim = lang_encoder.config.hidden_size\n model = Flamingo(\n vision_encoder=vision_encoder,\n lang_encoder=lang_encoder,\n eoc_token_id=text_tokenizer.encode(text_tokenizer.eos_token)[-1],\n media_token_id=text_tokenizer.encode(\"<|#image#|>\")[-1],\n image_end_token_id=text_tokenizer.encode(\"<|#endofimage#|>\")[-1],\n visual_token_id=text_tokenizer.encode(\"<|#visual#|>\")[-1],\n previsual_token_id=text_tokenizer.encode(\"<|#previsual#|>\")[-1],\n box_token_id=text_tokenizer.encode(\"<|#box#|>\")[-1],\n prebox_token_id=text_tokenizer.encode(\"<|#prebox#|>\")[-1],\n endofobject_token_id=text_tokenizer.encode(\"<|#endofobject#|>\")[-1],\n vis_dim=vis_dim,\n vis_embed_size=vis_embed_size,\n lang_dim=lang_dim,\n image_size=image_size,\n patch_size=patch_size,\n hidden_state_dim=hidden_state_dim,\n mm_projector=mm_projector,\n **flamingo_kwargs,\n )\n\n if is_llava and load_detection_head_weight is not None:\n temp = torch.load(load_detection_head_weight, map_location=\"cpu\")\n detection_head_checkpoint = {}\n for key in temp[\"model_state_dict\"]:\n if key.startswith(\"detection_head\"):\n detection_head_checkpoint[key.replace(\"detection_head.\", \"\")] = temp[\"model_state_dict\"][key]\n model.detection_head.yolox_head.load_state_dict(detection_head_checkpoint, strict=True)\n logging.info(f\"load detection_head weights from: {load_detection_head_weight}\")\n del temp\n\n if freeze_vision_encoder:\n logging.info(\"freeze vision encoder\")\n model.vision_encoder.requires_grad_(False)\n\n logging.info(\n f\"Flamingo model initialized with {sum(p.numel() for p in model.parameters() if p.requires_grad)} trainable parameters\"\n )\n\n return model, image_processor, text_tokenizer, vis_embed_size"
},
{
"identifier": "init_distributed_device",
"path": "open_flamingo/train/distributed.py",
"snippet": "def init_distributed_device(args):\n # Distributed training = training on more than one GPU.\n # Works in both single and multi-node scenarios.\n args.distributed = False\n args.world_size = 1\n args.rank = 0 # global rank\n args.local_rank = 0\n if args.horovod:\n assert hvd is not None, \"Horovod is not installed\"\n hvd.init()\n args.local_rank = int(hvd.local_rank())\n args.rank = hvd.rank()\n args.world_size = hvd.size()\n args.distributed = True\n os.environ[\"LOCAL_RANK\"] = str(args.local_rank)\n os.environ[\"RANK\"] = str(args.rank)\n os.environ[\"WORLD_SIZE\"] = str(args.world_size)\n elif is_using_distributed():\n if \"SLURM_PROCID\" in os.environ:\n # DDP via SLURM\n args.local_rank, args.rank, args.world_size = world_info_from_env()\n # SLURM var -> torch.distributed vars in case needed\n os.environ[\"LOCAL_RANK\"] = str(args.local_rank)\n os.environ[\"RANK\"] = str(args.rank)\n os.environ[\"WORLD_SIZE\"] = str(args.world_size)\n torch.distributed.init_process_group(\n backend=args.dist_backend,\n init_method=args.dist_url,\n world_size=args.world_size,\n rank=args.rank,\n )\n else:\n # DDP via torchrun, torch.distributed.launch\n args.local_rank, _, _ = world_info_from_env()\n torch.distributed.init_process_group(\n backend=args.dist_backend, init_method=args.dist_url\n )\n args.world_size = torch.distributed.get_world_size()\n args.rank = torch.distributed.get_rank()\n args.distributed = True\n else:\n # needed to run on single gpu\n torch.distributed.init_process_group(\n backend=args.dist_backend,\n init_method=args.dist_url,\n world_size=1,\n rank=0,\n )\n\n if torch.cuda.is_available():\n if args.distributed and not args.no_set_device_rank:\n device = \"cuda:%d\" % args.local_rank\n else:\n device = \"cuda:0\"\n torch.cuda.set_device(device)\n else:\n device = \"cpu\"\n args.device = device\n device = torch.device(device)\n return device"
},
{
"identifier": "world_info_from_env",
"path": "open_flamingo/train/distributed.py",
"snippet": "def world_info_from_env():\n local_rank = 0\n for v in (\n \"LOCAL_RANK\",\n \"MPI_LOCALRANKID\",\n \"SLURM_LOCALID\",\n \"OMPI_COMM_WORLD_LOCAL_RANK\",\n ):\n if v in os.environ:\n local_rank = int(os.environ[v])\n break\n global_rank = 0\n for v in (\"RANK\", \"PMI_RANK\", \"SLURM_PROCID\", \"OMPI_COMM_WORLD_RANK\"):\n if v in os.environ:\n global_rank = int(os.environ[v])\n break\n world_size = 1\n for v in (\"WORLD_SIZE\", \"PMI_SIZE\", \"SLURM_NTASKS\", \"OMPI_COMM_WORLD_SIZE\"):\n if v in os.environ:\n world_size = int(os.environ[v])\n break\n\n return local_rank, global_rank, world_size"
},
{
"identifier": "GQADataset",
"path": "open_flamingo/eval/task/gqa.py",
"snippet": "class GQADataset(Dataset):\n def __init__(\n self,\n image_dir_path=\"/gpfs/u/home/LMCG/LMCGljnn/scratch/datasets/raw/gqa/images\",\n annotations_path=\"/gpfs/u/home/LMCG/LMCGljnn/scratch/datasets/raw/gqa/testdev_balanced_questions.json\",\n ):\n annotations = json.load(open(annotations_path))\n self.questions = []\n self.answers = []\n self.image_paths = []\n self.question_ids = []\n for anno_id in annotations:\n question = annotations[anno_id][\"question\"]\n imageId = annotations[anno_id][\"imageId\"]\n answer = annotations[anno_id][\"answer\"]\n self.questions.append(question)\n self.answers.append(answer)\n self.image_paths.append(os.path.join(image_dir_path, \"{}.jpg\".format(imageId)))\n self.question_ids.append(anno_id)\n # print(annotations[anno_id][\"types\"])\n self.vqa_dataset = \"gqa\"\n\n def __len__(self):\n return len(self.questions)\n\n def __getitem__(self, idx):\n question = self.questions[idx]\n question_id = self.question_ids[idx]\n answer = self.answers[idx]\n img_path = self.image_paths[idx]\n image = Image.open(img_path)\n return {\n \"image\": image,\n \"question\": question,\n \"answers\": answer,\n \"question_id\": question_id,\n }"
},
{
"identifier": "is_correct",
"path": "open_flamingo/eval/task/utils.py",
"snippet": "def is_correct(input_ids, logits, tokenizer, object: str, topk=5, N=10):\n answer_id = torch.tensor(tokenizer(f\" {object}\", add_special_tokens=False)[\"input_ids\"]).to(input_ids.device)\n answer_begin_idx = (input_ids == answer_id[0]).nonzero()\n answer_idx = None\n for (batch_idx, IDX) in answer_begin_idx:\n try:\n if (input_ids[batch_idx, IDX:IDX+len(answer_id)] == answer_id).all():\n answer_idx = list(range(IDX-1, IDX+len(answer_id)-1))\n except:\n pass\n if answer_idx is None:\n return np.inf, False, False\n res = logits[0, answer_idx].softmax(-1).sort(descending=True)\n values = res.values\n indices = res.indices\n chosen_ids = list(itertools.product(*([list(range(N))]*len(answer_idx))))\n probs = []\n for ids in chosen_ids:\n prob = 1.0\n for i, id in enumerate(ids):\n prob *= values[i, id]\n probs.append((prob.item(), ids))\n probs.sort(reverse=True)\n answer_pos = tuple([id_array.tolist().index(idx) for id_array, idx in zip(indices, answer_id)])\n ranking = [p[1] for p in probs]\n # if len(answer_idx) > 1:\n # import pdb; pdb.set_trace()\n try:\n r = ranking.index(answer_pos)\n return r, r < 1, r < 5\n except:\n return np.inf, False, False"
},
{
"identifier": "get_iou",
"path": "open_flamingo/eval/task/utils.py",
"snippet": "def get_iou(box1, box2):\n # box1 and box2 should be in the format [x1, y1, x2, y2]\n intersection = max(0, min(box1[2], box2[2]) - max(box1[0], box2[0])) * \\\n max(0, min(box1[3], box2[3]) - max(box1[1], box2[1]))\n area_box1 = (box1[2] - box1[0]) * (box1[3] - box1[1])\n area_box2 = (box2[2] - box2[0]) * (box2[3] - box2[1])\n union = area_box1 + area_box2 - intersection\n iou = intersection / union if union > 0 else 0\n return iou"
},
{
"identifier": "evaluate_cola",
"path": "open_flamingo/eval/task/cola.py",
"snippet": "def evaluate_cola(\n model,\n tokenizer,\n image_processor,\n vis_embed_size=None,\n rank=0,\n world_size=1,\n id=0,\n debug=False,\n):\n dataset_name = \"cola\"\n dataset = json.load(open(DATASET))\n model = model.cuda().eval()\n correct = 0\n total = 0\n pbar = tqdm(dataset, disable=(rank != 0))\n for ii, sample in enumerate(pbar):\n if ii % world_size != rank:\n continue\n image1 = Image.open(os.path.join(VG_ROOT, os.path.basename(sample[0]))).convert(\"RGB\").resize((224, 224))\n text1 = sample[1]\n image2 = Image.open(os.path.join(VG_ROOT, os.path.basename(sample[2]))).convert(\"RGB\").resize((224, 224))\n text2 = sample[3]\n score11 = -get_score(image1, text1, model, tokenizer, image_processor, vis_embed_size)\n score12 = -get_score(image1, text2, model, tokenizer, image_processor, vis_embed_size)\n score21 = -get_score(image2, text1, model, tokenizer, image_processor, vis_embed_size)\n score22 = -get_score(image2, text2, model, tokenizer, image_processor, vis_embed_size)\n if score11 > score21 and score22 > score12:\n correct += 1\n total += 1\n pbar.set_description(f\"{correct / total:.2f}\")\n\n with open(f\"{dataset_name}_results_part{rank}_{id}.json\", \"w\") as f:\n f.write(json.dumps([total, correct]))\n if world_size > 1:\n torch.distributed.barrier()\n if rank == 0:\n total = 0\n correct = 0\n print(f\"evaluate on rank {rank}. world size is {world_size}\")\n for rank_i in range(world_size):\n [total_part, correct_part] = json.load(open(f\"{dataset_name}_results_part{rank_i}_{id}.json\"))\n os.remove(f\"{dataset_name}_results_part{rank_i}_{id}.json\")\n total += total_part\n correct += correct_part\n score = correct / total\n print(\"score:\", score)\n with open(os.path.join(\"eval_results\", f\"{dataset_name}_{model.expr_name}_{model.step_num}_{int(time.time())}_{score}_{total}\"), \"w\") as f:\n pass\n else:\n score = 0.0\n if world_size > 1:\n torch.distributed.barrier()\n return score"
},
{
"identifier": "evaluate_gqa",
"path": "open_flamingo/eval/task/gqa.py",
"snippet": "def evaluate_gqa(\n model,\n tokenizer,\n image_processor,\n batch_size=1,\n vis_embed_size=None,\n rank=0,\n world_size=1,\n id=0,\n):\n \"\"\"\n Evaluate a model on VQA datasets. Currently supports VQA v2.0.\n\n Args:\n model (nn.Module): model to evaluate\n tokenizer (transformers.PreTrainedTokenizer): tokenizer for the model\n image_processor : image processor for the model\n batch_size (int): batch size\n image_dir_path (str): path to image directory\n questions_json_path (str): path to questions json file\n annotations_json_path (str): path to annotations json file\n seed (int, optional): random seed. Defaults to 42.\n max_generation_length (int, optional): max generation length. Defaults to 5.\n num_beams (int, optional): number of beams to use for beam search. Defaults to 3.\n length_penalty (float, optional): length penalty for beam search. Defaults to -2.0.\n num_samples (int, optional): number of samples to evaluate on. Defaults to 5000 samples.\n query_set_size (int, optional): size of the query set. Defaults to 2048.\n num_shots (int, optional): number of shots to use. Defaults to 8.\n device (int, optional): device to use. Defaults to -1 (cpu).\n num_workers (int, optional): number of workers to use. Defaults to 4.\n vqa_dataset (string): type of vqa dataset: currently supports vqa, ok_vqa. Defaults to vqa.\n Returns:\n float: accuracy score\n \"\"\"\n assert batch_size == 1\n vqa_dataset = \"gqa\"\n eval_dataset = GQADataset()\n object_token_id = tokenizer(\"<|#object#|>\", add_special_tokens=False)[\"input_ids\"][-1]\n endofobject_token_id = tokenizer(\"<|#endofobject#|>\", add_special_tokens=False)[\"input_ids\"][-1]\n prebox_token_id = tokenizer(\"<|#prebox#|>\", add_special_tokens=False)[\"input_ids\"][-1]\n media_token_id = tokenizer(\"<|#image#|>\", add_special_tokens=False)[\"input_ids\"][-1]\n endofmedia_token_id = tokenizer(\"<|#endofimage#|>\", add_special_tokens=False)[\"input_ids\"][-1]\n pad_token_id = tokenizer(tokenizer.pad_token, add_special_tokens=False)[\"input_ids\"][-1]\n bos_token_id = tokenizer(tokenizer.bos_token, add_special_tokens=False)[\"input_ids\"][-1]\n def get_prompt(sample):\n return f\"{tokenizer.bos_token}<|#image#|>{tokenizer.pad_token*vis_embed_size}<|#endofimage#|>Question: {sample['question'].strip()} Short answer:\"\n model.eval().cuda()\n lang_encoder_name = model.lang_encoder.__class__.__name__.lower()\n predictions = []\n if batch_size != 1:\n tokenizer.padding_side = \"left\"\n if world_size > 1:\n torch.distributed.barrier()\n this_tot = 0\n for ii, batch in enumerate(more_itertools.chunked(\n tqdm(eval_dataset, desc=\"Running inference\", disable=(rank != 0)), batch_size,\n )):\n if ii % world_size != rank:\n continue\n batch[0][\"image\"] = batch[0][\"image\"].resize((224, 224))\n batch_images = prepare_batch_images(\n batch=batch,\n image_processor=image_processor,\n ).cuda()\n batch_text = [get_prompt(s) for s in batch]\n encodings = tokenizer(\n batch_text,\n return_tensors=\"pt\",\n padding=\"longest\",\n truncation=True,\n max_length=2000,\n )\n input_ids = encodings[\"input_ids\"].cuda()\n attention_mask = encodings[\"attention_mask\"].cuda()\n image_start_index_list = ((input_ids == media_token_id).nonzero(as_tuple=True)[-1] + 1).tolist()\n image_start_index_list = [[x] for x in image_start_index_list]\n image_nums = [1] * len(input_ids)\n with torch.inference_mode() and torch.cuda.amp.autocast(dtype=torch.float16):\n outputs = model.generate(\n batch_images,\n input_ids,\n attention_mask=attention_mask,\n 
max_new_tokens=10,\n min_length=1,\n num_beams=1,\n # length_penalty=0,\n image_start_index_list=image_start_index_list,\n image_nums=image_nums,\n added_bbox_list=None,\n return_dict_in_generate=True,\n output_scores=True,\n )\n scores = outputs.scores\n outputs = outputs.sequences[:, len(input_ids[0]) :]\n if object_token_id in scores[0][0].sort(descending=True).indices[:5]:\n sample = batch[0]\n # print(\"=\"*80)\n # print(\"sample:\", batch, scores[0][0].sort(descending=True).indices[:10].tolist().index(object_token_id))\n prompt1 = [f\"{tokenizer.bos_token}<|#image#|>{tokenizer.pad_token*vis_embed_size}<|#endofimage#|>Question: {sample['question'].strip()} Short answer:<|#object#|><|#previsual#|>\"]\n boxes, scores = get_bbox(None, batch_images, prompt1, model, tokenizer, media_token_id, prebox_token_id, return_all=True)\n # open_cv_image = np.array(sample[\"image\"])\n # open_cv_image = open_cv_image[:, :, ::-1].copy()\n # cv2.imwrite(f\"Atest_ori.png\", open_cv_image)\n # open_cv_image = cv2.rectangle(open_cv_image, boxes[0][:2].astype(int), boxes[0][2:].astype(int), (0, 255, 0), 2)\n # print(scores)\n # cv2.imwrite(f\"Atest.png\", open_cv_image)\n if boxes is not None and len(boxes) > 0:\n prompt2 = [f\"{tokenizer.bos_token}<|#image#|>{tokenizer.pad_token*vis_embed_size}<|#endofimage#|>Question: {sample['question'].strip()} Short answer: it is<|#object#|><|#previsual#|><|#prebox#|><|#object#|> a\"]\n encodings = tokenizer(\n prompt2,\n return_tensors=\"pt\",\n padding=\"longest\",\n truncation=True,\n max_length=2000,\n )\n input_ids = encodings[\"input_ids\"].cuda()\n attention_mask = encodings[\"attention_mask\"].cuda()\n image_start_index_list = ((input_ids == media_token_id).nonzero(as_tuple=True)[-1] + 1).tolist()\n image_start_index_list = [[x] for x in image_start_index_list]\n image_nums = [1] * len(input_ids)\n added_bbox_list = [torch.tensor(boxes[0]/224.0).cuda().unsqueeze(0).clamp(0, 0.99)]\n with torch.inference_mode() and torch.cuda.amp.autocast(dtype=torch.float16):\n outputs = model.generate(\n batch_images,\n input_ids,\n attention_mask=attention_mask,\n max_new_tokens=10,\n min_length=1,\n num_beams=1,\n image_start_index_list=image_start_index_list,\n image_nums=image_nums,\n added_bbox_list=added_bbox_list,\n eos_token_id=(endofobject_token_id),\n )\n outputs = outputs[:, len(input_ids[0]) :]\n # print(\"previsual===>{}\".format(tokenizer.decode(outputs[0], skip_special_tokens=True).strip().lower().strip(string.punctuation+\" \")))\n\n # postprocess begin\n new_predictions = [\n out.strip().lower().strip(string.punctuation+\" \") for out in tokenizer.batch_decode(outputs, skip_special_tokens=True)\n ]\n this_tot += 1\n predictions.extend(\n [\n {\"answer\": p, \"question_id\": sample[\"question_id\"], \"_question\": sample[\"question\"], \"answers\": sample[\"answers\"]}\n for p, sample in zip(new_predictions, batch)\n ]\n )\n with open(f\"{vqa_dataset}_{lang_encoder_name}_results_part{rank}_{id}.json\", \"w\") as f:\n f.write(json.dumps(predictions))\n print(\"save to\", f\"{vqa_dataset}_{lang_encoder_name}_results_part{rank}_{id}.json\")\n\n time.sleep(10)\n if world_size > 1:\n torch.distributed.barrier()\n if rank == 0:\n print(f\"evaluate on rank {rank}. 
world size is {world_size}\")\n predictions = []\n for rank_i in range(world_size):\n print(\"load\", f\"{vqa_dataset}_{lang_encoder_name}_results_part{rank_i}_{id}.json\")\n predictions.extend(json.load(open(f\"{vqa_dataset}_{lang_encoder_name}_results_part{rank_i}_{id}.json\")))\n os.remove(f\"{vqa_dataset}_{lang_encoder_name}_results_part{rank_i}_{id}.json\")\n print(\"num:\", len(predictions))\n # save the predictions to a temporary file\n random_uuid = str(uuid.uuid4())\n with open(f\"{vqa_dataset}results_{random_uuid}.json\", \"w\") as f:\n f.write(json.dumps(predictions, indent=4))\n\n acc = compute_gqa_accuracy(predictions)\n print(vqa_dataset, \"score:\", acc, \"| save to\", f\"{vqa_dataset}results_{random_uuid}.json\")\n os.makedirs(\"eval_results\", exist_ok=True)\n with open(os.path.join(\"eval_results\", f\"{vqa_dataset}_{model.expr_name}_{model.step_num}_{int(time.time())}_{acc}\"), \"w\") as f:\n f.write(json.dumps(predictions, indent=2))\n\n # delete the temporary file\n os.remove(f\"{vqa_dataset}results_{random_uuid}.json\")\n else:\n time.sleep(5)\n acc = 0.0\n if world_size > 1:\n torch.distributed.barrier()\n return acc"
}
] | import argparse
import json
import os
import random
import uuid
import time
import cv2
import webdataset as wds
import transformers
import more_itertools
import numpy as np
import torch
import base64
import string
import spacy
import hashlib
import pdb
from math import ceil
from collections import defaultdict
from typing import Callable
from accelerate import init_empty_weights, load_checkpoint_and_dispatch
from sklearn.metrics import recall_score, average_precision_score
from coco_metric import compute_cider, postprocess_captioning_generation
from eval_datasets import VQADataset
from tqdm import tqdm
from collections import Counter
from vqa_metric import compute_vqa_accuracy, compute_gqa_accuracy
from open_flamingo.src.factory import create_model_and_transforms
from PIL import Image
from io import BytesIO
from open_flamingo.train.distributed import init_distributed_device, world_info_from_env
from open_flamingo.eval.task.reg import evaluate_reg
from open_flamingo.eval.task.gqa import GQADataset
from open_flamingo.eval.task.vl_checklist import evaluate_vlc
from open_flamingo.eval.task.crepe import evaluate_crepe
from open_flamingo.eval.task.caption import evaluate_coco_flickr
from open_flamingo.eval.task.utils import is_correct, get_iou
from open_flamingo.eval.task.cola import evaluate_cola
from open_flamingo.eval.task.gqa import evaluate_gqa
from open_flamingo.eval.dataset_zoo import VG_Relation, VG_Attribution | 8,289 | )
parser.add_argument(
"--use_sam",
default=None,
type=str,
required=False,
)
parser.add_argument(
"--add_visual_token",
default=False,
action="store_true",
)
parser.add_argument(
"--use_format_v2",
default=False,
action="store_true",
)
parser.add_argument(
"--eval_aro",
default=False,
action="store_true",
)
parser.add_argument(
"--eval_pisc",
default=False,
action="store_true",
)
parser.add_argument(
"--eval_reg",
default=False,
action="store_true",
)
parser.add_argument(
"--eval_vlc",
default=False,
action="store_true",
)
parser.add_argument(
"--eval_crepe",
default=False,
action="store_true",
)
parser.add_argument(
"--eval_cola",
default=False,
action="store_true",
)
parser.add_argument(
"--level",
default=4,
type=int,
)
parser.add_argument(
"--type",
default="swap",
type=str,
)
parser.add_argument(
"--choose_left_right",
default=False,
action="store_true",
)
parser.add_argument(
"--eval_exp",
default=False,
action="store_true",
)
def preprocess_image(sample, image_processor):
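    # Run the image processor; HuggingFace processors return a BatchFeature,
    # so unwrap the pixel values into a plain tensor before batching.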
image = image_processor(sample)
if isinstance(image, transformers.image_processing_utils.BatchFeature):
image = torch.tensor(image["pixel_values"][0])
return image
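# Post-processes OK-VQA answers: lemmatizes nouns and verbs with spaCy so that
# generated answers better match the annotation style (a common OK-VQA post-processing step).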
class OKVQAPostProcess():
def __init__(self):
self._lemmatizer = None
def _lemmatize(self, answers):
def apply(answer):
doc = self.lemmatizer(answer)
words = []
for token in doc:
if token.pos_ in ["NOUN", "VERB"]:
words.append(token.lemma_)
else:
words.append(token.text)
answer = " ".join(words)
return answer
return [apply(answer) for answer in answers]
@property
def lemmatizer(self):
if self._lemmatizer is None:
try:
self._lemmatizer = spacy.load("en_core_web_sm")
except ImportError:
logging.error(
"""
Please install spacy and en_core_web_sm model to apply lemmatization.
python -m spacy download en_core_web_sm
OR
import spacy.cli
spacy.cli.download("en_core_web_sm")
"""
)
exit(1)
return self._lemmatizer
def main():
args = parser.parse_args()
if args.dist:
|
def expand2square(pil_img, background_color):
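    # Pad a rectangular PIL image to a square canvas filled with background_color,
    # centering the original image along its shorter dimension.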
width, height = pil_img.size
if width == height:
return pil_img
elif width > height:
result = Image.new(pil_img.mode, (width, width), background_color)
result.paste(pil_img, (0, (width - height) // 2))
return result
else:
result = Image.new(pil_img.mode, (height, height), background_color)
result.paste(pil_img, ((height - width) // 2, 0))
return result
parser = argparse.ArgumentParser()
parser.add_argument("--lm_path", type=str, default="facebook/opt-1.3b")
parser.add_argument("--lm_tokenizer_path", type=str, default="facebook/opt-30b")
parser.add_argument("--vision_encoder_path", default="ViT-L-14", type=str)
parser.add_argument("--vision_encoder_pretrained", default="openai", type=str)
parser.add_argument("--checkpoint_path", type=str, required=True)
parser.add_argument(
"--results_file", type=str, default=None, help="JSON file to save results"
)
# Trial arguments
parser.add_argument("--shots", nargs="+", default=[0, 4, 8, 16, 32], type=int)
parser.add_argument(
"--num_trials",
type=int,
default=1,
help="Number of trials to run for each shot using different demonstrations",
)
parser.add_argument(
"--trial_seeds",
nargs="+",
default=[0],
help="Seeds to use for each trial for picking demonstrations and eval sets",
)
parser.add_argument(
"--num_samples", type=int, default=5000, help="Number of samples to evaluate on"
)
parser.add_argument("--batch_size", type=int, default=8)
# Per-dataset evaluation flags
parser.add_argument(
"--eval_coco",
action="store_true",
default=False,
help="Whether to evaluate on COCO.",
)
parser.add_argument(
"--eval_vqav2",
action="store_true",
default=False,
help="Whether to evaluate on VQAV2.",
)
parser.add_argument(
"--eval_ok_vqa",
action="store_true",
default=False,
help="Whether to evaluate on OK-VQA.",
)
parser.add_argument(
"--eval_imagenet",
action="store_true",
default=False,
help="Whether to evaluate on ImageNet.",
)
parser.add_argument(
"--eval_flickr30",
action="store_true",
default=False,
help="Whether to evaluate on Flickr30.",
)
parser.add_argument(
"--eval_refcoco",
action="store_true",
default=False,
help="Whether to evaluate on RefCOCO.",
)
# Dataset arguments
## Flickr30 Dataset
parser.add_argument(
"--flickr_image_dir_path",
type=str,
help="Path to the flickr30/flickr30k_images directory.",
default=None,
)
parser.add_argument(
"--flickr_annotations_json_path",
type=str,
help="Path to the dataset_flickr30k_coco_style.json file.",
default=None,
)
## COCO Dataset
parser.add_argument(
"--coco_image_dir_path",
type=str,
help="Path to the flickr30/flickr30k_images directory.",
default=None,
)
parser.add_argument(
"--coco_annotations_json_path",
type=str,
default=None,
)
## VQAV2 Dataset
parser.add_argument(
"--vqav2_image_dir_path",
type=str,
default=None,
)
parser.add_argument(
"--vqav2_questions_json_path",
type=str,
default=None,
)
parser.add_argument(
"--vqav2_annotations_json_path",
type=str,
default=None,
)
## OK-VQA Dataset
parser.add_argument(
"--ok_vqa_image_dir_path",
type=str,
help="Path to the vqav2/train2014 directory.",
default=None,
)
parser.add_argument(
"--ok_vqa_questions_json_path",
type=str,
help="Path to the v2_OpenEnded_mscoco_train2014_questions.json file.",
default=None,
)
parser.add_argument(
"--ok_vqa_annotations_json_path",
type=str,
help="Path to the v2_mscoco_train2014_annotations.json file.",
default=None,
)
## Imagenet dataset
parser.add_argument("--imagenet_root", type=str, default="/tmp")
## RefCOCO dataset
parser.add_argument("--refcoco_tsvfile", type=str, default=None)
parser.add_argument(
"--location_token_num",
default=1000,
type=int,
)
# distributed training
parser.add_argument(
"--dist-url",
default="env://",
type=str,
help="url used to set up distributed training",
)
parser.add_argument(
"--dist-backend", default="nccl", type=str, help="distributed backend"
)
parser.add_argument(
"--horovod",
default=False,
action="store_true",
help="Use horovod for distributed training.",
)
parser.add_argument(
"--no-set-device-rank",
default=False,
action="store_true",
help="Don't set device index from local rank (when CUDA_VISIBLE_DEVICES restricted to one per proc).",
)
parser.add_argument(
"--dist",
default=False,
action="store_true",
)
parser.add_argument(
"--lora",
default=False,
action="store_true",
)
parser.add_argument(
"--lora_r",
default=16,
type=int,
required=False,
)
parser.add_argument(
"--legacy",
default=False,
action="store_true",
)
parser.add_argument(
"--special",
default=False,
action="store_true",
)
parser.add_argument(
"--id",
default=0,
type=int,
required=False,
)
parser.add_argument(
"--eval_gqa",
default=False,
action="store_true",
)
parser.add_argument(
"--use_sam",
default=None,
type=str,
required=False,
)
parser.add_argument(
"--add_visual_token",
default=False,
action="store_true",
)
parser.add_argument(
"--use_format_v2",
default=False,
action="store_true",
)
parser.add_argument(
"--eval_aro",
default=False,
action="store_true",
)
parser.add_argument(
"--eval_pisc",
default=False,
action="store_true",
)
parser.add_argument(
"--eval_reg",
default=False,
action="store_true",
)
parser.add_argument(
"--eval_vlc",
default=False,
action="store_true",
)
parser.add_argument(
"--eval_crepe",
default=False,
action="store_true",
)
parser.add_argument(
"--eval_cola",
default=False,
action="store_true",
)
parser.add_argument(
"--level",
default=4,
type=int,
)
parser.add_argument(
"--type",
default="swap",
type=str,
)
parser.add_argument(
"--choose_left_right",
default=False,
action="store_true",
)
parser.add_argument(
"--eval_exp",
default=False,
action="store_true",
)
def preprocess_image(sample, image_processor):
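    # Run the image processor; HuggingFace processors return a BatchFeature,
    # so unwrap the pixel values into a plain tensor before batching.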
image = image_processor(sample)
if isinstance(image, transformers.image_processing_utils.BatchFeature):
image = torch.tensor(image["pixel_values"][0])
return image
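# Post-processes OK-VQA answers: lemmatizes nouns and verbs with spaCy so that
# generated answers better match the annotation style (a common OK-VQA post-processing step).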
class OKVQAPostProcess():
def __init__(self):
self._lemmatizer = None
def _lemmatize(self, answers):
def apply(answer):
doc = self.lemmatizer(answer)
words = []
for token in doc:
if token.pos_ in ["NOUN", "VERB"]:
words.append(token.lemma_)
else:
words.append(token.text)
answer = " ".join(words)
return answer
return [apply(answer) for answer in answers]
@property
def lemmatizer(self):
if self._lemmatizer is None:
try:
self._lemmatizer = spacy.load("en_core_web_sm")
except ImportError:
logging.error(
"""
Please install spacy and en_core_web_sm model to apply lemmatization.
python -m spacy download en_core_web_sm
OR
import spacy.cli
spacy.cli.download("en_core_web_sm")
"""
)
exit(1)
return self._lemmatizer
def main():
args = parser.parse_args()
if args.dist: | args.local_rank, args.rank, args.world_size = world_info_from_env() | 2 | 2023-11-07 04:23:57+00:00 | 12k |
HKU-BAL/ClairS-TO | src/extract_candidates_calling.py | [
{
"identifier": "VcfReader",
"path": "shared/vcf.py",
"snippet": "class VcfReader(object):\n def __init__(self, vcf_fn,\n ctg_name=None,\n ctg_start=None,\n ctg_end=None,\n is_var_format=False,\n is_happy_format=False,\n is_fp=None,\n show_ref=True,\n direct_open=False,\n keep_row_str=False,\n skip_genotype=False,\n filter_tag=None,\n taf_filter=None,\n save_header=False,\n min_qual=None,\n max_qual=None,\n discard_indel=False,\n keep_af=False):\n self.vcf_fn = vcf_fn\n self.ctg_name = ctg_name\n self.ctg_start = ctg_start\n self.ctg_end = ctg_end\n self.variant_dict = defaultdict(Position)\n self.is_var_format = is_var_format\n self.is_happy_format = is_happy_format\n self.is_fp = is_fp\n self.show_ref = show_ref\n self.direct_open = direct_open\n self.keep_row_str = keep_row_str\n self.skip_genotype = skip_genotype\n self.filter_tag = filter_tag # PASS;HighConf PASS;MedConf in hcc1395\n self.taf_filter = taf_filter\n self.header = \"\"\n self.save_header = save_header\n self.discard_indel = discard_indel\n self.min_qual = min_qual\n self.max_qual = max_qual\n self.keep_af = keep_af\n\n def read_vcf(self):\n is_ctg_region_provided = self.ctg_start is not None and self.ctg_end is not None\n\n if self.vcf_fn is None or not os.path.exists(self.vcf_fn):\n return\n\n header_last_column = []\n if self.direct_open:\n vcf_fp = open(self.vcf_fn)\n vcf_fo = vcf_fp\n else:\n vcf_fp = subprocess_popen(shlex.split(\"gzip -fdc %s\" % (self.vcf_fn)))\n vcf_fo = vcf_fp.stdout\n for row in vcf_fo:\n columns = row.strip().split()\n if columns[0][0] == \"#\":\n if self.save_header:\n self.header += row\n header_last_column = columns\n continue\n\n tumor_in_last = True if len(header_last_column) and header_last_column[\n -1].rstrip().lower() == \"tumor\" else False\n # position in vcf is 1-based\n chromosome, position = columns[0], columns[1]\n if self.ctg_name is not None and chromosome != self.ctg_name:\n continue\n if is_ctg_region_provided and not (self.ctg_start <= int(position) <= self.ctg_end):\n continue\n\n FILTER = columns[6] if len(columns) >= 7 else None\n if self.filter_tag is not None:\n filter_list = self.filter_tag.split(',')\n if sum([1 if filter == FILTER else 0 for filter in filter_list]) == 0:\n continue\n self.is_var_format = True if columns[2][0] in 'ACGT' else False\n self.is_var_format = False\n if self.is_var_format:\n reference, alternate = columns[2], columns[3]\n genotype_1 = int(columns[4])\n genotype_2 = int(columns[5])\n else:\n reference, alternate, last_column = columns[3], columns[4], columns[-1]\n\n if self.discard_indel and (len(reference) > 1 or len(alternate) > 1):\n continue\n\n try:\n qual = columns[5] if len(columns) > 5 else None\n\n if self.min_qual is not None and float(qual) < self.min_qual:\n continue\n\n if self.max_qual is not None and float(qual) > self.max_qual:\n continue\n except:\n qual = None\n\n last_column = last_column if not tumor_in_last else columns[-2]\n if self.is_happy_format and self.is_fp:\n last_column = columns[10]\n if self.is_happy_format and not self.is_fp:\n last_column = columns[9]\n genotype = last_column.split(\":\")[0].replace(\"/\", \"|\").replace(\".\", \"0\").split(\"|\")\n try:\n genotype_1, genotype_2 = genotype\n\n if int(genotype_1) > int(genotype_2):\n genotype_1, genotype_2 = genotype_2, genotype_1\n\n # remove * to guarentee vcf match\n if '*' in alternate:\n alternate = alternate.split(',')\n if int(genotype_1) + int(genotype_2) != 3 or len(alternate) != 2:\n print('error with variant representation')\n continue\n alternate = ''.join([alt_base for alt_base in alternate if 
alt_base != '*'])\n # * always have a genotype 1/2\n\n genotype_1, genotype_2 = '0', '1'\n except:\n genotype_1 = -1\n genotype_2 = -1\n if self.keep_af:\n tag_list = columns[8].split(':')\n if 'AF' in tag_list or 'VAF' in tag_list:\n taf_index = tag_list.index('AF') if 'AF' in tag_list else tag_list.index('VAF')\n taf = float(columns[9].split(':')[taf_index])\n else:\n taf = None\n else:\n taf = None\n position = int(position)\n have_extra_infos = 'VT' in row\n\n if genotype_1 == \"0\" and genotype_2 == \"0\" and not self.show_ref and not self.skip_genotype:\n continue\n extra_infos = columns[-1].split(':')[-1] if have_extra_infos else ''\n row_str = row if self.keep_row_str else False\n key = (chromosome, position) if self.ctg_name is None else position\n\n self.variant_dict[key] = Position(ctg_name=chromosome,\n pos=position,\n ref_base=reference,\n alt_base=alternate,\n genotype1=int(genotype_1),\n genotype2=int(genotype_2),\n qual=qual,\n row_str=row_str,\n af=taf,\n filter=FILTER,\n extra_infos=extra_infos)\n\n def get_alt_info(self, pos, extra_info=\"\"):\n pos = int(pos)\n if pos not in self.variant_dict:\n return \"\"\n ref_base = self.variant_dict[pos].reference_bases\n alt_base = ','.join(self.variant_dict[pos].alternate_bases)\n gentoype_str = '/'.join([str(g) for g in self.variant_dict[pos].genotype])\n extra_info = self.variant_dict[pos].extra_infos if self.variant_dict[pos].extra_infos != \"\" else extra_info\n return extra_info + '_' + ref_base + '_' + alt_base + '_' + gentoype_str"
},
{
"identifier": "VcfWriter",
"path": "shared/vcf.py",
"snippet": "class VcfWriter(object):\n def __init__(self,\n vcf_fn,\n ctg_name=None,\n ref_fn=None,\n sample_name=\"SAMPLE\",\n write_header=True,\n header=None,\n cmdline=None,\n show_ref_calls=False):\n self.vcf_fn = vcf_fn\n self.show_ref_calls = show_ref_calls\n # make directory if not exist\n vcf_folder = os.path.dirname(self.vcf_fn)\n if not os.path.exists(vcf_folder):\n print(\"[INFO] Output VCF folder {} not found, create it\".format(vcf_folder))\n return_code = run(\"mkdir -p {}\".format(vcf_folder), shell=True)\n\n self.vcf_writer = open(self.vcf_fn, 'w')\n self.ref_fn = ref_fn\n self.ctg_name = ctg_name\n if ctg_name is not None:\n self.ctg_name_list = ctg_name.split(',') if ',' in ctg_name else [ctg_name]\n else:\n self.ctg_name_list = None\n self.sample_name = sample_name\n if write_header:\n self.write_header(ref_fn=ref_fn, header=header, cmdline=cmdline)\n\n def close(self):\n try:\n self.vcf_writer.close()\n except:\n pass\n\n def write_header(self, ctg_name=None, ref_fn=None, header=None, cmdline=None):\n header = vcf_header if header is None else header\n if cmdline is not None and cmdline != \"\":\n header_list = header.rstrip('\\n').split('\\n')\n insert_index = 3 if len(header_list) >= 3 else len(header_list) - 1\n header_list.insert(insert_index, \"##cmdline={}\".format(cmdline))\n header = \"\\n\".join(header_list) + '\\n'\n if self.ref_fn is not None:\n reference_index_file_path = file_path_from(self.ref_fn, suffix=\".fai\", exit_on_not_found=True, sep='.')\n with open(reference_index_file_path, \"r\") as fai_fp:\n for row in fai_fp:\n columns = row.strip().split(\"\\t\")\n contig_name, contig_size = columns[0], columns[1]\n if self.ctg_name_list is not None and contig_name not in self.ctg_name_list:\n continue\n header += \"##contig=<ID=%s,length=%s>\\n\" % (contig_name, contig_size)\n\n header += '#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER\\tINFO\\tFORMAT\\t%s\\n' % (self.sample_name)\n\n self.vcf_writer.write(header)\n\n def write_row(self,\n POS=None,\n REF=None,\n ALT=None,\n QUAL=0,\n GT='0/0',\n DP=0,\n AF=0,\n AD=None,\n CHROM=None,\n GQ=None,\n ID='.',\n FILTER=\".\",\n INFO='.',\n TAF=None,\n VT=None,\n TDP=None,\n AU=None,\n CU=None,\n GU=None,\n TU=None,\n row_str=None):\n if row_str is not None:\n self.vcf_writer.write(row_str)\n return\n GQ = GQ if GQ else QUAL\n CHROM = CHROM if CHROM else self.ctg_name\n if not self.show_ref_calls and (GT == \"0/0\" or GT == \"./.\"):\n return\n FORMAT = \"GT:GQ:DP:AF\"\n FORMAT_V = \"%s:%.4f:%d:%.4f\" % (GT, GQ, DP, AF)\n basic_vcf_format = \"%s\\t%d\\t%s\\t%s\\t%s\\t%.4f\\t%s\\t%s\" % (\n CHROM,\n int(POS),\n ID,\n REF,\n ALT,\n QUAL,\n FILTER,\n INFO\n )\n if AD is not None and AD != \"\":\n FORMAT += \":AD\"\n FORMAT_V += \":%s\" % (AD)\n if TAF is not None:\n FORMAT += \":TAF\"\n FORMAT_V += \":%.4f\" % (TAF)\n if TDP is not None:\n FORMAT += \":TDP\"\n FORMAT_V += \":%d\" % (TDP)\n if AU is not None and CU is not None and GU is not None and TU is not None:\n FORMAT += \":AU:CU:GU:TU\"\n FORMAT_V += \":%d:%d:%d:%d\" % (AU, CU, GU, TU)\n\n if VT is not None:\n FORMAT += \":VT\"\n FORMAT_V += \":%s\" % (VT)\n vcf_format = '\\t'.join([basic_vcf_format, FORMAT, FORMAT_V]) + \"\\n\"\n\n self.vcf_writer.write(vcf_format)"
},
{
"identifier": "subprocess_popen",
"path": "shared/utils.py",
"snippet": "def subprocess_popen(args, stdin=None, stdout=PIPE, stderr=stderr, bufsize=8388608):\n return Popen(args, stdin=stdin, stdout=stdout, stderr=stderr, bufsize=bufsize, universal_newlines=True)"
},
{
"identifier": "file_path_from",
"path": "shared/utils.py",
"snippet": "def file_path_from(file_name, suffix=\"\", exit_on_not_found=False, sep=\"\", allow_none=False, is_directory=False):\n if allow_none and file_name is None:\n return None\n if is_directory:\n is_folder_exists(file_name, suffix)\n if exit_on_not_found:\n exit(log_error(\"[ERROR] directory %s not found\" % (file_name + suffix)))\n if is_file_exists(file_name, suffix):\n return abspath(file_name + suffix)\n #allow fn.bam.bai->fn.bai fn.fa.fai->fn.fai\n elif sep != \"\" and len(sep) == 1:\n file_name_remove_suffix = sep.join(file_name.split(sep)[:-1])\n if is_file_exists(file_name_remove_suffix, suffix):\n return abspath(file_name_remove_suffix + suffix)\n if exit_on_not_found:\n exit(log_error(\"[ERROR] file %s not found\" % (file_name + suffix)))\n return None"
},
{
"identifier": "region_from",
"path": "shared/utils.py",
"snippet": "def region_from(ctg_name, ctg_start=None, ctg_end=None):\n \"\"\"\n 1-based region string [start, end]\n \"\"\"\n if ctg_name is None:\n return \"\"\n if (ctg_start is None) != (ctg_end is None):\n return \"\"\n\n if ctg_start is None and ctg_end is None:\n return \"{}\".format(ctg_name)\n return \"{}:{}-{}\".format(ctg_name, ctg_start, ctg_end)"
},
{
"identifier": "reference_sequence_from",
"path": "shared/utils.py",
"snippet": "def reference_sequence_from(samtools_execute_command, fasta_file_path, regions):\n refernce_sequences = []\n region_value_for_faidx = \" \".join(regions)\n\n samtools_faidx_process = subprocess_popen(\n shlex.split(\"{} faidx {} {}\".format(samtools_execute_command, fasta_file_path, region_value_for_faidx))\n )\n while True:\n row = samtools_faidx_process.stdout.readline()\n is_finish_reading_output = row == '' and samtools_faidx_process.poll() is not None\n if is_finish_reading_output:\n break\n if row:\n refernce_sequences.append(row.rstrip())\n\n # first line is reference name \">xxxx\", need to be ignored\n reference_sequence = \"\".join(refernce_sequences[1:])\n\n # uppercase for masked sequences\n reference_sequence = reference_sequence.upper()\n\n samtools_faidx_process.stdout.close()\n samtools_faidx_process.wait()\n if samtools_faidx_process.returncode != 0:\n return None\n\n return reference_sequence"
},
{
"identifier": "str2bool",
"path": "shared/utils.py",
"snippet": "def str2bool(v):\n if v is None:\n return v\n if isinstance(v, bool):\n return v\n if v.lower() in ('yes', 'ture', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'flase', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')"
},
{
"identifier": "str_none",
"path": "shared/utils.py",
"snippet": "def str_none(v):\n if v is None:\n return None\n if v.upper() == \"NONE\":\n return None\n if isinstance(v, str):\n return v"
},
{
"identifier": "bed_tree_from",
"path": "shared/interval_tree.py",
"snippet": "def bed_tree_from(bed_file_path,\n expand_region=None,\n contig_name=None,\n bed_ctg_start=None,\n bed_ctg_end=None,\n return_bed_region=False,\n padding=None,\n region=None):\n \"\"\"\n 0-based interval tree [start, end)\n \"\"\"\n\n tree = {}\n if region is not None:\n try:\n ctg_name, start_end = region.split(':')\n ctg_start, ctg_end = int(start_end.split('-')[0]) - 1, int(start_end.split('-')[1]) - 1 # bed format\n except:\n sys.exit(\"[ERROR] Please input the correct format for --region ctg_name:start-end, your input is {}\".format(region))\n if ctg_end < ctg_start or ctg_start < 0 or ctg_end < 0:\n sys.exit(\"[ERROR] Invalid region input: {}\".format(region))\n\n if ctg_name not in tree:\n tree[ctg_name] = IntervalTree()\n tree[ctg_name].addi(ctg_start, ctg_end)\n if return_bed_region:\n return tree, None, None\n return tree\n\n if bed_file_path is None or bed_file_path == \"\":\n if return_bed_region:\n return tree, None, None\n return tree\n\n bed_start, bed_end = float('inf'), 0\n unzip_process = subprocess_popen(shlex.split(\"gzip -fdc %s\" % (bed_file_path)))\n for row_id, row in enumerate(unzip_process.stdout):\n if row[0] == '#':\n continue\n columns = row.strip().split()\n\n ctg_name = columns[0]\n if contig_name != None and ctg_name != contig_name:\n continue\n if ctg_name not in tree:\n tree[ctg_name] = IntervalTree()\n\n ctg_start, ctg_end = int(columns[1]), int(columns[2])\n\n if ctg_end < ctg_start or ctg_start < 0 or ctg_end < 0:\n sys.exit(\"[ERROR] Invalid bed input in {}-th row {} {} {}\".format(row_id+1, ctg_name, ctg_start, ctg_end))\n\n if bed_ctg_start and bed_ctg_end:\n if ctg_end < bed_ctg_start or ctg_start > bed_ctg_end:\n continue\n if padding:\n ctg_start += padding\n ctg_end -= padding\n bed_start = min(ctg_start, bed_start)\n bed_end = max(ctg_end, bed_end)\n if ctg_start == ctg_end:\n ctg_end += 1\n\n tree[ctg_name].addi(ctg_start, ctg_end)\n\n unzip_process.stdout.close()\n unzip_process.wait()\n if return_bed_region:\n return tree, bed_start, bed_end\n return tree"
},
{
"identifier": "is_region_in",
"path": "shared/interval_tree.py",
"snippet": "def is_region_in(tree, contig_name, region_start=None, region_end=None):\n if not tree or (contig_name is None) or (contig_name not in tree):\n return False\n\n interval_tree = tree[contig_name]\n return len(\n interval_tree.at(region_start)\n if region_end is None else\n interval_tree.overlap(begin=region_start, end=region_end)\n ) > 0"
}
] | import sys
import shlex
import os
import logging
import subprocess
import shared.param as param
from argparse import ArgumentParser, SUPPRESS
from collections import Counter, defaultdict
from shared.vcf import VcfReader, VcfWriter
from shared.utils import subprocess_popen, file_path_from, region_from, \
reference_sequence_from, str2bool, str_none
from shared.interval_tree import bed_tree_from, is_region_in | 7,954 | split_bed_size = param.split_bed_size
candidates_folder = args.candidates_folder
min_coverage = args.min_coverage
platform = args.platform
store_tumor_infos = args.store_tumor_infos
alt_fn = args.alt_fn
confident_bed_fn = file_path_from(args.bed_fn, allow_none=True, exit_on_not_found=False)
is_confident_bed_file_given = confident_bed_fn is not None
min_mapping_quality = args.min_mq
min_base_quality = args.min_bq
flankingBaseNum = param.flankingBaseNum if args.flanking is None else args.flanking
no_of_positions = 2 * flankingBaseNum + 1
genotyping_mode_vcf_fn = args.genotyping_mode_vcf_fn
hybrid_mode_vcf_fn = args.hybrid_mode_vcf_fn
truth_vcf_fn = args.truth_vcf_fn
is_truth_vcf_provided = truth_vcf_fn is not None
select_indel_candidates = args.select_indel_candidates
candidates_set = set()
indel_candidates_list = []
snv_candidates_set = set()
indel_candidates_set = set()
truths_variant_dict = {}
if is_truth_vcf_provided:
unified_vcf_reader = VcfReader(vcf_fn=truth_vcf_fn, ctg_name=ctg_name, is_var_format=False)
unified_vcf_reader.read_vcf()
truths_variant_dict = unified_vcf_reader.variant_dict
candidates_pos_set = set()
add_read_regions = True
hybrid_candidate_set = set()
indel_hybrid_candidate_set = set()
if hybrid_mode_vcf_fn is not None or genotyping_mode_vcf_fn is not None:
vcf_fn = hybrid_mode_vcf_fn if hybrid_mode_vcf_fn is not None else genotyping_mode_vcf_fn
vcf_reader = VcfReader(vcf_fn=vcf_fn, ctg_name=ctg_name, is_var_format=False)
vcf_reader.read_vcf()
hybrid_variant_dict = vcf_reader.variant_dict
for k, v in hybrid_variant_dict.items():
ref_base, alt_base = v.reference_bases, v.alternate_bases[0]
if len(ref_base) > 1 or len(alt_base) > 1:
if select_indel_candidates:
indel_hybrid_candidate_set.add(k)
candidates_set.add(k)
hybrid_candidate_set.add(k)
hybrid_info_dict = defaultdict(AltInfo)
fai_fn = file_path_from(fasta_file_path, suffix=".fai", exit_on_not_found=True, sep='.')
if chunk_id is not None:
"""
        Whole-genome calling option: acquire the contig start and end positions from the reference fasta index (.fai),
        then split the reference according to the chunk id and the total number of chunks.
"""
if is_confident_bed_file_given:
# consistent with pileup generation, faster to extract tensor using bed region
tree, bed_start, bed_end = bed_tree_from(bed_file_path=confident_bed_fn,
contig_name=ctg_name,
return_bed_region=True)
chunk_size = (bed_end - bed_start) // chunk_num + 1 if (bed_end - bed_start) % chunk_num else (
bed_end - bed_start) // chunk_num
ctg_start = bed_start + 1 + chunk_size * chunk_id # 0-base to 1-base
ctg_end = ctg_start + chunk_size
else:
contig_length = 0
with open(fai_fn, 'r') as fai_fp:
for row in fai_fp:
columns = row.strip().split("\t")
contig_name = columns[0]
if contig_name != ctg_name:
continue
contig_length = int(columns[1])
chunk_size = contig_length // chunk_num + 1 if contig_length % chunk_num else contig_length // chunk_num
ctg_start = chunk_size * chunk_id # 0-base to 1-base
ctg_end = ctg_start + chunk_size
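    # NOTE (editor's illustration, not part of the original source): the worked example below shows how
    # the chunking above maps (chunk_id, chunk_num) to a candidate window. With contig_length = 10000 and
    # chunk_num = 4 (no remainder), chunk_size = 2500, so chunk_id = 1 gives ctg_start = 2500 and
    # ctg_end = 5000; a remainder (e.g. contig_length = 10001) bumps chunk_size to 2501 so the last
    # chunk still covers the contig tail.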
candidates_pos_set = set([item for item in candidates_pos_set if item >= ctg_start and item <= ctg_end])
# 1-based regions [start, end] (start and end inclusive)
ref_regions = []
reads_regions = []
is_ctg_name_given = ctg_name is not None
is_ctg_range_given = is_ctg_name_given and ctg_start is not None and ctg_end is not None
if is_ctg_range_given:
        extend_start = max(ctg_start - no_of_positions, 1)
extend_end = ctg_end + no_of_positions
reads_regions.append(region_from(ctg_name=ctg_name, ctg_start=extend_start, ctg_end=extend_end))
reference_start, reference_end = ctg_start - param.expandReferenceRegion, ctg_end + param.expandReferenceRegion
reference_start = 1 if reference_start < 1 else reference_start
ref_regions.append(region_from(ctg_name=ctg_name, ctg_start=reference_start, ctg_end=reference_end))
elif is_ctg_name_given:
reads_regions.append(region_from(ctg_name=ctg_name))
ref_regions.append(region_from(ctg_name=ctg_name))
reference_start = 1
reference_sequence = reference_sequence_from(
samtools_execute_command=samtools_execute_command,
fasta_file_path=fasta_file_path,
regions=ref_regions
)
if reference_sequence is None or len(reference_sequence) == 0:
sys.exit("[ERROR] Failed to load reference sequence from file ({}).".format(fasta_file_path))
mq_option = ' --min-MQ {}'.format(min_mapping_quality)
bq_option = ' --min-BQ {}'.format(min_base_quality)
read_name_option = ' --output-QNAME' if store_tumor_infos else ' '
bed_option = ' -l {}'.format(
confident_bed_fn) if is_confident_bed_file_given else ""
flags_option = ' --excl-flags {} '.format(param.SAMTOOLS_VIEW_FILTER_FLAG)
max_depth_option = ' --max-depth {} '.format(args.max_depth) if args.max_depth is not None else " "
reads_regions_option = ' -r {}'.format(" ".join(reads_regions)) if add_read_regions else ""
stdin = None if tumor_bam_file_path != "PIPE" else sys.stdin
tumor_bam_file_path = tumor_bam_file_path if tumor_bam_file_path != "PIPE" else "-"
samtools_command = samtools_execute_command + " mpileup --reverse-del" + read_name_option + reads_regions_option + \
mq_option + bq_option + bed_option + flags_option + max_depth_option
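    # NOTE (editor's illustration, values are placeholders and not part of the original source): the
    # assembled command looks roughly like
    #   samtools mpileup --reverse-del --output-QNAME -r chr1:9900-110100 --min-MQ 20 --min-BQ 9 \
    #   --excl-flags <filter_flag> --max-depth 5000 <tumor_bam>
    # with the actual values taken from the command-line arguments and shared.param.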
| # BSD 3-Clause License
#
# Copyright 2023 The University of Hong Kong, Department of Computer Science
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
logging.basicConfig(format='%(message)s', level=logging.INFO)
class AltInfo(object):
def __init__(self, ref_base='', tumor_alt_info=""):
self.ref_base = ref_base
self.tumor_alt_info = tumor_alt_info
def decode_pileup_bases(pileup_bases,
reference_base,
min_coverage,
minimum_snv_af_for_candidate,
minimum_indel_af_for_candidate,
alternative_base_num,
has_pileup_candidates,
read_name_list,
is_tumor,
select_indel_candidates=False,
platform="ont"):
"""
Decode mpileup input string.
    pileup_bases: pileup base string for each position, including all mapping information.
    reference_base: uppercase reference base used for cigar calculation.
    pileup_dict: dictionary (pos: pos info) that keeps the read information covering a specific position.
    ref_seq: chunked reference sequence in the window, start: center pos - flankingBaseNum, end: center pos + flankingBaseNum + 1.
    reference_sequence: reference sequence indexed by contig:start-end, 0-based.
    minimum_snv_af_for_candidate / minimum_indel_af_for_candidate: minimum allelic frequency for candidate filtering; positions below the threshold are filtered out.
    has_pileup_candidates: if the candidate is directly obtained from pileup output, there is no need to apply the af filtering.
"""
base_idx = 0
base_list = []
while base_idx < len(pileup_bases):
base = pileup_bases[base_idx]
if base == '+' or base == '-':
base_idx += 1
advance = 0
while True:
num = pileup_bases[base_idx]
if num.isdigit():
advance = advance * 10 + int(num)
base_idx += 1
else:
break
base_list[-1][1] = base + pileup_bases[base_idx: base_idx + advance] # add indel seq
base_idx += advance - 1
elif base in "ACGTNacgtn#*":
base_list.append([base, ""])
elif base == '^': # start of read, next base is mq, update mq info
base_idx += 1
# skip $, the end of read
base_idx += 1
pileup_dict = defaultdict(int)
base_counter = Counter([''.join(item) for item in base_list])
alt_dict = dict(Counter([''.join(item).upper() for item in base_list]))
tumor_alt_dict = dict(Counter([''.join(item).upper() for item, read_name in zip(base_list, read_name_list) if
read_name.startswith('t')])) if is_tumor else None
depth = 0
for key, count in base_counter.items():
if key[0].upper() in 'ACGT':
pileup_dict[key[0].upper()] += count
depth += count
elif key[0] in "#*":
depth += count
if len(key) > 1 and key[1] == '+':
if select_indel_candidates:
pileup_dict['I' + key[0].upper() + key[2:].upper()] += count
else:
pileup_dict['I'] += count
elif len(key) > 1 and key[1] == '-':
if select_indel_candidates:
pileup_dict['D' + len(key[2:]) * "N"] += count
else:
pileup_dict['D'] += count
denominator = depth if depth > 0 else 1
pileup_list = sorted(list(pileup_dict.items()), key=lambda x: x[1], reverse=True)
pass_snv_af = False
pass_indel_af = False
pass_depth = depth > min_coverage
for item, count in pileup_list:
if item == reference_base:
continue
elif item[0] in 'ID':
if select_indel_candidates:
pass_indel_af = (pass_indel_af or (float(count) / denominator >= minimum_indel_af_for_candidate and (
alternative_base_num is not None and count >= alternative_base_num)))
continue
pass_snv_af = pass_snv_af or (float(count) / denominator >= minimum_snv_af_for_candidate) and (
alternative_base_num is not None and count >= alternative_base_num)
af = (float(pileup_list[1][1]) / denominator) if len(pileup_list) > 1 else 0.0
af = (float(pileup_list[0][1]) / denominator) if len(pileup_list) >= 1 and pileup_list[0][
0] != reference_base else af
pass_af = (pass_snv_af or pass_indel_af) and pass_depth
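    # NOTE (editor's illustration, the numbers are hypothetical): with depth = 100, reference_base = 'A'
    # and pileup_list = [('A', 90), ('C', 7)], the top non-reference allele gives af = 0.07; with
    # minimum_snv_af_for_candidate = 0.05 and alternative_base_num = 3 the SNV check passes, and pass_af
    # is True as long as depth exceeds min_coverage.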
alt_list = sorted(list(alt_dict.items()), key=lambda x: x[1], reverse=True)
alt_list = [[item[0], str(round(item[1] / denominator, 3))] for item in alt_list if
item[0].upper() != reference_base]
if not pass_af:
return base_list, depth, pass_af, af, "", "", "", alt_list, pass_snv_af, pass_indel_af, pileup_list
pileup_list = [[item[0], str(round(item[1] / denominator, 3))] for item in pileup_list]
af_infos = ','.join([item[1] for item in pileup_list if item[0] != reference_base])
pileup_infos = ' '.join([item[0] + ':' + item[1] for item in alt_list])
if tumor_alt_dict is not None:
tumor_alt_list = sorted(list(tumor_alt_dict.items()), key=lambda x: x[1], reverse=True)
tumor_alt_list = [[item[0], str(round(item[1] / denominator, 3))] for item in tumor_alt_list]
tumor_pileup_infos = ' '.join([item[0] + ':' + item[1] for item in tumor_alt_list])
else:
tumor_pileup_infos = ""
return base_list, depth, pass_af, af, af_infos, pileup_infos, tumor_pileup_infos, alt_list, pass_snv_af, pass_indel_af, pileup_list
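# NOTE (editor's illustration): the helper below is a sketch added for clarity and is not part of the
# original source. It isolates the indel-parsing idea used in decode_pileup_bases above: in samtools
# mpileup output an insertion is encoded as '+<len><seq>' and a deletion as '-<len><seq>', and both
# attach to the read base that precedes them. The sample string and function name are hypothetical.
def _illustrate_indel_encoding(pileup_bases="AAG+2TTc-1aCC"):
    parsed = []
    idx = 0
    while idx < len(pileup_bases):
        base = pileup_bases[idx]
        if base in "+-":
            # read the indel length digit by digit, then attach the indel to the previous base
            idx += 1
            advance = 0
            while pileup_bases[idx].isdigit():
                advance = advance * 10 + int(pileup_bases[idx])
                idx += 1
            parsed[-1] = parsed[-1] + base + pileup_bases[idx: idx + advance]
            idx += advance
            continue
        if base in "ACGTNacgtn#*":
            parsed.append(base)
        idx += 1
    return parsed  # e.g. ['A', 'A', 'G+2TT', 'c-1a', 'C', 'C']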
def extract_pair_candidates(args):
ctg_start = args.ctg_start
ctg_end = args.ctg_end
fasta_file_path = args.ref_fn
ctg_name = args.ctg_name
samtools_execute_command = args.samtools
output_depth = args.output_depth
output_alt_info = args.output_alt_info
tumor_bam_file_path = args.tumor_bam_fn
chunk_id = args.chunk_id - 1 if args.chunk_id else None # 1-base to 0-base
chunk_num = args.chunk_num
minimum_snv_af_for_candidate = args.snv_min_af
minimum_indel_af_for_candidate = args.indel_min_af
minimum_snv_af_for_truth = args.min_truth_snv_af
minimum_indel_af_for_truth = args.min_truth_snv_af
alternative_base_num = args.alternative_base_num
split_bed_size = param.split_bed_size
candidates_folder = args.candidates_folder
min_coverage = args.min_coverage
platform = args.platform
store_tumor_infos = args.store_tumor_infos
alt_fn = args.alt_fn
confident_bed_fn = file_path_from(args.bed_fn, allow_none=True, exit_on_not_found=False)
is_confident_bed_file_given = confident_bed_fn is not None
min_mapping_quality = args.min_mq
min_base_quality = args.min_bq
flankingBaseNum = param.flankingBaseNum if args.flanking is None else args.flanking
no_of_positions = 2 * flankingBaseNum + 1
genotyping_mode_vcf_fn = args.genotyping_mode_vcf_fn
hybrid_mode_vcf_fn = args.hybrid_mode_vcf_fn
truth_vcf_fn = args.truth_vcf_fn
is_truth_vcf_provided = truth_vcf_fn is not None
select_indel_candidates = args.select_indel_candidates
candidates_set = set()
indel_candidates_list = []
snv_candidates_set = set()
indel_candidates_set = set()
truths_variant_dict = {}
if is_truth_vcf_provided:
unified_vcf_reader = VcfReader(vcf_fn=truth_vcf_fn, ctg_name=ctg_name, is_var_format=False)
unified_vcf_reader.read_vcf()
truths_variant_dict = unified_vcf_reader.variant_dict
candidates_pos_set = set()
add_read_regions = True
hybrid_candidate_set = set()
indel_hybrid_candidate_set = set()
if hybrid_mode_vcf_fn is not None or genotyping_mode_vcf_fn is not None:
vcf_fn = hybrid_mode_vcf_fn if hybrid_mode_vcf_fn is not None else genotyping_mode_vcf_fn
vcf_reader = VcfReader(vcf_fn=vcf_fn, ctg_name=ctg_name, is_var_format=False)
vcf_reader.read_vcf()
hybrid_variant_dict = vcf_reader.variant_dict
for k, v in hybrid_variant_dict.items():
ref_base, alt_base = v.reference_bases, v.alternate_bases[0]
if len(ref_base) > 1 or len(alt_base) > 1:
if select_indel_candidates:
indel_hybrid_candidate_set.add(k)
candidates_set.add(k)
hybrid_candidate_set.add(k)
hybrid_info_dict = defaultdict(AltInfo)
fai_fn = file_path_from(fasta_file_path, suffix=".fai", exit_on_not_found=True, sep='.')
if chunk_id is not None:
"""
        Whole-genome calling option: acquire the contig start and end positions from the reference fasta index (.fai),
        then split the reference according to the chunk id and the total number of chunks.
"""
if is_confident_bed_file_given:
# consistent with pileup generation, faster to extract tensor using bed region
tree, bed_start, bed_end = bed_tree_from(bed_file_path=confident_bed_fn,
contig_name=ctg_name,
return_bed_region=True)
chunk_size = (bed_end - bed_start) // chunk_num + 1 if (bed_end - bed_start) % chunk_num else (
bed_end - bed_start) // chunk_num
ctg_start = bed_start + 1 + chunk_size * chunk_id # 0-base to 1-base
ctg_end = ctg_start + chunk_size
else:
contig_length = 0
with open(fai_fn, 'r') as fai_fp:
for row in fai_fp:
columns = row.strip().split("\t")
contig_name = columns[0]
if contig_name != ctg_name:
continue
contig_length = int(columns[1])
chunk_size = contig_length // chunk_num + 1 if contig_length % chunk_num else contig_length // chunk_num
ctg_start = chunk_size * chunk_id # 0-base to 1-base
ctg_end = ctg_start + chunk_size
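    # NOTE (editor's illustration, not part of the original source): the worked example below shows how
    # the chunking above maps (chunk_id, chunk_num) to a candidate window. With contig_length = 10000 and
    # chunk_num = 4 (no remainder), chunk_size = 2500, so chunk_id = 1 gives ctg_start = 2500 and
    # ctg_end = 5000; a remainder (e.g. contig_length = 10001) bumps chunk_size to 2501 so the last
    # chunk still covers the contig tail.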
candidates_pos_set = set([item for item in candidates_pos_set if item >= ctg_start and item <= ctg_end])
# 1-based regions [start, end] (start and end inclusive)
ref_regions = []
reads_regions = []
is_ctg_name_given = ctg_name is not None
is_ctg_range_given = is_ctg_name_given and ctg_start is not None and ctg_end is not None
if is_ctg_range_given:
        extend_start = max(ctg_start - no_of_positions, 1)
extend_end = ctg_end + no_of_positions
reads_regions.append(region_from(ctg_name=ctg_name, ctg_start=extend_start, ctg_end=extend_end))
reference_start, reference_end = ctg_start - param.expandReferenceRegion, ctg_end + param.expandReferenceRegion
reference_start = 1 if reference_start < 1 else reference_start
ref_regions.append(region_from(ctg_name=ctg_name, ctg_start=reference_start, ctg_end=reference_end))
elif is_ctg_name_given:
reads_regions.append(region_from(ctg_name=ctg_name))
ref_regions.append(region_from(ctg_name=ctg_name))
reference_start = 1
reference_sequence = reference_sequence_from(
samtools_execute_command=samtools_execute_command,
fasta_file_path=fasta_file_path,
regions=ref_regions
)
if reference_sequence is None or len(reference_sequence) == 0:
sys.exit("[ERROR] Failed to load reference sequence from file ({}).".format(fasta_file_path))
mq_option = ' --min-MQ {}'.format(min_mapping_quality)
bq_option = ' --min-BQ {}'.format(min_base_quality)
read_name_option = ' --output-QNAME' if store_tumor_infos else ' '
bed_option = ' -l {}'.format(
confident_bed_fn) if is_confident_bed_file_given else ""
flags_option = ' --excl-flags {} '.format(param.SAMTOOLS_VIEW_FILTER_FLAG)
max_depth_option = ' --max-depth {} '.format(args.max_depth) if args.max_depth is not None else " "
reads_regions_option = ' -r {}'.format(" ".join(reads_regions)) if add_read_regions else ""
stdin = None if tumor_bam_file_path != "PIPE" else sys.stdin
tumor_bam_file_path = tumor_bam_file_path if tumor_bam_file_path != "PIPE" else "-"
samtools_command = samtools_execute_command + " mpileup --reverse-del" + read_name_option + reads_regions_option + \
mq_option + bq_option + bed_option + flags_option + max_depth_option
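    # NOTE (editor's illustration, values are placeholders and not part of the original source): the
    # assembled command looks roughly like
    #   samtools mpileup --reverse-del --output-QNAME -r chr1:9900-110100 --min-MQ 20 --min-BQ 9 \
    #   --excl-flags <filter_flag> --max-depth 5000 <tumor_bam>
    # with the actual values taken from the command-line arguments and shared.param.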
| samtools_mpileup_process = subprocess_popen( | 2 | 2023-11-07 04:39:16+00:00 | 12k |
sb-ai-lab/HypEx | tests/test_aa.py | [
{
"identifier": "AATest",
"path": "hypex/ab_test/aa_tester.py",
"snippet": "class AATest:\n \"\"\"\n A class for conducting AA testing (random split testing) to assess the\n statistical uniform of two samples.\n\n AA testing is used to validate that the splitting mechanism of an A/B test\n is unbiased and random. This class supports various statistical methods to\n evaluate the equivalence of two randomly split samples in terms of various\n metrics.\n\n Attributes:\n target_fields (Union[Iterable[str], str]): Target column names to analyze. This fields should be numeric.\n group_cols (Union[str, Iterable[str]]): Column names used for grouping. This fields should be categorical. It's a field for stratification. Stratification - the way to divide groups with equal number of categories in each of them.\n info_cols (Union[str, Iterable[str]]): Column names for additional information.\n quant_field (str): Name of the column for quantization. This fields should be categorical. A quantum is a category that passes entirely into one of the groups.\n mode (str): Mode of the AA test. Options are 'simple' and 'balanced'. 'simple' - naively splits groups in half. 'balanced' - separation with quantum balancing at each step (only used if a quantization field is specified.\n alpha (float): Level of significance for statistical tests.\n\n Methods:\n columns_labeling(data):\n Classifies columns in the input DataFrame as target or group columns.\n __simple_mode(data, random_state, test_size):\n Internal method to create a simple random split of the data.\n split(data, random_state, test_size):\n Splits the dataset into test and control groups.\n _postprep_data(data, spit_indexes):\n Combines the index markup obtained at the split step.\n calc_ab_delta(a_mean, b_mean, mode):\n Calculates the difference between averages of two samples.\n sampling_metrics(data, random_state, test_size):\n Computes various metrics for a single random split of the data.\n calc_uniform_tests(data, test_size, iterations, file_name, experiment_write_mode, split_write_mode, write_step, pbar):\n Runs multiple iterations of AA tests to find a uniform distribution.\n features_p_value_distribution(experiment_results, figsize, bin_step):\n Plots the distribution of p-values for each feature.\n aa_score(experiment_results):\n Computes the average score for passed tests in AA testing.\n uniform_tests_interpretation(experiment_results):\n Analyzes and plots the results of uniform tests.\n num_feature_uniform_analysis(control_data, test_data, plot_set):\n Analyzes and plots numerical feature distributions in control and test data.\n cat_feature_uniform_analysis(control_data, test_data):\n Analyzes and plots categorical feature distributions in control and test data.\n experiment_result_transform(experiment):\n Transforms the result of an experiment into a readable format.\n split_analysis(splited_data):\n Analyzes split data for both target and group columns.\n get_resume(aa_score, best_experiment_stat):\n Formats the final results of AA testing for clarity.\n process(data, optimize_groups, iterations, show_plots, test_size, pbar):\n Main method to perform the complete AA test process, including optimization, testing, and result presentation.\n\n Example:\n >>> aa_test = AATest(target_fields=[\"metric1\", \"metric2\"], group_cols=[\"group\"], info_cols=[\"info1\", \"info2\"])\n >>> results = aa_test.process(data, optimize_groups=True, iterations=1000, show_plots=True)\n \"\"\"\n\n def __init__(\n self,\n target_fields: Union[Iterable[str], str] = None,\n group_cols: Union[str, Iterable[str]] = None,\n 
info_cols: Union[str, Iterable[str]] = None,\n quant_field: str = None,\n mode: str = \"simple\",\n alpha: float = 0.05,\n ):\n \"\"\"Initialize the AATest class.\n\n Args:\n target_fields:\n List or str with target columns. This fields should be numeric.\n group_cols:\n List or str with columns for grouping. This fields should be categorical. It's a field for stratification. Stratification - the way to divide groups with equal number of categories in each of them.\n info_cols:\n List or str with informational columns\n quant_field:\n String with name of column for quantization. This fields should be categorical. A quantum is a category that passes entirely into one of the groups.\n mode:\n Mode of the AA-test\n Available modes:\n * simple - naively splits groups in half\n * balanced - separation with quantum balancing at each step (only used if a quantization field is specified)\n alpha:\n Level of significance\n \"\"\"\n self.target_fields = (\n [target_fields] if isinstance(target_fields, str) else target_fields\n )\n self.group_cols = (\n [group_cols] if isinstance(group_cols, str) else group_cols\n ) or []\n self.info_cols = [info_cols] if isinstance(info_cols, str) else info_cols\n self.quant_field = quant_field\n self.mode = mode\n self.alpha = alpha\n\n def columns_labeling(self, data: pd.DataFrame) -> Dict[str, List[str]]:\n \"\"\"\n Label columns as target columns and group columns.\n\n Args:\n data:\n Input dataframe\n\n Returns:\n Dictionary with list of target columns and group columns\n\n \"\"\"\n return {\n \"target_field\": list(\n data.select_dtypes(include=\"number\").columns.drop(\n self.info_cols, errors=\"ignore\"\n )\n ),\n \"group_col\": list(\n data.select_dtypes(include=\"object\").columns.drop(\n self.info_cols, errors=\"ignore\"\n )\n ),\n }\n\n def __simple_mode(\n self, data: pd.DataFrame, random_state: int = None, test_size: float = 0.5\n ) -> Dict:\n \"\"\"Separates data on A and B samples within simple mode.\n Separation performed to divide groups of equal sizes - equal amount of records\n or equal amount of groups in each sample.\n\n Args:\n data:\n Input data\n random_state:\n Seed of random\n\n Returns:\n Test and control samples of indexes dictionary\n \"\"\"\n result = {\"test_indexes\": [], \"control_indexes\": []}\n\n if self.quant_field:\n random_ids = shuffle(\n data[self.quant_field].unique(), random_state=random_state\n )\n edge = int(len(random_ids) * test_size)\n result[\"test_indexes\"] = list(\n data[data[self.quant_field].isin(random_ids[:edge])].index\n )\n result[\"control_indexes\"] = list(\n data[data[self.quant_field].isin(random_ids[edge:])].index\n )\n\n else:\n addition_indexes = list(shuffle(data.index, random_state=random_state))\n edge = int(len(addition_indexes) * test_size)\n result[\"test_indexes\"] = addition_indexes[:edge]\n result[\"control_indexes\"] = addition_indexes[edge:]\n\n return result\n\n def split(\n self, data: pd.DataFrame, random_state: int = None, test_size: float = 0.5\n ) -> Dict:\n \"\"\"Divides sample on two groups.\n\n Args:\n data:\n Input data\n random_state:\n Seed of random - one integer to fix split\n test_size:\n Proportion of the test group\n\n Returns:\n Dict of indexes with division on test and control group\n \"\"\"\n result = {\"test_indexes\": [], \"control_indexes\": []}\n\n if self.group_cols:\n groups = data.groupby(self.group_cols)\n for _, gd in groups:\n if self.mode not in (\"balanced\", \"simple\"):\n warnings.warn(\n f\"The mode '{self.mode}' is not supported for group division. 
Implemented mode 'simple'.\"\n )\n self.mode = \"simple\"\n\n if self.mode == \"simple\":\n t_result = self.__simple_mode(gd, random_state, test_size)\n result[\"test_indexes\"] += t_result[\"test_indexes\"]\n result[\"control_indexes\"] += t_result[\"control_indexes\"]\n\n elif self.mode == \"balanced\":\n if self.quant_field:\n random_ids = shuffle(\n gd[self.quant_field].unique(), random_state=random_state\n )\n addition_indexes = list(\n gd[gd[self.quant_field].isin(random_ids)].index\n )\n else:\n addition_indexes = list(\n shuffle(gd.index, random_state=random_state)\n )\n\n if len(result[\"control_indexes\"]) > len(result[\"test_indexes\"]):\n result[\"test_indexes\"] += addition_indexes\n else:\n result[\"control_indexes\"] += addition_indexes\n\n else:\n if self.mode != \"simple\":\n warnings.warn(\n f\"The mode '{self.mode}' is not supported for regular division. \"\n f\"Implemented mode 'simple'.\"\n )\n\n t_result = self.__simple_mode(data, random_state, test_size)\n result[\"test_indexes\"] = t_result[\"test_indexes\"]\n result[\"control_indexes\"] = t_result[\"control_indexes\"]\n\n result[\"test_indexes\"] = list(set(result[\"test_indexes\"]))\n result[\"control_indexes\"] = list(set(result[\"control_indexes\"]))\n\n return result\n\n @staticmethod\n def _postprep_data(data, spit_indexes: Dict = None) -> pd.DataFrame:\n \"\"\"Prepares data to show user.\n Adds info_cols and decode binary variables.\n\n Args:\n data:\n Input data\n spit_indexes:\n Dict of indexes with separation on test and control group\n\n Returns:\n Separated initial data with column \"group\"\n \"\"\"\n test = data.loc[spit_indexes[\"test_indexes\"]]\n control = data.loc[spit_indexes[\"control_indexes\"]]\n data = merge_groups(control, test)\n\n return data\n\n @staticmethod\n def calc_ab_delta(a_mean: float, b_mean: float, mode: str = \"percentile\")->float:\n \"\"\"Calculates target delta between A and B groups.\n\n Args:\n a_mean:\n Average of target in one group\n b_mean:\n Average of target in another group\n mode:\n Type of expected result:\n * 'percentile' - percentage exceeding the average in group A compared to group B\n * 'absolute' - absolute value of difference between B and A group\n * 'relative' - percent in format of number (absolute) exceeding the average in group A compared to group B\n\n Returns:\n Delta between groups as percent or absolute value\n \"\"\"\n if mode == \"percentile\":\n return (1 - a_mean / b_mean) * 100\n if mode == \"absolute\":\n return b_mean - a_mean\n if mode == \"relative\":\n return 1 - a_mean / b_mean\n\n def sampling_metrics(\n self, data: pd.DataFrame, random_state: int = None, test_size: float = 0.5\n ) -> Dict:\n \"\"\"Calculates metrics of one sampling.\n\n Args:\n data:\n Input data\n random_state:\n Random seeds for searching\n test_size:\n Proportion of the test group\n\n Returns:\n Dict of\n 1) metrics dataframe (stat tests) and\n 2) dict of random state with test_control dataframe\n \"\"\"\n scores = []\n t_result = {\"random_state\": random_state}\n\n split = self.split(data, random_state, test_size)\n\n a = data.loc[split[\"control_indexes\"]]\n b = data.loc[split[\"test_indexes\"]]\n\n data_from_sampling_dict = {random_state: self._postprep_data(data, split)}\n for tf in self.target_fields:\n ta = a[tf]\n tb = b[tf]\n\n t_result[f\"{tf} a mean\"] = ta.mean()\n t_result[f\"{tf} b mean\"] = tb.mean()\n t_result[f\"{tf} ab delta\"] = self.calc_ab_delta(\n t_result[f\"{tf} a mean\"], t_result[f\"{tf} b mean\"], \"absolute\"\n )\n t_result[f\"{tf} ab 
delta %\"] = self.calc_ab_delta(\n t_result[f\"{tf} a mean\"], t_result[f\"{tf} b mean\"], \"percentile\"\n )\n t_result[f\"{tf} t-test p-value\"] = ttest_ind(\n ta, tb, nan_policy=\"omit\"\n ).pvalue\n t_result[f\"{tf} ks-test p-value\"] = ks_2samp(ta, tb).pvalue\n t_result[f\"{tf} t-test passed\"] = (\n t_result[f\"{tf} t-test p-value\"] < self.alpha\n )\n t_result[f\"{tf} ks-test passed\"] = (\n t_result[f\"{tf} ks-test p-value\"] < self.alpha\n )\n scores.append(\n (\n t_result[f\"{tf} t-test p-value\"]\n + 2 * t_result[f\"{tf} ks-test p-value\"]\n )\n / 3\n )\n\n t_result[\"control %\"] = len(a) / len(data) * 100\n t_result[\"test %\"] = len(b) / len(data) * 100\n t_result[\"control size\"] = len(a)\n t_result[\"test size\"] = len(b)\n t_result[\"t-test mean p-value\"] = np.mean(\n [p_value for key, p_value in t_result.items() if \"t-test p-value\" in key]\n )\n t_result[\"ks-test mean p-value\"] = np.mean(\n [p_value for key, p_value in t_result.items() if \"ks-test p-value\" in key]\n )\n t_result[\"t-test passed %\"] = np.mean(\n [passed * 100 for key, passed in t_result.items() if \"t-test passed\" in key]\n )\n t_result[\"ks-test passed %\"] = np.mean(\n [\n passed * 100\n for key, passed in t_result.items()\n if \"ks-test passed\" in key\n ]\n )\n t_result[\"mean_tests_score\"] = np.mean(scores)\n return {\"metrics\": t_result, \"data_from_experiment\": data_from_sampling_dict}\n\n def calc_uniform_tests(\n self,\n data: pd.DataFrame,\n test_size: float = 0.5,\n iterations: int = 2000,\n file_name: Union[Path, str] = None,\n experiment_write_mode: str = \"full\",\n split_write_mode: str = \"full\",\n write_step: int = None,\n pbar: bool = True,\n **kwargs,\n ) -> Optional[Tuple[pd.DataFrame, Dict[Any, Dict]]]:\n \"\"\"Performs multiple separation experiments for different random states.\n\n Args:\n data:\n Input data\n iterations:\n Number of iterations to search uniform sampling to searching\n test_size:\n Proportion of the test group\n file_name:\n Name of file to save results (if None - no results will be saved, func returns result)\n experiment_write_mode:\n Mode to write experiment results:\n 'full' - save all experiments\n 'all' - save experiments that passed all statistical tests\n 'any' - save experiments that passed any statistical test\n split_write_mode:\n Mode to write split results:\n 'full' - save all experiments\n 'all' - save experiments that passed all statistical tests\n 'any' - save experiments that passed any statistical test\n write_step:\n Step to write experiments to file\n pbar:\n Flag to show progress bar\n\n Returns:\n If no saving (no file_name, no write mode and no write_step) returns dataframe\n else None and saves file to csv\n \"\"\"\n random_states = range(iterations)\n results = []\n data_from_sampling = {}\n\n if experiment_write_mode not in (\"full\", \"all\", \"any\"):\n warnings.warn(\n f\"Write mode '{experiment_write_mode}' is not supported. Mode 'full' will be used\"\n )\n experiment_write_mode = \"full\"\n if split_write_mode not in (\"full\", \"all\", \"any\"):\n warnings.warn(\n f\"Write mode '{split_write_mode}' is not supported. 
Mode 'full' will be used\"\n )\n split_write_mode = \"full\"\n\n for i, rs in tqdm(\n enumerate(random_states), total=len(random_states), disable=not pbar\n ):\n res = self.sampling_metrics(data, random_state=rs, test_size=test_size)\n\n # write to file\n passed = []\n for tf in self.target_fields:\n passed += [\n not res[\"metrics\"][f\"{tf} t-test passed\"],\n not res[\"metrics\"][f\"{tf} ks-test passed\"],\n ]\n\n if all(passed):\n if experiment_write_mode == \"all\":\n results.append(res[\"metrics\"])\n if split_write_mode == \"all\":\n data_from_sampling.update(res[\"data_from_experiment\"])\n if any(passed):\n if experiment_write_mode == \"any\":\n results.append(res[\"metrics\"])\n if split_write_mode == \"any\":\n data_from_sampling.update(res[\"data_from_experiment\"])\n if experiment_write_mode == \"full\":\n results.append(res[\"metrics\"])\n if split_write_mode == \"full\":\n data_from_sampling.update(res[\"data_from_experiment\"])\n\n if file_name and write_step:\n if i == write_step:\n pd.DataFrame(results).to_csv(file_name, index=False)\n elif i % write_step == 0:\n pd.DataFrame(results).to_csv(\n file_name, index=False, header=False, mode=\"a\"\n )\n results = []\n\n results = pd.DataFrame(results)\n if file_name and write_step:\n results.to_csv(file_name, index=False, header=False, mode=\"a\")\n elif file_name:\n results.to_csv(file_name, index=False)\n return results, data_from_sampling\n else:\n return results, data_from_sampling\n\n def features_p_value_distribution(\n self, experiment_results: pd.DataFrame, figsize=None, bin_step=0.05\n ):\n \"\"\"Process plots of features' p-value distribution.\n\n Args:\n experiment_results:\n Results of experiments\n figsize:\n Size of figure for plot\n bin_step:\n Step for bins in X axis\n \"\"\"\n feature_num = len(self.target_fields)\n figsize = figsize or (15, 7 * feature_num)\n bin_step = bin_step or self.alpha\n bins = np.arange(0, 1 + bin_step, bin_step)\n figure, axs = plt.subplots(nrows=feature_num, ncols=2, figsize=figsize)\n for i in range(feature_num):\n sns.histplot(\n data=experiment_results,\n x=f\"{self.target_fields[i]} t-test p-value\",\n ax=axs[i, 0],\n bins=bins,\n stat=\"percent\",\n shrink=0.8,\n )\n sns.histplot(\n data=experiment_results,\n x=f\"{self.target_fields[i]} ks-test p-value\",\n ax=axs[i, 1],\n bins=bins,\n stat=\"percent\",\n shrink=0.8,\n )\n\n axs[i, 0].set_title(\n f\"{self.target_fields[i]} t-test p-value\\npassed score: {experiment_results[f'{self.target_fields[i]} t-test passed'].mean():.3f}\"\n )\n axs[i, 1].set_title(\n f\"{self.target_fields[i]} ks-test p-value\\npassed score: {experiment_results[f'{self.target_fields[i]} ks-test passed'].mean():.3f}\"\n )\n plt.show()\n\n def aa_score(self, experiment_results: pd.DataFrame) -> pd.DataFrame:\n \"\"\"Estimates mean passed score for t-test and ks-test in AA-test.\n\n Args:\n experiment_results:\n Results of the experiment\n\n Returns:\n Pandas dataframe containing the results of the AA-test\n \"\"\"\n result = pd.DataFrame(\n {\n f: {\n \"t-test passed score\": experiment_results[\n f\"{f} t-test passed\"\n ].mean(),\n \"ks-test passed score\": experiment_results[\n f\"{f} ks-test passed\"\n ].mean(),\n }\n for f in self.target_fields\n }\n ).T\n\n result[\"t-test aa passed\"] = result[\"t-test passed score\"].apply(\n lambda x: 0.8 * self.alpha <= x <= 1.2 * self.alpha\n )\n result[\"ks-test aa passed\"] = result[\"ks-test passed score\"].apply(\n lambda x: 0.8 * self.alpha <= x <= 1.2 * self.alpha\n )\n result.loc[\"mean\"] = 
result.mean()\n\n return result\n\n def uniform_tests_interpretation(\n self, experiment_results: pd.DataFrame, **kwargs\n ) -> pd.DataFrame:\n \"\"\"Process plotting of p-value distribution and results of AA-test.\n\n Args:\n experiment_results:\n Results of experiments\n **kwargs:\n Some extra keyword arguments:\n * figsize: Size of figure for plot\n * bin_step: Step for bins in X axis\n\n Returns:\n Pandas dataframe containing the results of the AA-test\n \"\"\"\n self.features_p_value_distribution(\n experiment_results,\n figsize=kwargs.get(\"figsize\"),\n bin_step=kwargs.get(\"bin_step\"),\n )\n return self.aa_score(experiment_results)\n\n def num_feature_uniform_analysis(\n self,\n control_data: pd.Series,\n test_data: pd.Series,\n plot_set: Tuple = (\"hist\", \"cumulative\", \"percentile\"),\n **kwargs,\n ):\n \"\"\"Show plots of distribution in groups with uniform tests.\n\n Args:\n control_data:\n Data from control group\n test_data:\n Data from test group\n plot_set:\n Type of plot\n Available types:\n * hist\n * cumulative\n * percentile\n **kwargs:\n Some extra keyword arguments:\n * figsize: Size of figure for plot\n * bins: Number of bins in X axis\n * alpha: Transparency of histograms\n \"\"\"\n if not plot_set:\n return\n\n figsize = kwargs.get(\"figsize\", (25, 20))\n figure, axs = plt.subplots(\n nrows=len(plot_set),\n ncols=1,\n figsize=figsize,\n facecolor=\"honeydew\",\n edgecolor=\"black\",\n )\n ax_count = 0\n\n bins = np.arange(\n min(control_data.min(), test_data.min()),\n max(control_data.max(), test_data.max()),\n (\n max(control_data.max(), test_data.max())\n - min(control_data.min(), test_data.min())\n )\n / kwargs.get(\"bins\", 100),\n )\n\n if \"hist\" in plot_set:\n sns.histplot(\n data=control_data,\n ax=axs[ax_count],\n bins=bins,\n stat=\"percent\",\n element=\"poly\",\n alpha=kwargs.get(\"alpha\", 0.3),\n color=\"blue\",\n )\n sns.histplot(\n data=test_data,\n ax=axs[ax_count],\n bins=bins,\n stat=\"percent\",\n element=\"poly\",\n alpha=kwargs.get(\"alpha\", 0.3),\n color=\"red\",\n )\n axs[ax_count].grid(True)\n axs[ax_count].legend([\"control\", \"test\"])\n axs[ax_count].set_title(\"Histogram\")\n ax_count += 1\n\n if \"cumulative\" in plot_set:\n sns.histplot(\n data=control_data,\n ax=axs[ax_count],\n bins=bins,\n stat=\"percent\",\n element=\"poly\",\n cumulative=True,\n alpha=kwargs.get(\"alpha\", 0.3),\n color=\"blue\",\n )\n sns.histplot(\n data=test_data,\n ax=axs[ax_count],\n bins=bins,\n stat=\"percent\",\n element=\"poly\",\n cumulative=True,\n alpha=kwargs.get(\"alpha\", 0.3),\n color=\"red\",\n )\n axs[ax_count].legend([\"control\", \"test\"])\n axs[ax_count].set_title(\"Cumulative destribution\")\n ax_count += 1\n\n if \"percentile\" in plot_set:\n axs[ax_count].fill_between(\n range(101),\n [control_data.quantile(q) for q in np.arange(0, 1.01, 0.01)],\n color=\"blue\",\n alpha=kwargs.get(\"alpha\", 0.3),\n )\n axs[ax_count].fill_between(\n range(101),\n [test_data.quantile(q) for q in np.arange(0, 1.01, 0.01)],\n color=\"red\",\n alpha=kwargs.get(\"alpha\", 0.3),\n )\n axs[ax_count].legend([\"control\", \"test\"])\n axs[ax_count].set_xticks(np.arange(0, 101))\n axs[ax_count].set_xticklabels(np.arange(0, 101), rotation=45)\n axs[ax_count].set_title(\"Percentile destribution\")\n\n fig_title = f\"\"\"{control_data.name}\n\n t-test p-value: {ttest_ind(control_data, test_data, nan_policy='omit').pvalue:.3f}\n ks-test p-value: {ks_2samp(control_data, test_data).pvalue:.3f}\"\"\"\n figure.suptitle(fig_title, 
fontsize=kwargs.get(\"title_size\", 20))\n plt.show()\n\n def cat_feature_uniform_analysis(\n self, control_data: pd.Series, test_data: pd.Series, **kwargs\n ):\n \"\"\"Show plots of distribution in groups.\n\n Args:\n control_data:\n Data from control group\n test_data:\n Data from test group\n **kwargs:\n Some extra keyword arguments:\n * figsize: Size of figure for plot\n * alpha: Transparency of histograms\n \"\"\"\n s_control_data = control_data.astype(\"str\")\n s_test_data = test_data.astype(\"str\")\n\n figsize = kwargs.get(\"figsize\", (15, 10))\n figure, ax = plt.subplots(\n nrows=1, ncols=1, figsize=figsize, facecolor=\"honeydew\", edgecolor=\"black\"\n )\n\n control_counts = s_control_data.value_counts(normalize=True) * 100\n test_counts = s_test_data.value_counts(normalize=True) * 100\n\n ax.fill_between(\n control_counts.index,\n control_counts.values,\n color=\"blue\",\n alpha=kwargs.get(\"alpha\", 0.3),\n label=\"control\",\n )\n ax.fill_between(\n test_counts.index,\n test_counts[\n [i for i in test_counts.index if i in control_counts.index]\n ].values,\n color=\"red\",\n alpha=kwargs.get(\"alpha\", 0.3),\n label=\"test\",\n )\n\n ax.legend()\n ax.tick_params(axis=\"x\", rotation=90)\n figure.suptitle(f\"{control_data.name}\", fontsize=kwargs.get(\"title_size\", 20))\n plt.show()\n\n def experiment_result_transform(self, experiment: pd.Series):\n \"\"\"\n Transform experiments results into readable view.\n\n Args:\n experiment:\n Results of experiments\n\n Returns:\n DataFrame with results of the experiment and statistics from best split\n \"\"\"\n targets_dict = {}\n for tf in self.target_fields:\n targets_dict[tf] = {}\n for i in experiment.index:\n if i.startswith(f\"{tf} \"):\n targets_dict[tf][i[len(tf) + 1 :]] = experiment[i]\n return pd.DataFrame(targets_dict).T, experiment.iloc[-9:]\n\n def split_analysis(self, splited_data: pd.DataFrame, **kwargs):\n \"\"\"Conducts a full splitting analysis.\n\n Args:\n splited_data:\n Data that has already been split\n **kwargs:\n Some extra keyword arguments for plots in visualization\n \"\"\"\n ssp = split_splited_data(splited_data)\n for nf in self.target_fields:\n self.num_feature_uniform_analysis(\n ssp[\"control\"][nf], ssp[\"test\"][nf], **kwargs\n )\n for cf in self.group_cols:\n self.cat_feature_uniform_analysis(\n ssp[\"control\"][cf], ssp[\"test\"][cf], **kwargs\n )\n\n def get_resume(self, aa_score: pd.DataFrame, best_experiment_stat: pd.DataFrame):\n \"\"\"Format results into clear format for understanding.\n\n Args:\n aa_score:\n Results of aa-test\n best_experiment_stat:\n Results of the best experiment\n\n Returns:\n DataFrame with OK and not OK depending on the results of statistical tests\n \"\"\"\n result = {\"aa test passed\": {}, \"split is uniform\": {}}\n for field in self.target_fields:\n result[\"aa test passed\"][field] = (\n aa_score.loc[field, \"t-test aa passed\"]\n or aa_score.loc[field, \"ks-test aa passed\"]\n )\n result[\"split is uniform\"][field] = (\n best_experiment_stat.loc[field, \"t-test passed\"]\n or best_experiment_stat.loc[field, \"ks-test passed\"]\n )\n result = pd.DataFrame(result)\n result[\"split is uniform\"] = (\n result[\"split is uniform\"]\n .astype(\"bool\")\n .replace({False: \"OK\", True: \"not OK\"})\n )\n result[\"aa test passed\"] = (\n result[\"aa test passed\"]\n .astype(\"bool\")\n .replace({False: \"not OK\", True: \"OK\"})\n )\n return result\n\n def process(\n self,\n data: pd.DataFrame,\n optimize_groups: bool = False,\n iterations: int = 2000,\n show_plots: 
bool=True,\n test_size: float=0.5,\n pbar: bool=True,\n **kwargs,\n ):\n \"\"\"Main function for AATest estimation.\n\n Provides:\n * Columns labeling\n * Results calculations\n * Plotting results\n\n Args:\n test_size:\n Proportion of the test group\n data:\n Input dataset\n optimize_groups:\n Is in necessary to optimize groups\n iterations:\n Number of iterations for AA-test\n show_plots:\n Is in necessary to show plots\n pbar:\n Show progress-bar\n **kwargs:\n Some extra keyword arguments\n\n Returns:\n best_results:\n Results of the experiment with metrics for all fields\n best_split:\n Result of separation\n \"\"\"\n labeling = self.columns_labeling(data)\n best_results, best_split = None, None\n\n if not self.target_fields:\n self.target_fields = labeling[\"target_fields\"]\n\n if optimize_groups:\n max_score = -1\n\n group_variants = [[]]\n for i in range(1, len(labeling[\"group_col\"])):\n i_combinations = combinations(labeling[\"group_col\"], i)\n group_variants.extend(iter(i_combinations))\n\n for gs in tqdm(group_variants, desc=\"Group optimization\", disable=not pbar):\n self.group_cols = list(gs)\n experiment_results, data_splits = self.calc_uniform_tests(\n data,\n pbar=False,\n iterations=iterations,\n test_size=test_size,\n **kwargs,\n )\n if len(experiment_results):\n aa_scores = self.aa_score(experiment_results)\n group_score = max(\n aa_scores.loc[\"mean\", \"t-test aa passed\"],\n aa_scores.loc[\"mean\", \"ks-test aa passed\"],\n )\n if group_score > max_score:\n best_results, best_split = experiment_results, data_splits\n max_score = group_score\n\n else:\n best_results, best_split = self.calc_uniform_tests(\n data,\n experiment_write_mode=\"full\",\n split_write_mode=\"any\",\n iterations=iterations,\n test_size=test_size,\n pbar=pbar,\n **kwargs,\n )\n\n if len(best_results) == 0:\n return best_results, best_split\n if len(best_results) > 0:\n if show_plots:\n aa_scores = self.uniform_tests_interpretation(best_results)\n else:\n aa_scores = self.aa_score(best_results)\n best_rs = best_results.loc[\n best_results[\"mean_tests_score\"].idxmax(), \"random_state\"\n ]\n final_split = best_split[best_rs]\n if show_plots:\n self.split_analysis(final_split, **kwargs)\n\n best_experiment_stat, best_split_stat = self.experiment_result_transform(\n best_results[best_results[\"random_state\"] == best_rs].iloc[0]\n )\n resume = self.get_resume(aa_scores, best_experiment_stat)\n else:\n aa_scores = None\n final_split = None\n best_experiment_stat = None\n best_split_stat = None\n resume = None\n\n return {\n \"experiments\": best_results,\n \"aa_score\": aa_scores,\n \"split\": final_split,\n \"best_experiment_stat\": best_experiment_stat,\n \"split_stat\": best_split_stat,\n \"resume\": resume,\n }"
},
{
"identifier": "create_test_data",
"path": "hypex/utils/tutorial_data_creation.py",
"snippet": "def create_test_data(\n num_users: int = 10000,\n na_step: Union[Iterable[int], int] = None,\n nan_cols: Union[Iterable[str], str] = None,\n file_name: str = None,\n rs=None\n):\n \"\"\"Creates data for tutorial.\n\n Args:\n num_users: num of strings\n na_step: \n num or list of nums of period to make NaN (step of range)\n If list - iterates accordingly order of columns\n nan_cols: \n name of one or several columns to fill with NaN\n If list - iterates accordingly order of na_step\n file_name: name of file to save; doesn't save file if None\n\n Returns:\n data: dataframe with\n \"\"\"\n if rs is not None:\n np.random.seed(rs)\n\n if (nan_cols is not None) and isinstance(nan_cols, str):\n nan_cols = [nan_cols]\n # Simulating dataset with known effect size\n num_months = 12\n\n # signup_months == 0 means customer did not sign up\n signup_months = np.random.choice(np.arange(1, num_months), num_users) * np.random.randint(0, 2, size=num_users)\n\n data = pd.DataFrame(\n {\n \"user_id\": np.repeat(np.arange(num_users), num_months),\n \"signup_month\": np.repeat(signup_months, num_months), # signup month == 0 means customer did not sign up\n \"month\": np.tile(np.arange(1, num_months + 1), num_users), # months are from 1 to 12\n \"spend\": np.random.poisson(500, num_users * num_months),\n }\n )\n\n # A customer is in the treatment group if and only if they signed up\n data[\"treat\"] = data[\"signup_month\"] > 0\n\n # Simulating an effect of month (monotonically decreasing--customers buy less later in the year)\n data[\"spend\"] = data[\"spend\"] - data[\"month\"] * 10\n\n # Simulating a simple treatment effect of 100\n after_signup = (data[\"signup_month\"] < data[\"month\"]) & (data[\"treat\"])\n data.loc[after_signup, \"spend\"] = data[after_signup][\"spend\"] + 100\n\n # Setting the signup month (for ease of analysis)\n i = 3\n data = (\n data[data.signup_month.isin([0, i])]\n .groupby([\"user_id\", \"signup_month\", \"treat\"])\n .apply(\n lambda x: pd.Series(\n {\"pre_spends\": x.loc[x.month < i, \"spend\"].mean(), \"post_spends\": x.loc[x.month > i, \"spend\"].mean(), }\n )\n )\n .reset_index()\n )\n\n # Additional category features\n gender_i = np.random.choice(a=[0, 1], size=data.user_id.nunique())\n gender = [[\"M\", \"F\"][i] for i in gender_i]\n\n age = np.random.choice(a=range(18, 70), size=data.user_id.nunique())\n\n industry_i = np.random.choice(a=range(1, 3), size=data.user_id.nunique())\n industry_names = [\"Finance\", \"E-commerce\", \"Logistics\"]\n industry = [industry_names[i] for i in industry_i]\n\n data[\"age\"] = age\n data[\"gender\"] = gender\n data[\"industry\"] = industry\n data[\"industry\"] = data[\"industry\"].astype(\"str\")\n data[\"treat\"] = data[\"treat\"].astype(int)\n\n # input nans in data if needed\n data = set_nans(data, na_step, nan_cols)\n\n if file_name is not None:\n data.to_csv(ROOT / f\"{file_name}.csv\", index=False)\n\n return data"
}
] | import pandas as pd
import pytest
import sys
from pathlib import Path
from hypex import AATest
from hypex.utils.tutorial_data_creation import create_test_data | 9,049 |
sys.path.append(str(Path(".").absolute().parent))
@pytest.fixture
def data():
|
sys.path.append(str(Path(".").absolute().parent))
@pytest.fixture
def data(): | return create_test_data(rs=52) | 1 | 2023-11-01 08:58:57+00:00 | 12k |
mileswyn/SAMIHS | models/segment_anything_samihs/automatic_mask_generator.py | [
{
"identifier": "Samihs",
"path": "models/segment_anything_samihs/modeling/samihs.py",
"snippet": "class Samihs(nn.Module):\n mask_threshold: float = 0.0\n image_format: str = \"RGB\"\n\n def __init__(\n self,\n image_encoder: ImageEncoderViT,\n prompt_encoder: PromptEncoder,\n mask_decoder: MaskDecoder,\n pixel_mean: List[float] = [123.675, 116.28, 103.53],\n pixel_std: List[float] = [58.395, 57.12, 57.375],\n ) -> None:\n \"\"\"\n SAM predicts object masks from an image and input prompts.\n\n Arguments:\n image_encoder (ImageEncoderViT): The backbone used to encode the\n image into image embeddings that allow for efficient mask prediction.\n prompt_encoder (PromptEncoder): Encodes various types of input prompts.\n mask_decoder (MaskDecoder): Predicts masks from the image embeddings\n and encoded prompts.\n pixel_mean (list(float)): Mean values for normalizing pixels in the input image.\n pixel_std (list(float)): Std values for normalizing pixels in the input image.\n \"\"\"\n super().__init__()\n self.image_encoder = image_encoder\n self.prompt_encoder = prompt_encoder\n self.mask_decoder = mask_decoder\n self.register_buffer(\"pixel_mean\", torch.Tensor(pixel_mean).view(-1, 1, 1), False)\n self.register_buffer(\"pixel_std\", torch.Tensor(pixel_std).view(-1, 1, 1), False)\n\n ####################################################\n # for param in self.prompt_encoder.parameters():\n # param.requires_grad = False\n # for param in self.mask_decoder.parameters():\n # param.requires_grad = False\n ####################################################\n # for param in self.image_encoder.parameters():\n # param.requires_grad = False\n for n, value in self.image_encoder.named_parameters():\n if \"down_projection\" not in n and \"Adapter\" not in n:\n value.requires_grad = False\n if \"down_projection\" in n or \"adapter\" in n:\n value.requires_grad = True\n\n @property\n def device(self) -> Any:\n return self.pixel_mean.device\n\n @torch.no_grad()\n def forward_sam(\n self,\n batched_input: List[Dict[str, Any]],\n multimask_output: bool,\n ) -> List[Dict[str, torch.Tensor]]:\n \"\"\"\n Predicts masks end-to-end from provided images and prompts.\n If prompts are not known in advance, using SamPredictor is\n recommended over calling the model directly.\n\n Arguments:\n batched_input (list(dict)): A list over input images, each a\n dictionary with the following keys. A prompt key can be\n excluded if it is not present.\n 'image': The image as a torch tensor in 3xHxW format,\n already transformed for input to the model.\n 'original_size': (tuple(int, int)) The original size of\n the image before transformation, as (H, W).\n 'point_coords': (torch.Tensor) Batched point prompts for\n this image, with shape BxNx2. 
Already transformed to the\n input frame of the model.\n 'point_labels': (torch.Tensor) Batched labels for point prompts,\n with shape BxN.\n 'boxes': (torch.Tensor) Batched box inputs, with shape Bx4.\n Already transformed to the input frame of the model.\n 'mask_inputs': (torch.Tensor) Batched mask inputs to the model,\n in the form Bx1xHxW.\n multimask_output (bool): Whether the model should predict multiple\n disambiguating masks, or return a single mask.\n\n Returns:\n (list(dict)): A list over input images, where each element is\n as dictionary with the following keys.\n 'masks': (torch.Tensor) Batched binary mask predictions,\n with shape BxCxHxW, where B is the number of input prompts,\n C is determined by multimask_output, and (H, W) is the\n original size of the image.\n 'iou_predictions': (torch.Tensor) The model's predictions\n of mask quality, in shape BxC.\n 'low_res_logits': (torch.Tensor) Low resolution logits with\n shape BxCxHxW, where H=W=256. Can be passed as mask input\n to subsequent iterations of prediction.\n \"\"\"\n input_images = torch.stack([self.preprocess(x[\"image\"]) for x in batched_input], dim=0)\n image_embeddings, skip_cache = self.image_encoder(input_images)\n\n outputs = []\n for image_record, curr_embedding in zip(batched_input, image_embeddings):\n if \"point_coords\" in image_record:\n points = (image_record[\"point_coords\"], image_record[\"point_labels\"])\n else:\n points = None\n sparse_embeddings, dense_embeddings = self.prompt_encoder(\n points=points,\n boxes=image_record.get(\"boxes\", None),\n masks=image_record.get(\"mask_inputs\", None),\n )\n low_res_masks, iou_predictions = self.mask_decoder(\n image_embeddings=curr_embedding.unsqueeze(0),\n image_pe=self.prompt_encoder.get_dense_pe(),\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n multimask_output=multimask_output,\n encoder_cache=skip_cache,\n )\n masks = self.postprocess_masks(\n low_res_masks,\n input_size=image_record[\"image\"].shape[-2:],\n original_size=image_record[\"original_size\"],\n )\n masks = masks > self.mask_threshold\n outputs.append(\n {\n \"masks\": masks,\n \"iou_predictions\": iou_predictions,\n \"low_res_logits\": low_res_masks,\n }\n )\n return outputs\n\n def forward(\n self, \n imgs: torch.Tensor,\n pt: Tuple[torch.Tensor, torch.Tensor], # [b n 2, b n]\n bbox: torch.Tensor=None, # b 4\n ) -> torch.Tensor:\n # imge, skip_cache = self.image_encoder(imgs)\n imge = self.image_encoder(imgs)\n if len(pt[0].shape) == 3:\n se, de = self.prompt_encoder( # se b 2 256, de b 256 32 32\n points=pt,\n boxes=None,\n masks=None,\n )\n low_res_masks, _ = self.mask_decoder( # low_res_mask b 1 128 128\n image_embeddings=imge,\n image_pe=self.prompt_encoder.get_dense_pe(), \n sparse_prompt_embeddings=se,\n dense_prompt_embeddings=de, \n multimask_output=False,\n # encoder_cache=skip_cache,\n )\n masks = F.interpolate(low_res_masks, (256, 256), mode=\"bilinear\", align_corners=False)\n outputs = {\"low_res_logits\": low_res_masks, \"masks\": low_res_masks} # 10.10\n return outputs\n else:\n low_res_masks, masks = [], []\n for i in range(pt[0].shape[1]):\n pti = (pt[0][:, i, :, :], pt[1][:, i, :])\n sei, dei = self.prompt_encoder( # se b 2 256, de b 256 32 32\n points=pti,\n boxes=None,\n masks=None,\n )\n low_res_masksi, _ = self.mask_decoder( # low_res_mask b 1 128 128\n image_embeddings=imge,\n image_pe=self.prompt_encoder.get_dense_pe(), \n sparse_prompt_embeddings=sei,\n dense_prompt_embeddings=dei, \n multimask_output=False,\n )\n 
masksi = F.interpolate(low_res_masksi, (256, 256), mode=\"bilinear\", align_corners=False)\n low_res_masks.append(low_res_masksi)\n masks.append(masksi)\n low_res_masks = torch.stack(low_res_masks, dim=1)\n masks = torch.stack(masks, dim=1) # b c 1 255 255\n masks = masks.reshape(masks.shape[0], -1, masks.shape[3], masks.shape[4])\n low_res_masks = low_res_masks.reshape(low_res_masks.shape[0], -1, low_res_masks.shape[3], low_res_masks.shape[4])\n outputs = {\"low_res_logits\": low_res_masks, \"masks\": masks}\n return outputs\n\n\n\n def postprocess_masks(\n self,\n masks: torch.Tensor,\n input_size: Tuple[int, ...],\n original_size: Tuple[int, ...],\n ) -> torch.Tensor:\n \"\"\"\n Remove padding and upscale masks to the original image size.\n\n Arguments:\n masks (torch.Tensor): Batched masks from the mask_decoder,\n in BxCxHxW format.\n input_size (tuple(int, int)): The size of the image input to the\n model, in (H, W) format. Used to remove padding.\n original_size (tuple(int, int)): The original size of the image\n before resizing for input to the model, in (H, W) format.\n\n Returns:\n (torch.Tensor): Batched masks in BxCxHxW format, where (H, W)\n is given by original_size.\n \"\"\"\n masks = F.interpolate(\n masks,\n (self.image_encoder.img_size, self.image_encoder.img_size),\n mode=\"bilinear\",\n align_corners=False,\n )\n masks = masks[..., : input_size[0], : input_size[1]]\n masks = F.interpolate(masks, original_size, mode=\"bilinear\", align_corners=False)\n return masks\n\n def preprocess(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Normalize pixel values and pad to a square input.\"\"\"\n # Normalize colors\n x = (x - self.pixel_mean) / self.pixel_std\n\n # Pad\n h, w = x.shape[-2:]\n padh = self.image_encoder.img_size - h\n padw = self.image_encoder.img_size - w\n x = F.pad(x, (0, padw, 0, padh))\n return x"
},
{
"identifier": "MaskData",
"path": "models/segment_anything_samihs/utils/amg.py",
"snippet": "class MaskData:\n \"\"\"\n A structure for storing masks and their related data in batched format.\n Implements basic filtering and concatenation.\n \"\"\"\n\n def __init__(self, **kwargs) -> None:\n for v in kwargs.values():\n assert isinstance(\n v, (list, np.ndarray, torch.Tensor)\n ), \"MaskData only supports list, numpy arrays, and torch tensors.\"\n self._stats = dict(**kwargs)\n\n def __setitem__(self, key: str, item: Any) -> None:\n assert isinstance(\n item, (list, np.ndarray, torch.Tensor)\n ), \"MaskData only supports list, numpy arrays, and torch tensors.\"\n self._stats[key] = item\n\n def __delitem__(self, key: str) -> None:\n del self._stats[key]\n\n def __getitem__(self, key: str) -> Any:\n return self._stats[key]\n\n def items(self) -> ItemsView[str, Any]:\n return self._stats.items()\n\n def filter(self, keep: torch.Tensor) -> None:\n for k, v in self._stats.items():\n if v is None:\n self._stats[k] = None\n elif isinstance(v, torch.Tensor):\n self._stats[k] = v[torch.as_tensor(keep, device=v.device)]\n elif isinstance(v, np.ndarray):\n self._stats[k] = v[keep.detach().cpu().numpy()]\n elif isinstance(v, list) and keep.dtype == torch.bool:\n self._stats[k] = [a for i, a in enumerate(v) if keep[i]]\n elif isinstance(v, list):\n self._stats[k] = [v[i] for i in keep]\n else:\n raise TypeError(f\"MaskData key {k} has an unsupported type {type(v)}.\")\n\n def cat(self, new_stats: \"MaskData\") -> None:\n for k, v in new_stats.items():\n if k not in self._stats or self._stats[k] is None:\n self._stats[k] = deepcopy(v)\n elif isinstance(v, torch.Tensor):\n self._stats[k] = torch.cat([self._stats[k], v], dim=0)\n elif isinstance(v, np.ndarray):\n self._stats[k] = np.concatenate([self._stats[k], v], axis=0)\n elif isinstance(v, list):\n self._stats[k] = self._stats[k] + deepcopy(v)\n else:\n raise TypeError(f\"MaskData key {k} has an unsupported type {type(v)}.\")\n\n def to_numpy(self) -> None:\n for k, v in self._stats.items():\n if isinstance(v, torch.Tensor):\n self._stats[k] = v.detach().cpu().numpy()"
},
{
"identifier": "area_from_rle",
"path": "models/segment_anything_samihs/utils/amg.py",
"snippet": "def area_from_rle(rle: Dict[str, Any]) -> int:\n return sum(rle[\"counts\"][1::2])"
},
{
"identifier": "batch_iterator",
"path": "models/segment_anything_samihs/utils/amg.py",
"snippet": "def batch_iterator(batch_size: int, *args) -> Generator[List[Any], None, None]:\n assert len(args) > 0 and all(\n len(a) == len(args[0]) for a in args\n ), \"Batched iteration must have inputs of all the same size.\"\n n_batches = len(args[0]) // batch_size + int(len(args[0]) % batch_size != 0)\n for b in range(n_batches):\n yield [arg[b * batch_size : (b + 1) * batch_size] for arg in args]"
},
{
"identifier": "batched_mask_to_box",
"path": "models/segment_anything_samihs/utils/amg.py",
"snippet": "def batched_mask_to_box(masks: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Calculates boxes in XYXY format around masks. Return [0,0,0,0] for\n an empty mask. For input shape C1xC2x...xHxW, the output shape is C1xC2x...x4.\n \"\"\"\n # torch.max below raises an error on empty inputs, just skip in this case\n if torch.numel(masks) == 0:\n return torch.zeros(*masks.shape[:-2], 4, device=masks.device)\n\n # Normalize shape to CxHxW\n shape = masks.shape\n h, w = shape[-2:]\n if len(shape) > 2:\n masks = masks.flatten(0, -3)\n else:\n masks = masks.unsqueeze(0)\n\n # Get top and bottom edges\n in_height, _ = torch.max(masks, dim=-1)\n in_height_coords = in_height * torch.arange(h, device=in_height.device)[None, :]\n bottom_edges, _ = torch.max(in_height_coords, dim=-1)\n in_height_coords = in_height_coords + h * (~in_height)\n top_edges, _ = torch.min(in_height_coords, dim=-1)\n\n # Get left and right edges\n in_width, _ = torch.max(masks, dim=-2)\n in_width_coords = in_width * torch.arange(w, device=in_width.device)[None, :]\n right_edges, _ = torch.max(in_width_coords, dim=-1)\n in_width_coords = in_width_coords + w * (~in_width)\n left_edges, _ = torch.min(in_width_coords, dim=-1)\n\n # If the mask is empty the right edge will be to the left of the left edge.\n # Replace these boxes with [0, 0, 0, 0]\n empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges)\n out = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1)\n out = out * (~empty_filter).unsqueeze(-1)\n\n # Return to original shape\n if len(shape) > 2:\n out = out.reshape(*shape[:-2], 4)\n else:\n out = out[0]\n\n return out"
},
{
"identifier": "box_xyxy_to_xywh",
"path": "models/segment_anything_samihs/utils/amg.py",
"snippet": "def box_xyxy_to_xywh(box_xyxy: torch.Tensor) -> torch.Tensor:\n box_xywh = deepcopy(box_xyxy)\n box_xywh[2] = box_xywh[2] - box_xywh[0]\n box_xywh[3] = box_xywh[3] - box_xywh[1]\n return box_xywh"
},
{
"identifier": "build_all_layer_point_grids",
"path": "models/segment_anything_samihs/utils/amg.py",
"snippet": "def build_all_layer_point_grids(\n n_per_side: int, n_layers: int, scale_per_layer: int\n) -> List[np.ndarray]:\n \"\"\"Generates point grids for all crop layers.\"\"\"\n points_by_layer = []\n for i in range(n_layers + 1):\n n_points = int(n_per_side / (scale_per_layer**i))\n points_by_layer.append(build_point_grid(n_points))\n return points_by_layer"
},
{
"identifier": "calculate_stability_score",
"path": "models/segment_anything_samihs/utils/amg.py",
"snippet": "def calculate_stability_score(\n masks: torch.Tensor, mask_threshold: float, threshold_offset: float\n) -> torch.Tensor:\n \"\"\"\n Computes the stability score for a batch of masks. The stability\n score is the IoU between the binary masks obtained by thresholding\n the predicted mask logits at high and low values.\n \"\"\"\n # One mask is always contained inside the other.\n # Save memory by preventing unnecessary cast to torch.int64\n intersections = (\n (masks > (mask_threshold + threshold_offset))\n .sum(-1, dtype=torch.int16)\n .sum(-1, dtype=torch.int32)\n )\n unions = (\n (masks > (mask_threshold - threshold_offset))\n .sum(-1, dtype=torch.int16)\n .sum(-1, dtype=torch.int32)\n )\n return intersections / unions"
},
{
"identifier": "coco_encode_rle",
"path": "models/segment_anything_samihs/utils/amg.py",
"snippet": "def coco_encode_rle(uncompressed_rle: Dict[str, Any]) -> Dict[str, Any]:\n from pycocotools import mask as mask_utils # type: ignore\n\n h, w = uncompressed_rle[\"size\"]\n rle = mask_utils.frPyObjects(uncompressed_rle, h, w)\n rle[\"counts\"] = rle[\"counts\"].decode(\"utf-8\") # Necessary to serialize with json\n return rle"
},
{
"identifier": "generate_crop_boxes",
"path": "models/segment_anything_samihs/utils/amg.py",
"snippet": "def generate_crop_boxes(\n im_size: Tuple[int, ...], n_layers: int, overlap_ratio: float\n) -> Tuple[List[List[int]], List[int]]:\n \"\"\"\n Generates a list of crop boxes of different sizes. Each layer\n has (2**i)**2 boxes for the ith layer.\n \"\"\"\n crop_boxes, layer_idxs = [], []\n im_h, im_w = im_size\n short_side = min(im_h, im_w)\n\n # Original image\n crop_boxes.append([0, 0, im_w, im_h])\n layer_idxs.append(0)\n\n def crop_len(orig_len, n_crops, overlap):\n return int(math.ceil((overlap * (n_crops - 1) + orig_len) / n_crops))\n\n for i_layer in range(n_layers):\n n_crops_per_side = 2 ** (i_layer + 1)\n overlap = int(overlap_ratio * short_side * (2 / n_crops_per_side))\n\n crop_w = crop_len(im_w, n_crops_per_side, overlap)\n crop_h = crop_len(im_h, n_crops_per_side, overlap)\n\n crop_box_x0 = [int((crop_w - overlap) * i) for i in range(n_crops_per_side)]\n crop_box_y0 = [int((crop_h - overlap) * i) for i in range(n_crops_per_side)]\n\n # Crops in XYWH format\n for x0, y0 in product(crop_box_x0, crop_box_y0):\n box = [x0, y0, min(x0 + crop_w, im_w), min(y0 + crop_h, im_h)]\n crop_boxes.append(box)\n layer_idxs.append(i_layer + 1)\n\n return crop_boxes, layer_idxs"
},
{
"identifier": "is_box_near_crop_edge",
"path": "models/segment_anything_samihs/utils/amg.py",
"snippet": "def is_box_near_crop_edge(\n boxes: torch.Tensor, crop_box: List[int], orig_box: List[int], atol: float = 20.0\n) -> torch.Tensor:\n \"\"\"Filter masks at the edge of a crop, but not at the edge of the original image.\"\"\"\n crop_box_torch = torch.as_tensor(crop_box, dtype=torch.float, device=boxes.device)\n orig_box_torch = torch.as_tensor(orig_box, dtype=torch.float, device=boxes.device)\n boxes = uncrop_boxes_xyxy(boxes, crop_box).float()\n near_crop_edge = torch.isclose(boxes, crop_box_torch[None, :], atol=atol, rtol=0)\n near_image_edge = torch.isclose(boxes, orig_box_torch[None, :], atol=atol, rtol=0)\n near_crop_edge = torch.logical_and(near_crop_edge, ~near_image_edge)\n return torch.any(near_crop_edge, dim=1)"
},
{
"identifier": "mask_to_rle_pytorch",
"path": "models/segment_anything_samihs/utils/amg.py",
"snippet": "def mask_to_rle_pytorch(tensor: torch.Tensor) -> List[Dict[str, Any]]:\n \"\"\"\n Encodes masks to an uncompressed RLE, in the format expected by\n pycoco tools.\n \"\"\"\n # Put in fortran order and flatten h,w\n b, h, w = tensor.shape\n tensor = tensor.permute(0, 2, 1).flatten(1)\n\n # Compute change indices\n diff = tensor[:, 1:] ^ tensor[:, :-1]\n change_indices = diff.nonzero()\n\n # Encode run length\n out = []\n for i in range(b):\n cur_idxs = change_indices[change_indices[:, 0] == i, 1]\n cur_idxs = torch.cat(\n [\n torch.tensor([0], dtype=cur_idxs.dtype, device=cur_idxs.device),\n cur_idxs + 1,\n torch.tensor([h * w], dtype=cur_idxs.dtype, device=cur_idxs.device),\n ]\n )\n btw_idxs = cur_idxs[1:] - cur_idxs[:-1]\n counts = [] if tensor[i, 0] == 0 else [0]\n counts.extend(btw_idxs.detach().cpu().tolist())\n out.append({\"size\": [h, w], \"counts\": counts})\n return out"
},
{
"identifier": "remove_small_regions",
"path": "models/segment_anything_samihs/utils/amg.py",
"snippet": "def remove_small_regions(\n mask: np.ndarray, area_thresh: float, mode: str\n) -> Tuple[np.ndarray, bool]:\n \"\"\"\n Removes small disconnected regions and holes in a mask. Returns the\n mask and an indicator of if the mask has been modified.\n \"\"\"\n import cv2 # type: ignore\n\n assert mode in [\"holes\", \"islands\"]\n correct_holes = mode == \"holes\"\n working_mask = (correct_holes ^ mask).astype(np.uint8)\n n_labels, regions, stats, _ = cv2.connectedComponentsWithStats(working_mask, 8)\n sizes = stats[:, -1][1:] # Row 0 is background label\n small_regions = [i + 1 for i, s in enumerate(sizes) if s < area_thresh]\n if len(small_regions) == 0:\n return mask, False\n fill_labels = [0] + small_regions\n if not correct_holes:\n fill_labels = [i for i in range(n_labels) if i not in fill_labels]\n # If every region is below threshold, keep largest\n if len(fill_labels) == 0:\n fill_labels = [int(np.argmax(sizes)) + 1]\n mask = np.isin(regions, fill_labels)\n return mask, True"
},
{
"identifier": "rle_to_mask",
"path": "models/segment_anything_samihs/utils/amg.py",
"snippet": "def rle_to_mask(rle: Dict[str, Any]) -> np.ndarray:\n \"\"\"Compute a binary mask from an uncompressed RLE.\"\"\"\n h, w = rle[\"size\"]\n mask = np.empty(h * w, dtype=bool)\n idx = 0\n parity = False\n for count in rle[\"counts\"]:\n mask[idx : idx + count] = parity\n idx += count\n parity ^= True\n mask = mask.reshape(w, h)\n return mask.transpose() # Put in C order"
},
{
"identifier": "uncrop_boxes_xyxy",
"path": "models/segment_anything_samihs/utils/amg.py",
"snippet": "def uncrop_boxes_xyxy(boxes: torch.Tensor, crop_box: List[int]) -> torch.Tensor:\n x0, y0, _, _ = crop_box\n offset = torch.tensor([[x0, y0, x0, y0]], device=boxes.device)\n # Check if boxes has a channel dimension\n if len(boxes.shape) == 3:\n offset = offset.unsqueeze(1)\n return boxes + offset"
},
{
"identifier": "uncrop_masks",
"path": "models/segment_anything_samihs/utils/amg.py",
"snippet": "def uncrop_masks(\n masks: torch.Tensor, crop_box: List[int], orig_h: int, orig_w: int\n) -> torch.Tensor:\n x0, y0, x1, y1 = crop_box\n if x0 == 0 and y0 == 0 and x1 == orig_w and y1 == orig_h:\n return masks\n # Coordinate transform masks\n pad_x, pad_y = orig_w - (x1 - x0), orig_h - (y1 - y0)\n pad = (x0, pad_x - x0, y0, pad_y - y0)\n return torch.nn.functional.pad(masks, pad, value=0)"
},
{
"identifier": "uncrop_points",
"path": "models/segment_anything_samihs/utils/amg.py",
"snippet": "def uncrop_points(points: torch.Tensor, crop_box: List[int]) -> torch.Tensor:\n x0, y0, _, _ = crop_box\n offset = torch.tensor([[x0, y0]], device=points.device)\n # Check if points has a channel dimension\n if len(points.shape) == 3:\n offset = offset.unsqueeze(1)\n return points + offset"
}
] | import numpy as np
import torch
import cv2 # type: ignore # noqa: F401
from torchvision.ops.boxes import batched_nms, box_area # type: ignore
from typing import Any, Dict, List, Optional, Tuple
from .modeling import Samihs
from .utils.amg import (
MaskData,
area_from_rle,
batch_iterator,
batched_mask_to_box,
box_xyxy_to_xywh,
build_all_layer_point_grids,
calculate_stability_score,
coco_encode_rle,
generate_crop_boxes,
is_box_near_crop_edge,
mask_to_rle_pytorch,
remove_small_regions,
rle_to_mask,
uncrop_boxes_xyxy,
uncrop_masks,
uncrop_points,
)
from pycocotools import mask as mask_utils # type: ignore # noqa: F401 | 8,058 | point sampling.
points_per_batch (int): Sets the number of points run simultaneously
by the model. Higher numbers may be faster but use more GPU memory.
pred_iou_thresh (float): A filtering threshold in [0,1], using the
model's predicted mask quality.
stability_score_thresh (float): A filtering threshold in [0,1], using
the stability of the mask under changes to the cutoff used to binarize
the model's mask predictions.
stability_score_offset (float): The amount to shift the cutoff when
          calculating the stability score.
box_nms_thresh (float): The box IoU cutoff used by non-maximal
suppression to filter duplicate masks.
crop_n_layers (int): If >0, mask prediction will be run again on
crops of the image. Sets the number of layers to run, where each
layer has 2**i_layer number of image crops.
crop_nms_thresh (float): The box IoU cutoff used by non-maximal
suppression to filter duplicate masks between different crops.
crop_overlap_ratio (float): Sets the degree to which crops overlap.
In the first crop layer, crops will overlap by this fraction of
the image length. Later layers with more crops scale down this overlap.
crop_n_points_downscale_factor (int): The number of points-per-side
sampled in layer n is scaled down by crop_n_points_downscale_factor**n.
point_grids (list(np.ndarray) or None): A list over explicit grids
of points used for sampling, normalized to [0,1]. The nth grid in the
list is used in the nth crop layer. Exclusive with points_per_side.
min_mask_region_area (int): If >0, postprocessing will be applied
to remove disconnected regions and holes in masks with area smaller
than min_mask_region_area. Requires opencv.
output_mode (str): The form masks are returned in. Can be 'binary_mask',
'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools.
For large resolutions, 'binary_mask' may consume large amounts of
memory.
"""
assert (points_per_side is None) != (
point_grids is None
), "Exactly one of points_per_side or point_grid must be provided."
if points_per_side is not None:
self.point_grids = build_all_layer_point_grids(
points_per_side,
crop_n_layers,
crop_n_points_downscale_factor,
)
elif point_grids is not None:
self.point_grids = point_grids
else:
raise ValueError("Can't have both points_per_side and point_grid be None.")
assert output_mode in [
"binary_mask",
"uncompressed_rle",
"coco_rle",
], f"Unknown output_mode {output_mode}."
        if output_mode == "coco_rle":
            from pycocotools import mask as mask_utils  # type: ignore # noqa: F401
        if min_mask_region_area > 0:
            import cv2  # type: ignore # noqa: F401
        self.points_per_batch = points_per_batch
self.pred_iou_thresh = pred_iou_thresh
self.stability_score_thresh = stability_score_thresh
self.stability_score_offset = stability_score_offset
self.box_nms_thresh = box_nms_thresh
self.crop_n_layers = crop_n_layers
self.crop_nms_thresh = crop_nms_thresh
self.crop_overlap_ratio = crop_overlap_ratio
self.crop_n_points_downscale_factor = crop_n_points_downscale_factor
self.min_mask_region_area = min_mask_region_area
self.output_mode = output_mode
@torch.no_grad()
def generate(self, image: np.ndarray) -> List[Dict[str, Any]]:
"""
Generates masks for the given image.
Arguments:
image (np.ndarray): The image to generate masks for, in HWC uint8 format.
Returns:
list(dict(str, any)): A list over records for masks. Each record is
a dict containing the following keys:
segmentation (dict(str, any) or np.ndarray): The mask. If
output_mode='binary_mask', is an array of shape HW. Otherwise,
is a dictionary containing the RLE.
bbox (list(float)): The box around the mask, in XYWH format.
area (int): The area in pixels of the mask.
predicted_iou (float): The model's own prediction of the mask's
quality. This is filtered by the pred_iou_thresh parameter.
point_coords (list(list(float))): The point coordinates input
to the model to generate this mask.
stability_score (float): A measure of the mask's quality. This
is filtered on using the stability_score_thresh parameter.
crop_box (list(float)): The crop of the image used to generate
the mask, given in XYWH format.
"""
# Generate masks
mask_data = self._generate_masks(image)
# Filter small disconnected regions and holes in masks
if self.min_mask_region_area > 0:
mask_data = self.postprocess_small_regions(
mask_data,
self.min_mask_region_area,
max(self.box_nms_thresh, self.crop_nms_thresh),
)
# Encode masks
if self.output_mode == "coco_rle":
mask_data["segmentations"] = [coco_encode_rle(rle) for rle in mask_data["rles"]]
elif self.output_mode == "binary_mask":
mask_data["segmentations"] = [rle_to_mask(rle) for rle in mask_data["rles"]]
else:
mask_data["segmentations"] = mask_data["rles"]
# Write mask records
curr_anns = []
for idx in range(len(mask_data["segmentations"])):
ann = {
"segmentation": mask_data["segmentations"][idx],
"area": area_from_rle(mask_data["rles"][idx]),
| # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
class SamAutomaticMaskGenerator:
def __init__(
self,
model: Samihs,
points_per_side: Optional[int] = 32,
points_per_batch: int = 64,
pred_iou_thresh: float = 0.88,
stability_score_thresh: float = 0.95,
stability_score_offset: float = 1.0,
box_nms_thresh: float = 0.7,
crop_n_layers: int = 0,
crop_nms_thresh: float = 0.7,
crop_overlap_ratio: float = 512 / 1500,
crop_n_points_downscale_factor: int = 1,
point_grids: Optional[List[np.ndarray]] = None,
min_mask_region_area: int = 0,
output_mode: str = "binary_mask",
) -> None:
"""
Using a SAM model, generates masks for the entire image.
Generates a grid of point prompts over the image, then filters
low quality and duplicate masks. The default settings are chosen
for SAM with a ViT-H backbone.
Arguments:
model (Sam): The SAM model to use for mask prediction.
points_per_side (int or None): The number of points to be sampled
along one side of the image. The total number of points is
points_per_side**2. If None, 'point_grids' must provide explicit
point sampling.
points_per_batch (int): Sets the number of points run simultaneously
by the model. Higher numbers may be faster but use more GPU memory.
pred_iou_thresh (float): A filtering threshold in [0,1], using the
model's predicted mask quality.
stability_score_thresh (float): A filtering threshold in [0,1], using
the stability of the mask under changes to the cutoff used to binarize
the model's mask predictions.
stability_score_offset (float): The amount to shift the cutoff when
          calculating the stability score.
box_nms_thresh (float): The box IoU cutoff used by non-maximal
suppression to filter duplicate masks.
crop_n_layers (int): If >0, mask prediction will be run again on
crops of the image. Sets the number of layers to run, where each
layer has 2**i_layer number of image crops.
crop_nms_thresh (float): The box IoU cutoff used by non-maximal
suppression to filter duplicate masks between different crops.
crop_overlap_ratio (float): Sets the degree to which crops overlap.
In the first crop layer, crops will overlap by this fraction of
the image length. Later layers with more crops scale down this overlap.
crop_n_points_downscale_factor (int): The number of points-per-side
sampled in layer n is scaled down by crop_n_points_downscale_factor**n.
point_grids (list(np.ndarray) or None): A list over explicit grids
of points used for sampling, normalized to [0,1]. The nth grid in the
list is used in the nth crop layer. Exclusive with points_per_side.
min_mask_region_area (int): If >0, postprocessing will be applied
to remove disconnected regions and holes in masks with area smaller
than min_mask_region_area. Requires opencv.
output_mode (str): The form masks are returned in. Can be 'binary_mask',
'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools.
For large resolutions, 'binary_mask' may consume large amounts of
memory.
"""
assert (points_per_side is None) != (
point_grids is None
), "Exactly one of points_per_side or point_grid must be provided."
if points_per_side is not None:
self.point_grids = build_all_layer_point_grids(
points_per_side,
crop_n_layers,
crop_n_points_downscale_factor,
)
elif point_grids is not None:
self.point_grids = point_grids
else:
raise ValueError("Can't have both points_per_side and point_grid be None.")
assert output_mode in [
"binary_mask",
"uncompressed_rle",
"coco_rle",
], f"Unknown output_mode {output_mode}."
        if output_mode == "coco_rle":
            from pycocotools import mask as mask_utils  # type: ignore # noqa: F401
        if min_mask_region_area > 0:
            import cv2  # type: ignore # noqa: F401
        self.points_per_batch = points_per_batch
self.pred_iou_thresh = pred_iou_thresh
self.stability_score_thresh = stability_score_thresh
self.stability_score_offset = stability_score_offset
self.box_nms_thresh = box_nms_thresh
self.crop_n_layers = crop_n_layers
self.crop_nms_thresh = crop_nms_thresh
self.crop_overlap_ratio = crop_overlap_ratio
self.crop_n_points_downscale_factor = crop_n_points_downscale_factor
self.min_mask_region_area = min_mask_region_area
self.output_mode = output_mode
@torch.no_grad()
def generate(self, image: np.ndarray) -> List[Dict[str, Any]]:
"""
Generates masks for the given image.
Arguments:
image (np.ndarray): The image to generate masks for, in HWC uint8 format.
Returns:
list(dict(str, any)): A list over records for masks. Each record is
a dict containing the following keys:
segmentation (dict(str, any) or np.ndarray): The mask. If
output_mode='binary_mask', is an array of shape HW. Otherwise,
is a dictionary containing the RLE.
bbox (list(float)): The box around the mask, in XYWH format.
area (int): The area in pixels of the mask.
predicted_iou (float): The model's own prediction of the mask's
quality. This is filtered by the pred_iou_thresh parameter.
point_coords (list(list(float))): The point coordinates input
to the model to generate this mask.
stability_score (float): A measure of the mask's quality. This
is filtered on using the stability_score_thresh parameter.
crop_box (list(float)): The crop of the image used to generate
the mask, given in XYWH format.
"""
# Generate masks
mask_data = self._generate_masks(image)
# Filter small disconnected regions and holes in masks
if self.min_mask_region_area > 0:
mask_data = self.postprocess_small_regions(
mask_data,
self.min_mask_region_area,
max(self.box_nms_thresh, self.crop_nms_thresh),
)
# Encode masks
if self.output_mode == "coco_rle":
mask_data["segmentations"] = [coco_encode_rle(rle) for rle in mask_data["rles"]]
elif self.output_mode == "binary_mask":
mask_data["segmentations"] = [rle_to_mask(rle) for rle in mask_data["rles"]]
else:
mask_data["segmentations"] = mask_data["rles"]
# Write mask records
curr_anns = []
for idx in range(len(mask_data["segmentations"])):
ann = {
"segmentation": mask_data["segmentations"][idx],
"area": area_from_rle(mask_data["rles"][idx]), | "bbox": box_xyxy_to_xywh(mask_data["boxes"][idx]).tolist(), | 5 | 2023-11-09 07:26:33+00:00 | 12k |
tianhaowuhz/human-assisting-dex-grasp | Runners/TrainGFPPO.py | [
{
"identifier": "GFPPO",
"path": "Algorithms/ppo/gf_ppo_update.py",
"snippet": "class GFPPO:\n def __init__(self,\n vec_env,\n cfg_train,\n device='cpu',\n sampler='sequential',\n log_dir='run',\n is_testing=False,\n print_log=True,\n apply_reset=False,\n asymmetric=False,\n args=None,\n ):\n self.args = args\n ''' PPO '''\n # PPO parameters\n if not isinstance(vec_env.observation_space, Space):\n raise TypeError(\"vec_env.observation_space must be a gym Space\")\n if not isinstance(vec_env.state_space, Space):\n raise TypeError(\"vec_env.state_space must be a gym Space\")\n if not isinstance(vec_env.action_space, Space):\n raise TypeError(\"vec_env.action_space must be a gym Space\")\n self.observation_space = vec_env.observation_space\n self.action_space = vec_env.action_space\n self.state_space = vec_env.state_space\n self.cfg_train = copy.deepcopy(cfg_train)\n learn_cfg = self.cfg_train[\"learn\"]\n self.device = device\n self.asymmetric = asymmetric\n self.desired_kl = learn_cfg.get(\"desired_kl\", None)\n self.schedule = learn_cfg.get(\"schedule\", \"fixed\")\n self.step_size = learn_cfg[\"optim_stepsize\"]\n self.init_noise_std = learn_cfg.get(\"init_noise_std\", 0.3)\n self.model_cfg = self.cfg_train[\"policy\"]\n self.num_transitions_per_env=learn_cfg[\"nsteps\"]\n self.learning_rate=learn_cfg[\"optim_stepsize\"]\n\n self.clip_param = learn_cfg[\"cliprange\"]\n self.num_learning_epochs = learn_cfg[\"noptepochs\"]\n self.num_mini_batches = learn_cfg[\"nminibatches\"]\n self.value_loss_coef = learn_cfg.get(\"value_loss_coef\", 2.0)\n self.entropy_coef = learn_cfg[\"ent_coef\"]\n self.gamma = learn_cfg[\"gamma\"]\n self.lam = learn_cfg[\"lam\"]\n self.max_grad_norm = learn_cfg.get(\"max_grad_norm\", 2.0)\n self.use_clipped_value_loss = learn_cfg.get(\"use_clipped_value_loss\", False)\n\n # policy type \n self.action_type = self.cfg_train[\"setting\"][\"action_type\"]\n self.sub_action_type = self.cfg_train[\"setting\"][\"sub_action_type\"]\n self.action_clip = self.cfg_train[\"setting\"][\"action_clip\"]\n self.grad_process = self.cfg_train[\"setting\"][\"grad_process\"]\n self.grad_scale = self.cfg_train[\"setting\"][\"grad_scale\"]\n\n if self.action_type=='joint' and self.sub_action_type=='add+jointscale':\n action_space_shape = (18+18,)\n else:\n action_space_shape = self.action_space.shape\n print(f'action_space_shape:{action_space_shape}!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')\n self.vec_env = vec_env\n self.vec_env.grad_scale = self.grad_scale\n \n pointnet_version = self.cfg_train[\"policy\"][\"pointnet_version\"]\n\n hand_pcl = self.cfg_train[\"policy\"][\"hand_pcl\"]\n hand_model = None\n\n # PPO components\n self.stack_frame_numer = self.vec_env.stack_frame_numbers\n self.actor_critic = ActorCritic(self.observation_space.shape, self.state_space.shape, action_space_shape,\n self.init_noise_std, self.model_cfg, asymmetric=asymmetric, stack_frame_number=self.stack_frame_numer, \n sub_obs_type=self.vec_env.sub_obs_type, num_fingertip=self.vec_env.num_fingertips, pointnet_type=pointnet_version, \n envs=self.vec_env, hand_pcl=hand_pcl, hand_model=hand_model, args=args)\n\n # pointnet backbone\n \n self.pointnet_finetune = self.model_cfg['finetune_pointnet']\n self.finetune_pointnet_bz = 128\n if self.model_cfg['pretrain_pointnet']:\n if pointnet_version == 'pt2':\n pointnet_model_dict = torch.load(os.path.join(args.score_model_path,'pt2.pt'), map_location=self.device)\n elif pointnet_version == 'pt':\n pointnet_model_dict = torch.load(os.path.join(args.score_model_path,'pt.pt'), map_location=self.device)\n if self.model_cfg['shared_pointnet']:\n 
self.actor_critic.pointnet_enc.load_state_dict(pointnet_model_dict)\n if not self.model_cfg['finetune_pointnet']:\n # freeze pointnet\n for name,param in self.actor_critic.pointnet_enc.named_parameters():\n param.requires_grad = False\n else:\n self.actor_critic.actor_pointnet_enc.load_state_dict(pointnet_model_dict)\n self.actor_critic.critic_pointnet_enc.load_state_dict(pointnet_model_dict)\n\n if not self.model_cfg['finetune_pointnet']:\n # freeze pointnet\n for name,param in self.actor_critic.actor_pointnet_enc.named_parameters():\n param.requires_grad = False\n for name,param in self.actor_critic.critic_pointnet_enc.named_parameters():\n param.requires_grad = False\n\n self.actor_critic.to(self.device)\n self.storage = RolloutStorage(self.vec_env.num_envs, self.num_transitions_per_env, self.observation_space.shape,\n self.state_space.shape, action_space_shape, self.device, sampler)\n \n self.optimizer = optim.Adam(filter(lambda p: p.requires_grad, self.actor_critic.parameters()), lr=self.learning_rate)\n\n ''' SDE '''\n if 'gf' in self.vec_env.sub_obs_type:\n # init SDE config\n self.prior_fn, self.marginal_prob_fn, self.sde_fn = init_sde(\"vp\")\n self.score = CondScoreModel(\n self.marginal_prob_fn,\n hidden_dim=args.hidden_dim,\n embed_dim=args.embed_dim,\n mode=args.score_mode,\n relative=args.relative,\n space=args.space,\n pointnet_version='pt2',\n )\n model_dict = torch.load(os.path.join(args.score_model_path,'score.pt'))\n self.score.load_state_dict(model_dict)\n self.score.to(device)\n self.score.eval()\n self.points_per_object = args.points_per_object\n self.t0 = args.t0\n self.ori_grad = None\n\n ''' Log '''\n # self.log_dir = log_dir\n if self.args.model_dir != \"\" and self.vec_env.mode=='train':\n time_now = self.args.model_dir.split('/')[8].split('_')[0] \n else:\n time_now = time.strftime('%m-%d-%H-%M',time.localtime(time.time()))\n\n self.log_dir = os.path.join(f\"./logs/{args.exp_name}/{time_now}_handrot:{self.vec_env.hand_rotation}_t0:{self.t0}_sfn:{self.vec_env.stack_frame_numbers}_{self.vec_env.num_envs}ne_{len(self.vec_env.shapes_all)}obj_gpt:{self.grad_process}_gs:{self.grad_scale}_at:{self.action_type}_subat:{self.sub_action_type}_rt:{self.vec_env.reward_type}_rn:{self.vec_env.reward_normalize}_simfreq:{self.vec_env.similarity_reward_freq}_cd:{self.vec_env.close_dis}_pts:{pointnet_version}_seed{args.seed}\")\n self.print_log = print_log\n self.writer = SummaryWriter(log_dir=self.log_dir, flush_secs=10)\n self.tot_timesteps = 0\n self.tot_time = 0\n self.is_testing = is_testing\n self.current_learning_iteration = 0\n\n if save_video:\n self.video_log_dir = os.path.join(self.log_dir,'video')\n os.makedirs(self.video_log_dir,exist_ok=True)\n self.vis_env_num = self.args.vis_env_num\n\n self.apply_reset = apply_reset\n\n ''' Evaluation '''\n if 'gf_check' in self.action_type:\n self.eval_round = 20\n else:\n self.eval_round = 5\n\n if self.vec_env.mode == 'eval':\n self.eval_round = self.args.eval_times\n\n if save_state:\n self.eval_metrics = {\n 'obj_shapes':[],\n 'time_step':[],\n 'success_rate':[],\n 'gt_dist':[],\n 'stability':[],\n 'lift_nums':np.zeros(self.vec_env.num_envs),\n 'gf_state_init':[],\n 'gf_state_final':[],\n 'gf_state_gt':[],\n }\n else:\n self.eval_metrics = {\n 'obj_shapes':[],\n 'time_step':[],\n 'success_rate':[],\n 'gt_dist':[],\n 'stability':[],\n 'lift_nums':np.zeros(self.vec_env.num_envs),\n 'obj_translation':[],\n 'obj_cosine_similarity':[],\n }\n self.eval_metrics['obj_shapes'] = self.vec_env.object_types\n\n def test(self, path):\n 
self.actor_critic.load_state_dict(torch.load(path, map_location=self.device))\n self.actor_critic.eval()\n\n def load(self, path):\n self.actor_critic.load_state_dict(torch.load(path, map_location=self.device))\n self.current_learning_iteration = int(path.split(\"_\")[-1].split(\".\")[0])\n self.actor_critic.train()\n\n model_dir = path[:-len(path.split('/')[-1])] + f\"metric_{self.args.exp_name}_{self.args.seed}.pkl\"\n self.eval_metrics = CPickle.load(open(model_dir, 'rb'))\n\n def save(self, path):\n torch.save(self.actor_critic.state_dict(), path)\n \n def eval(self, it):\n # eval initilization\n self.vec_env.eval(vis=save_video)\n test_times = 0\n success_times = 0 # total_success_times / total_trials\n success_rates = [] # s_rate for each round\n reward_all = []\n if 'gf_check' in self.action_type:\n total_diff_direction_num = 0\n total_dof_error = 0\n diff_joint_num = torch.zeros(18,device=self.device)\n \n if self.vec_env.mode == 'train':\n save_time = 0 # means save all videos\n else:\n save_time = self.eval_round - 1\n\n # start evaluation\n with tqdm(total=self.eval_round) as pbar:\n pbar.set_description('Validating:')\n with torch.no_grad():\n for r in range(self.eval_round) :\n if save_video and r<=save_time:\n all_images = torch.tensor([],device=self.device)\n # reset env\n current_obs = self.vec_env.reset()['obs']\n current_states = self.vec_env.get_state()\n eval_done_envs = torch.zeros(self.vec_env.num_envs, dtype=torch.long, device=self.device)\n\n if save_state:\n self.eval_metrics['gf_state_init'].append(self.vec_env.get_states(gf_state=True))\n self.eval_metrics['gf_state_gt'].append(self.vec_env.target_hand_dof)\n\n # step\n while True :\n # Compute the action\n actions, grad = self.compute_action(current_obs=current_obs,mode='eval')\n # print(grad)\n step_actions = self.process_actions(actions=actions.clone(), grad=grad)\n # primitive_actions.append(torch.mean(grad).item())\n # all_actions.append(torch.mean(step_actions).item())\n if self.vec_env.progress_buf[0] == 49 and save_state:\n self.eval_metrics['gf_state_final'].append(self.vec_env.get_states(gf_state=True))\n\n # Step the vec_environment\n next_obs, rews, dones, infos = self.vec_env.step(step_actions, (actions,grad))\n\n if save_video and r<=save_time:\n image = self.vec_env.render(rgb=True, img_size=img_size, vis_env_num=self.vis_env_num).reshape(self.vis_env_num, 1, img_size, img_size, 3)\n all_images = torch.cat([all_images, image],1)\n current_obs.copy_(next_obs['obs'])\n\n # done\n new_done_env_ids = (dones&(1-eval_done_envs)).nonzero(as_tuple=False).squeeze(-1)\n if len(new_done_env_ids) > 0:\n if self.vec_env.disable_collision:\n print('-----------------------------------')\n print('no coll succ:', infos['success_num'])\n self.vec_env.grasp_filter(states=self.eval_metrics['gf_state_final'][r], test_time=1, reset_coll=True)\n \n self.eval_metrics['time_step'].append(it)\n self.eval_metrics['success_rate'].append(float(infos['success_rate'].cpu().numpy()))\n # self.eval_metrics['obj_translation'].append(float(infos['obj_translation'].cpu().numpy()))\n # self.eval_metrics['obj_cosine_similarity'].append(float(infos['obj_cosine_similarity'].cpu().numpy()))\n self.eval_metrics['gt_dist'].append(float(infos['gt_dist'].cpu().numpy()))\n self.eval_metrics['lift_nums']+=infos['lift_nums'].cpu().numpy()\n if self.vec_env.mode == 'eval':\n with open(f'logs/{self.args.exp_name}/metrics_{self.args.eval_name}_eval_{self.args.seed}.pkl', 'wb') as f: \n pickle.dump(self.eval_metrics, f)\n else:\n with 
open(os.path.join(self.log_dir, f'metric_{self.args.exp_name}_{self.args.seed}.pkl'), 'wb') as f: \n pickle.dump(self.eval_metrics, f)\n\n if 'gf_check' in self.action_type:\n final_hand_dof = self.vec_env.final_hand_dof\n target_hand_dof = self.vec_env.target_hand_dof\n diff_direction_ids = ((self.vec_env.final_hand_dof * self.vec_env.target_hand_dof)<0).nonzero() \n same_direction_ids = ((self.vec_env.final_hand_dof * self.vec_env.target_hand_dof)>0).nonzero() \n for mm in range(18):\n diff_joint_num[mm] += torch.sum(diff_direction_ids[:,1]==mm) \n print(len(diff_direction_ids)/self.vec_env.num_envs)\n print(diff_joint_num)\n dof_error = torch.mean(abs(target_hand_dof[same_direction_ids[:,0],same_direction_ids[:,1]] - final_hand_dof[same_direction_ids[:,0],same_direction_ids[:,1]]))\n print(dof_error)\n total_diff_direction_num+=(len(diff_direction_ids)/self.vec_env.num_envs)\n total_dof_error+=(dof_error)\n\n if r > save_time:\n self.vec_env.graphics_device_id = -1\n self.vec_env.enable_camera_sensors = False\n\n if save_video and r<=save_time:\n for (i,images) in enumerate(all_images):\n obj_type = self.vec_env.object_type_per_env[i]\n save_path = os.path.join(self.video_log_dir,f'{obj_type}_epoach:{it}_round:{r}')\n images_to_video(path=save_path, images=images.cpu().numpy(), size=(img_size,img_size))\n\n test_times += len(new_done_env_ids)\n success_times += infos['success_num']\n reward_all.extend(rews[new_done_env_ids].cpu().numpy())\n eval_done_envs[new_done_env_ids] = 1\n print(f'eval_success_rate: {success_times/test_times}')\n success_rates.append(infos['success_num'] / len(new_done_env_ids))\n\n if test_times==(r+1)*self.vec_env.num_envs:\n break\n pbar.update(1)\n if 'gf_check' in self.action_type:\n print(f'total_diff_direction_num:{total_diff_direction_num/self.eval_round}')\n print(f'total_dof_error:{total_dof_error/self.eval_round}')\n\n assert test_times==self.eval_round*self.vec_env.num_envs\n success_rates = torch.tensor(success_rates)\n sr_mu, sr_std = success_rates.mean().cpu().numpy().item(), success_rates.std().cpu().numpy().item()\n print(f'====== t0: {self.t0} || num_envs: {self.vec_env.num_envs} || eval_times: {self.eval_round}')\n print(f'eval_success_rate: {sr_mu:.2f} +- {sr_std:.2f}')\n eval_rews = np.mean(reward_all)\n print(f'eval_rewards: {eval_rews}')\n self.writer.add_scalar('Eval/success_rate', sr_mu, it)\n self.writer.add_scalar('Eval/eval_rews', eval_rews, it)\n\n def run(self, num_learning_iterations, log_interval=1):\n if self.is_testing:\n self.eval(0)\n else:\n # train initilization\n self.actor_critic.train()\n self.vec_env.train()\n rewbuffer = deque(maxlen=100)\n lenbuffer = deque(maxlen=100)\n cur_reward_sum = torch.zeros(self.vec_env.num_envs, dtype=torch.float, device=self.device)\n cur_episode_length = torch.zeros(self.vec_env.num_envs, dtype=torch.float, device=self.device)\n reward_sum = []\n episode_length = []\n\n # reset env\n current_obs = self.vec_env.reset()['obs']\n current_states = self.vec_env.get_state()\n for it in range(self.current_learning_iteration, num_learning_iterations):\n start = time.time()\n ep_infos = []\n if 'ori_similarity' in self.vec_env.reward_type:\n ori_sim_all = []\n # Rollout\n for _ in range(self.num_transitions_per_env):\n if self.apply_reset:\n current_obs = self.vec_env.reset()['obs']\n current_states = self.vec_env.get_state()\n\n # Compute the action\n actions, actions_log_prob, values, mu, sigma, grad = self.compute_action(current_obs=current_obs, current_states=current_states)\n step_actions = 
self.process_actions(actions=actions.clone(), grad=grad)\n\n # Step the vec_environment\n next_obs, rews, dones, infos = self.vec_env.step(step_actions, (actions,grad))\n\n next_states = self.vec_env.get_state()\n\n # Record the transition\n self.storage.add_transitions(current_obs, current_states, actions, rews, dones, values, actions_log_prob, mu, sigma)\n current_obs.copy_(next_obs['obs'])\n current_states.copy_(next_states)\n\n # Book keeping\n ep_infos.append(infos.copy())\n # set_trace()\n if 'ori_similarity' in self.vec_env.reward_type:\n ori_sim_all.append(torch.mean(infos['ori_similarity']))\n # self.writer.add_scalar('Episode/ori_sim_all', torch.mean(infos['ori_similarity']), _)\n\n if self.print_log:\n cur_reward_sum[:] += rews\n cur_episode_length[:] += 1\n\n new_ids = (dones > 0).nonzero(as_tuple=False)\n reward_sum.extend(cur_reward_sum[new_ids][:, 0].cpu().numpy().tolist())\n episode_length.extend(cur_episode_length[new_ids][:, 0].cpu().numpy().tolist())\n cur_reward_sum[new_ids] = 0\n cur_episode_length[new_ids] = 0\n \n # done\n if torch.sum(dones) > 0:\n current_obs = self.vec_env.reset(dones)['obs']\n current_states = self.vec_env.get_state()\n print(infos['success_rate'])\n if 'ori_similarity' in self.vec_env.reward_type:\n fig = plt.figure()\n plt.plot(torch.tensor(ori_sim_all).cpu().numpy())\n ori_sim_all_img = get_img_from_fig(fig, dpi=100)\n # ori_sim_all_img = cv2.resize(ori_sim_all_img,(256,256))\n self.writer.add_image(\"ori_sim\", ori_sim_all_img, it, dataformats='HWC')\n\n if self.print_log:\n # reward_sum = [x[0] for x in reward_sum]\n # episode_length = [x[0] for x in episode_length]\n rewbuffer.extend(reward_sum)\n lenbuffer.extend(episode_length)\n\n _, _, last_values, _, _, _ = self.compute_action(current_obs=current_obs, current_states=current_states, mode='train')\n stop = time.time()\n collection_time = stop - start\n mean_trajectory_length, mean_reward = self.storage.get_statistics()\n\n # Learning step\n start = stop\n self.storage.compute_returns(last_values, self.gamma, self.lam)\n mean_value_loss, mean_surrogate_loss = self.update()\n self.storage.clear()\n stop = time.time()\n learn_time = stop - start\n if self.print_log:\n self.log(locals())\n if it % log_interval == 0:\n self.actor_critic.eval()\n self.eval(it)\n self.actor_critic.train()\n self.vec_env.train()\n self.save(os.path.join(self.log_dir, 'model_{}.pt'.format(it)))\n\n current_obs = self.vec_env.reset()['obs']\n current_states = self.vec_env.get_state()\n cur_episode_length[:] = 0\n # TODO clean extras\n ep_infos.clear()\n self.save(os.path.join(self.log_dir, 'model_{}.pt'.format(num_learning_iterations)))\n\n def log(self, locs, width=70, pad=35):\n self.tot_timesteps += self.num_transitions_per_env * self.vec_env.num_envs\n self.tot_time += locs['collection_time'] + locs['learn_time']\n iteration_time = locs['collection_time'] + locs['learn_time']\n\n ep_string = f''\n if locs['ep_infos']:\n for key in locs['ep_infos'][0]:\n infotensor = torch.tensor([], device=self.device)\n for ep_info in locs['ep_infos']:\n infotensor = torch.cat((infotensor, ep_info[key].to(self.device)))\n if key=='success_num':\n value = torch.sum(infotensor)\n self.writer.add_scalar('Episode/' + 'total_success_num', value, locs['it'])\n ep_string += f\"\"\"{f'Total episode {key}:':>{pad}} {value:.4f}\\n\"\"\"\n value = torch.mean(infotensor)\n self.writer.add_scalar('Episode/' + key, value, locs['it'])\n ep_string += f\"\"\"{f'Mean episode {key}:':>{pad}} {value:.4f}\\n\"\"\"\n mean_std = 
self.actor_critic.log_std.exp().mean()\n\n self.writer.add_scalar('Loss/value_function', locs['mean_value_loss'], locs['it'])\n self.writer.add_scalar('Loss/surrogate', locs['mean_surrogate_loss'], locs['it'])\n self.writer.add_scalar('Policy/mean_noise_std', mean_std.item(), locs['it'])\n if len(locs['rewbuffer']) > 0:\n self.writer.add_scalar('Train/mean_reward', statistics.mean(locs['rewbuffer']), locs['it'])\n self.writer.add_scalar('Train/mean_episode_length', statistics.mean(locs['lenbuffer']), locs['it'])\n self.writer.add_scalar('Train/mean_reward/time', statistics.mean(locs['rewbuffer']), self.tot_time)\n self.writer.add_scalar('Train/mean_episode_length/time', statistics.mean(locs['lenbuffer']), self.tot_time)\n\n self.writer.add_scalar('Train2/mean_reward/step', locs['mean_reward'], locs['it'])\n self.writer.add_scalar('Train2/mean_episode_length/episode', locs['mean_trajectory_length'], locs['it'])\n\n fps = int(self.num_transitions_per_env * self.vec_env.num_envs / (locs['collection_time'] + locs['learn_time']))\n\n str = f\" \\033[1m Learning iteration {locs['it']}/{locs['num_learning_iterations']} \\033[0m \"\n\n if len(locs['rewbuffer']) > 0:\n log_string = (f\"\"\"{'#' * width}\\n\"\"\"\n f\"\"\"{str.center(width, ' ')}\\n\\n\"\"\"\n f\"\"\"{'Computation:':>{pad}} {fps:.0f} steps/s (collection: {locs[\n 'collection_time']:.3f}s, learning {locs['learn_time']:.3f}s)\\n\"\"\"\n f\"\"\"{'Value function loss:':>{pad}} {locs['mean_value_loss']:.4f}\\n\"\"\"\n f\"\"\"{'Surrogate loss:':>{pad}} {locs['mean_surrogate_loss']:.4f}\\n\"\"\"\n f\"\"\"{'Mean action noise std:':>{pad}} {mean_std.item():.2f}\\n\"\"\"\n f\"\"\"{'Mean reward:':>{pad}} {statistics.mean(locs['rewbuffer']):.2f}\\n\"\"\"\n f\"\"\"{'Mean episode length:':>{pad}} {statistics.mean(locs['lenbuffer']):.2f}\\n\"\"\"\n f\"\"\"{'Mean reward/step:':>{pad}} {locs['mean_reward']:.2f}\\n\"\"\"\n f\"\"\"{'Mean episode length/episode:':>{pad}} {locs['mean_trajectory_length']:.2f}\\n\"\"\")\n else:\n log_string = (f\"\"\"{'#' * width}\\n\"\"\"\n f\"\"\"{str.center(width, ' ')}\\n\\n\"\"\"\n f\"\"\"{'Computation:':>{pad}} {fps:.0f} steps/s (collection: {locs[\n 'collection_time']:.3f}s, learning {locs['learn_time']:.3f}s)\\n\"\"\"\n f\"\"\"{'Value function loss:':>{pad}} {locs['mean_value_loss']:.4f}\\n\"\"\"\n f\"\"\"{'Surrogate loss:':>{pad}} {locs['mean_surrogate_loss']:.4f}\\n\"\"\"\n f\"\"\"{'Mean action noise std:':>{pad}} {mean_std.item():.2f}\\n\"\"\"\n f\"\"\"{'Mean reward/step:':>{pad}} {locs['mean_reward']:.2f}\\n\"\"\"\n f\"\"\"{'Mean episode length/episode:':>{pad}} {locs['mean_trajectory_length']:.2f}\\n\"\"\")\n\n log_string += ep_string\n log_string += (f\"\"\"{'-' * width}\\n\"\"\"\n f\"\"\"{'Total timesteps:':>{pad}} {self.tot_timesteps}\\n\"\"\"\n f\"\"\"{'Iteration time:':>{pad}} {iteration_time:.2f}s\\n\"\"\"\n f\"\"\"{'Total time:':>{pad}} {self.tot_time:.2f}s\\n\"\"\"\n f\"\"\"{'ETA:':>{pad}} {self.tot_time / (locs['it'] + 1) * (\n locs['num_learning_iterations'] - locs['it']):.1f}s\\n\"\"\")\n print(log_string)\n\n def update(self):\n mean_value_loss = 0\n mean_surrogate_loss = 0\n\n batch = self.storage.mini_batch_generator(self.num_mini_batches)\n\n for epoch in range(self.num_learning_epochs):\n # for obs_batch, actions_batch, target_values_batch, advantages_batch, returns_batch, old_actions_log_prob_batch \\\n # in self.storage.mini_batch_generator(self.num_mini_batches):\n\n for indices in batch:\n # print(len(indices))\n\n obs_batch = self.storage.observations.view(-1, 
*self.storage.observations.size()[2:])[indices]\n if self.asymmetric:\n states_batch = self.storage.states.view(-1, *self.storage.states.size()[2:])[indices]\n else:\n states_batch = None\n actions_batch = self.storage.actions.view(-1, self.storage.actions.size(-1))[indices]\n target_values_batch = self.storage.values.view(-1, 1)[indices]\n returns_batch = self.storage.returns.view(-1, 1)[indices]\n old_actions_log_prob_batch = self.storage.actions_log_prob.view(-1, 1)[indices]\n advantages_batch = self.storage.advantages.view(-1, 1)[indices]\n old_mu_batch = self.storage.mu.view(-1, self.storage.actions.size(-1))[indices]\n old_sigma_batch = self.storage.sigma.view(-1, self.storage.actions.size(-1))[indices]\n\n actions_log_prob_batch, entropy_batch, value_batch, mu_batch, sigma_batch = self.actor_critic.evaluate(obs_batch,\n states_batch,\n actions_batch)\n\n # KL\n if self.desired_kl != None and self.schedule == 'adaptive':\n\n kl = torch.sum(\n sigma_batch - old_sigma_batch + (torch.square(old_sigma_batch.exp()) + torch.square(old_mu_batch - mu_batch)) / (2.0 * torch.square(sigma_batch.exp())) - 0.5, axis=-1)\n kl_mean = torch.mean(kl)\n\n if kl_mean > self.desired_kl * 2.0:\n self.step_size = max(1e-5, self.step_size / 1.5)\n elif kl_mean < self.desired_kl / 2.0 and kl_mean > 0.0:\n self.step_size = min(1e-2, self.step_size * 1.5)\n\n for param_group in self.optimizer.param_groups:\n param_group['lr'] = self.step_size\n\n # Surrogate loss\n ratio = torch.exp(actions_log_prob_batch - torch.squeeze(old_actions_log_prob_batch))\n surrogate = -torch.squeeze(advantages_batch) * ratio\n surrogate_clipped = -torch.squeeze(advantages_batch) * torch.clamp(ratio, 1.0 - self.clip_param,\n 1.0 + self.clip_param)\n surrogate_loss = torch.max(surrogate, surrogate_clipped).mean()\n\n # Value function loss\n if self.use_clipped_value_loss:\n value_clipped = target_values_batch + (value_batch - target_values_batch).clamp(-self.clip_param,\n self.clip_param)\n value_losses = (value_batch - returns_batch).pow(2)\n value_losses_clipped = (value_clipped - returns_batch).pow(2)\n value_loss = torch.max(value_losses, value_losses_clipped).mean()\n else:\n value_loss = (returns_batch - value_batch).pow(2).mean()\n\n loss = surrogate_loss + self.value_loss_coef * value_loss - self.entropy_coef * entropy_batch.mean()\n\n # Gradient step\n self.optimizer.zero_grad()\n loss.backward()\n nn.utils.clip_grad_norm_(self.actor_critic.parameters(), self.max_grad_norm)\n self.optimizer.step()\n\n mean_value_loss += value_loss.item()\n mean_surrogate_loss += surrogate_loss.item()\n\n num_updates = self.num_learning_epochs * self.num_mini_batches\n mean_value_loss /= num_updates\n mean_surrogate_loss /= num_updates\n\n return mean_value_loss, mean_surrogate_loss\n\n '''\n utils\n '''\n def grad_norm(self,grad):\n scale_grad = (torch.max((abs(grad)),dim=1)[0]).reshape(-1,1).expand_as(grad)\n grad = grad/scale_grad\n return grad\n \n \n def action2grad(self, x, inv=False, relative=True, cur_x=None):\n if not inv:\n batch_size = x.size(0)\n state_dim = x.size(1)\n x = torch.cat([torch.sin(x).reshape(batch_size,state_dim,1), torch.cos(x).reshape(batch_size,state_dim,1)],2).reshape(batch_size,-1)\n return x\n else:\n batch_size = x.size(0)\n state_dim = x.size(1)\n x = x.reshape(batch_size,int(state_dim/2),2)\n cur_x = cur_x.reshape(batch_size,int(state_dim/2),2)\n\n cur_x = torch.cat([-cur_x[:,:,0:1], cur_x[:,:,1:2]],dim=-1)\n ori_grad = torch.sum(torch.cat([x[:,:,1:2], x[:,:,0:1]], dim=-1) * cur_x, dim=-1, 
keepdim=True).reshape(batch_size,int(state_dim/2))\n return ori_grad\n \n def get_obs_with_grad(self, current_obs, reset=False, t=None):\n # compute score\n B = current_obs.size(0)\n cur_hand_dof = current_obs[:,:18].clone() #【-1,1】\n pcl_index = self.stack_frame_numer*7 + 18\n cur_obj_pcl = current_obs[:,pcl_index:self.points_per_object*3+pcl_index].clone().reshape(-1, 3, self.points_per_object)\n\n if reset:\n with torch.no_grad(): \n in_process_sample, res = cond_ode_sampler(\n self.score,\n self.prior_fn,\n self.sde_fn,\n (cur_hand_dof, cur_obj_pcl),\n t0=0.5,\n device=self.device,\n num_steps=51,\n batch_size=B,\n space=self.args.space,\n )\n goal_pose = in_process_sample[-1,:,:]\n return goal_pose\n else:\n if self.args.space == 'riemann':\n if 'direct' in self.args.score_model_path:\n cur_hand_dof = self.vec_env.dof_norm(cur_hand_dof,inv=True)\n cur_hand_dof = self.action2grad(cur_hand_dof)\n\n if t is None:\n batch_time_step = torch.ones(B, device=self.device).unsqueeze(1) * self.t0\n else:\n t_max = 0.5\n t_min = 1e-5\n t = torch.tanh(t) * (t_max - t_min) / 2 + (t_max + t_min)/2\n batch_time_step = torch.clamp(t.reshape(B,-1), 1e-5, 0.5)\n self.vec_env.extras['t_value'] = torch.mean(abs(batch_time_step),-1)\n\n if self.args.space == 'riemann':\n grad = torch.zeros(B,36,device=self.device)\n elif self.args.space == 'euler':\n grad = torch.zeros(B,18,device=self.device)\n\n bz = 256\n iter_num = int(np.ceil(B/bz))\n\n for order in range(iter_num):\n with torch.no_grad(): \n if self.args.space == 'riemann':\n grad[order*bz:(order+1)*bz,:36] = self.score((cur_hand_dof[order*bz:(order+1)*bz,:], cur_obj_pcl[order*bz:(order+1)*bz,:]), batch_time_step[order*bz:(order+1)*bz,:]).detach()\n elif self.args.space == 'euler': \n grad[order*bz:(order+1)*bz,:18] = self.score((cur_hand_dof[order*bz:(order+1)*bz,:], cur_obj_pcl[order*bz:(order+1)*bz,:]), batch_time_step[order*bz:(order+1)*bz,:]).detach()\n\n if self.args.space == 'riemann':\n grad = self.action2grad(grad, inv=True, cur_x=cur_hand_dof)\n\n if 'pure_ori_similarity' in self.vec_env.reward_type:\n self.ori_grad = grad.clone()\n\n if 'direct' not in self.args.score_model_path:\n #denormalize to dof original range\n grad = grad * self.vec_env.shadow_hand_dof_range[self.vec_env.actuated_dof_indices] / 2\n\n if self.grad_process is not None:\n if 'norm' in self.grad_process:\n grad = self.grad_norm(grad)\n if 'clip' in self.grad_process:\n grad = torch.clamp(grad,-self.grad_scale,self.grad_scale)\n if 'scale' in self.grad_process:\n grad = grad * self.grad_scale\n\n if 'pure_ori_similarity' not in self.vec_env.reward_type:\n self.ori_grad = grad.clone()\n\n if self.action_type != 'controlt':\n current_obs[:,-18:] = grad\n\n # print(grad[0])\n return current_obs, grad\n \n def process_actions(self, actions, grad):\n if self.action_type=='joint':\n if self.sub_action_type=='add+jointscale':\n self.vec_env.extras['grad_ss_mean'] = torch.mean(abs(actions[:,:18]),-1)\n self.vec_env.extras['grad_ss_std'] = torch.std(abs(actions[:,:18]),-1)\n self.vec_env.extras['residual_mean'] = torch.mean(abs(actions[:,18:]),-1)\n self.vec_env.extras['residual_std'] = torch.std(abs(actions[:,18:]),-1)\n step_actions = grad*actions[:,:18] + actions[:,18:]\n else:\n step_actions = actions*grad\n elif self.action_type=='direct':\n step_actions = actions\n elif 'gf' in self.action_type:\n step_actions = grad\n return step_actions\n\n def compute_action(self, current_obs, current_states=None, mode='train'):\n # compute gf\n if 'gf' in self.vec_env.sub_obs_type:\n 
current_obs, grad = self.get_obs_with_grad(current_obs)\n else:\n grad = torch.zeros((current_obs.size(0),18), device=self.device)\n\n if self.pointnet_finetune:\n batch_num = current_obs.size(0)//self.finetune_pointnet_bz + 1\n for _ in range(batch_num):\n current_obs_batch = current_obs[self.finetune_pointnet_bz*_:self.finetune_pointnet_bz*(_+1),:]\n # current_states_batch = current_states[:,self.finetune_pointnet_bz*batch_num+self.finetune_pointnet_bz*(batch_num+1)]\n if mode=='train':\n actions_batch, actions_log_prob_batch, values_batch, mu_batch, sigma_batch = self.actor_critic.act(current_obs_batch, current_states)\n else:\n actions_batch = self.actor_critic.act_inference(current_obs_batch)\n if _ == 0:\n if mode=='train':\n actions, actions_log_prob, values, mu, sigma = actions_batch, actions_log_prob_batch, values_batch, mu_batch, sigma_batch\n else:\n actions = actions_batch\n else:\n if mode=='train':\n actions = torch.cat([actions, actions_batch])\n actions_log_prob = torch.cat([actions_log_prob,actions_log_prob_batch])\n values = torch.cat([values,values_batch])\n mu = torch.cat([mu, mu_batch])\n sigma = torch.cat([sigma, sigma_batch])\n else:\n actions = torch.cat([actions, actions_batch])\n else:\n if mode=='train':\n actions, actions_log_prob, values, mu, sigma = self.actor_critic.act(current_obs, current_states)\n else:\n actions = self.actor_critic.act_inference(current_obs)\n\n if mode=='train':\n return actions, actions_log_prob, values, mu, sigma, grad\n else:\n return actions, grad"
},
{
"identifier": "load_cfg",
"path": "utils/config.py",
"snippet": "def load_cfg(args):\n with open(os.path.join(os.path.dirname(__file__), '../ConDexEnv/condexenvs/cfg/train/', args.cfg_train+'.yaml'), 'r') as f:\n cfg_train = yaml.load(f, Loader=yaml.SafeLoader)\n\n logdir = args.logdir\n\n # Set deterministic mode\n if args.torch_deterministic:\n cfg_train[\"torch_deterministic\"] = True\n\n # Override seed if passed on the command line\n if args.seed is not None:\n cfg_train[\"seed\"] = args.seed\n\n log_id = args.logdir + \"_{}\".format(args.experiment)\n\n logdir = os.path.realpath(log_id)\n # os.makedirs(logdir, exist_ok=True)\n\n return cfg_train, logdir"
},
{
"identifier": "get_args",
"path": "utils/config.py",
"snippet": "def get_args(benchmark=False, use_rlg_config=False):\n custom_parameters = [\n \n # env \n {\"name\": \"--headless\", \"action\": \"store_true\", \"default\": False, \"help\": \"Force display off at all times\"},\n {\"name\": \"--rl_device\", \"type\": str, \"default\": \"cuda:1\", \"help\": \"Choose CPU or GPU device for inferencing policy network\"},\n {\"name\": \"--randomize\", \"action\": \"store_true\", \"default\": False, \"help\": \"Apply physics domain randomization\"},\n {\"name\": \"--num_envs\", \"type\": int, \"default\": 2, \"help\": \"Number of environments to create - override config file\"},\n {\"name\": \"--episode_length\", \"type\": int, \"default\": 0, \"help\": \"Episode length, by default is read from yaml config\"},\n {\"name\": \"--seed\", \"type\": int, \"help\": \"Random seed\"},\n {\"name\": \"--points_per_object\", \"type\": int, \"default\": 1024, \"help\": \"points for each object pcl\"},\n {\"name\": \"--method\", \"type\": str, \"default\": \"gf+rl\", \"help\": \"method\"},\n {\"name\": \"--run_device_id\", \"type\": int, \"help\": \"device id\"},\n {\"name\": \"--dataset_type\", \"type\": str, \"default\": \"train\", \"help\": \"method\"},\n # mode\n {\"name\": \"--mode\", \"type\": str, \"default\": \"train\", \"help\": \"env_mode\"},\n {\"name\": \"--test\", \"action\": \"store_true\", \"default\": False, \"help\": \"Run trained policy, no training\"},\n {\"name\": \"--eval_times\", \"type\": int, \"default\": 5, \"help\": \"eval times for each object\"},\n {\"name\": \"--constrained\", \"action\": \"store_true\", \"help\": \"whether constrain base\"},\n \n # score matching parameter\n {\"name\": \"--sigma\", \"type\": float, \"default\": 25, \"help\": \"eval times for each object\"},\n {\"name\": \"--t0\", \"type\": float, \"default\": 0.1, \"help\": \"t0 for sample\"},\n {\"name\": \"--hidden_dim\", \"type\": int, \"default\": 1024, \"help\": \"num of hidden dim\"},\n {\"name\": \"--embed_dim\", \"type\": int, \"default\": 512, \"help\": \"num of embed_dim\"},\n {\"name\": \"--score_mode\", \"type\": str, \"default\": \"target\", \"help\": \"score mode\"},\n {\"name\": \"--space\", \"type\": str, \"default\": \"riemann\", \"help\": \"angle space\"},\n {\"name\": \"--relative\", \"action\": \"store_false\", \"help\": \"relative pcl representation\"},\n {\"name\": \"--score_model_path\", \"type\": str, \"default\": \"./logs/train_all_rel_p2cuda_v_2e-4_2\", \"help\": \"pretrain score model path\"},\n # rl train \n {\"name\": \"--torch_deterministic\", \"action\": \"store_true\", \"default\": False, \"help\": \"Apply additional PyTorch settings for more deterministic behaviour\"},\n {\"name\": \"--metadata\", \"action\": \"store_true\", \"default\": False, \"help\": \"Requires --experiment flag, adds physics engine, sim device, pipeline info and if domain randomization is used to the experiment name provided by user\"},\n {\"name\": \"--resume\", \"type\": int, \"default\": 0, \"help\": \"Resume training or start testing from a checkpoint\"},\n {\"name\": \"--cfg_train\", \"type\": str, \"default\": \"ShadowHandConPPO\"},\n {\"name\": \"--max_iterations\", \"type\": int, \"default\": 0, \"help\": \"Set a maximum number of training iterations\"},\n {\"name\": \"--minibatch_size\", \"type\": int, \"default\": -1, \"help\": \"Set batch size for PPO optimization step. Supported only by rl_games. 
If not -1 overrides the config settings.\"},\n # log\n {\"name\": \"--logdir\", \"type\": str, \"default\": \"logs/gfppo/\"}, \n {\"name\": \"--experiment\", \"type\": str, \"default\": \"Base\", \"help\": \"Experiment name. If used with --metadata flag an additional information about physics engine, sim device, pipeline and domain randomization will be added to the name\"},\n {\"name\": \"--model_dir\", \"type\": str, \"default\": \"\", \"help\": \"Choose a model dir\"},\n {\"name\": \"--exp_name\", \"type\": str, \"default\": \"ours\", \"help\": \"exp_name\"},\n {\"name\": \"--eval_name\", \"type\": str, \"default\": \"ours\", \"help\": \"exp_name\"},\n {\"name\": \"--vis_env_num\", \"type\": int, \"default\": \"0\", \"help\": \"vis env num\"},\n ]\n \n\n # parse arguments\n args = gymutil.parse_arguments(\n description=\"RL Policy\",\n custom_parameters=custom_parameters)\n\n # allignment with examples\n args.device_id = args.compute_device_id\n args.device = args.sim_device_type if args.use_gpu_pipeline else 'cpu'\n\n if args.test:\n args.train = False\n else:\n args.train = True\n\n return args"
},
{
"identifier": "set_np_formatting",
"path": "utils/config.py",
"snippet": "def set_np_formatting():\n np.set_printoptions(edgeitems=30, infstr='inf',\n linewidth=4000, nanstr='nan', precision=2,\n suppress=False, threshold=10000, formatter=None)"
}
] | import isaacgym
import condexenvs
import torch
import os
import sys
from Algorithms.ppo import GFPPO
from utils.config import load_cfg, get_args, set_np_formatting | 10,617 | sys.path.append(os.path.dirname(os.path.dirname(__file__)))
if __name__ == '__main__':
set_np_formatting()
args = get_args()
| sys.path.append(os.path.dirname(os.path.dirname(__file__)))
if __name__ == '__main__':
set_np_formatting()
args = get_args() | cfg_train, logdir = load_cfg(args) | 1 | 2023-11-09 06:08:40+00:00 | 12k |
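
For illustration of the entry above: the custom_parameters list handed to gymutil.parse_arguments uses argparse-style keys (name, type, default, action, help). The sketch below expands such a list with the standard argparse module; it is an approximation for readability, not the isaacgym.gymutil implementation, only a few of the original parameters are repeated, and build_parser is a made-up helper.

# Approximation only -- shows how a custom_parameters list of dicts maps onto argparse.
import argparse

custom_parameters = [
    {"name": "--headless", "action": "store_true", "default": False, "help": "Force display off at all times"},
    {"name": "--num_envs", "type": int, "default": 2, "help": "Number of environments to create - override config file"},
    {"name": "--mode", "type": str, "default": "train", "help": "env_mode"},
]

def build_parser(params):
    parser = argparse.ArgumentParser(description="RL Policy (sketch)")
    for param in params:
        # every key except "name" is passed through as an argparse keyword
        kwargs = {key: value for key, value in param.items() if key != "name"}
        parser.add_argument(param["name"], **kwargs)
    return parser

if __name__ == "__main__":
    args = build_parser(custom_parameters).parse_args(["--num_envs", "8"])
    print(args.headless, args.num_envs, args.mode)  # -> False 8 train

In the original script the parsed args then feed the ground-truth continuation cfg_train, logdir = load_cfg(args), which, judging by the returned names, loads the training configuration and the log directory.
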
ApolloAuto/apollo-model-centerpoint | paddle3d/models/detection/smoke/smoke.py | [
{
"identifier": "manager",
"path": "paddle3d/apis/manager.py",
"snippet": "class ComponentManager:\n def __init__(self, *, name: str, description: str = ''):\n def __len__(self):\n def __repr__(self):\n def __getitem__(self, item: str):\n def components_dict(self) -> dict:\n def name(self) -> str:\n def description(self) -> str:\n def _add_single_component(self, component: Callable):\n def add_component(self, components: Union[Callable, Iterable[Callable]]\n ) -> Union[Callable, Iterable[Callable]]:\nVOXEL_ENCODERS = ComponentManager(name=\"voxel_encoders\")\nMIDDLE_ENCODERS = ComponentManager(name=\"middle_encoders\")\nBACKBONES = ComponentManager(name=\"backbones\")\nMODELS = ComponentManager(name=\"models\")\nNECKS = ComponentManager(name=\"necks\")\nHEADS = ComponentManager(name=\"heads\")\nLOSSES = ComponentManager(name=\"losses\")\nDATASETS = ComponentManager(name=\"datasets\")\nTRANSFORMS = ComponentManager(name=\"transforms\")\nLR_SCHEDULERS = ComponentManager(name=\"lr_schedulers\")\nOPTIMIZERS = ComponentManager(name=\"optimizers\")\nVOXELIZERS = ComponentManager(name=\"voxelizers\")\nPOINT_ENCODERS = ComponentManager(name=\"point_encoders\")\nPOSITIONAL_ENCODING = ComponentManager(name=\"POSITIONAL_ENCODING\")\nTRANSFORMERS = ComponentManager(name=\"TRANSFORMERS\")\nTRANSFORMER_ENCODERS = ComponentManager(name=\"TRANSFORMER_ENCODERS\")\nTRANSFORMER_ENCODER_LAYERS = ComponentManager(name=\"TRANSFORMER_ENCODER_LAYERS\")\nATTENTIONS = ComponentManager(name=\"ATTENTIONS\")\nBBOX_CODERS = ComponentManager(name=\"BBOX_CODERS\")\nBBOX_ASSIGNERS = ComponentManager(name=\"BBOX_ASSIGNERS\")\nMATCH_COSTS = ComponentManager(name=\"MATCH_COSTS\")\nBBOX_SAMPLERS = ComponentManager(name=\"BBOX_SAMPLERS\")\nTRANSFORMER_DECODER_LAYERS = ComponentManager(name=\"TRANSFORMER_DECODER_LAYERS\")\nTRANSFORMER_DECODERS = ComponentManager(name=\"TRANSFORMER_DECODERS\")"
},
{
"identifier": "BBoxes2D",
"path": "paddle3d/geometries/bbox.py",
"snippet": "class BBoxes2D(_Structure):\n \"\"\"\n \"\"\"\n\n def __init__(self, data: np.ndarray):\n if not isinstance(data, np.ndarray):\n data = np.array(data)\n\n if data.ndim != 2:\n raise ValueError('Illegal 2D box data with number of dim {}'.format(\n data.ndim))\n\n if data.shape[1] != 4:\n raise ValueError('Illegal 2D box data with shape {}'.format(\n data.shape))\n\n def scale(self, factor: float):\n ...\n\n def translate(self, translation: np.ndarray):\n ...\n\n def rotate(self, rotation: np.ndarray):\n ...\n\n def horizontal_flip(self, image_width: float):\n \"\"\"\n The inputs are pixel indices, they are flipped by `(W - 1 - x, H - 1 - y)`.\n \"\"\"\n self[:, 0] = image_width - self[:, 0] - 1\n\n def horizontal_flip_coords(self, image_width: float):\n \"\"\"\n The inputs are floating point coordinates, they are flipped by `(W - x, H - y)`.\n \"\"\"\n self[:, 0], self[:,\n 2] = image_width - self[:, 2], image_width - self[:, 0]\n\n def vertical_flip(self, image_height: float):\n self[:, 1] = image_height - self[:, 1] - 1\n\n def resize(self, h: int, w: int, newh: int, neww: int):\n factor_x = neww / w\n factor_y = newh / h\n self[:, 0::2] *= factor_x\n self[:, 1::2] *= factor_y"
},
{
"identifier": "BBoxes3D",
"path": "paddle3d/geometries/bbox.py",
"snippet": "class BBoxes3D(_Structure):\n \"\"\"\n \"\"\"\n\n def __init__(self,\n data: np.ndarray,\n coordmode: CoordMode = 0,\n velocities: List[float] = None,\n origin: List[float] = [0.5, 0.5, 0.5],\n rot_axis: int = 2):\n if not isinstance(data, np.ndarray):\n data = np.array(data)\n\n self.coordmode = coordmode\n self.velocities = velocities\n self.origin = origin\n self.rot_axis = rot_axis\n\n @property\n def corners_3d(self):\n # corners_3d format: x0y0z0, x0y0z1, x0y1z1, x0y1z0, x1y0z0, x1y0z1, x1y1z1, x1y1z0\n dx, dy, dz = self[:, 3:6].T\n b = dz.shape[0]\n\n x_corners = np.array([[0., 0., 0., 0., 1., 1., 1., 1.]],\n self.dtype).repeat(\n b, axis=0)\n y_corners = np.array([[0., 0., 1., 1., 0., 0., 1., 1.]],\n self.dtype).repeat(\n b, axis=0)\n z_corners = np.array([[0., 1., 1., 0., 0., 1., 1., 0.]],\n self.dtype).repeat(\n b, axis=0)\n\n x_corners = (\n dx[:, np.newaxis] * (x_corners - self.origin[0]))[:, :, np.newaxis]\n y_corners = (\n dy[:, np.newaxis] * (y_corners - self.origin[1]))[:, :, np.newaxis]\n z_corners = (\n dz[:, np.newaxis] * (z_corners - self.origin[2]))[:, :, np.newaxis]\n corners = np.concatenate([x_corners, y_corners, z_corners], axis=-1)\n\n angle = self[:, -1]\n corners = rotation_3d_in_axis(corners, angle, axis=self.rot_axis)\n centers = self[:, 0:3][:, np.newaxis, :]\n corners += centers\n\n return corners\n\n @property\n def corners_2d(self):\n # corners_2d format: x0y0, x0y1, x1y1, x1y0\n dx, dy = self[:, 3:5].T\n b = dy.shape[0]\n\n x_corners = np.array([[0., 0., 1., 1.]], self.dtype).repeat(b, axis=0)\n y_corners = np.array([[0., 1., 1., 0.]], self.dtype).repeat(b, axis=0)\n\n x_corners = (\n dx[:, np.newaxis] * (x_corners - self.origin[0]))[:, :, np.newaxis]\n y_corners = (\n dy[:, np.newaxis] * (y_corners - self.origin[1]))[:, :, np.newaxis]\n corners = np.concatenate([x_corners, y_corners], axis=-1)\n\n angle = self[:, -1]\n rot_sin = np.sin(angle)\n rot_cos = np.cos(angle)\n rotation_matrix = np.array([[rot_cos, -rot_sin], [rot_sin, rot_cos]],\n dtype=self.dtype)\n #rotation_matrix = rotation_matrix.transpose([2, 0, 1])\n #corners = corners @ rotation_matrix #TODO(luoqianhui)\n corners = np.einsum(\"aij,jka->aik\", corners, rotation_matrix)\n\n centers = self[:, 0:2][:, np.newaxis, :]\n corners += centers\n\n return corners\n\n def scale(self, factor: float):\n \"\"\"\n \"\"\"\n # Scale x, y, z, w, l, h, except the orientation\n self[..., :-1] = self[..., :-1] * factor\n\n # Scale velocities\n if self.velocities is not None:\n self.velocities[..., :] = self.velocities[..., :] * factor\n\n def translate(self, translation: np.ndarray):\n self[..., :3] = self[..., :3] + translation\n\n def rotate_around_z(self, angle: np.ndarray):\n # Rotation matrix around the z-axis\n rot_sin = np.sin(angle)\n rot_cos = np.cos(angle)\n rotation_matrix = np.array(\n [[rot_cos, -rot_sin, 0], [rot_sin, rot_cos, 0], [0, 0, 1]],\n dtype=self.dtype)\n\n # Rotate x,y,z\n self[..., :3] = self[..., :3] @ rotation_matrix\n\n # Rotate velocities\n if self.velocities is not None:\n self.velocities[..., :2] = (np.hstack([\n self.velocities[..., :2],\n np.zeros(\n (self.velocities.shape[0], 1), dtype=self.velocities.dtype)\n ]) @ rotation_matrix)[..., :2]\n\n # Update orientation\n self[..., -1] += angle\n\n def horizontal_flip(self):\n \"\"\"\n The inputs are pixel indices\n \"\"\"\n self[:, 0] = -self[:, 0]\n if self.velocities is not None:\n self.velocities[:, 0] = -self.velocities[:, 0]\n self[:,\n -1] = -self[:,\n -1] + 2 * np.pi # TODO(luoqianhui): CHECK THIS 2 * np.pi is 
needed\n\n def horizontal_flip_coords(self):\n \"\"\"\n The inputs are floating point coordinates\n \"\"\"\n new_box3d_quat = np.stack(\n [self[:, 3], -self[:, 2], -self[:, 1], self[:, 0]], 1)\n self[:, :4] = new_box3d_quat\n self[:, 4] = -self[:, 4]\n\n def to_vision_based_3d_box(self):\n height, width, length = self[:, 3:4], self[:, 4:5], self[:, 5:6]\n x, y, z = self[:, 0:1], self[:, 1:2], self[:, 2:3]\n rotation = self[:, 6]\n tvec = np.concatenate([x, y - height / 2, z], axis=1)\n box_pose = []\n for i in range(rotation.shape[0]):\n wxyz = Quaternion(\n Quaternion(axis=[1, 0, 0], radians=np.pi / 2) * Quaternion(\n axis=[0, 0, 1], radians=-rotation[i]))\n box_pose.append(wxyz.elements.astype(np.float32))\n box_pose = np.stack(box_pose, axis=0)\n box3d_new = np.concatenate([box_pose, tvec, width, length, height],\n axis=1)\n return box3d_new\n\n def vertical_flip(self):\n self[:, 1] = -self[:, 1]\n if self.velocities is not None:\n self.velocities[:, 1] = -self.velocities[:, 1]\n self[:, -1] = -self[:, -1] + np.pi\n\n @staticmethod\n def limit_period(val, offset: float = 0.5, period: float = np.pi):\n return val - np.floor(val / period + offset) * period\n\n def get_mask_of_bboxes_outside_range(self, point_cloud_range: np.ndarray):\n bboxes_bev = self.corners_2d\n # Represent the bev range as a bounding box\n limit_polygons = minmax_range_3d_to_corner_2d(point_cloud_range)\n mask = points_in_convex_polygon_2d(\n bboxes_bev.reshape(-1, 2), limit_polygons)\n return np.any(mask.reshape(-1, 4), axis=1)\n\n def get_mask_of_small_bboxes(self, size_thr: np.ndarray):\n dim = self[:, 3:6]\n thr = size_thr.reshape(1, 3).repeat(self.shape[0], axis=0)\n mask = np.array((dim > thr))\n mask = np.all(mask, axis=1)\n return mask.nonzero()\n\n def masked_select(self, mask):\n selected_data = self[mask]\n selected_velocities = self.velocities\n if self.velocities is not None:\n selected_velocities = self.velocities[mask]\n selected_bbox = BBoxes3D(selected_data, self.coordmode,\n selected_velocities, self.origin,\n self.rot_axis)\n return selected_bbox"
},
{
"identifier": "CoordMode",
"path": "paddle3d/geometries/bbox.py",
"snippet": "class CoordMode(Enum):\n \"\"\"\n \"\"\"\n # z front\n # /\n # /\n # 0 ------> x right\n # |\n # |\n # v\n # y down\n KittiCamera = 0\n\n # up z\n # ^ x front\n # | /\n # | /\n # left y <------ 0\n KittiLidar = 1\n\n # up z\n # ^ y front\n # | /\n # | /\n # 0 ------> x right\n NuScenesLidar = 2"
},
{
"identifier": "BaseMonoModel",
"path": "paddle3d/models/base/base_mono_detection.py",
"snippet": "class BaseMonoModel(BaseDetectionModel):\n def __init__(self,\n box_with_velocity: bool = False,\n need_camera_to_image: bool = True,\n need_lidar_to_camera: bool = False,\n need_down_ratios: bool = False,\n image_height: Optional[int] = -1,\n image_width: Optional[int] = -1):\n super().__init__(box_with_velocity=box_with_velocity)\n self.need_camera_to_image = need_camera_to_image\n self.need_lidar_to_camera = need_lidar_to_camera\n self.image_height = image_height\n self.image_width = image_width\n self.need_down_ratios = need_down_ratios\n\n @property\n def inputs(self) -> List[dict]:\n images = {\n 'name': 'images',\n 'dtype': 'float32',\n 'shape': [1, 3, self.image_height, self.image_width]\n }\n res = [images]\n\n if self.need_camera_to_image:\n intrinsics = {\n 'name': 'trans_cam_to_img',\n 'dtype': 'float32',\n 'shape': [1, 3, 4]\n }\n res.append(intrinsics)\n\n if self.need_lidar_to_camera:\n poses = {\n 'name': 'trans_lidar_to_cam',\n 'dtype': 'float32',\n 'shape': [1, 4, 4]\n }\n res.append(poses)\n\n return res\n\n @property\n def sensor(self) -> str:\n return \"camera\""
},
{
"identifier": "PostProcessor",
"path": "paddle3d/models/detection/smoke/processor.py",
"snippet": "class PostProcessor(nn.Layer):\n def __init__(self,\n depth_ref,\n dim_ref,\n reg_head=10,\n det_threshold=0.25,\n max_detection=50,\n pred_2d=True):\n super().__init__()\n\n self.smoke_coder = SMOKECoder(depth_ref, dim_ref)\n self.reg_head = reg_head\n self.max_detection = max_detection\n self.det_threshold = det_threshold\n self.pred_2d = pred_2d\n\n def export_forward(self, predictions, cam_info):\n\n pred_heatmap, pred_regression = predictions[0], predictions[1]\n batch = pred_heatmap.shape[0]\n\n heatmap = nms_hm(pred_heatmap)\n\n topk_dict = select_topk(\n heatmap,\n K=self.max_detection,\n )\n scores, indexs = topk_dict[\"topk_score\"], topk_dict[\"topk_inds_all\"]\n clses, ys = topk_dict[\"topk_clses\"], topk_dict[\"topk_ys\"]\n xs = topk_dict[\"topk_xs\"]\n\n pred_regression = select_point_of_interest(batch, indexs,\n pred_regression)\n\n pred_regression_pois = paddle.reshape(\n pred_regression, (numel_t(pred_regression) // 10, 10))\n\n # yapf: disable\n pred_proj_points = paddle.concat([\n paddle.reshape(xs, (numel_t(xs), 1)),\n paddle.reshape(ys, (numel_t(ys), 1))\n ], axis=1)\n # yapf: enable\n\n # FIXME: fix hard code here\n pred_depths_offset = pred_regression_pois[:, 0]\n pred_proj_offsets = pred_regression_pois[:, 1:3]\n pred_dimensions_offsets = pred_regression_pois[:, 3:6]\n pred_orientation = pred_regression_pois[:, 6:8]\n pred_bbox_size = pred_regression_pois[:, 8:10]\n\n pred_depths = self.smoke_coder.decode_depth(pred_depths_offset)\n pred_locations = self.smoke_coder.decode_location_without_transmat(\n pred_proj_points, pred_proj_offsets, pred_depths, cam_info[0],\n cam_info[1])\n pred_dimensions = self.smoke_coder.decode_dimension(\n clses, pred_dimensions_offsets)\n # we need to change center location to bottom location\n pred_locations[:, 1] += pred_dimensions[:, 1] / 2\n\n pred_rotys, pred_alphas = self.smoke_coder.decode_orientation(\n pred_orientation, pred_locations)\n box2d = self.smoke_coder.decode_bbox_2d_without_transmat(\n pred_proj_points, pred_bbox_size, cam_info[1])\n # change variables to the same dimension\n clses = paddle.reshape(clses, (-1, 1))\n pred_alphas = paddle.reshape(pred_alphas, (-1, 1))\n pred_rotys = paddle.reshape(pred_rotys, (-1, 1))\n scores = paddle.reshape(scores, (-1, 1))\n\n l, h, w = pred_dimensions.chunk(3, 1)\n pred_dimensions = paddle.concat([h, w, l], axis=1)\n\n # yapf: disable\n result = paddle.concat([\n clses, pred_alphas, box2d, pred_dimensions, pred_locations,\n pred_rotys, scores\n ], axis=1)\n # yapf: enable\n\n return result\n\n def forward(self, predictions, targets):\n\n pred_heatmap, pred_regression = predictions[0], predictions[1]\n batch = pred_heatmap.shape[0]\n\n heatmap = nms_hm(pred_heatmap)\n\n topk_dict = select_topk(\n heatmap,\n K=self.max_detection,\n )\n scores, indexs = topk_dict[\"topk_score\"], topk_dict[\"topk_inds_all\"]\n clses, ys = topk_dict[\"topk_clses\"], topk_dict[\"topk_ys\"]\n xs = topk_dict[\"topk_xs\"]\n\n pred_regression = select_point_of_interest(batch, indexs,\n pred_regression)\n\n pred_regression_pois = paddle.reshape(pred_regression,\n (-1, self.reg_head))\n\n pred_proj_points = paddle.concat(\n [paddle.reshape(xs, (-1, 1)),\n paddle.reshape(ys, (-1, 1))], axis=1)\n\n # FIXME: fix hard code here\n pred_depths_offset = pred_regression_pois[:, 0]\n pred_proj_offsets = pred_regression_pois[:, 1:3]\n pred_dimensions_offsets = pred_regression_pois[:, 3:6]\n pred_orientation = pred_regression_pois[:, 6:8]\n pred_bbox_size = pred_regression_pois[:, 8:10]\n\n pred_depths = 
self.smoke_coder.decode_depth(pred_depths_offset)\n pred_locations = self.smoke_coder.decode_location(\n pred_proj_points, pred_proj_offsets, pred_depths, targets[\"K\"],\n targets[\"trans_mat\"])\n pred_dimensions = self.smoke_coder.decode_dimension(\n clses, pred_dimensions_offsets)\n # we need to change center location to bottom location\n pred_locations[:, 1] += pred_dimensions[:, 1] / 2\n\n pred_rotys, pred_alphas = self.smoke_coder.decode_orientation(\n pred_orientation, pred_locations)\n\n if self.pred_2d:\n box2d = self.smoke_coder.decode_bbox_2d(\n pred_proj_points, pred_bbox_size, targets[\"trans_mat\"],\n targets[\"image_size\"])\n else:\n box2d = paddle.to_tensor([0, 0, 0, 0])\n\n # change variables to the same dimension\n clses = paddle.reshape(clses, (-1, 1))\n pred_alphas = paddle.reshape(pred_alphas, (-1, 1))\n pred_rotys = paddle.reshape(pred_rotys, (-1, 1))\n scores = paddle.reshape(scores, (-1, 1))\n\n l, h, w = pred_dimensions.chunk(3, 1)\n pred_dimensions = paddle.concat([h, w, l], axis=1)\n\n # yapf: disable\n result = paddle.concat([\n clses, pred_alphas, box2d, pred_dimensions, pred_locations,\n pred_rotys, scores\n ], axis=1)\n # yapf: enable\n\n keep_idx = result[:, -1] > self.det_threshold\n\n if paddle.sum(keep_idx.astype(\"int32\")) >= 1:\n # Add indexs to determine which sample each box belongs to\n batch_size = targets['K'].shape[0]\n ids = paddle.arange(batch_size, dtype=paddle.float32)\n ids = ids.unsqueeze(0).expand([self.max_detection, batch_size])\n ids = ids.transpose([1, 0]).reshape([-1, 1])\n result = paddle.concat([result, ids], 1)\n\n # Filter out low confidence boxes\n keep_idx = paddle.nonzero(keep_idx)\n result = paddle.gather(result, keep_idx)\n else:\n result = paddle.to_tensor([])\n\n return result"
},
{
"identifier": "SMOKELossComputation",
"path": "paddle3d/models/detection/smoke/smoke_loss.py",
"snippet": "class SMOKELossComputation(object):\n \"\"\"Convert targets and preds to heatmaps®s, compute\n loss with CE and L1\n \"\"\"\n\n def __init__(self,\n depth_ref,\n dim_ref,\n reg_loss=\"DisL1\",\n loss_weight=(1., 10.),\n max_objs=50):\n\n self.smoke_coder = SMOKECoder(depth_ref, dim_ref)\n self.cls_loss = FocalLoss(alpha=2, beta=4)\n self.reg_loss = reg_loss\n self.loss_weight = loss_weight\n self.max_objs = max_objs\n\n def prepare_targets(self, targets):\n \"\"\"get heatmaps, regressions and 3D infos from targets\n \"\"\"\n\n heatmaps = targets[\"hm\"]\n regression = targets[\"reg\"]\n cls_ids = targets[\"cls_ids\"]\n proj_points = targets[\"proj_p\"]\n dimensions = targets[\"dimensions\"]\n locations = targets[\"locations\"]\n rotys = targets[\"rotys\"]\n trans_mat = targets[\"trans_mat\"]\n K = targets[\"K\"]\n reg_mask = targets[\"reg_mask\"]\n flip_mask = targets[\"flip_mask\"]\n bbox_size = targets[\"bbox_size\"]\n c_offsets = targets[\"c_offsets\"]\n\n return heatmaps, regression, dict(\n cls_ids=cls_ids,\n proj_points=proj_points,\n dimensions=dimensions,\n locations=locations,\n rotys=rotys,\n trans_mat=trans_mat,\n K=K,\n reg_mask=reg_mask,\n flip_mask=flip_mask,\n bbox_size=bbox_size,\n c_offsets=c_offsets)\n\n def prepare_predictions(self, targets_variables, pred_regression):\n \"\"\"decode model predictions\n \"\"\"\n batch, channel = pred_regression.shape[0], pred_regression.shape[1]\n targets_proj_points = targets_variables[\"proj_points\"]\n\n # obtain prediction from points of interests\n pred_regression_pois = select_point_of_interest(\n batch, targets_proj_points, pred_regression)\n pred_regression_pois = paddle.reshape(pred_regression_pois,\n (-1, channel))\n\n # FIXME: fix hard code here\n pred_depths_offset = pred_regression_pois[:, 0]\n pred_proj_offsets = pred_regression_pois[:, 1:3]\n pred_dimensions_offsets = pred_regression_pois[:, 3:6]\n pred_orientation = pred_regression_pois[:, 6:8]\n pred_bboxsize = pred_regression_pois[:, 8:10]\n\n pred_depths = self.smoke_coder.decode_depth(pred_depths_offset)\n pred_locations = self.smoke_coder.decode_location(\n targets_proj_points, pred_proj_offsets, pred_depths,\n targets_variables[\"K\"], targets_variables[\"trans_mat\"])\n\n pred_dimensions = self.smoke_coder.decode_dimension(\n targets_variables[\"cls_ids\"],\n pred_dimensions_offsets,\n )\n # we need to change center location to bottom location\n pred_locations[:, 1] += pred_dimensions[:, 1] / 2\n\n pred_rotys = self.smoke_coder.decode_orientation(\n pred_orientation, targets_variables[\"locations\"],\n targets_variables[\"flip_mask\"])\n\n if self.reg_loss == \"DisL1\":\n pred_box3d_rotys = self.smoke_coder.encode_box3d(\n pred_rotys, targets_variables[\"dimensions\"],\n targets_variables[\"locations\"])\n\n pred_box3d_dims = self.smoke_coder.encode_box3d(\n targets_variables[\"rotys\"], pred_dimensions,\n targets_variables[\"locations\"])\n pred_box3d_locs = self.smoke_coder.encode_box3d(\n targets_variables[\"rotys\"], targets_variables[\"dimensions\"],\n pred_locations)\n\n return dict(\n ori=pred_box3d_rotys,\n dim=pred_box3d_dims,\n loc=pred_box3d_locs,\n bbox=pred_bboxsize,\n )\n\n elif self.reg_loss == \"L1\":\n pred_box_3d = self.smoke_coder.encode_box3d(\n pred_rotys, pred_dimensions, pred_locations)\n return pred_box_3d\n\n def __call__(self, predictions, targets):\n pred_heatmap, pred_regression = predictions[0], predictions[1]\n\n targets_heatmap, targets_regression, targets_variables \\\n = self.prepare_targets(targets)\n\n predict_boxes3d 
= self.prepare_predictions(targets_variables,\n pred_regression)\n\n hm_loss = self.cls_loss(pred_heatmap,\n targets_heatmap) * self.loss_weight[0]\n\n targets_regression = paddle.reshape(\n targets_regression,\n (-1, targets_regression.shape[2], targets_regression.shape[3]))\n\n reg_mask = targets_variables[\"reg_mask\"].astype(\"float32\").flatten()\n reg_mask = paddle.reshape(reg_mask, (-1, 1, 1))\n reg_mask = reg_mask.expand_as(targets_regression)\n\n if self.reg_loss == \"DisL1\":\n reg_loss_ori = F.l1_loss(\n predict_boxes3d[\"ori\"] * reg_mask,\n targets_regression * reg_mask,\n reduction=\"sum\") / (self.loss_weight[1] * self.max_objs)\n\n reg_loss_dim = F.l1_loss(\n predict_boxes3d[\"dim\"] * reg_mask,\n targets_regression * reg_mask,\n reduction=\"sum\") / (self.loss_weight[1] * self.max_objs)\n\n reg_loss_loc = F.l1_loss(\n predict_boxes3d[\"loc\"] * reg_mask,\n targets_regression * reg_mask,\n reduction=\"sum\") / (self.loss_weight[1] * self.max_objs)\n\n reg_loss_size = F.l1_loss(\n predict_boxes3d[\"bbox\"],\n paddle.reshape(targets_variables[\"bbox_size\"],\n (-1, targets_variables[\"bbox_size\"].shape[-1])),\n reduction=\"sum\") / (self.loss_weight[1] * self.max_objs)\n\n losses = dict(\n hm_loss=hm_loss,\n reg_loss=reg_loss_ori + reg_loss_dim + reg_loss_loc,\n size_loss=reg_loss_size)\n\n return hm_loss + reg_loss_ori + reg_loss_dim + reg_loss_loc + reg_loss_size"
},
{
"identifier": "Sample",
"path": "paddle3d/sample.py",
"snippet": "class Sample(_EasyDict):\n \"\"\"\n \"\"\"\n _VALID_MODALITIES = [\"image\", \"lidar\", \"radar\", \"multimodal\", \"multiview\"]\n\n def __init__(self, path: str, modality: str):\n if modality not in self._VALID_MODALITIES:\n raise ValueError('Only modality {} is supported, but got {}'.format(\n self._VALID_MODALITIES, modality))\n\n self.meta = SampleMeta()\n\n self.path = path\n self.data = None\n self.modality = modality.lower()\n\n self.bboxes_2d = None\n self.bboxes_3d = None\n self.labels = None\n\n self.sweeps = []\n self.attrs = None"
},
{
"identifier": "logger",
"path": "paddle3d/utils/logger.py",
"snippet": "class Logger(object):\nclass ProgressBar(object):\n def __init__(self, name: str = None):\n def format(self):\n def disable(self):\n def enable(self):\n def enabled(self) -> bool:\n def __call__(self, log_level: str, msg: str):\n def use_terminator(self, terminator: str):\n def processing(self, msg: str, flush_interval: float = 0.1):\n def _printer():\n def progressbar(self, msg: str, flush_interval: float = 0.1):\n def range(self, stop: int, msg: str):\n def enumerate(self, iterable: Iterable, msg: str):\n def __init__(self, logger: Logger, flush_interval: float = 0.1):\n def update(self, progress: float):"
}
] | import os
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from typing import List, Tuple
from paddle3d.apis import manager
from paddle3d.geometries import BBoxes2D, BBoxes3D, CoordMode
from paddle3d.models.base import BaseMonoModel
from paddle3d.models.detection.smoke.processor import PostProcessor
from paddle3d.models.detection.smoke.smoke_loss import SMOKELossComputation
from paddle3d.sample import Sample
from paddle3d.utils.logger import logger | 7,597 | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
@manager.MODELS.add_component
class SMOKE(BaseMonoModel):
"""
"""
def __init__(self,
backbone,
head,
depth_ref: Tuple,
dim_ref: Tuple,
max_detection: int = 50,
pred_2d: bool = True,
box_with_velocity: bool = False):
super().__init__(
box_with_velocity=box_with_velocity,
need_camera_to_image=True,
need_lidar_to_camera=False,
need_down_ratios=True)
self.backbone = backbone
self.heads = head
self.max_detection = max_detection
self.init_weight()
| # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
@manager.MODELS.add_component
class SMOKE(BaseMonoModel):
"""
"""
def __init__(self,
backbone,
head,
depth_ref: Tuple,
dim_ref: Tuple,
max_detection: int = 50,
pred_2d: bool = True,
box_with_velocity: bool = False):
super().__init__(
box_with_velocity=box_with_velocity,
need_camera_to_image=True,
need_lidar_to_camera=False,
need_down_ratios=True)
self.backbone = backbone
self.heads = head
self.max_detection = max_detection
self.init_weight() | self.loss_computation = SMOKELossComputation( | 6 | 2023-11-08 07:08:03+00:00 | 12k |
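
For illustration of the SMOKE entry above: both PostProcessor and SMOKELossComputation slice the 10-channel regression output at fixed indices (0, 1:3, 3:6, 6:8, 8:10). The sketch below reproduces only that slicing; NumPy stands in for paddle purely to keep it self-contained.

# Channel layout of pred_regression_pois as sliced in the snippets above.
import numpy as np

num_objects = 4
pred_regression_pois = np.random.randn(num_objects, 10).astype(np.float32)

pred_depths_offset = pred_regression_pois[:, 0]          # channel 0: depth offset
pred_proj_offsets = pred_regression_pois[:, 1:3]         # channels 1-2: projected-keypoint offset
pred_dimensions_offsets = pred_regression_pois[:, 3:6]   # channels 3-5: dimension offsets
pred_orientation = pred_regression_pois[:, 6:8]          # channels 6-7: orientation encoding
pred_bbox_size = pred_regression_pois[:, 8:10]           # channels 8-9: 2D box size

print(pred_depths_offset.shape)       # (4,)
print(pred_proj_offsets.shape)        # (4, 2)
print(pred_dimensions_offsets.shape)  # (4, 3)
print(pred_orientation.shape)         # (4, 2)
print(pred_bbox_size.shape)           # (4, 2)

These slices are then turned into 3D boxes by the SMOKECoder decode_* calls shown in the snippets, which is also how the SMOKELossComputation object constructed on the entry's target line decodes predictions during training.
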
camlsys/fl-project-template | project/main.py | [
{
"identifier": "get_client_generator",
"path": "project/client/client.py",
"snippet": "def get_client_generator(\n working_dir: Path,\n net_generator: NetGen,\n dataloader_gen: ClientDataloaderGen,\n train: TrainFunc,\n test: TestFunc,\n) -> ClientGen:\n \"\"\"Return a function which creates a new Client.\n\n Client has access to the working dir,\n can generate a network and can generate a dataloader.\n The client receives train and test functions with pre-defined APIs.\n\n Parameters\n ----------\n working_dir : Path\n The path to the working directory.\n net_generator : NetGen\n The network generator.\n Please respect the pydantic schema.\n dataloader_gen : ClientDataloaderGen\n The dataloader generator.\n Uses the client id to determine partition.\n Please respect the pydantic schema.\n train : TrainFunc\n The train function.\n Please respect the interface and pydantic schema.\n test : TestFunc\n The test function.\n Please respect the interface and pydantic schema.\n\n Returns\n -------\n ClientGen\n The function which creates a new Client.\n \"\"\"\n\n def client_generator(cid: int | str) -> Client:\n \"\"\"Return a new Client.\n\n Parameters\n ----------\n cid : int | str\n The client's ID.\n\n Returns\n -------\n Client\n The new Client.\n \"\"\"\n return Client(\n cid,\n working_dir,\n net_generator,\n dataloader_gen,\n train,\n test,\n )\n\n return client_generator"
},
{
"identifier": "dispatch_config",
"path": "project/dispatch/dispatch.py",
"snippet": "def dispatch_config(cfg: DictConfig) -> ConfigStructure:\n \"\"\"Dispatch the fit/eval config functions based on on the hydra config.\n\n Functionality should be added to the dispatch.py\n file in the task folder.\n Statically specify the new dispatch function in the list,\n function order determines precedence\n if two different tasks may match the config.\n\n Parameters\n ----------\n cfg : DictConfig\n The configuration for the config function.\n Loaded dynamically from the config file.\n\n Returns\n -------\n ConfigStructure\n The config functions.\n \"\"\"\n # Create the list of task dispatches to try\n task_config_functions: list[Callable[[DictConfig], ConfigStructure | None]] = [\n dispatch_mnist_config,\n dispatch_default_config,\n ]\n\n # Match the first function which does not return None\n for task in task_config_functions:\n result = task(cfg)\n if result is not None:\n return result\n\n raise ValueError(\n f\"Unable to match the config generation functions: {cfg}\",\n )"
},
{
"identifier": "dispatch_data",
"path": "project/dispatch/dispatch.py",
"snippet": "def dispatch_data(cfg: DictConfig) -> DataStructure:\n \"\"\"Dispatch the net generator and dataloader client/fed generator functions.\n\n Functionality should be added to the dispatch.py file in the task folder.\n Statically specify the new dispatch function in the list,\n function order determines precedence if two different tasks may match the config.\n\n Parameters\n ----------\n cfg : DictConfig\n The configuration for the data function.\n Loaded dynamically from the config file.\n\n Returns\n -------\n DataStructure\n The net generator and dataloader generator functions.\n \"\"\"\n # Create the list of task dispatches to try\n task_data_dependent_functions: list[\n Callable[[DictConfig], DataStructure | None]\n ] = [\n dispatch_mnist_data,\n dispatch_default_data,\n ]\n\n # Match the first function which does not return None\n for task in task_data_dependent_functions:\n result = task(cfg)\n if result is not None:\n return result\n\n raise ValueError(\n f\"Unable to match the net generator and dataloader generator functions: {cfg}\",\n )"
},
{
"identifier": "dispatch_train",
"path": "project/dispatch/dispatch.py",
"snippet": "def dispatch_train(cfg: DictConfig) -> TrainStructure:\n \"\"\"Dispatch the train/test and fed test functions based on the config file.\n\n Functionality should be added to the dispatch.py file in the task folder.\n Statically specify the new dispatch function in the list,\n function order determines precedence if two different tasks may match the config.\n\n Parameters\n ----------\n cfg : DictConfig\n The configuration for the train function.\n Loaded dynamically from the config file.\n\n Returns\n -------\n TrainStructure\n The train function, test function and the get_fed_eval_fn function.\n \"\"\"\n # Create the list of task dispatches to try\n task_train_functions: list[Callable[[DictConfig], TrainStructure | None]] = [\n dispatch_default_train,\n dispatch_mnist_train,\n ]\n\n # Match the first function which does not return None\n for task in task_train_functions:\n result = task(cfg)\n if result is not None:\n return result\n\n raise ValueError(\n f\"Unable to match the train/test and fed_test functions: {cfg}\",\n )"
},
{
"identifier": "DeterministicClientManager",
"path": "project/fed/server/deterministic_client_manager.py",
"snippet": "class DeterministicClientManager(SimpleClientManager):\n \"\"\"A deterministic client manager.\n\n Samples clients in the same order every time based on the seed. Also allows sampling\n with replacement.\n \"\"\"\n\n def __init__(\n self,\n seed: int,\n enable_resampling: bool = False,\n ) -> None:\n \"\"\"Initialize DeterministicClientManager.\n\n Parameters\n ----------\n seed : int\n The seed to use for deterministic sampling.\n enable_resampling : bool\n Whether to allow sampling with replacement.\n\n Returns\n -------\n None\n \"\"\"\n super().__init__()\n self.seed = seed\n self.rng = random.Random(seed)\n self.enable_resampling = enable_resampling\n\n def sample(\n self,\n num_clients: int,\n min_num_clients: int | None = None,\n criterion: Criterion | None = None,\n ) -> list[ClientProxy]:\n \"\"\"Sample a number of Flower ClientProxy instances.\n\n Guarantees deterministic client sampling and enables\n sampling with replacement.\n\n Parameters\n ----------\n num_clients : int\n The number of clients to sample.\n min_num_clients : Optional[int]\n The minimum number of clients to sample.\n criterion : Optional[Criterion]\n A criterion to select clients.\n\n Returns\n -------\n List[ClientProxy]\n A list of sampled clients.\n \"\"\"\n # Block until at least num_clients are connected.\n if min_num_clients is None:\n min_num_clients = num_clients\n self.wait_for(min_num_clients)\n\n cids = list(self.clients)\n\n if criterion is not None:\n cids = [cid for cid in cids if criterion.select(self.clients[cid])]\n # Shuffle the list of clients\n\n available_cids = []\n if num_clients <= len(cids):\n available_cids = self.rng.sample(\n cids,\n num_clients,\n )\n elif self.enable_resampling:\n available_cids = self.rng.choices(\n cids,\n k=num_clients,\n )\n else:\n log(\n logging.INFO,\n \"Sampling failed: number of available clients\"\n \" (%s) is less than number of requested clients (%s).\",\n len(cids),\n num_clients,\n )\n available_cids = []\n\n client_list = [self.clients[cid] for cid in available_cids]\n log(\n logging.INFO,\n \"Sampled the following clients: %s\",\n available_cids,\n )\n return client_list"
},
{
"identifier": "WandbHistory",
"path": "project/fed/server/wandb_history.py",
"snippet": "class WandbHistory(History):\n \"\"\"History class for training and/or evaluation metrics collection.\"\"\"\n\n def __init__(self, use_wandb: bool = True) -> None:\n \"\"\"Initialize the history.\n\n Parameters\n ----------\n use_wandb : bool\n Whether to use wandb.\n Turn off to avoid communication overhead.\n\n Returns\n -------\n None\n \"\"\"\n super().__init__()\n\n self.use_wandb = use_wandb\n\n def add_loss_distributed(\n self,\n server_round: int,\n loss: float,\n ) -> None:\n \"\"\"Add one loss entry (from distributed evaluation) to history/wandb.\n\n Parameters\n ----------\n server_round : int\n The current server round.\n loss : float\n The loss to add.\n\n Returns\n -------\n None\n \"\"\"\n super().add_loss_distributed(server_round, loss)\n if self.use_wandb:\n wandb.log(\n {\"distributed_loss\": loss},\n step=server_round,\n )\n\n def add_loss_centralized(\n self,\n server_round: int,\n loss: float,\n ) -> None:\n \"\"\"Add one loss entry (from centralized evaluation) to history/wandb.\n\n Parameters\n ----------\n server_round : int\n The current server round.\n loss : float\n The loss to add.\n\n Returns\n -------\n None\n \"\"\"\n super().add_loss_centralized(server_round, loss)\n if self.use_wandb:\n wandb.log(\n {\"centralised_loss\": loss},\n step=server_round,\n )\n\n def add_metrics_distributed_fit(\n self,\n server_round: int,\n metrics: dict[str, Scalar],\n ) -> None:\n \"\"\"Add metrics entries (from distributed fit) to history/wandb.\n\n Parameters\n ----------\n server_round : int\n The current server round.\n metrics : Dict[str, Scalar]\n The metrics to add.\n\n Returns\n -------\n None\n \"\"\"\n super().add_metrics_distributed_fit(\n server_round,\n metrics,\n )\n if self.use_wandb:\n for key in metrics:\n wandb.log(\n {key: metrics[key]},\n step=server_round,\n )\n\n def add_metrics_distributed(\n self,\n server_round: int,\n metrics: dict[str, Scalar],\n ) -> None:\n \"\"\"Add metrics entries (from distributed evaluation) to history/wandb.\n\n Parameters\n ----------\n server_round : int\n The current server round.\n metrics : Dict[str, Scalar]\n The metrics to add.\n\n Returns\n -------\n None\n \"\"\"\n super().add_metrics_distributed(\n server_round,\n metrics,\n )\n if self.use_wandb:\n for key in metrics:\n wandb.log(\n {key: metrics[key]},\n step=server_round,\n )\n\n def add_metrics_centralized(\n self,\n server_round: int,\n metrics: dict[str, Scalar],\n ) -> None:\n \"\"\"Add metrics entries (from centralized evaluation) to history/wand.\n\n Parameters\n ----------\n server_round : int\n The current server round.\n metrics : Dict[str, Scalar]\n The metrics to add.\n\n Returns\n -------\n None\n \"\"\"\n super().add_metrics_centralized(\n server_round,\n metrics,\n )\n if self.use_wandb:\n for key in metrics:\n wandb.log(\n {key: metrics[key]},\n step=server_round,\n )"
},
{
"identifier": "WandbServer",
"path": "project/fed/server/wandb_server.py",
"snippet": "class WandbServer(Server):\n \"\"\"Flower server.\"\"\"\n\n def __init__(\n self,\n *,\n client_manager: ClientManager,\n strategy: Strategy | None = None,\n history: History | None = None,\n save_parameters_to_file: Callable[\n [Parameters],\n None,\n ],\n save_files_per_round: Callable[[int], None],\n ) -> None:\n \"\"\"Flower server implementation.\n\n Parameters\n ----------\n client_manager : ClientManager\n Client manager implementation.\n strategy : Optional[Strategy]\n Strategy implementation.\n history : Optional[History]\n History implementation.\n save_parameters_to_file : Callable[[Parameters], None]\n Function to save the parameters to file.\n save_files_per_round : Callable[[int], None]\n Function to save files every round.\n\n Returns\n -------\n None\n \"\"\"\n super().__init__(\n client_manager=client_manager,\n strategy=strategy,\n )\n\n self.history: History | None = history\n self.save_parameters_to_file = save_parameters_to_file\n self.save_files_per_round = save_files_per_round\n\n # pylint: disable=too-many-locals\n def fit(\n self,\n num_rounds: int,\n timeout: float | None,\n ) -> History:\n \"\"\"Run federated averaging for a number of rounds.\n\n Parameters\n ----------\n num_rounds : int\n The number of rounds to run.\n timeout : Optional[float]\n Timeout in seconds.\n\n Returns\n -------\n History\n The history of the training.\n Potentially using a pre-defined history.\n \"\"\"\n history = self.history if self.history is not None else History()\n\n # Initialize parameters\n log(INFO, \"Initializing global parameters\")\n self.parameters = self._get_initial_parameters(\n timeout=timeout,\n )\n log(INFO, \"Evaluating initial parameters\")\n res = self.strategy.evaluate(\n 0,\n parameters=self.parameters,\n )\n if res is not None:\n log(\n INFO,\n \"initial parameters (loss, other metrics): %s, %s\",\n res[0],\n res[1],\n )\n history.add_loss_centralized(\n server_round=0,\n loss=res[0],\n )\n history.add_metrics_centralized(\n server_round=0,\n metrics=res[1],\n )\n\n # Run federated learning for num_rounds\n log(INFO, \"FL starting\")\n start_time = timeit.default_timer()\n\n # Save initial parameters and files\n self.save_parameters_to_file(self.parameters)\n self.save_files_per_round(0)\n\n for current_round in range(1, num_rounds + 1):\n # Train model and replace previous global model\n res_fit = self.fit_round(\n server_round=current_round,\n timeout=timeout,\n )\n if res_fit is not None:\n (\n parameters_prime,\n fit_metrics,\n _,\n ) = res_fit # fit_metrics_aggregated\n if parameters_prime:\n self.parameters = parameters_prime\n history.add_metrics_distributed_fit(\n server_round=current_round,\n metrics=fit_metrics,\n )\n\n # Evaluate model using strategy implementation\n res_cen = self.strategy.evaluate(\n current_round,\n parameters=self.parameters,\n )\n if res_cen is not None:\n loss_cen, metrics_cen = res_cen\n log(\n INFO,\n \"fit progress: (%s, %s, %s, %s)\",\n current_round,\n loss_cen,\n metrics_cen,\n timeit.default_timer() - start_time,\n )\n history.add_loss_centralized(\n server_round=current_round,\n loss=loss_cen,\n )\n history.add_metrics_centralized(\n server_round=current_round,\n metrics=metrics_cen,\n )\n\n # Evaluate model on a sample of available clients\n res_fed = self.evaluate_round(\n server_round=current_round,\n timeout=timeout,\n )\n if res_fed is not None:\n loss_fed, evaluate_metrics_fed, _ = res_fed\n if loss_fed is not None:\n history.add_loss_distributed(\n server_round=current_round,\n loss=loss_fed,\n )\n 
history.add_metrics_distributed(\n server_round=current_round,\n metrics=evaluate_metrics_fed,\n )\n # Saver round parameters and files\n self.save_parameters_to_file(self.parameters)\n self.save_files_per_round(current_round)\n\n # Bookkeeping\n end_time = timeit.default_timer()\n elapsed = end_time - start_time\n log(INFO, \"FL finished in %s\", elapsed)\n return history"
},
{
"identifier": "get_initial_parameters",
"path": "project/fed/utils/utils.py",
"snippet": "def get_initial_parameters(\n net_generator: NetGen,\n config: dict,\n load_from: Path | None,\n server_round: int | None,\n) -> Parameters:\n \"\"\"Get the initial parameters for the network.\n\n Parameters\n ----------\n net_generator : NetGen\n The function to generate the network.\n config : Dict\n The configuration.\n load_from : Optional[Path]\n The path to the parameters file.\n\n Returns\n -------\n 'Parameters\n The parameters.\n \"\"\"\n if load_from is None:\n log(\n logging.INFO,\n \"Generating initial parameters with config: %s\",\n config,\n )\n return ndarrays_to_parameters(\n generic_get_parameters(net_generator(config)),\n )\n try:\n if server_round is not None:\n # Load specific round parameters\n load_from = load_from / f\"parameters_{server_round}.bin\"\n else:\n # Load only the most recent parameters\n load_from = max(\n Path(load_from).glob(\"parameters_*.bin\"),\n key=lambda f: (\n int(f.stem.split(\"_\")[1]),\n int(f.stem.split(\"_\")[2]),\n ),\n )\n\n log(\n logging.INFO,\n \"Loading initial parameters from: %s\",\n load_from,\n )\n\n return load_parameters_from_file(load_from)\n except (\n ValueError,\n FileNotFoundError,\n PermissionError,\n OSError,\n EOFError,\n IsADirectoryError,\n ):\n log(\n logging.INFO,\n f\"Loading parameters failed from: {load_from}\",\n )\n log(\n logging.INFO,\n \"Generating initial parameters with config: %s\",\n config,\n )\n\n return ndarrays_to_parameters(\n generic_get_parameters(net_generator(config)),\n )"
},
{
"identifier": "get_save_parameters_to_file",
"path": "project/fed/utils/utils.py",
"snippet": "def get_save_parameters_to_file(\n working_dir: Path,\n) -> Callable[[Parameters], None]:\n \"\"\"Get a function to save parameters to a file.\n\n Parameters\n ----------\n working_dir : Path\n The working directory.\n\n Returns\n -------\n Callable[[Parameters], None]\n A function to save parameters to a file.\n \"\"\"\n\n def save_parameters_to_file(\n parameters: Parameters,\n ) -> None:\n \"\"\"Save the parameters to a file.\n\n Parameters\n ----------\n parameters : Parameters\n The parameters to save.\n\n Returns\n -------\n None\n \"\"\"\n parameters_path = working_dir / \"parameters\"\n parameters_path.mkdir(parents=True, exist_ok=True)\n with open(\n parameters_path / \"parameters.bin\",\n \"wb\",\n ) as f:\n # Since Parameters is a list of bytes\n # save the length of each row and the data\n # for deserialization\n for data in parameters.tensors:\n # Prepend the length of the data as a 4-byte integer\n f.write(struct.pack(\"I\", len(data)))\n f.write(data)\n\n return save_parameters_to_file"
},
{
"identifier": "get_weighted_avg_metrics_agg_fn",
"path": "project/fed/utils/utils.py",
"snippet": "def get_weighted_avg_metrics_agg_fn(\n to_agg: set[str],\n) -> Callable[[list[tuple[int, dict]]], dict]:\n \"\"\"Return a function to compute a weighted average over pre-defined metrics.\n\n Parameters\n ----------\n to_agg : Set[str]\n The metrics to aggregate.\n\n Returns\n -------\n Callable[[List[Tuple[int, Dict]]], Dict]\n A function to compute a weighted average over pre-defined metrics.\n \"\"\"\n\n def weighted_avg(\n metrics: list[tuple[int, dict]],\n ) -> dict:\n \"\"\"Compute a weighted average over pre-defined metrics.\n\n Parameters\n ----------\n metrics : List[Tuple[int, Dict]]\n The metrics to aggregate.\n\n Returns\n -------\n Dict\n The weighted average over pre-defined metrics.\n \"\"\"\n total_num_examples = sum(\n [num_examples for num_examples, _ in metrics],\n )\n weighted_metrics: dict = defaultdict(float)\n for num_examples, metric in metrics:\n for key, value in metric.items():\n if key in to_agg:\n weighted_metrics[key] += num_examples * value\n\n return {\n key: value / total_num_examples for key, value in weighted_metrics.items()\n }\n\n return weighted_avg"
},
{
"identifier": "test_client",
"path": "project/fed/utils/utils.py",
"snippet": "def test_client( # noqa: PLR0917\n test_all_clients: bool,\n test_one_client: bool,\n client_generator: ClientGen,\n initial_parameters: Parameters,\n total_clients: int,\n on_fit_config_fn: OnFitConfigFN | None,\n on_evaluate_config_fn: OnEvaluateConfigFN | None,\n) -> None:\n \"\"\"Debug the client code.\n\n Avoids the complexity of Ray.\n \"\"\"\n parameters = parameters_to_ndarrays(initial_parameters)\n if test_all_clients or test_one_client:\n if test_one_client:\n client = client_generator(str(0))\n _, *res_fit = client.fit(\n parameters,\n on_fit_config_fn(0) if on_fit_config_fn else {},\n )\n res_eval = client.evaluate(\n parameters,\n on_evaluate_config_fn(0) if on_evaluate_config_fn else {},\n )\n log(\n logging.INFO,\n \"Fit debug fit: %s and eval: %s\",\n res_fit,\n res_eval,\n )\n else:\n for i in range(total_clients):\n client = client_generator(str(i))\n _, *res_fit = client.fit(\n parameters,\n on_fit_config_fn(i) if on_fit_config_fn else {},\n )\n res_eval = client.evaluate(\n parameters,\n on_evaluate_config_fn(i) if on_evaluate_config_fn else {},\n )\n log(\n logging.INFO,\n \"Fit debug fit: %s and eval: %s\",\n res_fit,\n res_eval,\n )"
},
{
"identifier": "ClientGen",
"path": "project/types/common.py",
"snippet": ""
},
{
"identifier": "FileSystemManager",
"path": "project/utils/utils.py",
"snippet": "class FileSystemManager:\n \"\"\"A context manager for saving and cleaning up files.\"\"\"\n\n def __init__( # noqa: PLR0917\n self,\n working_dir: Path,\n output_dir: Path,\n to_clean_once: list[str],\n to_save_once: list[str],\n original_hydra_dir: Path,\n reuse_output_dir: bool,\n file_limit: int | None = None,\n ) -> None:\n \"\"\"Initialize the context manager.\n\n Parameters\n ----------\n working_dir : Path\n The working directory.\n output_dir : Path\n The output directory.\n to_clean_once : List[str]\n The tokens to clean once.\n to_save_once : List[str]\n The tokens to save once.\n original_hydra_dir : Path\n The original hydra directory.\n For copying the hydra directory to the working directory.\n reuse_output_dir : bool\n Whether to reuse the output directory.\n file_limit : Optional[int]\n The maximal number of files to search.\n If None, then there is no limit.\n\n Returns\n -------\n None\n \"\"\"\n self.to_clean_once = to_clean_once\n self.working_dir = working_dir\n self.output_dir = output_dir\n self.to_save_once = to_save_once\n self.original_hydra_dir = original_hydra_dir\n self.reuse_output_dir = reuse_output_dir\n self.checkpoint_index = get_checkpoint_index(\n self.output_dir,\n file_limit,\n )\n\n def get_save_files_every_round(\n self,\n to_save: list[str],\n save_frequency: int,\n ) -> Callable[[int], None]:\n \"\"\"Get a function that saves files every save_frequency rounds.\n\n Parameters\n ----------\n to_save : List[str]\n The tokens to save.\n save_frequency : int\n The frequency to save.\n\n Returns\n -------\n Callable[[int], None]\n The function that saves the files.\n \"\"\"\n\n def save_files_round(cur_round: int) -> None:\n if cur_round % save_frequency == 0:\n save_files(\n self.working_dir,\n self.output_dir,\n to_save=to_save,\n ending=cur_round,\n checkpoint_index=self.checkpoint_index,\n )\n\n return save_files_round\n\n def __enter__(self) -> \"FileSystemManager\":\n \"\"\"Initialize the context manager and cleanup.\"\"\"\n log(\n logging.INFO,\n f\"Pre-cleaning {self.to_clean_once}\",\n )\n cleanup(self.working_dir, self.to_clean_once)\n\n return self\n\n def __exit__(\n self,\n _exc_type: type[BaseException] | None,\n _exc_value: BaseException | None,\n _traceback: TracebackType | None,\n ) -> None:\n \"\"\"Cleanup the files.\"\"\"\n log(logging.INFO, f\"Saving {self.to_save_once}\")\n\n # Copy the hydra directory to the working directory\n # so that multiple runs can be ran\n # in the same output directory and configs versioned\n hydra_dir = self.working_dir / \".hydra\"\n\n shutil.copytree(\n str(self.original_hydra_dir / \".hydra\"),\n str(object=hydra_dir),\n dirs_exist_ok=True,\n )\n\n # Move main.log to the working directory\n main_log = self.original_hydra_dir / \"main.log\"\n shutil.copy2(\n str(main_log),\n str(self.working_dir / \"main.log\"),\n )\n save_files(\n self.working_dir,\n self.output_dir,\n to_save=self.to_save_once,\n checkpoint_index=self.checkpoint_index,\n )\n log(\n logging.INFO,\n f\"Post-cleaning {self.to_clean_once}\",\n )\n cleanup(\n self.working_dir,\n to_clean=self.to_clean_once,\n )"
},
{
"identifier": "RayContextManager",
"path": "project/utils/utils.py",
"snippet": "class RayContextManager:\n \"\"\"A context manager for cleaning up after ray.\"\"\"\n\n def __enter__(self) -> \"RayContextManager\":\n \"\"\"Initialize the context manager.\"\"\"\n return self\n\n def __exit__(\n self,\n _exc_type: type[BaseException] | None,\n _exc_value: BaseException | None,\n _traceback: TracebackType | None,\n ) -> None:\n \"\"\"Cleanup the files.\n\n Parameters\n ----------\n _exc_type : Any\n The exception type.\n _exc_value : Any\n The exception value.\n _traceback : Any\n The traceback.\n\n Returns\n -------\n None\n \"\"\"\n if ray.is_initialized():\n temp_dir = Path(\n ray.worker._global_node.get_session_dir_path(),\n )\n ray.shutdown()\n directory_size = shutil.disk_usage(\n temp_dir,\n ).used\n shutil.rmtree(temp_dir)\n log(\n logging.INFO,\n f\"Cleaned up ray temp session: {temp_dir} with size: {directory_size}\",\n )"
},
{
"identifier": "seed_everything",
"path": "project/utils/utils.py",
"snippet": "def seed_everything(seed: int) -> None:\n \"\"\"Seed everything for reproducibility.\n\n Parameters\n ----------\n seed : int\n The seed.\n\n Returns\n -------\n None\n \"\"\"\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)"
},
{
"identifier": "wandb_init",
"path": "project/utils/utils.py",
"snippet": "def wandb_init(\n wandb_enabled: bool,\n *args: Any,\n **kwargs: Any,\n) -> NoOpContextManager | Any:\n \"\"\"Initialize wandb if enabled.\n\n Parameters\n ----------\n wandb_enabled : bool\n Whether wandb is enabled.\n *args : Any\n The arguments to pass to wandb.init.\n **kwargs : Any\n The keyword arguments to pass to wandb.init.\n\n Returns\n -------\n Optional[Union[NoOpContextManager, Any]]\n The wandb context manager if enabled, otherwise a no-op context manager\n \"\"\"\n if wandb_enabled:\n return wandb.init(*args, **kwargs)\n\n return NoOpContextManager()"
}
] | import json
import logging
import os
import subprocess
import sys
import flwr as fl
import hydra
import wandb
from pathlib import Path
from typing import cast
from flwr.common.logger import log
from hydra.core.hydra_config import HydraConfig
from hydra.utils import instantiate
from omegaconf import DictConfig, OmegaConf
from project.client.client import get_client_generator
from project.dispatch.dispatch import dispatch_config, dispatch_data, dispatch_train
from project.fed.server.deterministic_client_manager import DeterministicClientManager
from project.fed.server.wandb_history import WandbHistory
from project.fed.server.wandb_server import WandbServer
from project.fed.utils.utils import (
get_initial_parameters,
get_save_parameters_to_file,
get_weighted_avg_metrics_agg_fn,
test_client,
)
from project.types.common import ClientGen, FedEvalFN
from project.utils.utils import (
FileSystemManager,
RayContextManager,
seed_everything,
wandb_init,
) | 7,950 | """Create and connect the building blocks for your experiments; start the simulation.
It includes processing the dataset, instantiating the strategy, specifying how the global
model will be evaluated, etc. In the end, this script saves the results.
"""
# Only import from the project root
# Never do a relative import nor one that assumes a given folder structure
# Make debugging easier when using Hydra + Ray
os.environ["HYDRA_FULL_ERROR"] = "1"
os.environ["OC_CAUSE"] = "1"
@hydra.main(
config_path="conf",
config_name="base",
version_base=None,
)
def main(cfg: DictConfig) -> None:
"""Run the baseline.
Parameters
----------
cfg : DictConfig
An omegaconf object that stores the hydra config.
"""
# Print parsed config
log(logging.INFO, OmegaConf.to_yaml(cfg))
wandb_config = OmegaConf.to_container(
cfg,
resolve=True,
throw_on_missing=True,
)
# Obtain the output dir from hydra
original_hydra_dir = Path(
hydra.utils.to_absolute_path(
HydraConfig.get().runtime.output_dir,
),
)
output_directory = original_hydra_dir
# Reuse an output directory for checkpointing
if cfg.reuse_output_dir is not None:
output_directory = Path(cfg.reuse_output_dir)
# The directory to save data to
results_dir = output_directory / "results"
results_dir.mkdir(parents=True, exist_ok=True)
# Where to save files to and from
if cfg.working_dir is not None:
# Pre-defined directory
working_dir = Path(cfg.working_dir)
else:
# Default directory
working_dir = output_directory / "working"
working_dir.mkdir(parents=True, exist_ok=True)
# Wandb context manager
# controlls if wandb is initialised or not
# if not it returns a dummy run
with wandb_init(
cfg.use_wandb,
**cfg.wandb.setup,
settings=wandb.Settings(start_method="thread"),
config=wandb_config,
) as run:
log(
logging.INFO,
"Wandb run initialized with %s",
cfg.use_wandb,
)
# Context managers for saving and cleaning up files
# from the working directory
# at the start/end of the simulation
# The RayContextManager deletes the ray session folder
with (
FileSystemManager(
working_dir=working_dir,
output_dir=results_dir,
to_clean_once=cfg.to_clean_once,
to_save_once=cfg.to_save_once,
original_hydra_dir=original_hydra_dir,
reuse_output_dir=cfg.reuse_output_dir,
file_limit=cfg.file_limit,
) as fs_manager,
RayContextManager() as _ray_manager,
):
# Which files to save every <to_save_per_round> rounds
# e.g. model checkpoints
save_files_per_round = fs_manager.get_save_files_every_round(
cfg.to_save_per_round,
cfg.save_frequency,
)
# For checkpointed runs, adjust the seed
# so different clients are sampled
adjusted_seed = cfg.fed.seed ^ fs_manager.checkpoint_index
save_parameters_to_file = get_save_parameters_to_file(working_dir)
# Client manager that samples the same clients
# For a given seed+checkpoint combination
| """Create and connect the building blocks for your experiments; start the simulation.
It includes processing the dataset, instantiating the strategy, specifying how the global
model will be evaluated, etc. In the end, this script saves the results.
"""
# Only import from the project root
# Never do a relative import nor one that assumes a given folder structure
# Make debugging easier when using Hydra + Ray
os.environ["HYDRA_FULL_ERROR"] = "1"
os.environ["OC_CAUSE"] = "1"
@hydra.main(
config_path="conf",
config_name="base",
version_base=None,
)
def main(cfg: DictConfig) -> None:
"""Run the baseline.
Parameters
----------
cfg : DictConfig
An omegaconf object that stores the hydra config.
"""
# Print parsed config
log(logging.INFO, OmegaConf.to_yaml(cfg))
wandb_config = OmegaConf.to_container(
cfg,
resolve=True,
throw_on_missing=True,
)
# Obtain the output dir from hydra
original_hydra_dir = Path(
hydra.utils.to_absolute_path(
HydraConfig.get().runtime.output_dir,
),
)
output_directory = original_hydra_dir
# Reuse an output directory for checkpointing
if cfg.reuse_output_dir is not None:
output_directory = Path(cfg.reuse_output_dir)
# The directory to save data to
results_dir = output_directory / "results"
results_dir.mkdir(parents=True, exist_ok=True)
# Where to save files to and from
if cfg.working_dir is not None:
# Pre-defined directory
working_dir = Path(cfg.working_dir)
else:
# Default directory
working_dir = output_directory / "working"
working_dir.mkdir(parents=True, exist_ok=True)
# Wandb context manager
# controls if wandb is initialised or not
# if not it returns a dummy run
with wandb_init(
cfg.use_wandb,
**cfg.wandb.setup,
settings=wandb.Settings(start_method="thread"),
config=wandb_config,
) as run:
log(
logging.INFO,
"Wandb run initialized with %s",
cfg.use_wandb,
)
# Context managers for saving and cleaning up files
# from the working directory
# at the start/end of the simulation
# The RayContextManager deletes the ray session folder
with (
FileSystemManager(
working_dir=working_dir,
output_dir=results_dir,
to_clean_once=cfg.to_clean_once,
to_save_once=cfg.to_save_once,
original_hydra_dir=original_hydra_dir,
reuse_output_dir=cfg.reuse_output_dir,
file_limit=cfg.file_limit,
) as fs_manager,
RayContextManager() as _ray_manager,
):
# Which files to save every <to_save_per_round> rounds
# e.g. model checkpoints
save_files_per_round = fs_manager.get_save_files_every_round(
cfg.to_save_per_round,
cfg.save_frequency,
)
# For checkpointed runs, adjust the seed
# so different clients are sampled
adjusted_seed = cfg.fed.seed ^ fs_manager.checkpoint_index
save_parameters_to_file = get_save_parameters_to_file(working_dir)
# Client manager that samples the same clients
# For a given seed+checkpoint combination | client_manager = DeterministicClientManager( | 4 | 2023-11-08 15:31:44+00:00 | 12k |
silicx/GoldFromOres | DatasetCondensation/main.py | [
{
"identifier": "get_loops",
"path": "DatasetCondensation/utils.py",
"snippet": "def get_loops(ipc):\r\n # Get the two hyper-parameters of outer-loop and inner-loop.\r\n # The following values are empirically good.\r\n if ipc == 1:\r\n outer_loop, inner_loop = 1, 1\r\n elif ipc == 10:\r\n outer_loop, inner_loop = 10, 50\r\n elif ipc == 20:\r\n outer_loop, inner_loop = 20, 25\r\n elif ipc == 30:\r\n outer_loop, inner_loop = 30, 20\r\n elif ipc == 40:\r\n outer_loop, inner_loop = 40, 15\r\n elif ipc == 50:\r\n outer_loop, inner_loop = 50, 10\r\n else:\r\n outer_loop, inner_loop = 0, 0\r\n exit('loop hyper-parameters are not defined for %d ipc'%ipc)\r\n return outer_loop, inner_loop\r"
},
{
"identifier": "get_dataset",
"path": "DatasetCondensation/utils.py",
"snippet": "def get_dataset(dataset, data_path):\r\n if dataset == 'MNIST':\r\n channel = 1\r\n im_size = (28, 28)\r\n num_classes = 10\r\n mean = [0.1307]\r\n std = [0.3081]\r\n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)])\r\n dst_train = datasets.MNIST(data_path, train=True, download=True, transform=transform) # no augmentation\r\n dst_test = datasets.MNIST(data_path, train=False, download=True, transform=transform)\r\n class_names = [str(c) for c in range(num_classes)]\r\n\r\n elif dataset == 'FashionMNIST':\r\n channel = 1\r\n im_size = (28, 28)\r\n num_classes = 10\r\n mean = [0.2861]\r\n std = [0.3530]\r\n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)])\r\n dst_train = datasets.FashionMNIST(data_path, train=True, download=True, transform=transform) # no augmentation\r\n dst_test = datasets.FashionMNIST(data_path, train=False, download=True, transform=transform)\r\n class_names = dst_train.classes\r\n\r\n elif dataset == 'SVHN':\r\n channel = 3\r\n im_size = (32, 32)\r\n num_classes = 10\r\n mean = [0.4377, 0.4438, 0.4728]\r\n std = [0.1980, 0.2010, 0.1970]\r\n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)])\r\n dst_train = datasets.SVHN(data_path, split='train', download=True, transform=transform) # no augmentation\r\n dst_test = datasets.SVHN(data_path, split='test', download=True, transform=transform)\r\n class_names = [str(c) for c in range(num_classes)]\r\n\r\n elif dataset == 'CIFAR10':\r\n channel = 3\r\n im_size = (32, 32)\r\n num_classes = 10\r\n mean = [0.4914, 0.4822, 0.4465]\r\n std = [0.2023, 0.1994, 0.2010]\r\n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)])\r\n dst_train = datasets.CIFAR10(data_path, train=True, download=True, transform=transform) # no augmentation\r\n dst_test = datasets.CIFAR10(data_path, train=False, download=True, transform=transform)\r\n class_names = dst_train.classes\r\n\r\n elif dataset == 'CIFAR100':\r\n channel = 3\r\n im_size = (32, 32)\r\n num_classes = 100\r\n mean = [0.5071, 0.4866, 0.4409]\r\n std = [0.2673, 0.2564, 0.2762]\r\n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)])\r\n dst_train = datasets.CIFAR100(data_path, train=True, download=True, transform=transform) # no augmentation\r\n dst_test = datasets.CIFAR100(data_path, train=False, download=True, transform=transform)\r\n class_names = dst_train.classes\r\n\r\n elif dataset == 'TinyImageNet':\r\n channel = 3\r\n im_size = (64, 64)\r\n num_classes = 200\r\n mean = [0.485, 0.456, 0.406]\r\n std = [0.229, 0.224, 0.225]\r\n data = torch.load(os.path.join(data_path, 'tinyimagenet.pt'), map_location='cpu')\r\n\r\n class_names = data['classes']\r\n\r\n images_train = data['images_train']\r\n labels_train = data['labels_train']\r\n images_train = images_train.detach().float() / 255.0\r\n labels_train = labels_train.detach()\r\n for c in range(channel):\r\n images_train[:,c] = (images_train[:,c] - mean[c])/std[c]\r\n dst_train = TensorDataset(images_train, labels_train) # no augmentation\r\n\r\n images_val = data['images_val']\r\n labels_val = data['labels_val']\r\n images_val = images_val.detach().float() / 255.0\r\n labels_val = labels_val.detach()\r\n\r\n for c in range(channel):\r\n images_val[:, c] = (images_val[:, c] - mean[c]) / std[c]\r\n\r\n dst_test = TensorDataset(images_val, labels_val) # no augmentation\r\n\r\n else:\r\n 
exit('unknown dataset: %s'%dataset)\r\n\r\n\r\n testloader = torch.utils.data.DataLoader(dst_test, batch_size=256, shuffle=False, num_workers=0)\r\n return channel, im_size, num_classes, class_names, mean, std, dst_train, dst_test, testloader\r"
},
{
"identifier": "get_network",
"path": "DatasetCondensation/utils.py",
"snippet": "def get_network(model, channel, num_classes, im_size=(32, 32)):\r\n torch.random.manual_seed(int(time.time() * 1000) % 100000)\r\n net_width, net_depth, net_act, net_norm, net_pooling = get_default_convnet_setting()\r\n\r\n if model == 'MLP':\r\n net = MLP(channel=channel, num_classes=num_classes)\r\n elif model == 'ConvNet':\r\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=net_depth, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)\r\n elif model == 'LeNet':\r\n net = LeNet(channel=channel, num_classes=num_classes)\r\n elif model == 'AlexNet':\r\n net = AlexNet(channel=channel, num_classes=num_classes)\r\n elif model == 'AlexNetBN':\r\n net = AlexNetBN(channel=channel, num_classes=num_classes)\r\n elif model == 'VGG11':\r\n net = VGG11( channel=channel, num_classes=num_classes)\r\n elif model == 'VGG11BN':\r\n net = VGG11BN(channel=channel, num_classes=num_classes)\r\n elif model == 'ResNet18':\r\n net = ResNet18(channel=channel, num_classes=num_classes)\r\n elif model == 'ResNet18BN_AP':\r\n net = ResNet18BN_AP(channel=channel, num_classes=num_classes)\r\n elif model == 'ResNet18BN':\r\n net = ResNet18BN(channel=channel, num_classes=num_classes)\r\n\r\n elif model == 'ConvNetD1':\r\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=1, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)\r\n elif model == 'ConvNetD2':\r\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=2, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)\r\n elif model == 'ConvNetD3':\r\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=3, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)\r\n elif model == 'ConvNetD4':\r\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=4, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)\r\n\r\n elif model == 'ConvNetW32':\r\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=32, net_depth=net_depth, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)\r\n elif model == 'ConvNetW64':\r\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=64, net_depth=net_depth, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)\r\n elif model == 'ConvNetW128':\r\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=128, net_depth=net_depth, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)\r\n elif model == 'ConvNetW256':\r\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=256, net_depth=net_depth, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)\r\n\r\n elif model == 'ConvNetAS':\r\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=net_depth, net_act='sigmoid', net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)\r\n elif model == 'ConvNetAR':\r\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=net_depth, net_act='relu', net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)\r\n elif model == 'ConvNetAL':\r\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=net_depth, net_act='leakyrelu', net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)\r\n elif model == 
'ConvNetASwish':\r\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=net_depth, net_act='swish', net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)\r\n elif model == 'ConvNetASwishBN':\r\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=net_depth, net_act='swish', net_norm='batchnorm', net_pooling=net_pooling, im_size=im_size)\r\n\r\n elif model == 'ConvNetNN':\r\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=net_depth, net_act=net_act, net_norm='none', net_pooling=net_pooling, im_size=im_size)\r\n elif model == 'ConvNetBN':\r\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=net_depth, net_act=net_act, net_norm='batchnorm', net_pooling=net_pooling, im_size=im_size)\r\n elif model == 'ConvNetLN':\r\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=net_depth, net_act=net_act, net_norm='layernorm', net_pooling=net_pooling, im_size=im_size)\r\n elif model == 'ConvNetIN':\r\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=net_depth, net_act=net_act, net_norm='instancenorm', net_pooling=net_pooling, im_size=im_size)\r\n elif model == 'ConvNetGN':\r\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=net_depth, net_act=net_act, net_norm='groupnorm', net_pooling=net_pooling, im_size=im_size)\r\n\r\n elif model == 'ConvNetNP':\r\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=net_depth, net_act=net_act, net_norm=net_norm, net_pooling='none', im_size=im_size)\r\n elif model == 'ConvNetMP':\r\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=net_depth, net_act=net_act, net_norm=net_norm, net_pooling='maxpooling', im_size=im_size)\r\n elif model == 'ConvNetAP':\r\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=net_depth, net_act=net_act, net_norm=net_norm, net_pooling='avgpooling', im_size=im_size)\r\n\r\n else:\r\n net = None\r\n exit('unknown model: %s'%model)\r\n\r\n gpu_num = torch.cuda.device_count()\r\n if gpu_num>0:\r\n device = 'cuda'\r\n if gpu_num>1:\r\n net = nn.DataParallel(net)\r\n else:\r\n device = 'cpu'\r\n net = net.to(device)\r\n\r\n return net\r"
},
{
"identifier": "get_eval_pool",
"path": "DatasetCondensation/utils.py",
"snippet": "def get_eval_pool(eval_mode, model, model_eval):\r\n if eval_mode == 'M': # multiple architectures\r\n model_eval_pool = ['MLP', 'ConvNet', 'LeNet', 'AlexNet', 'VGG11', 'ResNet18']\r\n elif eval_mode == 'B': # multiple architectures with BatchNorm for DM experiments\r\n model_eval_pool = ['ConvNetBN', 'ConvNetASwishBN', 'AlexNetBN', 'VGG11BN', 'ResNet18BN']\r\n elif eval_mode == 'W': # ablation study on network width\r\n model_eval_pool = ['ConvNetW32', 'ConvNetW64', 'ConvNetW128', 'ConvNetW256']\r\n elif eval_mode == 'D': # ablation study on network depth\r\n model_eval_pool = ['ConvNetD1', 'ConvNetD2', 'ConvNetD3', 'ConvNetD4']\r\n elif eval_mode == 'A': # ablation study on network activation function\r\n model_eval_pool = ['ConvNetAS', 'ConvNetAR', 'ConvNetAL', 'ConvNetASwish']\r\n elif eval_mode == 'P': # ablation study on network pooling layer\r\n model_eval_pool = ['ConvNetNP', 'ConvNetMP', 'ConvNetAP']\r\n elif eval_mode == 'N': # ablation study on network normalization layer\r\n model_eval_pool = ['ConvNetNN', 'ConvNetBN', 'ConvNetLN', 'ConvNetIN', 'ConvNetGN']\r\n elif eval_mode == 'S': # itself\r\n if 'BN' in model:\r\n print('Attention: Here I will replace BN with IN in evaluation, as the synthetic set is too small to measure BN hyper-parameters.')\r\n model_eval_pool = [model[:model.index('BN')]] if 'BN' in model else [model]\r\n elif eval_mode == 'SS': # itself\r\n model_eval_pool = [model]\r\n else:\r\n model_eval_pool = [model_eval]\r\n return model_eval_pool\r"
},
{
"identifier": "evaluate_synset",
"path": "DatasetCondensation/utils.py",
"snippet": "def evaluate_synset(it_eval, net, images_train, labels_train, testloader, args):\r\n net = net.to(args.device)\r\n images_train = images_train.to(args.device)\r\n labels_train = labels_train.to(args.device)\r\n lr = float(args.lr_net)\r\n Epoch = int(args.epoch_eval_train)\r\n lr_schedule = [Epoch//2+1]\r\n optimizer = torch.optim.SGD(net.parameters(), lr=lr, momentum=0.9, weight_decay=0.0005)\r\n criterion = nn.CrossEntropyLoss().to(args.device)\r\n\r\n dst_train = TensorDataset(images_train, labels_train)\r\n trainloader = torch.utils.data.DataLoader(dst_train, batch_size=args.batch_train, shuffle=True, num_workers=0)\r\n\r\n start = time.time()\r\n for ep in range(Epoch+1):\r\n loss_train, acc_train = epoch('train', trainloader, net, optimizer, criterion, args, aug = True)\r\n if ep in lr_schedule:\r\n lr *= 0.1\r\n optimizer = torch.optim.SGD(net.parameters(), lr=lr, momentum=0.9, weight_decay=0.0005)\r\n\r\n time_train = time.time() - start\r\n loss_test, acc_test = epoch('test', testloader, net, optimizer, criterion, args, aug = False)\r\n print('%s Evaluate_%02d: epoch = %04d train time = %d s train loss = %.6f train acc = %.4f, test acc = %.4f' % (get_time(), it_eval, Epoch, int(time_train), loss_train, acc_train, acc_test))\r\n\r\n return net, acc_train, acc_test\r"
},
{
"identifier": "get_daparam",
"path": "DatasetCondensation/utils.py",
"snippet": "def get_daparam(dataset, model, model_eval, ipc):\r\n # We find that augmentation doesn't always benefit the performance.\r\n # So we do augmentation for some of the settings.\r\n\r\n dc_aug_param = dict()\r\n dc_aug_param['crop'] = 4\r\n dc_aug_param['scale'] = 0.2\r\n dc_aug_param['rotate'] = 45\r\n dc_aug_param['noise'] = 0.001\r\n dc_aug_param['strategy'] = 'none'\r\n\r\n if dataset == 'MNIST':\r\n dc_aug_param['strategy'] = 'crop_scale_rotate'\r\n\r\n if model_eval in ['ConvNetBN']: # Data augmentation makes model training with Batch Norm layer easier.\r\n dc_aug_param['strategy'] = 'crop_noise'\r\n\r\n return dc_aug_param\r"
},
{
"identifier": "match_loss",
"path": "DatasetCondensation/utils.py",
"snippet": "def match_loss(gw_syn, gw_real, args):\r\n dis = torch.tensor(0.0).to(args.device)\r\n\r\n if args.dis_metric == 'ours':\r\n for ig in range(len(gw_real)):\r\n gwr = gw_real[ig]\r\n gws = gw_syn[ig]\r\n dis += distance_wb(gwr, gws)\r\n\r\n elif args.dis_metric == 'mse':\r\n gw_real_vec = []\r\n gw_syn_vec = []\r\n for ig in range(len(gw_real)):\r\n gw_real_vec.append(gw_real[ig].reshape((-1)))\r\n gw_syn_vec.append(gw_syn[ig].reshape((-1)))\r\n gw_real_vec = torch.cat(gw_real_vec, dim=0)\r\n gw_syn_vec = torch.cat(gw_syn_vec, dim=0)\r\n dis = torch.sum((gw_syn_vec - gw_real_vec)**2)\r\n\r\n elif args.dis_metric == 'cos':\r\n gw_real_vec = []\r\n gw_syn_vec = []\r\n for ig in range(len(gw_real)):\r\n gw_real_vec.append(gw_real[ig].reshape((-1)))\r\n gw_syn_vec.append(gw_syn[ig].reshape((-1)))\r\n gw_real_vec = torch.cat(gw_real_vec, dim=0)\r\n gw_syn_vec = torch.cat(gw_syn_vec, dim=0)\r\n dis = 1 - torch.sum(gw_real_vec * gw_syn_vec, dim=-1) / (torch.norm(gw_real_vec, dim=-1) * torch.norm(gw_syn_vec, dim=-1) + 0.000001)\r\n\r\n else:\r\n exit('unknown distance function: %s'%args.dis_metric)\r\n\r\n return dis\r"
},
{
"identifier": "get_time",
"path": "DatasetCondensation/utils.py",
"snippet": "def get_time():\r\n return str(time.strftime(\"[%Y-%m-%d %H:%M:%S]\", time.localtime()))\r"
},
{
"identifier": "TensorDataset",
"path": "DatasetCondensation/utils.py",
"snippet": "class TensorDataset(Dataset):\r\n def __init__(self, images, labels): # images: n x c x h x w tensor\r\n self.images = images.detach().float()\r\n self.labels = labels.detach()\r\n\r\n def __getitem__(self, index):\r\n return self.images[index], self.labels[index]\r\n\r\n def __len__(self):\r\n return self.images.shape[0]\r"
},
{
"identifier": "epoch",
"path": "DatasetCondensation/utils.py",
"snippet": "def epoch(mode, dataloader, net, optimizer, criterion, args, aug):\r\n loss_avg, acc_avg, num_exp = 0, 0, 0\r\n net = net.to(args.device)\r\n criterion = criterion.to(args.device)\r\n\r\n if mode == 'train':\r\n net.train()\r\n else:\r\n net.eval()\r\n\r\n for i_batch, datum in enumerate(dataloader):\r\n img = datum[0].float().to(args.device)\r\n if aug:\r\n if args.dsa:\r\n img = DiffAugment(img, args.dsa_strategy, param=args.dsa_param)\r\n else:\r\n img = augment(img, args.dc_aug_param, device=args.device)\r\n lab = datum[1].long().to(args.device)\r\n n_b = lab.shape[0]\r\n\r\n output = net(img)\r\n loss = criterion(output, lab)\r\n acc = np.sum(np.equal(np.argmax(output.cpu().data.numpy(), axis=-1), lab.cpu().data.numpy()))\r\n\r\n loss_avg += loss.item()*n_b\r\n acc_avg += acc\r\n num_exp += n_b\r\n\r\n if mode == 'train':\r\n optimizer.zero_grad()\r\n loss.backward()\r\n optimizer.step()\r\n\r\n loss_avg /= num_exp\r\n acc_avg /= num_exp\r\n\r\n return loss_avg, acc_avg\r"
},
{
"identifier": "DiffAugment",
"path": "DatasetCondensation/utils.py",
"snippet": "def DiffAugment(x, strategy='', seed = -1, param = None):\r\n if strategy == 'None' or strategy == 'none' or strategy == '':\r\n return x\r\n\r\n if seed == -1:\r\n param.Siamese = False\r\n else:\r\n param.Siamese = True\r\n\r\n param.latestseed = seed\r\n\r\n if strategy:\r\n if param.aug_mode == 'M': # original\r\n for p in strategy.split('_'):\r\n for f in AUGMENT_FNS[p]:\r\n x = f(x, param)\r\n elif param.aug_mode == 'S':\r\n pbties = strategy.split('_')\r\n set_seed_DiffAug(param)\r\n p = pbties[torch.randint(0, len(pbties), size=(1,)).item()]\r\n for f in AUGMENT_FNS[p]:\r\n x = f(x, param)\r\n else:\r\n exit('unknown augmentation mode: %s'%param.aug_mode)\r\n x = x.contiguous()\r\n return x\r"
},
{
"identifier": "ParamDiffAug",
"path": "DatasetCondensation/utils.py",
"snippet": "class ParamDiffAug():\r\n def __init__(self):\r\n self.aug_mode = 'S' #'multiple or single'\r\n self.prob_flip = 0.5\r\n self.ratio_scale = 1.2\r\n self.ratio_rotate = 15.0\r\n self.ratio_crop_pad = 0.125\r\n self.ratio_cutout = 0.5 # the size would be 0.5x0.5\r\n self.brightness = 1.0\r\n self.saturation = 2.0\r\n self.contrast = 0.5\r"
},
{
"identifier": "drop_samples",
"path": "drop_utils/drop.py",
"snippet": "def drop_samples(images_all, labels_all, indices_class,\n dataset: str, drop_criterion: str, \n *, drop_ratio=None, keep_ratio=None):\n \"\"\"images_all, labels_all, indices_class: the dataset structure that commonly used for DD\n dataset: (str) dataset name\n drop_criterion: (str) =`random`, or in the format of ${utility-indicator}_${order}, e.g. LossConverge_Small\n drop_ratio, keep_ratio: only one of them should be specified (drop_ratio = 1.0 - keep_ratio)\n \"\"\"\n assert (drop_ratio is None) ^ (keep_ratio is None), \\\n f\"Only one of drop_ratio ({drop_ratio}) and keep_ratio ({keep_ratio}) should be specified.\"\n \n if drop_ratio is None:\n assert keep_ratio is not None, \"I know keep_ratio must have value here! I'm muting the warning in my way.\"\n drop_ratio = 1.0 - keep_ratio\n assert 0.0 <= drop_ratio <= 1.0, str(drop_ratio)\n\n # Here's the tricky part: remember that in any case, the samples we hope to drop is sorted to the left\n # of the sequence, so we keep the `keep_ratio`% samples at right, \n # i.e. we keep the range [drop_ratio, 100%]\n \n dropped_idx_set = sample_indices_to_drop(dataset, drop_criterion, indices_class, drop_ratio, 1.0)\n\n\n # re-indexing\n \n images_all = [x for i, x in enumerate(images_all) if i not in dropped_idx_set]\n print(\"Original:\", labels_all.shape[0], \"; Now:\", len(images_all), \"remain\")\n labels_all = [x for i, x in enumerate(labels_all) if i not in dropped_idx_set]\n\n indices_class = [[] for c in range(len(indices_class))]\n for i, lab in enumerate(labels_all):\n indices_class[lab].append(i)\n\n # for i, x in enumerate(indices_class):\n # print(\"Class\", i, \"remains\", len(x), \"samples\")\n\n images_all = torch.stack(images_all, dim=0)\n labels_all = torch.tensor(labels_all, dtype=torch.long, device=images_all.device)\n torch.cuda.empty_cache()\n\n return images_all, labels_all, indices_class"
}
] | import os
import time
import copy
import argparse
import numpy as np
import torch
import torch.nn as nn
import pdb
from torchvision.utils import save_image
from .utils import get_loops, get_dataset, get_network, get_eval_pool, evaluate_synset, get_daparam, match_loss, get_time, TensorDataset, epoch, DiffAugment, ParamDiffAug
from drop_utils import drop_samples
| 7,657 |
def main():
parser = argparse.ArgumentParser(description='Parameter Processing')
parser.add_argument('--method', type=str, default='DC', help='DC/DSA')
parser.add_argument('--dataset', type=str, default='CIFAR10', help='dataset')
parser.add_argument('--model', type=str, default='ConvNet', help='model')
parser.add_argument('--ipc', type=int, default=1, help='image(s) per class')
parser.add_argument('--eval_mode', type=str, default='S', help='eval_mode') # S: the same as the training model, M: multi architectures, W: net width, D: net depth, A: activation function, P: pooling layer, N: normalization layer,
parser.add_argument('--num_exp', type=int, default=5, help='the number of experiments')
parser.add_argument('--num_eval', type=int, default=20, help='the number of evaluating randomly initialized models')
parser.add_argument('--epoch_eval_train', type=int, default=300, help='epochs to train a model with synthetic data')
parser.add_argument('--Iteration', type=int, default=1000, help='training iterations')
parser.add_argument('--lr_img', type=float, default=0.1, help='learning rate for updating synthetic images')
parser.add_argument('--lr_net', type=float, default=0.01, help='learning rate for updating network parameters')
parser.add_argument('--batch_real', type=int, default=256, help='batch size for real data')
parser.add_argument('--batch_train', type=int, default=256, help='batch size for training networks')
parser.add_argument('--init', type=str, default='noise', help='noise/real: initialize synthetic images from random noise or randomly sampled real images.')
parser.add_argument('--dsa_strategy', type=str, default='None', help='differentiable Siamese augmentation strategy')
parser.add_argument('--data_path', type=str, default='data', help='dataset path')
parser.add_argument('--save_path', type=str, default='result', help='path to save results')
parser.add_argument('--dis_metric', type=str, default='ours', help='distance metric')
parser.add_argument('--drop_criterion', type=str, default='LossConverge_large', help='Criterion for data dropping')
parser.add_argument('--drop_ratio', type=float, default=0.0, help='The ratio to drop (for each class)')
args = parser.parse_args()
args.outer_loop, args.inner_loop = get_loops(args.ipc)
args.device = 'cuda' if torch.cuda.is_available() else 'cpu'
args.dsa_param = ParamDiffAug()
args.dsa = True if args.method == 'DSA' else False
if not os.path.exists(args.data_path):
os.mkdir(args.data_path)
if not os.path.exists(args.save_path):
os.mkdir(args.save_path)
eval_it_pool = np.arange(0, args.Iteration+1, 500).tolist() if args.eval_mode == 'S' or args.eval_mode == 'SS' else [args.Iteration] # The list of iterations when we evaluate models and record results.
print('eval_it_pool: ', eval_it_pool)
channel, im_size, num_classes, class_names, mean, std, dst_train, dst_test, testloader = get_dataset(args.dataset, args.data_path)
model_eval_pool = get_eval_pool(args.eval_mode, args.model, args.model)
accs_all_exps = dict() # record performances of all experiments
for key in model_eval_pool:
accs_all_exps[key] = []
data_save = []
for exp in range(args.num_exp):
print('\n================== Exp %d ==================\n '%exp)
print('Hyper-parameters: \n', args.__dict__)
print('Evaluation model pool: ', model_eval_pool)
''' organize the real dataset '''
images_all = []
labels_all = []
indices_class = [[] for c in range(num_classes)]
images_all = [torch.unsqueeze(dst_train[i][0], dim=0) for i in range(len(dst_train))]
labels_all = [dst_train[i][1] for i in range(len(dst_train))]
for i, lab in enumerate(labels_all):
indices_class[lab].append(i)
images_all = torch.cat(images_all, dim=0).to(args.device)
labels_all = torch.tensor(labels_all, dtype=torch.long, device=args.device)
images_all, labels_all, indices_class = drop_samples(
images_all, labels_all, indices_class,
args.dataset, args.drop_criterion, drop_ratio=args.drop_ratio)
for c in range(num_classes):
print('class c = %d: %d real images'%(c, len(indices_class[c])))
def get_images(c, n): # get random n images from class c
idx_shuffle = np.random.permutation(indices_class[c])[:n]
return images_all[idx_shuffle]
for ch in range(channel):
print('real images channel %d, mean = %.4f, std = %.4f'%(ch, torch.mean(images_all[:, ch]), torch.std(images_all[:, ch])))
''' initialize the synthetic data '''
image_syn = torch.randn(size=(num_classes*args.ipc, channel, im_size[0], im_size[1]), dtype=torch.float, requires_grad=True, device=args.device)
label_syn = torch.tensor([np.ones(args.ipc)*i for i in range(num_classes)], dtype=torch.long, requires_grad=False, device=args.device).view(-1) # [0,0,0, 1,1,1, ..., 9,9,9]
if args.init == 'real':
print('initialize synthetic data from random real images')
for c in range(num_classes):
image_syn.data[c*args.ipc:(c+1)*args.ipc] = get_images(c, args.ipc).detach().data
else:
print('initialize synthetic data from random noise')
''' training '''
optimizer_img = torch.optim.SGD([image_syn, ], lr=args.lr_img, momentum=0.5) # optimizer_img for synthetic data
optimizer_img.zero_grad()
criterion = nn.CrossEntropyLoss().to(args.device)
print('%s training begins'%get_time())
for it in range(args.Iteration+1):
''' Evaluate synthetic data '''
if it in eval_it_pool:
for model_eval in model_eval_pool:
print('-------------------------\nEvaluation\nmodel_train = %s, model_eval = %s, iteration = %d'%(args.model, model_eval, it))
if args.dsa:
args.epoch_eval_train = 1000
args.dc_aug_param = None
print('DSA augmentation strategy: \n', args.dsa_strategy)
print('DSA augmentation parameters: \n', args.dsa_param.__dict__)
else:
|
def main():
parser = argparse.ArgumentParser(description='Parameter Processing')
parser.add_argument('--method', type=str, default='DC', help='DC/DSA')
parser.add_argument('--dataset', type=str, default='CIFAR10', help='dataset')
parser.add_argument('--model', type=str, default='ConvNet', help='model')
parser.add_argument('--ipc', type=int, default=1, help='image(s) per class')
parser.add_argument('--eval_mode', type=str, default='S', help='eval_mode') # S: the same as the training model, M: multi architectures, W: net width, D: net depth, A: activation function, P: pooling layer, N: normalization layer,
parser.add_argument('--num_exp', type=int, default=5, help='the number of experiments')
parser.add_argument('--num_eval', type=int, default=20, help='the number of evaluating randomly initialized models')
parser.add_argument('--epoch_eval_train', type=int, default=300, help='epochs to train a model with synthetic data')
parser.add_argument('--Iteration', type=int, default=1000, help='training iterations')
parser.add_argument('--lr_img', type=float, default=0.1, help='learning rate for updating synthetic images')
parser.add_argument('--lr_net', type=float, default=0.01, help='learning rate for updating network parameters')
parser.add_argument('--batch_real', type=int, default=256, help='batch size for real data')
parser.add_argument('--batch_train', type=int, default=256, help='batch size for training networks')
parser.add_argument('--init', type=str, default='noise', help='noise/real: initialize synthetic images from random noise or randomly sampled real images.')
parser.add_argument('--dsa_strategy', type=str, default='None', help='differentiable Siamese augmentation strategy')
parser.add_argument('--data_path', type=str, default='data', help='dataset path')
parser.add_argument('--save_path', type=str, default='result', help='path to save results')
parser.add_argument('--dis_metric', type=str, default='ours', help='distance metric')
parser.add_argument('--drop_criterion', type=str, default='LossConverge_large', help='Criterion for data dropping')
parser.add_argument('--drop_ratio', type=float, default=0.0, help='The ratio to drop (for each class)')
args = parser.parse_args()
args.outer_loop, args.inner_loop = get_loops(args.ipc)
args.device = 'cuda' if torch.cuda.is_available() else 'cpu'
args.dsa_param = ParamDiffAug()
args.dsa = True if args.method == 'DSA' else False
if not os.path.exists(args.data_path):
os.mkdir(args.data_path)
if not os.path.exists(args.save_path):
os.mkdir(args.save_path)
eval_it_pool = np.arange(0, args.Iteration+1, 500).tolist() if args.eval_mode == 'S' or args.eval_mode == 'SS' else [args.Iteration] # The list of iterations when we evaluate models and record results.
print('eval_it_pool: ', eval_it_pool)
channel, im_size, num_classes, class_names, mean, std, dst_train, dst_test, testloader = get_dataset(args.dataset, args.data_path)
model_eval_pool = get_eval_pool(args.eval_mode, args.model, args.model)
accs_all_exps = dict() # record performances of all experiments
for key in model_eval_pool:
accs_all_exps[key] = []
data_save = []
for exp in range(args.num_exp):
print('\n================== Exp %d ==================\n '%exp)
print('Hyper-parameters: \n', args.__dict__)
print('Evaluation model pool: ', model_eval_pool)
''' organize the real dataset '''
images_all = []
labels_all = []
indices_class = [[] for c in range(num_classes)]
images_all = [torch.unsqueeze(dst_train[i][0], dim=0) for i in range(len(dst_train))]
labels_all = [dst_train[i][1] for i in range(len(dst_train))]
for i, lab in enumerate(labels_all):
indices_class[lab].append(i)
images_all = torch.cat(images_all, dim=0).to(args.device)
labels_all = torch.tensor(labels_all, dtype=torch.long, device=args.device)
images_all, labels_all, indices_class = drop_samples(
images_all, labels_all, indices_class,
args.dataset, args.drop_criterion, drop_ratio=args.drop_ratio)
for c in range(num_classes):
print('class c = %d: %d real images'%(c, len(indices_class[c])))
def get_images(c, n): # get random n images from class c
idx_shuffle = np.random.permutation(indices_class[c])[:n]
return images_all[idx_shuffle]
for ch in range(channel):
print('real images channel %d, mean = %.4f, std = %.4f'%(ch, torch.mean(images_all[:, ch]), torch.std(images_all[:, ch])))
''' initialize the synthetic data '''
image_syn = torch.randn(size=(num_classes*args.ipc, channel, im_size[0], im_size[1]), dtype=torch.float, requires_grad=True, device=args.device)
label_syn = torch.tensor([np.ones(args.ipc)*i for i in range(num_classes)], dtype=torch.long, requires_grad=False, device=args.device).view(-1) # [0,0,0, 1,1,1, ..., 9,9,9]
if args.init == 'real':
print('initialize synthetic data from random real images')
for c in range(num_classes):
image_syn.data[c*args.ipc:(c+1)*args.ipc] = get_images(c, args.ipc).detach().data
else:
print('initialize synthetic data from random noise')
''' training '''
optimizer_img = torch.optim.SGD([image_syn, ], lr=args.lr_img, momentum=0.5) # optimizer_img for synthetic data
optimizer_img.zero_grad()
criterion = nn.CrossEntropyLoss().to(args.device)
print('%s training begins'%get_time())
for it in range(args.Iteration+1):
''' Evaluate synthetic data '''
if it in eval_it_pool:
for model_eval in model_eval_pool:
print('-------------------------\nEvaluation\nmodel_train = %s, model_eval = %s, iteration = %d'%(args.model, model_eval, it))
if args.dsa:
args.epoch_eval_train = 1000
args.dc_aug_param = None
print('DSA augmentation strategy: \n', args.dsa_strategy)
print('DSA augmentation parameters: \n', args.dsa_param.__dict__)
else:
| args.dc_aug_param = get_daparam(args.dataset, args.model, model_eval, args.ipc) # This augmentation parameter set is only for DC method. It will be muted when args.dsa is True.
| 5 | 2023-11-03 09:34:15+00:00 | 12k |
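The condensation script in this example keeps a per-class index table (indices_class) over the flattened real dataset and draws random class-conditional batches through get_images. Below is a self-contained sketch of just that bookkeeping on small synthetic CPU tensors; the class count and image shape are arbitrary placeholders, only the indexing idiom comes from the code above.

import numpy as np
import torch

num_classes, n_per_class = 3, 5
images_all = torch.randn(num_classes * n_per_class, 1, 8, 8)
labels_all = torch.arange(num_classes).repeat_interleave(n_per_class)

# indices_class[c] holds the positions of all class-c samples in images_all
indices_class = [[] for _ in range(num_classes)]
for i, lab in enumerate(labels_all.tolist()):
    indices_class[lab].append(i)

def get_images(c, n):
    # random n images from class c, same idiom as in main() above
    idx_shuffle = np.random.permutation(indices_class[c])[:n]
    return images_all[idx_shuffle]

print(get_images(1, 2).shape)  # torch.Size([2, 1, 8, 8])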
gchada/ROAM | sim/rail_walker_interface/environment/joystick_real.py | [
{
"identifier": "WalkerEnvironment",
"path": "sim/rail_walker_interface/environment/env.py",
"snippet": "class WalkerEnvironment:\n @property\n def robot(self) -> BaseWalker:\n pass"
},
{
"identifier": "JoystickEnvironment",
"path": "sim/rail_walker_interface/environment/env.py",
"snippet": "class JoystickEnvironment:\n @property\n def joystick_policy(self) -> JoystickPolicy:\n pass\n\n def set_joystick_policy(self, joystick_policy: JoystickPolicy):\n pass\n\n @property\n def is_resetter_policy(self) -> bool:\n return False"
},
{
"identifier": "BaseWalker",
"path": "sim/rail_walker_interface/robot/robot.py",
"snippet": "class BaseWalker(Generic[_ObsT]):\n def __init__(\n self, \n name: Optional[str] = \"robot\", \n Kp: float = 5,\n Kd: float = 1,\n force_real_control_timestep : bool = False,\n limit_action_range : float = 1.0,\n power_protect_factor : float = 0.1\n ):\n assert limit_action_range > 0 and limit_action_range <= 1.0\n self.name = name\n self.Kp = Kp\n self.Kd = Kd\n self.force_real_control_timestep = force_real_control_timestep\n self._last_control_t = 0.0\n self.limit_action_range = limit_action_range\n self._power_protect_factor = power_protect_factor\n\n @property\n def is_real_robot(self) -> bool:\n return False\n\n @property\n def power_protect_factor(self) -> float:\n return self._power_protect_factor\n \n @power_protect_factor.setter\n def power_protect_factor(self, value: float) -> None:\n assert value >= 0 and value <= 1.0\n self._power_protect_factor = value\n\n \"\"\"\n The control_timestep is the time interval between two consecutive model control actions.\n \"\"\"\n @property\n def control_timestep(self) -> float:\n pass\n \n @property\n def action_interpolation(self) -> bool:\n pass\n\n \"\"\"\n The control_subtimestep is the time interval between two consecutive internal control actions. It will also be the physics timestep if in simulation.\n \"\"\"\n @property\n def control_subtimestep(self) -> float:\n pass\n\n def receive_observation(self) -> bool:\n pass\n\n @property\n def joint_qpos_init(self) -> np.ndarray:\n pass\n\n @property\n def joint_qpos_sitting(self) -> np.ndarray:\n pass\n\n @cached_property\n def joint_qpos_crouch(self) -> np.ndarray:\n return (self.joint_qpos_init + self.joint_qpos_sitting) / 2.0\n\n \"\"\"\n This property will be used to determine the standing range of qpos of the robot.\n \"\"\"\n @property\n def joint_qpos_offset(self) -> np.ndarray:\n pass\n\n @property\n def joint_qpos_mins(self) -> np.ndarray:\n pass\n\n @property\n def joint_qpos_maxs(self) -> np.ndarray:\n pass\n\n def reset(self) -> None:\n pass\n\n def get_3d_linear_velocity(self) -> np.ndarray:\n pass\n\n def get_3d_local_velocity(self) -> np.ndarray:\n pass\n\n def get_3d_angular_velocity(self) -> np.ndarray:\n pass\n\n def get_framequat_wijk(self) -> np.ndarray:\n pass\n\n def get_roll_pitch_yaw(self) -> np.ndarray:\n pass\n\n def get_last_observation(self) -> Optional[_ObsT]:\n pass\n\n def get_3d_acceleration_local(self) -> np.ndarray:\n pass\n\n def get_joint_qpos(self) -> np.ndarray:\n pass\n\n def get_joint_qvel(self) -> np.ndarray:\n pass\n\n def get_joint_qacc(self) -> np.ndarray:\n pass\n\n def get_joint_torques(self) -> np.ndarray:\n pass\n\n def _apply_action(self, action: np.ndarray) -> bool:\n pass\n\n def close(self) -> None:\n pass\n\n def __del__(self):\n self.close()\n \n @property\n def action_qpos_mins(self) -> np.ndarray:\n return (self.joint_qpos_mins - self.joint_qpos_init) * self.limit_action_range + self.joint_qpos_init\n \n @property\n def action_qpos_maxs(self) -> np.ndarray:\n return (self.joint_qpos_maxs - self.joint_qpos_init) * self.limit_action_range + self.joint_qpos_init\n\n def apply_action(self, action: np.ndarray) -> bool:\n action = np.clip(action, self.action_qpos_mins, self.action_qpos_maxs)\n \n if not self.force_real_control_timestep:\n return self._apply_action(action)\n else:\n t = time.time()\n dt = t - self._last_control_t\n if dt >= self.control_timestep:\n self._last_control_t = t\n return self._apply_action(action)\n else:\n time_to_sleep = self.control_timestep - dt\n time.sleep(time_to_sleep)\n self._last_control_t = t 
+ time_to_sleep\n return self._apply_action(action)\n\n def can_apply_action(self) -> bool:\n t = time.time()\n dt = t - self._last_control_t\n if (not self.force_real_control_timestep) or dt >= self.control_timestep:\n return True\n else:\n return False\n\n def async_apply_action(self, action: np.ndarray) -> bool:\n if self.can_apply_action():\n self._last_control_t = time.time()\n return self._apply_action(action)\n else:\n return False\n\n @cached_property\n def joint_nums(self) -> int:\n return len(self.joint_qpos_init)\n \n @cached_property\n def action_spec(self) -> gym.spaces.Box:\n return gym.spaces.Box(\n low=self.joint_qpos_mins, \n high=self.joint_qpos_maxs, \n shape=(self.joint_nums,),\n dtype=np.float32\n )\n\n def unwrapped(self):\n return self"
},
{
"identifier": "BaseWalkerWithFootContact",
"path": "sim/rail_walker_interface/robot/robot.py",
"snippet": "class BaseWalkerWithFootContact:\n def get_foot_contact(self) -> np.ndarray:\n pass\n\n def get_foot_force(self) -> np.ndarray:\n pass"
},
{
"identifier": "JoystickPolicy",
"path": "sim/rail_walker_interface/joystick_policy/joystick_policy.py",
"snippet": "class JoystickPolicy:\n def __init__(\n self,\n robot: BaseWalker,\n reward_provider: JoystickPolicyRewardProvider,\n target_yaw_provider: JoystickPolicyTargetProvider,\n termination_providers: list[JoystickPolicyTerminationConditionProvider],\n truncation_providers: list[JoystickPolicyTerminationConditionProvider],\n resetters: list[JoystickPolicyResetter],\n initializers: list[JoystickPolicyResetter] = [],\n target_observable: Optional[JoystickPolicyTargetObservable] = None,\n enabled_observables: list[str] = [\n \"joints_pos\",\n \"joints_vel\",\n \"imu\",\n \"sensors_local_velocimeter\",\n \"torques\",\n \"foot_contacts\",\n ],\n lock_target: bool = False,\n enable_target_custom_obs=True\n ):\n self.robot = robot\n self.reward_provider = reward_provider\n self.target_yaw_provider = target_yaw_provider\n self.termination_providers = termination_providers\n self.truncation_providers = truncation_providers\n self.resetters = resetters\n self.initializers = initializers\n self.target_observable = target_observable\n self.enabled_observables = enabled_observables\n self.lock_target = lock_target\n self.enable_target_custom_obs = enable_target_custom_obs\n\n # Temporary Variables\n self._step_target_qpos = self.robot.get_joint_qpos()\n\n # Set up task-specific variables\n self._target_goal_world_delta = np.zeros(2)\n self._target_goal_local = np.zeros(2)\n self._target_yaw = 0.0\n self._target_delta_yaw = 0.0\n self._target_velocity = 0.0\n self._target_custom_data = None\n self._rew_step = 0.0\n self._info_dict = {}\n self._has_after_after_step = False\n self._termination_reason: Optional[JoystickPolicyTerminationConditionProvider] = None\n self._truncation_reason: Optional[JoystickPolicyTerminationConditionProvider] = None\n self._inited = False\n\n @property\n def has_after_after_step(self) -> bool:\n return self._has_after_after_step\n\n @property\n def control_timestep(self) -> float:\n return self.robot.control_timestep\n\n @control_timestep.setter\n def control_timestep(self, value: float) -> None:\n self.robot.control_timestep = value\n\n @property\n def last_info(self) -> dict[str, Any]:\n return self._info_dict.copy()\n\n @property\n def control_subtimestep(self) -> float:\n return self.robot.control_subtimestep\n\n @control_subtimestep.setter\n def control_subtimestep(self, value: float) -> None:\n self.robot.control_subtimestep = value\n\n @property\n def target_yaw(self) -> float:\n return self._target_yaw\n\n @property\n def target_delta_yaw(self) -> float:\n return self._target_delta_yaw\n\n @property\n def target_goal_world_delta(self) -> np.ndarray:\n return self._target_goal_world_delta.copy()\n\n @property\n def target_goal_local(self) -> np.ndarray:\n return self._target_goal_local.copy()\n\n @property\n def target_custom_data(self) -> Optional[Any]:\n return self._target_custom_data\n\n @property\n def target_goal_world_delta_unit(self) -> np.ndarray:\n norm_goal = np.linalg.norm(self._target_goal_world_delta)\n if norm_goal == 0.0:\n return np.zeros(2)\n else:\n return self._target_goal_world_delta / norm_goal\n\n @property\n def target_goal_local_unit(self) -> np.ndarray:\n norm_goal = np.linalg.norm(self._target_goal_local)\n if norm_goal == 0.0:\n return np.zeros(2)\n else:\n return self._target_goal_local / norm_goal\n\n def __update_target(self) -> float:\n new_target_goal_world_delta = self.target_yaw_provider.get_target_goal_world_delta(self.robot)[:2]\n new_target_velocity = self.target_yaw_provider.get_target_velocity(self.robot)\n _, _, yaw = 
self.robot.get_roll_pitch_yaw()\n inv_rotation_mat = np.array([\n [np.cos(yaw), np.sin(yaw)],\n [-np.sin(yaw), np.cos(yaw)]\n ])\n new_target_goal_local = inv_rotation_mat @ new_target_goal_world_delta\n\n new_target_yaw = np.arctan2(new_target_goal_world_delta[1], new_target_goal_world_delta[0]) if np.linalg.norm(\n new_target_goal_world_delta) > 0.0 else 0.0\n new_target_delta_yaw = normalize_rad(new_target_yaw - self.robot.get_roll_pitch_yaw()[2])\n change_in_abs_target_delta_yaw = self.__get_change_in_abs_target_delta_yaw()\n\n self._info_dict[\"target_yaw\"] = new_target_yaw\n self._info_dict[\"target_delta_yaw\"] = new_target_delta_yaw\n self._info_dict[\"target_goal_local_x\"] = new_target_goal_local[0]\n self._info_dict[\"target_goal_local_y\"] = new_target_goal_local[1]\n self._info_dict[\"target_goal_world_delta_x\"] = new_target_goal_world_delta[0]\n self._info_dict[\"target_goal_world_delta_y\"] = new_target_goal_world_delta[1]\n self._info_dict[\"change_in_abs_target_delta_yaw\"] = change_in_abs_target_delta_yaw\n self._info_dict[\"abs_target_delta_yaw\"] = np.abs(new_target_delta_yaw)\n self._info_dict[\"target_velocity\"] = new_target_velocity\n\n self._target_yaw = new_target_yaw\n self._target_delta_yaw = new_target_delta_yaw\n self._target_goal_local = new_target_goal_local\n self._target_goal_world_delta = new_target_goal_world_delta\n self._target_custom_data = self.target_yaw_provider.get_target_custom_data()\n self._target_velocity = new_target_velocity\n return change_in_abs_target_delta_yaw\n\n def __get_change_in_abs_target_delta_yaw(self) -> float:\n new_target_delta_yaw = normalize_rad(self.target_yaw - self.robot.get_roll_pitch_yaw()[2])\n change_in_abs_target_delta_yaw = np.abs(new_target_delta_yaw) - np.abs(self._target_delta_yaw)\n return change_in_abs_target_delta_yaw\n\n def before_step(\n self,\n action: np.ndarray,\n random_state: np.random.RandomState\n ):\n self._step_target_qpos = action\n self.robot.apply_action(action)\n\n def get_reward(\n self\n ):\n return self._rew_step\n\n def get_reward_final(self):\n return self._rew_step_final\n\n def after_step(\n self,\n random_state: np.random.RandomState\n ) -> dict[str, Any]:\n self._info_dict = {}\n self.robot.receive_observation()\n\n # Update the target yaw\n self.target_yaw_provider.step_target(\n self.robot,\n self._info_dict,\n random_state\n )\n self._has_after_after_step = self.target_yaw_provider.has_target_changed()\n if not self.lock_target and self._has_after_after_step:\n change_in_abs_target_delta_yaw = self.after_after_step(\n random_state\n )\n else:\n change_in_abs_target_delta_yaw = self.__update_target()\n\n # Gather info about velocity\n robot_v = self.robot.get_3d_linear_velocity()\n robot_v_norm = np.linalg.norm(robot_v)\n robot_v_to_goal = np.dot(\n robot_v[:2], self.target_goal_world_delta_unit\n )\n robot_v_local = self.robot.get_3d_local_velocity()\n robot_rpy = self.robot.get_roll_pitch_yaw()\n self._info_dict[\"velocity_norm\"] = robot_v_norm\n self._info_dict[\"velocity_to_goal\"] = robot_v_to_goal\n self._info_dict[\"velocity_local_x\"] = robot_v_local[0]\n self._info_dict[\"velocity_local_y\"] = robot_v_local[1]\n self._info_dict[\"velocity_local_z\"] = robot_v_local[2]\n self._info_dict[\"roll\"] = robot_rpy[0]\n self._info_dict[\"pitch\"] = robot_rpy[1]\n self._info_dict[\"yaw\"] = robot_rpy[2]\n self._info_dict[\"joint_torques\"] = np.mean(np.abs(self.robot.get_joint_torques()))\n self._info_dict[\"joint_qvels\"] = np.mean(np.abs(self.robot.get_joint_qvel()))\n 
self._info_dict[\"joint_qaccs\"] = np.mean(np.abs(self.robot.get_joint_qacc()))\n self._info_dict[\"joint_velocities\"] = np.mean(np.abs(self.robot.get_joint_qvel()))\n if hasattr(self.robot, \"get_foot_force\"):\n foot_force: np.ndarray = self.robot.get_foot_force()\n if foot_force.shape == (4,):\n foot_force_names = [\"FR\", \"FL\", \"RR\", \"RL\"]\n else:\n foot_force_names = list(range(foot_force.shape[0]))\n for i in range(len(foot_force_names)):\n self._info_dict[\"foot_force_\" + foot_force_names[i]] = foot_force[i]\n\n self.reward_provider.step_reward(\n self.robot,\n self._step_target_qpos,\n self.target_goal_world_delta,\n self.target_goal_local,\n self.target_yaw,\n self.target_delta_yaw,\n self._target_velocity,\n robot_v_to_goal,\n change_in_abs_target_delta_yaw,\n self._target_custom_data,\n self.enable_target_custom_obs,\n self._info_dict,\n random_state\n )\n reward_perstep = self.reward_provider.get_reward()\n # assert reward_perstep is not None and reward_perstep != np.nan\n self._info_dict[\"reward_perstep\"] = reward_perstep\n self._rew_step = reward_perstep\n self._rew_step_final = self.reward_provider.get_reward_final()\n\n # Step the target yaw observable\n if self.target_observable is not None:\n self.target_observable.step_target_obs(\n self.robot,\n self.target_goal_world_delta,\n self.target_goal_local,\n self.target_yaw,\n self.target_delta_yaw,\n self._target_velocity,\n robot_v_to_goal,\n change_in_abs_target_delta_yaw,\n self._target_custom_data,\n self.enable_target_custom_obs,\n self._info_dict,\n random_state\n )\n\n # Step resetters\n for resetter in self.resetters:\n resetter.step_resetter(\n self.robot,\n self.target_goal_world_delta,\n self.target_goal_local,\n self.target_yaw,\n self.target_delta_yaw,\n self._target_velocity,\n robot_v_to_goal,\n change_in_abs_target_delta_yaw,\n self._target_custom_data,\n self.enable_target_custom_obs,\n self._info_dict,\n random_state\n )\n\n # Step termination providers\n for termination_provider in self.termination_providers:\n termination_provider.step_termination_condition(\n self.robot,\n self.target_goal_world_delta,\n self.target_goal_local,\n self.target_yaw,\n self.target_delta_yaw,\n self._target_velocity,\n robot_v_to_goal,\n change_in_abs_target_delta_yaw,\n self._target_custom_data,\n self.enable_target_custom_obs,\n self._info_dict,\n random_state\n )\n if termination_provider.should_terminate():\n print(\"Termination provider\", termination_provider, \"terminated the episode\")\n self._termination_reason = termination_provider\n break\n\n # Step truncaiton providers\n for truncation_provider in self.truncation_providers:\n truncation_provider.step_termination_condition(\n self.robot,\n self.target_goal_world_delta,\n self.target_goal_local,\n self.target_yaw,\n self.target_delta_yaw,\n self._target_velocity,\n robot_v_to_goal,\n change_in_abs_target_delta_yaw,\n self._target_custom_data,\n self.enable_target_custom_obs,\n self._info_dict,\n random_state\n )\n if truncation_provider.should_terminate():\n print(\"Truncation provider\", truncation_provider, \"truncated the episode\")\n self._truncation_reason = truncation_provider\n break\n\n return self._info_dict.copy()\n\n def after_after_step(\n self,\n random_state: np.random.RandomState\n ):\n if self._has_after_after_step:\n self.target_yaw_provider.after_step_target(\n self.robot,\n self._info_dict,\n random_state\n )\n change_in_abs_target_delta_yaw = self.__update_target()\n robot_v = self.robot.get_3d_linear_velocity()\n robot_v_to_goal = 
np.dot(\n robot_v[:2], self.target_goal_world_delta_unit\n )\n # Step the target yaw observable\n if self.target_observable is not None:\n self.target_observable.step_target_obs(\n self.robot,\n self.target_goal_world_delta,\n self.target_goal_local,\n self.target_yaw,\n self.target_delta_yaw,\n self._target_velocity,\n robot_v_to_goal,\n change_in_abs_target_delta_yaw,\n self._target_custom_data,\n self.enable_target_custom_obs,\n self._info_dict,\n random_state\n )\n\n # self.reward_provider.step_ex(\n # self.robot,\n # self.target_goal_world_delta,\n # self.target_goal_local,\n # self.target_yaw,\n # self.target_delta_yaw,\n # robot_v_to_goal,\n # change_in_abs_target_delta_yaw,\n # self._target_custom_data,\n # self.enable_target_custom_obs,\n # self._info_dict,\n # random_state\n # )\n # reward_perstep = self.reward_provider.get_reward()\n # #assert reward_perstep is not None and reward_perstep != np.nan\n # self._rew_step = reward_perstep\n\n self._has_after_after_step = False\n return change_in_abs_target_delta_yaw\n else:\n return 0.0\n\n def reset(self, random_state: np.random.RandomState) -> dict[str, Any]:\n self.robot.receive_observation()\n # Reset the info dict\n self._info_dict = {}\n\n # Reset the task-specific variables\n self._target_yaw = 0.0\n self._target_delta_yaw = 0.0\n self._has_after_after_step = False\n\n if not self._inited:\n self._inited = True\n for initializer in self.initializers:\n initializer.perform_reset(\n self.robot,\n self._info_dict,\n self._termination_reason,\n random_state\n )\n\n # call the resetters\n for resetter in self.resetters:\n resetter.perform_reset(\n self.robot,\n self._info_dict,\n self._termination_reason,\n random_state\n )\n\n # Reset the target yaw provider\n self.target_yaw_provider.reset_target(\n self.robot,\n self._info_dict,\n self._termination_reason,\n random_state\n )\n self.__update_target()\n\n # Reset target yaw obs\n if self.target_observable is not None:\n self.target_observable.reset_target_obs(\n self.robot,\n self.target_goal_world_delta,\n self.target_goal_local,\n self.target_yaw,\n self.target_delta_yaw,\n self._target_velocity,\n self._info_dict,\n self._target_custom_data,\n self.enable_target_custom_obs,\n self._termination_reason,\n random_state\n )\n\n # Reset reward provider\n self.reward_provider.reset_reward(\n self.robot,\n self._info_dict,\n self._termination_reason,\n random_state\n )\n\n # Reset termination providers\n for termination_provider in self.termination_providers:\n termination_provider.reset_termination_condition(\n self.robot,\n self._info_dict,\n self._termination_reason,\n random_state\n )\n\n # Reset truncation providers\n for truncation_provider in self.truncation_providers:\n truncation_provider.reset_termination_condition(\n self.robot,\n self._info_dict,\n self._termination_reason,\n random_state\n )\n\n self._termination_reason = None\n self._truncation_reason = None\n self._rew_step = 0.0\n\n # Reset the robot\n self.robot.reset()\n self.robot.receive_observation()\n\n for resetter in self.resetters:\n if hasattr(resetter, \"last_position\"):\n resetter.perform_reset(\n self.robot,\n self._info_dict,\n self._termination_reason,\n random_state\n )\n\n return self._info_dict.copy()\n\n def should_terminate(self) -> bool:\n return self._termination_reason is not None\n\n def should_truncate(self) -> bool:\n return self._truncation_reason is not None"
}
] | import gym
import gym.spaces
import numpy as np
import copy
from .env import WalkerEnvironment, JoystickEnvironment
from ..robot import BaseWalker, BaseWalkerWithFootContact
from ..joystick_policy import JoystickPolicy
from functools import cached_property
from typing import Optional, Any
from collections import OrderedDict | 7,316 | if isinstance(self.env.robot.unwrapped(), BaseWalkerWithFootContact):
ret_dict["robot/foot_forces"] = gym.spaces.Box(
low=-np.inf,
high=np.inf,
shape=(4,),
dtype=np.float32
)
ret_dict["robot/foot_forces_normalized"] = gym.spaces.Box(
low=-np.inf,
high=np.inf,
shape=(4,),
dtype=np.float32
)
ret_dict["robot/foot_forces_normalized_masked"] = gym.spaces.Box(
low=-np.inf,
high=np.inf,
shape=(4,),
dtype=np.float32
)
ret_dict["robot/foot_contacts"] = gym.spaces.Box( # should use MultiBinary but flatten() does not support having multibinary / box spaces in a Dict
low=0,
high=1,
shape=(4,),
dtype=np.float32
)
return gym.spaces.Dict(ret_dict)
def extract_observation(self) -> dict[str,Any]:
roll, pitch, yaw = self.env.robot.get_roll_pitch_yaw()
dr, dp, dy = self.env.robot.get_3d_angular_velocity()
imu = np.array([roll, pitch, dr, dp], dtype=np.float32)
ret_dict = {
"robot/joints_pos": self.env.robot.get_joint_qpos(),
"robot/joints_vel": self.env.robot.get_joint_qvel(),
"robot/imu": imu,
"robot/sensors_gyro": self.env.robot.get_3d_angular_velocity(),
"robot/sensors_framequat": self.env.robot.get_framequat_wijk(),
"robot/torques": self.env.robot.get_joint_torques(),
"robot/sensors_local_velocimeter": self.env.robot.get_3d_local_velocity(),
"robot/sensors_accelerometer": self.env.robot.get_3d_acceleration_local(),
}
if isinstance(self.env.robot.unwrapped(), BaseWalkerWithFootContact):
ret_dict["robot/foot_forces"] = self.env.robot.get_foot_force()
ret_dict["robot/foot_contacts"] = self.env.robot.get_foot_contact()
if hasattr(self.env.robot, "foot_contact_no_contact_threshold") and hasattr(self.env.robot, "foot_contact_has_contact_threshold"):
ret_dict["robot/foot_forces_normalized"] = (ret_dict["robot/foot_forces"] - self.env.robot.foot_contact_no_contact_threshold) / (self.env.robot.foot_contact_has_contact_threshold - self.env.robot.foot_contact_no_contact_threshold)
else:
ret_dict["robot/foot_forces_normalized"] = ret_dict["robot/foot_forces"]
masked_foot_forces = ret_dict["robot/foot_forces_normalized"].copy()
masked_foot_forces[-1] = 0.0
ret_dict["robot/foot_forces_normalized_masked"] = masked_foot_forces
return ret_dict
class JoystickEnvImpl(gym.Env[dict[str,Any],np.ndarray], WalkerEnvironment, JoystickEnvironment):
metadata = {
"render_modes": []
}
def __init__(
self,
joystick_policy : JoystickPolicy
):
gym.Env.__init__(self)
WalkerEnvironment.__init__(self)
JoystickEnvironment.__init__(self)
# ====================== Store Parameters ======================
self._joystick_policy = joystick_policy
self.obs_extractor = JoystickEnvObservationExtractor(self)
self.random_state = np.random.RandomState()
@property
def action_space(self) -> gym.spaces.Box:
return gym.spaces.Box(
low=self.robot.action_qpos_mins,
high=self.robot.action_qpos_maxs,
dtype=np.float32
)
@property
def observation_space(self) -> gym.spaces.Dict:
robot_space = self.obs_extractor.observation_spec
real_obs_space = {}
for key, space in robot_space.items():
if key.startswith("robot/") and key[len("robot/"):] in self.joystick_policy.enabled_observables:
real_obs_space[key] = space
if self.joystick_policy.target_observable is not None:
real_obs_space["target_obs"] = self.joystick_policy.target_observable.get_observation_spec()
if not self.joystick_policy.target_yaw_provider.is_target_velocity_fixed():
real_obs_space["target_vel"] = gym.spaces.Box(
low = 0.0,
high = np.inf,
shape=(1,)
)
target_custom_data_spec = self.joystick_policy.target_yaw_provider.get_target_custom_data_observable_spec()
if self.joystick_policy.enable_target_custom_obs and target_custom_data_spec is not None:
real_obs_space["target_custom"] = target_custom_data_spec
# Enforce order
real_obs_space = OrderedDict(sorted(real_obs_space.items(), key=lambda t: t[0]))
obs_space = gym.spaces.Dict(real_obs_space)
return obs_space
@property
def joystick_policy(self) -> JoystickPolicy:
return self._joystick_policy
def set_joystick_policy(self, joystick_policy: JoystickPolicy):
self._joystick_policy = joystick_policy
@property
def is_resetter_policy(self) -> bool:
return False
@property
|
class JoystickEnvObservationExtractor:
def __init__(self, env : "JoystickEnvImpl"):
self.env = env
@cached_property
def observation_spec(self) -> gym.spaces.Dict:
ret_dict = {
"robot/joints_pos": gym.spaces.Box(
low=self.env.robot.joint_qpos_mins,
high=self.env.robot.joint_qpos_maxs,
shape=(self.env.robot.joint_nums,),
dtype=np.float32
),
"robot/joints_vel": gym.spaces.Box(
low=-np.inf,
high=np.inf,
shape=(self.env.robot.joint_nums,),
dtype=np.float32
),
"robot/imu": gym.spaces.Box(
low=-np.inf,
high=np.inf,
shape=(4,),
),
"robot/sensors_gyro": gym.spaces.Box(
low=-np.inf,
high=np.inf,
shape=(3,),
dtype=np.float32
),
"robot/sensors_framequat": gym.spaces.Box(
low=-1.0,
high=1.0,
shape=(4,),
dtype=np.float32
),
"robot/torques": gym.spaces.Box(
low=-np.inf,
high=np.inf,
shape=(self.env.robot.joint_nums,),
dtype=np.float32
),
"robot/sensors_local_velocimeter": gym.spaces.Box(
low=-np.inf,
high=np.inf,
shape=(3,),
dtype=np.float32
),
"robot/sensors_accelerometer": gym.spaces.Box(
low=-np.inf,
high=np.inf,
shape=(3,),
dtype=np.float32
),
}
if isinstance(self.env.robot.unwrapped(), BaseWalkerWithFootContact):
ret_dict["robot/foot_forces"] = gym.spaces.Box(
low=-np.inf,
high=np.inf,
shape=(4,),
dtype=np.float32
)
ret_dict["robot/foot_forces_normalized"] = gym.spaces.Box(
low=-np.inf,
high=np.inf,
shape=(4,),
dtype=np.float32
)
ret_dict["robot/foot_forces_normalized_masked"] = gym.spaces.Box(
low=-np.inf,
high=np.inf,
shape=(4,),
dtype=np.float32
)
ret_dict["robot/foot_contacts"] = gym.spaces.Box( # should use MultiBinary but flatten() does not support having multibinary / box spaces in a Dict
low=0,
high=1,
shape=(4,),
dtype=np.float32
)
return gym.spaces.Dict(ret_dict)
def extract_observation(self) -> dict[str,Any]:
roll, pitch, yaw = self.env.robot.get_roll_pitch_yaw()
dr, dp, dy = self.env.robot.get_3d_angular_velocity()
imu = np.array([roll, pitch, dr, dp], dtype=np.float32)
ret_dict = {
"robot/joints_pos": self.env.robot.get_joint_qpos(),
"robot/joints_vel": self.env.robot.get_joint_qvel(),
"robot/imu": imu,
"robot/sensors_gyro": self.env.robot.get_3d_angular_velocity(),
"robot/sensors_framequat": self.env.robot.get_framequat_wijk(),
"robot/torques": self.env.robot.get_joint_torques(),
"robot/sensors_local_velocimeter": self.env.robot.get_3d_local_velocity(),
"robot/sensors_accelerometer": self.env.robot.get_3d_acceleration_local(),
}
if isinstance(self.env.robot.unwrapped(), BaseWalkerWithFootContact):
ret_dict["robot/foot_forces"] = self.env.robot.get_foot_force()
ret_dict["robot/foot_contacts"] = self.env.robot.get_foot_contact()
if hasattr(self.env.robot, "foot_contact_no_contact_threshold") and hasattr(self.env.robot, "foot_contact_has_contact_threshold"):
ret_dict["robot/foot_forces_normalized"] = (ret_dict["robot/foot_forces"] - self.env.robot.foot_contact_no_contact_threshold) / (self.env.robot.foot_contact_has_contact_threshold - self.env.robot.foot_contact_no_contact_threshold)
else:
ret_dict["robot/foot_forces_normalized"] = ret_dict["robot/foot_forces"]
masked_foot_forces = ret_dict["robot/foot_forces_normalized"].copy()
masked_foot_forces[-1] = 0.0
ret_dict["robot/foot_forces_normalized_masked"] = masked_foot_forces
return ret_dict
class JoystickEnvImpl(gym.Env[dict[str,Any],np.ndarray], WalkerEnvironment, JoystickEnvironment):
metadata = {
"render_modes": []
}
def __init__(
self,
joystick_policy : JoystickPolicy
):
gym.Env.__init__(self)
WalkerEnvironment.__init__(self)
JoystickEnvironment.__init__(self)
# ====================== Store Parameters ======================
self._joystick_policy = joystick_policy
self.obs_extractor = JoystickEnvObservationExtractor(self)
self.random_state = np.random.RandomState()
@property
def action_space(self) -> gym.spaces.Box:
return gym.spaces.Box(
low=self.robot.action_qpos_mins,
high=self.robot.action_qpos_maxs,
dtype=np.float32
)
@property
def observation_space(self) -> gym.spaces.Dict:
robot_space = self.obs_extractor.observation_spec
real_obs_space = {}
for key, space in robot_space.items():
if key.startswith("robot/") and key[len("robot/"):] in self.joystick_policy.enabled_observables:
real_obs_space[key] = space
if self.joystick_policy.target_observable is not None:
real_obs_space["target_obs"] = self.joystick_policy.target_observable.get_observation_spec()
if not self.joystick_policy.target_yaw_provider.is_target_velocity_fixed():
real_obs_space["target_vel"] = gym.spaces.Box(
low = 0.0,
high = np.inf,
shape=(1,)
)
target_custom_data_spec = self.joystick_policy.target_yaw_provider.get_target_custom_data_observable_spec()
if self.joystick_policy.enable_target_custom_obs and target_custom_data_spec is not None:
real_obs_space["target_custom"] = target_custom_data_spec
# Enforce order
real_obs_space = OrderedDict(sorted(real_obs_space.items(), key=lambda t: t[0]))
obs_space = gym.spaces.Dict(real_obs_space)
return obs_space
@property
def joystick_policy(self) -> JoystickPolicy:
return self._joystick_policy
def set_joystick_policy(self, joystick_policy: JoystickPolicy):
self._joystick_policy = joystick_policy
@property
def is_resetter_policy(self) -> bool:
return False
@property | def robot(self) -> BaseWalker: | 2 | 2023-11-02 23:21:38+00:00 | 12k |
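The row above closes with its remaining fields: the target line to be predicted (def robot(self) -> BaseWalker:), an integer 2, a creation timestamp, and what appears to be a 12k context-length bucket. As a rough illustration only (the key names row["context"], row["import_statement"], row["cropped_code"] and row["next_line"] are assumed here for readability, not taken from the dump itself), such a row can be flattened into a prompt/target pair for next-line prediction:

def build_example(row: dict) -> tuple[str, str]:
    # Cross-file context: each entry carries an identifier, its source path, and a snippet.
    context_blocks = [
        "# {} :: {}\n{}".format(c["path"], c["identifier"], c["snippet"])
        for c in row["context"]
    ]
    # Prompt = retrieved snippets, then the file's import block, then the in-file prefix.
    prompt = "\n\n".join(context_blocks + [row["import_statement"], row["cropped_code"]])
    # Target = the single line the model is asked to produce next.
    return prompt, row["next_line"]

How the snippets are ordered or truncated before prompting is a design choice the dump does not specify; the concatenation above is just one plausible arrangement.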
UMass-Foundation-Model/genome | engine/viper/base_models/xvlm/xvlm.py | [
{
"identifier": "VisionTransformer",
"path": "engine/viper/base_models/xvlm/vit.py",
"snippet": "class VisionTransformer(nn.Module):\n \"\"\" Vision Transformer\n A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` -\n https://arxiv.org/abs/2010.11929\n \"\"\"\n def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,\n num_heads=12, mlp_ratio=4., qkv_bias=True, qk_scale=None, representation_size=None,\n drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=None, local_attn_depth=0):\n \"\"\"\n Args:\n img_size (int, tuple): input image size\n patch_size (int, tuple): patch size\n in_chans (int): number of input channels\n num_classes (int): number of classes for classification head\n embed_dim (int): embedding dimension\n depth (int): depth of transformer\n num_heads (int): number of attention heads\n mlp_ratio (int): ratio of mlp hidden dim to embedding dim\n qkv_bias (bool): enable bias for qkv if True\n qk_scale (float): override default qk scale of head_dim ** -0.5 if set\n representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set\n drop_rate (float): dropout rate\n attn_drop_rate (float): attention dropout rate\n drop_path_rate (float): stochastic depth rate\n norm_layer: (nn.Module): normalization layer\n \"\"\"\n super().__init__()\n self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models\n norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)\n\n self.patch_embed = PatchEmbed(\n img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)\n\n self.num_patch_embed = self.patch_embed.num_patches\n\n self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))\n\n self.num_pos_embed = self.num_patch_embed + 1\n self.pos_embed = nn.Parameter(torch.zeros(1, self.num_pos_embed, embed_dim))\n\n self.pos_drop = nn.Dropout(p=drop_rate)\n\n dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule\n self.blocks = nn.ModuleList([\n Block(\n dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,\n drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer)\n for i in range(depth)])\n\n self.depth = depth\n self.local_attn_depth = local_attn_depth # do local attn from index=(depth - local_attn_depth)\n\n self.norm = norm_layer(embed_dim)\n\n trunc_normal_(self.pos_embed, std=.02)\n trunc_normal_(self.cls_token, std=.02)\n self.apply(self._init_weights)\n\n def _init_weights(self, m):\n if isinstance(m, nn.Linear):\n trunc_normal_(m.weight, std=.02)\n if isinstance(m, nn.Linear) and m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.LayerNorm):\n nn.init.constant_(m.bias, 0)\n nn.init.constant_(m.weight, 1.0)\n\n @torch.jit.ignore\n def no_weight_decay(self):\n return {'pos_embed', 'cls_token'}\n\n def forward(self, x, register_blk=-1, idx_to_group_img=None, image_atts=None):\n\n B = x.shape[0]\n x = self.patch_embed(x)\n\n cls_tokens = self.cls_token.expand(B, -1, -1) # stole cls_tokens impl from Phil Wang, thanks\n x = torch.cat((cls_tokens, x), dim=1)\n \n x = x + self.pos_embed[:,:x.size(1),:]\n x = self.pos_drop(x)\n\n do_gather = True if idx_to_group_img is not None else False\n\n if do_gather and (image_atts is not None):\n full_atts = torch.ones(x.shape[:2], dtype=x.dtype).to(x.device)\n image_atts_blk = torch.cat([image_atts, full_atts], dim=0)\n\n image_atts_blk = image_atts_blk.unsqueeze(1).unsqueeze(2)\n image_atts_blk = (1.0 - 
image_atts_blk) * -10000.0\n else:\n image_atts_blk = None\n\n for i, blk in enumerate(self.blocks):\n if (self.local_attn_depth > 0) and (i >= self.depth-self.local_attn_depth):\n if do_gather:\n do_gather = False\n\n x_bs = torch.gather(x, dim=0, index=idx_to_group_img.view(-1, 1, 1).expand(-1, x.shape[1], x.shape[2]))\n x = torch.cat([x_bs, x], dim=0)\n\n x = blk(x, register_blk == i, image_atts=image_atts_blk)\n\n else:\n x = blk(x, register_blk==i, image_atts=None)\n\n x = self.norm(x)\n\n if idx_to_group_img is not None:\n bs = len(idx_to_group_img)\n x_bs, x_fullatts = torch.split(x, [bs, x.size(0)-bs])\n return x_bs, x_fullatts\n\n return x"
},
{
"identifier": "interpolate_pos_embed",
"path": "engine/viper/base_models/xvlm/vit.py",
"snippet": "def interpolate_pos_embed(pos_embed_checkpoint, num_patches, num_extra_tokens=1):\n # num_patches = visual_encoder.num_patch_embed\n # num_extra_tokens = visual_encoder.num_pos_embed - visual_encoder.num_patch_embed\n\n # interpolate position embedding\n embedding_size = pos_embed_checkpoint.shape[-1]\n # height (== width) for the checkpoint position embedding\n orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)\n # height (== width) for the new position embedding\n new_size = int(num_patches ** 0.5)\n\n if orig_size != new_size:\n # class_token and dist_token are kept unchanged\n extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]\n # only the position tokens are interpolated\n pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]\n pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)\n pos_tokens = torch.nn.functional.interpolate(\n pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)\n pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)\n new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)\n # print('reshape position embedding from %d to %d' % (orig_size ** 2, new_size ** 2))\n\n return new_pos_embed\n else:\n return pos_embed_checkpoint"
},
{
"identifier": "SwinTransformer",
"path": "engine/viper/base_models/xvlm/swin_transformer.py",
"snippet": "class SwinTransformer(nn.Module):\n r\"\"\" Swin Transformer\n A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` -\n https://arxiv.org/pdf/2103.14030\n\n Args:\n img_size (int | tuple(int)): Input image size. Default 224\n patch_size (int | tuple(int)): Patch size. Default: 4\n in_chans (int): Number of input image channels. Default: 3\n num_classes (int): Number of classes for classification head. Default: 1000\n embed_dim (int): Patch embedding dimension. Default: 96\n depths (tuple(int)): Depth of each Swin Transformer layer.\n num_heads (tuple(int)): Number of attention heads in different layers.\n window_size (int): Window size. Default: 7\n mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4\n qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True\n qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None\n drop_rate (float): Dropout rate. Default: 0\n attn_drop_rate (float): Attention dropout rate. Default: 0\n drop_path_rate (float): Stochastic depth rate. Default: 0.1\n norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.\n ape (bool): If True, add absolute position embedding to the patch embedding. Default: False\n patch_norm (bool): If True, add normalization after patch embedding. Default: True\n use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False\n \"\"\"\n\n def __init__(self, img_size=224, patch_size=4, in_chans=3, num_classes=1000,\n embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24],\n window_size=7, mlp_ratio=4., qkv_bias=True, qk_scale=None,\n drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,\n norm_layer=nn.LayerNorm, ape=False, patch_norm=True,\n use_checkpoint=False, **kwargs):\n super().__init__()\n\n self.num_classes = num_classes\n self.num_layers = len(depths)\n self.embed_dim = embed_dim\n self.ape = ape\n self.patch_norm = patch_norm\n self.num_features = int(embed_dim * 2 ** (self.num_layers - 1))\n self.mlp_ratio = mlp_ratio\n\n # split image into non-overlapping patches\n self.patch_embed = PatchEmbed(\n img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim,\n norm_layer=norm_layer if self.patch_norm else None)\n num_patches = self.patch_embed.num_patches\n patches_resolution = self.patch_embed.patches_resolution\n self.patches_resolution = patches_resolution\n\n # absolute position embedding\n if self.ape:\n self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))\n trunc_normal_(self.absolute_pos_embed, std=.02)\n\n self.pos_drop = nn.Dropout(p=drop_rate)\n\n # stochastic depth\n dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule\n\n # build layers\n self.layers = nn.ModuleList()\n for i_layer in range(self.num_layers):\n layer = BasicLayer(dim=int(embed_dim * 2 ** i_layer),\n input_resolution=(patches_resolution[0] // (2 ** i_layer),\n patches_resolution[1] // (2 ** i_layer)),\n depth=depths[i_layer],\n num_heads=num_heads[i_layer],\n window_size=window_size,\n mlp_ratio=self.mlp_ratio,\n qkv_bias=qkv_bias, qk_scale=qk_scale,\n drop=drop_rate, attn_drop=attn_drop_rate,\n drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],\n norm_layer=norm_layer,\n downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,\n use_checkpoint=use_checkpoint)\n self.layers.append(layer)\n\n self.norm = norm_layer(self.num_features)\n self.avgpool = 
nn.AdaptiveAvgPool1d(1)\n # self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()\n\n self.apply(self._init_weights)\n\n def _init_weights(self, m):\n if isinstance(m, nn.Linear):\n trunc_normal_(m.weight, std=.02)\n if isinstance(m, nn.Linear) and m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.LayerNorm):\n nn.init.constant_(m.bias, 0)\n nn.init.constant_(m.weight, 1.0)\n\n @torch.jit.ignore\n def no_weight_decay(self):\n return {'absolute_pos_embed'}\n\n @torch.jit.ignore\n def no_weight_decay_keywords(self):\n return {'relative_position_bias_table'}\n\n def forward(self, x, idx_to_group_img=None, image_atts=None, **kwargs):\n x = self.patch_embed(x)\n if self.ape:\n x = x + self.absolute_pos_embed\n x = self.pos_drop(x)\n\n for layer in self.layers:\n x = layer(x)\n\n x = self.norm(x) # B L C\n\n x_cls = self.avgpool(x.transpose(1, 2)) # B C 1\n\n if idx_to_group_img is None:\n return torch.cat([x_cls.transpose(1, 2), x], dim=1)\n else:\n x_bs = torch.gather(x, dim=0, index=idx_to_group_img.view(-1, 1, 1).expand(-1, x.shape[1], x.shape[2]))\n weights = image_atts[:, 1:].unsqueeze(2) # B L 1\n x_bs_cls = torch.sum((weights * x_bs).transpose(1, 2), dim=-1, keepdim=True) # B C 1\n x_bs_cls = x_bs_cls / torch.sum(weights.transpose(1, 2), dim=-1, keepdim=True) # avgpool\n\n return torch.cat([x_bs_cls.transpose(1, 2), x_bs], dim=1), \\\n torch.cat([x_cls.transpose(1, 2), x], dim=1)\n\n def flops(self):\n flops = 0\n flops += self.patch_embed.flops()\n for i, layer in enumerate(self.layers):\n flops += layer.flops()\n flops += self.num_features * self.patches_resolution[0] * self.patches_resolution[1] // (2 ** self.num_layers)\n flops += self.num_features * self.num_classes\n return flops"
},
{
"identifier": "interpolate_relative_pos_embed",
"path": "engine/viper/base_models/xvlm/swin_transformer.py",
"snippet": "def interpolate_relative_pos_embed(rel_pos_bias, dst_num_pos, param_name=''):\n # from: https://github.com/microsoft/unilm/blob/8a0a1c1f4e7326938ea7580a00d56d7f17d65612/beit/run_class_finetuning.py#L348\n\n # rel_pos_bias: relative_position_bias_table\n src_num_pos, num_attn_heads = rel_pos_bias.size()\n\n num_extra_tokens = 0\n src_size = int((src_num_pos - num_extra_tokens) ** 0.5)\n dst_size = int((dst_num_pos - num_extra_tokens) ** 0.5)\n if src_size != dst_size:\n print(\"Position interpolate %s from %dx%d to %dx%d\" % (param_name, src_size, src_size, dst_size, dst_size))\n\n # extra_tokens = rel_pos_bias[-num_extra_tokens:, :]\n # rel_pos_bias = rel_pos_bias[:-num_extra_tokens, :]\n\n def geometric_progression(a, r, n):\n return a * (1.0 - r ** n) / (1.0 - r)\n\n left, right = 1.01, 1.5\n while right - left > 1e-6:\n q = (left + right) / 2.0\n gp = geometric_progression(1, q, src_size // 2)\n if gp > dst_size // 2:\n right = q\n else:\n left = q\n\n # if q > 1.090307:\n # q = 1.090307\n\n dis = []\n cur = 1\n for i in range(src_size // 2):\n dis.append(cur)\n cur += q ** (i + 1)\n\n r_ids = [-_ for _ in reversed(dis)]\n\n x = r_ids + [0] + dis\n y = r_ids + [0] + dis\n\n t = dst_size // 2.0\n dx = np.arange(-t, t + 0.1, 1.0)\n dy = np.arange(-t, t + 0.1, 1.0)\n\n # print(\"Original positions = %s\" % str(x))\n # print(\"Target positions = %s\" % str(dx))\n\n all_rel_pos_bias = []\n\n for i in range(num_attn_heads):\n z = rel_pos_bias[:, i].view(src_size, src_size).float().numpy()\n f = interpolate.interp2d(x, y, z, kind='cubic')\n all_rel_pos_bias.append(\n torch.Tensor(f(dx, dy)).contiguous().view(-1, 1).to(rel_pos_bias.device))\n\n rel_pos_bias = torch.cat(all_rel_pos_bias, dim=-1)\n\n return rel_pos_bias"
},
{
"identifier": "BertConfig",
"path": "engine/viper/base_models/xvlm/xbert.py",
"snippet": "_CONFIG_FOR_DOC = \"BertConfig\"\n_TOKENIZER_FOR_DOC = \"BertTokenizer\"\nBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"bert-base-uncased\",\n \"bert-large-uncased\",\n \"bert-base-cased\",\n \"bert-large-cased\",\n \"bert-base-multilingual-uncased\",\n \"bert-base-multilingual-cased\",\n \"bert-base-chinese\",\n \"bert-base-german-cased\",\n \"bert-large-uncased-whole-word-masking\",\n \"bert-large-cased-whole-word-masking\",\n \"bert-large-uncased-whole-word-masking-finetuned-squad\",\n \"bert-large-cased-whole-word-masking-finetuned-squad\",\n \"bert-base-cased-finetuned-mrpc\",\n \"bert-base-german-dbmdz-cased\",\n \"bert-base-german-dbmdz-uncased\",\n \"cl-tohoku/bert-base-japanese\",\n \"cl-tohoku/bert-base-japanese-whole-word-masking\",\n \"cl-tohoku/bert-base-japanese-char\",\n \"cl-tohoku/bert-base-japanese-char-whole-word-masking\",\n \"TurkuNLP/bert-base-finnish-cased-v1\",\n \"TurkuNLP/bert-base-finnish-uncased-v1\",\n \"wietsedv/bert-base-dutch-cased\",\n # See all BERT models at https://huggingface.co/models?filter=bert\n]\nBERT_START_DOCSTRING = r\"\"\"\n This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic\n methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,\n pruning heads etc.)\n This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__\n subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to\n general usage and behavior.\n Parameters:\n config (:class:`~transformers.BertConfig`): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model\n weights.\n\"\"\"\nBERT_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):\n Indices of input sequence tokens in the vocabulary.\n Indices can be obtained using :class:`~transformers.BertTokenizer`. See\n :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for\n details.\n `What are input IDs? <../glossary.html#input-ids>`__\n attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):\n Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n `What are attention masks? <../glossary.html#attention-mask>`__\n token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,\n 1]``:\n - 0 corresponds to a `sentence A` token,\n - 1 corresponds to a `sentence B` token.\n `What are token type IDs? <../glossary.html#token-type-ids>`_\n position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,\n config.max_position_embeddings - 1]``.\n `What are position IDs? <../glossary.html#position-ids>`_\n head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):\n Mask to nullify selected heads of the self-attention modules. 
Mask values selected in ``[0, 1]``:\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert :obj:`input_ids` indices into associated\n vectors than the model's internal embedding lookup matrix.\n output_attentions (:obj:`bool`, `optional`):\n Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned\n tensors for more detail.\n output_hidden_states (:obj:`bool`, `optional`):\n Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for\n more detail.\n return_dict (:obj:`bool`, `optional`):\n Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.\n\"\"\"\ndef load_tf_weights_in_bert(model, config, tf_checkpoint_path):\n def __init__(self, config):\n def forward(\n self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0\n ):\n def __init__(self, config, is_cross_attention):\n def save_attn_gradients(self, attn_gradients):\n def get_attn_gradients(self):\n def save_attention_map(self, attention_map):\n def get_attention_map(self):\n def transpose_for_scores(self, x):\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_value=None,\n output_attentions=False,\n ):\n def __init__(self, config):\n def forward(self, hidden_states, input_tensor):\n def __init__(self, config, is_cross_attention=False):\n def prune_heads(self, heads):\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_value=None,\n output_attentions=False,\n ):\n def __init__(self, config):\n def forward(self, hidden_states):\n def __init__(self, config):\n def forward(self, hidden_states, input_tensor):\n def __init__(self, config, layer_num):\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_value=None,\n output_attentions=False,\n ):\n def feed_forward_chunk(self, attention_output):\n def __init__(self, config):\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_values=None,\n use_cache=None,\n output_attentions=False,\n output_hidden_states=False,\n return_dict=True,\n mode='multi_modal',\n ):\n def create_custom_forward(module):\n def custom_forward(*inputs):\n def __init__(self, config):\n def forward(self, hidden_states):\n def __init__(self, config):\n def forward(self, hidden_states):\n def __init__(self, config):\n def forward(self, hidden_states):\n def __init__(self, config):\n def forward(self, sequence_output):\n def __init__(self, config):\n def forward(self, pooled_output):\n def __init__(self, config):\n def forward(self, sequence_output, pooled_output):\n def _init_weights(self, module):\n def __init__(self, config, add_pooling_layer=True):\n def get_input_embeddings(self):\n def set_input_embeddings(self, value):\n def _prune_heads(self, heads_to_prune):\n def get_extended_attention_mask(self, attention_mask: Tensor, input_shape: Tuple[int], 
device: device, is_decoder: bool) -> Tensor:\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n encoder_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_values=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n is_decoder=False,\n mode='multi_modal',\n ):\n def __init__(self, config):\n def get_output_embeddings(self):\n def set_output_embeddings(self, new_embeddings):\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n next_sentence_label=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n def __init__(self, lb_smooth=0.1, reduction='mean', ignore_index=-100):\n def forward(self, logits, label):\n def __init__(self, config, label_smoothing=0.0):\n def get_output_embeddings(self):\n def set_output_embeddings(self, new_embeddings):\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n labels=None,\n past_key_values=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n is_decoder=True,\n reduction='mean',\n mode='multi_modal',\n return_logits=False,\n ):\n def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs):\n def _reorder_cache(self, past, beam_idx):\n def _generate_no_beam_search(\n self,\n input_ids,\n cur_len,\n max_length,\n do_sample,\n temperature,\n top_k,\n top_p,\n repetition_penalty,\n pad_token_id,\n eos_token_ids,\n batch_size,\n **model_kwargs\n ):\ndef top_k_top_p_filtering(logits, top_k=0, top_p=1.0, filter_value=-float(\"Inf\"), min_tokens_to_keep=1):\n def __init__(self, config):\n def get_output_embeddings(self):\n def set_output_embeddings(self, new_embeddings):\n def gather_seq_out_by_pos(self, seq, pos):\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n encoder_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n is_decoder=False,\n mode='multi_modal',\n return_logits=False,\n masked_pos=None,\n ):\n def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs):\n def __init__(self, config):\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n **kwargs\n ):\n def __init__(self, config):\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n def __init__(self, config):\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n def __init__(self, config):\n def forward(\n self,\n input_ids=None,\n 
attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n def __init__(self, config):\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n start_positions=None,\n end_positions=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\nclass BertEmbeddings(nn.Module):\nclass BertSelfAttention(nn.Module):\nclass BertSelfOutput(nn.Module):\nclass BertAttention(nn.Module):\nclass BertIntermediate(nn.Module):\nclass BertOutput(nn.Module):\nclass BertLayer(nn.Module):\nclass BertEncoder(nn.Module):\nclass BertPooler(nn.Module):\nclass BertPredictionHeadTransform(nn.Module):\nclass BertLMPredictionHead(nn.Module):\nclass BertOnlyMLMHead(nn.Module):\nclass BertOnlyNSPHead(nn.Module):\nclass BertPreTrainingHeads(nn.Module):\nclass BertPreTrainedModel(PreTrainedModel):\nclass BertForPreTrainingOutput(ModelOutput):\nclass BertModel(BertPreTrainedModel):\nclass BertForPreTraining(BertPreTrainedModel):\nclass LabelSmoothSoftmaxCEV1(nn.Module):\nclass BertLMHeadModel(BertPreTrainedModel):\nclass BertForMaskedLM(BertPreTrainedModel):\nclass BertForNextSentencePrediction(BertPreTrainedModel):\nclass BertForSequenceClassification(BertPreTrainedModel):\nclass BertForMultipleChoice(BertPreTrainedModel):\nclass BertForTokenClassification(BertPreTrainedModel):\nclass BertForQuestionAnswering(BertPreTrainedModel):"
}
] | import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributed as dist
import json
from functools import partial
from engine.viper.base_models.xvlm.vit import VisionTransformer, interpolate_pos_embed
from engine.viper.base_models.xvlm.swin_transformer import SwinTransformer, interpolate_relative_pos_embed
from engine.viper.base_models.xvlm.xbert import BertConfig, BertForMaskedLM, BertModel | 8,466 | # Multi-Grained Vision Language Pre-Training: Aligning Texts with Visual Concepts (https://arxiv.org/abs/2111.08276)
# Github: https://github.com/zengyan-97/X-VLM
# Copyright (c) 2022, ByteDance Inc.
# All rights reserved.
def read_json(rpath):
with open(rpath, 'r') as f:
return json.load(f)
class AllGather(torch.autograd.Function):
"""An autograd function that performs allgather on a tensor."""
@staticmethod
def forward(ctx, tensor, rank, world_size):
output = [torch.empty_like(tensor) for _ in range(world_size)]
dist.all_gather(output, tensor)
ctx.rank = rank
ctx.batch_size = tensor.shape[0]
return torch.cat(output, 0)
@staticmethod
def backward(ctx, grad_output):
return (
grad_output[ctx.batch_size * ctx.rank: ctx.batch_size * (ctx.rank + 1)],
None,
None
)
allgather = AllGather.apply
def build_vision_encoder(vision_config, load_params=False):
"""
Args:
load_params: False when building fine-tuning models
"""
vision_width = vision_config['vision_width']
vision_encoder = SwinTransformer(img_size=vision_config['image_res'],
patch_size=4,
in_chans=3,
embed_dim=vision_config['embed_dim'],
depths=vision_config['depths'],
num_heads=vision_config['num_heads'],
window_size=vision_config['window_size'],
mlp_ratio=4.,
qkv_bias=True,
drop_rate=0.0,
drop_path_rate=0.1,
ape=False,
patch_norm=True,
use_checkpoint=False)
if load_params:
# download from https://github.com/microsoft/Swin-Transformer
state_dict = torch.load(vision_config['ckpt'], map_location="cpu")['model']
for k in list(state_dict.keys()):
if 'relative_position_bias_table' in k:
dst_num_pos = (2 * vision_config['window_size'] - 1) ** 2
state_dict[k] = interpolate_relative_pos_embed(state_dict[k], dst_num_pos, param_name=k)
elif ('relative_position_index' in k) or ('attn_mask' in k):
del state_dict[k]
if load_params:
print("### Load ViT: ", flush=True)
msg = vision_encoder.load_state_dict(state_dict, strict=False)
print("missing_keys: ", msg.missing_keys)
print("unexpected_keys: ", msg.unexpected_keys)
return vision_encoder, vision_width
def build_text_encoder(config, vision_width, load_text_params=False, use_mlm_loss=False, config_text=None):
init_params = [] # train from scratch with larger lr
config_text = BertConfig.from_json_file('engine/viper/base_models/xvlm/config_bert.json')
config_text.encoder_width = vision_width
if use_mlm_loss: # for pre-training, load_text_params by default (otherwise notimplemented)
assert load_text_params is True
if ('accelerator' in config.keys()) and (config['accelerator']['FP16_OPT_LEVEL'] != 'O0'):
config_text.fp16 = True # will use some operations to avoid gradient overflow
| # Multi-Grained Vision Language Pre-Training: Aligning Texts with Visual Concepts (https://arxiv.org/abs/2111.08276)
# Github: https://github.com/zengyan-97/X-VLM
# Copyright (c) 2022, ByteDance Inc.
# All rights reserved.
def read_json(rpath):
with open(rpath, 'r') as f:
return json.load(f)
class AllGather(torch.autograd.Function):
"""An autograd function that performs allgather on a tensor."""
@staticmethod
def forward(ctx, tensor, rank, world_size):
output = [torch.empty_like(tensor) for _ in range(world_size)]
dist.all_gather(output, tensor)
ctx.rank = rank
ctx.batch_size = tensor.shape[0]
return torch.cat(output, 0)
@staticmethod
def backward(ctx, grad_output):
return (
grad_output[ctx.batch_size * ctx.rank: ctx.batch_size * (ctx.rank + 1)],
None,
None
)
allgather = AllGather.apply
def build_vision_encoder(vision_config, load_params=False):
"""
Args:
load_params: False when building fine-tuning models
"""
vision_width = vision_config['vision_width']
vision_encoder = SwinTransformer(img_size=vision_config['image_res'],
patch_size=4,
in_chans=3,
embed_dim=vision_config['embed_dim'],
depths=vision_config['depths'],
num_heads=vision_config['num_heads'],
window_size=vision_config['window_size'],
mlp_ratio=4.,
qkv_bias=True,
drop_rate=0.0,
drop_path_rate=0.1,
ape=False,
patch_norm=True,
use_checkpoint=False)
if load_params:
# download from https://github.com/microsoft/Swin-Transformer
state_dict = torch.load(vision_config['ckpt'], map_location="cpu")['model']
for k in list(state_dict.keys()):
if 'relative_position_bias_table' in k:
dst_num_pos = (2 * vision_config['window_size'] - 1) ** 2
state_dict[k] = interpolate_relative_pos_embed(state_dict[k], dst_num_pos, param_name=k)
elif ('relative_position_index' in k) or ('attn_mask' in k):
del state_dict[k]
if load_params:
print("### Load ViT: ", flush=True)
msg = vision_encoder.load_state_dict(state_dict, strict=False)
print("missing_keys: ", msg.missing_keys)
print("unexpected_keys: ", msg.unexpected_keys)
return vision_encoder, vision_width
def build_text_encoder(config, vision_width, load_text_params=False, use_mlm_loss=False, config_text=None):
init_params = [] # train from scratch with larger lr
config_text = BertConfig.from_json_file('engine/viper/base_models/xvlm/config_bert.json')
config_text.encoder_width = vision_width
if use_mlm_loss: # for pre-training, load_text_params by default (otherwise notimplemented)
assert load_text_params is True
if ('accelerator' in config.keys()) and (config['accelerator']['FP16_OPT_LEVEL'] != 'O0'):
config_text.fp16 = True # will use some operations to avoid gradient overflow
| text_encoder, msg = BertForMaskedLM.from_pretrained(config['text_encoder'], config=config_text, | 4 | 2023-11-01 16:39:33+00:00 | 12k |
ml4bio/RhoFold | rhofold/model/structure_module.py | [
{
"identifier": "Linear",
"path": "rhofold/model/primitives.py",
"snippet": "class Linear(nn.Linear):\n \"\"\"\n A Linear layer with built-in nonstandard initializations. Called just\n like torch.nn.Linear.\n\n Implements the initializers in 1.11.4, plus some additional ones found\n in the code.\n \"\"\"\n\n def __init__(\n self,\n in_dim: int,\n out_dim: int,\n bias: bool = True,\n ):\n \"\"\"\n Args:\n in_dim:\n The final dimension of inputs to the layer\n out_dim:\n The final dimension of layer outputs\n bias:\n Whether to learn an additive bias. True by default\n \"\"\"\n super(Linear, self).__init__(in_dim, out_dim, bias=bias)\n\n if bias:\n with torch.no_grad():\n self.bias.fill_(0)"
},
{
"identifier": "LayerNorm",
"path": "rhofold/model/primitives.py",
"snippet": "class LayerNorm(nn.Module):\n def __init__(self, c_in, eps=1e-5):\n super(LayerNorm, self).__init__()\n \n self.c_in = (c_in,)\n self.eps = eps\n\n self.weight = nn.Parameter(torch.ones(c_in))\n self.bias = nn.Parameter(torch.zeros(c_in))\n\n def forward(self, x): \n\n out = nn.functional.layer_norm(\n x,\n self.c_in,\n self.weight,\n self.bias,\n self.eps,\n )\n\n return out"
},
{
"identifier": "Rigid",
"path": "rhofold/utils/rigid_utils.py",
"snippet": "class Rigid:\n \"\"\"\n A class representing a rigid transformation. Little more than a wrapper\n around two objects: a Rotation object and a [*, 3] translation\n Designed to behave approximately like a single torch tensor with the \n shape of the shared batch dimensions of its component parts.\n \"\"\"\n def __init__(self, \n rots: Optional[Rotation],\n trans: Optional[torch.Tensor],\n ):\n \"\"\"\n Args:\n rots: A [*, 3, 3] rotation tensor\n trans: A corresponding [*, 3] translation tensor\n \"\"\"\n # (we need device, dtype, etc. from at least one input)\n\n batch_dims, dtype, device, requires_grad = None, None, None, None\n if(trans is not None):\n batch_dims = trans.shape[:-1]\n dtype = trans.dtype\n device = trans.device\n requires_grad = trans.requires_grad\n elif(rots is not None):\n batch_dims = rots.shape\n dtype = rots.dtype\n device = rots.device\n requires_grad = rots.requires_grad\n else:\n raise ValueError(\"At least one input argument must be specified\")\n\n if(rots is None):\n rots = Rotation.identity(\n batch_dims, dtype, device, requires_grad,\n )\n elif(trans is None):\n trans = identity_trans(\n batch_dims, dtype, device, requires_grad,\n )\n\n if((rots.shape != trans.shape[:-1]) or\n (rots.device != trans.device)):\n raise ValueError(\"Rots and trans incompatible\")\n\n # Force full precision. Happens to the rotations automatically.\n trans = trans.to(dtype=torch.float32)\n\n self._rots = rots\n self._trans = trans\n\n @staticmethod\n def identity(\n shape: Tuple[int], \n dtype: Optional[torch.dtype] = None,\n device: Optional[torch.device] = None, \n requires_grad: bool = True,\n fmt: str = \"quat\",\n ) -> Rigid:\n \"\"\"\n Constructs an identity transformation.\n\n Args:\n shape: \n The desired shape\n dtype: \n The dtype of both internal tensors\n device: \n The device of both internal tensors\n requires_grad: \n Whether grad should be enabled for the internal tensors\n Returns:\n The identity transformation\n \"\"\"\n return Rigid(\n Rotation.identity(shape, dtype, device, requires_grad, fmt=fmt),\n identity_trans(shape, dtype, device, requires_grad),\n )\n\n def __getitem__(self, \n index: Any,\n ) -> Rigid:\n \"\"\" \n Indexes the affine transformation with PyTorch-style indices.\n The index is applied to the shared dimensions of both the rotation\n and the translation.\n\n E.g.::\n\n r = Rotation(rot_mats=torch.rand(10, 10, 3, 3), quats=None)\n t = Rigid(r, torch.rand(10, 10, 3))\n indexed = t[3, 4:6]\n assert(indexed.shape == (2,))\n assert(indexed.get_rots().shape == (2,))\n assert(indexed.get_trans().shape == (2, 3))\n\n Args:\n index: A standard torch tensor index. E.g. 8, (10, None, 3),\n or (3, slice(0, 1, None))\n Returns:\n The indexed tensor \n \"\"\"\n if type(index) != tuple:\n index = (index,)\n \n return Rigid(\n self._rots[index],\n self._trans[index + (slice(None),)],\n )\n\n def __mul__(self,\n right: torch.Tensor,\n ) -> Rigid:\n \"\"\"\n Pointwise left multiplication of the transformation with a tensor.\n Can be used to e.g. 
mask the Rigid.\n\n Args:\n right:\n The tensor multiplicand\n Returns:\n The product\n \"\"\"\n if not(isinstance(right, torch.Tensor)):\n raise TypeError(\"The other multiplicand must be a Tensor\")\n\n new_rots = self._rots * right\n new_trans = self._trans * right[..., None]\n\n return Rigid(new_rots, new_trans)\n\n def __rmul__(self,\n left: torch.Tensor,\n ) -> Rigid:\n \"\"\"\n Reverse pointwise multiplication of the transformation with a \n tensor.\n\n Args:\n left:\n The left multiplicand\n Returns:\n The product\n \"\"\"\n return self.__mul__(left)\n\n @property\n def shape(self) -> torch.Size:\n \"\"\"\n Returns the shape of the shared dimensions of the rotation and\n the translation.\n \n Returns:\n The shape of the transformation\n \"\"\"\n s = self._trans.shape[:-1]\n return s\n\n @property\n def device(self) -> torch.device:\n \"\"\"\n Returns the device on which the Rigid's tensors are located.\n\n Returns:\n The device on which the Rigid's tensors are located\n \"\"\"\n return self._trans.device\n\n def get_rots(self) -> Rotation:\n \"\"\"\n Getter for the rotation.\n\n Returns:\n The rotation object\n \"\"\"\n return self._rots\n\n def get_trans(self) -> torch.Tensor:\n \"\"\"\n Getter for the translation.\n\n Returns:\n The stored translation\n \"\"\"\n return self._trans\n\n def compose_q_update_vec(self, \n q_update_vec: torch.Tensor,\n ) -> Rigid:\n \"\"\"\n Composes the transformation with a quaternion update vector of\n shape [*, 6], where the final 6 columns represent the x, y, and\n z values of a quaternion of form (1, x, y, z) followed by a 3D\n translation.\n\n Args:\n q_vec: The quaternion update vector.\n Returns:\n The composed transformation.\n \"\"\"\n q_vec, t_vec = q_update_vec[..., :3], q_update_vec[..., 3:]\n new_rots = self._rots.compose_q_update_vec(q_vec)\n\n trans_update = self._rots.apply(t_vec)\n new_translation = self._trans + trans_update\n\n return Rigid(new_rots, new_translation)\n\n def compose(self,\n r: Rigid,\n ) -> Rigid:\n \"\"\"\n Composes the current rigid object with another.\n\n Args:\n r:\n Another Rigid object\n Returns:\n The composition of the two transformations\n \"\"\"\n new_rot = self._rots.compose_r(r._rots)\n new_trans = self._rots.apply(r._trans) + self._trans\n return Rigid(new_rot, new_trans)\n\n def apply(self, \n pts: torch.Tensor,\n ) -> torch.Tensor:\n \"\"\"\n Applies the transformation to a coordinate tensor.\n\n Args:\n pts: A [*, 3] coordinate tensor.\n Returns:\n The transformed points.\n \"\"\"\n rotated = self._rots.apply(pts) \n return rotated + self._trans\n\n def invert_apply(self, \n pts: torch.Tensor\n ) -> torch.Tensor:\n \"\"\"\n Applies the inverse of the transformation to a coordinate tensor.\n\n Args:\n pts: A [*, 3] coordinate tensor\n Returns:\n The transformed points.\n \"\"\"\n pts = pts - self._trans\n return self._rots.invert_apply(pts) \n\n def invert(self) -> Rigid:\n \"\"\"\n Inverts the transformation.\n\n Returns:\n The inverse transformation.\n \"\"\"\n rot_inv = self._rots.invert() \n trn_inv = rot_inv.apply(self._trans)\n\n return Rigid(rot_inv, -1 * trn_inv)\n\n def map_tensor_fn(self, \n fn: Callable[torch.Tensor, torch.Tensor]\n ) -> Rigid:\n \"\"\"\n Apply a Tensor -> Tensor function to underlying translation and\n rotation tensors, mapping over the translation/rotation dimensions\n respectively.\n\n Args:\n fn:\n A Tensor -> Tensor function to be mapped over the Rigid\n Returns:\n The transformed Rigid object\n \"\"\" \n new_rots = self._rots.map_tensor_fn(fn) \n new_trans = 
torch.stack(\n list(map(fn, torch.unbind(self._trans, dim=-1))), \n dim=-1\n )\n\n return Rigid(new_rots, new_trans)\n\n def to_tensor_4x4(self) -> torch.Tensor:\n \"\"\"\n Converts a transformation to a homogenous transformation tensor.\n\n Returns:\n A [*, 4, 4] homogenous transformation tensor\n \"\"\"\n tensor = self._trans.new_zeros((*self.shape, 4, 4))\n tensor[..., :3, :3] = self._rots.get_rot_mats()\n tensor[..., :3, 3] = self._trans\n tensor[..., 3, 3] = 1\n return tensor\n\n @staticmethod\n def from_tensor_4x4(\n t: torch.Tensor\n ) -> Rigid:\n \"\"\"\n Constructs a transformation from a homogenous transformation\n tensor.\n\n Args:\n t: [*, 4, 4] homogenous transformation tensor\n Returns:\n T object with shape [*]\n \"\"\"\n if(t.shape[-2:] != (4, 4)):\n raise ValueError(\"Incorrectly shaped input tensor\")\n\n rots = Rotation(rot_mats=t[..., :3, :3], quats=None)\n trans = t[..., :3, 3]\n \n return Rigid(rots, trans)\n\n def to_tensor_7(self) -> torch.Tensor:\n \"\"\"\n Converts a transformation to a tensor with 7 final columns, four \n for the quaternion followed by three for the translation.\n\n Returns:\n A [*, 7] tensor representation of the transformation\n \"\"\"\n tensor = self._trans.new_zeros((*self.shape, 7))\n tensor[..., :4] = self._rots.get_quats()\n tensor[..., 4:] = self._trans\n\n return tensor\n\n @staticmethod\n def from_tensor_7(\n t: torch.Tensor,\n normalize_quats: bool = False,\n ) -> Rigid:\n if(t.shape[-1] != 7):\n raise ValueError(\"Incorrectly shaped input tensor\")\n\n quats, trans = t[..., :4], t[..., 4:]\n\n rots = Rotation(\n rot_mats=None, \n quats=quats, \n normalize_quats=normalize_quats\n )\n\n return Rigid(rots, trans)\n\n @staticmethod\n def from_3_points(\n p_neg_x_axis: torch.Tensor, \n origin: torch.Tensor, \n p_xy_plane: torch.Tensor, \n eps: float = 1e-8\n ) -> Rigid:\n \"\"\"\n Implements algorithm 21. Constructs transformations from sets of 3 \n points using the Gram-Schmidt algorithm.\n\n Args:\n p_neg_x_axis: [*, 3] coordinates\n origin: [*, 3] coordinates used as frame origins\n p_xy_plane: [*, 3] coordinates\n eps: Small epsilon value\n Returns:\n A transformation object of shape [*]\n \"\"\"\n p_neg_x_axis = torch.unbind(p_neg_x_axis, dim=-1)\n origin = torch.unbind(origin, dim=-1)\n p_xy_plane = torch.unbind(p_xy_plane, dim=-1)\n\n e0 = [c1 - c2 for c1, c2 in zip(origin, p_neg_x_axis)]\n e1 = [c1 - c2 for c1, c2 in zip(p_xy_plane, origin)]\n\n denom = torch.sqrt(sum((c * c for c in e0)) + eps)\n e0 = [c / denom for c in e0]\n dot = sum((c1 * c2 for c1, c2 in zip(e0, e1)))\n e1 = [c2 - c1 * dot for c1, c2 in zip(e0, e1)]\n denom = torch.sqrt(sum((c * c for c in e1)) + eps)\n e1 = [c / denom for c in e1]\n e2 = [\n e0[1] * e1[2] - e0[2] * e1[1],\n e0[2] * e1[0] - e0[0] * e1[2],\n e0[0] * e1[1] - e0[1] * e1[0],\n ]\n\n rots = torch.stack([c for tup in zip(e0, e1, e2) for c in tup], dim=-1)\n rots = rots.reshape(rots.shape[:-1] + (3, 3))\n\n rot_obj = Rotation(rot_mats=rots, quats=None)\n\n return Rigid(rot_obj, torch.stack(origin, dim=-1))\n\n def unsqueeze(self, \n dim: int,\n ) -> Rigid:\n \"\"\"\n Analogous to torch.unsqueeze. 
The dimension is relative to the\n shared dimensions of the rotation/translation.\n \n Args:\n dim: A positive or negative dimension index.\n Returns:\n The unsqueezed transformation.\n \"\"\"\n if dim >= len(self.shape):\n raise ValueError(\"Invalid dimension\")\n rots = self._rots.unsqueeze(dim)\n trans = self._trans.unsqueeze(dim if dim >= 0 else dim - 1)\n\n return Rigid(rots, trans)\n\n @staticmethod\n def cat(\n ts: Sequence[Rigid], \n dim: int,\n ) -> Rigid:\n \"\"\"\n Concatenates transformations along a new dimension.\n\n Args:\n ts: \n A list of T objects\n dim: \n The dimension along which the transformations should be \n concatenated\n Returns:\n A concatenated transformation object\n \"\"\"\n rots = Rotation.cat([t._rots for t in ts], dim) \n trans = torch.cat(\n [t._trans for t in ts], dim=dim if dim >= 0 else dim - 1\n )\n\n return Rigid(rots, trans)\n\n def apply_rot_fn(self, fn: Callable[Rotation, Rotation]) -> Rigid:\n \"\"\"\n Applies a Rotation -> Rotation function to the stored rotation\n object.\n\n Args:\n fn: A function of type Rotation -> Rotation\n Returns:\n A transformation object with a transformed rotation.\n \"\"\"\n return Rigid(fn(self._rots), self._trans)\n\n def apply_trans_fn(self, fn: Callable[torch.Tensor, torch.Tensor]) -> Rigid:\n \"\"\"\n Applies a Tensor -> Tensor function to the stored translation.\n\n Args:\n fn: \n A function of type Tensor -> Tensor to be applied to the\n translation\n Returns:\n A transformation object with a transformed translation.\n \"\"\"\n return Rigid(self._rots, fn(self._trans))\n\n def scale_translation(self, trans_scale_factor: float) -> Rigid:\n \"\"\"\n Scales the translation by a constant factor.\n\n Args:\n trans_scale_factor:\n The constant factor\n Returns:\n A transformation object with a scaled translation.\n \"\"\"\n fn = lambda t: t * trans_scale_factor\n return self.apply_trans_fn(fn)\n\n def stop_rot_gradient(self) -> Rigid:\n \"\"\"\n Detaches the underlying rotation object\n\n Returns:\n A transformation object with detached rotations\n \"\"\"\n fn = lambda r: r.detach()\n return self.apply_rot_fn(fn)\n\n @staticmethod\n def make_transform_from_reference(n_xyz, ca_xyz, c_xyz, eps=1e-20):\n \"\"\"\n Returns a transformation object from reference coordinates.\n \n Note that this method does not take care of symmetries. If you \n provide the atom positions in the non-standard way, the N atom will \n end up not at [-0.527250, 1.359329, 0.0] but instead at \n [-0.527250, -1.359329, 0.0]. You need to take care of such cases in \n your code.\n \n Args:\n n_xyz: A [*, 3] tensor of nitrogen xyz coordinates.\n ca_xyz: A [*, 3] tensor of carbon alpha xyz coordinates.\n c_xyz: A [*, 3] tensor of carbon xyz coordinates.\n Returns:\n A transformation object. 
After applying the translation and \n rotation to the reference backbone, the coordinates will \n approximately equal to the input coordinates.\n \"\"\" \n translation = -1 * ca_xyz\n n_xyz = n_xyz + translation\n c_xyz = c_xyz + translation\n\n c_x, c_y, c_z = [c_xyz[..., i] for i in range(3)]\n norm = torch.sqrt(eps + c_x ** 2 + c_y ** 2)\n sin_c1 = -c_y / norm\n cos_c1 = c_x / norm\n zeros = sin_c1.new_zeros(sin_c1.shape)\n ones = sin_c1.new_ones(sin_c1.shape)\n\n c1_rots = sin_c1.new_zeros((*sin_c1.shape, 3, 3))\n c1_rots[..., 0, 0] = cos_c1\n c1_rots[..., 0, 1] = -1 * sin_c1\n c1_rots[..., 1, 0] = sin_c1\n c1_rots[..., 1, 1] = cos_c1\n c1_rots[..., 2, 2] = 1\n\n norm = torch.sqrt(eps + c_x ** 2 + c_y ** 2 + c_z ** 2)\n sin_c2 = c_z / norm\n cos_c2 = torch.sqrt(c_x ** 2 + c_y ** 2) / norm\n\n c2_rots = sin_c2.new_zeros((*sin_c2.shape, 3, 3))\n c2_rots[..., 0, 0] = cos_c2\n c2_rots[..., 0, 2] = sin_c2\n c2_rots[..., 1, 1] = 1\n c2_rots[..., 2, 0] = -1 * sin_c2\n c2_rots[..., 2, 2] = cos_c2\n\n c_rots = rot_matmul(c2_rots, c1_rots)\n n_xyz = rot_vec_mul(c_rots, n_xyz)\n\n _, n_y, n_z = [n_xyz[..., i] for i in range(3)]\n norm = torch.sqrt(eps + n_y ** 2 + n_z ** 2)\n sin_n = -n_z / norm\n cos_n = n_y / norm\n\n n_rots = sin_c2.new_zeros((*sin_c2.shape, 3, 3))\n n_rots[..., 0, 0] = 1\n n_rots[..., 1, 1] = cos_n\n n_rots[..., 1, 2] = -1 * sin_n\n n_rots[..., 2, 1] = sin_n\n n_rots[..., 2, 2] = cos_n\n\n rots = rot_matmul(n_rots, c_rots)\n\n rots = rots.transpose(-1, -2)\n translation = -1 * translation\n\n rot_obj = Rotation(rot_mats=rots, quats=None)\n\n return Rigid(rot_obj, translation)\n\n def cuda(self) -> Rigid:\n \"\"\"\n Moves the transformation object to GPU memory\n \n Returns:\n A version of the transformation on GPU\n \"\"\"\n return Rigid(self._rots.cuda(), self._trans.cuda())"
},
{
"identifier": "dict_multimap",
"path": "rhofold/utils/tensor_utils.py",
"snippet": "def dict_multimap(fn, dicts):\n first = dicts[0]\n new_dict = {}\n for k, v in first.items():\n all_v = [d[k] for d in dicts]\n if type(v) is dict:\n new_dict[k] = dict_multimap(fn, all_v)\n else:\n new_dict[k] = fn(all_v)\n\n return new_dict"
},
{
"identifier": "permute_final_dims",
"path": "rhofold/utils/tensor_utils.py",
"snippet": "def permute_final_dims(tensor: torch.Tensor, inds: List[int]):\n zero_index = -1 * len(inds)\n first_inds = list(range(len(tensor.shape[:zero_index])))\n return tensor.permute(first_inds + [zero_index + i for i in inds])"
},
{
"identifier": "flatten_final_dims",
"path": "rhofold/utils/tensor_utils.py",
"snippet": "def flatten_final_dims(t: torch.Tensor, no_dims: int):\n return t.reshape(t.shape[:-no_dims] + (-1,))"
},
{
"identifier": "RNAAlphabet",
"path": "rhofold/utils/alphabet.py",
"snippet": "class RNAAlphabet(Alphabet):\n\n def get_batch_converter(self):\n if self.use_msa:\n return RNAMSABatchConverter(self)\n else:\n return BatchConverter(self)\n\n @classmethod\n def from_architecture(cls, name: str, ) -> \"RNAAlphabet\":\n if name in (\"RNA MSA Transformer\", \"rna_msa_transformer\", \"RNA\"):\n standard_toks = rna_msaseq_toks[\"toks\"]\n prepend_toks = (\"<cls>\", \"<pad>\", \"<eos>\", \"<unk>\")\n append_toks = (\"<mask>\",)\n prepend_bos = True\n append_eos = False\n use_msa = True\n else:\n raise ValueError(\"Unknown architecture selected\")\n return cls(\n standard_toks, prepend_toks, append_toks, prepend_bos, append_eos, use_msa\n )"
},
{
"identifier": "RNAConverter",
"path": "rhofold/utils/converter.py",
"snippet": "class RNAConverter():\n \"\"\"RNA Structure Converter.\"\"\"\n\n def __init__(self):\n \"\"\"\"\"\"\n\n self.eps = 1e-4\n self.__init()\n\n def __init(self):\n \"\"\"\"\"\"\n\n self.cord_dict = defaultdict(dict)\n for resd_name in RNA_CONSTANTS.RESD_NAMES:\n for atom_name, _, cord_vals in RNA_CONSTANTS.ATOM_INFOS_PER_RESD[resd_name]:\n self.cord_dict[resd_name][atom_name] = torch.tensor(cord_vals, dtype=torch.float32)\n\n trans_dict_all = {}\n for resd_name in RNA_CONSTANTS.RESD_NAMES:\n trans_dict = {}\n cord_dict = {}\n\n atom_infos = RNA_CONSTANTS.ATOM_INFOS_PER_RESD[resd_name]\n angl_infos = RNA_CONSTANTS.ANGL_INFOS_PER_RESD[resd_name]\n n_angls = len(angl_infos)\n \n for atom_name, idx_rgrp, _ in atom_infos:\n if idx_rgrp == 0:\n cord_dict[atom_name] = self.cord_dict[resd_name][atom_name]\n\n trans_dict['omega-main'] = (torch.eye(3, dtype=torch.float32), torch.zeros((3), dtype=torch.float32))\n trans_dict['phi-main'] = (torch.eye(3, dtype=torch.float32), torch.zeros((3), dtype=torch.float32))\n\n for idx_angl, (angl_name, _, atom_names_sel) in enumerate(angl_infos):\n x1 = cord_dict[atom_names_sel[0]]\n x2 = cord_dict[atom_names_sel[1]]\n x3 = cord_dict[atom_names_sel[2]]\n rot, tsl_vec = calc_rot_tsl(x1, x3, x3 + (x3 - x2))\n trans_dict['%s-main' % angl_name] = (rot, tsl_vec)\n\n for atom_name, idx_rgrp, _ in atom_infos:\n if idx_rgrp == idx_angl + 3:\n cord_dict[atom_name] = tsl_vec + torch.sum(\n rot * self.cord_dict[resd_name][atom_name].view(1, 3), dim=1)\n\n for idx_angl_src in range(1, n_angls - 1):\n idx_angl_dst = idx_angl_src + 1\n angl_name_src = angl_infos[idx_angl_src][0]\n angl_name_dst = angl_infos[idx_angl_dst][0]\n rot_src, tsl_vec_src = trans_dict['%s-main' % angl_name_src]\n rot_dst, tsl_vec_dst = trans_dict['%s-main' % angl_name_dst]\n rot = torch.matmul(rot_src.transpose(1, 0), rot_dst)\n tsl_vec = torch.matmul(rot_src.transpose(1, 0), tsl_vec_dst - tsl_vec_src)\n trans_dict['%s-%s' % (angl_name_dst, angl_name_src)] = (rot, tsl_vec)\n\n trans_dict_all[resd_name] = trans_dict\n\n self.trans_dict_init = trans_dict_all\n\n def build_cords(self, seq, fram, angl, rtn_cmsk=False):\n\n # initialization\n n_resds = len(seq)\n device = angl.device\n\n angl = angl.squeeze(dim=0) / (torch.norm(angl.squeeze(dim=0), dim=2, keepdim=True) + self.eps)\n rigid = Rigid.from_tensor_7(fram, normalize_quats=True)\n fram = rigid.to_tensor_4x4()\n rot = fram[:,:,:3,:3]\n tsl = fram[:,:,:3,3:].permute(0,1,3,2)\n\n fram = torch.cat([rot, tsl], dim=2)[:,:,:4,:3].permute(1,0,2,3)\n fmsk = torch.ones((n_resds, 1), dtype=torch.int8, device=device)\n amsk = torch.ones((n_resds, RNA_CONSTANTS.N_ANGLS_PER_RESD_MAX), dtype=torch.int8, device=device)\n cord = torch.zeros((n_resds, RNA_CONSTANTS.ATOM_NUM_MAX, 3), dtype=torch.float32, device=device)\n cmsk = torch.zeros((n_resds, RNA_CONSTANTS.ATOM_NUM_MAX), dtype=torch.int8, device=device)\n\n for resd_name in RNA_CONSTANTS.RESD_NAMES:\n idxs = [x for x in range(n_resds) if seq[x] == resd_name]\n if len(idxs) == 0:\n continue\n cord[idxs], cmsk[idxs] =\\\n self.__build_cord(resd_name, fram[idxs], fmsk[idxs], angl[idxs], amsk[idxs])\n\n return (cord, cmsk) if rtn_cmsk else (cord)\n\n def __build_cord(self, resd_name, fram, fmsk, angl, amsk):\n \"\"\"\"\"\"\n\n # initialization\n device = fram.device\n n_resds = fram.shape[0]\n atom_names_all = RNA_CONSTANTS.ATOM_NAMES_PER_RESD[resd_name]\n atom_names_pad = atom_names_all + ['X'] * (RNA_CONSTANTS.ATOM_NUM_MAX - len(atom_names_all))\n atom_infos_all = 
RNA_CONSTANTS.ATOM_INFOS_PER_RESD[resd_name]\n\n cord_dict = defaultdict(\n lambda: torch.zeros((n_resds, 3), dtype=torch.float32, device=device))\n cmsk_vec_dict = defaultdict(lambda: torch.zeros((n_resds), dtype=torch.int8, device=device))\n\n fram_null = torch.tensor(\n [[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 0, 0]], dtype=torch.float32, device=device)\n fram_dict = defaultdict(lambda: fram_null.unsqueeze(dim=0).repeat(n_resds, 1, 1))\n fmsk_vec_dict = defaultdict(lambda: torch.zeros((n_resds), dtype=torch.int8, device=device))\n\n trans_dict = {'main': (fram[:, 0, :3], fram[:, 0, 3])}\n\n rot_curr, tsl_curr = trans_dict['main']\n atom_names_sel = [x[0] for x in atom_infos_all if x[1] == 0]\n for atom_name_sel in atom_names_sel:\n cord_vec = self.cord_dict[resd_name][atom_name_sel].to(device)\n cord_dict[atom_name_sel] = \\\n tsl_curr + torch.sum(rot_curr * cord_vec.view(1, 1, 3), dim=2)\n cmsk_vec_dict[atom_name_sel] = fmsk[:, 0]\n\n # determine 3D coordinates of atoms belonging to side-chain rigid-groups\n angl_infos_all = RNA_CONSTANTS.ANGL_INFOS_PER_RESD[resd_name]\n rgrp_names_all = ['omega', 'phi'] + [x[0] for x in angl_infos_all]\n\n for idx_rgrp, rgrp_name_curr in enumerate(rgrp_names_all):\n if rgrp_name_curr in ['omega', 'phi', 'angl_0', 'angl_1']:\n rgrp_name_prev = 'main'\n else:\n rgrp_name_prev = 'angl_%d' % (int(rgrp_name_curr[-1]) - 1)\n\n rot_prev, tsl_prev = trans_dict[rgrp_name_prev]\n rot_base, tsl_vec_base = \\\n self.trans_dict_init[resd_name]['%s-%s' % (rgrp_name_curr, rgrp_name_prev)]\n rot_base = rot_base.unsqueeze(dim=0).to(device)\n tsl_base = tsl_vec_base.unsqueeze(dim=0).to(device)\n \n rot_addi, tsl_addi = calc_angl_rot_tsl(angl[:, idx_rgrp])\n rot_curr, tsl_curr = merge_rot_tsl(\n rot_prev, tsl_prev, rot_base, tsl_base, rot_addi, tsl_addi)\n trans_dict[rgrp_name_curr] = (rot_curr, tsl_curr)\n\n fram_dict[rgrp_name_curr] = \\\n torch.cat([rot_curr, tsl_curr.unsqueeze(dim=1)], dim=1)\n fmsk_vec_dict[rgrp_name_curr] = fmsk[:, 0] * amsk[:, idx_rgrp]\n\n atom_names_sel = [x[0] for x in atom_infos_all if x[1] == idx_rgrp + 1]\n for atom_name_sel in atom_names_sel:\n cord_vec = self.cord_dict[resd_name][atom_name_sel].to(device)\n\n cord_dict[atom_name_sel] = \\\n tsl_curr + torch.sum(rot_curr * cord_vec.view(1, 1, 3), dim=2)\n cmsk_vec_dict[atom_name_sel] = fmsk_vec_dict[rgrp_name_curr]\n\n cmsk = torch.stack([cmsk_vec_dict[x] for x in atom_names_pad][:RNA_CONSTANTS.ATOM_NUM_MAX], dim=1)\n cord = torch.stack([cord_dict[x] for x in atom_names_pad][:RNA_CONSTANTS.ATOM_NUM_MAX], dim=1)\n\n return cord, cmsk\n\n def export_pdb_file(self, seq, atom_cords, path, atom_masks=None, confidence=None, chain_id=None, logger = None):\n \"\"\"Export a PDB file.\"\"\"\n\n # configurations\n i_code = ' '\n chain_id = '0' if chain_id is None else chain_id\n occupancy = 1.0\n cord_min = -999.0\n cord_max = 999.0\n seq_len = len(seq)\n\n n_key_atoms = RNA_CONSTANTS.ATOM_NUM_MAX\n\n # take all the atom coordinates as valid, if not specified\n if atom_masks is None:\n atom_masks = np.ones(atom_cords.shape[:-1], dtype=np.int8)\n\n # determine the set of atom names (per residue)\n if atom_cords.ndim == 2:\n if atom_cords.shape[0] == seq_len * n_key_atoms:\n atom_cords = np.reshape(atom_cords, [seq_len, n_key_atoms, 3])\n atom_masks = np.reshape(atom_masks, [seq_len, n_key_atoms])\n else:\n raise ValueError('atom coordinates\\' shape does not match the sequence length')\n\n elif atom_cords.ndim == 3:\n assert atom_cords.shape[0] == seq_len\n atom_cords = atom_cords\n atom_masks = 
atom_masks\n\n else:\n raise ValueError('atom coordinates must be a 2D or 3D np.ndarray')\n\n # reset invalid values in atom coordinates\n atom_cords = np.clip(atom_cords, cord_min, cord_max)\n atom_cords[np.isnan(atom_cords)] = 0.0\n atom_cords[np.isinf(atom_cords)] = 0.0\n\n # export the 3D structure to a PDB file\n os.makedirs(os.path.dirname(os.path.realpath(path)), exist_ok=True)\n with open(path, 'w') as o_file:\n n_atoms = 0\n for idx_resd, resd_name in enumerate(seq):\n for idx_atom, atom_name in enumerate(RNA_CONSTANTS.ATOM_NAMES_PER_RESD[resd_name]):\n\n temp_factor = 0.0 if confidence is None else \\\n float(100 * confidence.reshape([seq_len])[idx_resd - 1])\n\n if atom_masks[idx_resd, idx_atom] == 0:\n continue\n n_atoms += 1\n charge = atom_name[0]\n line_str = ''.join([\n 'ATOM ',\n '%5d' % n_atoms,\n ' ' + atom_name + ' ' * (3 - len(atom_name)),\n ' %s' % resd_name,\n ' %s' % chain_id,\n ' ' * (4 - len(str(idx_resd + 1))),\n '%s' % str(idx_resd + 1),\n '%s ' % i_code,\n '%8.3f' % atom_cords[idx_resd, idx_atom, 0],\n '%8.3f' % atom_cords[idx_resd, idx_atom, 1],\n '%8.3f' % atom_cords[idx_resd, idx_atom, 2],\n '%6.2f' % occupancy,\n '%6.2f' % temp_factor,\n ' ' * 10,\n '%2s' % charge,\n '%2s' % ' ',\n ])\n assert len(line_str) == 80, 'line length must be exactly 80 characters: ' + line_str\n o_file.write(line_str + '\\n')\n\n if logger is not None:\n logger.info(f' Export PDB file to {path}')"
}
] | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Optional, Tuple, Sequence
from rhofold.model.primitives import Linear, LayerNorm
from rhofold.utils.rigid_utils import Rigid
from rhofold.utils.tensor_utils import (
dict_multimap,
permute_final_dims,
flatten_final_dims,
)
from einops import rearrange
from rhofold.utils.alphabet import RNAAlphabet
from rhofold.utils.converter import RNAConverter | 9,008 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class RefineNet(nn.Module):
""""""
def __init__(self, dim = 64, is_pos_emb = True, n_layer = 4, enable = True, **kwargs):
"""Constructor function."""
super().__init__()
self.is_pos_emb = is_pos_emb
| # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class RefineNet(nn.Module):
""""""
def __init__(self, dim = 64, is_pos_emb = True, n_layer = 4, enable = True, **kwargs):
"""Constructor function."""
super().__init__()
self.is_pos_emb = is_pos_emb | self.alphabet = RNAAlphabet.from_architecture('RNA') | 6 | 2023-11-01 10:29:08+00:00 | 12k |
ziqi-zhang/TAOISM | python/sgx_net.py | [
{
"identifier": "SecretInputLayer",
"path": "python/layers/input.py",
"snippet": "class SecretInputLayer(SecretNonlinearLayer):\n shape = None\n\n def __init__(\n self, sid, LayerName, input_shape, EnclaveMode, link_prev=True, link_next=True, \n manually_register_prev=False, manually_register_next=False\n ):\n super().__init__(sid, LayerName, EnclaveMode, link_prev, link_next, manually_register_prev, manually_register_next)\n self.shape = input_shape\n\n def link_tensors(self):\n gt.link_tags(self.get_tag(\"input\", remap=False), self.get_tag(\"output\", remap=False))\n super().link_tensors()\n\n def init_shape(self):\n return\n\n def set_input(self, tensor):\n self.set_tensor_cpu_gpu_enclave(\"input\", tensor)\n\n def get_output_shape(self):\n return self.shape\n\n def forward(self):\n return\n\n def backward(self):\n return\n\n def plain_forward(self):\n return\n\n def plain_backward(self):\n return\n\n def show_plain_error(self):\n return\n\n def print_connection_info(self):\n print(f\"{self.LayerName:30} shape{self.shape} output {self.NextLayer.LayerName:30}\")"
},
{
"identifier": "SecretOutputLayer",
"path": "python/layers/output.py",
"snippet": "class SecretOutputLayer(SecretNonlinearLayer):\n TargetShape = None\n loss = 0\n\n def __init__(\n self, sid, LayerName, EnclaveMode, inference=False, link_prev=True, link_next=True, \n manually_register_prev=False, manually_register_next=False\n ):\n super().__init__(sid, LayerName, EnclaveMode, link_prev, link_next, manually_register_prev, manually_register_next)\n self.ForwardFunc = torch.nn.CrossEntropyLoss()\n self.PlainFunc = torch.nn.CrossEntropyLoss()\n self.EnclaveMode = ExecutionModeOptions.CPU\n self.inference = inference\n\n\n def init_shape(self):\n self.InputShape = self.PrevLayer.get_output_shape()\n self.OutputShape = [1]\n self.TargetShape = [self.InputShape[0]] # number of Minibatch\n\n def init(self, start_enclave=True):\n TensorLoader.init(self, start_enclave)\n\n def generate_tensor_name_list(self, force=False):\n if not force and self.tensor_name_list:\n return\n if self.sid == 2:\n self.tensor_name_list = {}\n return\n\n NeededTensorNames = [\n (\"output\", self.OutputShape, None),\n (\"DerInput\", self.InputShape, None),\n (\"input\", self.InputShape, None),\n (\"target\", self.TargetShape, None),\n ]\n\n self.tensor_name_list = NeededTensorNames\n\n def load_target(self, tensor):\n self.set_tensor_with_name(\"target\", tensor)\n\n def get_loss(self):\n return self.loss\n \n def get_prediction(self):\n self.forward_tensor_transfer(\"input\")\n if torch.sum(self.get_cpu(\"input\").abs()) == 0:\n raise RuntimeError(\"SGX input not load\")\n return self.get_cpu(\"input\")\n\n def forward(self):\n if not self.inference:\n with NamedTimerInstance(f\"S{self.sid}: {self.LayerName} Forward\", verbose_level=VerboseLevel.LAYER):\n self.forward_tensor_transfer()\n self.set_cpu(\"input\", self.get_cpu(\"input\").detach())\n self.requires_grad_on_cpu(\"input\")\n self.set_cpu(\"output\", self.ForwardFunc(self.get_cpu(\"input\"), self.get_cpu(\"target\")))\n loss = self.get_cpu(\"output\").item()\n self.loss = loss\n\n def backward(self):\n with NamedTimerInstance(f\"S{self.sid}: {self.LayerName} Backward\", verbose_level=VerboseLevel.LAYER):\n self.backward_tensor_transfer(transfer_tensor=\"output\")\n self.get_cpu(\"output\").backward()\n self.set_cpu(\"DerInput\", self.get_cpu(\"input\").grad)\n\n def plain_forward(self):\n if not self.inference:\n self.make_sure_cpu_is_latest(\"input\")\n self.set_cpu(\"input\", self.get_cpu(\"input\").detach())\n self.requires_grad_on_cpu(\"input\")\n with NamedTimerInstance(f\"S{self.sid}: {self.LayerName} PlainForward\"):\n self.PlainForwardResult = self.PlainFunc(self.get_cpu(\"input\"), self.get_cpu(\"target\"))\n\n def plain_backward(self):\n self.make_sure_cpu_is_latest(\"output\")\n with NamedTimerInstance(f\"S{self.sid}: {self.LayerName} PlainBackward\"):\n self.PlainForwardResult.backward()\n self.set_cpu(\"DerInput\", self.get_cpu(\"input\").grad)\n\n def show_plain_error(self):\n self.make_sure_cpu_is_latest(\"output\")\n err = compare_expected_actual(self.PlainForwardResult, self.get_cpu(\"output\"))\n print(f\"S{self.sid}: {self.LayerName} Forward Error: {err}\")\n\n if self.PlainBackwardResult is None:\n return\n self.make_sure_cpu_is_latest(\"DerInput\")\n\n err = compare_expected_actual(self.PlainBackwardResult, self.get_cpu(\"DerInput\"))\n print(f\"S{self.sid}: {self.LayerName} Backward Error {err}\")\n\n def print_connection_info(self):\n print(f\"{self.LayerName:30} shape{self.InputShape}{' ':30} input {self.PrevLayer.LayerName:30}\")"
},
{
"identifier": "str_hash",
"path": "python/utils/basic_utils.py",
"snippet": "def str_hash(s):\n return int(int(hashlib.sha224(s.encode('utf-8')).hexdigest(), 16) % ((1 << 62) - 1))"
},
{
"identifier": "GlobalTensor",
"path": "python/enclave_interfaces.py",
"snippet": "class GlobalTensor(object):\n cpu_tensor = {}\n gpu_tensors = {}\n encrypted_tensors = {}\n LinkedTags = {}\n InverseLinkedTags = {}\n IsInitEnclaveTensor = {}\n EnclaveInterface = None\n eid = None\n is_init_global_tensor = False\n\n @staticmethod\n def init():\n if GlobalTensor.is_init_global_tensor:\n return\n GlobalTensor.EnclaveInterface = EnclaveInterface()\n GlobalTensor.EnclaveInterface.init_enclave()\n GlobalTensor.is_init_global_tensor = True\n\n @staticmethod\n def destroy():\n GlobalTensor.EnclaveInterface.destroy_enclave()\n\n GlobalTensor.cpu_tensor = {}\n GlobalTensor.gpu_tensors = {}\n GlobalTensor.encrypted_tensors = {}\n GlobalTensor.LinkedTags = {}\n GlobalTensor.InverseLinkedTags = {}\n GlobalTensor.IsInitEnclaveTensor = {}\n GlobalTensor.EnclaveInterface = None\n GlobalTensor.eid = None\n GlobalTensor.is_init_global_tensor = False\n\n\n @staticmethod\n def get_eid():\n return GlobalTensor.EnclaveInterface.get_eid()\n\n @staticmethod\n def link_tags(tag1, tag2):\n if tag1 == tag2:\n return\n\n friends = []\n\n def add_friends(tag):\n nonlocal friends\n if tag in GlobalTensor.LinkedTags:\n its_leader_tag = GlobalTensor.LinkedTags[tag]\n if its_leader_tag in GlobalTensor.InverseLinkedTags:\n friends += GlobalTensor.InverseLinkedTags.pop(its_leader_tag)\n else:\n friends += [tag]\n\n add_friends(tag1)\n add_friends(tag2)\n leader_tag = min(friends)\n\n GlobalTensor.InverseLinkedTags[leader_tag] = friends\n for t in friends:\n if t in GlobalTensor.IsInitEnclaveTensor:\n raise ValueError(\"Tags must linked before tensor initialization\")\n GlobalTensor.LinkedTags[t] = leader_tag\n\n @staticmethod\n def get_remapped_tags(tag):\n return GlobalTensor.LinkedTags[tag] if tag in GlobalTensor.LinkedTags else tag\n\n @staticmethod\n def set_cpu(tag, tensor):\n GlobalTensor.cpu_tensor[tag] = tensor.to(torch.device(\"cpu\"))\n\n @staticmethod\n def set_gpu(tag, tensor):\n GlobalTensor.gpu_tensors[tag] = tensor\n\n @staticmethod\n def set_encrypted(tag, tensor):\n GlobalTensor.encrypted_tensors[tag] = tensor\n\n @staticmethod\n def get_cpu(tag):\n return GlobalTensor.cpu_tensor[tag]\n\n @staticmethod\n def get_gpu(tag):\n return GlobalTensor.gpu_tensors[tag]\n\n @staticmethod\n def get_encryption(tag):\n return GlobalTensor.encrypted_tensors[tag]\n\n @staticmethod\n def init_enclave_tensor(tag, size):\n size = list(size)\n if len(size) < 4:\n size = [1] * (4 - len(size)) + size\n remapped_tag = GlobalTensor.get_remapped_tags(tag)\n if remapped_tag in GlobalTensor.IsInitEnclaveTensor:\n return\n else:\n GlobalTensor.IsInitEnclaveTensor[remapped_tag] = True\n eid = GlobalTensor.get_eid()\n GlobalTensor.EnclaveInterface.lib.InitTensor(eid, remapped_tag, size[0], size[1], size[2], size[3])\n\n @staticmethod\n def init_encrypted_tensor(tag, shape):\n GlobalTensor.encrypted_tensors[GlobalTensor.get_remapped_tags(tag)] = \\\n GlobalTensor.EnclaveInterface.create_encrypt_torch(shape)"
},
{
"identifier": "NamedTimerInstance",
"path": "python/utils/timer_utils.py",
"snippet": "class NamedTimerInstance(object):\n def __init__(self, name, verbose_level=VerboseLevel.EVERY):\n self.name = name\n self.verbose_level = verbose_level\n\n def __enter__(self):\n return NamedTimer.start(self.name, verbose_level=self.verbose_level)\n ...\n\n def __exit__(self, *args):\n NamedTimer.end(self.name)\n ..."
},
{
"identifier": "NetworkNamedTimerInstance",
"path": "python/utils/timer_utils.py",
"snippet": "class NetworkNamedTimerInstance(object):\n def __init__(self, name, verbose_level=VerboseLevel.EVERY):\n self.name = name\n self.verbose_level = verbose_level\n\n def __enter__(self):\n return NamedTimer.start(self.name, verbose_level=self.verbose_level)\n\n def __exit__(self, *args):\n NamedTimer.end(self.name)"
},
{
"identifier": "SecretConfig",
"path": "python/common_torch.py",
"snippet": "class GlobalCppExtension(object):\n def get_instance():\n def __init__(self):\n def get_conv2d_cudnn():\ndef union_dicts(*dicts):\ndef calc_conv2d_output_shape(x_shape, w_shape, padding):\ndef calc_conv2d_output_shape_stride(x_shape, w_shape, padding, stride):\ndef calc_shape_conv2d_weight(dy, x):\ndef mod_on_cpu(x):\ndef mod_on_gpu(x):\ndef mod_move_down(x):\ndef move_down(x):\ndef quantize(x, src):\ndef dequantize(x, src1, src2, dst):\ndef find_max_expand(x):\ndef rescale(x, scale):\ndef get_random_uniform(upper_bound, size):\ndef generate_unquantized_tensor(enum, shape):\ndef modest_magnitude(w):"
},
{
"identifier": "compare_expected_actual",
"path": "python/utils/torch_utils.py",
"snippet": "def compare_expected_actual(expected, actual, show_where_err=False, get_relative=False, verbose=False, show_values=False):\n def purify(x):\n # return torch.tensor(x)\n res = x\n # if not (isinstance(x, torch.Tensor) or isinstance(x, torch.Variable)):\n if not (isinstance(x, torch.Tensor) ):\n res = torch.tensor(x)\n # return x.detach().numpy()\n return res.type(torch.float).to(\"cpu\")\n expected = purify(expected)\n actual = purify(actual)\n\n if show_values:\n print(\"expected:\", expected[0, 0])\n print(\"actual:\", actual[0, 0])\n\n avg_abs_diff = torch.mean(torch.abs(expected - actual)).item()\n res = avg_abs_diff\n\n if show_where_err:\n show_indices = torch.abs(expected - actual) / torch.abs(expected) > 0.5\n # show_indices = (expected != actual)\n print(\"error indices: \", np.where(show_indices.cpu()))\n print(\"expected values:\", expected[show_indices])\n print(\"difference:\", (expected - actual)[show_indices])\n\n if get_relative:\n tmp_expected, tmp_actual = expected[expected != 0], actual[expected != 0]\n relative_diff = torch.abs(tmp_expected - tmp_actual) / torch.abs(tmp_expected)\n relative_avg_diff = torch.mean(torch.abs(tmp_actual - tmp_expected)) / torch.mean(torch.abs(tmp_expected))\n Error = namedtuple(\"Error\", (\"AvgAbsDiff\", \"RelAvgDiff\", \"AvgRelDiff\", \"StdRelDiff\"))\n res = Error(avg_abs_diff, relative_avg_diff.item(), torch.mean(relative_diff).item(), torch.std(relative_diff).item())\n\n if verbose:\n print(res)\n\n return res"
},
{
"identifier": "torch_sync",
"path": "python/utils/torch_utils.py",
"snippet": "def torch_sync():\n dist.barrier()"
},
{
"identifier": "TensorLoader",
"path": "python/tensor_loader.py",
"snippet": "class TensorLoader(EnclaveInterface):\n def __init__(self):\n super().__init__()\n self.sid = -1\n self.tensor_name_list = []\n self.encryption_tensor_name_list = {}\n self.RandomVarName = None\n self.ShareVarName = None\n self.ShareTuple = None\n\n def init(self, start_enclave=True):\n if start_enclave:\n print(\"Initializing sid: %d\" % self.sid)\n self.init_enclave()\n \n self.generate_tensor_name_list()\n # if hasattr(self, \"LayerName\") and self.LayerName == \"Layer1.0.main.relu2\":\n # st()\n\n self.init_enclave_tensors()\n self.init_cpu_tensor()\n self.init_encryption_tensor()\n \n\n def generate_tensor_name_list(self, force=False):\n return\n\n def link_tensors(self):\n pass\n\n def init_enclave_tensors(self):\n self.generate_tensor_name_list()\n for TensorName, shape, SeedList in self.tensor_name_list:\n if shape is None:\n raise ValueError(\"The shape is None. Please setup the shape before init_enclave_tensor\")\n # print(f\"TensorLoader init {TensorName}, {shape}\")\n self.init_enclave_tensor(TensorName, shape)\n if SeedList is None:\n continue\n for seed in SeedList:\n self.set_seed(TensorName, seed)\n\n def set_cpu(self, name, t):\n # print(\"---\", name, self.get_tag(name))\n GlobalTensor.set_cpu(self.get_tag(name), t)\n\n def set_gpu(self, name, t):\n GlobalTensor.set_gpu(self.get_tag(name), t)\n\n def set_encryption(self, name, t):\n GlobalTensor.set_encryption(self.get_tag(name), t)\n\n def get_cpu(self, name):\n return GlobalTensor.get_cpu(self.get_tag(name))\n\n def get_gpu(self, name):\n return GlobalTensor.get_gpu(self.get_tag(name))\n\n def get_encryption(self, name):\n return GlobalTensor.get_encryption(self.get_tag(name))\n\n def generate_cpu_tensor(self, name, shape):\n self.set_cpu(name, torch.zeros(shape).type(SecretConfig.dtypeForCpuOp))\n # self.CpuTensors[name] = torch.zeros(shape).type(SecretConfig.dtypeForCpuOp)\n\n def transfer_cpu_to_gpu(self, name):\n self.set_gpu(name, self.get_cpu(name).cuda(non_blocking=True).type(SecretConfig.dtypeForCudaMm))\n # self.GpuTensors[name] = self.CpuTensors[name].cuda(non_blocking=True).type(SecretConfig.dtypeForCudaMm)\n\n def transfer_gpu_to_cpu(self, name):\n cpu_tensor = self.get_cpu(name)\n gpu_tensor = self.get_gpu(name)\n cpu_tensor.copy_(gpu_tensor.type(SecretConfig.dtypeForCpuOp))\n\n def transfer_enclave_to_cpu(self, name):\n self.from_enclave(name, self.get_cpu(name))\n\n def transfer_cpu_to_enclave(self, name):\n self.set_tensor(name, self.get_cpu(name))\n\n def init_cpu_tensor(self):\n self.generate_tensor_name_list()\n\n for TensorName, shape, _ in self.tensor_name_list:\n self.generate_cpu_tensor(TensorName, shape)\n\n def init_encryption_tensor(self):\n self.generate_tensor_name_list()\n\n for name, shape in self.encryption_tensor_name_list:\n GlobalTensor.init_encrypted_tensor(self.get_tag(name), shape)\n # self.EncrtyptedTensors[name] = self.CreateEncryptTorch(shape)\n\n def set_tensor_cpu_enclave(self, name, tensor):\n # GlobalTensor.SetNamedTensor(self.GetTag(tag), tensor)\n self.set_cpu(name, tensor)\n self.set_tensor(name, tensor)\n # print(\"Set cpu enclave: \", tensor[0,:10])\n\n def set_tensor_cpu_gpu_enclave(self, name, tensor):\n # GlobalTensor.SetNamedTensor(self.GetTag(tag), tensor)\n self.set_cpu(name, tensor)\n self.set_tensor(name, tensor)\n self.set_gpu(name, tensor)\n # print(\"Set cpu enclave: \", tensor[0,:10])\n\n def from_enclave(self, name, tensor):\n self.get_tensor(name, tensor)\n\n # def generate_enclave_tensor(self, name):\n # if name in self.RandomVarName:\n # return 
self.async_get_random(name, self.get_cpu(name))\n # elif name in self.ShareVarName:\n # original, seed = self.ShareTuple[name]\n # return self.async_get_share(original, self.get_cpu(name), seed)\n # else:\n # raise Exception(\"Doesnt how to generate this tensor\")"
},
{
"identifier": "StatelessLogger",
"path": "python/stateless_logger.py",
"snippet": "class StatelessLogger:\n class __StatelessLogger:\n def __init__(self, rank):\n self.rank = rank\n self.logfile_path = SecretConfig.stateless_logfile\n logging.basicConfig(filename=self.logfile_path,\n level=logging.DEBUG,\n format=f'[NS][{rank}][%(asctime)s.%(msecs)03d][%(created).6f]: %(message)s',\n datefmt='%H.%M.%S')\n self.logger = logging.getLogger(SecretConfig.stateless_logger_name)\n\n def debug(self, msg):\n self.logger.debug(msg)\n\n def info(self, msg):\n self.logger.info(msg)\n\n def warning(self, msg):\n self.logger.warning(msg)\n\n def error(self, msg):\n self.logger.error(msg)\n\n def critical(self, msg):\n self.logger.critical(msg)\n\n\n instance = None\n def __init__(self, rank):\n if not StatelessLogger.instance:\n StatelessLogger.instance = StatelessLogger.__StatelessLogger(rank)\n else:\n StatelessLogger.instance.rank = rank\n def __getattr__(self, name):\n return getattr(self.instance, name)"
}
] | import os
import torch
import torch.nn.functional as F
import torch.distributed as dist
from itertools import product
from collections import defaultdict, namedtuple
from pdb import set_trace as st
from time import time
from python.layers.input import SecretInputLayer
from python.layers.output import SecretOutputLayer
from python.utils.basic_utils import str_hash
from python.enclave_interfaces import GlobalTensor
from python.utils.timer_utils import NamedTimerInstance, NetworkNamedTimerInstance
from python.common_torch import SecretConfig, mod_move_down, union_dicts, \
get_random_uniform, calc_shape_conv2d_weight, GlobalCppExtension
from python.utils.torch_utils import compare_expected_actual, torch_sync
from python.tensor_loader import TensorLoader
from python.stateless_logger import StatelessLogger | 7,631 |
def classifier_output(self):
with NamedTimerInstance(f"S{self.sid}: {self.nn_name} classifier_output"):
self.forward()
if self.sid == 2:
return
# layers: input_layer, ..., fc_layer, output_layer
last_fc = self.layers[-2]
last_fc.transfer_enclave_to_cpu("output")
outputs = last_fc.get_cpu("output")
_, predicted = torch.max(outputs.data, 1)
return predicted
def get_loss(self):
return self.layers[-1].get_loss()
def forward_with_time(self):
def run_forward(layer):
layer.forward()
t0 = time()
with NetworkNamedTimerInstance(f"S{self.sid}: {self.nn_name} Forward"):
self.execute_for_each_layer(run_forward)
t1 = time()
# time in ms
elapse_time = (t1 - t0) * (10 ** 3)
return elapse_time
def forward(self):
def run_forward(layer):
layer.forward()
with NetworkNamedTimerInstance(f"S{self.sid}: {self.nn_name} Forward"):
self.execute_for_each_layer(run_forward)
def backward(self):
def run_backward(layer):
layer.backward()
with NamedTimerInstance(f"S{self.sid}: {self.nn_name} Backward"):
self.execute_for_each_layer(run_backward, reverse=True)
def plain_forward(self):
with NetworkNamedTimerInstance(f"S{self.sid}: {self.nn_name} PlainForward"):
self.execute_for_each_layer(lambda x: x.plain_forward())
def plain_backward(self):
with NetworkNamedTimerInstance(f"S{self.sid}: {self.nn_name} PlainBackward"):
self.execute_for_each_layer(lambda x: x.plain_backward(), reverse=True)
def show_plain_error(self):
self.execute_for_each_layer(lambda x: x.show_plain_error())
# Take the registered learnable parameters list in layers and update them
# It may need to take extra storage
# And execution depends on where the tensors are stored
# https://pytorch.org/docs/stable/optim.html#torch.optim.SGD
class SgdOptimizer(TensorLoader):
def __init__(self, sid):
super().__init__()
self.sid = sid
self.learning_rate = 0.05
self.momentum = 0.9
self.weight_decay = 5e-4
self.momentum_init_flags = defaultdict(lambda: False)
self.ideal_momentum_buf = {}
self.lr_gamma = 0.5
self.lr_step = 30
self.step_counter = 0
self.layers = None
def set_layers(self, layers):
self.layers = layers
def generate_tensor_name_list(self, force=False):
# Run if forced or self.tensor_name_list is not generated
if not force and self.tensor_name_list:
return
if self.sid == 2:
return
self.tensor_name_list = []
for layer in self.layers:
for (DerName, ParamName, shape) in layer.LearnableParamsList:
self.tensor_name_list.append((ParamName + "Momentum", shape, None))
def update_params(self, test_with_ideal=False):
if self.sid == 2:
return
for layer in self.layers:
self.update_params_in_layer(layer, test_with_ideal=test_with_ideal)
def update_params_in_layer(self, layer, test_with_ideal=False):
# ref: https://github.com/pytorch/pytorch/blob/master/torch/optim/sgd.py
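        # The enclave-side sgd_update is expected to follow the referenced
        # PyTorch SGD-with-momentum rule: v <- momentum * v + grad +
        # weight_decay * param, then param <- param - lr * v, with
        # first_momentum signalling that the momentum buffer must be created.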
if layer.LearnableParamsList is None:
return
task_ids = []
for (der_name, param_name, shape) in layer.LearnableParamsList:
momentum_name = param_name + "Momentum"
global_momentum_name = layer.name_modifier(momentum_name)
if layer.StoreInEnclave:
if test_with_ideal:
ideal_p, ideal_momentum = self.ideal_update_params_with_name(layer, der_name, param_name, shape)
first_momentum = not self.momentum_init_flags[global_momentum_name]
if first_momentum:
# print("FIRST MOMENTUM")
self.momentum_init_flags[global_momentum_name] = True
layer.init_enclave_tensor(momentum_name, shape)
task_id = layer.sgd_update(param_name=param_name, grad_name=der_name, momentum_name=momentum_name,
lr=self.learning_rate, momentum=self.momentum,
weight_decay=self.weight_decay,
first_momentum=first_momentum, is_async=True)
if test_with_ideal:
while not self.get_task_status(task_id):
pass
layer.generate_cpu_tensor(momentum_name, shape)
layer.transfer_enclave_to_cpu(momentum_name)
layer.transfer_enclave_to_cpu(param_name)
| #!/usr/bin/env python
from __future__ import print_function
torch.backends.cudnn.deterministic = True
LearnableParamTuple = namedtuple('LearnableParam', ('dw_name', 'w_name', 'shape'))
def conv2d_op(w, x, is_div=True):
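    # padding is hard-coded to 1; the tiled path below assumes the output
    # keeps the input spatial size (i.e. 3x3 kernels with stride 1).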
padding = 1
batch_size, in_chan, img_hw, _ = x.size()
out_chan, _, fil_hw, __ = w.size()
y_shape = [batch_size, out_chan, img_hw, img_hw]
dtype = x.dtype
device = x.device
    is_cpu = device == torch.device("cpu")
def base_conv2d(sub_x, sub_w):
return F.conv2d(sub_x, sub_w, padding=padding)
if is_cpu or (is_div is False):
return base_conv2d(x, w)
def sum_of_div(best_shape):
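        # Tile the convolution over the batch, input-channel and output-channel
        # dimensions and accumulate the partial outputs; each tile is a smaller
        # cuDNN call, presumably to bound peak GPU memory for large workloads.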
best_batch_size, best_in_chan, best_out_chan = best_shape
y = torch.zeros(y_shape, device=device, dtype=dtype)
for idx_batch_size, idx_in_chan, idx_out_chan in product(range(batch_size // best_batch_size),
range(in_chan // best_in_chan),
range(out_chan // best_out_chan)):
start_batch_size, end_batch_size = idx_batch_size * best_batch_size, (idx_batch_size + 1) * best_batch_size
start_in_chan, end_in_chan = idx_in_chan * best_in_chan, (idx_in_chan + 1) * best_in_chan
start_out_chan, end_out_chan = idx_out_chan * best_out_chan, (idx_out_chan + 1) * best_out_chan
y[start_batch_size:end_batch_size, start_out_chan:end_out_chan, :, :] += \
base_conv2d(x[start_batch_size:end_batch_size, start_in_chan:end_in_chan, :, :],
w[start_out_chan:end_out_chan, start_in_chan:end_in_chan, :, :])
return y
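    # Tile sizes keyed by problem shape (batch_size, in_chan, out_chan, img_hw)
    # -> (tile_batch, tile_in_chan, tile_out_chan), apparently tuned for V100
    # GPUs; shapes not listed fall back to a single full-size call.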
shapes_v100 = {
(1024, 512, 512, 2): (1024, 512, 128),
(1024, 512, 512, 4): (1024, 512, 128),
(1024, 256, 512, 4): (1024, 128, 128),
(1024, 256, 256, 8): (1024, 64, 128),
(1024, 128, 256, 8): (1024, 64, 128),
(512, 512, 512, 2): (512, 512, 128),
(512, 512, 512, 4): (256, 256, 128),
(512, 256, 512, 4): (256, 256, 128),
(512, 256, 256, 8): (512, 128, 128),
(512, 128, 256, 8): (512, 128, 128),
}
tunnable_shape = (batch_size, in_chan, out_chan, img_hw)
if is_div and tunnable_shape in shapes_v100:
return sum_of_div(shapes_v100[tunnable_shape])
else:
return base_conv2d(x, w)
def conv2d_input_grad_op(w, dy):
return F.conv_transpose2d(dy, w, padding=1)
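# Gradient of the convolution w.r.t. its weights. The CPU path uses the
# transposed-conv2d trick; the GPU path calls the custom cuDNN extension
# (GlobalCppExtension), reusing the same shape-based tiling idea as conv2d_op.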
def conv2d_weight_grad_op(dy, x, is_div=True):
batch_size, in_chan, img_hw, _ = x.size()
_, out_chan, __, ___ = dy.size()
w_shape = calc_shape_conv2d_weight(dy, x)
dtype = x.dtype
device = x.device
    is_cpu = device == torch.device("cpu")
if is_cpu:
return torch.transpose(F.conv2d(torch.transpose(x, 0, 1), torch.transpose(dy, 0, 1), padding=1), 0,
1).contiguous()
def base_conv2d_weight_grad_op(sub_dy, sub_x):
sub_w_shape = calc_shape_conv2d_weight(sub_dy, sub_x)
return GlobalCppExtension.get_conv2d_cudnn().backward(sub_w_shape, sub_dy, sub_x, (1, 1), (1, 1), (1, 1), 1, 0, 0)
if is_div is False:
return base_conv2d_weight_grad_op(dy, x)
def sum_of_div(best_shape):
# print("running conv2d weight div")
best_batch_size, best_in_chan, best_out_chan = best_shape
dw = torch.zeros(w_shape, device=device, dtype=dtype)
for idx_batch_size, idx_in_chan, idx_out_chan in product(range(batch_size // best_batch_size),
range(in_chan // best_in_chan),
range(out_chan // best_out_chan)):
start_batch_size, end_batch_size = idx_batch_size * best_batch_size, (idx_batch_size + 1) * best_batch_size
start_in_chan, end_in_chan = idx_in_chan * best_in_chan, (idx_in_chan + 1) * best_in_chan
start_out_chan, end_out_chan = idx_out_chan * best_out_chan, (idx_out_chan + 1) * best_out_chan
dw[start_out_chan:end_out_chan, start_in_chan:end_in_chan, :, :] += \
base_conv2d_weight_grad_op(dy[start_batch_size:end_batch_size, start_out_chan:end_out_chan, :, :],
x[start_batch_size:end_batch_size, start_in_chan:end_in_chan, :, :])
return dw
shapes_v100 = {
(1024, 512, 512, 2): (1024, 512, 128),
(1024, 512, 512, 4): (1024, 512, 128),
(1024, 256, 512, 4): (1024, 128, 128),
(1024, 128, 256, 8): (1024, 128, 128),
(512, 512, 512, 2): (512, 512, 128),
(512, 512, 512, 4): (512, 512, 128),
(512, 256, 512, 4): (512, 128, 128),
(512, 128, 256, 8): (128, 128, 256),
}
tunnable_shape = (batch_size, in_chan, out_chan, img_hw)
if is_div and tunnable_shape in shapes_v100:
return sum_of_div(shapes_v100[tunnable_shape])
else:
return base_conv2d_weight_grad_op(dy, x)
def matmul_op(w, x):
return torch.mm(x, w.t())
def matmul_input_grad_op(w, dy):
return torch.mm(dy, w)
def matmul_weight_grad_op(dy, x):
return torch.mm(dy.t(), x)
def set_tensor_name_maybe_quantized(name, quantized):
return name + ("Q" if quantized else "")
# target_op = conv2d_op
# idealC = ModOnCpu(target_op(AQ.type(torch.double), BQ.type(torch.double))).type(SecretConfig.dtypeForCpuOp)
# Forward: A = Weight, B = Input
# Input gradient: A = Weight, B = dy
InputGradRemap = {
"Af": "Af", "AQ": "AQ", "A0": "A0", "A1": "A1",
"Bf": "DerCf", "BQ": "DerCQ", "B0": "DerC0", "B1": "DerC1",
"E": "EForDerB", "F": "FForDerB",
"C0": "C0ForDerB", "C1": "C1ForDerB", "CQ": "CQForDerB", "Cf": "CfForDerB", "Z": "ZForDerB",
}
# Weight gradient: A = dy, B = InputQ
WeightGradRemap = {
"Af": "DerCf", "AQ": "DerCQ", "A0": "DerC0", "A1": "DerC1",
"Bf": "Bf", "BQ": "BQ", "B0": "B0", "B1": "B1",
"E": "EForDerA", "F": "FForDerA",
"C0": "C0ForDerA", "C1": "C1ForDerA", "CQ": "CQForDerA", "Cf": "CfForDerA", "Z": "ZForDerA",
}
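# These remap tables rename tensor roles so the secret-sharing kernel written
# for the forward product C = A * B can be reused for both backward passes,
# with dy (DerC*) substituted for the appropriate operand and separate
# intermediate buffers (*ForDerA / *ForDerB) used per pass.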
def secret_op_class_factory(sid, target_op_name):
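    # Dynamically builds a class named "Secret<Op>S<sid>" by subclassing the
    # party-specific base (SecretBaseS0/S1/S2) and binding the chosen tensor
    # operation as its target_op method.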
all_target_op = {"Matmul": matmul_op, "MatmulInputGrad": matmul_input_grad_op,
"MatmulWeightGrad": matmul_weight_grad_op,
"Conv2d": conv2d_op, "Conv2dInputGrad": conv2d_input_grad_op,
"Conv2dWeightGrad": conv2d_weight_grad_op}
all_sid_class = {0: SecretBaseS0, 1: SecretBaseS1, 2: SecretBaseS2}
target_op_func = all_target_op[target_op_name]
sid_class = all_sid_class[sid]
class_name = "Secret%sS%d" % (target_op_name, sid)
def __init__(self, name):
sid_class.__init__(self, name)
# noinspection PyUnusedLocal
def target_op(self, a, b):
return target_op_func(a, b)
new_class = type(class_name, (sid_class,), {"__init__": __init__, "target_op": target_op})
return new_class
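# SecretNeuralNetwork drives the layer list: set_layers links each layer to its
# neighbours, initializes shapes and shared tensors, and forward/backward walk
# the layers in order (party 2 skips layers marked IsDummyForS2).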
class SecretNeuralNetwork(TensorLoader):
nn_name = None
layers = None
def __init__(self, sid, nn_name):
super().__init__()
self.sid = sid
self.init(start_enclave=False)
self.nn_name = nn_name
def set_layers(self, layers):
self.layers = layers
if not isinstance(self.layers[0], SecretInputLayer):
raise ValueError("The first layer has to be input layer")
if not isinstance(self.layers[-1], SecretOutputLayer):
raise ValueError("The last layer has to be output layer")
for i in range(len(self.layers) - 1):
PrevLayer = self.layers[i]
NextLayer = self.layers[i + 1]
if not PrevLayer.manually_register_next:
PrevLayer.register_next_layer(NextLayer)
if not NextLayer.manually_register_prev:
NextLayer.register_prev_layer(PrevLayer)
for layer in self.layers:
# print(f"Init_shape/link layer {layer.LayerName}")
layer.set_eid(self.get_eid())
layer.init_shape()
# if layer.LayerName in ["Layer1.0.weighted_add", "Layer1.0.proxies.0.bn"]:
# st()
layer.link_tensors()
# print(layer.LayerName)
# layer.print_tensor_link_relation()
# if layer.LayerName in ["Layer1.0.weighted_add", "Layer1.0.proxies.0.bn"]:
# st()
for idx, layer in enumerate(self.layers):
# print(f"Init layer {layer.LayerName}")
# if layer.LayerName == "Layer1.0.main.relu2":
# st()
layer.init(start_enclave=False)
# if idx > 3:
# print(layer.LayerName, self.layers[4].get_cpu("input").shape, self.layers[4].PrevLayer.LayerName)
def execute_for_each_layer(self, func, reverse=False):
layers = self.layers[::-1] if reverse else self.layers
for layer in layers:
# print(f"SID: {self.sid} {layer.LayerName}, {func}")
if self.sid == 2 and layer.IsDummyForS2:
continue
# print("Processing ", layer.LayerName)
func(layer)
# st()
def classifier_output(self):
with NamedTimerInstance(f"S{self.sid}: {self.nn_name} classifier_output"):
self.forward()
if self.sid == 2:
return
# layers: input_layer, ..., fc_layer, output_layer
last_fc = self.layers[-2]
last_fc.transfer_enclave_to_cpu("output")
outputs = last_fc.get_cpu("output")
_, predicted = torch.max(outputs.data, 1)
return predicted
def get_loss(self):
return self.layers[-1].get_loss()
def forward_with_time(self):
def run_forward(layer):
layer.forward()
t0 = time()
with NetworkNamedTimerInstance(f"S{self.sid}: {self.nn_name} Forward"):
self.execute_for_each_layer(run_forward)
t1 = time()
# time in ms
elapse_time = (t1 - t0) * (10 ** 3)
return elapse_time
def forward(self):
def run_forward(layer):
layer.forward()
with NetworkNamedTimerInstance(f"S{self.sid}: {self.nn_name} Forward"):
self.execute_for_each_layer(run_forward)
def backward(self):
def run_backward(layer):
layer.backward()
with NamedTimerInstance(f"S{self.sid}: {self.nn_name} Backward"):
self.execute_for_each_layer(run_backward, reverse=True)
def plain_forward(self):
with NetworkNamedTimerInstance(f"S{self.sid}: {self.nn_name} PlainForward"):
self.execute_for_each_layer(lambda x: x.plain_forward())
def plain_backward(self):
with NetworkNamedTimerInstance(f"S{self.sid}: {self.nn_name} PlainBackward"):
self.execute_for_each_layer(lambda x: x.plain_backward(), reverse=True)
def show_plain_error(self):
self.execute_for_each_layer(lambda x: x.show_plain_error())
# Take the registered learnable parameters list in layers and update them
# It may need to take extra storage
# And execution depends on where the tensors are stored
# https://pytorch.org/docs/stable/optim.html#torch.optim.SGD
class SgdOptimizer(TensorLoader):
def __init__(self, sid):
super().__init__()
self.sid = sid
self.learning_rate = 0.05
self.momentum = 0.9
self.weight_decay = 5e-4
self.momentum_init_flags = defaultdict(lambda: False)
self.ideal_momentum_buf = {}
self.lr_gamma = 0.5
self.lr_step = 30
self.step_counter = 0
self.layers = None
def set_layers(self, layers):
self.layers = layers
def generate_tensor_name_list(self, force=False):
# Run if forced or self.tensor_name_list is not generated
if not force and self.tensor_name_list:
return
if self.sid == 2:
return
self.tensor_name_list = []
for layer in self.layers:
for (DerName, ParamName, shape) in layer.LearnableParamsList:
self.tensor_name_list.append((ParamName + "Momentum", shape, None))
def update_params(self, test_with_ideal=False):
if self.sid == 2:
return
for layer in self.layers:
self.update_params_in_layer(layer, test_with_ideal=test_with_ideal)
def update_params_in_layer(self, layer, test_with_ideal=False):
# ref: https://github.com/pytorch/pytorch/blob/master/torch/optim/sgd.py
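        # The enclave-side sgd_update is expected to follow the referenced
        # PyTorch SGD-with-momentum rule: v <- momentum * v + grad +
        # weight_decay * param, then param <- param - lr * v, with
        # first_momentum signalling that the momentum buffer must be created.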
if layer.LearnableParamsList is None:
return
task_ids = []
for (der_name, param_name, shape) in layer.LearnableParamsList:
momentum_name = param_name + "Momentum"
global_momentum_name = layer.name_modifier(momentum_name)
if layer.StoreInEnclave:
if test_with_ideal:
ideal_p, ideal_momentum = self.ideal_update_params_with_name(layer, der_name, param_name, shape)
first_momentum = not self.momentum_init_flags[global_momentum_name]
if first_momentum:
# print("FIRST MOMENTUM")
self.momentum_init_flags[global_momentum_name] = True
layer.init_enclave_tensor(momentum_name, shape)
task_id = layer.sgd_update(param_name=param_name, grad_name=der_name, momentum_name=momentum_name,
lr=self.learning_rate, momentum=self.momentum,
weight_decay=self.weight_decay,
first_momentum=first_momentum, is_async=True)
if test_with_ideal:
while not self.get_task_status(task_id):
pass
layer.generate_cpu_tensor(momentum_name, shape)
layer.transfer_enclave_to_cpu(momentum_name)
layer.transfer_enclave_to_cpu(param_name) | param_err = compare_expected_actual(ideal_p, layer.get_cpu(param_name), get_relative=True) | 7 | 2023-11-01 10:37:37+00:00 | 12k |
NVlabs/M2T2 | m2t2/m2t2.py | [
{
"identifier": "ActionDecoder",
"path": "m2t2/action_decoder.py",
"snippet": "class ActionDecoder(torch.nn.Module):\n def __init__(\n self, mask_dim, use_embed, embed_dim, max_num_pred, num_params,\n hidden_dim, num_layers, activation, offset_bins\n ):\n super(ActionDecoder, self).__init__()\n feat_dim = mask_dim\n if use_embed:\n feat_dim += embed_dim\n self.feat_dim = feat_dim\n self.use_embed = use_embed\n self.contact_dir_head = MLP(\n feat_dim, hidden_dim, 3, num_layers, activation\n )\n self.approach_dir_head = MLP(\n feat_dim, hidden_dim, 3, num_layers, activation\n )\n self.offset_head = MLP(\n feat_dim, hidden_dim, len(offset_bins) - 1,\n num_layers, activation\n )\n offset_bins = torch.tensor(offset_bins).float()\n self.offset_vals = (offset_bins[:-1] + offset_bins[1:]) / 2\n self.max_num_pred = max_num_pred\n self.param_head, self.release_head = None, None\n if num_params > 0:\n self.param_head = MLP(\n embed_dim, hidden_dim, num_params, num_layers, activation\n )\n self.release_head = MLP(\n embed_dim, hidden_dim, 1, num_layers, activation\n )\n\n @classmethod\n def from_config(cls, cfg, contact_decoder):\n args = {}\n args['mask_dim'] = contact_decoder.mask_dim\n args['use_embed'] = cfg.use_embed\n args['embed_dim'] = contact_decoder.embed_dim\n args['max_num_pred'] = cfg.max_num_pred\n args['num_params'] = cfg.num_params\n args['hidden_dim'] = cfg.hidden_dim\n args['num_layers'] = cfg.num_layers\n args['activation'] = cfg.activation\n args['offset_bins'] = cfg.offset_bins\n return cls(**args)\n\n def forward(\n self, xyz, mask_feats, confidence, mask_thresh, embedding, gt_masks=None\n ):\n mask_feats = mask_feats.moveaxis(1, -1) # [B, H, W, mask_dim]\n contacts, conf_all, inputs, num_grasps = [], [], [], []\n total_grasps, num_objs = 0, 0\n for i, (pts, feat, emb, conf) in enumerate(\n zip(xyz, mask_feats, embedding, confidence)\n ):\n mask = conf > mask_thresh\n if gt_masks is not None:\n mask = mask | (gt_masks[i] > 0)\n conf_list, num = [], []\n for e, m, conf in zip(emb, mask, conf):\n f, p, c = feat[m], pts[m], conf[m]\n if self.max_num_pred is not None:\n perm = torch.randperm(f.shape[0])[:self.max_num_pred]\n perm = perm.to(f.device)\n f, p, c = f[perm], p[perm], c[perm]\n if self.use_embed:\n f = torch.cat([\n f, repeat_new_axis(e, f.shape[0], dim=0)\n ], dim=-1)\n contacts.append(p)\n inputs.append(f)\n conf_list.append(c)\n num.append(f.shape[0])\n total_grasps += f.shape[0]\n conf_all.append(conf_list)\n num_grasps.append(num)\n num_objs += conf.shape[0]\n if total_grasps > 0:\n contacts = torch.cat(contacts)\n inputs = torch.cat(inputs)\n else:\n contacts = torch.zeros(0, 3).to(xyz.device)\n inputs = torch.zeros(0, self.feat_dim).to(xyz.device)\n\n if gt_masks is not None:\n gt_inputs, total_gt_grasps = [], 0\n for feat, emb, mask in zip(mask_feats, embedding, gt_masks):\n for e, m in zip(emb, mask):\n f = feat[m > 0]\n if self.use_embed:\n f = torch.cat([\n f, repeat_new_axis(e, f.shape[0], 0)\n ], dim=-1)\n gt_inputs.append(f)\n total_gt_grasps += f.shape[0]\n if total_gt_grasps > 0:\n gt_inputs = torch.cat(gt_inputs)\n else:\n gt_inputs = torch.zeros(0, self.feat_dim).to(xyz.device)\n inputs = torch.cat([inputs, gt_inputs])\n\n contact_dirs = F.normalize(self.contact_dir_head(inputs), dim=-1)\n approach_dirs = self.approach_dir_head(inputs)\n approach_dirs = F.normalize(\n approach_dirs - contact_dirs * (\n approach_dirs * contact_dirs\n ).sum(dim=-1, keepdim=True), dim=-1\n )\n offset_logits = self.offset_head(inputs)\n offsets_one_hot = F.one_hot(\n offset_logits.argmax(dim=-1), self.offset_vals.shape[0]\n )\n offsets = (\n 
offsets_one_hot.float() @ self.offset_vals.to(inputs.device)\n ).squeeze(-1)\n\n outputs = {}\n if gt_masks is not None:\n contact_dirs, outputs['contact_dirs'] = contact_dirs.split(\n [total_grasps, total_gt_grasps], dim=0\n )\n approach_dirs, outputs['approach_dirs'] = approach_dirs.split(\n [total_grasps, total_gt_grasps], dim=0\n )\n offsets = offsets[:total_grasps]\n outputs['offsets'] = offset_logits[total_grasps:]\n \n grasps = build_6d_grasp(contacts, contact_dirs, approach_dirs, offsets)\n grasps = double_split(grasps, num_grasps)\n contacts = double_split(contacts, num_grasps)\n outputs.update({\n 'grasps': grasps,\n 'grasp_confidence': conf_all,\n 'grasp_contacts': contacts,\n 'num_pred_grasps': torch.tensor(\n total_grasps / max(num_objs, 1), device=inputs.device\n )\n })\n if gt_masks is not None:\n outputs['num_gt_grasps'] = torch.tensor(\n total_gt_grasps / max(num_objs, 1), device=inputs.device\n )\n\n if self.param_head is not None:\n outputs['params'] = self.param_head(embedding)\n outputs['release'] = self.release_head(embedding).squeeze(-1)\n return outputs"
},
{
"identifier": "infer_placements",
"path": "m2t2/action_decoder.py",
"snippet": "def infer_placements(\n xyz, logits, bottom_center, ee_poses, cam_poses, conf_thresh, height\n):\n rot_prompts = torch.stack([torch.from_numpy(\n tra.euler_matrix(0, 0, 2 * np.pi / logits.shape[1] * i)\n )[:3, :3].float() for i in range(logits.shape[1])]).to(xyz.device)\n rot_prompts = repeat_new_axis(rot_prompts, xyz.shape[1], dim=1)\n\n placements, confidence, contact_points = [], [], []\n for i, (pts, bc, ee_pose, logit) in enumerate(zip(\n xyz, bottom_center, ee_poses, logits\n )):\n conf = logit.sigmoid()\n mask = conf > conf_thresh\n num = list(mask.sum(dim=1))\n rot = rot_prompts[mask]\n offsets = (ee_pose[:3, 3] - bc) @ rot.transpose(1, 2)\n if cam_poses is not None:\n pts = pts @ cam_poses[i, :3, :3].T + cam_poses[i, :3, 3]\n contacts = repeat_new_axis(pts, mask.shape[0], dim=0)[mask]\n place = build_6d_place(contacts, rot, offsets, ee_pose)\n place[:, 2, 3] = place[:, 2, 3] + height\n if cam_poses is not None:\n place = cam_poses[i].inverse() @ place\n placements.append(list(place.split(num)))\n confidence.append(list(conf[mask].split(num)))\n contact_points.append(list(contacts.split(num)))\n outputs = {\n 'placements': placements,\n 'placement_confidence': confidence,\n 'placement_contacts': contact_points\n }\n return outputs"
},
{
"identifier": "ContactDecoder",
"path": "m2t2/contact_decoder.py",
"snippet": "class ContactDecoder(nn.Module):\n def __init__(\n self,\n embed_dim: int,\n feedforward_dim: int,\n lang_context_length: int,\n lang_token_dim: int,\n num_grasp_queries: int,\n num_place_queries: int,\n scene_in_features: List[str],\n scene_in_channels: List[int],\n mask_feature: str,\n mask_dim: int,\n place_feature: str,\n place_dim: int,\n num_layers: int,\n num_heads: int,\n use_attn_mask: bool,\n use_task_embed: bool,\n activation: str\n ):\n \"\"\"\n Args:\n activation: activation function for the feedforward network\n embed_dim: transformer feature dimension\n feedforward_dim: hidden dimension of the feedforward network\n lang_context_length: sequence length for language context\n lang_token_dim: dimension of language tokens from pretrained network\n num_layers: number of transformer decoder layers\n num_heads: number of attention heads\n use_attn_mask: mask attention with downsampled instance mask\n predicted by the previous layer\n \"\"\"\n super(ContactDecoder, self).__init__()\n\n self.num_grasp_queries = num_grasp_queries\n self.num_place_queries = num_place_queries\n # learnable grasp query features\n self.query_embed = nn.Embedding(\n num_grasp_queries + num_place_queries, embed_dim\n )\n # learnable query p.e.\n self.query_pos_enc = nn.Embedding(\n num_grasp_queries + num_place_queries + lang_context_length, embed_dim\n )\n self.lang_context_length = lang_context_length\n\n self.place_feature = place_feature\n if place_dim != embed_dim and num_place_queries > 0:\n self.place_embed_proj = nn.Linear(place_dim, embed_dim)\n else:\n self.place_embed_proj = nn.Identity()\n if lang_token_dim != embed_dim:\n self.lang_token_proj = nn.Linear(lang_token_dim, embed_dim)\n else:\n self.lang_token_proj = nn.Identity()\n\n self.scene_in_features = scene_in_features\n self.num_scales = len(scene_in_features)\n # context scale embedding\n self.scale_embed = nn.Embedding(self.num_scales, embed_dim)\n # scene feature projection\n self.scene_feature_proj = nn.ModuleList([\n nn.Conv2d(channel, embed_dim, kernel_size=1)\n if channel != embed_dim else nn.Identity()\n for channel in scene_in_channels\n ])\n # context positional encoding\n self.pe_layer = PositionEncoding3D(embed_dim)\n\n # transformer decoder\n self.embed_dim = embed_dim\n self.num_heads = num_heads\n self.num_layers = num_layers\n self.cross_attention_layers = nn.ModuleList()\n self.self_attention_layers = nn.ModuleList()\n self.ffn_layers = nn.ModuleList()\n for _ in range(num_layers):\n self.cross_attention_layers.append(\n AttentionLayer(embed_dim, num_heads)\n )\n self.self_attention_layers.append(\n AttentionLayer(embed_dim, num_heads)\n )\n self.ffn_layers.append(\n FFNLayer(embed_dim, feedforward_dim, activation)\n )\n self.use_attn_mask = use_attn_mask\n\n # prediction MLPs\n self.mask_feature = mask_feature\n self.mask_dim = mask_dim\n self.norm = nn.LayerNorm(embed_dim)\n num_tasks = 0\n if num_grasp_queries > 0:\n if num_grasp_queries > 1:\n self.object_head = nn.Linear(embed_dim, 1)\n self.grasp_mask_head = MLP(\n embed_dim, embed_dim, mask_dim,\n num_layers=3, activation=activation\n )\n num_tasks += 1\n if num_place_queries > 0:\n self.place_mask_head = MLP(\n embed_dim, embed_dim, mask_dim,\n num_layers=3, activation=activation\n )\n num_tasks += 1\n self.use_task_embed = use_task_embed\n if use_task_embed:\n # learnable task embedding\n self.task_embed = nn.Embedding(num_tasks, embed_dim)\n\n @classmethod\n def from_config(cls, cfg, scene_channels, obj_channels):\n args = {}\n args[\"mask_feature\"] 
= cfg.mask_feature\n args[\"embed_dim\"] = cfg.embed_dim\n args[\"feedforward_dim\"] = cfg.feedforward_dim\n args[\"lang_context_length\"] = cfg.language_context_length\n args[\"lang_token_dim\"] = cfg.language_token_dim\n args[\"scene_in_features\"] = cfg.in_features[::-1]\n args[\"scene_in_channels\"] = [\n scene_channels[f] for f in cfg.in_features[::-1]\n ]\n args[\"num_grasp_queries\"] = cfg.num_grasp_queries\n args[\"num_place_queries\"] = cfg.num_place_queries\n args[\"mask_dim\"] = scene_channels[cfg.mask_feature]\n args[\"place_feature\"] = cfg.place_feature\n args[\"place_dim\"] = obj_channels[cfg.place_feature]\n args[\"num_layers\"] = cfg.num_layers\n args[\"num_heads\"] = cfg.num_heads\n args[\"use_attn_mask\"] = cfg.use_attn_mask\n args[\"use_task_embed\"] = cfg.use_task_embed\n args[\"activation\"] = cfg.activation\n return cls(**args)\n\n def predict(self, embed, mask_features):\n grasp_embed, place_embed = embed.split(\n [self.num_grasp_queries, self.num_place_queries]\n )\n pred, embed, attn_mask = {}, {}, []\n if grasp_embed.shape[0] > 0:\n embed['grasp'] = grasp_embed.transpose(0, 1)\n if self.num_grasp_queries > 1:\n pred['objectness'] = self.object_head(\n embed['grasp']\n ).squeeze(-1)\n emb = self.grasp_mask_head(embed['grasp'])\n pred['grasping_masks'] = torch.einsum(\n \"bqc,bcn->bqn\", emb, mask_features\n )\n attn_mask.append(pred['grasping_masks'])\n if place_embed.shape[0] > 0:\n embed['place'] = place_embed.transpose(0, 1)\n emb = self.place_mask_head(embed['place'])\n pred['placement_masks'] = torch.einsum(\n \"bqc,bcn->bqn\", emb, mask_features\n )\n attn_mask.append(pred['placement_masks'])\n attn_mask = torch.cat(attn_mask, dim=1).detach()\n return pred, embed, attn_mask\n\n def construct_context(self, features, feature_keys, feature_proj):\n context = [features['features'][f] for f in feature_keys]\n pos_encs, context_sizes = [], []\n for i, f in enumerate(feature_keys):\n pos_enc = self.pe_layer(features['context_pos'][f])\n context_sizes.append(context[i].shape[-1])\n pos_enc = pos_enc.flatten(start_dim=2).permute(2, 0, 1)\n pos_encs.append(pos_enc)\n context[i] = feature_proj[i](context[i].unsqueeze(-1)).squeeze(-1)\n context[i] = context[i] + self.scale_embed.weight[i].unsqueeze(1)\n # NxCxHW -> HWxNxC\n context[i] = context[i].permute(2, 0, 1)\n return context, pos_encs, context_sizes\n\n def forward(self, scene_features, obj_features, lang_tokens=None):\n \"\"\"\n Args:\n scene_features: a dict containing multi-scale feature maps \n from scene point cloud\n obj_features: a dict containing multi-scale feature maps\n from point cloud of object to be placed\n \"\"\"\n context, pos_encs, context_sizes = self.construct_context(\n scene_features, self.scene_in_features, self.scene_feature_proj\n )\n mask_feat = scene_features['features'][self.mask_feature]\n\n grasp_embed, place_embed = self.query_embed.weight.split(\n [self.num_grasp_queries, self.num_place_queries]\n )\n embed, task_id = [], 0\n if grasp_embed.shape[0] > 0:\n if self.use_task_embed:\n grasp_embed = grasp_embed + self.task_embed.weight[task_id]\n embed.append(repeat_new_axis(\n grasp_embed, mask_feat.shape[0], dim=1\n ))\n task_id += 1\n if place_embed.shape[0] > 0:\n place_prompts = obj_features['features'][self.place_feature]\n place_prompts = place_prompts.max(dim=-1)[0]\n place_prompts = self.place_embed_proj(place_prompts)\n if self.use_task_embed:\n place_embed = place_embed + self.task_embed.weight[task_id]\n embed.append(\n place_embed.unsqueeze(1) + place_prompts.unsqueeze(0)\n 
)\n if lang_tokens is not None:\n embed.append(self.lang_token_proj(lang_tokens).transpose(0, 1))\n embed = torch.cat(embed)\n query_pos_enc = repeat_new_axis(\n self.query_pos_enc.weight, mask_feat.shape[0], dim=1\n )\n\n # initial prediction with learnable query features only (no context)\n embed = self.norm(embed)\n prediction, _, attn_mask = self.predict(\n embed[:embed.shape[0] - self.lang_context_length], mask_feat\n )\n predictions = [prediction]\n\n for i in range(self.num_layers):\n j = i % self.num_scales\n if self.use_attn_mask:\n attn_mask = compute_attention_mask(\n attn_mask, scene_features['sample_ids'],\n context_sizes[j], self.num_heads\n )\n if lang_tokens is not None:\n attn_mask = torch.cat([\n attn_mask, repeat_new_axis(\n torch.zeros_like(attn_mask[:, 0]),\n lang_tokens.shape[1], dim=1\n )\n ], dim=1)\n else:\n attn_mask = None\n context_feat = context[j]\n key_pos_enc = pos_encs[j]\n embed = self.cross_attention_layers[i](\n embed, context_feat, context_feat + key_pos_enc,\n query_pos_enc, key_pos_enc, attn_mask\n )\n embed = self.self_attention_layers[i](\n embed, embed, embed + query_pos_enc,\n query_pos_enc, query_pos_enc\n )\n embed = self.ffn_layers[i](embed)\n\n prediction, embedding, attn_mask = self.predict(\n embed[:embed.shape[0] - self.lang_context_length], mask_feat\n )\n predictions.append(prediction)\n return embedding, predictions"
},
{
"identifier": "SetCriterion",
"path": "m2t2/criterion.py",
"snippet": "class SetCriterion(nn.Module):\n \"\"\"This class computes the Hungarian matching loss.\n The process consists of two steps:\n 1) compute 1-1 assignments between outputs of the model and ground\n truth targets (usually, there are more outputs than targets)\n 2) supervise each matched prediction with the corresponding target\n \"\"\"\n\n def __init__(\n self, matcher, deep_supervision, recompute_indices, mask_criterion,\n object_weight, not_object_weight, pseudo_ce_weight\n ):\n \"\"\"Create the criterion.\n Parameters:\n matcher: module to compute 1-1 matching between targets and outputs\n sampler: sample a subset of points to compute mask loss\n deep_supervision: whether to supervise intermediate layer outputs\n recompute_indices: recompute matching for each intermediate layer\n object_weight: weight of the objectness classification loss\n not_object_weight: multiplier for the ce loss of unmatched outputs\n instance_weights: weights of the instance mask loss\n contact_weights: weights of the contact mask loss\n pseudo_ce: use cross entropy with pseudo labels from matcher\n \"\"\"\n super(SetCriterion, self).__init__()\n self.matcher = matcher\n self.deep_supervision = deep_supervision\n self.recompute_indices = recompute_indices\n\n self.object_weight = object_weight\n self.not_object_weight = not_object_weight\n self.mask_criterion = mask_criterion\n self.pseudo_ce_weight = pseudo_ce_weight\n if pseudo_ce_weight > 0:\n self.pseudo_ce_loss = nn.CrossEntropyLoss()\n\n @classmethod\n def from_config(cls, cfg, matcher):\n args = {}\n args['deep_supervision'] = cfg.deep_supervision\n args['recompute_indices'] = cfg.recompute_indices\n args['object_weight'] = cfg.object_weight\n args['not_object_weight'] = cfg.not_object_weight\n args['mask_criterion'] = MaskCriterion.from_config(cfg)\n args['pseudo_ce_weight'] = cfg.pseudo_ce_weight\n return cls(matcher, **args)\n\n def get_pseudo_ce_loss(self, pred_masks, gt_masks, matched_idx):\n B, N, H, W = pred_masks.shape\n pseudo_label = torch.zeros(B, H, W).long()\n pseudo_label = pseudo_label.to(pred_masks.device)\n tgt_mask_any = []\n for i, (tgt_mask, idx) in enumerate(zip(gt_masks, matched_idx)):\n obj_id, y, x = torch.where(tgt_mask > 0)\n pseudo_label[i, y, x] = idx[obj_id]\n tgt_mask_any.append(tgt_mask.any(dim=0))\n tgt_mask_any = torch.stack(tgt_mask_any)\n loss = self.pseudo_ce_loss(\n pred_masks.permute(0, 2, 3, 1)[tgt_mask_any],\n pseudo_label[tgt_mask_any]\n )\n return loss\n\n def get_loss(self, pred, data, matched_idx, layer=None):\n obj_label = torch.zeros_like(pred['objectness'])\n for i, idx in enumerate(matched_idx):\n obj_label[i][idx] = 1\n pos_weight = torch.tensor(1 / self.not_object_weight).to(\n pred['objectness'].device\n )\n loss_obj = bce_loss(\n pred['objectness'], obj_label,\n pos_weight=pos_weight, reduction='none'\n ) * self.not_object_weight\n mask = data['task_is_pick'].unsqueeze(1).float()\n loss_obj = (loss_obj * mask).sum() / torch.clamp(mask.sum(), 1)\n losses = {'objectness': (self.object_weight, loss_obj)}\n\n if self.pseudo_ce_weight > 0:\n pseudo_ce = self.get_pseudo_ce_loss(\n pred['grasping_masks'], data['grasping_masks'], matched_idx\n )\n losses['pseudo_ce'] = (self.pseudo_ce_weight, pseudo_ce)\n\n matched_masks = [mask[idx] for mask, idx in zip(\n pred['grasping_masks'], matched_idx\n )]\n outputs = {'matched_grasping_masks': matched_masks}\n mask_loss, stats = self.mask_criterion(\n 'grasping', torch.cat(matched_masks),\n torch.cat(data['grasping_masks'])\n )\n losses.update(mask_loss)\n 
outputs.update(stats)\n\n if layer is not None:\n losses = {\n f'layer{layer}/{key}': val for key, val in losses.items()\n }\n return losses, outputs\n\n def forward(self, pred, targets):\n outputs = pred[-1]\n\n # Compute matching between final prediction and the targets\n output_idx, cost_matrices = self.matcher(pred[-1], targets)\n outputs.update({\n 'matched_idx': output_idx, 'cost_matrices': cost_matrices\n })\n\n # Compute losses for the final layer outputs\n losses, stats = self.get_loss(pred[-1], targets, output_idx)\n outputs.update(stats)\n\n if self.deep_supervision and self.training:\n # Compute losses for each intermediate layer outputs\n for i, p in enumerate(pred[:-1]):\n if self.recompute_indices:\n output_idx, _ = self.matcher(p, targets)\n l_dict, _ = self.get_loss(p, targets, output_idx, i + 1)\n losses.update(l_dict)\n outputs[f'layer{i+1}/matched_idx'] = output_idx\n\n return losses, outputs"
},
{
"identifier": "GraspCriterion",
"path": "m2t2/criterion.py",
"snippet": "class GraspCriterion(nn.Module):\n def __init__(\n self, adds_criterion, contact_dir_weight, approach_dir_weight,\n offset_weight, param_weight, bin_weights\n ):\n super(GraspCriterion, self).__init__()\n self.adds_criterion = adds_criterion\n self.loss_weights = {\n 'contact_dir': contact_dir_weight,\n 'approach_dir': approach_dir_weight,\n 'offset': offset_weight,\n 'param': param_weight,\n 'release': param_weight\n }\n self.bin_weights = torch.tensor(bin_weights)\n\n @classmethod\n def from_config(cls, cfg):\n args = {}\n args['adds_criterion'] = ADDSCriterion(\n cfg.adds_pred2gt, cfg.adds_gt2pred, cfg.adds_per_obj\n )\n args['contact_dir_weight'] = cfg.contact_dir\n args['approach_dir_weight'] = cfg.approach_dir\n args['offset_weight'] = cfg.offset\n args['param_weight'] = cfg.param\n args['bin_weights'] = cfg.offset_bin_weights\n return cls(**args)\n\n def forward(self, pred, data):\n losses = {}\n losses['contact_dir'] = (1 - (\n pred['contact_dirs'] * data['contact_dirs']\n ).sum(dim=1))\n losses['approach_dir'] = (1 - (\n pred['approach_dirs'] * data['approach_dirs']\n ).sum(dim=1))\n losses['offset'] = cross_entropy(\n pred['offsets'], data['offsets'],\n self.bin_weights.to(pred['offsets'].device), reduction='none'\n )\n if 'params' in data:\n losses['param'] = ((pred['params'] - data['params']) ** 2).mean()\n if 'release' in data:\n losses['release'] = bce_loss(\n pred['release'], data['release'].float()\n ).mean()\n for key in ['contact_dir', 'approach_dir', 'offset']:\n losses[key] = losses[key].sum() / max(losses[key].numel(), 1)\n losses = {\n key: (self.loss_weights[key], losses[key]) for key in losses\n }\n losses.update(self.adds_criterion(\n pred['grasps'], pred['grasp_confidence'],\n data['grasps'], data['inputs'].device\n ))\n return losses"
},
{
"identifier": "PlaceCriterion",
"path": "m2t2/criterion.py",
"snippet": "class PlaceCriterion(nn.Module):\n def __init__(self, mask_criterion, deep_supervision):\n super(PlaceCriterion, self).__init__()\n self.mask_criterion = mask_criterion\n self.deep_supervision = deep_supervision\n\n @classmethod\n def from_config(cls, cfg):\n args = {}\n args['mask_criterion'] = MaskCriterion.from_config(cfg)\n args['deep_supervision'] = cfg.deep_supervision\n return cls(**args)\n\n def forward(self, pred, data):\n pred_masks = pred[-1]['placement_masks'][data['task_is_place']]\n target_masks = data['placement_masks'][data['task_is_place']]\n loss_masks = data['placement_region'][data['task_is_place']]\n loss_masks = repeat_new_axis(\n loss_masks, target_masks.shape[1], dim=1\n ) # (B, H, W) -> (B, Q, H, W)\n loss_masks = loss_masks.flatten(0, 1)\n target_masks = target_masks.flatten(0, 1)\n pred_masks = pred_masks.flatten(0, 1)\n losses, stats = self.mask_criterion(\n 'placement', pred_masks, target_masks, loss_masks\n )\n\n if self.deep_supervision and self.training:\n # Compute losses for each intermediate layer outputs\n for i, p in enumerate(pred[:-1]):\n pred_masks = p['placement_masks'][data['task_is_place']]\n pred_masks = pred_masks.flatten(0, 1)\n mask_losses, _ = self.mask_criterion(\n 'placement', pred_masks, target_masks, loss_masks\n )\n mask_losses = {\n f'layer{i+1}/{key}': val\n for key, val in mask_losses.items()\n }\n losses.update(mask_losses)\n return losses, stats"
},
{
"identifier": "HungarianMatcher",
"path": "m2t2/matcher.py",
"snippet": "class HungarianMatcher(torch.nn.Module):\n \"\"\"This class computes a 1-to-1 assignment between the targets and the\n network's predictions. The targets only include objects, so in general,\n there are more predictions than targets. The un-matched predictions are\n treated as non-objects).\n \"\"\"\n def __init__(self, object_weight, bce_weight, dice_weight):\n super(HungarianMatcher, self).__init__()\n self.object_weight = object_weight\n self.bce_weight = bce_weight\n self.dice_weight = dice_weight\n\n @classmethod\n def from_config(cls, cfg):\n args = {}\n args['object_weight'] = cfg.object_weight\n args['bce_weight'] = cfg.bce_weight\n args['dice_weight'] = cfg.dice_weight\n return cls(**args)\n\n @torch.no_grad()\n def forward(self, outputs, data):\n \"\"\"Performs the matching\n Params:\n outputs: a dict that contains these entries:\n \"objectness\": dim [batch_size, num_queries]\n logits for the objectness score\n \"instance_masks\": dim [batch_size, num_queries, ...]\n predicted object instance masks\n \"contact_masks\": dim [batch_size, num_queries, ...]\n predicted grasp contact masks\n targets: a dict that contains these entries:\n \"instance_masks\": a list of batch_size tensors\n ground truth object instance masks\n \"contact_masks\": a list of batch_size tensors\n ground truth grasp contact masks\n Returns:\n indices: a list of length batch_size, containing indices of the\n predictions that match the best with each target\n \"\"\"\n indices, cost_matrices = [], []\n for i in range(len(outputs['objectness'])):\n # We approximate objectness NLL loss with 1 - prob.\n # The 1 is a constant that can be ommitted.\n cost = self.object_weight * (\n -outputs['objectness'][i:i+1].T.sigmoid()\n ) + self.bce_weight * bce_loss_matrix(\n outputs['grasping_masks'][i], data['grasping_masks'][i]\n ) + self.dice_weight * dice_loss_matrix(\n outputs['grasping_masks'][i], data['grasping_masks'][i]\n )\n output_idx, target_idx = linear_sum_assignment(cost.cpu().numpy())\n output_idx = output_idx[np.argsort(target_idx)]\n indices.append(torch.from_numpy(output_idx).long().to(cost.device))\n cost_matrices.append(cost)\n return indices, cost_matrices"
},
{
"identifier": "PointNet2MSG",
"path": "m2t2/pointnet2.py",
"snippet": "class PointNet2MSG(PointNet2Base):\n def __init__(\n self, num_points, downsample, radius,\n radius_mult, use_rgb=True, norm='BN'\n ):\n super(PointNet2MSG, self).__init__()\n\n self.use_rgb = use_rgb\n c_in = 3 if use_rgb else 0\n num_points = num_points // downsample\n self.SA_modules.append(\n PointnetSAModuleMSG(\n npoint=num_points,\n radii=[radius, radius * radius_mult],\n nsamples=[16, 32],\n mlps=[[c_in, 32, 32, 64], [c_in, 32, 32, 64]],\n norm=norm\n )\n )\n c_out_0 = 64 + 64\n radius = radius * radius_mult\n\n num_points = num_points // downsample\n self.SA_modules.append(\n PointnetSAModuleMSG(\n npoint=num_points,\n radii=[radius, radius * radius_mult],\n nsamples=[16, 32],\n mlps=[[c_out_0, 64, 64, 128], [c_out_0, 64, 64, 128]],\n norm=norm\n )\n )\n c_out_1 = 128 + 128\n radius = radius * radius_mult\n\n num_points = num_points // downsample\n self.SA_modules.append(\n PointnetSAModuleMSG(\n npoint=num_points,\n radii=[radius, radius * radius_mult],\n nsamples=[16, 32],\n mlps=[[c_out_1, 128, 128, 256], [c_out_1, 128, 128, 256]],\n norm=norm\n )\n )\n c_out_2 = 256 + 256\n radius = radius * radius_mult\n\n num_points = num_points // downsample\n self.SA_modules.append(\n PointnetSAModuleMSG(\n npoint=num_points,\n radii=[radius, radius * radius_mult],\n nsamples=[16, 32],\n mlps=[[c_out_2, 256, 256, 512], [c_out_2, 256, 256, 512]],\n norm=norm\n )\n )\n c_out_3 = 512 + 512\n\n self.FP_modules.append(\n PointnetFPModule(mlp=[256 + c_in, 128, 128])\n )\n self.FP_modules.append(\n PointnetFPModule(mlp=[512 + c_out_0, 256, 256])\n )\n self.FP_modules.append(\n PointnetFPModule(mlp=[512 + c_out_1, 512, 512])\n )\n self.FP_modules.append(\n PointnetFPModule(mlp=[c_out_3 + c_out_2, 512, 512])\n )\n\n self.out_channels = {\n 'res0': 128, 'res1': 256, 'res2': 512, 'res3': 512, 'res4': 1024\n }\n\n @classmethod\n def from_config(cls, cfg):\n args = {}\n args['num_points'] = cfg.num_points\n args['downsample'] = cfg.downsample\n args['radius'] = cfg.radius\n args['radius_mult'] = cfg.radius_mult\n args['use_rgb'] = cfg.use_rgb\n return cls(**args)"
},
{
"identifier": "PointNet2MSGCls",
"path": "m2t2/pointnet2.py",
"snippet": "class PointNet2MSGCls(PointNet2Base):\n def __init__(\n self, num_points, downsample, radius,\n radius_mult, use_rgb=True, norm='BN'\n ):\n super(PointNet2MSGCls, self).__init__()\n\n self.use_rgb = use_rgb\n c_in = 3 if use_rgb else 0\n num_points = num_points // downsample\n self.SA_modules.append(\n PointnetSAModuleMSG(\n npoint=num_points,\n radii=[radius, radius * radius_mult],\n nsamples=[16, 32],\n mlps=[[c_in, 32, 32, 64], [c_in, 32, 32, 64]],\n norm=norm\n )\n )\n c_out_0 = 64 + 64\n radius = radius * radius_mult\n\n num_points = num_points // downsample\n self.SA_modules.append(\n PointnetSAModuleMSG(\n npoint=num_points,\n radii=[radius, radius * radius_mult],\n nsamples=[16, 32],\n mlps=[[c_out_0, 64, 64, 128], [c_out_0, 64, 64, 128]],\n norm=norm\n )\n )\n c_out_1 = 128 + 128\n radius = radius * radius_mult\n\n num_points = num_points // downsample\n self.SA_modules.append(\n PointnetSAModuleMSG(\n npoint=num_points,\n radii=[radius, radius * radius_mult],\n nsamples=[16, 32],\n mlps=[[c_out_1, 128, 128, 256], [c_out_1, 128, 128, 256]],\n norm=norm\n )\n )\n c_out_2 = 256 + 256\n self.SA_modules.append(\n PointnetSAModule(mlp=[c_out_2, 256, 256, 512], norm=norm)\n )\n\n self.out_channels = {\n 'res0': c_in, 'res1': 128, 'res2': 256, 'res3': 512, 'res4': 512\n }\n\n @classmethod\n def from_config(cls, cfg):\n args = {}\n args['num_points'] = cfg.num_points\n args['downsample'] = cfg.downsample\n args['radius'] = cfg.radius\n args['radius_mult'] = cfg.radius_mult\n args['use_rgb'] = cfg.use_rgb\n return cls(**args)"
}
] | import torch
import torch.nn as nn
from m2t2.action_decoder import ActionDecoder, infer_placements
from m2t2.contact_decoder import ContactDecoder
from m2t2.criterion import SetCriterion, GraspCriterion, PlaceCriterion
from m2t2.matcher import HungarianMatcher
from m2t2.pointnet2 import PointNet2MSG, PointNet2MSGCls | 9,412 | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
# Author: Wentao Yuan
'''
Top-level M2T2 network.
'''
class M2T2(nn.Module):
def __init__(
self,
backbone: nn.Module,
transformer: nn.Module,
object_encoder: nn.Module = None,
grasp_mlp: nn.Module = None,
set_criterion: nn.Module = None,
grasp_criterion: nn.Module = None,
place_criterion: nn.Module = None
):
super(M2T2, self).__init__()
self.backbone = backbone
self.object_encoder = object_encoder
self.transformer = transformer
self.grasp_mlp = grasp_mlp
self.set_criterion = set_criterion
self.grasp_criterion = grasp_criterion
self.place_criterion = place_criterion
@classmethod
def from_config(cls, cfg):
args = {}
args['backbone'] = PointNet2MSG.from_config(cfg.scene_encoder)
channels = args['backbone'].out_channels
obj_channels = None
if cfg.contact_decoder.num_place_queries > 0:
args['object_encoder'] = PointNet2MSGCls.from_config(
cfg.object_encoder
)
obj_channels = args['object_encoder'].out_channels
| # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
# Author: Wentao Yuan
'''
Top-level M2T2 network.
'''
class M2T2(nn.Module):
def __init__(
self,
backbone: nn.Module,
transformer: nn.Module,
object_encoder: nn.Module = None,
grasp_mlp: nn.Module = None,
set_criterion: nn.Module = None,
grasp_criterion: nn.Module = None,
place_criterion: nn.Module = None
):
super(M2T2, self).__init__()
self.backbone = backbone
self.object_encoder = object_encoder
self.transformer = transformer
self.grasp_mlp = grasp_mlp
self.set_criterion = set_criterion
self.grasp_criterion = grasp_criterion
self.place_criterion = place_criterion
@classmethod
def from_config(cls, cfg):
args = {}
args['backbone'] = PointNet2MSG.from_config(cfg.scene_encoder)
channels = args['backbone'].out_channels
obj_channels = None
if cfg.contact_decoder.num_place_queries > 0:
args['object_encoder'] = PointNet2MSGCls.from_config(
cfg.object_encoder
)
obj_channels = args['object_encoder'].out_channels | args['place_criterion'] = PlaceCriterion.from_config( | 5 | 2023-11-03 22:32:05+00:00 | 12k |
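The HungarianMatcher snippet above reduces query-to-target matching to a linear sum assignment over a cost matrix and then reorders the matched prediction indices by target index. A minimal, self-contained sketch of just that assignment step is shown below; the cost values are made up and simply stand in for the weighted objectness/BCE/dice terms computed in the real matcher.

import numpy as np
from scipy.optimize import linear_sum_assignment

# cost[i, j] = cost of matching prediction i to target j (3 predictions, 2 targets)
cost = np.array([
    [0.9, 0.1],
    [0.2, 0.8],
    [0.5, 0.5],
])
pred_idx, target_idx = linear_sum_assignment(cost)
# Reorder so that entry k is the prediction matched to target k, mirroring
# `output_idx = output_idx[np.argsort(target_idx)]` in the snippet above.
pred_idx = pred_idx[np.argsort(target_idx)]
print(pred_idx)  # [1 0]: target 0 -> prediction 1, target 1 -> prediction 0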
Codra-Ingenierie-Informatique/DataLab | cdl/core/gui/processor/base.py | [
{
"identifier": "env",
"path": "cdl/env.py",
"snippet": "DEBUG = os.environ.get(\"DEBUG\", \"\").lower() in (\"1\", \"true\")\n QUIET = \"quiet\"\n NORMAL = \"normal\"\n DEBUG = \"debug\"\n UNATTENDED_ARG = \"unattended\"\n VERBOSE_ARG = \"verbose\"\n SCREENSHOT_ARG = \"screenshot\"\n DELAY_ARG = \"delay\"\n XMLRPCPORT_ARG = \"xmlrpcport\"\n DONOTQUIT_ENV = \"CDL_DO_NOT_QUIT\"\n UNATTENDED_ENV = GuiDataExecEnv.UNATTENDED_ENV\n VERBOSE_ENV = GuiDataExecEnv.VERBOSE_ENV\n SCREENSHOT_ENV = GuiDataExecEnv.SCREENSHOT_ENV\n DELAY_ENV = GuiDataExecEnv.DELAY_ENV\n XMLRPCPORT_ENV = \"CDL_XMLRPCPORT\"\n CATCHER_TEST_ENV = \"CDL_CATCHER_TEST\"\nclass VerbosityLevels(enum.Enum):\nclass CDLExecEnv:\n def __init__(self):\n def to_dict(self):\n def __str__(self):\n def enable_demo_mode(self, delay: int):\n def __get_mode(env):\n def __set_mode(env, value):\n def do_not_quit(self):\n def do_not_quit(self, value):\n def unattended(self):\n def unattended(self, value):\n def catcher_test(self):\n def catcher_test(self, value):\n def screenshot(self):\n def screenshot(self, value):\n def verbose(self):\n def verbose(self, value):\n def delay(self):\n def delay(self, value: int):\n def xmlrpcport(self):\n def xmlrpcport(self, value: int):\n def parse_args(self):\n def set_env_from_args(self, args):\n def log(self, source: Any, *objects: Any) -> None:\n def print(self, *objects, sep=\" \", end=\"\\n\", file=sys.stdout, flush=False):\n def pprint(\n self,\n obj,\n stream=None,\n indent=1,\n width=80,\n depth=None,\n compact=False,\n sort_dicts=True,\n ):"
},
{
"identifier": "is_complex_dtype",
"path": "cdl/algorithms/datatypes.py",
"snippet": "def is_complex_dtype(dtype: np.dtype) -> bool:\n \"\"\"Return True if data type is a complex type\n\n Args:\n dtype: Data type to check\n\n Returns:\n True if data type is a complex type\n \"\"\"\n return issubclass(np.dtype(dtype).type, complex)"
},
{
"identifier": "is_integer_dtype",
"path": "cdl/algorithms/datatypes.py",
"snippet": "def is_integer_dtype(dtype: np.dtype) -> bool:\n \"\"\"Return True if data type is an integer type\n\n Args:\n dtype: Data type to check\n\n Returns:\n True if data type is an integer type\n \"\"\"\n return issubclass(np.dtype(dtype).type, np.integer)"
},
{
"identifier": "Conf",
"path": "cdl/config.py",
"snippet": "CONF_VERSION = \"1.0.0\"\nAPP_NAME = \"DataLab\"\nMOD_NAME = \"cdl\"\nAPP_DESC = _(\"\"\"DataLab is a generic signal and image processing platform\"\"\")\nAPP_PATH = osp.dirname(__file__)\nDEBUG = os.environ.get(\"DEBUG\", \"\").lower() in (\"1\", \"true\")\nTEST_SEGFAULT_ERROR = len(os.environ.get(\"TEST_SEGFAULT_ERROR\", \"\")) > 0\nDATETIME_FORMAT = \"%d/%m/%Y - %H:%M:%S\"\nDATAPATH = configtools.get_module_data_path(MOD_NAME, \"data\")\nSHOTPATH = osp.join(\n configtools.get_module_data_path(MOD_NAME), os.pardir, \"doc\", \"images\", \"shots\"\n)\nOTHER_PLUGINS_PATHLIST = [configtools.get_module_data_path(MOD_NAME, \"plugins\")]\nIS_FROZEN = is_frozen(MOD_NAME)\nPLOTPY_DEFAULTS = {\n \"plot\": {\n # \"antialiasing\": False,\n # \"title/font/size\": 12,\n # \"title/font/bold\": False,\n # \"marker/curve/text/font/size\": 8,\n # \"marker/curve/text/font/family\": \"default\",\n # \"marker/curve/text/font/bold\": False,\n # \"marker/curve/text/font/italic\": False,\n \"marker/curve/text/textcolor\": \"black\",\n # \"marker/curve/text/background_color\": \"#ffffff\",\n # \"marker/curve/text/background_alpha\": 0.8,\n # \"marker/cross/text/font/family\": \"default\",\n # \"marker/cross/text/font/size\": 8,\n # \"marker/cross/text/font/bold\": False,\n # \"marker/cross/text/font/italic\": False,\n \"marker/cross/text/textcolor\": \"black\",\n # \"marker/cross/text/background_color\": \"#ffffff\",\n \"marker/cross/text/background_alpha\": 0.7,\n # \"marker/cross/line/style\": \"DashLine\",\n # \"marker/cross/line/color\": \"yellow\",\n # \"marker/cross/line/width\": 1,\n # \"marker/cursor/text/font/size\": 8,\n # \"marker/cursor/text/font/family\": \"default\",\n # \"marker/cursor/text/font/bold\": False,\n # \"marker/cursor/text/font/italic\": False,\n # \"marker/cursor/text/textcolor\": \"#ff9393\",\n # \"marker/cursor/text/background_color\": \"#ffffff\",\n # \"marker/cursor/text/background_alpha\": 0.8,\n \"shape/drag/symbol/marker\": \"NoSymbol\",\n \"shape/mask/symbol/size\": 5,\n \"shape/mask/sel_symbol/size\": 8,\n # -----------------------------------------------------------------------------\n # Annotated shape style for annotations:\n \"shape/annotation/line/style\": \"SolidLine\",\n \"shape/annotation/line/color\": \"#ffff00\",\n \"shape/annotation/line/width\": 1,\n \"shape/annotation/fill/style\": \"SolidPattern\",\n \"shape/annotation/fill/color\": MAIN_BG_COLOR,\n \"shape/annotation/fill/alpha\": 0.1,\n \"shape/annotation/symbol/marker\": \"Rect\",\n \"shape/annotation/symbol/size\": 3,\n \"shape/annotation/symbol/edgecolor\": \"#ffff00\",\n \"shape/annotation/symbol/facecolor\": \"#ffff00\",\n \"shape/annotation/symbol/alpha\": 1.0,\n \"shape/annotation/sel_line/style\": \"SolidLine\",\n \"shape/annotation/sel_line/color\": \"#00ff00\",\n \"shape/annotation/sel_line/width\": 1,\n \"shape/annotation/sel_fill/style\": \"SolidPattern\",\n \"shape/annotation/sel_fill/color\": MAIN_BG_COLOR,\n \"shape/annotation/sel_fill/alpha\": 0.1,\n \"shape/annotation/sel_symbol/marker\": \"Rect\",\n \"shape/annotation/sel_symbol/size\": 9,\n \"shape/annotation/sel_symbol/edgecolor\": \"#00aa00\",\n \"shape/annotation/sel_symbol/facecolor\": \"#00ff00\",\n \"shape/annotation/sel_symbol/alpha\": 0.7,\n # -----------------------------------------------------------------------------\n # Annotated shape style for result shapes / signals:\n \"shape/result/s/line/style\": \"SolidLine\",\n \"shape/result/s/line/color\": MAIN_FG_COLOR,\n \"shape/result/s/line/width\": 1,\n 
\"shape/result/s/fill/style\": \"SolidPattern\",\n \"shape/result/s/fill/color\": MAIN_BG_COLOR,\n \"shape/result/s/fill/alpha\": 0.1,\n \"shape/result/s/symbol/marker\": \"XCross\",\n \"shape/result/s/symbol/size\": 7,\n \"shape/result/s/symbol/edgecolor\": MAIN_FG_COLOR,\n \"shape/result/s/symbol/facecolor\": MAIN_FG_COLOR,\n \"shape/result/s/symbol/alpha\": 1.0,\n \"shape/result/s/sel_line/style\": \"SolidLine\",\n \"shape/result/s/sel_line/color\": \"#00ff00\",\n \"shape/result/s/sel_line/width\": 1,\n \"shape/result/s/sel_fill/style\": \"SolidPattern\",\n \"shape/result/s/sel_fill/color\": MAIN_BG_COLOR,\n \"shape/result/s/sel_fill/alpha\": 0.1,\n \"shape/result/s/sel_symbol/marker\": \"Rect\",\n \"shape/result/s/sel_symbol/size\": 9,\n \"shape/result/s/sel_symbol/edgecolor\": \"#00aa00\",\n \"shape/result/s/sel_symbol/facecolor\": \"#00ff00\",\n \"shape/result/s/sel_symbol/alpha\": 0.7,\n # -----------------------------------------------------------------------------\n # Annotated shape style for result shapes / images:\n \"shape/result/i/line/style\": \"SolidLine\",\n \"shape/result/i/line/color\": \"#ffff00\",\n \"shape/result/i/line/width\": 1,\n \"shape/result/i/fill/style\": \"SolidPattern\",\n \"shape/result/i/fill/color\": MAIN_BG_COLOR,\n \"shape/result/i/fill/alpha\": 0.1,\n \"shape/result/i/symbol/marker\": \"Rect\",\n \"shape/result/i/symbol/size\": 3,\n \"shape/result/i/symbol/edgecolor\": \"#ffff00\",\n \"shape/result/i/symbol/facecolor\": \"#ffff00\",\n \"shape/result/i/symbol/alpha\": 1.0,\n \"shape/result/i/sel_line/style\": \"SolidLine\",\n \"shape/result/i/sel_line/color\": \"#00ff00\",\n \"shape/result/i/sel_line/width\": 1,\n \"shape/result/i/sel_fill/style\": \"SolidPattern\",\n \"shape/result/i/sel_fill/color\": MAIN_BG_COLOR,\n \"shape/result/i/sel_fill/alpha\": 0.1,\n \"shape/result/i/sel_symbol/marker\": \"Rect\",\n \"shape/result/i/sel_symbol/size\": 9,\n \"shape/result/i/sel_symbol/edgecolor\": \"#00aa00\",\n \"shape/result/i/sel_symbol/facecolor\": \"#00ff00\",\n \"shape/result/i/sel_symbol/alpha\": 0.7,\n # -----------------------------------------------------------------------------\n },\n}\ndef is_frozen(module_name: str) -> bool:\ndef get_mod_source_dir() -> str | None:\n def get_def_dict(cls, category: str) -> dict:\n def set_def_dict(cls, category: str, def_dict: dict) -> None:\ndef get_old_log_fname(fname):\ndef initialize():\ndef reset():\nclass MainSection(conf.Section, metaclass=conf.SectionMeta):\nclass ConsoleSection(conf.Section, metaclass=conf.SectionMeta):\nclass IOSection(conf.Section, metaclass=conf.SectionMeta):\nclass ProcSection(conf.Section, metaclass=conf.SectionMeta):\nclass ViewSection(conf.Section, metaclass=conf.SectionMeta):\nclass Conf(conf.Configuration, metaclass=conf.ConfMeta):"
},
{
"identifier": "ROIDataParam",
"path": "cdl/core/computation/base.py",
"snippet": "class ROIDataParam(gds.DataSet):\n \"\"\"ROI Editor data\"\"\"\n\n roidata = gds.FloatArrayItem(_(\"ROI data\"))\n singleobj = gds.BoolItem(_(\"Single object\"))\n modified = gds.BoolItem(_(\"Modified\")).set_prop(\"display\", hide=True)\n\n # pylint: disable=arguments-differ\n @classmethod\n def create(cls, roidata: np.ndarray | None = None, singleobj: bool | None = None):\n \"\"\"Create ROIDataParam instance\"\"\"\n if roidata is not None:\n roidata = np.array(roidata, dtype=int)\n instance = cls()\n instance.roidata = roidata\n instance.singleobj = singleobj\n return instance\n\n @property\n def is_empty(self) -> bool:\n \"\"\"Return True if there is no ROI\"\"\"\n return self.roidata is None or self.roidata.size == 0"
},
{
"identifier": "CompOut",
"path": "cdl/core/gui/processor/catcher.py",
"snippet": "class CompOut:\n \"\"\"Class for representing computation output\n Attributes:\n result (SignalObj | ImageObj | np.ndarray | None): computation result\n error_msg (str | None): error message\n warning_msg (str | None): warning message\n \"\"\"\n\n result: SignalObj | ImageObj | np.ndarray | None = None\n error_msg: str | None = None\n warning_msg: str | None = None"
},
{
"identifier": "wng_err_func",
"path": "cdl/core/gui/processor/catcher.py",
"snippet": "def wng_err_func(func: Callable, args: tuple[Any]) -> CompOut:\n \"\"\"Wrapper function to catch errors and warnings during computation\n Args:\n func (Callable): function to call\n args (tuple[Any]): function arguments\n Returns:\n CompOut: computation output\n \"\"\"\n with warnings.catch_warnings(record=True) as wngs:\n try:\n result = func(*args)\n if wngs:\n wng = wngs[-1]\n warning_msg = warnings.formatwarning(\n message=wng.message,\n category=wng.category,\n filename=wng.filename,\n lineno=wng.lineno,\n line=wng.line,\n )\n return CompOut(result=result, warning_msg=warning_msg)\n return CompOut(result=result)\n except Exception: # pylint: disable=broad-except\n if execenv.unattended and not execenv.catcher_test:\n # In unattended mode (test cases), we want to raise the exception\n # because test cases are supposed to work without any error. In real\n # life, we want to avoid raising the exception because it would stop\n # the application, and exceptions could be related to non-critical\n # errors due to external libraries.\n # When testing the catcher, on the other hand, we don't want to\n # raise the exception because it would stop the unattended test\n # execution.\n raise\n return CompOut(error_msg=traceback.format_exc())"
},
{
"identifier": "ResultShape",
"path": "cdl/core/model/base.py",
"snippet": "class ResultShape:\n \"\"\"Object representing a geometrical shape serializable in signal/image metadata.\n\n Result `array` is a NumPy 2-D array: each row is a result, optionnally associated\n to a ROI (first column value).\n\n ROI index is starting at 0 (or is simply 0 if there is no ROI).\n\n Args:\n shapetype: shape type\n array: shape coordinates (multiple shapes: one shape per row),\n first column is ROI index (0 if there is no ROI)\n label: shape label\n\n Raises:\n AssertionError: invalid argument\n \"\"\"\n\n def __init__(self, shapetype: ShapeTypes, array: np.ndarray, label: str = \"\"):\n assert isinstance(label, str)\n assert isinstance(shapetype, ShapeTypes)\n self.label = self.show_label = label\n self.shapetype = shapetype\n if isinstance(array, (list, tuple)):\n if isinstance(array[0], (list, tuple)):\n array = np.array(array)\n else:\n array = np.array([array])\n assert isinstance(array, np.ndarray)\n self.array = array\n if label.endswith(\"s\"):\n self.show_label = label[:-1]\n self.check_array()\n\n @classmethod\n def label_shapetype_from_key(cls, key: str):\n \"\"\"Return metadata shape label and shapetype from metadata key\"\"\"\n for member in ShapeTypes:\n if key.startswith(member.value):\n label = key[len(member.value) :]\n return label, member\n raise ValueError(f\"Invalid metadata key `{key}`\")\n\n @classmethod\n def from_metadata_entry(cls, key, value) -> ResultShape | None:\n \"\"\"Create metadata shape object from (key, value) metadata entry\"\"\"\n if isinstance(key, str) and isinstance(value, np.ndarray):\n try:\n label, shapetype = cls.label_shapetype_from_key(key)\n return cls(shapetype, value, label)\n except ValueError:\n pass\n return None\n\n @classmethod\n def match(cls, key, value) -> bool:\n \"\"\"Return True if metadata dict entry (key, value) is a metadata result\"\"\"\n return cls.from_metadata_entry(key, value) is not None\n\n @property\n def key(self) -> str:\n \"\"\"Return metadata key associated to result\"\"\"\n return self.shapetype.value + self.label\n\n @property\n def xlabels(self) -> tuple[str]:\n \"\"\"Return labels for result array columns\"\"\"\n if self.shapetype in (ShapeTypes.MARKER, ShapeTypes.POINT):\n labels = \"ROI\", \"x\", \"y\"\n elif self.shapetype in (\n ShapeTypes.RECTANGLE,\n ShapeTypes.CIRCLE,\n ShapeTypes.SEGMENT,\n ShapeTypes.ELLIPSE,\n ShapeTypes.POLYGON,\n ):\n labels = [\"ROI\"]\n for index in range(0, self.array.shape[1] - 1, 2):\n labels += [f\"x{index//2}\", f\"y{index//2}\"]\n labels = tuple(labels)\n else:\n raise NotImplementedError(f\"Unsupported shapetype {self.shapetype}\")\n return labels[-self.array.shape[1] :]\n\n def add_to(self, obj: BaseObj):\n \"\"\"Add metadata shape to object (signal/image)\"\"\"\n obj.metadata[self.key] = self.array\n if self.shapetype in (\n ShapeTypes.SEGMENT,\n ShapeTypes.CIRCLE,\n ShapeTypes.ELLIPSE,\n ):\n # Automatically adds segment norm / circle diameter to object metadata\n colnb = 2\n if self.shapetype is ShapeTypes.ELLIPSE:\n colnb += 1\n arr = self.array\n results = np.zeros((arr.shape[0], colnb), dtype=arr.dtype)\n results[:, 0] = arr[:, 0] # ROI indexes\n dx1, dy1 = arr[:, 3] - arr[:, 1], arr[:, 4] - arr[:, 2]\n results[:, 1] = np.linalg.norm(np.vstack([dx1, dy1]).T, axis=1)\n if self.shapetype is ShapeTypes.ELLIPSE:\n dx2, dy2 = arr[:, 7] - arr[:, 5], arr[:, 8] - arr[:, 6]\n results[:, 2] = np.linalg.norm(np.vstack([dx2, dy2]).T, axis=1)\n label = self.label\n if self.shapetype is ShapeTypes.CIRCLE:\n label += \"Diameter\"\n if self.shapetype is 
ShapeTypes.ELLIPSE:\n label += \"Diameters\"\n obj.metadata[label] = results\n\n def merge_with(self, obj: BaseObj, other_obj: BaseObj | None = None):\n \"\"\"Merge object resultshape with another's: obj <-- other_obj\n or simply merge this resultshape with obj if other_obj is None\"\"\"\n if other_obj is None:\n other_obj = obj\n other_value = other_obj.metadata.get(self.key)\n if other_value is not None:\n other = ResultShape.from_metadata_entry(self.key, other_value)\n assert other is not None\n other_array = np.array(other.array, copy=True)\n if other_array.shape[1] % 2: # Column 0 is the ROI index\n other_array[:, 0] += self.array[-1, 0] + 1 # Adding ROI index offset\n if other_array.shape[1] != self.array.shape[1]:\n # This can only happen if the shape is a polygon\n assert self.shapetype is ShapeTypes.POLYGON\n # We must padd the array with NaNs\n max_colnb = max(self.array.shape[1], other_array.shape[1])\n new_array = np.full(\n (self.array.shape[0] + other_array.shape[0], max_colnb), np.nan\n )\n new_array[: self.array.shape[0], : self.array.shape[1]] = self.array\n new_array[self.array.shape[0] :, : other_array.shape[1]] = other_array\n self.array = new_array\n else:\n self.array = np.vstack([self.array, other_array])\n self.add_to(obj)\n\n @property\n def data_colnb(self):\n \"\"\"Return raw data results column number\"\"\"\n if self.shapetype == ShapeTypes.POLYGON:\n raise ValueError(\"Polygon has an undefined number of data columns\")\n return {\n ShapeTypes.MARKER: 2,\n ShapeTypes.POINT: 2,\n ShapeTypes.RECTANGLE: 4,\n ShapeTypes.CIRCLE: 4,\n ShapeTypes.SEGMENT: 4,\n ShapeTypes.ELLIPSE: 8,\n }[self.shapetype]\n\n @property\n def data(self):\n \"\"\"Return raw data (array without ROI informations)\"\"\"\n if self.array.shape[1] % 2:\n # Column 0 is the ROI index\n return self.array[:, 1:]\n # No ROI index\n return self.array\n\n def check_array(self):\n \"\"\"Check if array is valid\"\"\"\n assert len(self.array.shape) == 2\n if self.shapetype is ShapeTypes.POLYGON:\n # Polygon is a special case: the number of data columns is variable\n # (2 columns per point). So we only check if the number of columns\n # is odd, which means that the first column is the ROI index, followed\n # by an even number of data columns (flattened x, y coordinates).\n assert self.array.shape[1] % 2 == 1\n else:\n assert self.array.shape[1] == self.data_colnb + 1\n\n def iterate_plot_items(self, fmt: str, lbl: bool, option: str) -> Iterable:\n \"\"\"Iterate over metadata shape plot items.\n\n Args:\n fmt (str): numeric format (e.g. \"%.3f\")\n lbl (bool): if True, show shape labels\n option (str): shape style option (e.g. \"shape/drag\")\n\n Yields:\n PlotItem: plot item\n \"\"\"\n for args in self.data:\n yield self.create_plot_item(args, fmt, lbl, option)\n\n def create_plot_item(self, args: np.ndarray, fmt: str, lbl: bool, option: str):\n \"\"\"Make plot item.\n\n Args:\n args (numpy.ndarray): shape data\n fmt (str): numeric format (e.g. \"%.3f\")\n lbl (bool): if True, show shape labels\n option (str): shape style option (e.g. 
\"shape/drag\")\n\n Returns:\n PlotItem: plot item\n \"\"\"\n if self.shapetype is ShapeTypes.MARKER:\n item = self.make_marker_item(args, fmt)\n elif self.shapetype is ShapeTypes.POINT:\n item = AnnotatedPoint(*args)\n sparam = item.shape.shapeparam\n sparam.symbol.marker = \"Ellipse\"\n sparam.symbol.size = 6\n sparam.sel_symbol.marker = \"Ellipse\"\n sparam.sel_symbol.size = 6\n sparam.update_shape(item.shape)\n param = item.annotationparam\n param.title = self.show_label\n param.update_annotation(item)\n elif self.shapetype is ShapeTypes.RECTANGLE:\n x0, y0, x1, y1 = args\n item = make.annotated_rectangle(x0, y0, x1, y1, title=self.show_label)\n elif self.shapetype is ShapeTypes.CIRCLE:\n x0, y0, x1, y1 = args\n item = make.annotated_circle(x0, y0, x1, y1, title=self.show_label)\n elif self.shapetype is ShapeTypes.SEGMENT:\n x0, y0, x1, y1 = args\n item = make.annotated_segment(x0, y0, x1, y1, title=self.show_label)\n elif self.shapetype is ShapeTypes.ELLIPSE:\n x0, y0, x1, y1, x2, y2, x3, y3 = args\n item = make.annotated_ellipse(\n x0, y0, x1, y1, x2, y2, x3, y3, title=self.show_label\n )\n elif self.shapetype is ShapeTypes.POLYGON:\n x, y = args[::2], args[1::2]\n item = make.polygon(x, y, title=self.show_label, closed=False)\n else:\n print(f\"Warning: unsupported item {self.shapetype}\", file=sys.stderr)\n return None\n if isinstance(item, AnnotatedShape):\n config_annotated_shape(item, fmt, lbl, option)\n set_plot_item_editable(item, False)\n return item\n\n def make_marker_item(self, args, fmt):\n \"\"\"Make marker item\"\"\"\n x0, y0 = args\n if np.isnan(x0):\n mstyle = \"-\"\n\n def label(x, y): # pylint: disable=unused-argument\n return (self.show_label + \": \" + fmt) % y\n\n elif np.isnan(y0):\n mstyle = \"|\"\n\n def label(x, y): # pylint: disable=unused-argument\n return (self.show_label + \": \" + fmt) % x\n\n else:\n mstyle = \"+\"\n txt = self.show_label + \": (\" + fmt + \", \" + fmt + \")\"\n\n def label(x, y):\n return txt % (x, y)\n\n return make.marker(\n position=(x0, y0),\n markerstyle=mstyle,\n label_cb=label,\n linestyle=\"DashLine\",\n color=\"yellow\",\n )"
},
{
"identifier": "ShapeTypes",
"path": "cdl/core/model/base.py",
"snippet": "class ShapeTypes(enum.Enum):\n \"\"\"Shape types for image metadata\"\"\"\n\n # Reimplement enum.Enum method as suggested by Python documentation:\n # https://docs.python.org/3/library/enum.html#enum.Enum._generate_next_value_\n # Here, it is only needed for ImageDatatypes (see core/model/image.py).\n # pylint: disable=unused-argument,no-self-argument,no-member\n def _generate_next_value_(name, start, count, last_values):\n return f\"_{name.lower()[:3]}_\"\n\n RECTANGLE = enum.auto()\n CIRCLE = enum.auto()\n ELLIPSE = enum.auto()\n SEGMENT = enum.auto()\n MARKER = enum.auto()\n POINT = enum.auto()\n POLYGON = enum.auto()"
},
{
"identifier": "create_progress_bar",
"path": "cdl/utils/qthelpers.py",
"snippet": "@contextmanager\ndef create_progress_bar(\n parent: QW.QWidget, label: str, max_: int\n) -> Generator[QW.QProgressDialog, None, None]:\n \"\"\"Create modal progress bar\"\"\"\n prog = QW.QProgressDialog(label, _(\"Cancel\"), 0, max_, parent, QC.Qt.SplashScreen)\n prog.setWindowModality(QC.Qt.WindowModal)\n prog.show()\n try:\n yield prog\n finally:\n prog.close()\n prog.deleteLater()"
},
{
"identifier": "qt_try_except",
"path": "cdl/utils/qthelpers.py",
"snippet": "def qt_try_except(message=None, context=None):\n \"\"\"Try...except Qt widget method decorator\"\"\"\n\n def qt_try_except_decorator(func):\n \"\"\"Try...except Qt widget method decorator\"\"\"\n\n @functools.wraps(func)\n def method_wrapper(*args, **kwargs):\n \"\"\"Decorator wrapper function\"\"\"\n self = args[0] # extracting 'self' from method arguments\n # If \"self\" is a BaseProcessor, then we need to get the panel instance\n panel = getattr(self, \"panel\", self)\n if message is not None:\n panel.SIG_STATUS_MESSAGE.emit(message)\n QW.QApplication.setOverrideCursor(QG.QCursor(QC.Qt.WaitCursor))\n panel.repaint()\n output = None\n try:\n output = func(*args, **kwargs)\n except Exception as msg: # pylint: disable=broad-except\n qt_handle_error_message(panel.parent(), msg, context)\n finally:\n panel.SIG_STATUS_MESSAGE.emit(\"\")\n QW.QApplication.restoreOverrideCursor()\n return output\n\n return method_wrapper\n\n return qt_try_except_decorator"
},
{
"identifier": "show_warning_error",
"path": "cdl/widgets/warningerror.py",
"snippet": "def show_warning_error(\n parent: QW.QWidget,\n category: str,\n context: str = None,\n message: str = None,\n tip: str = None,\n) -> None:\n \"\"\"Show error message\n\n Args:\n parent (QW.QWidget): parent widget\n category (str): message category (\"error\" or \"warning\")\n context (str | None): context. Defaults to None.\n message (str | None): message. Defaults to None.\n tip (str | None): tip. Defaults to None.\n \"\"\"\n if category == \"warning\" and Conf.proc.ignore_warnings.get():\n return\n dlg = WarningErrorMessageBox(parent, category, context, message, tip)\n exec_dialog(dlg)"
}
] | import abc
import multiprocessing
import time
import warnings
import guidata.dataset as gds
import numpy as np
from collections.abc import Callable
from multiprocessing import Pool
from typing import TYPE_CHECKING, Any, Union
from guidata.configtools import get_icon
from guidata.dataset import update_dataset
from guidata.qthelpers import exec_dialog
from guidata.widgets.arrayeditor import ArrayEditor
from qtpy import QtCore as QC
from qtpy import QtWidgets as QW
from cdl import env
from cdl.algorithms.datatypes import is_complex_dtype, is_integer_dtype
from cdl.config import Conf, _
from cdl.core.computation.base import ROIDataParam
from cdl.core.gui.processor.catcher import CompOut, wng_err_func
from cdl.core.model.base import ResultShape, ShapeTypes
from cdl.utils.qthelpers import create_progress_bar, qt_try_except
from cdl.widgets.warningerror import show_warning_error
from multiprocessing.pool import AsyncResult
from plotpy.plot import PlotWidget
from cdl.core.computation.base import (
ClipParam,
GaussianParam,
MovingAverageParam,
MovingMedianParam,
ThresholdParam,
)
from cdl.core.gui.panel.image import ImagePanel
from cdl.core.gui.panel.signal import SignalPanel
from cdl.core.model.image import ImageObj
from cdl.core.model.signal import SignalObj | 8,137 |
if TYPE_CHECKING: # pragma: no cover
Obj = Union[SignalObj, ImageObj]
# Enable multiprocessing support for Windows, with frozen executable (e.g. PyInstaller)
multiprocessing.freeze_support()
COMPUTATION_TIP = _(
"DataLab relies on various libraries to perform the computation. During the "
"computation, errors may occur because of the data (e.g. division by zero, "
"unexpected data type, etc.) or because of the libraries (e.g. memory error, "
"etc.). If you encounter an error, before reporting it, please ensure that "
"the computation is correct, by checking the data and the parameters."
)
POOL: Pool = None
class Worker:
"""Multiprocessing worker, to run long-running tasks in a separate process"""
def __init__(self) -> None:
self.asyncresult: AsyncResult = None
self.result: Any = None
@staticmethod
def create_pool() -> None:
"""Create multiprocessing pool"""
global POOL # pylint: disable=global-statement
# Create a pool with one process
POOL = Pool(processes=1) # pylint: disable=not-callable,consider-using-with
@staticmethod
def terminate_pool(wait: bool = False) -> None:
"""Terminate multiprocessing pool.
Args:
            wait (bool): if True, wait for running tasks to finish before closing the pool. Defaults to False.
"""
global POOL # pylint: disable=global-statement
if POOL is not None:
if wait:
# Close the pool properly (wait for all tasks to finish)
POOL.close()
else:
# Terminate the pool and stop the timer
POOL.terminate()
POOL.join()
POOL = None
def restart_pool(self) -> None:
"""Terminate and recreate the pool"""
# Terminate the process and stop the timer
self.terminate_pool(wait=False)
# Recreate the pool for the next computation
self.create_pool()
def run(self, func: Callable, args: tuple[Any]) -> None:
"""Run computation.
Args:
func (Callable): function to run
args (tuple[Any]): arguments
"""
global POOL # pylint: disable=global-statement,global-variable-not-assigned
assert POOL is not None
self.asyncresult = POOL.apply_async(wng_err_func, (func, args))
def close(self) -> None:
"""Close worker: close pool properly and wait for all tasks to finish"""
        # Close the multiprocessing Pool properly, but only if no computation is
        # running, to avoid blocking the GUI at exit: wait=True is passed only when
        # no task is pending, so the pool has nothing to wait for and is closed
        # immediately, but cleanly.
self.terminate_pool(wait=self.asyncresult is None)
def is_computation_finished(self) -> bool:
"""Return True if computation is finished.
Returns:
bool: True if computation is finished
"""
return self.asyncresult.ready()
def get_result(self) -> CompOut:
"""Return computation result.
Returns:
CompOut: computation result
"""
self.result = self.asyncresult.get()
self.asyncresult = None
return self.result
class BaseProcessor(QC.QObject):
"""Object handling data processing: operations, processing, computing.
Args:
panel (SignalPanel | ImagePanel): panel
plotwidget (CurveWidget | ImageWidget): plot widget
"""
SIG_ADD_SHAPE = QC.Signal(str)
EDIT_ROI_PARAMS = False
PARAM_DEFAULTS: dict[str, gds.DataSet] = {}
def __init__(self, panel: SignalPanel | ImagePanel, plotwidget: PlotWidget):
super().__init__()
self.panel = panel
self.plotwidget = plotwidget
self.worker: Worker | None = None
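Worker.run in the listing above submits wng_err_func (shown in the context entries) to the pool so that warnings and exceptions raised inside a computation are captured and returned to the GUI instead of propagating. A minimal standalone sketch of that capture pattern follows; the function names and the plain dict used in place of DataLab's CompOut are hypothetical.

import traceback
import warnings

def run_captured(func, args):
    """Call func(*args) and return the result plus any warning or error text."""
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        try:
            result = func(*args)
        except Exception:  # deliberately broad, like the original wrapper
            return {"result": None, "error": traceback.format_exc(), "warning": None}
    warning_msg = None
    if caught:
        w = caught[-1]
        warning_msg = warnings.formatwarning(w.message, w.category, w.filename, w.lineno)
    return {"result": result, "error": None, "warning": warning_msg}

def noisy_divide(a, b):
    if abs(b) < 1e-12:
        warnings.warn("denominator is very small", RuntimeWarning)
    return a / b

print(run_captured(noisy_divide, (1.0, 1e-15))["warning"])  # formatted RuntimeWarning
print(run_captured(noisy_divide, (1.0, 0.0))["error"])      # ZeroDivisionError traceback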
| # -*- coding: utf-8 -*-
#
# Licensed under the terms of the BSD 3-Clause
# (see cdl/LICENSE for details)
"""
DataLab Base Processor GUI module
---------------------------------
This module defines the base class for data processing GUIs.
"""
# pylint: disable=invalid-name # Allows short reference names like x, y, ...
from __future__ import annotations
if TYPE_CHECKING: # pragma: no cover
Obj = Union[SignalObj, ImageObj]
# Enable multiprocessing support for Windows, with frozen executable (e.g. PyInstaller)
multiprocessing.freeze_support()
COMPUTATION_TIP = _(
"DataLab relies on various libraries to perform the computation. During the "
"computation, errors may occur because of the data (e.g. division by zero, "
"unexpected data type, etc.) or because of the libraries (e.g. memory error, "
"etc.). If you encounter an error, before reporting it, please ensure that "
"the computation is correct, by checking the data and the parameters."
)
POOL: Pool = None
class Worker:
"""Multiprocessing worker, to run long-running tasks in a separate process"""
def __init__(self) -> None:
self.asyncresult: AsyncResult = None
self.result: Any = None
@staticmethod
def create_pool() -> None:
"""Create multiprocessing pool"""
global POOL # pylint: disable=global-statement
# Create a pool with one process
POOL = Pool(processes=1) # pylint: disable=not-callable,consider-using-with
@staticmethod
def terminate_pool(wait: bool = False) -> None:
"""Terminate multiprocessing pool.
Args:
            wait (bool): if True, wait for running tasks to finish before closing the pool. Defaults to False.
"""
global POOL # pylint: disable=global-statement
if POOL is not None:
if wait:
# Close the pool properly (wait for all tasks to finish)
POOL.close()
else:
# Terminate the pool and stop the timer
POOL.terminate()
POOL.join()
POOL = None
def restart_pool(self) -> None:
"""Terminate and recreate the pool"""
# Terminate the process and stop the timer
self.terminate_pool(wait=False)
# Recreate the pool for the next computation
self.create_pool()
def run(self, func: Callable, args: tuple[Any]) -> None:
"""Run computation.
Args:
func (Callable): function to run
args (tuple[Any]): arguments
"""
global POOL # pylint: disable=global-statement,global-variable-not-assigned
assert POOL is not None
self.asyncresult = POOL.apply_async(wng_err_func, (func, args))
def close(self) -> None:
"""Close worker: close pool properly and wait for all tasks to finish"""
        # Close the multiprocessing Pool properly, but only if no computation is
        # running, to avoid blocking the GUI at exit: wait=True is passed only when
        # no task is pending, so the pool has nothing to wait for and is closed
        # immediately, but cleanly.
self.terminate_pool(wait=self.asyncresult is None)
def is_computation_finished(self) -> bool:
"""Return True if computation is finished.
Returns:
bool: True if computation is finished
"""
return self.asyncresult.ready()
def get_result(self) -> CompOut:
"""Return computation result.
Returns:
CompOut: computation result
"""
self.result = self.asyncresult.get()
self.asyncresult = None
return self.result
class BaseProcessor(QC.QObject):
"""Object handling data processing: operations, processing, computing.
Args:
panel (SignalPanel | ImagePanel): panel
plotwidget (CurveWidget | ImageWidget): plot widget
"""
SIG_ADD_SHAPE = QC.Signal(str)
EDIT_ROI_PARAMS = False
PARAM_DEFAULTS: dict[str, gds.DataSet] = {}
def __init__(self, panel: SignalPanel | ImagePanel, plotwidget: PlotWidget):
super().__init__()
self.panel = panel
self.plotwidget = plotwidget
self.worker: Worker | None = None | self.set_process_isolation_enabled(Conf.main.process_isolation_enabled.get()) | 3 | 2023-11-09 16:56:03+00:00 | 12k |
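The Worker class in the listing above wraps a single-process multiprocessing.Pool: a computation is submitted with apply_async, polled through AsyncResult.ready(), and fetched with get(), which is what lets the GUI stay responsive and lets a runaway computation be killed by terminating the pool. A minimal standalone sketch of that submit/poll/fetch cycle is given below; slow_square is a made-up stand-in for a real computation, not DataLab's API.

import multiprocessing
import time

def slow_square(x):
    time.sleep(0.5)  # stand-in for a long-running computation
    return x * x

if __name__ == "__main__":
    pool = multiprocessing.Pool(processes=1)  # one worker process, as in Worker.create_pool
    async_result = pool.apply_async(slow_square, (7,))
    while not async_result.ready():           # poll, like Worker.is_computation_finished
        time.sleep(0.1)                       # a GUI would process events here instead
    print(async_result.get())                 # 49
    pool.close()                              # nothing is running: close properly...
    pool.join()                               # ...mirroring terminate_pool(wait=True)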
choderalab/chiron | chiron/tests/test_pairs.py | [
{
"identifier": "NeighborListNsqrd",
"path": "chiron/neighbors.py",
"snippet": "class NeighborListNsqrd(PairsBase):\n \"\"\"\n N^2 neighborlist implementation that returns the particle pair ids, displacement vectors, and distances.\n\n Parameters\n ----------\n space: Space\n Class that defines how to calculate the displacement between two points and apply the boundary conditions\n cutoff: float, default = 2.5\n Cutoff distance for the neighborlist\n skin: float, default = 0.4\n Skin distance for the neighborlist\n n_max_neighbors: int, default=200\n Maximum number of neighbors for each particle. Used for padding arrays for efficient jax computations\n This will be checked and dynamically updated during the build stage\n Examples\n --------\n\n\n \"\"\"\n\n def __init__(\n self,\n space: Space,\n cutoff: unit.Quantity = unit.Quantity(1.2, unit.nanometer),\n skin: unit.Quantity = unit.Quantity(0.4, unit.nanometer),\n n_max_neighbors: float = 200,\n ):\n if not isinstance(space, Space):\n raise TypeError(f\"space must be of type Space, found {type(space)}\")\n if not cutoff.unit.is_compatible(unit.angstrom):\n raise ValueError(\n f\"cutoff must be a unit.Quantity with units of distance, cutoff.unit = {cutoff.unit}\"\n )\n if not skin.unit.is_compatible(unit.angstrom):\n raise ValueError(\n f\"cutoff must be a unit.Quantity with units of distance, skin.unit = {skin.unit}\"\n )\n\n self.cutoff = cutoff.value_in_unit_system(unit.md_unit_system)\n self.skin = skin.value_in_unit_system(unit.md_unit_system)\n self.cutoff_and_skin = self.cutoff + self.skin\n self.n_max_neighbors = n_max_neighbors\n self.space = space\n\n # set a a simple variable to know if this has at least been built once as opposed to just initialized\n # this does not imply that the neighborlist is up to date\n self.is_built = False\n\n # note, we need to use the partial decorator in order to use the jit decorate\n # so that it knows to ignore the `self` argument\n @partial(jax.jit, static_argnums=(0,))\n def _pairs_mask(self, particle_ids: jnp.array):\n \"\"\"\n Jitted function to generate mask that allows us to remove self-interactions and double-counting of pairs\n\n Parameters\n ----------\n particle_ids: jnp.array\n Array of particle ids\n\n Returns\n -------\n jnp.array\n Bool mask to remove self-interactions and double-counting of pairs\n\n \"\"\"\n # for the nsq approach, we consider the distance between a particle and all other particles in the system\n # if we used a cell list the possible_neighbors would be a smaller list, i.e., only those in the neigboring cells\n\n possible_neighbors = particle_ids\n\n particles_j = jnp.broadcast_to(\n possible_neighbors,\n (particle_ids.shape[0], possible_neighbors.shape[0]),\n )\n\n # reshape the particle_ids\n particles_i = jnp.reshape(particle_ids, (particle_ids.shape[0], 1))\n # create a mask to exclude self interactions and double counting\n temp_mask = particles_i < particles_j\n\n return temp_mask\n\n @partial(jax.jit, static_argnums=(0, 5))\n def _build_neighborlist(\n self, particle_i, reduction_mask, pid, coordinates, n_max_neighbors\n ):\n \"\"\"\n Jitted function to build the neighbor list for a single particle\n\n Parameters\n ----------\n particle_i: jnp.array\n X,Y,Z coordinates of particle i\n reduction_mask: jnp.array\n Mask to exclude self-interactions and double counting of pairs\n coordinates: jnp.array\n X,Y,Z coordinates of all particles\n n_max_neighbors: int\n Maximum number of neighbors for each particle. 
Used for padding arrays for efficient jax computations\n\n Returns\n -------\n neighbor_list_mask: jnp.array\n Mask to exclude padding from the neighbor list\n neighbor_list: jnp.array\n List of particle ids for the neighbors, padded to n_max_neighbors\n n_neighbors: int\n Number of neighbors for the particle\n \"\"\"\n\n # calculate the displacement between particle i and all other particles\n r_ij, dist = self.space.displacement(particle_i, coordinates)\n\n # neighbor_mask will be an array of length n_particles (i.e., length of coordinates)\n # where each element is True if the particle is a neighbor, False if it is not\n # subject to both the cutoff+skin and the reduction mask that eliminates double counting and self-interactions\n neighbor_mask = jnp.where(\n (dist < self.cutoff_and_skin) & (reduction_mask), True, False\n )\n # when we pad the neighbor list, we will use last particle id in the neighbor list\n # this choice was made such that when we use the neighbor list in the masked energy calculat\n # the padded values will result in reasonably well defined values\n fill_value = jnp.argmax(neighbor_mask)\n fill_value = jnp.where(fill_value == pid, fill_value + 1, fill_value)\n\n # count up the number of neighbors\n n_neighbors = jnp.where(neighbor_mask, 1, 0).sum()\n\n # since neighbor_mask indices have a one-to-one correspondence to particle ids,\n # applying jnp.where, will return an array of the indices that are neighbors.\n # since this needs to be uniformly sized, we can just fill this array up to the n_max_neighbors.\n neighbor_list = jnp.array(\n jnp.where(neighbor_mask, size=n_max_neighbors, fill_value=fill_value),\n dtype=jnp.uint32,\n )\n # we need to generate a new mask associatd with the padded neighbor list\n # to be able to quickly exclude the padded values from the neighbor list\n neighbor_list_mask = jnp.where(jnp.arange(n_max_neighbors) < n_neighbors, 1, 0)\n\n del r_ij, dist\n return neighbor_list_mask, neighbor_list, n_neighbors\n\n def build(\n self,\n coordinates: Union[jnp.array, unit.Quantity],\n box_vectors: Union[jnp.array, unit.Quantity],\n ):\n \"\"\"\n Build the neighborlist from an array of coordinates and box vectors.\n\n Parameters\n ----------\n coordinates: jnp.array\n Shape[N,3] array of particle coordinates\n box_vectors: jnp.array\n Shape[3,3] array of box vectors\n\n Returns\n -------\n None\n\n \"\"\"\n\n # set our reference coordinates\n # the call to x0 and box_vectors automatically convert these to jnp arrays in the correct unit system\n if isinstance(coordinates, unit.Quantity):\n if not coordinates.unit.is_compatible(unit.nanometer):\n raise ValueError(\n f\"Coordinates require distance units, not {coordinates.unit}\"\n )\n coordinates = coordinates.value_in_unit_system(unit.md_unit_system)\n\n if isinstance(box_vectors, unit.Quantity):\n if not box_vectors.unit.is_compatible(unit.nanometer):\n raise ValueError(\n f\"Box vectors require distance unit, not {box_vectors.unit}\"\n )\n box_vectors = box_vectors.value_in_unit_system(unit.md_unit_system)\n\n if box_vectors.shape != (3, 3):\n raise ValueError(\n f\"box_vectors should be a 3x3 array, shape provided: {box_vectors.shape}\"\n )\n\n self.ref_coordinates = coordinates\n self.box_vectors = box_vectors\n\n # the neighborlist assumes that the box vectors do not change between building and calculating the neighbor list\n # changes to the box vectors require rebuilding the neighbor list\n self.space.box_vectors = self.box_vectors\n\n # store the ids of all the particles\n self.particle_ids = 
jnp.array(\n range(0, self.ref_coordinates.shape[0]), dtype=jnp.uint32\n )\n\n # calculate which pairs to exclude\n reduction_mask = self._pairs_mask(self.particle_ids)\n\n # calculate the distance for all pairs this will return\n # neighbor_mask: an array of shape (n_particles, n_particles) where each element is the mask\n # to determine if the particle is a neighbor\n # neighbor_list: an array of shape (n_particles, n_max_neighbors) where each element is the particle id of the neighbor\n # this is padded with zeros to ensure a uniform size;\n # n_neighbors: an array of shape (n_particles) where each element is the number of neighbors for that particle\n\n self.neighbor_mask, self.neighbor_list, self.n_neighbors = jax.vmap(\n self._build_neighborlist, in_axes=(0, 0, 0, None, None)\n )(\n self.ref_coordinates,\n reduction_mask,\n self.particle_ids,\n self.ref_coordinates,\n self.n_max_neighbors,\n )\n\n self.neighbor_list = self.neighbor_list.reshape(-1, self.n_max_neighbors)\n\n while jnp.any(self.n_neighbors == self.n_max_neighbors).block_until_ready():\n log.debug(\n f\"Increasing n_max_neighbors from {self.n_max_neighbors} to at {jnp.max(self.n_neighbors)+10}\"\n )\n self.n_max_neighbors = int(jnp.max(self.n_neighbors) + 10)\n\n self.neighbor_mask, self.neighbor_list, self.n_neighbors = jax.vmap(\n self._build_neighborlist, in_axes=(0, 0, 0, None, None)\n )(\n self.ref_coordinates,\n reduction_mask,\n self.particle_ids,\n self.ref_coordinates,\n self.n_max_neighbors,\n )\n\n self.neighbor_list = self.neighbor_list.reshape(-1, self.n_max_neighbors)\n\n self.is_built = True\n\n @partial(jax.jit, static_argnums=(0,))\n def _calc_distance_per_particle(\n self, particle1, neighbors, neighbor_mask, coordinates\n ):\n \"\"\"\n Jitted function to calculate the distance between a particle and its neighbors\n\n Parameters\n ----------\n particle1: int\n Particle id\n neighbors: jnp.array\n Array of particle ids for the neighbors of particle1\n neighbor_mask: jnp.array\n Mask to exclude padding from the neighbor list of particle1\n coordinates: jnp.array\n X,Y,Z coordinates of all particles\n\n Returns\n -------\n n_pairs: int\n Number of interacting pairs for the particle\n mask: jnp.array\n Mask to exclude padding from the neighbor list of particle1.\n If a particle is within the interaction cutoff, the mask is 1, otherwise it is 0\n dist: jnp.array\n Array of distances between the particle and its neighbors\n r_ij: jnp.array\n Array of displacement vectors between the particle and its neighbors\n \"\"\"\n # repeat the particle id for each neighbor\n particles1 = jnp.repeat(particle1, neighbors.shape[0])\n\n # calculate the displacement between particle i and all neighbors\n r_ij, dist = self.space.displacement(\n coordinates[particles1], coordinates[neighbors]\n )\n # calculate the mask to determine if the particle is a neighbor\n # this will be done based on the interaction cutoff and using the neighbor_mask to exclude padding\n mask = jnp.where((dist < self.cutoff) & (neighbor_mask), 1, 0)\n\n # calculate the number of pairs\n n_pairs = mask.sum()\n\n return n_pairs, mask, dist, r_ij\n\n def calculate(self, coordinates: jnp.array):\n \"\"\"\n Calculate the neighbor list for the current state\n\n Parameters\n ----------\n coordinates: jnp.array\n Shape[N,3] array of particle coordinates\n\n Returns\n -------\n n_neighbors: jnp.array\n Array of number of neighbors for each particle\n neighbor_list: jnp.array\n Array of particle ids for the neighbors, padded to n_max_neighbors. 
Shape (n_particles, n_max_neighbors)\n padding_mask: jnp.array\n Array of masks to exclude padding from the neighbor list of each particle. Shape (n_particles, n_max_neighbors)\n dist: jnp.array\n Array of distances between each particle and its neighbors. Shape (n_particles, n_max_neighbors)\n r_ij: jnp.array\n Array of displacement vectors between each particle and its neighbors. Shape (n_particles, n_max_neighbors, 3)\n \"\"\"\n # coordinates = sampler_state.x0\n # note, we assume the box vectors do not change between building and calculating the neighbor list\n # changes to the box vectors require rebuilding the neighbor list\n\n n_neighbors, padding_mask, dist, r_ij = jax.vmap(\n self._calc_distance_per_particle, in_axes=(0, 0, 0, None)\n )(self.particle_ids, self.neighbor_list, self.neighbor_mask, coordinates)\n # mask = mask.reshape(-1, self.n_max_neighbors)\n return n_neighbors, self.neighbor_list, padding_mask, dist, r_ij\n\n @partial(jax.jit, static_argnums=(0,))\n def _calculate_particle_displacement(self, particle, coordinates, ref_coordinates):\n \"\"\"\n Calculate the displacement of a particle from the reference coordinates.\n If the displacement exceeds the half the skin distance, return True, otherwise return False.\n\n This function is designed to allow it to be jitted and vmapped over particle indices.\n\n Parameters\n ----------\n particle: int\n Particle id\n coordinates: jnp.array\n Array of particle coordinates\n ref_coordinates: jnp.array\n Array of reference particle coordinates\n\n Returns\n -------\n bool\n True if the particle is outside the skin distance, False if it is not.\n \"\"\"\n # calculate the displacement of a particle from the initial coordinates\n\n r_ij, displacement = self.space.displacement(\n coordinates[particle], ref_coordinates[particle]\n )\n\n status = jnp.where(displacement >= self.skin / 2.0, True, False)\n del displacement\n return status\n\n def check(self, coordinates: jnp.array) -> bool:\n \"\"\"\n Check if the neighbor list needs to be rebuilt based on displacement of the particles from the reference coordinates.\n If a particle moves more than 0.5 skin distance, the neighborlist will be rebuilt.\n Will also return True if the size of the coordinates array changes.\n\n Note, this could also accept a user defined criteria for distance, but this is not implemented yet.\n\n Parameters\n ----------\n coordinates: jnp.array\n Array of particle coordinates\n Returns\n -------\n bool\n True if the neighbor list needs to be rebuilt, False if it does not.\n \"\"\"\n\n if self.ref_coordinates.shape[0] != coordinates.shape[0]:\n return True\n\n status = jax.vmap(\n self._calculate_particle_displacement, in_axes=(0, None, None)\n )(self.particle_ids, coordinates, self.ref_coordinates)\n if jnp.any(status):\n del status\n return True\n else:\n del status\n return False"
},
{
"identifier": "PairList",
"path": "chiron/neighbors.py",
"snippet": "class PairList(PairsBase):\n \"\"\"\n N^2 pairlist implementation that returns the particle pair ids, displacement vectors, and distances.\n\n Parameters\n ----------\n space: Space\n Class that defines how to calculate the displacement between two points and apply the boundary conditions\n cutoff: float, default = 2.5\n Cutoff distance for the pair list calculation\n Examples\n --------\n >>> from chiron.neighbors import PairList, OrthogonalPeriodicSpace\n >>> from chiron.states import SamplerState\n >>> import jax.numpy as jnp\n >>>\n >>> space = OrthogonalPeriodicSpace()\n >>> pair_list = PairList(space, cutoff=2.5)\n >>> sampler_state = SamplerState(x0=jnp.array([[0.0, 0.0, 0.0], [2, 0.0, 0.0], [0.0, 2, 0.0]]),\n >>> box_vectors=jnp.array([[10, 0.0, 0.0], [0.0, 10, 0.0], [0.0, 0.0, 10]]))\n >>> pair_list.build_from_state(sampler_state)\n >>>\n >>> # mask and distances are of shape (n_particles, n_particles-1),\n >>> displacement_vectors of shape (n_particles, n_particles-1, 3)\n >>> # mask, is a bool array that is True if the particle is within the cutoff distance, False if it is not\n >>> # n_pairs is of shape (n_particles) and is per row sum of the mask. The mask ensure we also do not double count pairs\n >>> n_pairs, mask, distances, displacement_vectors = pair_list.calculate(sampler_state.x0)\n \"\"\"\n\n def __init__(\n self,\n space: Space,\n cutoff: unit.Quantity = unit.Quantity(1.2, unit.nanometer),\n ):\n if not isinstance(space, Space):\n raise TypeError(f\"space must be of type Space, found {type(space)}\")\n if not cutoff.unit.is_compatible(unit.angstrom):\n raise ValueError(\n f\"cutoff must be a unit.Quantity with units of distance, cutoff.unit = {cutoff.unit}\"\n )\n\n self.cutoff = cutoff.value_in_unit_system(unit.md_unit_system)\n self.space = space\n\n # set a a simple variable to know if this has at least been built once as opposed to just initialized\n # this does not imply that the neighborlist is up to date\n self.is_built = False\n\n # note, we need to use the partial decorator in order to use the jit decorate\n # so that it knows to ignore the `self` argument\n @partial(jax.jit, static_argnums=(0,))\n def _pairs_and_mask(self, particle_ids: jnp.array):\n \"\"\"\n Jitted function to generate all pairs (excluding self interactions)\n and mask that allows us to remove double-counting of pairs.\n\n Parameters\n ----------\n particle_ids: jnp.array\n Array of particle ids\n\n Returns\n -------\n all_pairs: jnp.array\n Array of all pairs (excluding self interactions), of size (n_particles, n_particles-1)\n reduction_mask: jnp.array\n Bool mask that identifies which pairs to exclude to remove double counting of pairs\n\n \"\"\"\n # for the nsq approach, we consider the distance between a particle and all other particles in the system\n # if we used a cell list the possible_neighbors would be a smaller list, i.e., only those in the neigboring cells\n # we'll just keep with naming syntax for future flexibility\n\n possible_neighbors = particle_ids\n\n particles_j = jnp.broadcast_to(\n possible_neighbors,\n (particle_ids.shape[0], possible_neighbors.shape[0]),\n )\n # reshape the particle_ids\n particles_i = jnp.reshape(particle_ids, (particle_ids.shape[0], 1))\n # create a mask to exclude self interactions and double counting\n temp_mask = particles_i != particles_j\n all_pairs = jax.vmap(self._remove_self_interactions, in_axes=(0, 0))(\n particles_j, temp_mask\n )\n del temp_mask\n all_pairs = jnp.array(all_pairs[0], dtype=jnp.uint32)\n\n reduction_mask = 
jnp.where(particles_i < all_pairs, True, False)\n\n return all_pairs, reduction_mask\n\n @partial(jax.jit, static_argnums=(0,))\n def _remove_self_interactions(self, particles, temp_mask):\n return jnp.where(\n temp_mask, size=particles.shape[0] - 1, fill_value=particles.shape[0] - 1\n )\n\n def build(\n self,\n coordinates: Union[jnp.array, unit.Quantity],\n box_vectors: Union[jnp.array, unit.Quantity],\n ):\n \"\"\"\n Build the neighborlist from an array of coordinates and box vectors.\n\n Parameters\n ----------\n coordinates: jnp.array\n Shape[n_particles,3] array of particle coordinates\n box_vectors: jnp.array\n Shape[3,3] array of box vectors\n\n Returns\n -------\n None\n\n \"\"\"\n\n # set our reference coordinates\n # this will set self.ref_coordinates=coordinates and self.box_vectors\n self._validate_build_inputs(coordinates, box_vectors)\n\n self.n_particles = self.ref_coordinates.shape[0]\n\n # the neighborlist assumes that the box vectors do not change between building and calculating the neighbor list\n # changes to the box vectors require rebuilding the neighbor list\n self.space.box_vectors = self.box_vectors\n\n # store the ids of all the particles\n self.particle_ids = jnp.array(range(0, coordinates.shape[0]), dtype=jnp.uint32)\n\n # calculate which pairs to exclude\n self.all_pairs, self.reduction_mask = self._pairs_and_mask(self.particle_ids)\n\n self.is_built = True\n\n @partial(jax.jit, static_argnums=(0,))\n def _calc_distance_per_particle(\n self, particle1, neighbors, neighbor_mask, coordinates\n ):\n \"\"\"\n Jitted function to calculate the distance between a particle and all possible neighbors\n\n Parameters\n ----------\n particle1: int\n Particle id\n neighbors: jnp.array\n Array of particle ids for the possible particle pairs of particle1\n neighbor_mask: jnp.array\n Mask to exclude double particles to prevent double counting\n coordinates: jnp.array\n X,Y,Z coordinates of all particles, shaped (n_particles, 3)\n\n Returns\n -------\n n_pairs: int\n Number of interacting pairs for the particle\n mask: jnp.array\n Mask to exclude padding particles not within the cutoff particle1.\n If a particle is within the interaction cutoff, the mask is 1, otherwise it is 0\n Array has shape (n_particles, n_particles-1) as it excludes self interactions\n dist: jnp.array\n Array of distances between the particle and all other particles in the system.\n Array has shape (n_particles, n_particles-1) as it excludes self interactions\n r_ij: jnp.array\n Array of displacement vectors between the particle and all other particles in the system.\n Array has shape (n_particles, n_particles-1, 3) as it excludes self interactions\n\n \"\"\"\n # repeat the particle id for each neighbor\n particles1 = jnp.repeat(particle1, neighbors.shape[0])\n\n # calculate the displacement between particle i and all neighbors\n r_ij, dist = self.space.displacement(\n coordinates[particles1], coordinates[neighbors]\n )\n # calculate the mask to determine if the particle is a neighbor\n # this will be done based on the interaction cutoff and using the neighbor_mask to exclude padding\n mask = jnp.where((dist < self.cutoff) & (neighbor_mask), 1, 0)\n\n # calculate the number of pairs\n n_pairs = mask.sum()\n\n return n_pairs, mask, dist, r_ij\n\n def calculate(self, coordinates: jnp.array):\n \"\"\"\n Calculate the neighbor list for the current state\n\n Parameters\n ----------\n coordinates: jnp.array\n Shape[n_particles,3] array of particle coordinates\n\n Returns\n -------\n n_neighbors: jnp.array\n 
Array of the number of interacting particles (i.e., where dist < cutoff). Shape: (n_particles)\n pairs: jnp.array\n Array of particle ids that were considered for interaction. Shape: (n_particles, n_particles-1)\n padding_mask: jnp.array\n Array used to masks non interaction particle pairs. Shape: (n_particles, n_particles-1)\n dist: jnp.array\n Array of distances between pairs in the system. Shape: (n_particles, n_particles-1)\n r_ij: jnp.array\n Array of displacement vectors between particle pairs. Shape: (n_particles, n_particles-1, 3).\n \"\"\"\n if coordinates.shape[0] != self.n_particles:\n raise ValueError(\n f\"Number of particles cannot changes without rebuilding. \"\n f\"Coordinates must have shape ({self.n_particles}, 3), found {coordinates.shape}\"\n )\n\n # coordinates = self.space.wrap(coordinates)\n\n n_neighbors, padding_mask, dist, r_ij = jax.vmap(\n self._calc_distance_per_particle, in_axes=(0, 0, 0, None)\n )(self.particle_ids, self.all_pairs, self.reduction_mask, coordinates)\n\n return n_neighbors, self.all_pairs, padding_mask, dist, r_ij\n\n def check(self, coordinates: jnp.array) -> bool:\n \"\"\"\n Check if we need to reconstruct internal arrays.\n For a simple pairlist this will always return False, unless the number of particles change.\n\n Parameters\n ----------\n coordinates: jnp.array\n Array of particle coordinates\n Returns\n -------\n bool\n True if we need to rebuild the neighbor list, False if we do not.\n \"\"\"\n if coordinates.shape[0] != self.n_particles:\n return True\n else:\n return False"
},
{
"identifier": "OrthogonalPeriodicSpace",
"path": "chiron/neighbors.py",
"snippet": "class OrthogonalPeriodicSpace(Space):\n \"\"\"\n Defines the simulation space for an orthogonal periodic system.\n\n \"\"\"\n\n @property\n def box_vectors(self) -> jnp.array:\n return self._box_vectors\n\n @box_vectors.setter\n def box_vectors(self, box_vectors: jnp.array) -> None:\n self._box_vectors = box_vectors\n self._box_lengths = jnp.array(\n [box_vectors[0][0], box_vectors[1][1], box_vectors[2][2]]\n )\n\n @partial(jax.jit, static_argnums=(0,))\n def displacement(\n self, xyz_1: jnp.array, xyz_2: jnp.array\n ) -> Tuple[jnp.array, jnp.array]:\n \"\"\"\n Calculate the periodic distance between two points.\n\n Parameters\n ----------\n xyz_1: jnp.array\n Coordinates of the first point\n xyz_2: jnp.array\n Coordinates of the second point\n\n Returns\n -------\n r_ij: jnp.array\n Displacement vector between the two points\n dist: float\n Distance between the two points\n\n \"\"\"\n # calculate uncorrect r_ij\n r_ij = xyz_1 - xyz_2\n\n # calculated corrected displacement vector\n r_ij = (\n jnp.mod(r_ij + self._box_lengths * 0.5, self._box_lengths)\n - self._box_lengths * 0.5\n )\n # calculate the scalar distance\n dist = jnp.linalg.norm(r_ij, axis=-1)\n\n return r_ij, dist\n\n @partial(jax.jit, static_argnums=(0,))\n def wrap(self, xyz: jnp.array) -> jnp.array:\n \"\"\"\n Wrap the coordinates of the system.\n\n Parameters\n ----------\n xyz: jnp.array\n Coordinates of the system\n\n Returns\n -------\n jnp.array\n Wrapped coordinates of the system\n\n \"\"\"\n xyz = xyz - jnp.floor(xyz / self._box_lengths) * self._box_lengths\n\n return xyz"
},
{
"identifier": "OrthogonalNonperiodicSpace",
"path": "chiron/neighbors.py",
"snippet": "class OrthogonalNonperiodicSpace(Space):\n @partial(jax.jit, static_argnums=(0,))\n def displacement(\n self,\n xyz_1: jnp.array,\n xyz_2: jnp.array,\n ) -> Tuple[jnp.array, jnp.array]:\n \"\"\"\n Calculate the periodic distance between two points.\n\n Parameters\n ----------\n xyz_1: jnp.array\n Coordinates of the first point\n xyz_2: jnp.array\n Coordinates of the second point\n\n Returns\n -------\n r_ij: jnp.array\n Displacement vector between the two points\n dist: float\n Distance between the two points\n\n \"\"\"\n # calculate uncorrect r_ij\n r_ij = xyz_1 - xyz_2\n\n # calculate the scalar distance\n dist = jnp.linalg.norm(r_ij, axis=-1)\n\n return r_ij, dist\n\n @partial(jax.jit, static_argnums=(0,))\n def wrap(self, xyz: jnp.array) -> jnp.array:\n \"\"\"\n Wrap the coordinates of the system.\n For the Non-periodic system, this does not alter the coordinates\n\n Parameters\n ----------\n xyz: jnp.array\n Coordinates of the system\n\n Returns\n -------\n jnp.array\n Wrapped coordinates of the system\n\n \"\"\"\n return xyz"
},
{
"identifier": "SamplerState",
"path": "chiron/states.py",
"snippet": "class SamplerState:\n \"\"\"\n Represents the state of the system that is updated during integration.\n\n Parameters\n ----------\n x0 : unit.Quantity\n The current positions of the particles in the simulation.\n velocities : unit.Quantity, optional\n The velocities of the particles in the simulation.\n box_vectors : unit.Quantity, optional\n The box vectors defining the simulation's periodic boundary conditions.\n\n \"\"\"\n\n def __init__(\n self,\n x0: unit.Quantity,\n velocities: Optional[unit.Quantity] = None,\n box_vectors: Optional[unit.Quantity] = None,\n ) -> None:\n # NOTE: all units are internally in the openMM units system as documented here:\n # http://docs.openmm.org/latest/userguide/theory/01_introduction.html#units\n if not isinstance(x0, unit.Quantity):\n raise TypeError(f\"x0 must be a unit.Quantity, got {type(x0)} instead.\")\n if velocities is not None and not isinstance(velocities, unit.Quantity):\n raise TypeError(\n f\"velocities must be a unit.Quantity, got {type(velocities)} instead.\"\n )\n if box_vectors is not None and not isinstance(box_vectors, unit.Quantity):\n if isinstance(box_vectors, List):\n try:\n box_vectors = self._convert_from_openmm_box(box_vectors)\n except:\n raise TypeError(f\"Unable to parse box_vectors {box_vectors}.\")\n else:\n raise TypeError(\n f\"box_vectors must be a unit.Quantity or openMM box, got {type(box_vectors)} instead.\"\n )\n if not x0.unit.is_compatible(unit.nanometer):\n raise ValueError(f\"x0 must have units of distance, got {x0.unit} instead.\")\n if velocities is not None and not velocities.unit.is_compatible(\n unit.nanometer / unit.picosecond\n ):\n raise ValueError(\n f\"velocities must have units of distance/time, got {velocities.unit} instead.\"\n )\n if box_vectors is not None and not box_vectors.unit.is_compatible(\n unit.nanometer\n ):\n raise ValueError(\n f\"box_vectors must have units of distance, got {box_vectors.unit} instead.\"\n )\n if box_vectors is not None and box_vectors.shape != (3, 3):\n raise ValueError(\n f\"box_vectors must be a 3x3 array, got {box_vectors.shape} instead.\"\n )\n\n self._x0 = x0\n self._velocities = velocities\n self._box_vectors = box_vectors\n self._distance_unit = unit.nanometer\n\n @property\n def x0(self) -> jnp.array:\n return self._convert_to_jnp(self._x0)\n\n @property\n def velocities(self) -> jnp.array:\n if self._velocities is None:\n return None\n return self._convert_to_jnp(self._velocities)\n\n @property\n def box_vectors(self) -> jnp.array:\n if self._box_vectors is None:\n return None\n return self._convert_to_jnp(self._box_vectors)\n\n @x0.setter\n def x0(self, x0: Union[jnp.array, unit.Quantity]) -> None:\n if isinstance(x0, unit.Quantity):\n self._x0 = x0\n else:\n self._x0 = unit.Quantity(x0, self._distance_unit)\n\n @property\n def distance_unit(self) -> unit.Unit:\n return self._distance_unit\n\n def _convert_to_jnp(self, array: unit.Quantity) -> jnp.array:\n \"\"\"\n Convert the sampler state to jnp arrays.\n \"\"\"\n import jax.numpy as jnp\n\n array_ = array.value_in_unit_system(unit.md_unit_system)\n return jnp.array(array_)\n\n def _convert_from_openmm_box(self, openmm_box_vectors: List) -> unit.Quantity:\n box_vec = []\n for i in range(0, 3):\n layer = []\n for j in range(0, 3):\n layer.append(\n openmm_box_vectors[i][j].value_in_unit(openmm_box_vectors[0].unit)\n )\n box_vec.append(layer)\n return unit.Quantity(jnp.array(box_vec), openmm_box_vectors[0].unit)"
}
] | import jax.numpy as jnp
import pytest
from chiron.neighbors import (
NeighborListNsqrd,
PairList,
OrthogonalPeriodicSpace,
OrthogonalNonperiodicSpace,
)
from chiron.states import SamplerState
from openmm import unit | 9,303 |
def test_orthogonal_periodic_displacement():
    # test that an incorrect box shape throws an exception
with pytest.raises(ValueError):
space = OrthogonalPeriodicSpace(jnp.array([10.0, 10.0, 10.0]))
# test that incorrect units throw an exception
with pytest.raises(ValueError):
space = OrthogonalPeriodicSpace(
unit.Quantity(
jnp.array([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0], [0.0, 0.0, 10.0]]),
unit.radians,
)
)
space = OrthogonalPeriodicSpace(
jnp.array([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0], [0.0, 0.0, 10.0]])
)
# test that the box vectors are set correctly
assert jnp.all(
space.box_vectors
== jnp.array([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0], [0.0, 0.0, 10.0]])
)
# test that the box lengths for an orthogonal box are set correctly
assert jnp.all(space._box_lengths == jnp.array([10.0, 10.0, 10.0]))
# test calculation of the displacement_vector and distance between two points
p1 = jnp.array([[0, 0, 0], [0, 0, 0]])
p2 = jnp.array([[1, 0, 0], [6, 0, 0]])
r_ij, distance = space.displacement(p1, p2)
assert jnp.all(r_ij == jnp.array([[-1.0, 0.0, 0.0], [4.0, 0.0, 0.0]]))
assert jnp.all(distance == jnp.array([1, 4]))
# test that the periodic wrapping works as expected
wrapped_x = space.wrap(jnp.array([11, 0, 0]))
assert jnp.all(wrapped_x == jnp.array([1, 0, 0]))
wrapped_x = space.wrap(jnp.array([-1, 0, 0]))
assert jnp.all(wrapped_x == jnp.array([9, 0, 0]))
wrapped_x = space.wrap(jnp.array([5, 0, 0]))
assert jnp.all(wrapped_x == jnp.array([5, 0, 0]))
wrapped_x = space.wrap(jnp.array([5, 12, -1]))
assert jnp.all(wrapped_x == jnp.array([5, 2, 9]))
# test the setter for the box vectors
space.box_vectors = jnp.array(
[[10.0, 0.0, 0.0], [0.0, 20.0, 0.0], [0.0, 0.0, 30.0]]
)
assert jnp.all(
space._box_vectors
== jnp.array([[10.0, 0.0, 0.0], [0.0, 20.0, 0.0], [0.0, 0.0, 30.0]])
)
assert jnp.all(space._box_lengths == jnp.array([10.0, 20.0, 30.0]))
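# Illustrative sketch (added for clarity, not part of the original chiron test file):
# the displacement assertions above follow from the minimum-image convention used by
# OrthogonalPeriodicSpace.displacement, which wraps each component into [-L/2, L/2).
def _minimum_image_sketch():
    box_lengths = jnp.array([10.0, 10.0, 10.0])
    raw_r_ij = jnp.array([[-1.0, 0.0, 0.0], [-6.0, 0.0, 0.0]])  # xyz_1 - xyz_2 for the test points
    wrapped = jnp.mod(raw_r_ij + box_lengths * 0.5, box_lengths) - box_lengths * 0.5
    # wrapped == [[-1, 0, 0], [4, 0, 0]] and the norms are [1, 4], matching the assertions
    return wrapped, jnp.linalg.norm(wrapped, axis=-1)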
def test_orthogonal_nonperiodic_displacement():
space = OrthogonalNonperiodicSpace(
jnp.array([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0], [0.0, 0.0, 10.0]])
)
p1 = jnp.array([[0, 0, 0], [0, 0, 0]])
p2 = jnp.array([[1, 0, 0], [6, 0, 0]])
r_ij, distance = space.displacement(p1, p2)
assert jnp.all(r_ij == jnp.array([[-1.0, 0.0, 0.0], [-6.0, 0.0, 0.0]]))
assert jnp.all(distance == jnp.array([1, 6]))
wrapped_x = space.wrap(jnp.array([11, -1, 2]))
assert jnp.all(wrapped_x == jnp.array([11, -1, 2]))
def test_neighborlist_pair():
"""
    This is a simple test of the neighborlist for 2 particles.
"""
coordinates = jnp.array([[0, 0, 0], [1, 0, 0]])
box_vectors = jnp.array([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0], [0.0, 0.0, 10.0]])
state = SamplerState(
x0=unit.Quantity(coordinates, unit.nanometer),
box_vectors=unit.Quantity(box_vectors, unit.nanometer),
)
space = OrthogonalPeriodicSpace()
cutoff = 1.1
skin = 0.1
|
def test_orthogonal_periodic_displacement():
    # test that an incorrect box shape throws an exception
with pytest.raises(ValueError):
space = OrthogonalPeriodicSpace(jnp.array([10.0, 10.0, 10.0]))
# test that incorrect units throw an exception
with pytest.raises(ValueError):
space = OrthogonalPeriodicSpace(
unit.Quantity(
jnp.array([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0], [0.0, 0.0, 10.0]]),
unit.radians,
)
)
space = OrthogonalPeriodicSpace(
jnp.array([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0], [0.0, 0.0, 10.0]])
)
# test that the box vectors are set correctly
assert jnp.all(
space.box_vectors
== jnp.array([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0], [0.0, 0.0, 10.0]])
)
# test that the box lengths for an orthogonal box are set correctly
assert jnp.all(space._box_lengths == jnp.array([10.0, 10.0, 10.0]))
# test calculation of the displacement_vector and distance between two points
p1 = jnp.array([[0, 0, 0], [0, 0, 0]])
p2 = jnp.array([[1, 0, 0], [6, 0, 0]])
r_ij, distance = space.displacement(p1, p2)
assert jnp.all(r_ij == jnp.array([[-1.0, 0.0, 0.0], [4.0, 0.0, 0.0]]))
assert jnp.all(distance == jnp.array([1, 4]))
# test that the periodic wrapping works as expected
wrapped_x = space.wrap(jnp.array([11, 0, 0]))
assert jnp.all(wrapped_x == jnp.array([1, 0, 0]))
wrapped_x = space.wrap(jnp.array([-1, 0, 0]))
assert jnp.all(wrapped_x == jnp.array([9, 0, 0]))
wrapped_x = space.wrap(jnp.array([5, 0, 0]))
assert jnp.all(wrapped_x == jnp.array([5, 0, 0]))
wrapped_x = space.wrap(jnp.array([5, 12, -1]))
assert jnp.all(wrapped_x == jnp.array([5, 2, 9]))
# test the setter for the box vectors
space.box_vectors = jnp.array(
[[10.0, 0.0, 0.0], [0.0, 20.0, 0.0], [0.0, 0.0, 30.0]]
)
assert jnp.all(
space._box_vectors
== jnp.array([[10.0, 0.0, 0.0], [0.0, 20.0, 0.0], [0.0, 0.0, 30.0]])
)
assert jnp.all(space._box_lengths == jnp.array([10.0, 20.0, 30.0]))
def test_orthogonal_nonperiodic_displacement():
space = OrthogonalNonperiodicSpace(
jnp.array([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0], [0.0, 0.0, 10.0]])
)
p1 = jnp.array([[0, 0, 0], [0, 0, 0]])
p2 = jnp.array([[1, 0, 0], [6, 0, 0]])
r_ij, distance = space.displacement(p1, p2)
assert jnp.all(r_ij == jnp.array([[-1.0, 0.0, 0.0], [-6.0, 0.0, 0.0]]))
assert jnp.all(distance == jnp.array([1, 6]))
wrapped_x = space.wrap(jnp.array([11, -1, 2]))
assert jnp.all(wrapped_x == jnp.array([11, -1, 2]))
def test_neighborlist_pair():
"""
    This is a simple test of the neighborlist for 2 particles.
"""
coordinates = jnp.array([[0, 0, 0], [1, 0, 0]])
box_vectors = jnp.array([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0], [0.0, 0.0, 10.0]])
state = SamplerState(
x0=unit.Quantity(coordinates, unit.nanometer),
box_vectors=unit.Quantity(box_vectors, unit.nanometer),
)
space = OrthogonalPeriodicSpace()
cutoff = 1.1
skin = 0.1 | nbr_list = NeighborListNsqrd( | 0 | 2023-11-07 18:17:43+00:00 | 12k |
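A minimal usage sketch for the PairList shown in this row's context snippets (added for clarity, not part of the dataset row; the build()/calculate() signatures follow the snippet, the concrete values mirror test_neighborlist_pair, and the return semantics are as documented there):

import jax.numpy as jnp
from openmm import unit
from chiron.neighbors import PairList, OrthogonalPeriodicSpace
from chiron.states import SamplerState

space = OrthogonalPeriodicSpace()
pair_list = PairList(space, cutoff=unit.Quantity(1.1, unit.nanometer))
state = SamplerState(
    x0=unit.Quantity(jnp.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]]), unit.nanometer),
    box_vectors=unit.Quantity(jnp.eye(3) * 10.0, unit.nanometer),
)
pair_list.build(state.x0, state.box_vectors)
# calculate() returns per-particle neighbor counts, the candidate pair ids, a padding
# mask, distances of shape (n_particles, n_particles - 1), and displacement vectors
# with a trailing dimension of 3.
n_pairs, pairs, mask, dist, r_ij = pair_list.calculate(state.x0)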
Rishit-dagli/Astroformer | pytorch-image-models/timm/models/mobilenetv3.py | [
{
"identifier": "build_model_with_cfg",
"path": "pytorch-image-models/timm/models/_builder.py",
"snippet": "def build_model_with_cfg(\n model_cls: Callable,\n variant: str,\n pretrained: bool,\n pretrained_cfg: Optional[Dict] = None,\n pretrained_cfg_overlay: Optional[Dict] = None,\n model_cfg: Optional[Any] = None,\n feature_cfg: Optional[Dict] = None,\n pretrained_strict: bool = True,\n pretrained_filter_fn: Optional[Callable] = None,\n kwargs_filter: Optional[Tuple[str]] = None,\n **kwargs,\n):\n \"\"\" Build model with specified default_cfg and optional model_cfg\n\n This helper fn aids in the construction of a model including:\n * handling default_cfg and associated pretrained weight loading\n * passing through optional model_cfg for models with config based arch spec\n * features_only model adaptation\n * pruning config / model adaptation\n\n Args:\n model_cls (nn.Module): model class\n variant (str): model variant name\n pretrained (bool): load pretrained weights\n pretrained_cfg (dict): model's pretrained weight/task config\n model_cfg (Optional[Dict]): model's architecture config\n feature_cfg (Optional[Dict]: feature extraction adapter config\n pretrained_strict (bool): load pretrained weights strictly\n pretrained_filter_fn (Optional[Callable]): filter callable for pretrained weights\n kwargs_filter (Optional[Tuple]): kwargs to filter before passing to model\n **kwargs: model args passed through to model __init__\n \"\"\"\n pruned = kwargs.pop('pruned', False)\n features = False\n feature_cfg = feature_cfg or {}\n\n # resolve and update model pretrained config and model kwargs\n pretrained_cfg = resolve_pretrained_cfg(\n variant,\n pretrained_cfg=pretrained_cfg,\n pretrained_cfg_overlay=pretrained_cfg_overlay\n )\n\n # FIXME converting back to dict, PretrainedCfg use should be propagated further, but not into model\n pretrained_cfg = pretrained_cfg.to_dict()\n\n _update_default_kwargs(pretrained_cfg, kwargs, kwargs_filter)\n\n # Setup for feature extraction wrapper done at end of this fn\n if kwargs.pop('features_only', False):\n features = True\n feature_cfg.setdefault('out_indices', (0, 1, 2, 3, 4))\n if 'out_indices' in kwargs:\n feature_cfg['out_indices'] = kwargs.pop('out_indices')\n\n # Instantiate the model\n if model_cfg is None:\n model = model_cls(**kwargs)\n else:\n model = model_cls(cfg=model_cfg, **kwargs)\n model.pretrained_cfg = pretrained_cfg\n model.default_cfg = model.pretrained_cfg # alias for backwards compat\n\n if pruned:\n model = adapt_model_from_file(model, variant)\n\n # For classification models, check class attr, then kwargs, then default to 1k, otherwise 0 for feats\n num_classes_pretrained = 0 if features else getattr(model, 'num_classes', kwargs.get('num_classes', 1000))\n if pretrained:\n load_pretrained(\n model,\n pretrained_cfg=pretrained_cfg,\n num_classes=num_classes_pretrained,\n in_chans=kwargs.get('in_chans', 3),\n filter_fn=pretrained_filter_fn,\n strict=pretrained_strict,\n )\n\n # Wrap the model in a feature extraction module if enabled\n if features:\n feature_cls = FeatureListNet\n output_fmt = getattr(model, 'output_fmt', None)\n if output_fmt is not None:\n feature_cfg.setdefault('output_fmt', output_fmt)\n if 'feature_cls' in feature_cfg:\n feature_cls = feature_cfg.pop('feature_cls')\n if isinstance(feature_cls, str):\n feature_cls = feature_cls.lower()\n if 'hook' in feature_cls:\n feature_cls = FeatureHookNet\n elif feature_cls == 'fx':\n feature_cls = FeatureGraphNet\n else:\n assert False, f'Unknown feature class {feature_cls}'\n model = feature_cls(model, **feature_cfg)\n model.pretrained_cfg = 
pretrained_cfg_for_features(pretrained_cfg) # add back pretrained cfg\n model.default_cfg = model.pretrained_cfg # alias for rename backwards compat (default_cfg -> pretrained_cfg)\n\n return model"
},
{
"identifier": "pretrained_cfg_for_features",
"path": "pytorch-image-models/timm/models/_builder.py",
"snippet": "def pretrained_cfg_for_features(pretrained_cfg):\n pretrained_cfg = deepcopy(pretrained_cfg)\n # remove default pretrained cfg fields that don't have much relevance for feature backbone\n to_remove = ('num_classes', 'classifier', 'global_pool') # add default final pool size?\n for tr in to_remove:\n pretrained_cfg.pop(tr, None)\n return pretrained_cfg"
},
{
"identifier": "SqueezeExcite",
"path": "pytorch-image-models/timm/models/_efficientnet_blocks.py",
"snippet": "class SqueezeExcite(nn.Module):\n \"\"\" Squeeze-and-Excitation w/ specific features for EfficientNet/MobileNet family\n\n Args:\n in_chs (int): input channels to layer\n rd_ratio (float): ratio of squeeze reduction\n act_layer (nn.Module): activation layer of containing block\n gate_layer (Callable): attention gate function\n force_act_layer (nn.Module): override block's activation fn if this is set/bound\n rd_round_fn (Callable): specify a fn to calculate rounding of reduced chs\n \"\"\"\n\n def __init__(\n self, in_chs, rd_ratio=0.25, rd_channels=None, act_layer=nn.ReLU,\n gate_layer=nn.Sigmoid, force_act_layer=None, rd_round_fn=None):\n super(SqueezeExcite, self).__init__()\n if rd_channels is None:\n rd_round_fn = rd_round_fn or round\n rd_channels = rd_round_fn(in_chs * rd_ratio)\n act_layer = force_act_layer or act_layer\n self.conv_reduce = nn.Conv2d(in_chs, rd_channels, 1, bias=True)\n self.act1 = create_act_layer(act_layer, inplace=True)\n self.conv_expand = nn.Conv2d(rd_channels, in_chs, 1, bias=True)\n self.gate = create_act_layer(gate_layer)\n\n def forward(self, x):\n x_se = x.mean((2, 3), keepdim=True)\n x_se = self.conv_reduce(x_se)\n x_se = self.act1(x_se)\n x_se = self.conv_expand(x_se)\n return x * self.gate(x_se)"
},
{
"identifier": "BlockArgs",
"path": "pytorch-image-models/timm/models/_efficientnet_builder.py",
"snippet": "_DEBUG_BUILDER = False\nBN_MOMENTUM_TF_DEFAULT = 1 - 0.99\nBN_EPS_TF_DEFAULT = 1e-3\n_BN_ARGS_TF = dict(momentum=BN_MOMENTUM_TF_DEFAULT, eps=BN_EPS_TF_DEFAULT)\ndef get_bn_args_tf():\ndef resolve_bn_args(kwargs):\ndef resolve_act_layer(kwargs, default='relu'):\ndef round_channels(channels, multiplier=1.0, divisor=8, channel_min=None, round_limit=0.9):\ndef _log_info_if(msg, condition):\ndef _parse_ksize(ss):\ndef _decode_block_str(block_str):\ndef _scale_stage_depth(stack_args, repeats, depth_multiplier=1.0, depth_trunc='ceil'):\ndef decode_arch_def(\n arch_def,\n depth_multiplier=1.0,\n depth_trunc='ceil',\n experts_multiplier=1,\n fix_first_last=False,\n group_size=None,\n):\n def __init__(self, output_stride=32, pad_type='', round_chs_fn=round_channels, se_from_exp=False,\n act_layer=None, norm_layer=None, se_layer=None, drop_path_rate=0., feature_location=''):\n def _make_block(self, ba, block_idx, block_count):\n def __call__(self, in_chs, model_block_args):\ndef _init_weight_goog(m, n='', fix_group_fanout=True):\ndef efficientnet_init_weights(model: nn.Module, init_fn=None):\nclass EfficientNetBuilder:"
},
{
"identifier": "FeatureInfo",
"path": "pytorch-image-models/timm/models/_features.py",
"snippet": "class FeatureInfo:\n\n def __init__(self, feature_info: List[Dict], out_indices: Tuple[int]):\n prev_reduction = 1\n for i, fi in enumerate(feature_info):\n # sanity check the mandatory fields, there may be additional fields depending on the model\n assert 'num_chs' in fi and fi['num_chs'] > 0\n assert 'reduction' in fi and fi['reduction'] >= prev_reduction\n prev_reduction = fi['reduction']\n assert 'module' in fi\n fi.setdefault('index', i)\n self.out_indices = out_indices\n self.info = feature_info\n\n def from_other(self, out_indices: Tuple[int]):\n return FeatureInfo(deepcopy(self.info), out_indices)\n\n def get(self, key, idx=None):\n \"\"\" Get value by key at specified index (indices)\n if idx == None, returns value for key at each output index\n if idx is an integer, return value for that feature module index (ignoring output indices)\n if idx is a list/tupple, return value for each module index (ignoring output indices)\n \"\"\"\n if idx is None:\n return [self.info[i][key] for i in self.out_indices]\n if isinstance(idx, (tuple, list)):\n return [self.info[i][key] for i in idx]\n else:\n return self.info[idx][key]\n\n def get_dicts(self, keys=None, idx=None):\n \"\"\" return info dicts for specified keys (or all if None) at specified indices (or out_indices if None)\n \"\"\"\n if idx is None:\n if keys is None:\n return [self.info[i] for i in self.out_indices]\n else:\n return [{k: self.info[i][k] for k in keys} for i in self.out_indices]\n if isinstance(idx, (tuple, list)):\n return [self.info[i] if keys is None else {k: self.info[i][k] for k in keys} for i in idx]\n else:\n return self.info[idx] if keys is None else {k: self.info[idx][k] for k in keys}\n\n def channels(self, idx=None):\n \"\"\" feature channels accessor\n \"\"\"\n return self.get('num_chs', idx)\n\n def reduction(self, idx=None):\n \"\"\" feature reduction (output stride) accessor\n \"\"\"\n return self.get('reduction', idx)\n\n def module_name(self, idx=None):\n \"\"\" feature module name accessor\n \"\"\"\n return self.get('module', idx)\n\n def __getitem__(self, item):\n return self.info[item]\n\n def __len__(self):\n return len(self.info)"
},
{
"identifier": "FeatureHooks",
"path": "pytorch-image-models/timm/models/_features.py",
"snippet": "class FeatureHooks:\n \"\"\" Feature Hook Helper\n\n This module helps with the setup and extraction of hooks for extracting features from\n internal nodes in a model by node name.\n\n FIXME This works well in eager Python but needs redesign for torchscript.\n \"\"\"\n\n def __init__(\n self,\n hooks: Sequence[str],\n named_modules: dict,\n out_map: Sequence[Union[int, str]] = None,\n default_hook_type: str = 'forward',\n ):\n # setup feature hooks\n self._feature_outputs = defaultdict(OrderedDict)\n modules = {k: v for k, v in named_modules}\n for i, h in enumerate(hooks):\n hook_name = h['module']\n m = modules[hook_name]\n hook_id = out_map[i] if out_map else hook_name\n hook_fn = partial(self._collect_output_hook, hook_id)\n hook_type = h.get('hook_type', default_hook_type)\n if hook_type == 'forward_pre':\n m.register_forward_pre_hook(hook_fn)\n elif hook_type == 'forward':\n m.register_forward_hook(hook_fn)\n else:\n assert False, \"Unsupported hook type\"\n\n def _collect_output_hook(self, hook_id, *args):\n x = args[-1] # tensor we want is last argument, output for fwd, input for fwd_pre\n if isinstance(x, tuple):\n x = x[0] # unwrap input tuple\n self._feature_outputs[x.device][hook_id] = x\n\n def get_output(self, device) -> Dict[str, torch.tensor]:\n output = self._feature_outputs[device]\n self._feature_outputs[device] = OrderedDict() # clear after reading\n return output"
},
{
"identifier": "checkpoint_seq",
"path": "pytorch-image-models/timm/models/_manipulate.py",
"snippet": "def checkpoint_seq(\n functions,\n x,\n every=1,\n flatten=False,\n skip_last=False,\n preserve_rng_state=True\n):\n r\"\"\"A helper function for checkpointing sequential models.\n\n Sequential models execute a list of modules/functions in order\n (sequentially). Therefore, we can divide such a sequence into segments\n and checkpoint each segment. All segments except run in :func:`torch.no_grad`\n manner, i.e., not storing the intermediate activations. The inputs of each\n checkpointed segment will be saved for re-running the segment in the backward pass.\n\n See :func:`~torch.utils.checkpoint.checkpoint` on how checkpointing works.\n\n .. warning::\n Checkpointing currently only supports :func:`torch.autograd.backward`\n and only if its `inputs` argument is not passed. :func:`torch.autograd.grad`\n is not supported.\n\n .. warning:\n At least one of the inputs needs to have :code:`requires_grad=True` if\n grads are needed for model inputs, otherwise the checkpointed part of the\n model won't have gradients.\n\n Args:\n functions: A :class:`torch.nn.Sequential` or the list of modules or functions to run sequentially.\n x: A Tensor that is input to :attr:`functions`\n every: checkpoint every-n functions (default: 1)\n flatten (bool): flatten nn.Sequential of nn.Sequentials\n skip_last (bool): skip checkpointing the last function in the sequence if True\n preserve_rng_state (bool, optional, default=True): Omit stashing and restoring\n the RNG state during each checkpoint.\n\n Returns:\n Output of running :attr:`functions` sequentially on :attr:`*inputs`\n\n Example:\n >>> model = nn.Sequential(...)\n >>> input_var = checkpoint_seq(model, input_var, every=2)\n \"\"\"\n def run_function(start, end, functions):\n def forward(_x):\n for j in range(start, end + 1):\n _x = functions[j](_x)\n return _x\n return forward\n\n if isinstance(functions, torch.nn.Sequential):\n functions = functions.children()\n if flatten:\n functions = chain.from_iterable(functions)\n if not isinstance(functions, (tuple, list)):\n functions = tuple(functions)\n\n num_checkpointed = len(functions)\n if skip_last:\n num_checkpointed -= 1\n end = -1\n for start in range(0, num_checkpointed, every):\n end = min(start + every - 1, num_checkpointed - 1)\n x = checkpoint(run_function(start, end, functions), x, preserve_rng_state=preserve_rng_state)\n if skip_last:\n return run_function(end + 1, len(functions) - 1, functions)(x)\n return x"
},
{
"identifier": "generate_default_cfgs",
"path": "pytorch-image-models/timm/models/_registry.py",
"snippet": "def generate_default_cfgs(cfgs: Dict[str, Union[Dict[str, Any], PretrainedCfg]]):\n out = defaultdict(DefaultCfg)\n default_set = set() # no tag and tags ending with * are prioritized as default\n\n for k, v in cfgs.items():\n if isinstance(v, dict):\n v = PretrainedCfg(**v)\n has_weights = v.has_weights\n\n model, tag = split_model_name_tag(k)\n is_default_set = model in default_set\n priority = (has_weights and not tag) or (tag.endswith('*') and not is_default_set)\n tag = tag.strip('*')\n\n default_cfg = out[model]\n\n if priority:\n default_cfg.tags.appendleft(tag)\n default_set.add(model)\n elif has_weights and not default_cfg.is_pretrained:\n default_cfg.tags.appendleft(tag)\n else:\n default_cfg.tags.append(tag)\n\n if has_weights:\n default_cfg.is_pretrained = True\n\n default_cfg.cfgs[tag] = v\n\n return out"
},
{
"identifier": "register_model",
"path": "pytorch-image-models/timm/models/_registry.py",
"snippet": "def register_model(fn: Callable[..., Any]) -> Callable[..., Any]:\n # lookup containing module\n mod = sys.modules[fn.__module__]\n module_name_split = fn.__module__.split('.')\n module_name = module_name_split[-1] if len(module_name_split) else ''\n\n # add model to __all__ in module\n model_name = fn.__name__\n if hasattr(mod, '__all__'):\n mod.__all__.append(model_name)\n else:\n mod.__all__ = [model_name] # type: ignore\n\n # add entries to registry dict/sets\n _model_entrypoints[model_name] = fn\n _model_to_module[model_name] = module_name\n _module_to_models[module_name].add(model_name)\n if hasattr(mod, 'default_cfgs') and model_name in mod.default_cfgs:\n # this will catch all models that have entrypoint matching cfg key, but miss any aliasing\n # entrypoints or non-matching combos\n default_cfg = mod.default_cfgs[model_name]\n if not isinstance(default_cfg, DefaultCfg):\n # new style default cfg dataclass w/ multiple entries per model-arch\n assert isinstance(default_cfg, dict)\n # old style cfg dict per model-arch\n pretrained_cfg = PretrainedCfg(**default_cfg)\n default_cfg = DefaultCfg(tags=deque(['']), cfgs={'': pretrained_cfg})\n\n for tag_idx, tag in enumerate(default_cfg.tags):\n is_default = tag_idx == 0\n pretrained_cfg = default_cfg.cfgs[tag]\n model_name_tag = '.'.join([model_name, tag]) if tag else model_name\n replace_items = dict(architecture=model_name, tag=tag if tag else None)\n if pretrained_cfg.hf_hub_id and pretrained_cfg.hf_hub_id == 'timm/':\n # auto-complete hub name w/ architecture.tag\n replace_items['hf_hub_id'] = pretrained_cfg.hf_hub_id + model_name_tag\n pretrained_cfg = replace(pretrained_cfg, **replace_items)\n\n if is_default:\n _model_pretrained_cfgs[model_name] = pretrained_cfg\n if pretrained_cfg.has_weights:\n # add tagless entry if it's default and has weights\n _model_has_pretrained.add(model_name)\n\n if tag:\n _model_pretrained_cfgs[model_name_tag] = pretrained_cfg\n if pretrained_cfg.has_weights:\n # add model w/ tag if tag is valid\n _model_has_pretrained.add(model_name_tag)\n _model_with_tags[model_name].append(model_name_tag)\n else:\n _model_with_tags[model_name].append(model_name) # has empty tag (to slowly remove these instances)\n\n _model_default_cfgs[model_name] = default_cfg\n\n return fn"
},
{
"identifier": "register_model_deprecations",
"path": "pytorch-image-models/timm/models/_registry.py",
"snippet": "def register_model_deprecations(module_name: str, deprecation_map: Dict[str, Optional[str]]):\n mod = sys.modules[module_name]\n module_name_split = module_name.split('.')\n module_name = module_name_split[-1] if len(module_name_split) else ''\n\n for deprecated, current in deprecation_map.items():\n if hasattr(mod, '__all__'):\n mod.__all__.append(deprecated)\n current_fn = None\n current_tag = ''\n if current:\n current_name, current_tag = split_model_name_tag(current)\n current_fn = getattr(mod, current_name)\n deprecated_entrypoint_fn = _deprecated_model_shim(deprecated, current_fn, current_tag)\n setattr(mod, deprecated, deprecated_entrypoint_fn)\n _model_entrypoints[deprecated] = deprecated_entrypoint_fn\n _model_to_module[deprecated] = module_name\n _module_to_models[module_name].add(deprecated)\n _deprecated_models[deprecated] = current\n _module_to_deprecated_models[module_name][deprecated] = current"
}
] | from functools import partial
from typing import Callable, List, Optional, Tuple
from torch.utils.checkpoint import checkpoint
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD
from timm.layers import SelectAdaptivePool2d, Linear, LayerType, PadType, create_conv2d, get_norm_act_layer
from ._builder import build_model_with_cfg, pretrained_cfg_for_features
from ._efficientnet_blocks import SqueezeExcite
from ._efficientnet_builder import BlockArgs, EfficientNetBuilder, decode_arch_def, efficientnet_init_weights, \
round_channels, resolve_bn_args, resolve_act_layer, BN_EPS_TF_DEFAULT
from ._features import FeatureInfo, FeatureHooks
from ._manipulate import checkpoint_seq
from ._registry import generate_default_cfgs, register_model, register_model_deprecations
import torch
import torch.nn as nn
import torch.nn.functional as F | 7,991 | se_layer: Type of Squeeze-and-Excite layer.
drop_rate: Dropout rate.
drop_path_rate: Stochastic depth rate.
"""
super(MobileNetV3Features, self).__init__()
act_layer = act_layer or nn.ReLU
norm_layer = norm_layer or nn.BatchNorm2d
se_layer = se_layer or SqueezeExcite
self.drop_rate = drop_rate
self.grad_checkpointing = False
# Stem
if not fix_stem:
stem_size = round_chs_fn(stem_size)
self.conv_stem = create_conv2d(in_chans, stem_size, 3, stride=2, padding=pad_type)
self.bn1 = norm_layer(stem_size)
self.act1 = act_layer(inplace=True)
# Middle stages (IR/ER/DS Blocks)
builder = EfficientNetBuilder(
output_stride=output_stride,
pad_type=pad_type,
round_chs_fn=round_chs_fn,
se_from_exp=se_from_exp,
act_layer=act_layer,
norm_layer=norm_layer,
se_layer=se_layer,
drop_path_rate=drop_path_rate,
feature_location=feature_location,
)
self.blocks = nn.Sequential(*builder(stem_size, block_args))
self.feature_info = FeatureInfo(builder.features, out_indices)
self._stage_out_idx = {f['stage']: f['index'] for f in self.feature_info.get_dicts()}
efficientnet_init_weights(self)
# Register feature extraction hooks with FeatureHooks helper
self.feature_hooks = None
if feature_location != 'bottleneck':
hooks = self.feature_info.get_dicts(keys=('module', 'hook_type'))
self.feature_hooks = FeatureHooks(hooks, self.named_modules())
@torch.jit.ignore
def set_grad_checkpointing(self, enable: bool = True):
self.grad_checkpointing = enable
def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
x = self.conv_stem(x)
x = self.bn1(x)
x = self.act1(x)
if self.feature_hooks is None:
features = []
if 0 in self._stage_out_idx:
features.append(x) # add stem out
for i, b in enumerate(self.blocks):
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint(b, x)
else:
x = b(x)
if i + 1 in self._stage_out_idx:
features.append(x)
return features
else:
self.blocks(x)
out = self.feature_hooks.get_output(x.device)
return list(out.values())
def _create_mnv3(variant: str, pretrained: bool = False, **kwargs) -> MobileNetV3:
features_mode = ''
model_cls = MobileNetV3
kwargs_filter = None
if kwargs.pop('features_only', False):
if 'feature_cfg' in kwargs:
features_mode = 'cfg'
else:
kwargs_filter = ('num_classes', 'num_features', 'head_conv', 'head_bias', 'global_pool')
model_cls = MobileNetV3Features
features_mode = 'cls'
model = build_model_with_cfg(
model_cls,
variant,
pretrained,
features_only=features_mode == 'cfg',
pretrained_strict=features_mode != 'cls',
kwargs_filter=kwargs_filter,
**kwargs,
)
if features_mode == 'cls':
model.default_cfg = pretrained_cfg_for_features(model.default_cfg)
return model
def _gen_mobilenet_v3_rw(variant: str, channel_multiplier: float = 1.0, pretrained: bool = False, **kwargs) -> MobileNetV3:
"""Creates a MobileNet-V3 model.
Ref impl: ?
Paper: https://arxiv.org/abs/1905.02244
Args:
channel_multiplier: multiplier to number of channels per layer.
"""
arch_def = [
# stage 0, 112x112 in
['ds_r1_k3_s1_e1_c16_nre_noskip'], # relu
# stage 1, 112x112 in
['ir_r1_k3_s2_e4_c24_nre', 'ir_r1_k3_s1_e3_c24_nre'], # relu
# stage 2, 56x56 in
['ir_r3_k5_s2_e3_c40_se0.25_nre'], # relu
# stage 3, 28x28 in
['ir_r1_k3_s2_e6_c80', 'ir_r1_k3_s1_e2.5_c80', 'ir_r2_k3_s1_e2.3_c80'], # hard-swish
# stage 4, 14x14in
['ir_r2_k3_s1_e6_c112_se0.25'], # hard-swish
# stage 5, 14x14in
['ir_r3_k5_s2_e6_c160_se0.25'], # hard-swish
# stage 6, 7x7 in
['cn_r1_k1_s1_c960'], # hard-swish
]
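    # Notation note (descriptive comment added for clarity): each entry above is a block
    # string decoded by decode_arch_def, e.g. 'ir_r1_k3_s2_e4_c24_nre' means an inverted
    # residual block repeated once, kernel 3, stride 2, expand ratio 4, 24 output channels,
    # ReLU activation; 'ds' is depthwise-separable, 'cn' is conv-bn-act, 'se0.25' adds
    # squeeze-excite with a 0.25 ratio, and 'noskip' disables the residual connection.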
model_kwargs = dict(
| """ MobileNet V3
A PyTorch impl of MobileNet-V3, compatible with TF weights from official impl.
Paper: Searching for MobileNetV3 - https://arxiv.org/abs/1905.02244
Hacked together by / Copyright 2019, Ross Wightman
"""
__all__ = ['MobileNetV3', 'MobileNetV3Features']
class MobileNetV3(nn.Module):
""" MobiletNet-V3
Based on my EfficientNet implementation and building blocks, this model utilizes the MobileNet-v3 specific
'efficient head', where global pooling is done before the head convolution without a final batch-norm
layer before the classifier.
Paper: `Searching for MobileNetV3` - https://arxiv.org/abs/1905.02244
Other architectures utilizing MobileNet-V3 efficient head that are supported by this impl include:
* HardCoRe-NAS - https://arxiv.org/abs/2102.11646 (defn in hardcorenas.py uses this class)
* FBNet-V3 - https://arxiv.org/abs/2006.02049
* LCNet - https://arxiv.org/abs/2109.15099
"""
def __init__(
self,
block_args: BlockArgs,
num_classes: int = 1000,
in_chans: int = 3,
stem_size: int = 16,
fix_stem: bool = False,
num_features: int = 1280,
head_bias: bool = True,
pad_type: PadType = '',
act_layer: Optional[LayerType] = None,
norm_layer: Optional[LayerType] = None,
se_layer: Optional[LayerType] = None,
se_from_exp: bool = True,
round_chs_fn: Callable = round_channels,
drop_rate: float = 0.,
drop_path_rate: float = 0.,
global_pool: str = 'avg',
):
"""
Args:
block_args: Arguments for blocks of the network.
num_classes: Number of classes for classification head.
in_chans: Number of input image channels.
stem_size: Number of output channels of the initial stem convolution.
fix_stem: If True, don't scale stem by round_chs_fn.
num_features: Number of output channels of the conv head layer.
head_bias: If True, add a learnable bias to the conv head layer.
pad_type: Type of padding to use for convolution layers.
act_layer: Type of activation layer.
norm_layer: Type of normalization layer.
se_layer: Type of Squeeze-and-Excite layer.
se_from_exp: If True, calculate SE channel reduction from expanded mid channels.
round_chs_fn: Callable to round number of filters based on depth multiplier.
drop_rate: Dropout rate.
drop_path_rate: Stochastic depth rate.
global_pool: Type of pooling to use for global pooling features of the FC head.
"""
super(MobileNetV3, self).__init__()
act_layer = act_layer or nn.ReLU
norm_layer = norm_layer or nn.BatchNorm2d
norm_act_layer = get_norm_act_layer(norm_layer, act_layer)
se_layer = se_layer or SqueezeExcite
self.num_classes = num_classes
self.num_features = num_features
self.drop_rate = drop_rate
self.grad_checkpointing = False
# Stem
if not fix_stem:
stem_size = round_chs_fn(stem_size)
self.conv_stem = create_conv2d(in_chans, stem_size, 3, stride=2, padding=pad_type)
self.bn1 = norm_act_layer(stem_size, inplace=True)
# Middle stages (IR/ER/DS Blocks)
builder = EfficientNetBuilder(
output_stride=32,
pad_type=pad_type,
round_chs_fn=round_chs_fn,
se_from_exp=se_from_exp,
act_layer=act_layer,
norm_layer=norm_layer,
se_layer=se_layer,
drop_path_rate=drop_path_rate,
)
self.blocks = nn.Sequential(*builder(stem_size, block_args))
self.feature_info = builder.features
head_chs = builder.in_chs
# Head + Pooling
self.global_pool = SelectAdaptivePool2d(pool_type=global_pool)
num_pooled_chs = head_chs * self.global_pool.feat_mult()
self.conv_head = create_conv2d(num_pooled_chs, self.num_features, 1, padding=pad_type, bias=head_bias)
self.act2 = act_layer(inplace=True)
self.flatten = nn.Flatten(1) if global_pool else nn.Identity() # don't flatten if pooling disabled
self.classifier = Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
efficientnet_init_weights(self)
def as_sequential(self):
layers = [self.conv_stem, self.bn1]
layers.extend(self.blocks)
layers.extend([self.global_pool, self.conv_head, self.act2])
layers.extend([nn.Flatten(), nn.Dropout(self.drop_rate), self.classifier])
return nn.Sequential(*layers)
@torch.jit.ignore
def group_matcher(self, coarse: bool = False):
return dict(
stem=r'^conv_stem|bn1',
blocks=r'^blocks\.(\d+)' if coarse else r'^blocks\.(\d+)\.(\d+)'
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable: bool = True):
self.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self):
return self.classifier
def reset_classifier(self, num_classes: int, global_pool: str = 'avg'):
self.num_classes = num_classes
# cannot meaningfully change pooling of efficient head after creation
self.global_pool = SelectAdaptivePool2d(pool_type=global_pool)
self.flatten = nn.Flatten(1) if global_pool else nn.Identity() # don't flatten if pooling disabled
self.classifier = Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x: torch.Tensor) -> torch.Tensor:
x = self.conv_stem(x)
x = self.bn1(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.blocks, x, flatten=True)
else:
x = self.blocks(x)
return x
def forward_head(self, x: torch.Tensor, pre_logits: bool = False) -> torch.Tensor:
x = self.global_pool(x)
x = self.conv_head(x)
x = self.act2(x)
x = self.flatten(x)
if pre_logits:
return x
if self.drop_rate > 0.:
x = F.dropout(x, p=self.drop_rate, training=self.training)
return self.classifier(x)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.forward_features(x)
x = self.forward_head(x)
return x
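# Illustrative sketch (added for clarity, not part of the original file): the "efficient
# head" pools *before* conv_head, so the 1x1 head conv and classifier run on a 1x1 map.
# Channel sizes (960 -> 1280) assume the large arch_def below; plain F.adaptive_avg_pool2d
# stands in for SelectAdaptivePool2d here.
def _efficient_head_shape_sketch():
    x = torch.randn(2, 960, 7, 7)                 # features out of the last block stage
    x = F.adaptive_avg_pool2d(x, 1)               # (2, 960, 1, 1) -- pooled first
    x = nn.Conv2d(960, 1280, 1)(x)                # conv_head, no batch-norm afterwards
    x = F.hardswish(x)                            # act2 for the MobileNet-V3 large config
    return x.flatten(1).shape                     # torch.Size([2, 1280])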
class MobileNetV3Features(nn.Module):
""" MobileNetV3 Feature Extractor
A work-in-progress feature extraction module for MobileNet-V3 to use as a backbone for segmentation
and object detection models.
"""
def __init__(
self,
block_args: BlockArgs,
out_indices: Tuple[int, ...] = (0, 1, 2, 3, 4),
feature_location: str = 'bottleneck',
in_chans: int = 3,
stem_size: int = 16,
fix_stem: bool = False,
output_stride: int = 32,
pad_type: PadType = '',
round_chs_fn: Callable = round_channels,
se_from_exp: bool = True,
act_layer: Optional[LayerType] = None,
norm_layer: Optional[LayerType] = None,
se_layer: Optional[LayerType] = None,
drop_rate: float = 0.,
drop_path_rate: float = 0.,
):
"""
Args:
block_args: Arguments for blocks of the network.
out_indices: Output from stages at indices.
feature_location: Location of feature before/after each block, must be in ['bottleneck', 'expansion']
in_chans: Number of input image channels.
stem_size: Number of output channels of the initial stem convolution.
fix_stem: If True, don't scale stem by round_chs_fn.
output_stride: Output stride of the network.
pad_type: Type of padding to use for convolution layers.
round_chs_fn: Callable to round number of filters based on depth multiplier.
se_from_exp: If True, calculate SE channel reduction from expanded mid channels.
act_layer: Type of activation layer.
norm_layer: Type of normalization layer.
se_layer: Type of Squeeze-and-Excite layer.
drop_rate: Dropout rate.
drop_path_rate: Stochastic depth rate.
"""
super(MobileNetV3Features, self).__init__()
act_layer = act_layer or nn.ReLU
norm_layer = norm_layer or nn.BatchNorm2d
se_layer = se_layer or SqueezeExcite
self.drop_rate = drop_rate
self.grad_checkpointing = False
# Stem
if not fix_stem:
stem_size = round_chs_fn(stem_size)
self.conv_stem = create_conv2d(in_chans, stem_size, 3, stride=2, padding=pad_type)
self.bn1 = norm_layer(stem_size)
self.act1 = act_layer(inplace=True)
# Middle stages (IR/ER/DS Blocks)
builder = EfficientNetBuilder(
output_stride=output_stride,
pad_type=pad_type,
round_chs_fn=round_chs_fn,
se_from_exp=se_from_exp,
act_layer=act_layer,
norm_layer=norm_layer,
se_layer=se_layer,
drop_path_rate=drop_path_rate,
feature_location=feature_location,
)
self.blocks = nn.Sequential(*builder(stem_size, block_args))
self.feature_info = FeatureInfo(builder.features, out_indices)
self._stage_out_idx = {f['stage']: f['index'] for f in self.feature_info.get_dicts()}
efficientnet_init_weights(self)
# Register feature extraction hooks with FeatureHooks helper
self.feature_hooks = None
if feature_location != 'bottleneck':
hooks = self.feature_info.get_dicts(keys=('module', 'hook_type'))
self.feature_hooks = FeatureHooks(hooks, self.named_modules())
@torch.jit.ignore
def set_grad_checkpointing(self, enable: bool = True):
self.grad_checkpointing = enable
def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
x = self.conv_stem(x)
x = self.bn1(x)
x = self.act1(x)
if self.feature_hooks is None:
features = []
if 0 in self._stage_out_idx:
features.append(x) # add stem out
for i, b in enumerate(self.blocks):
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint(b, x)
else:
x = b(x)
if i + 1 in self._stage_out_idx:
features.append(x)
return features
else:
self.blocks(x)
out = self.feature_hooks.get_output(x.device)
return list(out.values())
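# Illustrative sketch (added for clarity, not part of the original file): when
# feature_location != 'bottleneck', FeatureHooks captures intermediate outputs with
# registered forward hooks instead of the explicit loop above; the mechanism boils
# down to this.
def _forward_hook_sketch():
    captured = {}
    conv = nn.Conv2d(3, 8, 3)
    handle = conv.register_forward_hook(lambda m, inp, out: captured.update({'conv': out}))
    conv(torch.randn(1, 3, 8, 8))
    handle.remove()
    return captured['conv'].shape  # torch.Size([1, 8, 6, 6])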
def _create_mnv3(variant: str, pretrained: bool = False, **kwargs) -> MobileNetV3:
features_mode = ''
model_cls = MobileNetV3
kwargs_filter = None
if kwargs.pop('features_only', False):
if 'feature_cfg' in kwargs:
features_mode = 'cfg'
else:
kwargs_filter = ('num_classes', 'num_features', 'head_conv', 'head_bias', 'global_pool')
model_cls = MobileNetV3Features
features_mode = 'cls'
model = build_model_with_cfg(
model_cls,
variant,
pretrained,
features_only=features_mode == 'cfg',
pretrained_strict=features_mode != 'cls',
kwargs_filter=kwargs_filter,
**kwargs,
)
if features_mode == 'cls':
model.default_cfg = pretrained_cfg_for_features(model.default_cfg)
return model
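# Note (descriptive comment added for clarity): with features_only=True and no explicit
# 'feature_cfg', _create_mnv3 swaps the class to MobileNetV3Features, filters classifier
# related kwargs, and reduces the pretrained cfg via pretrained_cfg_for_features().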
def _gen_mobilenet_v3_rw(variant: str, channel_multiplier: float = 1.0, pretrained: bool = False, **kwargs) -> MobileNetV3:
"""Creates a MobileNet-V3 model.
Ref impl: ?
Paper: https://arxiv.org/abs/1905.02244
Args:
channel_multiplier: multiplier to number of channels per layer.
"""
arch_def = [
# stage 0, 112x112 in
['ds_r1_k3_s1_e1_c16_nre_noskip'], # relu
# stage 1, 112x112 in
['ir_r1_k3_s2_e4_c24_nre', 'ir_r1_k3_s1_e3_c24_nre'], # relu
# stage 2, 56x56 in
['ir_r3_k5_s2_e3_c40_se0.25_nre'], # relu
# stage 3, 28x28 in
['ir_r1_k3_s2_e6_c80', 'ir_r1_k3_s1_e2.5_c80', 'ir_r2_k3_s1_e2.3_c80'], # hard-swish
# stage 4, 14x14in
['ir_r2_k3_s1_e6_c112_se0.25'], # hard-swish
# stage 5, 14x14in
['ir_r3_k5_s2_e6_c160_se0.25'], # hard-swish
# stage 6, 7x7 in
['cn_r1_k1_s1_c960'], # hard-swish
]
model_kwargs = dict( | block_args=decode_arch_def(arch_def), | 3 | 2023-11-05 01:25:14+00:00 | 12k |
ilur98/DGQ | dgq/quant/quant_sequence.py | [
{
"identifier": "prepare_hook",
"path": "dgq/quant/smooth_hooker.py",
"snippet": "def prepare_hook(layer, inps, qconfig, inps_kwargs): \n handles = []\n for mod in layer.modules():\n if isinstance(mod, nn.LayerNorm) or isinstance(mod, LlamaRMSNorm):\n if qconfig[\"meanact\"]:\n handles.append(mod.register_forward_hook(sta_batch_minmax))\n if qconfig[\"smoothquant\"]:\n handles.append(mod.register_forward_hook(sta_batch0))\n if isinstance(layer, LlamaDecoderLayer):\n handles.append(layer.mlp.down_proj.register_forward_hook(sta_batch1))\n handles.append(layer.self_attn.o_proj.register_forward_hook(sta_batch1))\n if qconfig['kvquant']:\n handles.append(layer.self_attn.k_quant.register_forward_hook(sta_batch_qkv))\n handles.append(layer.self_attn.v_quant.register_forward_hook(sta_batch_qkv))\n handles.append(layer.self_attn.q_quant.register_forward_hook(sta_batch_qkv))\n elif isinstance(layer, OPTDecoderLayer):\n handles.append(layer.fc2.register_forward_hook(sta_batch1))\n handles.append(layer.self_attn.out_proj.register_forward_hook(sta_batch1))\n if qconfig['kvquant']:\n handles.append(layer.self_attn.k_quant.register_forward_hook(sta_batch_qkv))\n handles.append(layer.self_attn.v_quant.register_forward_hook(sta_batch_qkv))\n handles.append(layer.self_attn.q_quant.register_forward_hook(sta_batch_qkv))\n elif isinstance(layer, BloomBlock):\n if qconfig['kvquant']:\n handles.append(layer.self_attn.k_quant.register_forward_hook(sta_batch_qkv))\n handles.append(layer.self_attn.v_quant.register_forward_hook(sta_batch_qkv))\n handles.append(layer.self_attn.q_quant.register_forward_hook(sta_batch_qkv))\n else:\n raise NotImplemented\n\n for inp in inps:\n # print(inp.unsqueeze(0).shape)\n layer(inp.unsqueeze(0), **inps_kwargs)\n for h in handles:\n h.remove()\n return "
},
{
"identifier": "mean_bias",
"path": "dgq/quant/smooth.py",
"snippet": "@torch.no_grad()\ndef mean_bias(module):\n if isinstance(module, OPTDecoderLayer):\n attn_ln = module.self_attn_layer_norm\n qkv = [module.self_attn.q_proj,\n module.self_attn.k_proj, module.self_attn.v_proj]\n qkv_input_scales = (attn_ln.out_max + attn_ln.out_min) / 2\n mean_ln_fcs(attn_ln, qkv, qkv_input_scales)\n\n ffn_ln = module.final_layer_norm\n fc1 = module.fc1\n fc1_input_scales = (ffn_ln.out_max + ffn_ln.out_min) / 2\n mean_ln_fcs(ffn_ln, fc1, fc1_input_scales)\n elif isinstance(module, BloomBlock):\n attn_ln = module.input_layernorm\n qkv = module.self_attention.query_key_value\n qkv_input_scales = (attn_ln.out_max + attn_ln.out_min) / 2\n mean_ln_fcs(attn_ln, qkv, qkv_input_scales)\n\n ffn_ln = module.post_attention_layernorm\n fc1 = module.mlp.dense_h_to_4h\n fc1_input_scales = (ffn_ln.out_max + ffn_ln.out_min) / 2\n mean_ln_fcs(ffn_ln, fc1, fc1_input_scales)\n elif isinstance(module, LlamaDecoderLayer):\n attn_ln = module.input_layernorm\n qkv = [module.self_attn.q_proj,\n module.self_attn.k_proj, module.self_attn.v_proj]\n qkv_input_scales = (attn_ln.out_max + attn_ln.out_min) / 2\n mean_ln_fcs(attn_ln, qkv, qkv_input_scales)\n ffn_ln = module.post_attention_layernorm\n gate_proj = [module.mlp.gate_proj,module.mlp.up_proj]\n gate_proj_scales = (ffn_ln.out_max + ffn_ln.out_min) / 2\n mean_ln_fcs(ffn_ln, gate_proj, gate_proj_scales)\n for mod in module.modules():\n if hasattr(mod, 'out_max'):\n delattr(mod, 'out_max')\n if hasattr(mod, 'out_min'):\n delattr(mod, 'out_min') "
},
{
"identifier": "smooth_module",
"path": "dgq/quant/smooth.py",
"snippet": "@torch.no_grad()\ndef smooth_module(module, alpha=0.5, group_size=-1, weight_smooth=False, attention_mask=None, position_ids=None):\n if weight_smooth:\n if isinstance(module, OPTDecoderLayer):\n attn_ln = module.self_attn_layer_norm\n qkv = [module.self_attn.q_proj,\n module.self_attn.k_proj, module.self_attn.v_proj]\n # smooth_ln_fcs_weight(attn_ln, qkv) ##opt66b very bad...\n smooth_fc_weight(module.self_attn.v_proj, module.self_attn.out_proj, group_size)\n ffn_ln = module.final_layer_norm\n fc1 = module.fc1\n smooth_ln_fcs_weight(ffn_ln, fc1)\n smooth_fc_weight(module.fc1, module.fc2, group_size)\n elif isinstance(module, BloomBlock):\n attn_ln = module.input_layernorm\n qkv = module.self_attention.query_key_value\n smooth_ln_fcs_weight(attn_ln, qkv)\n v_proj = module.self_attention.query_key_value\n o_proj = module.self_attention.dense\n # smooth_fc_weight(v_proj, o_proj,qkv=True) ##bloom3b bad\n ffn_ln = module.post_attention_layernorm\n fc1 = module.mlp.dense_h_to_4h\n smooth_ln_fcs_weight(ffn_ln, fc1)\n # smooth_fc_weight(module.mlp.dense_4h_to_h, module.mlp.dense_h_to_4h, group_size)\n elif isinstance(module, LlamaDecoderLayer):\n attn_ln = module.input_layernorm\n qkv = [module.self_attn.q_proj,\n module.self_attn.k_proj, module.self_attn.v_proj]\n smooth_ln_fcs_weight(attn_ln, qkv)\n smooth_fc_weight(module.self_attn.v_proj, module.self_attn.o_proj, group_size)\n ffn_ln = module.post_attention_layernorm\n gate_proj = [module.mlp.gate_proj,module.mlp.up_proj]\n smooth_ln_fcs_weight(ffn_ln, gate_proj)\n smooth_fc_weight(module.mlp.up_proj, module.mlp.down_proj, group_size)\n else:\n if isinstance(module, OPTDecoderLayer):\n attn_ln = module.self_attn_layer_norm\n qkv = [module.self_attn.q_proj,\n module.self_attn.k_proj, module.self_attn.v_proj]\n qkv_input_scales = attn_ln.out_absmax\n smooth_ln_fcs(attn_ln, qkv, qkv_input_scales, alpha)\n v_proj = module.self_attn.v_proj\n o_proj = module.self_attn.out_proj\n # smooth_ov(v_proj, o_proj, o_proj.inp_absmax)\n ffn_ln = module.final_layer_norm\n fc1 = module.fc1\n fc1_input_scales = ffn_ln.out_absmax\n smooth_ln_fcs(ffn_ln, fc1, fc1_input_scales, alpha)\n # fc2 = module.fc2\n # fc2.inp_bias = ((fc2.inp_absmax )/2 ).clamp(min=0.).to(torch.float16)\n elif isinstance(module, BloomBlock):\n attn_ln = module.input_layernorm\n qkv = module.self_attention.query_key_value\n qkv_input_scales = attn_ln.out_absmax\n smooth_ln_fcs(attn_ln, qkv, qkv_input_scales, alpha)\n v_proj = module.self_attention.query_key_value\n o_proj = module.self_attention.dense\n # smooth_ov(v_proj, o_proj, o_proj.inp_absmax,qkv=True)\n ffn_ln = module.post_attention_layernorm\n fc1 = module.mlp.dense_h_to_4h\n fc1_input_scales = ffn_ln.out_absmax\n smooth_ln_fcs(ffn_ln, fc1, fc1_input_scales, alpha)\n fc2 = module.mlp.dense_4h_to_h\n fc2.inp_bias = ((fc2.inp_absmax + 0.2)/2 - 0.2 ).clamp(min=0.).to(torch.float16)\n elif isinstance(module, LlamaDecoderLayer):\n attn_ln = module.input_layernorm\n qkv = [module.self_attn.q_proj,\n module.self_attn.k_proj, module.self_attn.v_proj]\n qkv_input_scales = attn_ln.out_absmax\n smooth_ln_fcs(attn_ln, qkv, qkv_input_scales, alpha)\n v_proj = module.self_attn.v_proj\n o_proj = module.self_attn.o_proj\n # smooth_ov(v_proj, o_proj, o_proj.inp_absmax)\n ffn_ln = module.post_attention_layernorm\n gate_proj = [module.mlp.gate_proj,module.mlp.up_proj]\n gate_proj_scales = ffn_ln.out_absmax\n smooth_ln_fcs(ffn_ln, gate_proj, gate_proj_scales, alpha)\n 
smooth_llama_mlp(module.mlp.gate_proj,module.mlp.up_proj,module.mlp.down_proj,module.mlp.down_proj.inp_absmax)\n for mod in module.modules():\n if hasattr(mod, 'inp_absmax'):\n delattr(mod, 'inp_absmax')\n if hasattr(mod, 'out_absmax'):\n delattr(mod, 'out_absmax')\n if hasattr(mod, 'inp_absmean'):\n delattr(mod, 'inp_absmean')\n if hasattr(mod, 'out_absmean'):\n delattr(mod, 'out_absmean')"
},
{
"identifier": "QuantLinear",
"path": "dgq/quant/quant_linear.py",
"snippet": "class QuantLinear(nn.Module):\n def __init__(self, in_features, out_features, bias, qconfig):\n super().__init__()\n self.in_features = in_features\n self.out_features = out_features\n self.actq = qconfig[\"act_quant\"] is not None\n self.wtq = qconfig[\"wt_quant\"] is not None\n self.qconfig = qconfig\n if self.actq:\n self.abits = self.qconfig[\"act_quant\"][\"bits\"]\n self.register_buffer(\"amax\", torch.zeros(1, dtype=torch.bfloat16))\n if self.wtq:\n self.groupsize = self.qconfig[\"wt_quant\"][\"groupsize\"] if self.qconfig[\"wt_quant\"][\"groupsize\"] != -1 else self.in_features\n self.wbits = self.qconfig[\"wt_quant\"][\"bits\"]\n self.register_buffer('qweight', torch.zeros((in_features // 32 * self.wbits, out_features), dtype=torch.int32))\n self.register_buffer('wscales', torch.zeros((math.ceil(in_features / self.groupsize), out_features), dtype=torch.bfloat16))\n self.register_buffer('wzeros', torch.zeros((math.ceil(in_features / self.groupsize), out_features // 32 * self.wbits), dtype=torch.int32))\n if qconfig[\"wt_quant\"][\"w4w8\"]:\n self.register_buffer('wscales8', torch.zeros((out_features, ), dtype=torch.float16))\n if bias:\n self.register_buffer('bias', torch.zeros((out_features), dtype=torch.float16))\n else:\n self.bias = None\n\n def unpack(self, tensor):\n if self.wbits < 8:\n fintweight = python_decompress(tensor).view(-1, self.groupsize)\n else:\n fintweight = tensor.view(-1, self.groupsize)\n if hasattr(self, \"wscales8\"):\n qscales = (self.wscales.view(self.out_features, -1) * self.wscales8).view(-1, 1).to(tensor.device)\n else:\n qscales = self.wscales.to(tensor.device)\n fweight = (fintweight - self.wzeros.to(tensor.device)) * qscales\n\n return fweight.view(self.out_features, self.in_features).bfloat16()\n\n def pack(self, scales, zeros):\n scales = scales.contiguous().bfloat16().reshape(-1, 1)\n self.wscales = scales\n zeros = zeros.contiguous().bfloat16().reshape(-1, 1)\n self.wzeros = zeros\n scale_zeros = zeros.reshape(-1,1) * scales.reshape(-1,1)\n intweight = torch.round((self.weight.data.view(-1, self.groupsize)) / self.wscales + self.wzeros).to(torch.int)\n delattr(self, \"weight\")\n if self.wbits < 8:\n self.qweight = python_compress(intweight)\n else:\n self.qweight = intweight\n def prepare_actfun(self):\n if self.qconfig[\"act_quant\"] is None:\n return\n if self.qconfig[\"act_quant\"][\"method\"] == \"static\":\n self.act_quant = partial(quantize_activation_static,absmax=self.amax)\n # self.act_quant = quantize_activation_static\n elif self.qconfig[\"act_quant\"][\"method\"] == \"per_tensor\":\n self.act_quant = quantize_activation_per_tensor_absmax\n elif self.qconfig[\"act_quant\"][\"method\"] == \"per_token\":\n self.act_quant = quantize_activation_per_token_absmax\n else:\n raise NotImplemented\n def packW4W8(self, scales, zeros, scales8):\n scales = scales.contiguous().char().reshape(-1, 1)\n self.wscales = scales\n zeros = zeros.contiguous().char().reshape(-1, 1)\n self.wzeros = zeros\n scales8 = scales8.contiguous().bfloat16().reshape(-1, 1)\n self.wscales8 = scales8.reshape(-1, 1)\n qscales = (self.wscales.view(self.out_features, -1) * self.wscales8).view(-1, 1)\n intweight = torch.round((self.weight.data.view(-1, self.groupsize).float()) / qscales.reshape(-1, 1) + self.wzeros).to(torch.int)\n self.qweight = python_compress(intweight)\n delattr(self, \"weight\")\n\n def setquant(self, actq, wtq):\n self.actq = actq\n self.wtq = wtq\n\n def forward(self, x):\n out_shape = x.shape[:-1] + (self.out_features, )\n if 
self.actq:\n x = self.act_quant(x)\n if self.wtq:\n weight = self.unpack(self.qweight)\n else:\n weight = self.weight\n out = x.reshape(-1, x.shape[-1]) @ weight.t()\n out = out + self.bias if self.bias is not None else out \n return out.reshape(out_shape).to(x.dtype)"
},
{
"identifier": "QuantizerHelper",
"path": "dgq/quant/quantizer_helper.py",
"snippet": "class QuantizerHelper:\n\n def __init__(self, layer, observe=False):\n self.layer = layer\n self.dev = self.layer.weight.device\n W = layer.weight.data.clone()\n if isinstance(self.layer, nn.Conv2d):\n W = W.flatten(1)\n if isinstance(self.layer, transformers.Conv1D):\n W = W.t()\n self.rows = W.shape[0]\n self.columns = W.shape[1]\n self.H = torch.zeros((self.columns, self.columns), device=self.dev)\n self.nsamples = 0\n self.quantizer = Quantizer()\n self.observe = observe\n self.inp_absmax = None\n\n def add_batch(self, inp, out):\n # Hessian H = 2 X XT + λ I\n hidden_dim = inp.shape[-1]\n comming_max = torch.max(inp.view(-1, hidden_dim).abs().detach(), dim=0)[0].float().cpu()\n # print(comming_max)\n if self.inp_absmax is None:\n self.inp_absmax = comming_max\n self.inp_absmax2 = comming_max\n self.cnt = 1\n else:\n self.inp_absmax = self.inp_absmax.min( comming_max)\n self.inp_absmax2 = (self.inp_absmax+comming_max*self.cnt)/(self.cnt+1)\n self.cnt += 1\n self.layer.inp_absmax = self.inp_absmax #+ (self.inp_absmax2-self.inp_absmax)*0.2\n\n if len(inp.shape) == 2:\n inp = inp.unsqueeze(0)\n tmp = inp.shape[0]\n self.inp1 = inp.squeeze()\n self.out1 = None\n if isinstance(self.layer, nn.Linear) or isinstance(self.layer, transformers.Conv1D) or isinstance(self.layer, QuantLinear):\n if len(inp.shape) == 3:\n inp = inp.reshape((-1, inp.shape[-1]))\n inp = inp.t()\n if isinstance(self.layer, nn.Conv2d):\n unfold = nn.Unfold(self.layer.kernel_size, dilation=self.layer.dilation, padding=self.layer.padding, stride=self.layer.stride)\n inp = unfold(inp)\n inp = inp.permute([1, 0, 2])\n inp = inp.flatten(1)\n self.H *= self.nsamples / (self.nsamples + tmp)\n self.nsamples += tmp\n inp = math.sqrt(2 / self.nsamples) * inp.float()\n self.H += inp.matmul(inp.t())\n\n def print_loss(self, name, q_weight, weight_error, timecost):\n table = Texttable()\n name += ' ' * (16 - len(name))\n\n table.header(['name', 'weight_error', 'fp_inp_SNR', 'q_inp_SNR', 'time'])\n\n # assign weight\n self.layer.weight.data = q_weight.reshape(self.layer.weight.shape).to(self.layer.weight.data.dtype)\n\n if self.out1 is not None:\n # quantize input to int8\n quantizer = Quantizer()\n quantizer.configure(8, perchannel=False, sym=True, mse=False)\n quantizer.find_params(self.inp1)\n q_in = quantizer.quantize(self.inp1).type(torch.float16)\n q_out = self.layer(q_in)\n\n # get kinds of SNR\n q_SNR = torch_snr_error(q_out, self.out1).item()\n fp_SNR = torch_snr_error(self.layer(self.inp1), self.out1).item()\n else:\n q_SNR = '-'\n fp_SNR = '-'\n\n table.add_row([name, weight_error, fp_SNR, q_SNR, timecost])\n print(table.draw().split('\\n')[-2])\n\n\n def naivequant(self, groupsize=-1):\n self.method = 'naive'\n self.layer.to(self.dev)\n\n W = self.layer.weight.data.clone()\n org_shape = W.shape\n W = W.float()\n if groupsize >0:\n org_shape = W.shape\n tmp_W = W.view(-1, groupsize)\n self.quantizer.find_params(tmp_W, True)\n self.layer.weight.data = self.quantizer.quantize(tmp_W).to(self.layer.weight.data.dtype).view(org_shape)\n else:\n self.quantizer.find_params(W, weight=True)\n self.layer.weight.data = self.quantizer.quantize(W).to(self.layer.weight.data.dtype)\n\n scale = self.quantizer.scale.view(org_shape[0], -1)\n zero = self.quantizer.zero.view(org_shape[0], -1)\n return scale, zero\n\n def searchquant(self, groupsize=-1, W4W8=False):\n self.method = 'search'\n W = self.layer.weight.data.clone()\n org_shape = W.shape\n\n device, dtype = W.device, W.dtype\n if groupsize > 0:\n g_idx = [i // groupsize for 
i in range(org_shape[-1])]\n g_idx = torch.tensor(g_idx, dtype=torch.int32, device=device)\n else:\n g_idx = torch.tensor([])\n \n groupsize = groupsize if groupsize > 0 else org_shape[-1]\n\n grid = 20\n best_scale = torch.ones([W.shape[1] // groupsize, W.shape[0]],dtype=torch.bfloat16, device=device)\n best_zero = torch.ones([W.shape[1] // groupsize, W.shape[0]],dtype=torch.bfloat16, device=device)\n assert org_shape[1] % groupsize == 0\n assert self.quantizer.sym == False\n for nn in range(org_shape[1] // groupsize):\n W_t = W[:,nn*groupsize:(nn+1)*groupsize]\n inp_t = self.inp1[:,nn*groupsize:(nn+1)*groupsize]\n org_out = inp_t@(W_t.t())\n W_max = W_t.amax(dim=-1, keepdim=True)\n W_min = W_t.amin(dim=-1, keepdim=True)\n best = torch.full([W.shape[0]], float('inf'), device=device, dtype=dtype)\n for i in range(grid):\n ratio = 1.02 - (i+1) / grid*0.22\n W_t = W_t.clamp(W_min*ratio, W_max*ratio)\n qscale = (W_max*ratio - W_min*ratio) / self.quantizer.maxq\n qzero = torch.round(- W_min*ratio / qscale)\n qtensor = torch.clamp(torch.round(W_t/qscale)+qzero,0,self.quantizer.maxq)\n W_qt = qscale*(qtensor-qzero)\n out = inp_t@(W_qt.t())\n mse = (org_out - out).abs().pow(2).mean(dim=0).view(-1)\n best_idx = (best > mse).view(-1)\n best[best_idx] = mse[best_idx]\n best_scale[nn][best_idx] = qscale[best_idx].view(-1)\n best_zero[nn][best_idx] = qzero[best_idx].view(-1) \n\n best_scale = best_scale.t()\n best_zero = best_zero.t()\n self.quantizer.scale = best_scale.reshape(-1, 1)\n self.quantizer.zero = best_zero.reshape(-1, 1)\n self.layer.weight.data = self.quantizer.quantize(W.view(-1, groupsize)).to(self.layer.weight.data.dtype).view(org_shape)\n best_scale8 = torch.zeros((W.shape[0],), dtype=torch.bfloat16, device=device)\n if W4W8:\n grid = 80\n # best_scale = torch.ones([W.shape[0], 1], dtype=torch.float16, device=device)\n org_out = [email protected]()\n best = torch.full([W.shape[0]], float('inf'), device=device, dtype=dtype)\n for i in range(grid):\n ratio = 1.02 - (i+1) / grid*0.82\n # W_max = torch.abs(W_t).max() * ratio\n # \n W_max = W.abs().amax(dim=-1, keepdim=True) * ratio\n qscale_8 = W_max / (2 ** (8-1) - 1)\n qscale = torch.round(best_scale / qscale_8).clamp(min=1.)\n # qtensor = torch.clamp(torch.round(W_t/qscale)+qzero,0,self.quantizer.maxq)\n int_max = torch.floor(127 / qscale)\n # upper = torch.minimum(15, best_zero+int_max)\n # lower = torch.maximum(0, best_zero-int_max)\n inp_t = self.inp1\n upper = torch.clamp(best_zero+int_max, max=15.).reshape(-1, 1)\n lower = torch.clamp(best_zero-int_max, min=0.).reshape(-1, 1)\n qscale_q = (qscale * qscale_8).reshape(-1, 1)\n W_t = W.clamp(-W_max, W_max).view(-1, groupsize)\n q_tensor = torch.clamp(torch.round(W_t/qscale_q) + best_zero.reshape(-1, 1), lower, upper) \n W_qt = qscale_q*(q_tensor-best_zero.reshape(-1, 1))\n W_qt = W_qt.view(org_shape)\n out = inp_t@(W_qt.t())\n mse = (org_out - out).abs().pow(2).mean(dim=0).view(-1)\n best_idx = (best > mse).view(-1)\n best[best_idx] = mse[best_idx]\n best_scale8[best_idx] = qscale_8[best_idx].view(-1) \n W = W.clamp(best_scale8.view(-1, 1) * -127, best_scale8.view(-1, 1) * 127)\n best_scale = torch.round(best_scale / best_scale8.view(-1, 1)).clamp(min=1.)\n int_max = torch.floor(127 / best_scale)\n best_scale_q = (best_scale * best_scale8.view(-1, 1)).reshape(-1, 1)\n upper = torch.clamp(best_zero+int_max, max=15.).reshape(-1, 1)\n lower = torch.clamp(best_zero-int_max, min=0.).reshape(-1, 1)\n q_tensor = torch.clamp(torch.round(W.view(-1, groupsize)/ best_scale_q) + 
best_zero.reshape(-1, 1), lower, upper)\n self.layer.weight.data = best_scale_q*(q_tensor-best_zero.reshape(-1, 1))\n self.inp1 = None\n return best_scale, best_zero, best_scale8\n\n def gptqquant(self, blocksize=128, percdamp=.01, groupsize=-1, actorder=False, name=''):\n self.layer.to(self.dev)\n\n W = self.layer.weight.data.clone()\n if isinstance(self.layer, nn.Conv2d):\n W = W.flatten(1)\n if isinstance(self.layer, transformers.Conv1D):\n W = W.t()\n W = W.float()\n\n tick = time.time()\n\n if not self.quantizer.ready():\n self.quantizer.find_params(W, weight=True)\n\n H = self.H\n if not self.observe:\n del self.H\n dead = torch.diag(H) == 0\n H[dead, dead] = 1\n W[:, dead] = 0\n\n if actorder:\n perm = torch.argsort(torch.diag(H), descending=True)\n W = W[:, perm]\n H = H[perm][:, perm]\n\n Losses = torch.zeros_like(W)\n Q = torch.zeros_like(W)\n\n damp = percdamp * torch.mean(torch.diag(H))\n diag = torch.arange(self.columns, device=self.dev)\n H[diag, diag] += damp\n H = torch.linalg.cholesky(H)\n H = torch.cholesky_inverse(H)\n H = torch.linalg.cholesky(H, upper=True)\n Hinv = H\n\n g_idx = []\n scale = []\n zero = []\n now_idx = 1\n\n for i1 in range(0, self.columns, blocksize):\n i2 = min(i1 + blocksize, self.columns)\n count = i2 - i1\n\n W1 = W[:, i1:i2].clone()\n Q1 = torch.zeros_like(W1)\n Err1 = torch.zeros_like(W1)\n Losses1 = torch.zeros_like(W1)\n Hinv1 = Hinv[i1:i2, i1:i2]\n\n for i in range(count):\n w = W1[:, i]\n d = Hinv1[i, i]\n\n if groupsize != -1:\n if (i1 + i) % groupsize == 0:\n self.quantizer.find_params(W[:, (i1 + i):(i1 + i + groupsize)], weight=True)\n\n if ((i1 + i) // groupsize) - now_idx == -1:\n scale.append(self.quantizer.scale)\n zero.append(self.quantizer.zero)\n now_idx += 1\n\n q = self.quantizer.quantize(w.unsqueeze(1)).flatten()\n Q1[:, i] = q\n Losses1[:, i] = (w - q)**2 / d**2\n\n err1 = (w - q) / d\n W1[:, i:] -= err1.unsqueeze(1).matmul(Hinv1[i, i:].unsqueeze(0))\n Err1[:, i] = err1\n\n Q[:, i1:i2] = Q1\n Losses[:, i1:i2] = Losses1 / 2\n\n W[:, i2:] -= Err1.matmul(Hinv[i1:i2, i2:])\n\n torch.cuda.synchronize()\n error = torch.sum(Losses).item()\n\n groupsize = groupsize if groupsize != -1 else self.columns\n g_idx = [i // groupsize for i in range(self.columns)]\n g_idx = torch.tensor(g_idx, dtype=torch.int32, device=Q.device)\n if actorder:\n invperm = torch.argsort(perm)\n Q = Q[:, invperm]\n g_idx = g_idx[invperm]\n\n if isinstance(self.layer, transformers.Conv1D):\n Q = Q.t()\n\n self.print_loss(name=name, q_weight=Q, weight_error=error, timecost=(time.time() - tick))\n\n if scale == []:\n scale.append(self.quantizer.scale)\n zero.append(self.quantizer.zero)\n scale = torch.cat(scale, dim=1)\n zero = torch.cat(zero, dim=1)\n return scale, zero, g_idx, error\n\n def free(self):\n self.inp1 = None\n self.out1 = None\n self.H = None\n self.Losses = None\n self.Trace = None\n torch.cuda.empty_cache()"
},
{
"identifier": "kvquant",
"path": "dgq/quant/kvquanter.py",
"snippet": "def kvquant(layer):\n for mod in layer.modules():\n if isinstance(mod, ATTENTION_CLASS):\n mod.q_quant.scale = 2 * mod.q_quant.qkv_absmax.max() / mod.q_quant.maxq\n mod.q_quant.zero = torch.full_like(mod.q_quant.scale, (mod.q_quant.maxq + 1) / 2)\n mod.k_quant.scale = 2 * mod.k_quant.qkv_absmax.max() / mod.k_quant.maxq\n mod.k_quant.zero = torch.full_like(mod.k_quant.scale, (mod.k_quant.maxq + 1) / 2)\n mod.v_quant.scale = 2 * mod.v_quant.qkv_absmax.max() / mod.v_quant.maxq\n mod.v_quant.zero = torch.full_like(mod.v_quant.scale, (mod.v_quant.maxq + 1) / 2)\n delattr(mod.q_quant, \"qkv_absmax\")\n delattr(mod.k_quant, \"qkv_absmax\")\n delattr(mod.v_quant, \"qkv_absmax\")"
},
{
"identifier": "find_layers",
"path": "dgq/utils/modelutils.py",
"snippet": "def find_layers(module, layers=[nn.Conv2d, nn.Linear], name=''):\n if type(module) in layers:\n return {name: module}\n res = {}\n for name1, child in module.named_children():\n res.update(find_layers(child, layers=layers, name=name + '.' + name1 if name != '' else name1))\n return res"
},
{
"identifier": "move_embed",
"path": "dgq/utils/modelutils.py",
"snippet": "def move_embed(model, device):\n if isinstance(model, LlamaForCausalLM):\n model.model.embed_tokens = model.model.embed_tokens.to(device)\n elif isinstance(model, OPTForCausalLM):\n model.model.decoder.embed_tokens = model.model.decoder.embed_tokens.to(device)\n model.model.decoder.embed_positions = model.model.decoder.embed_positions.to(device)\n elif isinstance(model, BloomForCausalLM):\n model.transformer.word_embeddings = model.transformer.word_embeddings.to(device)\n model.transformer.word_embeddings_layernorm = model.transformer.word_embeddings_layernorm.to(device)\n elif \"mpt\" in str(model.__class__).lower():\n model.transformer.wte = model.transformer.wte.to(device)\n model.transformer.emb_drop = model.transformer.emb_drop.to(device)\n elif \"falcon\" in str(model.__class__).lower():\n model.transformer.word_embeddings = model.transformer.word_embeddings.to(device)\n else:\n raise NotImplementedError(type(model))"
},
{
"identifier": "get_blocks",
"path": "dgq/utils/modelutils.py",
"snippet": "def get_blocks(model):\n if isinstance(model, LlamaForCausalLM):\n layers = model.model.layers\n elif isinstance(model, OPTForCausalLM):\n layers = model.model.decoder.layers\n elif isinstance(model, BloomForCausalLM):\n layers = model.transformer.h\n elif \"mpt\" in str(model.__class__).lower():\n layers = model.transformer.blocks\n elif \"falcon\" in str(model.__class__).lower():\n layers = model.transformer.h\n else:\n raise NotImplementedError(type(model))\n return layers"
}
] | import torch
import torch.nn as nn
from dgq.quant.smooth_hooker import prepare_hook
from dgq.quant.smooth import mean_bias, smooth_module
from dgq.quant.quant_linear import QuantLinear
from dgq.quant.quantizer_helper import QuantizerHelper
from dgq.quant.kvquanter import kvquant
from dgq.utils.modelutils import find_layers, move_embed, get_blocks | 8,274 |
__all__ = ["quant_sequential"]
def set_quant_state(module, actq, wtq):
for mod in module.modules():
if isinstance(mod, QuantLinear):
mod.setquant(actq, wtq)
@torch.no_grad()
def PTQ(model, enc,
qconfig,
nsamples=128, seqlen=2048):
dev = "cuda:0"
layers = get_blocks(model)
layer_kwargs = {}
cache={'i': 0}
layers[0] = layers[0].cuda()
move_embed(model, dev)
dtype = next(iter(model.parameters())).dtype
inps = torch.zeros((nsamples, seqlen, model.config.hidden_size), dtype=dtype, device=dev)
outs = torch.zeros_like(inps)
class Catcher(nn.Module):
def __init__(self, module):
super().__init__()
self.module = module
def forward(self, inp, **kwargs):
inps[cache['i']] = inp
cache['i'] += 1
layer_kwargs.update(kwargs)
raise ValueError
layers[0] = Catcher(layers[0])
for batch in enc:
try:
model(batch[0].to(dev))
except ValueError:
pass
del enc
layers[0] = layers[0].module # restore
# inps = inps[0]
layers[0] = layers[0].cpu()
move_embed(model, "cpu")
for i in range(len(layers)):
print(i)
layer = layers[i].to(dev)
full = find_layers(layer, [QuantLinear])
sequential = [list(full.keys())]
set_quant_state(layer, False, False)
|
__all__ = ["quant_sequential"]
def set_quant_state(module, actq, wtq):
for mod in module.modules():
if isinstance(mod, QuantLinear):
mod.setquant(actq, wtq)
@torch.no_grad()
def PTQ(model, enc,
qconfig,
nsamples=128, seqlen=2048):
dev = "cuda:0"
layers = get_blocks(model)
layer_kwargs = {}
cache={'i': 0}
layers[0] = layers[0].cuda()
move_embed(model, dev)
dtype = next(iter(model.parameters())).dtype
inps = torch.zeros((nsamples, seqlen, model.config.hidden_size), dtype=dtype, device=dev)
outs = torch.zeros_like(inps)
class Catcher(nn.Module):
def __init__(self, module):
super().__init__()
self.module = module
def forward(self, inp, **kwargs):
inps[cache['i']] = inp
cache['i'] += 1
layer_kwargs.update(kwargs)
raise ValueError
layers[0] = Catcher(layers[0])
for batch in enc:
try:
model(batch[0].to(dev))
except ValueError:
pass
del enc
layers[0] = layers[0].module # restore
# inps = inps[0]
layers[0] = layers[0].cpu()
move_embed(model, "cpu")
for i in range(len(layers)):
print(i)
layer = layers[i].to(dev)
full = find_layers(layer, [QuantLinear])
sequential = [list(full.keys())]
set_quant_state(layer, False, False) | prepare_hook(layer, inps, qconfig, layer_kwargs) | 0 | 2023-11-01 13:45:16+00:00 | 12k |
m4rkw/monzo-utils | monzo_utils/lib/monzo_sync.py | [
{
"identifier": "Config",
"path": "monzo_utils/lib/config.py",
"snippet": "class Config(metaclass=Singleton):\n\n def __init__(self, config=None, config_path=None):\n if config_path is None:\n homedir = pwd.getpwuid(os.getuid()).pw_dir\n config_path = f\"{homedir}/.monzo\"\n\n if not os.path.exists(config_path):\n os.mkdir(config_path, 0o755)\n\n self.config_file = f\"{config_path}/config.yaml\"\n\n if config:\n self.config = config\n else:\n if not os.path.exists(self.config_file):\n sys.stderr.write(f\"config file not found: {self.config_file}, run setup first.\\n\")\n sys.exit(1)\n\n self.config = yaml.safe_load(open(self.config_file).read())\n\n\n def __getattr__(self, name):\n if name in self.config:\n return self.config[name]\n\n return object.__getattribute__(self, name)\n\n\n def set(self, key, value):\n self.config[key] = value\n\n\n @property\n def keys(self):\n return self.config.keys()\n\n\n def save(self):\n with open(self.config_file, 'w') as f:\n f.write(yaml.dump(self.config))"
},
{
"identifier": "DB",
"path": "monzo_utils/lib/db.py",
"snippet": "class DB(metaclass=Singleton):\n\n def __init__(self, db_config=None, config_path=None):\n if db_config:\n self.config = db_config\n else:\n self.config = Config(None, config_path).db\n\n self.driver = getattr(importlib.import_module(f\"monzo_utils.lib.db_driver.{self.config['driver']}\"), self.config['driver'])(self.config)\n\n self.columns = {}\n\n\n def __getattr__(self, name):\n match = re.match('^find_([\\w]+)_by_(.*?)$', name)\n\n if match:\n table = match.group(1)\n\n if table[0:4] == 'all_':\n table = table[4:]\n find_all = True\n else:\n find_all = False\n\n fields = match.group(2).split('_and_')\n\n def find_object_by_fields(*args, **kwargs):\n sql = \"select * from `\" + table + \"` where (\"\n\n sql_args = []\n\n for i in range(0, len(fields)):\n if i >0:\n sql += \" and \"\n\n if type(args[i]) == list:\n sql += \"(\"\n for j in range(0, len(args[i])):\n if j >0:\n sql += \" or \"\n\n if 'search' in kwargs and type(kwargs['search']) == list and fields[i] in kwargs['search']:\n sql += f\"`{fields[i]}` like %s\"\n sql_args.append('%' + args[i][j] + '%')\n else:\n sql += f\"`{fields[i]}` = %s\"\n sql_args.append(args[i][j])\n\n sql += \")\"\n else:\n if 'search' in kwargs and type(kwargs['search']) == list and fields[i] in kwargs['search']:\n sql += \"`\" + fields[i] + \"` like %s\"\n sql_args.append('%' + args[i] + '%')\n else:\n sql += \"`\" + fields[i] + \"` = %s\"\n sql_args.append(args[i])\n\n sql += \")\"\n\n if 'where' in kwargs:\n for where_clause in kwargs['where']:\n sql += f\" and {where_clause['clause']}\"\n\n if 'params' in where_clause:\n sql_args += where_clause['params']\n\n if 'orderby' in kwargs:\n sql += f\" order by {kwargs['orderby']}\"\n\n if 'orderdir' in kwargs:\n sql += f\" {kwargs['orderdir']}\"\n\n if 'limit' in kwargs:\n sql += f\" limit {kwargs['limit']}\"\n\n if find_all:\n return self.query(sql, sql_args)\n else:\n return self.one(sql, sql_args)\n\n return find_object_by_fields\n else:\n print(\"DB class method missing: %s\" % (name))\n sys.exit(1)\n\n\n def json_params(self, params):\n json_params = []\n\n for param in params:\n if type(param) == datetime.date:\n json_params.append(param.strftime('%Y-%M-%d'))\n elif type(param) == datetime.datetime:\n json_params.append(param.strftime('%Y-%M-%d %H:%M:%S'))\n else:\n json_params.append(param)\n\n return json_params\n\n\n def query(self, sql, params=[]):\n if 'DEBUG' in os.environ and os.environ['DEBUG'] == '1':\n print(\"SQL: %s\" % (sql))\n print(\"PARAMS: %s\" % (json.dumps(self.json_params(params),indent=4)))\n\n result = self.driver.query(sql, params)\n\n if type(result) == list:\n rows = []\n\n for row in result:\n rows.append(self.fix_dates(row))\n\n result = rows\n\n return result\n\n\n def fix_dates(self, row):\n fixed_row = {}\n\n for key in row:\n if type(row[key]) == str:\n m = re.match('^([\\d]{4})-([\\d]{2})-([\\d]{2})$', row[key])\n\n if m:\n fixed_row[key] = datetime.date(int(m.group(1)), int(m.group(2)), int(m.group(3)))\n continue\n\n m = re.match('^([\\d]{4})-([\\d]{2})-([\\d]{2}) ([\\d]{2}):([\\d]{2}):([\\d]{2})$', row[key])\n\n if m:\n fixed_row[key] = datetime.datetime(int(m.group(1)), int(m.group(2)), int(m.group(3)), int(m.group(4)), int(m.group(5)), int(m.group(6)))\n continue\n\n fixed_row[key] = row[key]\n\n return fixed_row\n\n\n def one(self, sql, params=[]):\n rows = self.query(sql, params)\n\n if len(rows) >0:\n return rows[0]\n\n return False\n\n\n def find(self, table):\n self.query_table = table\n self.sel = []\n self.whereClauses = []\n self.whereParams = 
[]\n self.andWhereClauses = []\n self._orderBy = None\n self._orderDir = None\n self._join = []\n self._leftJoin = []\n self._groupBy = None\n\n return self\n\n\n def select(self, select):\n self.sel.append(select)\n\n return self\n\n\n def where(self, where, whereParams):\n self.whereClauses.append(where)\n self.whereParams += whereParams\n\n return self\n\n\n def andWhere(self, where, whereParams):\n self.andWhereClauses.append(where)\n self.whereParams += whereParams\n\n return self\n\n\n def orderBy(self, field, direction='asc'):\n self._orderBy = field\n self._orderDir = direction\n\n return self\n\n\n def join(self, join_table, join_left_col, join_right_col=None):\n if join_right_col:\n self._join.append({\n 'table': join_table,\n 'join_left_col': join_left_col,\n 'join_right_col': join_right_col\n })\n else:\n self._join.append({\n 'table': join_table,\n 'clause': join_left_col\n })\n\n return self\n\n\n def leftJoin(self, join_table, join_left_col, join_right_col, where=None):\n self._leftJoin.append({\n 'table': join_table,\n 'join_left_col': join_left_col,\n 'join_right_col': join_right_col,\n 'where': where\n })\n\n return self\n\n\n def orWhere(self, whereClause, whereParams=[]):\n self.whereType = 'or'\n\n return self.where(whereClause, whereParams)\n\n\n def groupBy(self, groupBy):\n self._groupBy = groupBy\n\n return self\n\n\n def prepare(self):\n if self.sel == []:\n select = '*'\n else:\n select = ''\n\n for i in range(0, len(self.sel)):\n if i >0:\n select += ','\n select += f\"{self.sel[i]}\"\n\n sql = \"select \" + select + \" from `\" + self.query_table + \"`\"\n\n for join in self._join:\n sql += \" join `\" + join['table'] + \"` on \"\n\n if 'clause' in join:\n sql += join['clause']\n else:\n sql += join['join_left_col'] + \" = \" + join['join_right_col']\n\n for join in self._leftJoin:\n sql += \" left join `\" + join['table'] + \"` on \"\n\n if 'clause' in join:\n sql += join['clause']\n else:\n sql += join['join_left_col'] + \" = \" + join['join_right_col']\n\n if len(self.whereClauses) >0:\n sql += \" where (\"\n\n for i in range(0, len(self.whereClauses)):\n if i >0:\n sql += \" or \"\n sql += self.whereClauses[i]\n\n sql += \")\"\n\n for i in range(0, len(self.andWhereClauses)):\n sql += \" and (\" + self.andWhereClauses[i] + \") \"\n\n if self._groupBy:\n sql += \" group by \" + self._groupBy\n\n if self._orderBy:\n sql += \" order by \"\n order_by_fields = self._orderBy.split(',')\n\n for i in range(0, len(order_by_fields)):\n if i >0:\n sql += \",\"\n sql += f\" `{order_by_fields[i].strip()}`\"\n\n if self._orderDir:\n sql += \" \" + self._orderDir\n\n return sql\n\n\n def getone(self):\n sql = self.prepare() + \" limit 1\"\n\n return self.one(sql, self.whereParams)\n\n\n def getall(self):\n rows = []\n\n for row in self.query(self.prepare(), self.whereParams):\n rows.append(row)\n\n return rows\n\n\n def get_raw_query(self):\n sql = self.prepare()\n\n raw_sql = ''\n\n n = 0\n skip = False\n\n for i in range(0, len(sql)):\n if skip:\n skip = False\n continue\n\n if sql[i:i+2] == '%s':\n raw_sql += \"'\" + self.whereParams[n] + \"'\"\n n += 1\n skip = True\n else:\n raw_sql += sql[i]\n\n return raw_sql\n\n\n def update(self, table, _id, data):\n if table not in self.columns:\n self.columns[table] = self.driver.get_columns(table, exclude=['id'])\n\n sql = f\"update `{table}` set\"\n params = []\n\n for i in range(0, len(self.columns[table])):\n if i >0:\n sql += \", \"\n\n sql += f\" `{self.columns[table][i]}` = %s\"\n 
params.append(data[self.columns[table][i]] if self.columns[table][i] in data else None)\n\n sql += f\" where `id` = %s\"\n params.append(_id)\n\n self.query(sql, params)\n\n\n def create(self, table, data):\n if table not in self.columns:\n self.columns[table] = self.driver.get_columns(table, exclude=['id'])\n\n sql = f\"insert into `{table}` (\"\n params = []\n\n for i in range(0, len(self.columns[table])):\n if i >0:\n sql += \",\"\n\n sql += f\"`{self.columns[table][i]}`\"\n params.append(data[self.columns[table][i]] if self.columns[table][i] in data else None)\n\n sql += f\") VALUES (\"\n\n for i in range(0, len(self.columns[table])):\n if i >0:\n sql += \",\"\n sql += \"%s\"\n\n sql += \")\"\n\n return self.query(sql, params)"
},
{
"identifier": "Log",
"path": "monzo_utils/lib/log.py",
"snippet": "class Log(metaclass=Singleton):\n\n def __init__(self):\n homedir = pwd.getpwuid(os.getuid()).pw_dir\n self.logfile = f\"{homedir}/.monzo/logfile\"\n\n\n def info(self, message):\n self.log(inspect.currentframe().f_code.co_name, message)\n\n\n def warning(self, message):\n self.log(inspect.currentframe().f_code.co_name, message)\n\n\n def error(self, message):\n self.log(inspect.currentframe().f_code.co_name, message)\n\n\n def fatal(self, message):\n self.log(inspect.currentframe().f_code.co_name, message)\n\n\n def log(self, level, message):\n log_line = \"%s: %s - %s\\n\" % (\n datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),\n level.upper(),\n message\n )\n\n with open(self.logfile, 'a+') as f:\n f.write(log_line)\n\n if sys.stdin.isatty():\n if level == 'info':\n sys.stdout.write(log_line)\n sys.stdout.flush()\n else:\n sys.stderr.write(log_line)\n sys.stderr.flush()\n\n self.rotate()\n\n\n def rotate(self):\n if os.stat(self.logfile).st_size >= MAX_SIZE_MB * 1024 * 1024:\n for i in reversed(list(range(1, MAX_FILES))):\n filename = '%s.%d' % (self.logfile, i)\n next_filename = '%s.%d' % (self.logfile, i+1)\n\n if i+1 == MAX_FILES:\n if os.path.exists(filename):\n os.remove(filename)\n else:\n if os.path.exists(filename):\n os.rename(filename, next_filename)\n\n if os.path.exists(self.logfile):\n os.rename(self.logfile, '%s.1' % (self.logfile))"
},
{
"identifier": "MonzoAPI",
"path": "monzo_utils/lib/monzo_api.py",
"snippet": "class MonzoAPI:\n\n def __init__(self):\n homedir = pwd.getpwuid(os.getuid()).pw_dir\n monzo_dir = f\"{homedir}/.monzo\"\n self.token_file = f\"{monzo_dir}/tokens\"\n\n self.load_tokens()\n\n self.client = self.get_client()\n\n\n def load_tokens(self):\n if os.path.exists(self.token_file):\n data = json.loads(open(self.token_file).read())\n\n self.access_token = data['access_token']\n self.access_token_expiry = data['expiry']\n self.refresh_token = data['refresh_token']\n else:\n self.authenticate()\n\n \n def authenticate(self):\n client = Authentication(\n client_id=Config().client_id,\n client_secret=Config().client_secret,\n redirect_url=Config().redirect_url\n )\n\n if not sys.stdout.isatty():\n if 'email' in Config().keys:\n os.system(\"echo '%s'| mail -s 'Monzo auth required' '%s'\" % (client.authentication_url, Config().email))\n Log().error('Authentication required, unable to sync.')\n sys.exit(1)\n\n print(\"\\nAuthentication required, check email or visit:\\n\")\n print(client.authentication_url)\n\n if os.path.exists(Config().oauth_token_file):\n os.remove(Config().oauth_token_file)\n\n while not os.path.exists(Config().oauth_token_file):\n time.sleep(1)\n\n data = json.loads(open(Config().oauth_token_file).read().rstrip())\n\n os.remove(Config().oauth_token_file)\n\n try:\n client.authenticate(authorization_token=data['token'], state_token=data['state'])\n except MonzoAuthenticationError:\n Log().error('State code does not match')\n exit(1)\n except MonzoServerError:\n Log().error('Monzo Server Error')\n exit(1)\n\n self.access_token = client.access_token\n self.access_token_expiry = client.access_token_expiry\n self.refresh_token = client.refresh_token\n\n self.save_tokens()\n\n self.client = self.get_client()\n\n print(\"\\nwaiting for authorisation...\")\n\n while 1:\n time.sleep(1)\n\n try:\n self.accounts()\n break\n except MonzoPermissionsError:\n pass\n\n\n def save_tokens(self):\n with open(self.token_file,'w') as f:\n f.write(json.dumps({\n 'access_token': self.access_token,\n 'expiry': self.access_token_expiry,\n 'refresh_token': self.refresh_token\n }))\n\n\n def get_client(self):\n return Authentication(\n client_id=Config().client_id,\n client_secret=Config().client_secret,\n redirect_url=Config().redirect_url,\n access_token=self.access_token,\n access_token_expiry=self.access_token_expiry,\n refresh_token=self.refresh_token\n )\n\n\n def account(self, account_id):\n return monzo.endpoints.account.Account.fetch(self.client, account_id=account_id)\n\n\n def accounts(self, first=True):\n for i in range(0, 3):\n try:\n accounts = monzo.endpoints.account.Account.fetch(self.client)\n\n self.update_tokens()\n\n return accounts\n\n except MonzoHTTPError:\n if first:\n if 'NO_AUTH' in os.environ:\n raise Exception(\"token expired\")\n\n self.authenticate()\n\n return self.accounts(False)\n\n Log().error('auth failed')\n sys.exit(1)\n except MonzoAuthenticationError:\n if first:\n self.authenticate()\n\n return self.accounts(False)\n\n Log().error(\"auth failed\")\n sys.exit(1)\n except MonzoServerError:\n Log().error(\"server error\")\n\n if i == 2:\n sys.exit(1)\n\n time.sleep(5)\n\n except TimeoutError:\n Log().error(\"timeout\")\n\n if i == 2:\n sys.exit(1)\n\n time.sleep(5)\n\n raise Exception(\"failed to retrieve accounts after 3 attempts\")\n\n\n def update_tokens(self):\n if self.access_token == self.client.access_token and \\\n self.access_token_expiry == self.client.access_token_expiry and \\\n self.refresh_token == self.client.refresh_token:\n 
return\n\n self.access_token = self.client.access_token\n self.access_token_expiry = self.client.access_token_expiry\n self.refresh_token = self.client.refresh_token\n\n self.save_tokens()\n\n\n def transactions(self, account_id, days=3):\n error = None\n\n now = datetime.datetime.utcnow()\n since = now - datetime.timedelta(days=days)\n\n for i in range(0, 3):\n try:\n return monzo.endpoints.transaction.Transaction.fetch(self.client, account_id=account_id, expand=['merchant'], since=since)\n except MonzoPermissionsError as e:\n raise e\n except Exception as e:\n error = str(e)\n\n if i != 2:\n time.sleep(5)\n else:\n raise e\n\n Log().error(\"failed to retrieve transactions: %s\" % (error))\n sys.exit(1)\n\n\n def pots(self, account_id, first=True):\n try:\n pots = monzo.endpoints.pot.Pot.fetch(self.client, account_id=account_id)\n except MonzoHTTPError:\n if first:\n if 'NO_AUTH' in os.environ:\n raise Exception(\"token expired\")\n\n self.authenticate()\n self.client = self.get_client()\n\n return self.pots(account_id, False)\n\n Log().error(\"auth failed\")\n sys.exit(1)\n except MonzoAuthenticationError:\n if first:\n self.authenticate()\n self.client = self.get_client()\n\n return self.pots(account_id, False)\n\n Log().error(\"auth failed\")\n sys.exit(1)\n except TimeoutError:\n Log().error(\"timeout\")\n sys.exit(1)\n\n return pots\n\n\n def withdraw_credit(self, account_id, pot, credit):\n self.load_tokens()\n\n self.client = self.get_client()\n\n pot = monzo.endpoints.pot.Pot.fetch_single(self.client, account_id=account_id, pot_id=pot.pot_id)\n\n dedupe_code = '%s_%s' % (\n pot.pot_id,\n datetime.datetime.now().strftime('%Y%m%d%H')\n )\n\n amount = round(credit * 100)\n\n for i in range(0, 3):\n try:\n monzo.endpoints.pot.Pot.withdraw(self.client, pot=pot, account_id=account_id, amount=amount, dedupe_id=dedupe_code)\n return True\n except Exception as e:\n print(\"failed to withdraw pot money: %s\" % (str(e)))\n\n if i <2:\n time.sleep(3)\n\n return False\n\n\n def deposit_to_pot(self, account_id, pot, shortfall):\n self.load_tokens()\n\n self.client = self.get_client()\n\n pot = monzo.endpoints.pot.Pot.fetch_single(self.client, account_id=account_id, pot_id=pot.pot_id)\n\n dedupe_code = '%s_%s' % (\n pot.pot_id,\n datetime.datetime.now().strftime('%Y%m%d%H')\n )\n\n amount = round(shortfall * 100)\n\n for i in range(0, 3):\n try:\n monzo.endpoints.pot.Pot.deposit(self.client, pot=pot, account_id=account_id, amount=amount, dedupe_id=dedupe_code)\n return True\n except Exception as e:\n print(\"failed to deposit pot money: %s\" % (str(e)))\n\n if i <2:\n time.sleep(3)\n\n return False"
},
{
"identifier": "Provider",
"path": "monzo_utils/model/provider.py",
"snippet": "class Provider(BaseModel):\n\n def accounts(self, orderby='name', orderdir='asc', limit=None, order=None):\n accounts = super().related('Account', 'provider_id', self.id, orderby, orderdir, limit)\n\n # return accounts in a specific order\n if order:\n sorted_accounts = []\n\n for account_name in order:\n for account in accounts:\n if account.name == account_name:\n sorted_accounts.append(account)\n break\n\n return sorted_accounts\n\n return accounts"
},
{
"identifier": "Account",
"path": "monzo_utils/model/account.py",
"snippet": "class Account(BaseModel):\n\n DISPLAY_KEYS = ['name','sortcode','account_no','balance','available']\n\n\n def __init__(self, attrs={}):\n super().__init__(attrs)\n\n\n def transactions(self, orderby='created_at', orderdir='asc', limit=None):\n return super().related('Transaction', 'account_id', self.id, orderby, orderdir, limit)\n\n\n def pots(self, orderby='name', orderdir='asc', limit=None):\n return super().related('Pot', 'account_id', self.id, orderby, orderdir, limit, deleted=0)\n\n\n @property\n def __dict__(self):\n attrs = {'attrs': self.attrs}\n\n for pot in self.pots(orderby='name'):\n attrs['attrs'][pot.name] = pot.balance\n\n return attrs\n\n\n @property\n def keys(self):\n keys = []\n\n for key in self.DISPLAY_KEYS.copy():\n if '-t' in sys.argv and ((key == 'sortcode' and self.sortcode is None) or \\\n (key == 'account_no' and self.account_no is None)):\n continue\n\n keys.append(key)\n\n for pot in self.pots(orderby='name'):\n if pot.name not in keys:\n keys.append(pot.name)\n\n return keys\n\n\n def last_salary_transaction(self, description, payment_day, salary_minimum):\n return DB().find_transaction_by_account_id_and_declined_and_description(\n self.id,\n 0,\n description,\n orderby='created_at',\n orderdir='desc',\n limit=1,\n search=['description'],\n where=[{\n 'clause': 'money_in >= %s',\n 'params': [salary_minimum]\n }]\n )"
},
{
"identifier": "Merchant",
"path": "monzo_utils/model/merchant.py",
"snippet": "class Merchant(BaseModel):\n\n pass"
},
{
"identifier": "MerchantAddress",
"path": "monzo_utils/model/merchant_address.py",
"snippet": "class MerchantAddress(BaseModel):\n\n pass"
},
{
"identifier": "Pot",
"path": "monzo_utils/model/pot.py",
"snippet": "class Pot(BaseModel):\n pass"
},
{
"identifier": "Transaction",
"path": "monzo_utils/model/transaction.py",
"snippet": "class Transaction(BaseModel):\n\n DISPLAY_KEYS = ['date','type','money_in','money_out','pending','description']\n RELATIONSHIPS = {\n 'account': ['`transaction`.account_id', 'account.id'],\n 'transaction_metadata': ['`transaction`.id', 'transaction_metadata.transaction_id'],\n 'pot': ['`transaction`.pot_id', 'pot.id']\n }"
},
{
"identifier": "Counterparty",
"path": "monzo_utils/model/counterparty.py",
"snippet": "class Counterparty(BaseModel):\n\n pass"
},
{
"identifier": "TransactionMetadata",
"path": "monzo_utils/model/transaction_metadata.py",
"snippet": "class TransactionMetadata(BaseModel):\n\n pass"
}
] | import os
import sys
import time
import json
import yaml
import re
import datetime
import pwd
from pathlib import Path
from monzo_utils.lib.config import Config
from monzo_utils.lib.db import DB
from monzo_utils.lib.log import Log
from monzo_utils.lib.monzo_api import MonzoAPI
from monzo_utils.model.provider import Provider
from monzo_utils.model.account import Account
from monzo_utils.model.merchant import Merchant
from monzo_utils.model.merchant_address import MerchantAddress
from monzo_utils.model.pot import Pot
from monzo_utils.model.transaction import Transaction
from monzo_utils.model.counterparty import Counterparty
from monzo_utils.model.transaction_metadata import TransactionMetadata
from monzo.exceptions import MonzoAuthenticationError, MonzoServerError, MonzoHTTPError, MonzoPermissionsError | 8,563 |
def get_or_create_counterparty(self, mo_counterparty):
counterparty = Counterparty().find_by_user_id(mo_counterparty['user_id'])
if not counterparty:
Log().info(f"creating counterparty: {mo_counterparty['name']} ({mo_counterparty['user_id']})")
counterparty = Counterparty()
counterparty.update(mo_counterparty)
counterparty.save()
return counterparty
def get_provider(self):
provider = Provider().find_by_name(PROVIDER)
if not provider:
Log().info(f"creating provider: {PROVIDER}")
provider = Provider()
provider.name = PROVIDER
provider.save()
return provider
def sync(self, days=3):
mo_accounts = self.api.accounts()
accounts = []
for mo_account in mo_accounts:
if 'monzoflexbackingloan' in mo_account.description:
continue
if mo_account.account_id not in Config().accounts:
continue
account = self.get_or_create_account(mo_account, Config().accounts[mo_account.account_id])
Log().info(f"syncing account: {account.name}")
Log().info(f"getting pots for account: {account.name}")
mo_pots = self.api.pots(account_id=account.account_id)
pot_lookup = {}
for mo_pot in mo_pots:
pot = Pot().find_by_account_id_and_pot_id(account.id, mo_pot.pot_id)
if not pot:
Log().info(f"creating pot: {mo_pot.name}")
pot = Pot()
pot.account_id = account.id
pot.pot_id = mo_pot.pot_id
pot.name = mo_pot.name
pot.balance = mo_pot.balance / 100
pot.deleted = mo_pot.deleted
pot.save()
pot_lookup[pot.pot_id] = pot
try:
Log().info(f'syncing transactions for account: {account.name}')
mo_transactions = self.api.transactions(account.account_id, days=days)
except MonzoPermissionsError as e:
Log().error(f"permissions error: {str(e)}")
if sys.stdin.isatty():
Log().info("Need to refresh permissions in the app, Settings -> Privacy & Security -> Manage Apps")
else:
os.system("echo 'Need to refresh permissions in the app, Settings -> Privacy & Security -> Manage Apps'| mail -s 'Monzo permission refresh required' '%s'" % (Config().email))
sys.exit(1)
except MonzoServerError as e:
Log().error(f"server error: {str(e)}")
continue
seen = {}
total = 0
pot_account_ids = {}
for mo_transaction in mo_transactions:
transaction = self.add_transaction(account, mo_transaction, pot_account_ids)
seen[transaction.id] = 1
total += 1
seen = {}
for pot_account_id in pot_account_ids:
if pot_lookup[pot_account_ids[pot_account_id]].deleted:
continue
Log().info(f"syncing transactions for pot: {pot_lookup[pot_account_ids[pot_account_id]].name}")
mo_pot_transactions = self.api.transactions(pot_account_id, days=days)
for mo_pot_transaction in mo_pot_transactions:
transaction = self.add_transaction(account, mo_pot_transaction, pot_account_ids, pot_lookup[pot_account_ids[pot_account_id]].id)
seen[transaction.id] = 1
total += 1
Log().info(f"account {account.name} synced {total} transactions")
if 'touch_file' in Config().keys:
Path(Config().touch_file).touch()
def get_or_create_account(self, mo_account, account_config):
| #!/usr/bin/env python3
PROVIDER = 'Monzo'
class MonzoSync:
def __init__(self, no_init=False):
homedir = pwd.getpwuid(os.getuid()).pw_dir
self.monzo_dir = f"{homedir}/.monzo"
if not os.path.exists(self.monzo_dir):
os.mkdir(self.monzo_dir, 0o755)
self.config_file = f"{self.monzo_dir}/config.yaml"
self.token_file = f"{self.monzo_dir}/tokens"
if no_init:
return
Config()
self.api = MonzoAPI()
self.db = DB()
self.provider = self.get_provider()
def setup(self):
print("\n========================")
print("Monzo Utils Setup Wizard")
print("========================\n")
print("Requirements:\n")
print("1) You must have created an OAuth client here: https://developers.monzo.com/apps/new")
print(" Note: confidentiality must be set to Confidential\n")
print("2) The database (MySQL/MariaDB or SQLite3) must be created and ready (see README.md)\n")
print("3) The machine we are running on must be reachable on a known port from the internet.")
print(" The webserver must be configured with the CGI script to capture the oauth tokens.")
print(" This is only required during setup for the initial oauth authentication flow.")
print(" Once this is complete and the tokens are stored this can be removed.\n")
self.prompt_continue()
if os.path.exists(self.config_file):
sys.stdout.write(f"\nWARNING! Config file already exists at: {self.config_file}\n\n")
sys.stdout.write("If we continue this will be erased.\n\n")
self.prompt_continue()
sys.stdout.write("\n")
sys.stdout.write("Which database do you want to use?\n\n")
sys.stdout.write("1. MySQL/MariaDB (recommended)\n")
sys.stdout.write("2. SQLite3\n\n")
while 1:
db_backend = self.prompt_input('DB choice')
if db_backend in ['1','2']:
break
if db_backend == '1':
mysql_host = self.prompt_input('MySQL host', '127.0.0.1')
mysql_port = self.prompt_input('MySQL port', '3306', False, 'int')
mysql_db = self.prompt_input('MySQL database', 'monzo')
mysql_user = self.prompt_input('MySQL username', 'monzo')
mysql_password = self.prompt_input('MySQL password', 'monzo')
db = {
'driver': 'mysql',
'host': mysql_host,
'port': mysql_port,
'user': mysql_user,
'password': mysql_password,
'database': mysql_db
}
else:
db = {
'driver': 'sqlite',
'path': f"{self.monzo_dir}/data.db"
}
self.test_db_access(db)
sys.stdout.write("\n")
client_id = self.prompt_input('Monzo Client ID')
client_secret = self.prompt_input('Monzo Client Secret')
redirect_url = self.prompt_input('Monzo Client redirect URL')
sys.stdout.write("Enter the path where the CGI script will store the token file:\n")
token_path = self.prompt_input('Token path', '/var/www/monzo/token')
sys.stdout.write("\nIf the auth token expires or stops working the sync script can send\n")
sys.stdout.write("an email to notify you. Enter this email below or leave blank if not required.\n")
email = self.prompt_input('Email', None, True)
Config({
'oauth_token_file': token_path,
'db': db,
'client_id': client_id,
'client_secret': client_secret,
'redirect_url': redirect_url,
'email': email
})
Config().save()
self.__init__()
self.scan_accounts()
sys.stdout.write("Performing initial transaction sync ...\n\n")
sys.stdout.flush()
self.sync(days=89)
sys.stdout.write("\nSetup complete!\n\n")
def scan_accounts(self):
sys.stdout.write("\nFinding accounts...\n\n")
accounts = self.api.accounts()
found_accounts = []
for account in accounts:
if account.balance is None:
continue
if 'accounts' in Config().keys and account.account_id in Config().accounts:
continue
if 'Joint account between' in account.description:
account_type = 'Joint Current Account'
else:
account_type = account.account_type()
print(f" id: {account.account_id}")
print(f" balance: £{account.balance.balance/100:.2f}")
print(f"description: {account.description}")
print(f" type: {account_type}")
sys.stdout.write("\n")
resp = self.prompt_continue('Sync this account? [y/N] ', True)
if resp == 'n':
continue
account_name = self.prompt_input('name for this account')
if 'accounts' not in Config().keys:
Config().set('accounts', {})
Config().accounts[account.account_id] = {
'name': account_name
}
if account_type == 'Flex':
Config().accounts[account.account_id]['credit_limit'] = self.prompt_input('credit limit', None, False, 'int')
else:
Config().accounts[account.account_id]['sortcode'] = self.prompt_input('sort code')
Config().accounts[account.account_id]['account_no'] = self.prompt_input('account no')
sys.stdout.write("\n")
Config().save()
def prompt_continue(self, prompt='Continue? [y/N] ', boolean=False):
while 1:
sys.stdout.write(prompt)
sys.stdout.flush()
resp = sys.stdin.readline().rstrip().lower()
if resp == 'n':
if boolean:
return False
print("\nStopping at user request.\n")
sys.exit(0)
if resp == 'y':
break
return True
def prompt_input(self, prompt, default=None, none_allowed=False, validation=None):
while 1:
if default is None:
sys.stdout.write(f"Enter {prompt}: ")
else:
sys.stdout.write(f"Enter {prompt} [{default}]: ")
sys.stdout.flush()
resp = sys.stdin.readline().rstrip()
if len(resp) == 0:
if default is None and none_allowed is False:
continue
resp = default
if validation == 'int' and resp is not None:
try:
resp = int(resp)
except:
sys.stderr.write("\nerror: value must be an integer\n\n")
sys.stderr.flush()
continue
return resp
def test_db_access(self, db_config):
try:
db = DB(db_config)
except Exception as e:
Log().error(f"failed to initialise the database: {str(e)}")
sys.exit(1)
try:
if db_config['driver'] == 'mysql':
resp = db.query("show tables")
else:
resp = db.query("pragma table_info(`provider`)")
except Exception as e:
Log().error(f"Failed to connect to the database: {str(e)}")
sys.exit(1)
def get_or_create_merchant(self, mo_merchant):
if 'metadata' in mo_merchant and 'website' in mo_merchant['metadata']:
website = mo_merchant['metadata']['website']
else:
website = None
merchant_id = mo_merchant['id']
mo_merchant['merchant_id'] = mo_merchant['id']
mo_merchant.pop('id')
mo_address = mo_merchant.pop('address')
merchant = Merchant().find_by_merchant_id(merchant_id)
if not merchant:
Log().info(f"creating merchant: {mo_merchant['name']} ({mo_merchant['merchant_id']})")
merchant = Merchant()
merchant.update(mo_merchant)
merchant.save()
mo_address['merchant_id'] = merchant.id
address = MerchantAddress().find_by_merchant_id(merchant.id)
if not address:
address = MerchantAddress()
address.update(mo_address)
address.save()
return merchant
def sanitise(self, string):
return re.sub('[\s\t]+', ' ', string)
def add_transaction(self, account, mo_transaction, pot_account_ids, pot_id=None):
counterparty = None
if mo_transaction.counterparty:
counterparty = self.get_or_create_counterparty(mo_transaction.counterparty)
if counterparty.name != mo_transaction.description:
description = self.sanitise('%s %s' % (counterparty.name, mo_transaction.description))
else:
description = mo_transaction.description
else:
description = self.sanitise(mo_transaction.description)
amount = mo_transaction.amount
if amount >0:
money_in = amount / 100
money_out = None
verb = 'from'
_type = 'credit'
else:
money_in = None
money_out = 0 - (amount / 100)
verb = 'to'
_type = 'debit'
if pot_id:
where = [{
'clause': 'pot_id = %s',
'params': [pot_id]
}]
else:
where = [{
'clause': 'pot_id is null'
}]
transaction = Transaction().find_by_account_id_and_transaction_id(
account.id,
mo_transaction.transaction_id,
where=where
)
date = mo_transaction.created.strftime('%Y-%m-%d')
if not transaction:
Log().info(f"creating transaction: {account.name} {date} -{money_in} +{money_out} {description}")
transaction = Transaction()
if pot_id is None and mo_transaction.metadata and 'pot_account_id' in mo_transaction.metadata and mo_transaction.metadata['pot_account_id'] not in pot_account_ids:
pot_account_ids[mo_transaction.metadata['pot_account_id']] = mo_transaction.metadata['pot_id']
if mo_transaction.merchant:
merchant = self.get_or_create_merchant(mo_transaction.merchant)
else:
merchant = None
transaction.update({
'account_id': account.id,
'transaction_id': mo_transaction.transaction_id,
'date': date,
'type': _type,
'description': description,
'ref': mo_transaction.description,
'money_in': money_in,
'money_out': money_out,
'pending': mo_transaction.amount_is_pending,
'created_at': mo_transaction.created,
'updated_at': mo_transaction.updated,
'currency': mo_transaction.currency,
'local_currency': mo_transaction.local_currency,
'local_amount': mo_transaction.local_amount,
'merchant_id': merchant.id if merchant else None,
'notes': mo_transaction.notes,
'originator': mo_transaction.originator,
'scheme': mo_transaction.scheme,
'settled': mo_transaction.settled,
'declined': 1 if len(mo_transaction.decline_reason) >0 else 0,
'decline_reason': mo_transaction.decline_reason,
'counterparty_id': counterparty.id if counterparty else None,
'pot_id': pot_id
})
transaction.save()
metadata = {}
if type(mo_transaction.atm_fees_detailed) == dict:
for key in mo_transaction.atm_fees_detailed:
metadata['atm_fees_detailed_%s' % (key)] = mo_transaction.atm_fees_detailed[key]
if type(mo_transaction.categories) == dict:
for key in mo_transaction.categories:
metadata['categories_%s' % (key)] = mo_transaction.categories[key]
if type(mo_transaction.fees) == dict:
for key in mo_transaction.fees:
metadata['fees_%s' % (key)] = mo_transaction.fees[key]
if type(mo_transaction.metadata) == dict:
for key in mo_transaction.metadata:
metadata['metadata_%s' % (key)] = mo_transaction.metadata[key]
for key in metadata:
transaction_metadata = TransactionMetadata().find_by_transaction_id_and_key(transaction.id, key)
if not transaction_metadata:
transaction_metadata = TransactionMetadata()
transaction_metadata.transaction_id = transaction.id
transaction_metadata.key = key
transaction_metadata.value = metadata[key]
transaction_metadata.save()
for transaction_metadata in TransactionMetadata().find_all_by_transaction_id(transaction.id):
if transaction_metadata.key not in metadata:
transaction_metadata.delete()
return transaction
def get_or_create_counterparty(self, mo_counterparty):
counterparty = Counterparty().find_by_user_id(mo_counterparty['user_id'])
if not counterparty:
Log().info(f"creating counterparty: {mo_counterparty['name']} ({mo_counterparty['user_id']})")
counterparty = Counterparty()
counterparty.update(mo_counterparty)
counterparty.save()
return counterparty
def get_provider(self):
provider = Provider().find_by_name(PROVIDER)
if not provider:
Log().info(f"creating provider: {PROVIDER}")
provider = Provider()
provider.name = PROVIDER
provider.save()
return provider
def sync(self, days=3):
mo_accounts = self.api.accounts()
accounts = []
for mo_account in mo_accounts:
if 'monzoflexbackingloan' in mo_account.description:
continue
if mo_account.account_id not in Config().accounts:
continue
account = self.get_or_create_account(mo_account, Config().accounts[mo_account.account_id])
Log().info(f"syncing account: {account.name}")
Log().info(f"getting pots for account: {account.name}")
mo_pots = self.api.pots(account_id=account.account_id)
pot_lookup = {}
for mo_pot in mo_pots:
pot = Pot().find_by_account_id_and_pot_id(account.id, mo_pot.pot_id)
if not pot:
Log().info(f"creating pot: {mo_pot.name}")
pot = Pot()
pot.account_id = account.id
pot.pot_id = mo_pot.pot_id
pot.name = mo_pot.name
pot.balance = mo_pot.balance / 100
pot.deleted = mo_pot.deleted
pot.save()
pot_lookup[pot.pot_id] = pot
try:
Log().info(f'syncing transactions for account: {account.name}')
mo_transactions = self.api.transactions(account.account_id, days=days)
except MonzoPermissionsError as e:
Log().error(f"permissions error: {str(e)}")
if sys.stdin.isatty():
Log().info("Need to refresh permissions in the app, Settings -> Privacy & Security -> Manage Apps")
else:
os.system("echo 'Need to refresh permissions in the app, Settings -> Privacy & Security -> Manage Apps'| mail -s 'Monzo permission refresh required' '%s'" % (Config().email))
sys.exit(1)
except MonzoServerError as e:
Log().error(f"server error: {str(e)}")
continue
seen = {}
total = 0
pot_account_ids = {}
for mo_transaction in mo_transactions:
transaction = self.add_transaction(account, mo_transaction, pot_account_ids)
seen[transaction.id] = 1
total += 1
seen = {}
for pot_account_id in pot_account_ids:
if pot_lookup[pot_account_ids[pot_account_id]].deleted:
continue
Log().info(f"syncing transactions for pot: {pot_lookup[pot_account_ids[pot_account_id]].name}")
mo_pot_transactions = self.api.transactions(pot_account_id, days=days)
for mo_pot_transaction in mo_pot_transactions:
transaction = self.add_transaction(account, mo_pot_transaction, pot_account_ids, pot_lookup[pot_account_ids[pot_account_id]].id)
seen[transaction.id] = 1
total += 1
Log().info(f"account {account.name} synced {total} transactions")
if 'touch_file' in Config().keys:
Path(Config().touch_file).touch()
def get_or_create_account(self, mo_account, account_config): | account = Account().find_by_provider_id_and_account_id(self.provider.id, mo_account.account_id) | 5 | 2023-11-05 12:48:18+00:00 | 12k |
WolfgangFahl/dcm | tests/test_api.py | [
{
"identifier": "CompetenceCmd",
"path": "dcm/dcm_cmd.py",
"snippet": "class CompetenceCmd(WebserverCmd):\n \"\"\"\n Command line for diagrams server\n \"\"\"\n\n def getArgParser(self, description: str, version_msg) -> ArgumentParser:\n \"\"\"\n override the default argparser call\n \"\"\"\n parser = super().getArgParser(description, version_msg)\n parser.add_argument(\n \"-v\",\n \"--verbose\",\n action=\"store_true\",\n help=\"show verbose output [default: %(default)s]\",\n )\n parser.add_argument(\n \"-rp\",\n \"--root_path\",\n default=DynamicCompetenceMap.examples_path(),\n help=\"path to example dcm definition files [default: %(default)s]\",\n )\n return parser"
},
{
"identifier": "CompetenceTree",
"path": "dcm/dcm_core.py",
"snippet": "class CompetenceTree(CompetenceElement, YamlAble[\"CompetenceTree\"]):\n \"\"\"\n Represents the entire structure of competencies, including various aspects and levels.\n\n Attributes:\n competence_aspects (List[CompetenceAspect]): A list of CompetenceAspect objects.\n competence_levels (List[CompetenceLevel]): A list of CompetenceLevel objects representing the different levels in the competence hierarchy.\n element_names (Dict[str, str]): A dictionary holding the names for tree, aspects, facets, and levels. The key is the type (\"tree\", \"aspect\", \"facet\", \"level\").\n \"\"\"\n\n lookup_url: Optional[str] = None\n aspects: List[CompetenceAspect] = field(default_factory=list)\n levels: List[CompetenceLevel] = field(default_factory=list)\n element_names: Dict[str, str] = field(default_factory=dict)\n\n def __post_init__(self):\n \"\"\"\n initalize the path variables of my hierarchy\n \"\"\"\n super().__post_init__()\n self.path = self.id\n # Loop through each competence aspect and set their paths and parent references\n for aspect in self.aspects:\n aspect.competence_tree = self\n aspect.path = f\"{self.id}/{aspect.id}\"\n for area in aspect.areas:\n area.competence_tree = self\n area.aspect = aspect\n area.path = f\"{self.id}/{aspect.id}/{area.id}\"\n for facet in area.facets:\n facet.competence_tree = self\n facet.area = area\n facet.path = f\"{self.id}/{aspect.id}/{area.id}/{facet.id}\"\n\n @classmethod\n def required_keys(cls) -> Tuple:\n keys = {\"name\", \"id\", \"url\", \"description\", \"element_names\"}\n return keys\n\n def lookup_by_path(\n self, path: str, lenient: bool = True\n ) -> Optional[CompetenceElement]:\n \"\"\"\n Look up and return a competence element (tree,aspect of facet)\n based on the given path.\n\n The path is expected to be in the format \"tree_id/aspect_id/facet_id\".\n This method parses the path and retrieves the corresponding competence aspect or facet.\n\n Args:\n path (str): The path in the format \"tree_id/aspect_id/facet_id\".\n\n lenient(bool): if not lenient raise Exceptions for invalid paths and ids\n Returns:\n Optional[CompetenceElement]: The competence aspect or facet corresponding to the given path.\n \"\"\"\n\n def handle_error(msg):\n if not lenient:\n raise ValueError(msg)\n\n parts = path.split(\"/\")\n if len(parts) < 1:\n return None\n\n tree_id = parts[0]\n if tree_id != self.id:\n handle_error(f\"invalid tree_id for lookup {tree_id}\")\n return None\n if len(parts) == 1:\n return self\n if len(parts) > 1:\n aspect_id = parts[1]\n # Retrieve the aspect\n aspect = next((aspect for aspect in self.aspects if aspect.id==aspect_id), None)\n if aspect:\n if len(parts) == 2:\n return aspect\n if len(parts) > 2:\n area_id = parts[2]\n area = next((area for area in aspect.areas if area.id == area_id), None)\n if area:\n if len(parts) == 3:\n return area\n if len(parts) > 3:\n facet_id = parts[3]\n facet = next(\n (facet for facet in area.facets if facet.id == facet_id), None\n )\n if facet:\n return facet\n handle_error(f\"invalid path for lookup {path}\")\n return None\n\n def to_pretty_json(self):\n \"\"\"\n Converts the CompetenceTree object to a pretty JSON string, handling null values.\n \"\"\"\n json_str = self.to_json()\n json_dict = json.loads(json_str)\n\n def remove_none_values(data):\n \"\"\"\n Recursively removes keys with None values from a dictionary, list, or nested structure.\n \"\"\"\n if isinstance(data, dict):\n return {\n k: remove_none_values(v) for k, v in data.items() if v is not None\n }\n elif 
isinstance(data, list):\n return [remove_none_values(item) for item in data]\n return data\n\n none_free_dict = remove_none_values(json_dict)\n null_free_json_str = json.dumps(none_free_dict, indent=2)\n return null_free_json_str\n\n def add_legend(self, svg: SVG) -> None:\n \"\"\"\n Add a legend to the SVG explaining the color codes for levels and aspects.\n Args:\n svg (SVG): The SVG object to which the legend will be added.\n \"\"\"\n # Starting x position for the legends, starting 10 pixels from the left edge\n x_start = 10\n # y position for the legends, starting 20 pixels from the bottom edge\n y = svg.config.total_height - svg.config.legend_height + 20\n # Width and height of each legend color box\n box_width, box_height = 30, 20\n # Padding between legend items and between the color box and the text\n padding = 5\n\n # Add the competence level legend\n level_items = [(level.color_code, level.name) for level in self.levels]\n svg.add_legend_column(\n level_items,\n self.element_names.get(\"level\", \"Level\"),\n x_start,\n y,\n box_width,\n box_height,\n )\n\n # Calculate the x position for the aspect legend based on the width of the level legend\n x_aspect_start = (\n x_start\n + box_width\n + padding\n + max(svg.get_text_width(level.name) for level in self.levels)\n + padding\n )\n\n # Add the competence aspect legend\n aspect_items = [(aspect.color_code, aspect.name) for aspect in self.aspects]\n svg.add_legend_column(\n aspect_items,\n self.element_names.get(\"aspect\", \"Aspect\"),\n x_aspect_start,\n y,\n box_width,\n box_height,\n )"
},
{
"identifier": "DynamicCompetenceMap",
"path": "dcm/dcm_core.py",
"snippet": "class DynamicCompetenceMap:\n \"\"\"\n a visualization of a competence map\n \"\"\"\n\n def __init__(self, competence_tree: CompetenceTree):\n \"\"\"\n constructor\n \"\"\"\n self.competence_tree = competence_tree\n self.svg = None\n\n @property\n def main_id(self):\n main_id = self.competence_tree.id\n return main_id\n\n @classmethod\n def examples_path(cls) -> str:\n # the root directory (default: examples)\n path = os.path.join(os.path.dirname(__file__), \"../dcm_examples\")\n path = os.path.abspath(path)\n return path\n\n @classmethod\n def get_example_dcm_definitions(\n cls,\n markup: str = \"json\",\n required_keys: Optional[Tuple] = None,\n as_text: bool = True,\n ) -> dict:\n \"\"\"\n Retrieve example Dynamic Competence Map (DCM) definitions from files in the specified markup format (either JSON or YAML).\n\n Args:\n markup (str): The markup format of the input files. Defaults to 'json'. Supported values are 'json' and 'yaml'.\n required_keys (Optional[Tuple]): A tuple of keys required to validate the data. If not provided, all keys will be considered valid.\n as_text (bool): If True, returns the file content as text; if False, returns parsed data. Defaults to True.\n\n Returns:\n dict: A dictionary where each key is the prefix of the file name and the value is the file content as text or parsed data, depending on the value of 'as_text'.\n\n Raises:\n Exception: If there's an error in reading or parsing the file, or if the file does not meet the required validation criteria.\n \"\"\"\n example_dcm_defs = {}\n file_ext = f\".{markup}\"\n examples_path = cls.examples_path()\n for dirpath, _dirnames, filenames in os.walk(examples_path):\n for filename in filenames:\n if filename.endswith(file_ext):\n filepath = os.path.join(dirpath, filename)\n with open(filepath, \"r\") as definition_file:\n file_prefix = filename.replace(file_ext, \"\")\n definition_text = definition_file.read()\n try:\n definition_data = cls.parse_markup(definition_text, markup)\n if cls.is_valid_definition(definition_data, required_keys):\n if as_text:\n example_dcm_defs[file_prefix] = definition_text\n else:\n example_dcm_defs[file_prefix] = definition_data\n except Exception as ex:\n cls.handle_markup_issue(\n filename, definition_text, ex, markup\n )\n return example_dcm_defs\n\n @classmethod\n def parse_markup(cls, text: str, markup: str) -> Union[dict, list]:\n \"\"\"\n Parse the given text as JSON or YAML based on the specified markup type.\n\n Args:\n text (str): The string content to be parsed.\n markup (str): The type of markup to use for parsing. 
Supported values are 'json' and 'yaml'.\n\n Returns:\n Union[dict, list]: The parsed data, which can be either a dictionary or a list, depending on the content.\n\n Raises:\n ValueError: If an unsupported markup format is specified.\n \"\"\"\n if markup == \"json\":\n data=json.loads(text)\n return data\n elif markup == \"yaml\":\n data=yaml.safe_load(text)\n return data\n else:\n raise ValueError(f\"Unsupported markup format: {markup}\")\n\n @classmethod\n def handle_markup_issue(cls, name: str, definition_string: str, ex, markup: str):\n if isinstance(ex, JSONDecodeError):\n lines = definition_string.splitlines() # Split the string into lines\n err_line = lines[ex.lineno - 1] # JSONDecodeError gives 1-based lineno\n pointer = (\n \" \" * (ex.colno - 1) + \"^\"\n ) # Create a pointer string to indicate the error position\n error_message = (\n f\"{name}:JSON parsing error on line {ex.lineno} column {ex.colno}:\\n\"\n f\"{err_line}\\n\"\n f\"{pointer}\\n\"\n f\"{ex.msg}\"\n )\n raise ValueError(error_message) # Raise a new exception with this message\n else:\n error_message = f\"error in {name}: {str(ex)}\"\n raise ValueError(error_message)\n\n @classmethod\n def is_valid_definition(cls, definition_data, required_keys: Tuple):\n return all(key in definition_data for key in required_keys)\n\n @classmethod\n def get_examples(cls, content_class=CompetenceTree, markup: str = \"json\") -> dict:\n examples = {}\n for name, definition_string in cls.get_example_dcm_definitions(\n required_keys=content_class.required_keys(), markup=markup\n ).items():\n example = cls.from_definition_string(\n name, definition_string, content_class, markup=markup\n )\n # check the type of the example\n example_id = example.main_id\n examples[example_id] = example\n return examples\n\n @classmethod\n def from_definition_string(\n cls, name: str, definition_string: str, content_class, markup: str = \"json\"\n ) -> Any:\n \"\"\"\n Load a DynamicCompetenceMap or Learner instance from a definition string (either JSON or YAML).\n\n Args:\n name (str): A name identifier for the data source.\n definition_string (str): The string content of the definition.\n content_class (dataclass_json): The class which will be instantiated with the parsed data.\n markup (str): The markup format of the data. Defaults to 'json'. Supported values are 'json' and 'yaml'.\n\n Returns:\n DynamicCompetenceMap: An instance of DynamicCompetenceMap loaded with the parsed data.\n\n Raises:\n ValueError: If there's an error in parsing the data.\n \"\"\"\n try:\n data = cls.parse_markup(definition_string, markup)\n content = content_class.from_dict(data)\n if isinstance(content, CompetenceTree):\n return DynamicCompetenceMap(content)\n else:\n return content\n except Exception as ex:\n cls.handle_markup_issue(name, definition_string, ex, markup)"
},
{
"identifier": "DynamicCompentenceMapWebServer",
"path": "dcm/dcm_webserver.py",
"snippet": "class DynamicCompentenceMapWebServer(InputWebserver):\n \"\"\"\n server to supply Dynamic Competence Map Visualizations\n \"\"\"\n\n @classmethod\n def get_config(cls) -> WebserverConfig:\n \"\"\"\n get the configuration for this Webserver\n \"\"\"\n copy_right = \"(c)2023-2024 Wolfgang Fahl\"\n config = WebserverConfig(\n copy_right=copy_right, version=Version(), default_port=8885\n )\n return config\n\n def __init__(self):\n \"\"\"Constructs all the necessary attributes for the WebServer object.\"\"\"\n InputWebserver.__init__(\n self, config=DynamicCompentenceMapWebServer.get_config()\n )\n self.examples = DynamicCompetenceMap.get_examples(markup=\"yaml\")\n self.dcm = None\n self.assessment = None\n\n @app.post(\"/svg/\")\n async def render_svg(svg_render_request: SVGRenderRequest) -> HTMLResponse:\n \"\"\"\n render the given request\n \"\"\"\n return await self.render_svg(svg_render_request)\n\n @app.get(\"/description/{tree_id}/{aspect_id}/{area_id}/{facet_id}\")\n async def get_description_for_facet(\n tree_id: str, aspect_id: str = None, area_id:str=None, facet_id: str = None\n ) -> HTMLResponse:\n \"\"\"\n Endpoints to get the description of a competence facet\n \n\n Args:\n tree_id (str): ID of the tree\n area_id (str): ID of the area\n aspect_id (str, optional): ID of the aspect. Defaults to None.\n facet_id (str, optional): ID of the facet. Defaults to None.\n\n Returns:\n HTMLResponse: HTML content of the description.\n \"\"\"\n path=f\"{tree_id}/{aspect_id}/{area_id}/{facet_id}\"\n return await self.show_description(path)\n\n @app.get(\"/description/{tree_id}/{aspect_id}/{area_id}\")\n async def get_description_for_area(\n tree_id: str, aspect_id: str = None, area_id:str=None\n ) -> HTMLResponse:\n \"\"\"\n Endpoints to get the description of a \n competence area\n\n Args:\n tree_id (str): ID of the tree\n area_id (str): ID of the area\n aspect_id (str, optional): ID of the aspect. 
Defaults to None.\n \n Returns:\n HTMLResponse: HTML content of the description.\n \"\"\"\n path=f\"{tree_id}/{aspect_id}/{area_id}\"\n return await self.show_description(path)\n\n @app.get(\"/description/{tree_id}/{aspect_id}\")\n async def get_description_for_aspect(\n tree_id: str, aspect_id: str = None\n ) -> HTMLResponse:\n \"\"\"\n Endpoint to get the description of a competence aspect\n\n Args:\n tree_id (str): ID of the tree\n area_id (str): ID of the area\n \n Returns:\n HTMLResponse: HTML content of the description.\n \"\"\"\n path=f\"{tree_id}/{aspect_id}\"\n return await self.show_description(path)\n \n @app.get(\"/description/{tree_id}\")\n async def get_description_for_tree(\n tree_id: str\n ) -> HTMLResponse:\n \"\"\"\n Endpoint to get the description of a competence tree\n\n Args:\n tree_id (str): ID of the tree\n \n Returns:\n HTMLResponse: HTML content of the description.\n \"\"\"\n path=f\"{tree_id}\"\n return await self.show_description(path)\n\n async def show_description(\n self, path:str=None\n ) -> HTMLResponse:\n \"\"\"\n Show the HTML description of a specific \n competence element given by the path\n\n Args:\n path(str): the path identifying the element\n \n Returns:\n HTMLResponse: The response object containing the HTML-formatted description.\n\n Raises:\n HTTPException: If the example name provided does not exist in the examples collection.\n \"\"\"\n path_parts=path.split(\"/\")\n tree_id=path_parts[0]\n if tree_id in self.examples:\n example = self.examples[tree_id]\n element = example.competence_tree.lookup_by_path(path)\n if element:\n content = element.as_html()\n return HTMLResponse(content=content)\n else:\n content = (\n f\"No element found for {path} in {tree_id}\"\n )\n return HTMLResponse(content=content, status_code=404)\n else:\n msg = f\"unknown competence tree {tree_id}\"\n raise HTTPException(status_code=404, detail=msg)\n\n async def render_svg(self, svg_render_request: SVGRenderRequest) -> HTMLResponse:\n \"\"\"\n render the given request\n \"\"\"\n r = svg_render_request\n dcm = DynamicCompetenceMap.from_definition_string(\n r.name, r.definition, content_class=CompetenceTree, markup=r.markup\n )\n dcm_chart = DcmChart(dcm)\n svg_markup = dcm_chart.generate_svg_markup(\n config=r.config, with_java_script=True\n )\n response = HTMLResponse(content=svg_markup)\n return response\n\n def get_basename_without_extension(self, url) -> str:\n # Parse the URL to get the path component\n path = urlparse(url).path\n # Extract the base name (e.g., \"example.html\" from \"/dir/example.html\")\n basename = os.path.basename(path)\n # Split the base name and extension and return just the base name\n return os.path.splitext(basename)[0]\n\n async def render(self, _click_args=None):\n \"\"\"\n Renders the json content as an SVG visualization\n\n Args:\n click_args (object): The click event arguments.\n \"\"\"\n try:\n input_source = self.input\n if input_source:\n name = self.get_basename_without_extension(input_source)\n ui.notify(f\"rendering {name}\")\n definition = self.do_read_input(input_source)\n # Determine the format based on the file extension\n markup = \"json\" if input_source.endswith(\".json\") else \"yaml\"\n if \"learner_id\" in definition:\n content_class = Learner\n else:\n content_class = CompetenceTree\n item = DynamicCompetenceMap.from_definition_string(\n name, definition, content_class=content_class, markup=markup\n )\n if isinstance(item, DynamicCompetenceMap):\n self.render_dcm(item)\n else:\n self.learner = item\n self.assess(item)\n 
except BaseException as ex:\n self.handle_exception(ex, self.do_trace)\n\n def render_dcm(self, dcm, learner: Learner = None, clear_assessment: bool = True):\n \"\"\"\n render the dynamic competence map\n \"\"\"\n if clear_assessment and self.assessment:\n try:\n self.assessment_row.clear()\n except Exception as ex:\n ui.notify(str(ex))\n self.assessment = None\n self.dcm = dcm\n self.assessment_button.enable()\n dcm_chart = DcmChart(dcm)\n svg = dcm_chart.generate_svg_markup(learner=learner, with_java_script=False)\n # Use the new get_java_script method to get the JavaScript\n self.svg_view.content = svg\n self.svg_view.update()\n\n async def home(self, _client: Client):\n \"\"\"Generates the home page with a selection of examples and\n svg display\n \"\"\"\n svg = SVG()\n java_script = svg.get_java_script()\n\n # Add the script using ui.add_head_html()\n ui.add_head_html(java_script)\n\n self.setup_menu()\n\n with ui.element(\"div\").classes(\"w-full\"):\n with ui.splitter() as splitter:\n with splitter.before:\n extensions = {\"json\": \".json\", \"yaml\": \".yaml\"}\n self.example_selector = FileSelector(\n path=self.root_path,\n extensions=extensions,\n handler=self.read_and_optionally_render,\n )\n with ui.grid(columns=1).classes(\"w-full\") as self.left_grid:\n with ui.row() as self.input_row:\n self.input_input = ui.input(\n value=self.input, on_change=self.input_changed\n ).props(\"size=100\")\n with ui.row() as self.button_row:\n self.tool_button(\n tooltip=\"reload\",\n icon=\"refresh\",\n handler=self.reload_file,\n )\n self.assessment_button = self.tool_button(\n tooltip=\"assessment\",\n icon=\"query_stats\",\n handler=self.new_assess,\n )\n self.assessment_button.disable()\n if self.is_local:\n self.tool_button(\n tooltip=\"open\",\n icon=\"file_open\",\n handler=self.open_file,\n )\n with splitter.after:\n self.svg_view = ui.html(\"\")\n await self.setup_footer()\n\n def assess_learner(self, dcm, learner):\n \"\"\"\n assess the given Dynamic Competence Map and learner\n\n Args:\n dcm(DynamicCompetenceMap): the competence map\n learner(Learner): the learner to get the self assessment for\n\n \"\"\"\n if self.assessment is not None:\n self.assessment.reset(dcm=dcm, learner=learner)\n else:\n with self.left_grid:\n with ui.row() as self.assessment_row:\n self.assessment = Assessment(self, dcm=dcm, learner=learner)\n self.assessment.update_achievement_view()\n\n def new_assess(self):\n \"\"\"\n run a new assessment for a new learner\n \"\"\"\n learner = Learner(learner_id=\"?\")\n self.assess_learner(self.dcm, learner)\n\n def assess(self, learner: Learner, tree_id: str = None):\n \"\"\"\n run an assessment for the given learner\n\n Args:\n learner(Learner): the learner to get the self assessment for\n tree_id(str): the identifier for the competence tree\n \"\"\"\n if tree_id is None:\n tree_ids = learner.get_competence_tree_ids()\n if len(tree_ids) != 1:\n raise Exception(\n f\"There must be exactly one competence tree referenced but there are: {tree_ids}\"\n )\n tree_id = tree_ids[0]\n if not tree_id in self.examples:\n raise Exception(f\"invalid competence tree_id {tree_id}\")\n dcm = self.examples[tree_id]\n # assess_learner will render ...\n # self.render_dcm(dcm,learner=learner)\n self.assess_learner(dcm, learner)\n\n def configure_run(self):\n \"\"\"\n configure the allowed urls\n \"\"\"\n self.allowed_urls = [\n # \"https://raw.githubusercontent.com/JuanIrache/DJI_SRT_Parser/master/samples/\",\n # 
\"https://raw.githubusercontent.com/JuanIrache/dji-srt-viewer/master/samples/\",\n # \"https://cycle.travel/gpx/\",\n # \"https://cycle.travel/map/journey/\",\n DynamicCompetenceMap.examples_path(),\n self.root_path,\n ]"
},
{
"identifier": "SVGConfig",
"path": "dcm/svg.py",
"snippet": "class SVGConfig:\n \"\"\"\n Configuration class for SVG generation.\n\n Attributes:\n width (int): Width of the SVG canvas in pixels.\n height (int): Height of the SVG canvas in pixels.\n legend_height (int): Height reserved for the legend in pixels.\n font (str): Font family for text elements.\n font_size (int): Font size in points for text elements.\n indent (str): Indentation string, default is two spaces.\n default_color (str): Default color code for SVG elements.\n \"\"\"\n\n width: int = 600\n height: int = 600\n legend_height: int = 150\n font: str = \"Arial\"\n font_size: int = 12\n indent: str = \" \"\n default_color: str = \"#C0C0C0\"\n\n @property\n def total_height(self) -> int:\n \"\"\"\n Calculate total height of the SVG canvas including the legend.\n\n Returns:\n int: Total height of the SVG canvas.\n \"\"\"\n return self.height + self.legend_height"
},
{
"identifier": "MarkupCheck",
"path": "tests/markup_check.py",
"snippet": "class MarkupCheck:\n \"\"\"\n This class provides methods to check the integrity of SVG markups based on given criteria\n from a Dynamic Competence Map (DCM).\n\n Attributes:\n test_case (unittest.TestCase): The instance of the test case using this class,\n which provides access to the assert methods.\n dcm (DynamicCompetenceMap): An instance of the DynamicCompetenceMap containing the competence tree.\n \"\"\"\n\n def __init__(self, test_case: unittest.TestCase, dcm: DynamicCompetenceMap):\n \"\"\"\n Initialize the MarkupCheck with a test case instance and a DynamicCompetenceMap.\n\n Args:\n test_case (unittest.TestCase): An instance of the test case.\n dcm (DynamicCompetenceMap): An instance of the DynamicCompetenceMap.\n \"\"\"\n self.test_case = test_case\n self.dcm = dcm\n\n def parse_svg(\n self, svg_content: Optional[str] = None, svg_file: Optional[str] = None\n ) -> ET.Element:\n \"\"\"\n Parse the SVG content from a string or file.\n\n Args:\n svg_content (str, optional): The SVG content as a text string.\n svg_file (str, optional): The file path of the SVG file to parse.\n\n Returns:\n ET.Element: The root element of the SVG content.\n\n Raises:\n ValueError: If neither svg_content nor svg_file is provided, or if both are provided.\n \"\"\"\n if svg_content and svg_file:\n raise ValueError(\n \"Please provide either SVG content or file path, not both.\"\n )\n\n if svg_content:\n root = ET.fromstring(svg_content)\n elif svg_file:\n tree = ET.parse(svg_file)\n root = tree.getroot()\n else:\n raise ValueError(\"No SVG content or file path provided.\")\n\n self.test_case.assertEqual(\n root.tag,\n \"{http://www.w3.org/2000/svg}svg\",\n \"The root element of the SVG is not 'svg'.\",\n )\n return root\n\n def check_svg_root(self, svg_file: str) -> ET.Element:\n \"\"\"\n Check that the SVG root element is correct.\n\n Args:\n svg_file (str): The file path of the SVG file to parse.\n\n Returns:\n ET.Element: The root element of the SVG file.\n \"\"\"\n tree = ET.parse(svg_file)\n root = tree.getroot()\n\n self.test_case.assertEqual(\n root.tag,\n \"{http://www.w3.org/2000/svg}svg\",\n \"The root element of the SVG is not 'svg'.\",\n )\n return root\n\n def check_svg_elements(self, root: ET.Element) -> None:\n \"\"\"\n Check SVG elements against the competence aspects in the DCM.\n\n Args:\n root (ET.Element): The root element of the SVG file.\n \"\"\"\n namespaces = {\n \"svg\": \"http://www.w3.org/2000/svg\",\n \"xlink\": \"http://www.w3.org/1999/xlink\",\n }\n\n for aspect in self.dcm.competence_tree.aspects:\n # ignore aspects without areas ..\n if len(aspect.areas) == 0:\n continue\n element = root.find(f\"svg:g[@id='{aspect.id}']\", namespaces=namespaces)\n self.test_case.assertIsNotNone(\n element, f\"Aspect with ID '{aspect.id}' not found in SVG.\"\n )\n\n link = element.find(\"svg:a\", namespaces=namespaces)\n if aspect.url:\n self.test_case.assertIsNotNone(\n link,\n f\"Link element for aspect with ID '{aspect.id}' not found in SVG.\",\n )\n self.test_case.assertEqual(\n link.get(\"{http://www.w3.org/1999/xlink}href\"),\n aspect.url,\n f\"URL for aspect with ID '{aspect.id}' is incorrect.\",\n )\n\n def check_svg_titles(self, root: ET.Element) -> None:\n \"\"\"\n Optionally check for the presence of titles within the SVG.\n\n Args:\n root (ET.Element): The root element of the SVG file.\n \"\"\"\n titles = root.findall(\".//{{http://www.w3.org/2000/svg}}title\")\n for title in titles:\n self.test_case.assertIn(\n title.text,\n [\n aspect.name\n for aspect in 
self.dcm.competence_tree.competence_aspects.values()\n ],\n \"A title element has an unexpected text.\",\n )\n\n def check_svg_config(\n self, root: ET.Element, expected_config: Optional[SVGConfig] = None\n ) -> None:\n \"\"\"\n Check if the SVG root has the correct width and height as specified in the SVGConfig.\n\n Args:\n root (ET.Element): The root element of the SVG content.\n expected_config (SVGConfig, optional): The expected SVG configuration.\n \"\"\"\n if expected_config:\n # Check if the 'width' and 'height' of the SVG match the expected configuration\n svg_width = root.get(\"width\")\n svg_height = root.get(\"height\")\n self.test_case.assertEqual(\n svg_width,\n str(expected_config.width),\n f\"SVG width is {svg_width} but expected {expected_config.width}\",\n )\n height = int(svg_height)\n self.test_case.assertEqual(\n height,\n expected_config.total_height,\n f\"SVG height is {svg_height} but expected {expected_config.total_height}\",\n )\n\n def check_markup(\n self,\n svg_content: Optional[str] = None,\n svg_file: Optional[str] = None,\n svg_config: Optional[SVGConfig] = None,\n ) -> None:\n \"\"\"\n Conduct all checks on the SVG content, including configuration checks.\n\n Args:\n svg_content (str, optional): The SVG content as a text string.\n svg_file (str, optional): The file path of the SVG file to parse.\n svg_config (SVGConfig, optional): The expected SVG configuration.\n \"\"\"\n root = self.parse_svg(svg_content=svg_content, svg_file=svg_file)\n self.check_svg_elements(root)\n self.check_svg_titles(root)\n self.check_svg_config(root, svg_config) # Include the config check"
}
] | from ngwidgets.webserver_test import WebserverTest
from dcm.dcm_cmd import CompetenceCmd
from dcm.dcm_core import CompetenceTree, DynamicCompetenceMap
from dcm.dcm_webserver import DynamicCompentenceMapWebServer
from dcm.svg import SVGConfig
from tests.markup_check import MarkupCheck | 7,860 | """
Created on 2023-11-08
@author: wf
"""
class TestAPI(WebserverTest):
"""
test the dcm RESTFul API
"""
def setUp(self, debug=False, profile=True):
server_class = DynamicCompentenceMapWebServer
cmd_class = CompetenceCmd
WebserverTest.setUp(
self,
debug=debug,
profile=profile,
server_class=server_class,
cmd_class=cmd_class,
)
self.example_definitions = {}
for markup in ["json", "yaml"]:
self.example_definitions[
markup
] = DynamicCompetenceMap.get_example_dcm_definitions(
markup=markup, required_keys=CompetenceTree.required_keys()
)
def test_svg_render(self):
"""
test the rendering
"""
path = "/svg"
| """
Created on 2023-11-08
@author: wf
"""
class TestAPI(WebserverTest):
"""
test the dcm RESTFul API
"""
def setUp(self, debug=False, profile=True):
server_class = DynamicCompentenceMapWebServer
cmd_class = CompetenceCmd
WebserverTest.setUp(
self,
debug=debug,
profile=profile,
server_class=server_class,
cmd_class=cmd_class,
)
self.example_definitions = {}
for markup in ["json", "yaml"]:
self.example_definitions[
markup
] = DynamicCompetenceMap.get_example_dcm_definitions(
markup=markup, required_keys=CompetenceTree.required_keys()
)
def test_svg_render(self):
"""
test the rendering
"""
path = "/svg" | svg_config = SVGConfig(width=666, height=666) | 4 | 2023-11-06 09:24:24+00:00 | 12k |
fortelex/hiveline | hiveline/routing/vc_router.py | [
{
"identifier": "JobHandler",
"path": "hiveline/jobs/jobs.py",
"snippet": "class JobHandler:\n def __init__(self, service_name: str, sim_id: str, data_source: JobsDataSource):\n self.service_name = service_name\n self.sim_id = sim_id\n self.data_source = data_source\n\n def create_jobs(self, job_ids: list[str]):\n self.data_source.create_jobs(self.sim_id, self.service_name, job_ids)\n\n def reset_jobs(self):\n self.data_source.reset_jobs(self.sim_id, self.service_name)\n\n def reset_timed_out_jobs(self):\n self.data_source.reset_jobs(self.sim_id, self.service_name, status=[JobStatus.STARTED],\n max_started_date=datetime.datetime.now() - datetime.timedelta(minutes=5))\n\n def reset_failed_jobs(self):\n self.data_source.reset_jobs(self.sim_id, self.service_name, status=[JobStatus.FAILED])\n\n def iterate_jobs(self, handler: Callable[[str], None], threads=4, debug_progress=True, max_consecutive_errors=5):\n if threads > 1:\n self._spawn_threads(handler, threads, debug_progress, max_consecutive_errors)\n return\n\n self._iterate_jobs(handler, debug_progress, max_consecutive_errors)\n\n def _spawn_threads(self, handler: Callable[[str], None], num_threads=4, debug_progress=True,\n max_consecutive_errors=5):\n threads = []\n for i in range(num_threads):\n t = threading.Thread(target=self._iterate_jobs,\n args=(handler, debug_progress and i == 0, max_consecutive_errors))\n t.start()\n threads.append(t)\n\n for t in threads:\n t.join()\n\n def _iterate_jobs(self, handler: Callable[[str], None], debug_progress=True, max_consecutive_errors=5):\n # get the total number of jobs\n total_jobs = 0\n if debug_progress:\n total_jobs = self.data_source.count_jobs(self.sim_id, self.service_name, status=JobStatus.PENDING)\n\n # by default, we will not stop the process if there is one error, but if there are multiple consecutive errors,\n # we will stop the process\n consecutive_error_number = 0\n\n last_print = 0\n\n while True:\n job_id = self.data_source.pop_job(self.sim_id, self.service_name)\n if job_id is None:\n break\n\n current_time = time.time()\n if debug_progress and current_time - last_print > 1:\n last_print = current_time\n pending_jobs = self.data_source.count_jobs(self.sim_id, self.service_name, status=JobStatus.PENDING)\n print(\"Progress: ~{:.2f}% {:}\".format(100 * (1 - pending_jobs / total_jobs), job_id))\n\n try:\n handler(job_id)\n\n consecutive_error_number = 0\n\n self.data_source.update_job(self.sim_id, self.service_name, job_id, JobStatus.FINISHED)\n except Exception as e:\n consecutive_error_number += 1\n print(f\"Error processing job {job_id}: {e}\")\n\n # set status to failed\n self.data_source.update_job(self.sim_id, self.service_name, job_id, JobStatus.FAILED, str(e))\n\n if consecutive_error_number > max_consecutive_errors:\n raise e\n\n def count_jobs(self, status):\n return self.data_source.count_jobs(self.sim_id, self.service_name, status=status)"
},
{
"identifier": "JobStatus",
"path": "hiveline/jobs/jobs.py",
"snippet": "class JobStatus(Enum):\n PENDING = \"pending\"\n STARTED = \"started\"\n FINISHED = \"finished\"\n FAILED = \"failed\"\n\n def __str__(self):\n return self.value\n\n def __repr__(self):\n return self.value\n\n def to_str(self):\n return self.value\n\n @staticmethod\n def from_str(s: str):\n if s == \"pending\":\n return JobStatus.PENDING\n elif s == \"started\":\n return JobStatus.STARTED\n elif s == \"finished\":\n return JobStatus.FINISHED\n elif s == \"failed\":\n return JobStatus.FAILED\n else:\n raise ValueError(f\"Invalid job status: {s}\")"
},
{
"identifier": "MongoJobsDataSource",
"path": "hiveline/jobs/mongo.py",
"snippet": "class MongoJobsDataSource(JobsDataSource):\n def __init__(self, db=None):\n self.db = db\n if self.db is None:\n self.db = get_database()\n self.coll = self.db[\"jobs\"]\n\n def create_jobs(self, sim_id: str, service_name: str, job_ids: list[str]):\n for job_id in job_ids:\n try:\n self.coll.insert_one(MongoJob(\n service_name=service_name,\n sim_id=sim_id,\n job_id=job_id,\n status=\"pending\",\n created=datetime.datetime.now()\n ).to_dict())\n except pymongo.errors.DuplicateKeyError:\n pass\n\n def reset_jobs(self, sim_id: str, service_name: str, status: list[JobStatus] = None, max_started_date=None):\n jobs_filter = {\n \"service-name\": service_name,\n \"sim-id\": sim_id\n }\n\n if status is not None:\n jobs_filter[\"status\"] = {\n \"$in\": [str(s) for s in status]\n }\n\n if max_started_date is not None:\n jobs_filter[\"started\"] = {\n \"$lte\": max_started_date\n }\n\n self.coll.update_many(jobs_filter, {\n \"$set\": {\n \"status\": \"pending\"\n },\n \"$unset\": {\n \"error\": \"\",\n \"started\": \"\",\n \"finished\": \"\"\n }\n })\n\n def pop_job(self, sim_id: str, service_name: str) -> str | None:\n job = self.coll.find_one_and_update({\n \"service-name\": service_name,\n \"sim-id\": sim_id,\n \"status\": \"pending\"\n }, {\n \"$set\": {\n \"status\": \"started\",\n \"started\": datetime.datetime.now()\n }\n })\n return job[\"job-id\"] if job is not None else None\n\n def update_job(self, sim_id: str, service_name: str, job_id: str, status: JobStatus, error: str | None = None):\n update = {\n \"$set\": {\n \"status\": str(status),\n \"finished\": datetime.datetime.now()\n }\n }\n\n if error is not None:\n update[\"$set\"][\"error\"] = error\n\n if status == JobStatus.STARTED:\n update[\"$set\"][\"started\"] = datetime.datetime.now()\n\n if status == JobStatus.FINISHED or status == JobStatus.FAILED:\n update[\"$set\"][\"finished\"] = datetime.datetime.now()\n\n self.coll.update_one({\n \"service-name\": service_name,\n \"sim-id\": sim_id,\n \"job-id\": job_id\n }, update)\n\n def count_jobs(self, sim_id: str, service_name: str, status: JobStatus = None) -> int:\n jobs_filter = {\n \"service-name\": service_name,\n \"sim-id\": sim_id\n }\n\n if status is not None:\n jobs_filter[\"status\"] = str(status)\n\n return self.coll.count_documents(jobs_filter)\n\n def delete_jobs(self, sim_id: str, service_name: str):\n self.coll.delete_many({\n \"service-name\": service_name,\n \"sim-id\": sim_id\n })"
},
{
"identifier": "fptf",
"path": "hiveline/models/fptf.py",
"snippet": "def _remove_empty_keys(d):\ndef read_datetime(time_str):\ndef format_datetime(dt):\n def __init__(self, name=None, address=None, longitude=None, latitude=None, altitude=None):\n def to_dict(self):\n def to_json(self):\n def from_dict(json_str):\ndef location_from_json(data: dict | str | None):\n def __init__(self, id: str, name: str, location: Location = None, regions: list = None):\n def to_dict(self):\n def to_json(self):\n def from_json(json_str):\ndef station_from_json(data: dict | str | None):\n def __init__(self, id: str, station: Station, name: str, location: Location = None):\n def to_dict(self):\n def to_json(self):\n def from_json(json_str):\ndef stop_from_json(data: dict | str | None):\ndef place_from_json(data: dict | str | None):\n def __init__(self, id: str, name: str, stations: list[Station] = None):\n def to_dict(self):\n def to_json(self):\n def from_json(json_str):\ndef region_from_json(data: dict | str | None):\n def __init__(self, mode: str):\n def __str__(self):\n def __repr__(self):\n def to_string(self):\n def to_json(self):\n def from_string(mode):\n def __init__(self, id: str, name: str):\n def to_dict(self):\n def to_json(self):\n def from_json(json_str):\ndef operator_from_json(data: dict | str | None):\n def __init__(self, id: str, name: str, mode: Mode, routes: list, operator: Operator = None, sub_mode: str = None):\n def to_dict(self):\n def to_json(self):\n def from_json(json_str):\ndef line_from_json(data: dict | str | None):\n def __init__(self, id: str, line: Line, mode: Mode, stops: list[Station | Stop | Location], sub_mode: str = None):\n def to_dict(self):\n def to_json(self):\ndef route_from_json(data: dict | str | None):\n def __init__(self, arrival: int = None, departure: int = None):\n def to_dict(self):\n def to_json(self):\n def from_json(json_str):\n def __init__(self, id: str, route: Route, mode: Mode, sequence: list[ScheduleSequenceElement], starts,\n sub_mode=None):\n def to_dict(self):\n def to_json(self):\n def from_json(json_str):\ndef schedule_from_json(data: dict | str | None):\n def __init__(self, stop: Stop | Station | Location, arrival: datetime.datetime = None, arrival_delay: int = None,\n arrival_platform: str = None,\n departure: datetime.datetime = None, departure_delay: int = None, departure_platform: str = None):\n def to_dict(self):\n def to_json(self):\n def from_json(json_str):\ndef stopover_from_json(data: dict | str | None):\ndef get_location(place: Location | Station | Stop | Stopover) -> Location | None:\n def __init__(self, amount: float, currency: str):\n def to_dict(self):\n def to_json(self):\n def from_json(json_str):\ndef price_from_json(data: dict | str | None):\n def __init__(self, origin: Stop | Station | Location, destination: Stop | Station | Location,\n departure: datetime.datetime, arrival: datetime.datetime, mode: Mode, sub_mode: str = None,\n departure_delay: int = None,\n departure_platform: str = None,\n arrival_delay: int = None, arrival_platform: str = None, line: Line = None, direction: str = None,\n stopovers: list[Stopover] = None, schedule: Schedule = None, public: bool = True,\n operator: Operator = None,\n price: Price = None, polyline: str = None):\n def to_dict(self):\n def to_json(self):\n def from_json(json_str):\n def get_departure(self, realtime=True):\n def get_arrival(self, realtime=True):\n def duration(self, realtime=True):\ndef leg_from_json(data: dict | str | None):\n def __init__(self, id: str, legs: list[Leg], price: Price = None):\n def to_dict(self):\n def 
to_json(self):\n def from_json(json_str):\n def get_departure(self, realtime=True):\n def get_arrival(self, realtime=True):\n def duration(self, realtime=True):\n def get_trace(self) -> list[tuple[tuple[float, float], datetime.datetime, Mode, bool]]:\ndef journey_from_json(data: dict | str | None):\ndef from_json(data: dict | str | None):\nclass Location:\nclass Station:\nclass Stop:\nclass Region:\nclass Mode(Enum):\nclass Operator:\nclass Line:\nclass Route:\nclass ScheduleSequenceElement:\nclass Schedule:\nclass Stopover:\nclass Price:\nclass Leg:\nclass Journey:\n TRAIN = 'train'\n BUS = 'bus'\n WATERCRAFT = 'watercraft'\n TAXI = 'taxi'\n GONDOLA = 'gondola'\n AIRCRAFT = 'aircraft'\n CAR = 'car'\n BICYCLE = 'bicycle'\n WALKING = 'walking'\n UNKNOWN = ''"
},
{
"identifier": "Option",
"path": "hiveline/models/options.py",
"snippet": "class Option:\n def __init__(self, id: str, origin: fptf.Location, destination: fptf.Location, departure: datetime.datetime,\n modes: list[fptf.Mode], journey: fptf.Journey,\n trace: list[tuple[tuple[float, float], datetime.datetime, fptf.Mode, bool]] | None = None):\n self.id = id\n self.origin = origin\n self.destination = destination\n self.departure = departure\n self.modes = modes\n self.journey = journey\n self.trace = trace\n\n def to_dict(self):\n return {\n \"route-option-id\": self.id,\n \"origin\": [self.origin.longitude, self.origin.latitude],\n \"destination\": [self.destination.longitude, self.destination.latitude],\n \"departure\": fptf.format_datetime(self.departure),\n \"modes\": [m.to_string() for m in self.modes],\n \"journey\": self.journey.to_dict()\n }\n\n @staticmethod\n def from_dict(result):\n id = result[\"route-option-id\"]\n origin = fptf.Location(longitude=result[\"origin\"][0], latitude=result[\"origin\"][1])\n destination = fptf.Location(longitude=result[\"destination\"][0], latitude=result[\"destination\"][1])\n departure = fptf.read_datetime(result[\"departure\"])\n modes = [fptf.Mode.from_string(m) for m in result[\"modes\"]]\n journey = fptf.journey_from_json(result[\"journey\"])\n trace = None\n return Option(id, origin, destination, departure, modes, journey, trace)\n\n def has_car(self):\n \"\"\"\n Check if a route option has a car leg\n :return: True if the route option has a car leg, False otherwise\n \"\"\"\n\n for leg in self.journey.legs:\n mode = leg.mode\n\n if mode == fptf.Mode.CAR:\n return True\n\n return False\n\n def get_trace(self) -> list[tuple[tuple[float, float], datetime.datetime, fptf.Mode, bool]]:\n if self.trace is None:\n self.trace = self.journey.get_trace()\n return self.trace"
},
{
"identifier": "get_database",
"path": "hiveline/mongo/db.py",
"snippet": "def get_database():\n dotenv.load_dotenv()\n\n user = os.getenv(\"UP_MONGO_USER\")\n password = os.getenv(\"UP_MONGO_PASSWORD\")\n domain = os.getenv(\"UP_MONGO_DOMAIN\")\n database = os.getenv(\"UP_MONGO_DATABASE\")\n\n connection_string = \"mongodb://%s:%s@%s/%s?authSource=admin\" % (user, password, domain, database)\n\n client = MongoClient(connection_string)\n\n return client[database]"
},
{
"identifier": "resource_builder",
"path": "hiveline/routing/resource_builder.py",
"snippet": "def build_resources(data_dir: str, place, sim_date: datetime.date) -> RoutingServerConfig:\ndef __get_closest_link(link_list, target_date: datetime.date, ignore_future: bool = False):\ndef __ensure_data_downloaded(data_dir: str, link_object, file_name_extension: str):\ndef __ensure_closest_pbf_downloaded(data_dir, place, sim_date):\ndef __ensure_closest_gtfs_downloaded(data_dir, place, sim_date):"
},
{
"identifier": "DelayedRoutingClient",
"path": "hiveline/routing/clients/delayed.py",
"snippet": "class DelayedRoutingClient(RoutingClient):\n def __init__(self, base: RoutingClient):\n # This dictionary stores the delay data for each operator\n self.delay_data = _read_delay_statistics()\n self.base = base\n\n def __get_random_delay(self, operator_name):\n \"\"\"\n This function returns a random delay for the specified operator. The delay is either cancelled or a random value\n between the specified interval.\n\n :param operator_name: the name of the operator\n :return: a dictionary with the keys \"cancelled\" and \"delay\"\n \"\"\"\n\n operator_name = operator_name.lower()\n if operator_name not in self.delay_data:\n operator_name = \"average\"\n\n cancelled_percent = self.delay_data[operator_name][\"cancelled_percent\"]\n\n if np.random.random() * 100 < cancelled_percent:\n return {\n \"cancelled\": True\n }\n\n starts = self.delay_data[operator_name][\"starts\"]\n identity = self.delay_data[operator_name][\"identity\"]\n weights = self.delay_data[operator_name][\"weights\"]\n\n key = np.random.choice(identity, p=weights)\n interval_start = starts[key]\n interval_end = interval_start + 5\n if key < len(starts) - 1:\n interval_end = starts[key + 1]\n\n val = np.random.randint(interval_start, interval_end)\n\n return {\n \"cancelled\": False,\n \"delay\": val\n }\n\n time_dependent_modes = [fptf.Mode.TRAIN, fptf.Mode.BUS, fptf.Mode.WATERCRAFT, fptf.Mode.AIRCRAFT, fptf.Mode.GONDOLA]\n\n def get_journeys(self, from_lat, from_lon, to_lat, to_lon, departure, modes):\n \"\"\"\n This function returns a delayed itinerary for the specified parameters. It uses the fastest itinerary from OTP\n and adds a random delay to each leg of the itinerary. If a leg is cancelled or the traveller cannot catch the\n next connection, OTP may be queried multiple times.\n\n :param from_lat: latitude of the start location\n :param from_lon: longitude of the start location\n :param to_lat: latitude of the destination\n :param to_lon: longitude of the destination\n :param departure: departure time as datetime object\n :param modes: list of modes to use for the trip (e.g. [\"WALK\", \"TRANSIT\"])\n :return: a delayed journey\n \"\"\"\n raise Exception(\"delays not currently supported. 
please use the --no-delays flag or set use_delays=False\")\n\n journey = _get_fastest_journey(self.base.get_journeys(from_lat, from_lon, to_lat, to_lon, departure, modes))\n\n if journey is None:\n return None\n\n result_legs = []\n\n max_calls = 20\n re_calc_count = 0\n\n for call in range(max_calls):\n steps = 0\n\n current_delay = 0 # in minutes\n\n # iterate legs\n leg_count = len(journey.legs)\n while steps < leg_count:\n time_independent_start = steps\n\n while steps < leg_count:\n leg = journey.legs[steps]\n leg.departure_delay = current_delay * 60\n leg.arrival_delay = current_delay * 60\n if leg.mode in self.time_dependent_modes:\n break\n steps += 1\n\n if steps >= leg_count:\n # we can catch the last connection\n break\n\n # point in time when the traveller arrives at the station\n real_min_departure = journey.legs[0].departure\n if steps > 0:\n real_min_departure = journey.legs[steps - 1].arrival\n\n # legs[steps] is a time dependent leg\n leg = journey.legs[steps]\n\n # get the operator name\n operator_name = leg.operator.name\n\n # get the delay\n delay = self.__get_random_delay(operator_name)\n\n # check if the connection is cancelled\n if delay[\"cancelled\"]:\n # trip is cancelled, reset the steps to the start of the time independent legs\n steps = time_independent_start\n break\n\n delay_seconds = int(delay[\"delay\"]) * 60\n real_departure = leg.departure + datetime.timedelta(seconds=delay_seconds)\n\n if real_departure < real_min_departure:\n # we cannot catch the connection, reset the steps to the start of the time independent legs\n steps = time_independent_start\n break\n\n current_delay = delay[\"delay\"]\n leg.departure_delay = delay_seconds\n leg.arrival_delay = delay_seconds\n steps += 1\n\n if steps >= leg_count:\n # we can catch the last connection\n result_legs += journey.legs\n break\n\n # we cannot catch the last connection\n result_legs += journey.legs[:steps]\n\n # route from the last station to the destination\n last_leg = journey.legs[0]\n position = last_leg.origin\n new_dep = last_leg.departure\n if steps > 0:\n last_leg = journey.legs[steps - 1]\n position = last_leg.destination\n new_dep = last_leg.arrival + datetime.timedelta(seconds=last_leg.arrival_delay)\n\n pos_lon = position.longitude\n pos_lat = position.latitude\n\n journey = _get_fastest_journey(self.base.get_journeys(pos_lat, pos_lon, to_lat, to_lon, new_dep, modes))\n re_calc_count += 1\n\n if journey is None:\n return None\n\n return fptf.Journey(\n id=None,\n legs=result_legs\n )"
},
{
"identifier": "RoutingClient",
"path": "hiveline/routing/clients/routing_client.py",
"snippet": "class RoutingClient(ABC):\n @abstractmethod\n def get_journeys(self, from_lat: float, from_lon: float, to_lat: float, to_lon: float, departure: datetime.datetime,\n modes: list[fptf.Mode]) -> list[fptf.Journey] | None:\n \"\"\"\n Get routes from the router\n :param from_lat: the latitude of the starting point\n :param from_lon: the longitude of the starting point\n :param to_lat: the latitude of the destination\n :param to_lon: the longitude of the destination\n :param departure: the departure time as datetime object\n :param modes: the fptf modes to use for routing\n :return: a list of fptf journey\n \"\"\"\n pass"
},
{
"identifier": "RoutingServer",
"path": "hiveline/routing/servers/routing_server.py",
"snippet": "class RoutingServer(ABC):\n @abstractmethod\n def build(self, config: RoutingServerConfig, force_rebuild=False) -> list[str]:\n \"\"\"\n Build the graph for the routing server. This function returns a list of files that are required for routing.\n :param config: the configuration for the routing server\n :param force_rebuild: if True, the graph will be rebuilt even if it already exists in the cache\n :return: a list of files that are required for routing\n \"\"\"\n pass\n\n @abstractmethod\n def start(self, config: RoutingServerConfig, built_files: list[str]):\n \"\"\"\n Start the routing server. It should return when the server is ready to accept requests.\n :param built_files: the files that were built for the routing server\n :param config: the configuration for the routing server\n \"\"\"\n pass\n\n @abstractmethod\n def stop(self):\n \"\"\"\n Stop the routing server.\n \"\"\"\n pass\n\n @abstractmethod\n def get_meta(self):\n \"\"\"\n Get the metadata of the routing server. Includes the version, name, etc.\n \"\"\"\n pass"
},
{
"identifier": "vc_extract",
"path": "hiveline/vc/vc_extract.py",
"snippet": "def extract_origin_loc(vc):\ndef extract_destination_loc(vc):\ndef extract_departure(vc, sim) -> datetime:\ndef has_motor_vehicle(vc):\ndef has_motorcycle(vc):\ndef extract_traveller(vc):\ndef would_use_motorized_vehicle(vc):\ndef __validate_location(loc):\ndef should_route(vc):"
}
] | import sys
import os
import argparse
import datetime
import time
import uuid
import pymongo.errors
from dotenv import load_dotenv
from hiveline.jobs.jobs import JobHandler, JobStatus
from hiveline.jobs.mongo import MongoJobsDataSource
from hiveline.models import fptf
from hiveline.models.options import Option
from hiveline.mongo.db import get_database
from hiveline.routing import resource_builder
from hiveline.routing.clients.delayed import DelayedRoutingClient
from hiveline.routing.clients.routing_client import RoutingClient
from hiveline.routing.servers.routing_server import RoutingServer
from hiveline.vc import vc_extract
from hiveline.routing.servers.otp import OpenTripPlannerRoutingServer
from hiveline.routing.clients.otp import OpenTripPlannerRoutingClient
from hiveline.routing.servers.bifrost import BifrostRoutingServer
from hiveline.routing.clients.bifrost import BifrostRoutingClient | 7,378 | :param client: The routing client
:param vc: The virtual commuter
:param sim: The simulation
:param modes: The modes to use
:return:
"""
origin = vc_extract.extract_origin_loc(vc)
destination = vc_extract.extract_destination_loc(vc)
departure = vc_extract.extract_departure(vc, sim)
journeys = client.get_journeys(origin[1], origin[0], destination[1], destination[0], departure, modes)
if journeys is None:
return None
origin_fptf = fptf.Location(longitude=origin[0], latitude=origin[1])
destination_fptf = fptf.Location(longitude=destination[0], latitude=destination[1])
return [Option(str(uuid.uuid4()), origin_fptf, destination_fptf, departure, modes, journey) for journey in journeys]
def __route_virtual_commuter(client: RoutingClient, vc: dict, sim: dict) -> list[Option]:
"""
Route a virtual commuter. It will calculate available mode combinations and then calculate routes for each of them.
:param client: The routing client
:param vc: The virtual commuter
:param sim: The simulation
:return:
"""
mode_combinations = [[fptf.Mode.WALKING, fptf.Mode.BUS, fptf.Mode.TRAIN, fptf.Mode.GONDOLA]]
# if vc_extract.has_motor_vehicle(vc):
mode_combinations += [[fptf.Mode.WALKING, fptf.Mode.CAR]]
option_lists = [__get_route_results(client, vc, sim, modes) for modes in mode_combinations]
options = []
for option_list in option_lists:
if option_list is None:
continue
options += option_list
options = [option for option in options if option is not None]
return options
def __process_virtual_commuter(client, route_results_coll, vc_coll, vc_id, sim, meta):
vc = vc_coll.find_one({"vc-id": vc_id, "sim-id": sim["sim-id"]})
should_route = vc_extract.should_route(vc)
if not should_route:
return
options = __route_virtual_commuter(client, vc, sim)
if options is None or len(options) == 0:
print("No route found for virtual commuter " + vc["vc-id"])
raise Exception("No route found")
# dump options to route-results collection
route_results = {
"vc-id": vc["vc-id"],
"sim-id": vc["sim-id"],
"created": datetime.datetime.now(),
"options": [option.to_dict() for option in options],
"traveller": vc_extract.extract_traveller(vc),
"meta": meta
}
try:
route_results_coll.insert_one(route_results)
except pymongo.errors.DuplicateKeyError:
if "_id" in route_results:
del route_results["_id"]
route_results_coll.update_one({"vc-id": vc["vc-id"], "sim-id": vc["sim-id"]}, {"$set": route_results})
def __route_virtual_commuters(server: RoutingServer, client: RoutingClient, sim_id, data_dir="./cache", use_delays=True,
force_graph_rebuild=False, num_threads=4, reset_jobs=False, reset_failed=False, db=None):
"""
Run the routing algorithm for a virtual commuter set. It will spawn a new OTP process and run the routing algorithm
for all open jobs in the database. It will also update the database with the results of the routing algorithm.
:param server: The routing server
:param client: The routing client
:param sim_id: The virtual commuter set id
:param data_dir: The directory where the data should be stored
:param use_delays: Whether to use delays or not
:param force_graph_rebuild: Whether to force a rebuild of the graph or not
:param num_threads: The number of threads to use for sending route requests to the server
:param reset_jobs: Whether to reset all jobs to pending or not
:param reset_failed: Whether to reset all failed jobs to pending or not
:param db: The database
:return:
"""
if db is None:
db = get_database()
job_handler = JobHandler("routing", sim_id, MongoJobsDataSource(db=db))
if reset_jobs:
job_handler.reset_jobs()
if reset_failed and not reset_jobs:
job_handler.reset_failed_jobs()
__create_route_calculation_jobs(db, sim_id, job_handler)
job_handler.reset_timed_out_jobs()
if job_handler.count_jobs(status=JobStatus.PENDING) == 0:
print("No active jobs, stopping")
return
sim = db["simulations"].find_one({"sim-id": sim_id})
place_resources = db["place-resources"].find_one({"place-id": sim["place-id"]})
sim_date = datetime.datetime.strptime(sim["sim-date"], "%Y-%m-%d").date()
print("Building resources")
| if __name__ == "__main__":
load_dotenv()
sys.path.append(os.getenv("PROJECT_PATH"))
def __create_route_calculation_jobs(db, sim_id, job_handler):
"""
Create route calculation jobs for all virtual commuters of a given simulation that do not have a job yet.
:param db: the database
:param sim_id: the simulation id
:return:
"""
pipeline = [
{
"$match": {
"sim-id": sim_id
}
},
{
"$project": {
"vc-id": "$vc-id",
}
}
]
coll = db["virtual-commuters"]
result = coll.aggregate(pipeline)
job_ids = [vc["vc-id"] for vc in result]
job_handler.create_jobs(job_ids)
def __get_route_results(client: RoutingClient, vc: dict, sim: dict, modes: list[fptf.Mode]) -> list[Option] | None:
"""
Get a route for a virtual commuter.
:param client: The routing client
:param vc: The virtual commuter
:param sim: The simulation
:param modes: The modes to use
:return:
"""
origin = vc_extract.extract_origin_loc(vc)
destination = vc_extract.extract_destination_loc(vc)
departure = vc_extract.extract_departure(vc, sim)
journeys = client.get_journeys(origin[1], origin[0], destination[1], destination[0], departure, modes)
if journeys is None:
return None
origin_fptf = fptf.Location(longitude=origin[0], latitude=origin[1])
destination_fptf = fptf.Location(longitude=destination[0], latitude=destination[1])
return [Option(str(uuid.uuid4()), origin_fptf, destination_fptf, departure, modes, journey) for journey in journeys]
def __route_virtual_commuter(client: RoutingClient, vc: dict, sim: dict) -> list[Option]:
"""
Route a virtual commuter. It will calculate available mode combinations and then calculate routes for each of them.
:param client: The routing client
:param vc: The virtual commuter
:param sim: The simulation
:return:
"""
mode_combinations = [[fptf.Mode.WALKING, fptf.Mode.BUS, fptf.Mode.TRAIN, fptf.Mode.GONDOLA]]
# if vc_extract.has_motor_vehicle(vc):
mode_combinations += [[fptf.Mode.WALKING, fptf.Mode.CAR]]
option_lists = [__get_route_results(client, vc, sim, modes) for modes in mode_combinations]
options = []
for option_list in option_lists:
if option_list is None:
continue
options += option_list
options = [option for option in options if option is not None]
return options
def __process_virtual_commuter(client, route_results_coll, vc_coll, vc_id, sim, meta):
vc = vc_coll.find_one({"vc-id": vc_id, "sim-id": sim["sim-id"]})
should_route = vc_extract.should_route(vc)
if not should_route:
return
options = __route_virtual_commuter(client, vc, sim)
if options is None or len(options) == 0:
print("No route found for virtual commuter " + vc["vc-id"])
raise Exception("No route found")
# dump options to route-results collection
route_results = {
"vc-id": vc["vc-id"],
"sim-id": vc["sim-id"],
"created": datetime.datetime.now(),
"options": [option.to_dict() for option in options],
"traveller": vc_extract.extract_traveller(vc),
"meta": meta
}
try:
route_results_coll.insert_one(route_results)
except pymongo.errors.DuplicateKeyError:
if "_id" in route_results:
del route_results["_id"]
route_results_coll.update_one({"vc-id": vc["vc-id"], "sim-id": vc["sim-id"]}, {"$set": route_results})
def __route_virtual_commuters(server: RoutingServer, client: RoutingClient, sim_id, data_dir="./cache", use_delays=True,
force_graph_rebuild=False, num_threads=4, reset_jobs=False, reset_failed=False, db=None):
"""
Run the routing algorithm for a virtual commuter set. It will spawn a new OTP process and run the routing algorithm
for all open jobs in the database. It will also update the database with the results of the routing algorithm.
:param server: The routing server
:param client: The routing client
:param sim_id: The virtual commuter set id
:param data_dir: The directory where the data should be stored
:param use_delays: Whether to use delays or not
:param force_graph_rebuild: Whether to force a rebuild of the graph or not
:param num_threads: The number of threads to use for sending route requests to the server
:param reset_jobs: Whether to reset all jobs to pending or not
:param reset_failed: Whether to reset all failed jobs to pending or not
:param db: The database
:return:
"""
if db is None:
db = get_database()
job_handler = JobHandler("routing", sim_id, MongoJobsDataSource(db=db))
if reset_jobs:
job_handler.reset_jobs()
if reset_failed and not reset_jobs:
job_handler.reset_failed_jobs()
__create_route_calculation_jobs(db, sim_id, job_handler)
job_handler.reset_timed_out_jobs()
if job_handler.count_jobs(status=JobStatus.PENDING) == 0:
print("No active jobs, stopping")
return
sim = db["simulations"].find_one({"sim-id": sim_id})
place_resources = db["place-resources"].find_one({"place-id": sim["place-id"]})
sim_date = datetime.datetime.strptime(sim["sim-date"], "%Y-%m-%d").date()
print("Building resources") | config = resource_builder.build_resources(data_dir, place_resources, sim_date) | 6 | 2023-11-07 15:34:04+00:00 | 12k |
uhppoted/uhppoted-app-home-assistant | custom_components/uhppoted/config_flow.py | [
{
"identifier": "DOMAIN",
"path": "custom_components/uhppoted/const.py",
"snippet": "DOMAIN = 'uhppoted'"
},
{
"identifier": "CONF_BIND_ADDR",
"path": "custom_components/uhppoted/const.py",
"snippet": "CONF_BIND_ADDR = 'bind_address'"
},
{
"identifier": "CONF_BROADCAST_ADDR",
"path": "custom_components/uhppoted/const.py",
"snippet": "CONF_BROADCAST_ADDR = 'broadcast_address'"
},
{
"identifier": "CONF_LISTEN_ADDR",
"path": "custom_components/uhppoted/const.py",
"snippet": "CONF_LISTEN_ADDR = 'listen_address'"
},
{
"identifier": "CONF_DEBUG",
"path": "custom_components/uhppoted/const.py",
"snippet": "CONF_DEBUG = 'debug'"
},
{
"identifier": "CONF_CONTROLLERS",
"path": "custom_components/uhppoted/const.py",
"snippet": "CONF_CONTROLLERS = 'controllers'"
},
{
"identifier": "CONF_CONTROLLER_ID",
"path": "custom_components/uhppoted/const.py",
"snippet": "CONF_CONTROLLER_ID = 'controller_id'"
},
{
"identifier": "CONF_CONTROLLER_SERIAL_NUMBER",
"path": "custom_components/uhppoted/const.py",
"snippet": "CONF_CONTROLLER_SERIAL_NUMBER = 'controller_serial_number'"
},
{
"identifier": "CONF_CONTROLLER_ADDR",
"path": "custom_components/uhppoted/const.py",
"snippet": "CONF_CONTROLLER_ADDR = 'controller_address'"
},
{
"identifier": "CONF_CONTROLLER_TIMEZONE",
"path": "custom_components/uhppoted/const.py",
"snippet": "CONF_CONTROLLER_TIMEZONE = 'controller_timezone'"
},
{
"identifier": "CONF_DOORS",
"path": "custom_components/uhppoted/const.py",
"snippet": "CONF_DOORS = 'doors'"
},
{
"identifier": "CONF_DOOR_ID",
"path": "custom_components/uhppoted/const.py",
"snippet": "CONF_DOOR_ID = 'door_id'"
},
{
"identifier": "CONF_DOOR_CONTROLLER",
"path": "custom_components/uhppoted/const.py",
"snippet": "CONF_DOOR_CONTROLLER = 'door_controller'"
},
{
"identifier": "CONF_DOOR_NUMBER",
"path": "custom_components/uhppoted/const.py",
"snippet": "CONF_DOOR_NUMBER = 'door_number'"
},
{
"identifier": "CONF_CARDS",
"path": "custom_components/uhppoted/const.py",
"snippet": "CONF_CARDS = 'cards'"
},
{
"identifier": "CONF_CARD_UNIQUE_ID",
"path": "custom_components/uhppoted/const.py",
"snippet": "CONF_CARD_UNIQUE_ID = 'card_unique_id'"
},
{
"identifier": "CONF_CARD_NUMBER",
"path": "custom_components/uhppoted/const.py",
"snippet": "CONF_CARD_NUMBER = 'card_number'"
},
{
"identifier": "CONF_CARD_NAME",
"path": "custom_components/uhppoted/const.py",
"snippet": "CONF_CARD_NAME = 'card_name'"
},
{
"identifier": "CONF_CARD_STARTDATE",
"path": "custom_components/uhppoted/const.py",
"snippet": "CONF_CARD_STARTDATE = 'card_startdate'"
},
{
"identifier": "CONF_CARD_ENDDATE",
"path": "custom_components/uhppoted/const.py",
"snippet": "CONF_CARD_ENDDATE = 'card_enddate'"
},
{
"identifier": "CONF_CARD_DOORS",
"path": "custom_components/uhppoted/const.py",
"snippet": "CONF_CARD_DOORS = 'card_doors'"
},
{
"identifier": "DEFAULT_CONTROLLER_ID",
"path": "custom_components/uhppoted/const.py",
"snippet": "DEFAULT_CONTROLLER_ID = 'Alpha'"
},
{
"identifier": "DEFAULT_CONTROLLER_ADDR",
"path": "custom_components/uhppoted/const.py",
"snippet": "DEFAULT_CONTROLLER_ADDR = '192.168.1.100'"
},
{
"identifier": "DEFAULT_CONTROLLER_TIMEZONE",
"path": "custom_components/uhppoted/const.py",
"snippet": "DEFAULT_CONTROLLER_TIMEZONE = 'LOCAL'"
},
{
"identifier": "DEFAULT_DOOR1",
"path": "custom_components/uhppoted/const.py",
"snippet": "DEFAULT_DOOR1 = 'Gryffindor'"
},
{
"identifier": "DEFAULT_DOOR2",
"path": "custom_components/uhppoted/const.py",
"snippet": "DEFAULT_DOOR2 = 'Ravenclaw'"
},
{
"identifier": "DEFAULT_DOOR3",
"path": "custom_components/uhppoted/const.py",
"snippet": "DEFAULT_DOOR3 = 'Hufflepuff'"
},
{
"identifier": "DEFAULT_DOOR4",
"path": "custom_components/uhppoted/const.py",
"snippet": "DEFAULT_DOOR4 = 'Slytherin'"
},
{
"identifier": "UhppotedOptionsFlow",
"path": "custom_components/uhppoted/options_flow.py",
"snippet": "class UhppotedOptionsFlow(OptionsFlow):\n\n def __init__(self, entry: ConfigEntry) -> None:\n self.config_entry = entry\n self.data = dict(entry.data)\n # self.options = dict(entry.options)\n self.options = copy.deepcopy(dict(entry.options))\n self.controllers = []\n self.doors = []\n self.configuration = {'doors': []}\n\n async def async_step_init(self, user_input: dict[str, Any] | None = None) -> FlowResult:\n return await self.async_step_IPv4()\n\n async def async_step_IPv4(self, user_input: Optional[Dict[str, Any]] = None):\n errors: Dict[str, str] = {}\n\n if user_input is not None:\n if not errors:\n self.options.update(user_input)\n return await self.async_step_controllers()\n\n bind = self.options[CONF_BIND_ADDR]\n broadcast = self.options[CONF_BROADCAST_ADDR]\n listen = self.options[CONF_LISTEN_ADDR]\n debug = self.options[CONF_DEBUG]\n\n schema = vol.Schema({\n vol.Optional(CONF_BIND_ADDR, default=bind): str,\n vol.Optional(CONF_BROADCAST_ADDR, default=broadcast): str,\n vol.Optional(CONF_LISTEN_ADDR, default=listen): str,\n vol.Optional(CONF_DEBUG, default=debug): bool,\n })\n\n return self.async_show_form(step_id=\"IPv4\", data_schema=schema, errors=errors)\n\n async def async_step_controllers(self, user_input: Optional[Dict[str, Any]] = None):\n\n def g(v):\n if self.options and CONF_CONTROLLERS in self.options:\n for c in self.options[CONF_CONTROLLERS]:\n if f'{c[CONF_CONTROLLER_SERIAL_NUMBER]}' == f'{v}':\n if c[CONF_CONTROLLER_ID] != '':\n return {\n 'label': f'{v} ({c[CONF_CONTROLLER_ID]})',\n 'value': f'{v}',\n }\n break\n return {\n 'label': f'{v}',\n 'value': f'{v}',\n }\n\n errors: Dict[str, str] = {}\n\n if user_input is not None:\n if not errors:\n for v in user_input[CONF_CONTROLLERS]:\n self.controllers.append({\n 'controller': {\n 'name': '',\n 'serial_no': v,\n 'configured': False,\n },\n 'doors': None,\n })\n\n return await self.async_step_controller()\n\n controllers = get_all_controllers(self.options)\n if len(controllers) < 1:\n return await self.async_step_door()\n\n configured = set()\n if self.options and CONF_CONTROLLERS in self.options:\n for v in self.options[CONF_CONTROLLERS]:\n configured.add(int(f'{v[CONF_CONTROLLER_SERIAL_NUMBER]}'))\n\n configured = sorted(list(configured), reverse=True)\n\n try:\n validate_all_controllers(self.options)\n except ValueError as err:\n errors['base'] = f'{err}'\n\n schema = vol.Schema({\n vol.Required(CONF_CONTROLLERS, default=[f'{v}' for v in configured]):\n SelectSelector(\n SelectSelectorConfig(options=[g(v) for v in controllers],\n multiple=True,\n custom_value=False,\n mode=SelectSelectorMode.LIST)),\n })\n\n return self.async_show_form(step_id=\"controllers\", data_schema=schema, errors=errors)\n\n async def async_step_controller(self, user_input: Optional[Dict[str, Any]] = None):\n it = next((v for v in self.controllers if not v['controller']['configured']), None)\n if it == None:\n try:\n validate_all_controllers(self.options)\n return await self.async_step_doors()\n except ValueError as err:\n return await self.async_step_controllers()\n else:\n controller = it['controller']\n serial_no = controller['serial_no']\n\n errors: Dict[str, str] = {}\n\n if user_input is not None:\n name = user_input[CONF_CONTROLLER_ID]\n address = user_input[CONF_CONTROLLER_ADDR]\n timezone = user_input[CONF_CONTROLLER_TIMEZONE]\n\n try:\n validate_controller_id(serial_no, name, None)\n except ValueError as err:\n errors[CONF_CONTROLLER_ID] = f'{err}'\n\n if not errors:\n controllers = self.options[CONF_CONTROLLERS]\n\n 
for v in self.options[CONF_CONTROLLERS]:\n if int(f'{v[CONF_CONTROLLER_SERIAL_NUMBER]}') == int(f'{serial_no}'):\n if user_input[CONF_CONTROLLER_ID].strip() == '-':\n controllers.remove(v)\n else:\n v[CONF_CONTROLLER_ID] = name\n v[CONF_CONTROLLER_SERIAL_NUMBER] = serial_no\n v[CONF_CONTROLLER_ADDR] = address\n v[CONF_CONTROLLER_TIMEZONE] = timezone\n break\n else:\n if user_input[CONF_CONTROLLER_ID].strip() != '-':\n controllers.append({\n CONF_CONTROLLER_ID: name,\n CONF_CONTROLLER_SERIAL_NUMBER: serial_no,\n CONF_CONTROLLER_ADDR: address,\n CONF_CONTROLLER_TIMEZONE: timezone,\n })\n\n self.options.update({CONF_CONTROLLERS: controllers})\n\n controller['name'] = user_input[CONF_CONTROLLER_ID]\n controller['configured'] = True\n\n return await self.async_step_controller()\n\n defaults = {\n CONF_CONTROLLER_ID: DEFAULT_CONTROLLER_ID,\n CONF_CONTROLLER_ADDR: DEFAULT_CONTROLLER_ADDR,\n CONF_CONTROLLER_TIMEZONE: DEFAULT_CONTROLLER_TIMEZONE,\n }\n\n if CONF_CONTROLLERS in self.options:\n for v in self.options[CONF_CONTROLLERS]:\n if int(f'{v[CONF_CONTROLLER_SERIAL_NUMBER]}') == int(f'{serial_no}'):\n for k in [CONF_CONTROLLER_ID, CONF_CONTROLLER_ADDR, CONF_CONTROLLER_TIMEZONE]:\n if k in v:\n defaults[k] = v[k]\n break\n\n if user_input is not None:\n for k in [CONF_CONTROLLER_ID, CONF_CONTROLLER_ADDR, CONF_CONTROLLER_TIMEZONE]:\n if k in user_input:\n defaults[k] = user_input[k]\n\n schema = vol.Schema({\n vol.Required(CONF_CONTROLLER_ID, default=defaults[CONF_CONTROLLER_ID]): str,\n vol.Optional(CONF_CONTROLLER_ADDR, default=defaults[CONF_CONTROLLER_ADDR]): str,\n vol.Optional(CONF_CONTROLLER_TIMEZONE, default=defaults[CONF_CONTROLLER_TIMEZONE]): str,\n })\n\n return self.async_show_form(step_id=\"controller\",\n data_schema=schema,\n errors=errors,\n description_placeholders={\n \"serial_no\": serial_no,\n })\n\n async def async_step_doors(self, user_input: Optional[Dict[str, Any]] = None):\n\n def f(v):\n return v[CONF_CONTROLLER_ID] in [u['controller'] for u in self.configuration['doors']]\n\n def g(d):\n door = d[CONF_DOOR_ID]\n no = d[CONF_DOOR_NUMBER]\n return {\n 'label': f'Door {no} ({door})' if door else f'Door {no}',\n 'value': f'{no}',\n }\n\n all_doors = get_all_doors(self.options)\n it = next((v for v in all_doors if not f(v)), None)\n if it == None:\n return await self.async_step_door()\n else:\n controller = it[CONF_CONTROLLER_ID]\n serial_no = it[CONF_CONTROLLER_SERIAL_NUMBER]\n doors = it['doors']\n\n errors: Dict[str, str] = {}\n try:\n validate_all_doors(self.options)\n except ValueError as err:\n errors['base'] = f'{err}'\n\n if user_input is not None:\n self.configuration['doors'].append({\n 'controller': controller,\n 'serial_no': serial_no,\n 'doors': [int(f'{v}') for v in user_input['doors']],\n 'configured': False,\n })\n\n return await self.async_step_doors()\n\n select = SelectSelectorConfig(options=[g(v) for v in doors],\n multiple=True,\n custom_value=False,\n mode=SelectSelectorMode.LIST) # yapf: disable\n\n schema = vol.Schema({\n vol.Required('doors', default=[f'{v[CONF_DOOR_NUMBER]}' for v in doors if v[CONF_DOOR_ID]]):\n SelectSelector(select),\n })\n\n placeholders = {\n 'controller': f'{controller}',\n 'serial_no': f'{serial_no}',\n }\n\n return self.async_show_form(step_id=\"doors\",\n data_schema=schema,\n errors=errors,\n description_placeholders=placeholders)\n\n async def async_step_door(self, user_input: Optional[Dict[str, Any]] = None):\n\n def f(v):\n return len(v['doors']) > 0 and not v['configured']\n\n it = next((v for v in 
self.configuration['doors'] if f(v)), None)\n if it == None:\n try:\n validate_all_doors(self.options)\n return await self.async_step_cards()\n except ValueError as err:\n self.configuration['doors'] = []\n\n return await self.async_step_doors()\n\n else:\n controller = it['controller']\n serial_no = it['serial_no']\n doors = it['doors']\n\n errors: Dict[str, str] = {}\n if user_input is not None:\n l = [user_input[f'door{v}_id'] for v in doors]\n for d in doors:\n try:\n k = f'door{d}_id'\n v = user_input[k]\n validate_door_id(v, None)\n validate_door_duplicates(v, l)\n except ValueError as err:\n errors[k] = f'{err}'\n\n if not errors:\n l = self.options[CONF_DOORS]\n\n for door in doors:\n k = f'door{door}_id'\n for d in l:\n if d[CONF_DOOR_CONTROLLER] == controller and f'{d[CONF_DOOR_NUMBER]}' == f'{door}':\n if user_input[k].strip() == '-':\n l.remove(d)\n else:\n d[CONF_DOOR_ID] = user_input[k]\n break\n else:\n if user_input[k].strip() != '-':\n l.append({\n CONF_DOOR_ID: user_input[k],\n CONF_DOOR_CONTROLLER: controller,\n CONF_DOOR_NUMBER: door,\n })\n\n self.options.update({CONF_DOORS: l})\n it['configured'] = True\n\n return await self.async_step_door()\n\n defaults = {\n 'door1_id': DEFAULT_DOOR1,\n 'door2_id': DEFAULT_DOOR2,\n 'door3_id': DEFAULT_DOOR3,\n 'door4_id': DEFAULT_DOOR4,\n }\n\n if user_input is not None:\n for v in ['door1_id', 'door2_id', 'door3_id', 'door4_id']:\n if k in user_input:\n defaults[k] = user_input[k]\n\n for v in self.options[CONF_DOORS]:\n if v[CONF_DOOR_CONTROLLER] == controller and v[CONF_DOOR_NUMBER] == 1:\n defaults['door1_id'] = v[CONF_DOOR_ID]\n\n if v[CONF_DOOR_CONTROLLER] == controller and v[CONF_DOOR_NUMBER] == 2:\n defaults['door2_id'] = v[CONF_DOOR_ID]\n\n if v[CONF_DOOR_CONTROLLER] == controller and v[CONF_DOOR_NUMBER] == 3:\n defaults['door3_id'] = v[CONF_DOOR_ID]\n\n if v[CONF_DOOR_CONTROLLER] == controller and v[CONF_DOOR_NUMBER] == 4:\n defaults['door4_id'] = v[CONF_DOOR_ID]\n\n schema = vol.Schema({})\n\n if 1 in doors:\n schema = schema.extend({vol.Optional('door1_id', default=defaults['door1_id']): str})\n\n if 2 in doors:\n schema = schema.extend({vol.Optional('door2_id', default=defaults['door2_id']): str})\n\n if 3 in doors:\n schema = schema.extend({vol.Optional('door3_id', default=defaults['door3_id']): str})\n\n if 4 in doors:\n schema = schema.extend({vol.Optional('door4_id', default=defaults['door4_id']): str})\n\n placeholders = {\n 'controller': f'{controller}',\n 'serial_no': f'{serial_no}',\n }\n\n return self.async_show_form(step_id=\"door\",\n data_schema=schema,\n errors=errors,\n description_placeholders=placeholders)\n\n async def async_step_cards(self, user_input: Optional[Dict[str, Any]] = None):\n\n def g(c):\n card = c[CONF_CARD_NUMBER]\n cardholder = c[CONF_CARD_NAME]\n return {\n 'label': f'{card} ({cardholder})' if cardholder and cardholder.strip() != '' else f'{card}',\n 'value': f'{card}',\n }\n\n errors: Dict[str, str] = {}\n if user_input is not None:\n if not errors:\n self.configuration['cards'] = [{\n 'card': get_card(v, self.options),\n 'configured': False,\n } for v in user_input[CONF_CARDS]]\n\n return await self.async_step_card()\n\n cards = get_all_cards(self.options)\n defaults = [f'{v[CONF_CARD_NUMBER]}' for v in self.options[CONF_CARDS]] if CONF_CARDS in self.options else []\n\n # if len(cards) < 2:\n # self.configuration['cards'] = [{\n # 'card': v,\n # 'configured': False,\n # } for v in cards]\n #\n # return await self.async_step_card()\n\n select = SelectSelectorConfig(options=[g(v) for v 
in cards],\n multiple=True,\n custom_value=False,\n mode=SelectSelectorMode.LIST) # yapf: disable\n\n schema = vol.Schema({\n vol.Required(CONF_CARDS, default=defaults): SelectSelector(select),\n })\n\n return self.async_show_form(step_id=\"cards\", data_schema=schema, errors=errors)\n\n async def async_step_card(self, user_input: Optional[Dict[str, Any]] = None):\n\n def f(v):\n return not v['configured']\n\n it = next((v for v in self.configuration['cards'] if f(v)), None)\n if it == None:\n try:\n validate_all_cards(self.options)\n return self.async_create_entry(title=\"uhppoted\", data=self.options)\n except ValueError as err:\n self.configuration['cards'] = []\n return await self.async_step_cards()\n\n else:\n card = it['card'][CONF_CARD_NUMBER]\n cardholder = it['card'][CONF_CARD_NAME]\n unique_id = it['card'][CONF_CARD_UNIQUE_ID]\n\n errors: Dict[str, str] = {}\n if user_input is not None:\n try:\n validate_card_id(user_input[CONF_CARD_NAME])\n except ValueError as err:\n errors[CONF_CARD_NAME] = f'{err}'\n\n if not errors:\n v = self.options[CONF_CARDS] if CONF_CARDS in self.options else []\n\n for c in v:\n if int(f'{c[CONF_CARD_NUMBER]}') == int(f'{card}'):\n c[CONF_CARD_NAME] = user_input[CONF_CARD_NAME]\n break\n else:\n v.append({\n CONF_CARD_NUMBER: card,\n CONF_CARD_NAME: user_input[CONF_CARD_NAME],\n CONF_CARD_UNIQUE_ID: unique_id,\n })\n\n self.options.update({CONF_CARDS: v})\n it['configured'] = True\n\n return await self.async_step_card()\n\n defaults = {\n CONF_CARD_NAME: f'{cardholder}',\n }\n\n if user_input is not None:\n for v in [CONF_CARD_NAME]:\n if k in user_input:\n defaults[k] = user_input[k]\n\n schema = vol.Schema({\n vol.Required(CONF_CARD_NAME, default=defaults[CONF_CARD_NAME]): str,\n })\n\n placeholders = {\n 'card': f'{card}',\n 'cardholder': f'{cardholder}',\n }\n\n return self.async_show_form(step_id=\"card\",\n data_schema=schema,\n errors=errors,\n description_placeholders=placeholders)"
},
{
"identifier": "validate_controller_id",
"path": "custom_components/uhppoted/config.py",
"snippet": "def validate_controller_id(serial_no, name, options):\n if not name or name.strip() == '':\n raise ValueError(ERR_INVALID_CONTROLLER_ID)\n\n if options and CONF_CONTROLLERS in options:\n for v in options[CONF_CONTROLLERS]:\n if normalise(v[CONF_CONTROLLER_ID]) == normalise(name):\n if int(f'{v[CONF_CONTROLLER_SERIAL_NUMBER]}') != int(f'{serial_no}'):\n raise ValueError(ERR_DUPLICATE_CONTROLLER_ID)"
},
{
"identifier": "validate_door_id",
"path": "custom_components/uhppoted/config.py",
"snippet": "def validate_door_id(name, options):\n if not name or name.strip() == '':\n raise ValueError(ERR_INVALID_DOOR_ID)\n\n if name.strip() != '-' and options and CONF_DOORS in options:\n for v in options[CONF_DOORS]:\n if normalise(v[CONF_DOOR_ID]) == normalise(name):\n raise ValueError(ERR_DUPLICATE_DOOR_ID)"
},
{
"identifier": "validate_door_duplicates",
"path": "custom_components/uhppoted/config.py",
"snippet": "def validate_door_duplicates(name, doors):\n normalised = [normalise(v) for v in doors]\n normalised = [v for v in normalised if v != '']\n\n if normalised.count(normalise(name)) > 1:\n raise ValueError(ERR_DUPLICATE_DOOR_ID)"
},
{
"identifier": "validate_card_id",
"path": "custom_components/uhppoted/config.py",
"snippet": "def validate_card_id(name):\n if not name or name.strip() == '':\n raise ValueError(ERR_INVALID_CARD_ID)"
},
{
"identifier": "validate_all_cards",
"path": "custom_components/uhppoted/config.py",
"snippet": "def validate_all_cards(options):\n pass"
},
{
"identifier": "get_IPv4",
"path": "custom_components/uhppoted/config.py",
"snippet": "def get_IPv4(defaults):\n bind = '0.0.0.0'\n broadcast = '255.255.255.255:60000'\n listen = '0.0.0.0:60001'\n debug = False\n\n if CONF_BIND_ADDR in defaults:\n bind = defaults[CONF_BIND_ADDR]\n\n if CONF_BROADCAST_ADDR in defaults:\n broadcast = defaults[CONF_BROADCAST_ADDR]\n\n if CONF_LISTEN_ADDR in defaults:\n listen = defaults[CONF_LISTEN_ADDR]\n\n if CONF_DEBUG in defaults:\n debug = defaults[CONF_DEBUG]\n\n return {\n CONF_BIND_ADDR: bind,\n CONF_BROADCAST_ADDR: broadcast,\n CONF_LISTEN_ADDR: listen,\n CONF_DEBUG: debug,\n }"
},
{
"identifier": "get_all_controllers",
"path": "custom_components/uhppoted/config.py",
"snippet": "def get_all_controllers(options):\n controllers = set()\n if CONF_CONTROLLERS in options:\n for v in options[CONF_CONTROLLERS]:\n controllers.add(int(f'{v[CONF_CONTROLLER_SERIAL_NUMBER]}'))\n\n try:\n bind = options[CONF_BIND_ADDR]\n broadcast = options[CONF_BROADCAST_ADDR]\n listen = options[CONF_LISTEN_ADDR]\n debug = options[CONF_DEBUG]\n u = uhppote.Uhppote(bind, broadcast, listen, debug)\n\n response = u.get_all_controllers()\n\n for v in response:\n controllers.add(v.controller)\n\n except Exception as e:\n _LOGGER.exception(f'error retrieving list of controllers ({e})')\n\n return sorted(list(controllers), reverse=True)"
},
{
"identifier": "get_all_cards",
"path": "custom_components/uhppoted/config.py",
"snippet": "def get_all_cards(options):\n cards = dict()\n\n # ... get controller cards\n bind = options[CONF_BIND_ADDR]\n broadcast = options[CONF_BROADCAST_ADDR]\n listen = options[CONF_LISTEN_ADDR]\n debug = options[CONF_DEBUG]\n u = uhppote.Uhppote(bind, broadcast, listen, debug)\n\n controllers = options[CONF_CONTROLLERS]\n\n for c in controllers:\n controller = int(f'{c[CONF_CONTROLLER_SERIAL_NUMBER]}'.strip())\n\n try:\n response = u.get_cards(controller)\n _LOGGER.info(f'{controller}: {response.cards} cards')\n\n N = min(response.cards, MAX_CARDS)\n ix = 1\n count = 0\n errors = 0\n\n while count < N and ix < MAX_CARD_INDEX and len(cards) < MAX_CARDS and errors < MAX_ERRORS:\n try:\n response = u.get_card_by_index(controller, ix)\n cards[response.card_number] = {\n CONF_CARD_NUMBER: response.card_number,\n CONF_CARD_UNIQUE_ID: uuid.uuid4(),\n CONF_CARD_NAME: None,\n CONF_CARD_STARTDATE: None,\n CONF_CARD_ENDDATE: None,\n CONF_CARD_DOORS: [],\n }\n count += 1\n ix += 1\n except Exception as e:\n errors += 1\n _LOGGER.warning(f'{controller} error retrieving card at index {ix} ({e})')\n\n except Exception as e:\n _LOGGER.warning(f'{controller} error retrieving list of cards ({e})')\n\n # ... add cards from options\n if options and CONF_CARDS in options:\n for v in options[CONF_CARDS]:\n k = int(f'{v[CONF_CARD_NUMBER]}')\n cards[k] = v\n\n # ... convert cards list to records\n\n return [cards[k] for k in sorted(cards.keys())]"
},
{
"identifier": "default_card_start_date",
"path": "custom_components/uhppoted/config.py",
"snippet": "def default_card_start_date():\n return datetime.date.today()"
},
{
"identifier": "default_card_end_date",
"path": "custom_components/uhppoted/config.py",
"snippet": "def default_card_end_date():\n today = datetime.date.today()\n end_date = today + datetime.timedelta(days=180)\n year = end_date.year\n month = end_date.month\n day = calendar.monthrange(end_date.year, end_date.month)[1]\n\n return datetime.date(year, month, day)"
}
] | import logging
import re
import uuid
import voluptuous as vol
from typing import Any
from typing import Dict
from typing import Optional
from homeassistant.core import HomeAssistant
from homeassistant.core import callback
from homeassistant.config_entries import ConfigFlow
from homeassistant.config_entries import OptionsFlow
from homeassistant.config_entries import ConfigEntry
from homeassistant.helpers import selector
from homeassistant.helpers.selector import SelectSelector
from homeassistant.helpers.selector import SelectSelectorConfig
from homeassistant.helpers.selector import SelectSelectorMode
from homeassistant.helpers import config_validation as cv
from uhppoted import uhppote
from .const import DOMAIN
from .const import CONF_BIND_ADDR
from .const import CONF_BROADCAST_ADDR
from .const import CONF_LISTEN_ADDR
from .const import CONF_DEBUG
from .const import CONF_CONTROLLERS
from .const import CONF_CONTROLLER_ID
from .const import CONF_CONTROLLER_SERIAL_NUMBER
from .const import CONF_CONTROLLER_ADDR
from .const import CONF_CONTROLLER_TIMEZONE
from .const import CONF_DOORS
from .const import CONF_DOOR_ID
from .const import CONF_DOOR_CONTROLLER
from .const import CONF_DOOR_NUMBER
from .const import CONF_CARDS
from .const import CONF_CARD_UNIQUE_ID
from .const import CONF_CARD_NUMBER
from .const import CONF_CARD_NAME
from .const import CONF_CARD_STARTDATE
from .const import CONF_CARD_ENDDATE
from .const import CONF_CARD_DOORS
from .const import DEFAULT_CONTROLLER_ID
from .const import DEFAULT_CONTROLLER_ADDR
from .const import DEFAULT_CONTROLLER_TIMEZONE
from .const import DEFAULT_DOOR1
from .const import DEFAULT_DOOR2
from .const import DEFAULT_DOOR3
from .const import DEFAULT_DOOR4
from .options_flow import UhppotedOptionsFlow
from .config import validate_controller_id
from .config import validate_door_id
from .config import validate_door_duplicates
from .config import validate_card_id
from .config import validate_all_cards
from .config import get_IPv4
from .config import get_all_controllers
from .config import get_all_cards
from .config import default_card_start_date
from .config import default_card_end_date | 8,521 |
return await self.async_step_doors()
doors = []
if re.match('^[1234].*', f"{controller['serial_no']}"):
doors.append(1)
if re.match('^[234].*', f"{controller['serial_no']}"):
doors.append(2)
if re.match('^[34].*', f"{controller['serial_no']}"):
doors.append(3)
if re.match('^[4].*', f"{controller['serial_no']}"):
doors.append(4)
select = selector.SelectSelectorConfig(options=[{ 'label': f'Door {v}', 'value': f'{v}' } for v in doors],
multiple=True,
custom_value=False,
mode=selector.SelectSelectorMode.LIST) # yapf: disable
schema = vol.Schema({
vol.Required('doors', default=[f'{v}' for v in doors]): selector.SelectSelector(select),
})
placeholders = {
'controller': f'{controller["name"]}',
'serial_no': f'{controller["serial_no"]}',
}
return self.async_show_form(step_id="doors",
data_schema=schema,
errors=errors,
description_placeholders=placeholders)
async def async_step_door(self, user_input: Optional[Dict[str, Any]] = None):
def f(v):
return len(v['doors']) > 0 and not v['configured']
it = next((v for v in self.controllers if f(v['doors'])), None)
if it == None:
return await self.async_step_cards()
else:
controller = it['controller']['name']
serial_no = it['controller']['serial_no']
doors = it['doors']['doors']
errors: Dict[str, str] = {}
if user_input is not None:
l = [user_input[f'door{v}_id'] for v in doors]
for d in doors:
try:
k = f'door{d}_id'
v = user_input[k]
validate_door_id(v, self.options)
validate_door_duplicates(v, l)
except ValueError as err:
errors[k] = f'{err}'
if not errors:
v = self.options[CONF_DOORS]
for d in doors:
v.append({
CONF_DOOR_ID: user_input[f'door{d}_id'],
CONF_DOOR_CONTROLLER: controller,
CONF_DOOR_NUMBER: int(f'{d}'),
})
self.options.update({CONF_DOORS: v})
it['doors']['configured'] = True
return await self.async_step_door()
defaults = {
'door1_id': DEFAULT_DOOR1,
'door2_id': DEFAULT_DOOR2,
'door3_id': DEFAULT_DOOR3,
'door4_id': DEFAULT_DOOR4,
}
if user_input is not None:
for k in ['door1_id', 'door2_id', 'door3_id', 'door4_id']:
if k in user_input:
defaults[k] = user_input[k]
schema = vol.Schema({})
if 1 in doors:
schema = schema.extend({vol.Required('door1_id', default=defaults['door1_id']): str})
if 2 in doors:
schema = schema.extend({vol.Required('door2_id', default=defaults['door2_id']): str})
if 3 in doors:
schema = schema.extend({vol.Required('door3_id', default=defaults['door3_id']): str})
if 4 in doors:
schema = schema.extend({vol.Required('door4_id', default=defaults['door4_id']): str})
placeholders = {
'controller': f'{it["controller"]["name"]}',
'serial_no': f'{it["controller"]["serial_no"]}',
}
return self.async_show_form(step_id="door",
data_schema=schema,
errors=errors,
description_placeholders=placeholders)
async def async_step_cards(self, user_input: Optional[Dict[str, Any]] = None):
errors: Dict[str, str] = {}
if user_input is not None:
if not errors:
self.configuration['cards'] = [{
'card': int(f'{v}'),
'unique_id': uuid.uuid4(),
'configured': False,
|
_LOGGER = logging.getLogger(__name__)
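# Home Assistant config flow for the uhppoted integration; the steps below run in order:
# IPv4 network settings -> controller selection -> per-controller details -> door selection -> door naming -> card selection.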
class UhppotedConfigFlow(ConfigFlow, domain=DOMAIN):
@staticmethod
@callback
def async_get_options_flow(config_entry: ConfigEntry) -> UhppotedOptionsFlow:
return UhppotedOptionsFlow(config_entry)
async def async_step_user(self, user_input: Optional[Dict[str, Any]] = None):
defaults = self.hass.data[DOMAIN] if DOMAIN in self.hass.data else {}
self.data = {}
self.options = {}
self.controllers = []
self.doors = []
self.configuration = {'cards': []}
self.options.update(get_IPv4(defaults))
self.options.update({
CONF_CONTROLLERS: [],
CONF_DOORS: [],
})
return await self.async_step_IPv4()
async def async_step_IPv4(self, user_input: Optional[Dict[str, Any]] = None):
errors: Dict[str, str] = {}
if user_input is not None:
if not errors:
self.options.update(user_input)
return await self.async_step_controllers()
bind = self.options[CONF_BIND_ADDR]
broadcast = self.options[CONF_BROADCAST_ADDR]
listen = self.options[CONF_LISTEN_ADDR]
debug = self.options[CONF_DEBUG]
schema = vol.Schema({
vol.Optional(CONF_BIND_ADDR, default=bind): str,
vol.Optional(CONF_BROADCAST_ADDR, default=broadcast): str,
vol.Optional(CONF_LISTEN_ADDR, default=listen): str,
vol.Optional(CONF_DEBUG, default=debug): bool,
})
return self.async_show_form(step_id="IPv4", data_schema=schema, errors=errors)
async def async_step_controllers(self, user_input: Optional[Dict[str, Any]] = None):
errors: Dict[str, str] = {}
if user_input is not None:
if not errors:
for v in user_input[CONF_CONTROLLERS]:
self.controllers.append({
'controller': {
'name': '',
'serial_no': v,
'configured': False,
},
'doors': None,
})
return await self.async_step_controller()
controllers = get_all_controllers(self.options)
if len(controllers) < 2:
for v in controllers:
self.controllers.append({
'controller': {
'name': '',
'serial_no': v,
'configured': False,
},
'doors': None,
})
return await self.async_step_controller()
schema = vol.Schema({
vol.Required(CONF_CONTROLLERS, default=[f'{v}' for v in controllers]):
SelectSelector(
SelectSelectorConfig(options=[f'{v}' for v in controllers],
multiple=True,
custom_value=False,
mode=SelectSelectorMode.LIST)),
})
return self.async_show_form(step_id="controllers", data_schema=schema, errors=errors)
async def async_step_controller(self, user_input: Optional[Dict[str, Any]] = None):
it = next((v for v in self.controllers if not v['controller']['configured']), None)
if it == None:
return await self.async_step_doors()
else:
controller = it['controller']
errors: Dict[str, str] = {}
if user_input is not None:
name = user_input[CONF_CONTROLLER_ID]
serial_no = controller['serial_no']
address = user_input[CONF_CONTROLLER_ADDR]
timezone = user_input[CONF_CONTROLLER_TIMEZONE]
try:
validate_controller_id(serial_no, name, self.options)
except ValueError as err:
errors[CONF_CONTROLLER_ID] = f'{err}'
if not errors:
v = self.options[CONF_CONTROLLERS]
v.append({
CONF_CONTROLLER_ID: name,
CONF_CONTROLLER_SERIAL_NUMBER: serial_no,
CONF_CONTROLLER_ADDR: address,
CONF_CONTROLLER_TIMEZONE: timezone,
})
self.options.update({CONF_CONTROLLERS: v})
controller['name'] = user_input[CONF_CONTROLLER_ID]
controller['configured'] = True
return await self.async_step_controller()
defaults = {
CONF_CONTROLLER_ID: DEFAULT_CONTROLLER_ID,
CONF_CONTROLLER_ADDR: DEFAULT_CONTROLLER_ADDR,
CONF_CONTROLLER_TIMEZONE: DEFAULT_CONTROLLER_TIMEZONE,
}
if user_input is not None:
for k in [CONF_CONTROLLER_ID, CONF_CONTROLLER_ADDR, CONF_CONTROLLER_TIMEZONE]:
if k in user_input:
defaults[k] = user_input[k]
schema = vol.Schema({
vol.Required(CONF_CONTROLLER_ID, default=defaults[CONF_CONTROLLER_ID]): str,
vol.Optional(CONF_CONTROLLER_ADDR, default=defaults[CONF_CONTROLLER_ADDR]): str,
vol.Optional(CONF_CONTROLLER_TIMEZONE, default=defaults[CONF_CONTROLLER_TIMEZONE]): str,
})
return self.async_show_form(step_id="controller",
data_schema=schema,
errors=errors,
description_placeholders={
"serial_no": controller['serial_no'],
})
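# Select the doors to configure on each controller; the candidate door count is derived from the first digit of the serial number.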
async def async_step_doors(self, user_input: Optional[Dict[str, Any]] = None):
it = next((v for v in self.controllers if not v['doors']), None)
if it == None:
return await self.async_step_door()
else:
controller = it['controller']
errors: Dict[str, str] = {}
if user_input is not None:
if not errors:
it['doors'] = {
'doors': [int(f'{v}') for v in user_input['doors']],
'configured': False,
}
return await self.async_step_doors()
doors = []
if re.match('^[1234].*', f"{controller['serial_no']}"):
doors.append(1)
if re.match('^[234].*', f"{controller['serial_no']}"):
doors.append(2)
if re.match('^[34].*', f"{controller['serial_no']}"):
doors.append(3)
if re.match('^[4].*', f"{controller['serial_no']}"):
doors.append(4)
select = selector.SelectSelectorConfig(options=[{ 'label': f'Door {v}', 'value': f'{v}' } for v in doors],
multiple=True,
custom_value=False,
mode=selector.SelectSelectorMode.LIST) # yapf: disable
schema = vol.Schema({
vol.Required('doors', default=[f'{v}' for v in doors]): selector.SelectSelector(select),
})
placeholders = {
'controller': f'{controller["name"]}',
'serial_no': f'{controller["serial_no"]}',
}
return self.async_show_form(step_id="doors",
data_schema=schema,
errors=errors,
description_placeholders=placeholders)
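# Name the selected doors; blank and duplicate door IDs are rejected.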
async def async_step_door(self, user_input: Optional[Dict[str, Any]] = None):
def f(v):
return len(v['doors']) > 0 and not v['configured']
it = next((v for v in self.controllers if f(v['doors'])), None)
if it == None:
return await self.async_step_cards()
else:
controller = it['controller']['name']
serial_no = it['controller']['serial_no']
doors = it['doors']['doors']
errors: Dict[str, str] = {}
if user_input is not None:
l = [user_input[f'door{v}_id'] for v in doors]
for d in doors:
try:
k = f'door{d}_id'
v = user_input[k]
validate_door_id(v, self.options)
validate_door_duplicates(v, l)
except ValueError as err:
errors[k] = f'{err}'
if not errors:
v = self.options[CONF_DOORS]
for d in doors:
v.append({
CONF_DOOR_ID: user_input[f'door{d}_id'],
CONF_DOOR_CONTROLLER: controller,
CONF_DOOR_NUMBER: int(f'{d}'),
})
self.options.update({CONF_DOORS: v})
it['doors']['configured'] = True
return await self.async_step_door()
defaults = {
'door1_id': DEFAULT_DOOR1,
'door2_id': DEFAULT_DOOR2,
'door3_id': DEFAULT_DOOR3,
'door4_id': DEFAULT_DOOR4,
}
if user_input is not None:
for k in ['door1_id', 'door2_id', 'door3_id', 'door4_id']:
if k in user_input:
defaults[k] = user_input[k]
schema = vol.Schema({})
if 1 in doors:
schema = schema.extend({vol.Required('door1_id', default=defaults['door1_id']): str})
if 2 in doors:
schema = schema.extend({vol.Required('door2_id', default=defaults['door2_id']): str})
if 3 in doors:
schema = schema.extend({vol.Required('door3_id', default=defaults['door3_id']): str})
if 4 in doors:
schema = schema.extend({vol.Required('door4_id', default=defaults['door4_id']): str})
placeholders = {
'controller': f'{it["controller"]["name"]}',
'serial_no': f'{it["controller"]["serial_no"]}',
}
return self.async_show_form(step_id="door",
data_schema=schema,
errors=errors,
description_placeholders=placeholders)
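# Choose the cards to include; each selected card number is recorded with a fresh unique id.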
async def async_step_cards(self, user_input: Optional[Dict[str, Any]] = None):
errors: Dict[str, str] = {}
if user_input is not None:
if not errors:
self.configuration['cards'] = [{
'card': int(f'{v}'),
'unique_id': uuid.uuid4(),
'configured': False, | } for v in user_input[CONF_CARDS]] | 14 | 2023-11-06 18:46:49+00:00 | 12k |
shadowpa0327/FLORA | main.py | [
{
"identifier": "AverageMeter",
"path": "my_meter.py",
"snippet": "class AverageMeter:\n \"\"\"Computes and stores the average and current value\"\"\"\n\n def __init__(self):\n self._world_size = dist.get_world_size()\n self.reset()\n\n def reset(self):\n # local\n self._val = 0\n self._sum = 0\n self._count = 0\n # global\n self._history_avg = 0\n self._history_count = 0\n self._avg = None\n\n def update(self, val, n=1):\n self._val = val\n self._sum += val * n\n self._count += n\n self._avg = None\n\n @property\n def val(self):\n return self._val\n\n @property\n def count(self):\n return self._count + self._history_count\n\n @property\n def avg(self):\n if self._avg is None:\n # compute avg\n r = self._history_count / max(1, self._history_count + self._count)\n _avg = self._sum / max(1, self._count)\n self._avg = r * self._history_avg + (1.0 - r) * _avg\n return self._avg\n\n def sync(self):\n buf = torch.tensor([self._sum, self._count],\n dtype=torch.float32).cuda()\n buf = reduce_tensor(buf, 1)\n _sum, _count = buf.tolist()\n _avg = _sum / max(1, _count)\n r = self._history_count / max(1, self._history_count + _count)\n\n self._history_avg = r * self._history_avg + (1.0 - r) * _avg\n self._history_count += _count\n\n self._sum = 0\n self._count = 0\n\n self._avg = None"
},
{
"identifier": "get_config",
"path": "config.py",
"snippet": "def get_config(args):\n \"\"\"Get a yacs CfgNode object with default values.\"\"\"\n # Return a clone so that the defaults will not be altered\n # This is for the \"local variable\" use pattern\n config = _C.clone()\n update_config(config, args)\n\n return config"
},
{
"identifier": "build_model",
"path": "models/build.py",
"snippet": "def build_model(config):\n model_type = config.MODEL.TYPE\n if model_type == 'swin':\n model = SwinTransformer(\n img_size=config.DATA.IMG_SIZE,\n patch_size=config.MODEL.SWIN.PATCH_SIZE,\n in_chans=config.MODEL.SWIN.IN_CHANS,\n num_classes=config.MODEL.NUM_CLASSES,\n embed_dim=config.MODEL.SWIN.EMBED_DIM,\n depths=config.MODEL.SWIN.DEPTHS,\n num_heads=config.MODEL.SWIN.NUM_HEADS,\n window_size=config.MODEL.SWIN.WINDOW_SIZE,\n mlp_ratio=config.MODEL.SWIN.MLP_RATIO,\n qkv_bias=config.MODEL.SWIN.QKV_BIAS,\n qk_scale=config.MODEL.SWIN.QK_SCALE,\n drop_rate=config.MODEL.DROP_RATE,\n drop_path_rate=config.MODEL.DROP_PATH_RATE,\n ape=config.MODEL.SWIN.APE,\n patch_norm=config.MODEL.SWIN.PATCH_NORM,\n use_checkpoint=config.TRAIN.USE_CHECKPOINT,\n fused_window_process=config.FUSED_WINDOW_PROCESS\n )\n elif model_type == 'deit':\n model = VisionTransformer(\n img_size=config.DATA.IMG_SIZE,\n patch_size=config.MODEL.DEIT.PATCH_SIZE,\n in_chans=config.MODEL.DEIT.IN_CHANS,\n num_classes=config.MODEL.NUM_CLASSES,\n embed_dim=config.MODEL.DEIT.EMBED_DIM,\n depth=config.MODEL.DEIT.DEPTH,\n num_heads = config.MODEL.DEIT.NUM_HEADS,\n mlp_ratio = config.MODEL.DEIT.MLP_RATIO,\n qkv_bias = config.MODEL.DEIT.QKV_BIAS,\n drop_rate=config.MODEL.DROP_RATE,\n drop_path_rate=config.MODEL.DROP_PATH_RATE,\n )\n elif model_type == 'lr_swin':\n model = LRSwinTransformer(\n img_size=config.DATA.IMG_SIZE,\n patch_size=config.MODEL.SWIN.PATCH_SIZE,\n in_chans=config.MODEL.SWIN.IN_CHANS,\n num_classes=config.MODEL.NUM_CLASSES,\n embed_dim=config.MODEL.SWIN.EMBED_DIM,\n depths=config.MODEL.SWIN.DEPTHS,\n num_heads=config.MODEL.SWIN.NUM_HEADS,\n window_size=config.MODEL.SWIN.WINDOW_SIZE,\n mlp_ratio=config.MODEL.SWIN.MLP_RATIO,\n qkv_bias=config.MODEL.SWIN.QKV_BIAS,\n qk_scale=config.MODEL.SWIN.QK_SCALE,\n drop_rate=config.MODEL.DROP_RATE,\n drop_path_rate=config.MODEL.DROP_PATH_RATE,\n ape=config.MODEL.SWIN.APE,\n patch_norm=config.MODEL.SWIN.PATCH_NORM,\n use_checkpoint=config.TRAIN.USE_CHECKPOINT,\n fused_window_process=config.FUSED_WINDOW_PROCESS\n )\n elif model_type == 'lr_swin_subnet':\n model = LRSwinTransformerSubnet(\n svd_config=config.MODEL.SWIN.SVD_CONFIG,\n img_size=config.DATA.IMG_SIZE,\n patch_size=config.MODEL.SWIN.PATCH_SIZE,\n in_chans=config.MODEL.SWIN.IN_CHANS,\n num_classes=config.MODEL.NUM_CLASSES,\n embed_dim=config.MODEL.SWIN.EMBED_DIM,\n depths=config.MODEL.SWIN.DEPTHS,\n num_heads=config.MODEL.SWIN.NUM_HEADS,\n window_size=config.MODEL.SWIN.WINDOW_SIZE,\n mlp_ratio=config.MODEL.SWIN.MLP_RATIO,\n qkv_bias=config.MODEL.SWIN.QKV_BIAS,\n qk_scale=config.MODEL.SWIN.QK_SCALE,\n drop_rate=config.MODEL.DROP_RATE,\n drop_path_rate=config.MODEL.DROP_PATH_RATE,\n ape=config.MODEL.SWIN.APE,\n patch_norm=config.MODEL.SWIN.PATCH_NORM,\n use_checkpoint=config.TRAIN.USE_CHECKPOINT,\n fused_window_process=config.FUSED_WINDOW_PROCESS\n )\n elif model_type == 'lr_deit':\n model = LRVisionTransformer(\n img_size=config.DATA.IMG_SIZE,\n patch_size=config.MODEL.DEIT.PATCH_SIZE,\n in_chans=config.MODEL.DEIT.IN_CHANS,\n num_classes=config.MODEL.NUM_CLASSES,\n embed_dim=config.MODEL.DEIT.EMBED_DIM,\n depth=config.MODEL.DEIT.DEPTH,\n num_heads = config.MODEL.DEIT.NUM_HEADS,\n mlp_ratio = config.MODEL.DEIT.MLP_RATIO,\n qkv_bias = config.MODEL.DEIT.QKV_BIAS,\n drop_rate=config.MODEL.DROP_RATE,\n drop_path_rate=config.MODEL.DROP_PATH_RATE,\n fused_lr=config.MODEL.DEIT.FUSE_LR,\n )\n elif model_type == 'lr_deit_subnet':\n model = LRVisionTransformerSubnet(\n svd_config = config.MODEL.DEIT.SVD_CONFIG,\n 
img_size=config.DATA.IMG_SIZE,\n patch_size=config.MODEL.DEIT.PATCH_SIZE,\n in_chans=config.MODEL.DEIT.IN_CHANS,\n num_classes=config.MODEL.NUM_CLASSES,\n embed_dim=config.MODEL.DEIT.EMBED_DIM,\n depth=config.MODEL.DEIT.DEPTH,\n num_heads = config.MODEL.DEIT.NUM_HEADS,\n mlp_ratio = config.MODEL.DEIT.MLP_RATIO,\n qkv_bias = config.MODEL.DEIT.QKV_BIAS,\n drop_rate=config.MODEL.DROP_RATE,\n drop_path_rate=config.MODEL.DROP_PATH_RATE,\n )\n else:\n raise NotImplementedError(f\"Unkown model: {model_type}\")\n\n return model"
},
{
"identifier": "build_loader",
"path": "data/build.py",
"snippet": "def build_loader(config):\n config.defrost()\n dataset_train, config.MODEL.NUM_CLASSES = build_dataset(\n is_train=True, config=config)\n config.freeze()\n\n print(\n f\"local rank {config.LOCAL_RANK} / global rank {dist.get_rank()} successfully build train dataset\")\n dataset_val, _ = build_dataset(is_train=False, config=config)\n print(\n f\"local rank {config.LOCAL_RANK} / global rank {dist.get_rank()} successfully build val dataset\")\n\n mixup_active = config.AUG.MIXUP > 0 or config.AUG.CUTMIX > 0. or config.AUG.CUTMIX_MINMAX is not None\n\n sampler_train = MyDistributedSampler(\n dataset_train, shuffle=True,\n drop_last=False, padding=True, pair=mixup_active and config.DISTILL.ENABLED,\n )\n\n sampler_val = MyDistributedSampler(\n dataset_val, shuffle=False,\n drop_last=False, padding=False, pair=False,\n )\n\n # TinyViT Dataset Wrapper\n if config.DISTILL.ENABLED:\n dataset_train = DatasetWrapper(dataset_train,\n logits_path=config.DISTILL.TEACHER_LOGITS_PATH,\n topk=config.DISTILL.LOGITS_TOPK,\n write=config.DISTILL.SAVE_TEACHER_LOGITS,\n )\n\n data_loader_train = torch.utils.data.DataLoader(\n dataset_train, sampler=sampler_train,\n batch_size=config.DATA.BATCH_SIZE,\n num_workers=config.DATA.NUM_WORKERS,\n pin_memory=config.DATA.PIN_MEMORY,\n # modified for TinyViT, we save logits of all samples\n drop_last=not config.DISTILL.SAVE_TEACHER_LOGITS,\n )\n\n data_loader_val = torch.utils.data.DataLoader(\n dataset_val, sampler=sampler_val,\n batch_size=int(config.DATA.BATCH_SIZE*1.5),\n shuffle=False,\n num_workers=config.DATA.NUM_WORKERS,\n pin_memory=config.DATA.PIN_MEMORY,\n drop_last=False\n )\n\n # setup mixup / cutmix\n mixup_fn = None\n if mixup_active:\n mixup_t = Mixup if not config.DISTILL.ENABLED else Mixup_record\n if config.DISTILL.ENABLED and config.AUG.MIXUP_MODE != \"pair2\":\n # change to pair2 mode for saving logits\n config.defrost()\n config.AUG.MIXUP_MODE = 'pair2'\n config.freeze()\n mixup_fn = mixup_t(\n mixup_alpha=config.AUG.MIXUP, cutmix_alpha=config.AUG.CUTMIX, cutmix_minmax=config.AUG.CUTMIX_MINMAX,\n prob=config.AUG.MIXUP_PROB, switch_prob=config.AUG.MIXUP_SWITCH_PROB, mode=config.AUG.MIXUP_MODE,\n label_smoothing=config.MODEL.LABEL_SMOOTHING, num_classes=config.MODEL.NUM_CLASSES)\n\n return dataset_train, dataset_val, data_loader_train, data_loader_val, mixup_fn"
},
{
"identifier": "build_scheduler",
"path": "lr_scheduler.py",
"snippet": "def build_scheduler(config, optimizer, n_iter_per_epoch):\n num_steps = int(config.TRAIN.EPOCHS * n_iter_per_epoch)\n warmup_steps = int(config.TRAIN.WARMUP_EPOCHS * n_iter_per_epoch)\n decay_steps = int(\n config.TRAIN.LR_SCHEDULER.DECAY_EPOCHS * n_iter_per_epoch)\n\n lr_scheduler = None\n if config.TRAIN.LR_SCHEDULER.NAME == 'cosine':\n lr_scheduler = CosineLRScheduler(\n optimizer,\n t_initial=num_steps,\n lr_min=config.TRAIN.MIN_LR,\n warmup_lr_init=config.TRAIN.WARMUP_LR,\n warmup_t=warmup_steps,\n cycle_limit=1,\n t_in_epochs=False,\n )\n elif config.TRAIN.LR_SCHEDULER.NAME == 'linear':\n lr_scheduler = LinearLRScheduler(\n optimizer,\n t_initial=num_steps,\n lr_min_rate=0.01,\n warmup_lr_init=config.TRAIN.WARMUP_LR,\n warmup_t=warmup_steps,\n t_in_epochs=False,\n )\n elif config.TRAIN.LR_SCHEDULER.NAME == 'step':\n lr_scheduler = StepLRScheduler(\n optimizer,\n decay_t=decay_steps,\n decay_rate=config.TRAIN.LR_SCHEDULER.DECAY_RATE,\n warmup_lr_init=config.TRAIN.WARMUP_LR,\n warmup_t=warmup_steps,\n t_in_epochs=False,\n )\n return lr_scheduler"
},
{
"identifier": "build_optimizer",
"path": "optimizer.py",
"snippet": "def build_optimizer(config, model):\n \"\"\"\n Build optimizer, set weight decay of normalization to 0 by default.\n \"\"\"\n skip = {}\n skip_keywords = {}\n if hasattr(model, 'no_weight_decay'):\n skip = model.no_weight_decay()\n if hasattr(model, 'no_weight_decay_keywords'):\n skip_keywords = model.no_weight_decay_keywords()\n parameters = set_weight_decay(model, skip, skip_keywords)\n\n opt_lower = config.TRAIN.OPTIMIZER.NAME.lower()\n optimizer = None\n if opt_lower == 'sgd':\n optimizer = optim.SGD(parameters, momentum=config.TRAIN.OPTIMIZER.MOMENTUM, nesterov=True,\n lr=config.TRAIN.BASE_LR, weight_decay=config.TRAIN.WEIGHT_DECAY)\n elif opt_lower == 'adamw':\n optimizer = optim.AdamW(parameters, eps=config.TRAIN.OPTIMIZER.EPS, betas=config.TRAIN.OPTIMIZER.BETAS,\n lr=config.TRAIN.BASE_LR, weight_decay=config.TRAIN.WEIGHT_DECAY)\n\n return optimizer"
},
{
"identifier": "create_logger",
"path": "logger.py",
"snippet": "@functools.lru_cache()\ndef create_logger(output_dir, dist_rank=0, name=''):\n # create logger\n logger = logging.getLogger(name)\n logger.setLevel(logging.DEBUG)\n logger.propagate = False\n\n # create formatter\n fmt = '[%(asctime)s %(name)s] (%(filename)s %(lineno)d): %(levelname)s %(message)s'\n color_fmt = colored('[%(asctime)s %(name)s]', 'green') + \\\n colored('(%(filename)s %(lineno)d)', 'yellow') + \\\n ': %(levelname)s %(message)s'\n\n # create console handlers for master process\n if dist_rank == 0:\n console_handler = logging.StreamHandler(sys.stdout)\n console_handler.setLevel(logging.DEBUG)\n console_handler.setFormatter(\n logging.Formatter(fmt=color_fmt, datefmt='%Y-%m-%d %H:%M:%S'))\n logger.addHandler(console_handler)\n\n # create file handlers\n file_handler = logging.FileHandler(os.path.join(\n output_dir, f'log_rank{dist_rank}.txt'), mode='a')\n file_handler.setLevel(logging.DEBUG)\n file_handler.setFormatter(logging.Formatter(\n fmt=fmt, datefmt='%Y-%m-%d %H:%M:%S'))\n logger.addHandler(file_handler)\n\n return logger"
},
{
"identifier": "load_checkpoint",
"path": "utils.py",
"snippet": "def load_checkpoint(config, model, optimizer, lr_scheduler, loss_scaler, logger, search_space = None):\n logger.info(\n f\"==============> Resuming form {config.MODEL.RESUME}....................\")\n if config.MODEL.RESUME.startswith('https'):\n checkpoint = torch.hub.load_state_dict_from_url(\n config.MODEL.RESUME, map_location='cpu', check_hash=True)\n else:\n checkpoint = torch.load(config.MODEL.RESUME, map_location='cpu')\n\n params = checkpoint['model']\n now_model_state = model.state_dict()\n mnames = ['head.weight', 'head.bias'] # (cls, 1024), (cls, )\n if mnames[-1] in params:\n ckpt_head_bias = params[mnames[-1]]\n now_model_bias = now_model_state[mnames[-1]]\n if ckpt_head_bias.shape != now_model_bias.shape:\n num_classes = 1000\n\n if len(ckpt_head_bias) == 21841 and len(now_model_bias) == num_classes:\n logger.info(\"Convert checkpoint from 21841 to 1k\")\n # convert 22kto1k\n fname = './imagenet_1kto22k.txt'\n with open(fname) as fin:\n mapping = torch.Tensor(\n list(map(int, fin.readlines()))).to(torch.long)\n for name in mnames:\n v = params[name]\n shape = list(v.shape)\n shape[0] = num_classes\n mean_v = v[mapping[mapping != -1]].mean(0, keepdim=True)\n v = torch.cat([v, mean_v], 0)\n v = v[mapping]\n params[name] = v\n\n msg = model.load_state_dict(params, strict=False)\n logger.info(msg)\n max_accuracy = 0.0\n if not config.EVAL_MODE:\n if 'optimizer' in checkpoint and 'lr_scheduler' in checkpoint:\n if optimizer is not None:\n optimizer.load_state_dict(checkpoint['optimizer'])\n if lr_scheduler is not None:\n lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])\n if 'scaler' in checkpoint:\n loss_scaler.load_state_dict(checkpoint['scaler'])\n logger.info(\n f\"=> loaded successfully '{config.MODEL.RESUME}' (epoch {checkpoint['epoch']})\")\n if 'max_accuracy' in checkpoint:\n max_accuracy = checkpoint['max_accuracy']\n \n if 'search_space' in checkpoint and search_space is not None:\n search_space.load_state_dict(checkpoint['search_space'])\n logger.info(\n f\"=> Found existing search space: {search_space})\")\n logger.info(\n f\"=> loaded search space successfully\")\n \n if 'epoch' in checkpoint:\n config.defrost()\n config.TRAIN.START_EPOCH = checkpoint['epoch'] + 1\n config.freeze()\n\n del checkpoint\n torch.cuda.empty_cache()\n return max_accuracy"
},
{
"identifier": "load_pretrained",
"path": "utils.py",
"snippet": "def load_pretrained(config, model, logger):\n logger.info(\n f\"==============> Loading weight {config.MODEL.PRETRAINED} for fine-tuning......\")\n checkpoint = torch.load(config.MODEL.PRETRAINED, map_location='cpu')\n state_dict = checkpoint['model']\n\n # delete relative_position_index since we always re-init it\n relative_position_index_keys = [\n k for k in state_dict.keys() if \"relative_position_index\" in k]\n for k in relative_position_index_keys:\n del state_dict[k]\n\n # delete relative_coords_table since we always re-init it\n relative_position_index_keys = [\n k for k in state_dict.keys() if \"relative_coords_table\" in k]\n for k in relative_position_index_keys:\n del state_dict[k]\n\n # delete attn_mask since we always re-init it\n attn_mask_keys = [k for k in state_dict.keys() if \"attn_mask\" in k]\n for k in attn_mask_keys:\n del state_dict[k]\n\n model_state_dict = model.state_dict()\n\n # bicubic interpolate relative_position_bias_table if not match\n relative_position_bias_table_keys = [\n k for k in state_dict.keys() if \"relative_position_bias_table\" in k]\n for k in relative_position_bias_table_keys:\n relative_position_bias_table_pretrained = state_dict[k]\n relative_position_bias_table_current = model_state_dict[k]\n L1, nH1 = relative_position_bias_table_pretrained.size()\n L2, nH2 = relative_position_bias_table_current.size()\n if nH1 != nH2:\n logger.warning(f\"Error in loading {k}, passing......\")\n else:\n if L1 != L2:\n # bicubic interpolate relative_position_bias_table if not match\n S1 = int(L1 ** 0.5)\n S2 = int(L2 ** 0.5)\n relative_position_bias_table_pretrained_resized = torch.nn.functional.interpolate(\n relative_position_bias_table_pretrained.permute(1, 0).view(1, nH1, S1, S1), size=(S2, S2),\n mode='bicubic')\n state_dict[k] = relative_position_bias_table_pretrained_resized.view(\n nH2, L2).permute(1, 0)\n\n # bicubic interpolate attention_biases if not match\n relative_position_bias_table_keys = [\n k for k in state_dict.keys() if \"attention_biases\" in k]\n for k in relative_position_bias_table_keys:\n relative_position_bias_table_pretrained = state_dict[k]\n relative_position_bias_table_current = model_state_dict[k]\n nH1, L1 = relative_position_bias_table_pretrained.size()\n nH2, L2 = relative_position_bias_table_current.size()\n if nH1 != nH2:\n logger.warning(f\"Error in loading {k}, passing......\")\n else:\n if L1 != L2:\n # bicubic interpolate relative_position_bias_table if not match\n S1 = int(L1 ** 0.5)\n S2 = int(L2 ** 0.5)\n relative_position_bias_table_pretrained_resized = torch.nn.functional.interpolate(\n relative_position_bias_table_pretrained.view(1, nH1, S1, S1), size=(S2, S2),\n mode='bicubic')\n state_dict[k] = relative_position_bias_table_pretrained_resized.view(\n nH2, L2)\n\n # bicubic interpolate absolute_pos_embed if not match\n absolute_pos_embed_keys = [\n k for k in state_dict.keys() if \"absolute_pos_embed\" in k]\n for k in absolute_pos_embed_keys:\n # dpe\n absolute_pos_embed_pretrained = state_dict[k]\n absolute_pos_embed_current = model.state_dict()[k]\n _, L1, C1 = absolute_pos_embed_pretrained.size()\n _, L2, C2 = absolute_pos_embed_current.size()\n if C1 != C1:\n logger.warning(f\"Error in loading {k}, passing......\")\n else:\n if L1 != L2:\n S1 = int(L1 ** 0.5)\n S2 = int(L2 ** 0.5)\n absolute_pos_embed_pretrained = absolute_pos_embed_pretrained.reshape(\n -1, S1, S1, C1)\n absolute_pos_embed_pretrained = absolute_pos_embed_pretrained.permute(\n 0, 3, 1, 2)\n absolute_pos_embed_pretrained_resized 
= torch.nn.functional.interpolate(\n absolute_pos_embed_pretrained, size=(S2, S2), mode='bicubic')\n absolute_pos_embed_pretrained_resized = absolute_pos_embed_pretrained_resized.permute(\n 0, 2, 3, 1)\n absolute_pos_embed_pretrained_resized = absolute_pos_embed_pretrained_resized.flatten(\n 1, 2)\n state_dict[k] = absolute_pos_embed_pretrained_resized\n\n # check classifier, if not match, then re-init classifier to zero\n head_bias_pretrained = state_dict['head.bias']\n Nc1 = head_bias_pretrained.shape[0]\n Nc2 = model.head.bias.shape[0]\n if (Nc1 != Nc2):\n if Nc1 == 21841 and Nc2 == 1000:\n logger.info(\"loading ImageNet-21841 weight to ImageNet-1K ......\")\n map22kto1k_path = f'./imagenet_1kto22k.txt'\n with open(map22kto1k_path) as fin:\n mapping = torch.Tensor(\n list(map(int, fin.readlines()))).to(torch.long)\n for name in ['head.weight', 'head.bias']:\n v = state_dict[name]\n mean_v = v[mapping[mapping != -1]].mean(0, keepdim=True)\n v = torch.cat([v, mean_v], 0)\n v = v[mapping]\n state_dict[name] = v\n else:\n torch.nn.init.constant_(model.head.bias, 0.)\n torch.nn.init.constant_(model.head.weight, 0.)\n del state_dict['head.weight']\n del state_dict['head.bias']\n logger.warning(\n f\"Error in loading classifier head, re-init classifier head to 0\")\n\n msg = model.load_state_dict(state_dict, strict=False)\n logger.warning(msg)\n\n logger.info(f\"=> loaded successfully '{config.MODEL.PRETRAINED}'\")\n\n del checkpoint\n torch.cuda.empty_cache()"
},
{
"identifier": "save_checkpoint",
"path": "utils.py",
"snippet": "def save_checkpoint(config, epoch, model, max_accuracy, optimizer, lr_scheduler, loss_scaler, logger, search_space = None):\n save_state = {'model': model.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'lr_scheduler': lr_scheduler.state_dict(),\n 'max_accuracy': max_accuracy,\n 'scaler': loss_scaler.state_dict(),\n 'epoch': epoch,\n 'config': config}\n if search_space:\n save_state['search_space'] = search_space.state_dict()\n save_path = os.path.join(config.OUTPUT, f'ckpt_epoch_{epoch}.pth')\n logger.info(f\"{save_path} saving......\")\n torch.save(save_state, save_path)\n logger.info(f\"{save_path} saved !!!\")"
},
{
"identifier": "NativeScalerWithGradNormCount",
"path": "utils.py",
"snippet": "class NativeScalerWithGradNormCount:\n state_dict_key = \"amp_scaler\"\n\n def __init__(self):\n self._scaler = torch.cuda.amp.GradScaler()\n\n def __call__(self, loss, optimizer, clip_grad=None, parameters=None, create_graph=False, update_grad=True):\n self._scaler.scale(loss).backward(create_graph=create_graph)\n if update_grad:\n if clip_grad is not None:\n assert parameters is not None\n # unscale the gradients of optimizer's assigned params in-place\n self._scaler.unscale_(optimizer)\n norm = torch.nn.utils.clip_grad_norm_(parameters, clip_grad)\n else:\n self._scaler.unscale_(optimizer)\n norm = ampscaler_get_grad_norm(parameters)\n self._scaler.step(optimizer)\n self._scaler.update()\n else:\n norm = None\n return norm\n\n def state_dict(self):\n return self._scaler.state_dict()\n\n def load_state_dict(self, state_dict):\n self._scaler.load_state_dict(state_dict)"
},
{
"identifier": "auto_resume_helper",
"path": "utils.py",
"snippet": "def auto_resume_helper(output_dir):\n checkpoints = os.listdir(output_dir)\n checkpoints = [ckpt for ckpt in checkpoints if ckpt.endswith('pth')]\n print(f\"All checkpoints founded in {output_dir}: {checkpoints}\")\n if len(checkpoints) > 0:\n latest_checkpoint = max([os.path.join(output_dir, d)\n for d in checkpoints], key=os.path.getmtime)\n print(f\"The latest checkpoint founded: {latest_checkpoint}\")\n resume_file = latest_checkpoint\n else:\n resume_file = None\n return resume_file"
},
{
"identifier": "is_main_process",
"path": "utils.py",
"snippet": "def is_main_process():\n return dist.get_rank() == 0"
},
{
"identifier": "get_git_info",
"path": "utils.py",
"snippet": "def get_git_info():\n return dict(\n branch=get_cmd_output('git name-rev --name-only HEAD'),\n git_hash=get_cmd_output('git rev-parse HEAD'),\n )"
},
{
"identifier": "run_cmd",
"path": "utils.py",
"snippet": "def run_cmd(cmd):\n return subprocess.check_output(cmd.split(), universal_newlines=True).strip()"
},
{
"identifier": "build_low_rank_search_space",
"path": "nas_utils/rank_choices_manager.py",
"snippet": "def build_low_rank_search_space(args, config, force_uniform = False):\n per_block_searched_configs = None\n if config.NAS.LSSS.SEARCHED_CFG_PATH:\n import pickle\n with open(config.NAS.LSSS.SEARCHED_CFG_PATH, 'rb') as file:\n per_block_searched_configs = pickle.load(file)\n \n return LowRankSearchSpace(\n rank_choices = config.NAS.SEARCH_SPACE,\n num_blocks = config.NAS.NUM_BLOCKS,\n choices_per_blocks = config.NAS.NUM_CHOICES_PER_BLOCKS,\n is_non_uniform = False if force_uniform else config.NAS.NON_UNIFORM,\n per_block_searched_configs = per_block_searched_configs\n ) "
}
] | import os
import time
import random
import argparse
import datetime
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import wandb
from collections import defaultdict
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from timm.utils import accuracy
from my_meter import AverageMeter
from config import get_config
from models import build_model
from data import build_loader
from lr_scheduler import build_scheduler
from optimizer import build_optimizer
from logger import create_logger
from utils import load_checkpoint, load_pretrained, save_checkpoint,\
NativeScalerWithGradNormCount,\
auto_resume_helper, is_main_process,\
get_git_info, run_cmd
from nas_utils import build_low_rank_search_space | 9,696 |
if idx % config.PRINT_FREQ == 0:
memory_used = torch.cuda.max_memory_allocated() / (1024.0 * 1024.0)
logger.info(
f'Test: [{idx}/{len(data_loader)}]\t'
f'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
f'Loss {loss_meter.val:.4f} ({loss_meter.avg:.4f})\t'
f'Acc@1 {acc1_meter.val:.3f} ({acc1_meter.avg:.3f})\t'
f'Acc@5 {acc5_meter.val:.3f} ({acc5_meter.avg:.3f})\t'
f'Mem {memory_used:.0f}MB')
acc1_meter.sync()
acc5_meter.sync()
logger.info(
f' The number of validation samples is {int(acc1_meter.count)}')
logger.info(f' * Acc@1 {acc1_meter.avg:.3f} Acc@5 {acc5_meter.avg:.3f}')
return acc1_meter.avg, acc5_meter.avg, loss_meter.avg
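# Throughput benchmark: trace the model on a fixed random batch, warm up for T0 seconds, then time batches for at least T1 seconds.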
@torch.no_grad()
def throughput(data_loader, model, logger):
# we follow the throughput measurement of the LeViT repo (https://github.com/facebookresearch/LeViT/blob/main/speed_test.py)
model.eval()
T0, T1 = 10, 60
images, _ = next(iter(data_loader))
batch_size, _, H, W = images.shape
inputs = torch.randn(batch_size, 3, H, W).cuda(non_blocking=True)
# trace model to avoid python overhead
model = torch.jit.trace(model, inputs)
torch.cuda.empty_cache()
torch.cuda.synchronize()
start = time.time()
with torch.cuda.amp.autocast():
while time.time() - start < T0:
model(inputs)
timing = []
torch.cuda.synchronize()
with torch.cuda.amp.autocast():
while sum(timing) < T1:
start = time.time()
model(inputs)
torch.cuda.synchronize()
timing.append(time.time() - start)
timing = torch.as_tensor(timing, dtype=torch.float32)
throughput = batch_size / timing.mean().item()
logger.info(f"batch_size {batch_size} throughput {throughput}")
if __name__ == '__main__':
args, config = parse_option()
config.defrost()
if config.DISTILL.TEACHER_LOGITS_PATH:
config.DISTILL.ENABLED = True
if args.lsss:
config.NAS.LSSS.ENABLE = True
assert args.lsss_bid >= 0, "Please specify the block id for local search space searching"
config.NAS.LSSS.BLOCK_ID = args.lsss_bid
config.freeze()
if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
rank = int(os.environ["RANK"])
world_size = int(os.environ['WORLD_SIZE'])
print(f"RANK and WORLD_SIZE in environ: {rank}/{world_size}")
else:
rank = -1
world_size = -1
torch.cuda.set_device(config.LOCAL_RANK)
torch.distributed.init_process_group(
backend='nccl', init_method='env://', world_size=world_size, rank=rank)
torch.distributed.barrier()
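# Per-process seeding: the base seed is offset by the rank so workers draw different random streams.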
seed = config.SEED + dist.get_rank()
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
np.random.seed(seed)
random.seed(config.SEED)
cudnn.benchmark = True
    # linearly scale the learning rate according to the total batch size; this may not be optimal
linear_scaled_lr = config.TRAIN.BASE_LR * \
config.DATA.BATCH_SIZE * dist.get_world_size() / 512.0
linear_scaled_warmup_lr = config.TRAIN.WARMUP_LR * \
config.DATA.BATCH_SIZE * dist.get_world_size() / 512.0
linear_scaled_min_lr = config.TRAIN.MIN_LR * \
config.DATA.BATCH_SIZE * dist.get_world_size() / 512.0
    # gradient accumulation also needs to scale the learning rate
if config.TRAIN.ACCUMULATION_STEPS > 1:
linear_scaled_lr = linear_scaled_lr * config.TRAIN.ACCUMULATION_STEPS
linear_scaled_warmup_lr = linear_scaled_warmup_lr * config.TRAIN.ACCUMULATION_STEPS
linear_scaled_min_lr = linear_scaled_min_lr * config.TRAIN.ACCUMULATION_STEPS
config.defrost()
config.TRAIN.BASE_LR = linear_scaled_lr
config.TRAIN.WARMUP_LR = linear_scaled_warmup_lr
config.TRAIN.MIN_LR = linear_scaled_min_lr
config.freeze()
os.makedirs(config.OUTPUT, exist_ok=True)
logger = create_logger(output_dir=config.OUTPUT,
dist_rank=dist.get_rank(), name=f"{config.MODEL.NAME}")
if is_main_process():
path = os.path.join(config.OUTPUT, "config.json")
with open(path, "w") as f:
f.write(config.dump())
logger.info(f"Full config saved to {path}")
config_dict = dict(config)
#config_dict['git'] = get_git_info()
if args.use_wandb:
wandb_output_path = config.OUTPUT
wandb.init(project="TrimsformerV2", entity="brian1009", config=config_dict,
dir=wandb_output_path)
# print git info
logger.info('===== git =====')
| # --------------------------------------------------------
# Based on the code: TinyViT
# (https://github.com/microsoft/Cream/tree/main/TinyViT)
# Add Low Rank Supernet Training
# --------------------------------------------------------
try:
    import wandb
except ImportError:
wandb = None
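# NORM_ITER_LEN normalizes the wandb step axis: each epoch is mapped onto 100 logging
# steps via epoch * NORM_ITER_LEN + idx * NORM_ITER_LEN // num_steps, so curves from runs
# with different iterations per epoch line up (e.g. halfway through epoch 3 -> step ~350).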
NORM_ITER_LEN = 100
def parse_option():
parser = argparse.ArgumentParser(
'Swin Transformer training and evaluation script', add_help=False)
parser.add_argument('--cfg', type=str, required=True,
metavar="FILE", help='path to config file', )
parser.add_argument(
"--opts",
help="Modify config options by adding 'KEY VALUE' pairs. ",
default=None,
nargs='+',
)
# easy config modification
parser.add_argument('--batch-size', type=int,
help="batch size for single GPU")
parser.add_argument('--data-path', type=str, help='path to dataset')
parser.add_argument('--pretrained',
help='pretrained weight from checkpoint, could be imagenet22k pretrained weight')
parser.add_argument('--resume', help='resume from checkpoint')
parser.add_argument('--accumulation-steps', type=int,
help="gradient accumulation steps")
parser.add_argument('--use-checkpoint', action='store_true',
help="whether to use gradient checkpointing to save memory")
parser.add_argument('--disable_amp', action='store_true',
help='Disable pytorch amp')
parser.add_argument('--output', default='output', type=str, metavar='PATH',
help='root of output folder, the full path is <output>/<model_name>/<tag> (default: output)')
parser.add_argument('--tag', help='tag of experiment')
parser.add_argument('--eval', action='store_true',
help='Perform evaluation only')
parser.add_argument('--throughput', action='store_true',
help='Test throughput only')
parser.add_argument('--use-sync-bn', action='store_true',
default=False, help='sync bn')
parser.add_argument('--use-wandb', action='store_true',
default=False, help='use wandb to record log')
# distributed training
parser.add_argument("--local_rank", type=int,
help='local rank for DistributedDataParallel')
# NAS
parser.add_argument("--lsss", action='store_true', help = 'train only the local supernet', default=False)
parser.add_argument("--lsss-bid", type = int, help = "block id for the target transformer blocks", default = -1)
args = parser.parse_args()
config = get_config(args)
return args, config
def main(args, config):
dataset_train, dataset_val, data_loader_train, data_loader_val, mixup_fn = build_loader(
config)
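    # NAS.SEARCH_SPACE lists the candidate rank ratios per transformer block; taking
    # ratios[0] assumes the choices are ordered smallest-first, so smallest_config
    # describes the most compressed subnet (it is not referenced again inside main).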
supernet_config = config.NAS.SEARCH_SPACE
smallest_config = []
for ratios in supernet_config:
smallest_config.append(ratios[0])
logger.info(f"Creating model:{config.MODEL.TYPE}/{config.MODEL.NAME}")
model = build_model(config)
model.cuda()
if args.use_sync_bn:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
logger.info(str(model))
optimizer = build_optimizer(config, model)
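    # find_unused_parameters=True lets DDP tolerate parameters that receive no gradient
    # in an iteration, at some extra overhead; it is only enabled for the 'classic' model type.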
if 'classic' in config.MODEL.TYPE:
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[config.LOCAL_RANK], broadcast_buffers=False, find_unused_parameters = True)
else:
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[config.LOCAL_RANK], broadcast_buffers=False, find_unused_parameters = False)
loss_scaler = NativeScalerWithGradNormCount()
model_without_ddp = model.module
low_rank_search_space = build_low_rank_search_space(args, config)
if config.NAS.ENABLE:
if config.NAS.INIT_CONFIG is None:
cfg = low_rank_search_space.get_smallest_config()
else:
cfg = config.NAS.INIT_CONFIG
model_without_ddp.set_sample_config(cfg)
if config.NAS.LSSS.ENABLE:
logger.info(f"=> Now training the local supernet of block-{config.NAS.LSSS.BLOCK_ID}")
else:
logger.info(f"=> Srarting supernet training !")
logger.info(f"")
logger.info(f"=> Set init subnet config to be {cfg}")
logger.info(str(model))
n_parameters = sum(p.numel()
for p in model.parameters() if p.requires_grad)
logger.info(f"number of params: {n_parameters}")
if hasattr(model_without_ddp, 'flops'):
flops = model_without_ddp.flops()
logger.info(f"number of GFLOPs: {flops / 1e9}")
lr_scheduler = build_scheduler(config, optimizer, len(
data_loader_train) // config.TRAIN.ACCUMULATION_STEPS)
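    # Criterion selection: with distillation enabled, plain cross-entropy is computed against
    # the dense teacher distribution rebuilt in the training loop (soft probability targets,
    # which torch.nn.CrossEntropyLoss accepts in PyTorch >= 1.10); otherwise the loss follows
    # the augmentation settings (SoftTargetCrossEntropy for mixup/cutmix, label smoothing, or plain CE).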
if config.DISTILL.ENABLED:
# we disable MIXUP and CUTMIX when knowledge distillation
assert len(
config.DISTILL.TEACHER_LOGITS_PATH) > 0, "Please fill in DISTILL.TEACHER_LOGITS_PATH"
criterion = torch.nn.CrossEntropyLoss(reduction='mean')
else:
if config.AUG.MIXUP > 0.:
# smoothing is handled with mixup label transform
criterion = SoftTargetCrossEntropy()
elif config.MODEL.LABEL_SMOOTHING > 0.:
criterion = LabelSmoothingCrossEntropy(
smoothing=config.MODEL.LABEL_SMOOTHING)
else:
criterion = torch.nn.CrossEntropyLoss()
max_accuracy = 0.0
if config.TRAIN.AUTO_RESUME:
resume_file = auto_resume_helper(config.OUTPUT)
if resume_file:
if config.MODEL.RESUME:
logger.warning(
f"auto-resume changing resume file from {config.MODEL.RESUME} to {resume_file}")
config.defrost()
config.MODEL.RESUME = resume_file
config.freeze()
logger.info(f'auto resuming from {resume_file}')
else:
logger.info(
f'no checkpoint found in {config.OUTPUT}, ignoring auto resume')
if config.MODEL.RESUME:
max_accuracy = load_checkpoint(
config, model_without_ddp, optimizer, lr_scheduler, loss_scaler, logger)
acc1, acc5, loss = validate(args, config, data_loader_val, model)
logger.info(
f"Accuracy of the network on the {len(dataset_val)} test images: {acc1:.1f}%")
if config.EVAL_MODE:
return
if config.MODEL.PRETRAINED and (not config.MODEL.RESUME):
load_pretrained(config, model_without_ddp, logger)
acc1, acc5, loss = validate(args, config, data_loader_val, model)
logger.info(
f"Accuracy of the network on the {len(dataset_val)} test images: {acc1:.1f}%")
if config.EVAL_MODE:
return
if config.THROUGHPUT_MODE:
throughput(data_loader_val, model, logger)
return
logger.info("Start training")
start_time = time.time()
for epoch in range(config.TRAIN.START_EPOCH, config.TRAIN.EPOCHS):
# set_epoch for dataset_train when distillation
if hasattr(dataset_train, 'set_epoch'):
dataset_train.set_epoch(epoch)
data_loader_train.sampler.set_epoch(epoch)
if config.DISTILL.ENABLED:
train_one_epoch_distill_using_saved_logits(
args, config, model, criterion, data_loader_train, optimizer, epoch, mixup_fn,
lr_scheduler, loss_scaler, low_rank_search_space)
else:
train_one_epoch(args, config, model, criterion,
data_loader_train, optimizer, epoch, mixup_fn, lr_scheduler, loss_scaler,
low_rank_search_space)
if dist.get_rank() == 0 and (epoch % config.SAVE_FREQ == 0 or epoch == (config.TRAIN.EPOCHS - 1)):
save_checkpoint(config, epoch, model_without_ddp,
max_accuracy, optimizer, lr_scheduler, loss_scaler, logger)
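        # After each epoch, validation runs on the smallest subnet (or the smallest choice of
        # the searched block under LSSS), presumably as a conservative check of supernet quality.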
if config.NAS.ENABLE:
if config.NAS.LSSS.ENABLE:
test_cfg = low_rank_search_space.get_smallest_config_ith_block(config.NAS.LSSS.BLOCK_ID)
else:
test_cfg = low_rank_search_space.get_smallest_config()
model.module.set_sample_config(test_cfg)
logger.info(f"=> Set smallest subnets:{test_cfg}")
logger.info(f"=> # of flops: {model.module.flops() / 1e9}")
acc1, acc5, loss = validate(args, config, data_loader_val, model)
logger.info(
f"Accuracy of the network on the {len(dataset_val)} test images: {acc1:.1f}%")
max_accuracy = max(max_accuracy, acc1)
logger.info(f'Max accuracy: {max_accuracy:.2f}%')
if is_main_process() and args.use_wandb:
wandb.log({
f"val/acc@1": acc1,
f"val/acc@5": acc5,
f"val/loss": loss,
"epoch": epoch,
})
wandb.run.summary['epoch'] = epoch
wandb.run.summary['best_acc@1'] = max_accuracy
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
logger.info('Training time {}'.format(total_time_str))
def is_valid_grad_norm(num):
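    # The loss scaler is assumed to return None on iterations without an optimizer step
    # (gradient accumulation) and may report inf/nan norms when the AMP loss scale overflows;
    # such values are filtered out before updating the norm meter.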
if num is None:
return False
return not bool(torch.isinf(num)) and not bool(torch.isnan(num))
def set_bn_state(config, model):
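    # With TRAIN.EVAL_BN_WHEN_TRAINING set, BatchNorm layers are kept in eval mode during
    # training, so they normalize with (and do not update) their running statistics.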
if config.TRAIN.EVAL_BN_WHEN_TRAINING:
for m in model.modules():
if isinstance(m, torch.nn.modules.batchnorm._BatchNorm):
m.eval()
def train_one_epoch(args, config, model, criterion, data_loader, optimizer, epoch, mixup_fn, lr_scheduler, loss_scaler, low_rank_search_space):
model.train()
set_bn_state(config, model)
optimizer.zero_grad()
num_steps = len(data_loader)
batch_time = AverageMeter()
loss_meter = AverageMeter()
norm_meter = AverageMeter()
scaler_meter = AverageMeter()
acc1_meter = AverageMeter()
acc5_meter = AverageMeter()
start = time.time()
end = time.time()
for idx, (samples, targets) in enumerate(data_loader):
normal_global_idx = epoch * NORM_ITER_LEN + \
(idx * NORM_ITER_LEN // num_steps)
samples = samples.cuda(non_blocking=True)
targets = targets.cuda(non_blocking=True)
if config.NAS.LSSS.ENABLE:
subnet_cfg = low_rank_search_space.random_ith_block(config.NAS.LSSS.BLOCK_ID)
else:
subnet_cfg = low_rank_search_space.random()
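        # Weight-sharing supernet training: a random rank configuration is sampled every
        # iteration (only the target block is re-sampled under local search-space training)
        # and activated in the shared model before the forward pass.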
model.module.set_sample_config(subnet_cfg)
if mixup_fn is not None:
samples, targets = mixup_fn(samples, targets)
original_targets = targets.argmax(dim=1)
else:
original_targets = targets
with torch.cuda.amp.autocast(enabled=config.AMP_ENABLE):
outputs = model(samples)
loss = criterion(outputs, targets)
loss = loss / config.TRAIN.ACCUMULATION_STEPS
# this attribute is added by timm on one optimizer (adahessian)
is_second_order = hasattr(
optimizer, 'is_second_order') and optimizer.is_second_order
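        # NativeScalerWithGradNormCount (defined in utils, not shown here) is assumed to scale
        # the loss, run backward, optionally clip gradients, and step the optimizer only when
        # update_grad is True, returning the gradient norm for logging.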
grad_norm = loss_scaler(loss, optimizer, clip_grad=config.TRAIN.CLIP_GRAD,
parameters=model.parameters(), create_graph=is_second_order,
update_grad=(idx + 1) % config.TRAIN.ACCUMULATION_STEPS == 0)
if (idx + 1) % config.TRAIN.ACCUMULATION_STEPS == 0:
optimizer.zero_grad()
lr_scheduler.step_update(
(epoch * num_steps + idx) // config.TRAIN.ACCUMULATION_STEPS)
loss_scale_value = loss_scaler.state_dict()["scale"]
with torch.no_grad():
acc1, acc5 = accuracy(outputs, original_targets, topk=(1, 5))
acc1_meter.update(acc1.item(), targets.size(0))
acc5_meter.update(acc5.item(), targets.size(0))
torch.cuda.synchronize()
loss_meter.update(loss.item(), targets.size(0))
if is_valid_grad_norm(grad_norm):
norm_meter.update(grad_norm)
scaler_meter.update(loss_scale_value)
batch_time.update(time.time() - end)
end = time.time()
if idx % config.PRINT_FREQ == 0:
lr = optimizer.param_groups[0]['lr']
memory_used = torch.cuda.max_memory_allocated() / (1024.0 * 1024.0)
etas = batch_time.avg * (num_steps - idx)
logger.info(
f'Train: [{epoch}/{config.TRAIN.EPOCHS}][{idx}/{num_steps}]\t'
f'eta {datetime.timedelta(seconds=int(etas))} lr {lr:.6f}\t'
f'time {batch_time.val:.4f} ({batch_time.avg:.4f})\t'
f'loss {loss_meter.val:.4f} ({loss_meter.avg:.4f})\t'
f'grad_norm {norm_meter.val:.4f} ({norm_meter.avg:.4f})\t'
f'loss_scale {scaler_meter.val:.4f} ({scaler_meter.avg:.4f})\t'
f'mem {memory_used:.0f}MB')
if is_main_process() and args.use_wandb:
wandb.log({
"train/acc@1": acc1_meter.val,
"train/acc@5": acc5_meter.val,
"train/loss": loss_meter.val,
"train/grad_norm": norm_meter.val,
"train/loss_scale": scaler_meter.val,
"train/lr": lr,
}, step=normal_global_idx)
epoch_time = time.time() - start
logger.info(
f"EPOCH {epoch} training takes {datetime.timedelta(seconds=int(epoch_time))}")
def train_one_epoch_distill_using_saved_logits(args, config, model, criterion, data_loader, optimizer, epoch, mixup_fn, lr_scheduler, loss_scaler, low_rank_search_space):
model.train()
set_bn_state(config, model)
optimizer.zero_grad()
num_steps = len(data_loader)
batch_time = AverageMeter()
loss_meter = AverageMeter()
norm_meter = AverageMeter()
scaler_meter = AverageMeter()
meters = defaultdict(AverageMeter)
start = time.time()
end = time.time()
data_tic = time.time()
num_classes = config.MODEL.NUM_CLASSES
topk = config.DISTILL.LOGITS_TOPK
for idx, ((samples, targets), (logits_index, logits_value, seeds)) in enumerate(data_loader):
normal_global_idx = epoch * NORM_ITER_LEN + \
(idx * NORM_ITER_LEN // num_steps)
samples = samples.cuda(non_blocking=True)
targets = targets.cuda(non_blocking=True)
if config.NAS.LSSS.ENABLE:
subnet_cfg = low_rank_search_space.random_ith_block(config.NAS.LSSS.BLOCK_ID)
else:
subnet_cfg = low_rank_search_space.random()
model.module.set_sample_config(subnet_cfg)
if mixup_fn is not None:
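            # The stored per-sample seeds are assumed to replay the exact mixup/cutmix used when
            # the teacher logits were precomputed, keeping the saved logits aligned with the inputs.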
samples, targets = mixup_fn(samples, targets, seeds)
original_targets = targets.argmax(dim=1)
else:
original_targets = targets
meters['data_time'].update(time.time() - data_tic)
with torch.cuda.amp.autocast(enabled=config.AMP_ENABLE):
outputs = model(samples)
# recover teacher logits
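            # Only the top-k teacher probabilities are stored on disk; the leftover probability
            # mass is spread uniformly over the remaining classes before scattering the stored
            # values back to their class indices. Hypothetical example: num_classes=1000, topk=10,
            # stored values summing to 0.92 -> each of the other 990 classes gets (1 - 0.92) / 990 ~ 8.1e-5.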
logits_index = logits_index.long()
logits_value = logits_value.float()
logits_index = logits_index.cuda(non_blocking=True)
logits_value = logits_value.cuda(non_blocking=True)
minor_value = (1.0 - logits_value.sum(-1, keepdim=True)
) / (num_classes - topk)
minor_value = minor_value.repeat_interleave(num_classes, dim=-1)
outputs_teacher = minor_value.scatter_(-1, logits_index, logits_value)
loss = criterion(outputs, outputs_teacher)
loss = loss / config.TRAIN.ACCUMULATION_STEPS
# this attribute is added by timm on one optimizer (adahessian)
is_second_order = hasattr(
optimizer, 'is_second_order') and optimizer.is_second_order
grad_norm = loss_scaler(loss, optimizer, clip_grad=config.TRAIN.CLIP_GRAD,
parameters=model.parameters(), create_graph=is_second_order,
update_grad=(idx + 1) % config.TRAIN.ACCUMULATION_STEPS == 0)
if (idx + 1) % config.TRAIN.ACCUMULATION_STEPS == 0:
optimizer.zero_grad()
lr_scheduler.step_update(
(epoch * num_steps + idx) // config.TRAIN.ACCUMULATION_STEPS)
loss_scale_value = loss_scaler.state_dict()["scale"]
# compute accuracy
real_batch_size = len(original_targets)
acc1, acc5 = accuracy(outputs, original_targets, topk=(1, 5))
meters['train_acc1'].update(acc1.item(), real_batch_size)
meters['train_acc5'].update(acc5.item(), real_batch_size)
teacher_acc1, teacher_acc5 = accuracy(
outputs_teacher, original_targets, topk=(1, 5))
meters['teacher_acc1'].update(teacher_acc1.item(), real_batch_size)
meters['teacher_acc5'].update(teacher_acc5.item(), real_batch_size)
torch.cuda.synchronize()
loss_meter.update(loss.item(), real_batch_size)
if is_valid_grad_norm(grad_norm):
norm_meter.update(grad_norm)
scaler_meter.update(loss_scale_value)
batch_time.update(time.time() - end)
end = time.time()
data_tic = time.time()
if idx % config.PRINT_FREQ == 0:
lr = optimizer.param_groups[0]['lr']
memory_used = torch.cuda.max_memory_allocated() / (1024.0 * 1024.0)
etas = batch_time.avg * (num_steps - idx)
extra_meters_str = ''
for k, v in meters.items():
extra_meters_str += f'{k} {v.val:.4f} ({v.avg:.4f})\t'
logger.info(
f'Train: [{epoch}/{config.TRAIN.EPOCHS}][{idx}/{num_steps}]\t'
f'eta {datetime.timedelta(seconds=int(etas))} lr {lr:.6f}\t'
f'time {batch_time.val:.4f} ({batch_time.avg:.4f})\t'
f'loss {loss_meter.val:.4f} ({loss_meter.avg:.4f})\t'
f'grad_norm {norm_meter.val:.4f} ({norm_meter.avg:.4f})\t'
f'loss_scale {scaler_meter.val:.4f} ({scaler_meter.avg:.4f})\t'
f'{extra_meters_str}'
f'mem {memory_used:.0f}MB')
if is_main_process() and args.use_wandb:
acc1_meter, acc5_meter = meters['train_acc1'], meters['train_acc5']
wandb.log({
"train/acc@1": acc1_meter.val,
"train/acc@5": acc5_meter.val,
"train/loss": loss_meter.val,
"train/grad_norm": norm_meter.val,
"train/loss_scale": scaler_meter.val,
"train/lr": lr,
}, step=normal_global_idx)
epoch_time = time.time() - start
extra_meters_str = f'Train-Summary: [{epoch}/{config.TRAIN.EPOCHS}]\t'
for k, v in meters.items():
v.sync()
extra_meters_str += f'{k} {v.val:.4f} ({v.avg:.4f})\t'
logger.info(extra_meters_str)
logger.info(
f"EPOCH {epoch} training takes {datetime.timedelta(seconds=int(epoch_time))}")
@torch.no_grad()
def validate(args, config, data_loader, model, num_classes=1000):
criterion = torch.nn.CrossEntropyLoss()
model.eval()
batch_time = AverageMeter()
loss_meter = AverageMeter()
acc1_meter = AverageMeter()
acc5_meter = AverageMeter()
end = time.time()
for idx, (images, target) in enumerate(data_loader):
images = images.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
# compute output
with torch.cuda.amp.autocast(enabled=config.AMP_ENABLE):
output = model(images)
# measure accuracy and record loss
loss = criterion(output, target)
acc1, acc5 = accuracy(output, target, topk=(1, 5))
loss_meter.update(loss.item(), target.size(0))
acc1_meter.update(acc1.item(), target.size(0))
acc5_meter.update(acc5.item(), target.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if idx % config.PRINT_FREQ == 0:
memory_used = torch.cuda.max_memory_allocated() / (1024.0 * 1024.0)
logger.info(
f'Test: [{idx}/{len(data_loader)}]\t'
f'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
f'Loss {loss_meter.val:.4f} ({loss_meter.avg:.4f})\t'
f'Acc@1 {acc1_meter.val:.3f} ({acc1_meter.avg:.3f})\t'
f'Acc@5 {acc5_meter.val:.3f} ({acc5_meter.avg:.3f})\t'
f'Mem {memory_used:.0f}MB')
acc1_meter.sync()
acc5_meter.sync()
logger.info(
f' The number of validation samples is {int(acc1_meter.count)}')
logger.info(f' * Acc@1 {acc1_meter.avg:.3f} Acc@5 {acc5_meter.avg:.3f}')
return acc1_meter.avg, acc5_meter.avg, loss_meter.avg
@torch.no_grad()
def throughput(data_loader, model, logger):
# we follow the throughput measurement of LeViT repo (https://github.com/facebookresearch/LeViT/blob/main/speed_test.py)
model.eval()
T0, T1 = 10, 60
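    # Warm up for roughly T0 seconds, then keep timing forward passes until about T1 seconds
    # of measured time have accumulated; throughput is reported as batch_size / mean per-batch latency.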
images, _ = next(iter(data_loader))
batch_size, _, H, W = images.shape
inputs = torch.randn(batch_size, 3, H, W).cuda(non_blocking=True)
# trace model to avoid python overhead
model = torch.jit.trace(model, inputs)
torch.cuda.empty_cache()
torch.cuda.synchronize()
start = time.time()
with torch.cuda.amp.autocast():
while time.time() - start < T0:
model(inputs)
timing = []
torch.cuda.synchronize()
with torch.cuda.amp.autocast():
while sum(timing) < T1:
start = time.time()
model(inputs)
torch.cuda.synchronize()
timing.append(time.time() - start)
timing = torch.as_tensor(timing, dtype=torch.float32)
throughput = batch_size / timing.mean().item()
logger.info(f"batch_size {batch_size} throughput {throughput}")
if __name__ == '__main__':
args, config = parse_option()
config.defrost()
if config.DISTILL.TEACHER_LOGITS_PATH:
config.DISTILL.ENABLED = True
if args.lsss:
config.NAS.LSSS.ENABLE = True
assert args.lsss_bid >= 0, "Please specify the block id for local search space searching"
config.NAS.LSSS.BLOCK_ID = args.lsss_bid
config.freeze()
if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
rank = int(os.environ["RANK"])
world_size = int(os.environ['WORLD_SIZE'])
print(f"RANK and WORLD_SIZE in environ: {rank}/{world_size}")
else:
rank = -1
world_size = -1
torch.cuda.set_device(config.LOCAL_RANK)
torch.distributed.init_process_group(
backend='nccl', init_method='env://', world_size=world_size, rank=rank)
torch.distributed.barrier()
seed = config.SEED + dist.get_rank()
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
np.random.seed(seed)
random.seed(config.SEED)
cudnn.benchmark = True
    # linearly scale the learning rate according to the total batch size; this may not be optimal
linear_scaled_lr = config.TRAIN.BASE_LR * \
config.DATA.BATCH_SIZE * dist.get_world_size() / 512.0
linear_scaled_warmup_lr = config.TRAIN.WARMUP_LR * \
config.DATA.BATCH_SIZE * dist.get_world_size() / 512.0
linear_scaled_min_lr = config.TRAIN.MIN_LR * \
config.DATA.BATCH_SIZE * dist.get_world_size() / 512.0
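    # Example with hypothetical values: BASE_LR=1e-3, per-GPU batch 128, 8 GPUs
    # -> 1e-3 * 128 * 8 / 512 = 2e-3.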
    # gradient accumulation also needs to scale the learning rate
if config.TRAIN.ACCUMULATION_STEPS > 1:
linear_scaled_lr = linear_scaled_lr * config.TRAIN.ACCUMULATION_STEPS
linear_scaled_warmup_lr = linear_scaled_warmup_lr * config.TRAIN.ACCUMULATION_STEPS
linear_scaled_min_lr = linear_scaled_min_lr * config.TRAIN.ACCUMULATION_STEPS
config.defrost()
config.TRAIN.BASE_LR = linear_scaled_lr
config.TRAIN.WARMUP_LR = linear_scaled_warmup_lr
config.TRAIN.MIN_LR = linear_scaled_min_lr
config.freeze()
os.makedirs(config.OUTPUT, exist_ok=True)
logger = create_logger(output_dir=config.OUTPUT,
dist_rank=dist.get_rank(), name=f"{config.MODEL.NAME}")
if is_main_process():
path = os.path.join(config.OUTPUT, "config.json")
with open(path, "w") as f:
f.write(config.dump())
logger.info(f"Full config saved to {path}")
config_dict = dict(config)
#config_dict['git'] = get_git_info()
if args.use_wandb:
wandb_output_path = config.OUTPUT
wandb.init(project="TrimsformerV2", entity="brian1009", config=config_dict,
dir=wandb_output_path)
# print git info
logger.info('===== git =====') | logger.info(run_cmd('git rev-parse --abbrev-ref HEAD')) | 14 | 2023-11-03 09:54:45+00:00 | 12k |
fw-ai/fireworks_poe_bot | fireworks_poe_bot/__main__.py | [
{
"identifier": "FireworksPoeTextBot",
"path": "fireworks_poe_bot/fw_poe_text_bot.py",
"snippet": "class FireworksPoeTextBot(PoeBot):\n def __init__(\n self,\n model: str,\n api_key: str,\n environment: str,\n deployment: str,\n server_version: str,\n allow_attachments: bool,\n input_image_size: int,\n prompt_truncate_len: int,\n max_tokens: int,\n system_prompt_override: Optional[str],\n additional_args: Optional[Dict[str, int | str]],\n chat_format: Optional[str],\n alpaca_instruction_msg: Optional[str],\n completion_async_method: Callable = ChatCompletion.acreate,\n ):\n super().__init__()\n self.model = model\n self.api_key = api_key\n self.environment = environment\n self.deployment = deployment\n self.server_version = server_version\n self.input_image_size = input_image_size\n self.completion_async_method = completion_async_method\n self.allow_attachments = allow_attachments\n self.prompt_truncate_len = prompt_truncate_len\n self.max_tokens = max_tokens\n self.chat_format = chat_format\n self.alpaca_instruction_msg = alpaca_instruction_msg\n self.system_prompt_override = system_prompt_override\n self.additional_args = additional_args or {}\n\n def _log_warn(self, payload: Dict):\n payload = copy.copy(payload)\n payload.update(\n {\n \"severity\": \"WARNING\",\n \"environment\": self.environment,\n \"deployment\": self.deployment,\n \"model\": self.model,\n \"server_version\": self.server_version,\n }\n )\n log_warn(payload)\n\n def _log_info(self, payload: Dict):\n payload = copy.copy(payload)\n payload.update(\n {\n \"severity\": \"INFO\",\n \"environment\": self.environment,\n \"deployment\": self.deployment,\n \"model\": self.model,\n \"server_version\": self.server_version,\n }\n )\n log_info(payload)\n\n async def download_image_and_encode_to_base64(\n self,\n url: str,\n ) -> str:\n async with httpx.AsyncClient() as client:\n image_download_start = time.perf_counter()\n r = await client.get(url)\n image_download_end = time.perf_counter()\n if r.status_code == 200:\n resize_encode_start = time.perf_counter()\n pil_img = Image.open(io.BytesIO(r.content))\n pil_img = pil_img.convert(\"RGB\")\n width, height = pil_img.size\n if width >= height:\n new_size = (\n self.input_image_size,\n int(height * self.input_image_size / width),\n )\n else:\n new_size = (\n int(width * self.input_image_size / height),\n self.input_image_size,\n )\n pil_img_resized = pil_img.resize(new_size)\n buffered = io.BytesIO()\n pil_img_resized.save(buffered, format=\"JPEG\")\n img_buffer = buffered.getvalue()\n img = \"data:image/jpeg;base64,{}\".format(\n base64.b64encode(img_buffer).decode(\"utf-8\")\n )\n resize_encode_end = time.perf_counter()\n self._log_info(\n {\n \"download_image_ms\": int(\n (image_download_end - image_download_start) * 1000\n ),\n \"encode_image_ms\": int(\n (resize_encode_end - resize_encode_start) * 1000\n ),\n \"url\": url,\n }\n )\n return img\n raise Exception(f\"Unable to download image, error code {r.status_code}\")\n\n async def get_response(\n self, query: QueryRequest\n ) -> AsyncIterable[Union[PartialResponse, ServerSentEvent]]:\n if len(query.query) == 0:\n yield ErrorResponse(allow_retry=False, text=\"Empty query\")\n return\n\n orig_api_key = fireworks.client.api_key\n fireworks.client.api_key = self.api_key\n try:\n start_t = time.time()\n messages: List[ChatMessage] = []\n\n cumulative_image_size_mb = 0\n for protocol_message in query.query:\n log_msg = protocol_message.dict()\n\n # OpenAI/Fireworks use the \"assistant\" role for the LLM, but Poe uses the\n # \"bot\" role. Replace that one. 
Otherwise, ignore the role\n if protocol_message.role not in {\"system\", \"user\", \"bot\"}:\n self._log_warn({\"msg\": \"Unknown role\", **log_msg})\n continue\n if protocol_message.content_type not in {\"text/plain\", \"text/markdown\"}:\n self._log_warn({\"msg\": \"Unknown content type\", **log_msg})\n continue\n # TODO: support protocol_message.feedback and protocol_message.attachments\n # if needed\n img_base64 = None\n if protocol_message.role == \"bot\":\n role = \"assistant\"\n else:\n role = protocol_message.role\n if protocol_message.attachments and protocol_message.attachments[\n 0\n ].content_type in [\"image/png\", \"image/jpeg\"]:\n try:\n img_base64 = await self.download_image_and_encode_to_base64(\n protocol_message.attachments[0].url\n )\n except Exception as e:\n yield ErrorResponse(allow_retry=False, text=str(e))\n raise RuntimeError(str(e))\n\n if img_base64:\n if cumulative_image_size_mb > 8:\n # Apigee has a limit of 10MB for payload, we set image total limit to 8MB\n yield ErrorResponse(\n allow_retry=False, text=\"The total image size is too big\"\n )\n raise RuntimeError(\"The total image size is too big\")\n messages.append(\n {\n \"role\": role,\n \"content\": [\n {\"type\": \"text\", \"text\": protocol_message.content},\n {\n \"type\": \"image_url\",\n \"image_url\": {\"url\": img_base64},\n },\n ],\n }\n )\n cumulative_image_size_mb += len(img_base64) / 1024 / 1024\n else:\n messages.append({\"role\": role, \"content\": protocol_message.content})\n\n if self.system_prompt_override is not None:\n system_prompt_msg = None\n for msg in messages:\n if msg[\"role\"] == \"system\":\n system_prompt_msg = msg\n break\n if system_prompt_msg is None:\n system_prompt_msg = {\n \"role\": \"system\",\n }\n messages.insert(0, system_prompt_msg)\n\n system_prompt_msg[\"content\"] = [\n {\"type\": \"text\", \"text\": self.system_prompt_override},\n ]\n\n if self.chat_format == \"alpaca\":\n # Discard all messages except \"system\" and the last \"user\"\n # message\n system_message = None\n user_message = None\n for msg in messages:\n if msg[\"role\"] == \"system\":\n system_message = msg\n elif msg[\"role\"] == \"user\":\n user_message = msg\n\n new_messages = []\n if system_message is not None:\n new_messages.append(system_message)\n # Insert instruction message, if applicable\n if self.alpaca_instruction_msg is not None:\n new_messages.append(\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"text\", \"text\": self.alpaca_instruction_msg}\n ],\n }\n )\n if user_message is not None:\n user_message[\"role\"] = \"input\"\n # HACKS: move the image to the instruction message\n if isinstance(user_message[\"content\"], list):\n content_non_image = [x for x in user_message['content'] if (not isinstance(x, dict)) or x[\"type\"] != \"image_url\"]\n content_image = [x for x in user_message['content'] if isinstance(x, dict) and x[\"type\"] == \"image_url\"]\n if content_image:\n new_messages[-1][\"content\"].append(content_image[0])\n user_message[\"content\"] = content_non_image\n new_messages.append(user_message)\n else:\n if user_message is not None:\n new_messages.append(user_message)\n messages = new_messages\n\n self._log_info(\n {\n \"msg\": \"Request received\",\n **query.dict(),\n }\n )\n\n if self.chat_format != \"alpaca\":\n # The poe servers send us arbitrary lists of messages. We need to do a few things\n # to normalize for our chat completion API:\n # 1. Ensure that all assistant messages are preceded by a user message\n # 2. 
Merge adjacent messages from the same role\n # 3. Ensure that the last message is a user message\n\n # Ensure that all assistant messages are preceded by a user message\n for i in range(len(messages) - 1, -1, -1):\n if messages[i][\"role\"] == \"assistant\" and (\n i == 0 or messages[i - 1][\"role\"] != \"user\"\n ):\n self._log_warn(\n {\n \"msg\": f\"Assistant message {messages[i]} not preceded by user message\"\n }\n )\n messages.insert(i, {\"role\": \"user\", \"content\": \"\"})\n\n # Merge adjacent messages from the same role\n merged_messages = []\n\n # Now there could be images in the messages, in which case the message content is a list\n def merge_messages_groups(\n message_group: List[Union[str, List[Dict[str, Any]]]]\n ) -> Union[str, List[Dict[str, Any]]]:\n text = []\n images = []\n for msg in message_group:\n if isinstance(msg, str):\n text.append(msg)\n elif isinstance(msg, list):\n assert msg[0][\"type\"] == \"text\"\n text.append(msg[0][\"text\"])\n images.extend(msg[1:])\n if images:\n return [{\"type\": \"text\", \"text\": \" \".join(text)}, *images]\n return \" \".join(text)\n\n for role, group in groupby(messages, key=lambda x: x[\"role\"]):\n content = merge_messages_groups([message[\"content\"] for message in group])\n merged_messages.append({\"role\": role, \"content\": content})\n\n messages = merged_messages\n\n # Ensure last message is a user message\n if messages[-1][\"role\"] != \"user\":\n self._log_warn({\"msg\": f\"Last message {messages[-1]} not a user message\"})\n messages.append({\"role\": \"user\", \"content\": \"\"})\n\n additional_args = copy.deepcopy(self.additional_args)\n if \"stop\" in additional_args:\n stop_seqs = additional_args[\"stop\"]\n additional_args.pop(\"stop\")\n else:\n stop_seqs = query.stop_sequences[:4]\n generated_len = 0\n complete_response = \"\"\n async for response in self.completion_async_method(\n model=self.model,\n messages=messages,\n stream=True,\n request_timeout=600,\n temperature=query.temperature,\n stop=stop_seqs,\n max_tokens=self.max_tokens,\n prompt_truncate_len=self.prompt_truncate_len,\n **additional_args,\n ):\n # Step 3: Transform the CompletionStreamResponse into PartialResponse format\n for choice in response.choices:\n assert isinstance(choice, ChatCompletionResponseStreamChoice)\n if choice.delta.content is None:\n continue\n\n generated_len += len(choice.delta.content)\n complete_response += choice.delta.content\n yield PartialResponse(\n text=choice.delta.content,\n raw_response=response,\n request_id=response.id,\n )\n\n end_t = time.time()\n elapsed_sec = end_t - start_t\n self._log_info(\n {\n \"severity\": \"INFO\",\n \"msg\": \"Request completed\",\n \"query\": query.dict(),\n \"response\": complete_response,\n \"generated_len\": generated_len,\n \"elapsed_sec\": elapsed_sec,\n }\n )\n yield ServerSentEvent(event=\"done\")\n return\n except Exception as e:\n end_t = time.time()\n log_error(\n {\n \"severity\": \"ERROR\",\n \"msg\": \"Invalid request\",\n \"error\": \"\\n\".join(traceback.format_exception(e)),\n \"elapsed_sec\": end_t - start_t,\n \"query\": query.dict(),\n }\n )\n if \"prompt is too long\" in str(e):\n error_type = \"user_message_too_long\"\n else:\n error_type = None\n yield ErrorResponse(allow_retry=False, error_type=error_type, text=str(e))\n return\n finally:\n fireworks.client.api_key = orig_api_key\n\n async def get_settings(self, setting: SettingsRequest) -> SettingsResponse:\n return SettingsResponse(allow_attachments=self.allow_attachments)\n\n async def on_feedback(self, 
feedback_request: ReportFeedbackRequest) -> None:\n \"\"\"Override this to record feedback from the user.\"\"\"\n pass\n\n async def on_error(self, error_request: ReportErrorRequest) -> None:\n \"\"\"Override this to record errors from the Poe server.\"\"\"\n log_error(\n {\n \"severity\": \"ERROR\",\n \"msg\": \"Error reported\",\n **error_request.dict(),\n }\n )"
},
{
"identifier": "FireworksPoeImageBot",
"path": "fireworks_poe_bot/fw_poe_image_bot.py",
"snippet": "class FireworksPoeImageBot(PoeBot):\n def __init__(\n self,\n model: str,\n api_key: str,\n environment: str,\n deployment: str,\n server_version: str,\n gcs_bucket_name: str,\n num_steps: int,\n multi_turn: bool\n ):\n super().__init__()\n self.model = model\n self.api_key = api_key\n self.environment = environment\n self.deployment = deployment\n self.server_version = server_version\n\n model_atoms = model.split(\"/\")\n if len(model_atoms) != 4:\n raise ValueError(\n f\"Expected model name to be in the form accounts/{{modelname}}/models/{{model}}, but got {model}\"\n )\n\n if model_atoms[0] != \"accounts\" or model_atoms[2] != \"models\":\n raise ValueError(\n f\"Expected model name to be in the form accounts/{{modelname}}/models/{{model}}, but got {model}\"\n )\n\n self.account = model_atoms[1]\n self.model = model_atoms[3]\n\n self.client = ImageInference(account=self.account, model=self.model)\n\n self.num_steps = num_steps\n\n self.gcs_bucket_name = gcs_bucket_name\n self.multi_turn = multi_turn\n\n def _log_warn(self, payload: Dict):\n payload = copy.copy(payload)\n payload.update(\n {\n \"severity\": \"WARNING\",\n \"environment\": self.environment,\n \"deployment\": self.deployment,\n \"model\": self.model,\n \"server_version\": self.server_version,\n }\n )\n log_warn(payload)\n\n def _log_info(self, payload: Dict):\n payload = copy.copy(payload)\n payload.update(\n {\n \"severity\": \"INFO\",\n \"environment\": self.environment,\n \"deployment\": self.deployment,\n \"model\": self.model,\n \"server_version\": self.server_version,\n }\n )\n log_info(payload)\n\n async def get_response(\n self, query: QueryRequest\n ) -> AsyncIterable[Union[PartialResponse, ServerSentEvent]]:\n orig_api_key = self.client.api_key\n fireworks.client.api_key = self.api_key\n try:\n start_t = time.time()\n\n if len(query.query) == 0:\n yield ErrorResponse(allow_retry=False, text=\"Empty query\")\n return\n\n messages: List[ChatMessage] = []\n\n for protocol_message in query.query:\n # OpenAI/Fireworks use the \"assistant\" role for the LLM, but Poe uses the\n # \"bot\" role. Replace that one. Otherwise, ignore the role\n if protocol_message.role not in {\"system\", \"user\", \"bot\"}:\n self._log_warn({\"msg\": \"Unknown role\", **protocol_message})\n continue\n if protocol_message.content_type not in {\"text/plain\", \"text/markdown\"}:\n self._log_warn({\"msg\": \"Unknown content type\", **protocol_message})\n continue\n # TODO: support protocol_message.feedback and protocol_message.attachments\n # if needed\n if protocol_message.role == \"bot\":\n role = \"assistant\"\n else:\n role = protocol_message.role\n messages.append({\"role\": role, \"content\": protocol_message.content})\n\n self._log_info(\n {\n \"msg\": \"Request received\",\n **query.dict(),\n }\n )\n\n # The poe servers send us arbitrary lists of messages. We need to do a few things\n # to normalize for our chat completion API:\n # 1. Ensure that all assistant messages are preceded by a user message\n # 2. Merge adjacent messages from the same role\n # 3. 
Ensure that the last message is a user message\n\n # Ensure that all assistant messages are preceded by a user message\n for i in range(len(messages) - 1, -1, -1):\n if messages[i][\"role\"] == \"assistant\" and (\n i == 0 or messages[i - 1][\"role\"] != \"user\"\n ):\n self._log_warn(\n {\n \"msg\": f\"Assistant message {messages[i]} not preceded by user message\"\n }\n )\n messages.insert(i, {\"role\": \"user\", \"content\": \"\"})\n\n # Merge adjacent messages from the same role\n merged_messages = []\n\n for role, group in groupby(messages, key=lambda x: x[\"role\"]):\n content = \" \".join(message[\"content\"] for message in group)\n merged_messages.append({\"role\": role, \"content\": content})\n\n messages = merged_messages\n\n # Ensure last message is a user message\n if messages[-1][\"role\"] != \"user\":\n self._log_warn({\"msg\": f\"Last message {messages[-1]} not a user message\"})\n messages.append({\"role\": \"user\", \"content\": \"\"})\n\n # generated_len = 0\n\n assert messages[-1][\"role\"] == \"user\"\n prompt = messages[-1][\"content\"]\n\n # TODO: support specifying aspect ratio :)\n\n control_img_uri = None\n for messages in reversed(messages[:-1]):\n if messages[\"role\"] == \"assistant\" and messages[\"content\"].startswith(\n \":\n control_img_uri = messages[\"content\"][9:-1]\n\n if not self.multi_turn or control_img_uri is None:\n answer: Answer = await self.client.text_to_image_async(\n prompt=prompt,\n cfg_scale=7,\n height=1024,\n width=1024,\n sampler=None,\n steps=self.num_steps,\n seed=0,\n safety_check=True,\n output_image_format=\"JPG\",\n )\n else:\n downloaded_image = self._download_image(control_img_uri)\n\n # TODO: don't hardcode this\n min_val, max_val = 100, 200\n image = cv2.Canny(np.array(downloaded_image), min_val, max_val)\n image = image[:, :, None]\n image = np.concatenate([image, image, image], axis=2)\n image = Image.fromarray(image)\n\n answer: Answer = await self.client.control_net_async(\n control_image=image,\n control_net_name=\"canny\",\n conditioning_scale=0.5,\n prompt=prompt,\n cfg_scale=7,\n sampler=None,\n steps=self.num_steps,\n seed=0,\n safety_check=True,\n output_image_format=\"JPG\",\n # Add additional parameters here as necessary\n )\n end_t_inference = time.time()\n start_t_encode = time.time()\n\n if answer.finish_reason == \"CONTENT_FILTERED\":\n yield self.text_event(text=\"Potentially sensitive content detected\")\n\n public_image_url = self._upload_image_to_gcs(\n answer.image, self.gcs_bucket_name\n )\n response_text = f\"\"\n\n end_t = time.time()\n elapsed_sec = end_t - start_t\n self._log_info(\n {\n \"severity\": \"INFO\",\n \"msg\": \"Request completed\",\n **query.dict(),\n \"response\": response_text,\n \"elapsed_sec\": elapsed_sec,\n \"elapsed_sec_inference\": end_t_inference - start_t,\n \"elapsed_sec_upload\": end_t - start_t_encode,\n }\n )\n yield PartialResponse(text=response_text)\n yield ServerSentEvent(event=\"done\")\n return\n except Exception as e:\n end_t = time.time()\n log_error(\n {\n \"severity\": \"ERROR\",\n \"msg\": \"Invalid request\",\n \"error\": \"\\n\".join(traceback.format_exception(e)),\n \"elapsed_sec\": end_t - start_t,\n **query.dict(),\n }\n )\n if \"prompt is too long\" in str(e):\n error_type = \"user_message_too_long\"\n else:\n error_type = None\n yield ErrorResponse(allow_retry=False, error_type=error_type, text=str(e))\n return\n finally:\n fireworks.client.api_key = orig_api_key\n\n # Function to upload a PIL Image to an S3 bucket with a presigned URL\n def 
_upload_image_to_s3_with_ttl(\n self, bucket_name, object_name, image: Image, expiration=600\n ):\n \"\"\"\n Upload a PIL Image to an S3 bucket with TTL by generating a presigned URL.\n\n :param bucket_name: String name of the bucket to which the image is uploaded.\n :param object_name: S3 object name. If not specified then file_name is used.\n :param image: PIL Image object to be uploaded.\n :param expiration: Time in seconds for the presigned URL to remain valid.\n \"\"\"\n # In-memory binary streams\n in_mem_file = io.BytesIO()\n\n # Save the PIL image to in-memory file as JPEG\n image.save(in_mem_file, format=\"JPEG\")\n in_mem_file.seek(0) # Reset file pointer to the beginning\n\n # Upload the image to S3\n # self.s3_client.upload_fileobj(in_mem_file, bucket_name, object_name)\n self.s3_client.put_object(\n Bucket=self.s3_bucket_name,\n Key=object_name,\n Body=in_mem_file,\n ContentType=\"image/jpeg\",\n )\n\n # Generate a presigned URL for the S3 object\n url = self.s3_client.generate_presigned_url(\n \"get_object\",\n Params={\"Bucket\": bucket_name, \"Key\": object_name},\n ExpiresIn=expiration,\n )\n\n return url\n\n def _upload_image_to_gcs(self, image: Image, bucket_name: str):\n \"\"\"Uploads a given PIL.Image to a GCS bucket.\"\"\"\n # Generate a (statistically) unique filename with a uuid4\n random_uuid = str(uuid.uuid4()).replace(\"-\", \"\")\n filename = f\"{random_uuid}.jpg\"\n\n # Initialize the GCS client\n client = storage.Client()\n bucket = client.get_bucket(bucket_name)\n\n # Convert the PIL.Image to bytes\n img_byte_arr = io.BytesIO()\n image.save(img_byte_arr, format=\"JPEG\")\n img_byte_arr = img_byte_arr.getvalue()\n\n # Create a new blob (i.e., object) in the bucket and upload the image bytes\n blob = bucket.blob(filename)\n blob.upload_from_string(img_byte_arr, content_type=f\"image/jpeg\")\n\n blob.make_public()\n\n # The public URL can be accessed with the `public_url` attribute\n public_url = blob.public_url\n\n return public_url\n\n def _download_image(self, image_url):\n # Send an HTTP GET request to the image URL\n response = requests.get(image_url)\n\n # Check if the request was successful\n if response.status_code == 200:\n # Read the image content into an in-memory bytes buffer\n image_bytes = io.BytesIO(response.content)\n\n # Use Pillow to open the image from the bytes buffer\n img = Image.open(image_bytes)\n\n return img\n else:\n # If the request failed, raise an HTTPError with the response\n response.raise_for_status()\n\n async def get_settings(self, setting: SettingsRequest) -> SettingsResponse:\n \"\"\"Override this to return non-standard settings.\"\"\"\n return SettingsResponse()\n\n async def on_feedback(self, feedback_request: ReportFeedbackRequest) -> None:\n \"\"\"Override this to record feedback from the user.\"\"\"\n pass\n\n async def on_error(self, error_request: ReportErrorRequest) -> None:\n \"\"\"Override this to record errors from the Poe server.\"\"\"\n log_error(\n {\n \"severity\": \"ERROR\",\n \"msg\": \"Error reported\",\n **error_request.dict(),\n }\n )"
},
{
"identifier": "FireworksPoeQRBot",
"path": "fireworks_poe_bot/fw_poe_qr_bot.py",
"snippet": "class FireworksPoeQRBot(PoeBot):\n def __init__(\n self,\n model: str,\n api_key: str,\n environment: str,\n deployment: str,\n server_version: str,\n gcs_bucket_name: str,\n conditioning_scale: float,\n default_cfg_scale: float,\n ):\n super().__init__()\n self.model = model\n self.api_key = api_key\n self.environment = environment\n self.deployment = deployment\n self.server_version = server_version\n self.default_cfg_scale = default_cfg_scale if default_cfg_scale is not None else 8\n\n model_atoms = model.split(\"/\")\n if len(model_atoms) != 4:\n raise ValueError(\n f\"Expected model name to be in the form accounts/{{modelname}}/models/{{model}}, but got {model}\"\n )\n\n if model_atoms[0] != \"accounts\" or model_atoms[2] != \"models\":\n raise ValueError(\n f\"Expected model name to be in the form accounts/{{modelname}}/models/{{model}}, but got {model}\"\n )\n\n self.account = model_atoms[1]\n self.model = model_atoms[3]\n\n self.client = ImageInference(account=self.account, model=self.model)\n\n self.gcs_bucket_name = gcs_bucket_name\n self.conditioning_scale = conditioning_scale\n\n def _log_warn(self, payload: Dict):\n payload = copy.copy(payload)\n payload.update(\n {\n \"severity\": \"WARNING\",\n \"environment\": self.environment,\n \"deployment\": self.deployment,\n \"model\": self.model,\n \"server_version\": self.server_version,\n }\n )\n log_warn(payload)\n\n def _log_info(self, payload: Dict):\n payload = copy.copy(payload)\n payload.update(\n {\n \"severity\": \"INFO\",\n \"environment\": self.environment,\n \"deployment\": self.deployment,\n \"model\": self.model,\n \"server_version\": self.server_version,\n }\n )\n log_info(payload)\n\n async def get_response(\n self, query: QueryRequest\n ) -> AsyncIterable[Union[PartialResponse, ServerSentEvent]]:\n orig_api_key = self.client.api_key\n fireworks.client.api_key = self.api_key\n try:\n start_t = time.time()\n\n if len(query.query) == 0:\n yield ErrorResponse(allow_retry=False, text=\"Empty query\")\n raise\n\n messages: List[ChatMessage] = []\n\n for protocol_message in query.query:\n # OpenAI/Fireworks use the \"assistant\" role for the LLM, but Poe uses the\n # \"bot\" role. Replace that one. Otherwise, ignore the role\n if protocol_message.role not in {\"system\", \"user\", \"bot\"}:\n self._log_warn({\"msg\": \"Unknown role\", **protocol_message})\n continue\n if protocol_message.content_type not in {\"text/plain\", \"text/markdown\"}:\n self._log_warn({\"msg\": \"Unknown content type\", **protocol_message})\n continue\n # TODO: support protocol_message.feedback and protocol_message.attachments\n # if needed\n if protocol_message.role == \"bot\":\n role = \"assistant\"\n else:\n role = protocol_message.role\n messages.append({\"role\": role, \"content\": protocol_message.content})\n\n self._log_info(\n {\n \"msg\": \"Request received\",\n **query.dict(),\n }\n )\n\n # The poe servers send us arbitrary lists of messages. We need to do a few things\n # to normalize for our chat completion API:\n # 1. Ensure that all assistant messages are preceded by a user message\n # 2. Merge adjacent messages from the same role\n # 3. 
Ensure that the last message is a user message\n\n # Ensure that all assistant messages are preceded by a user message\n for i in range(len(messages) - 1, -1, -1):\n if messages[i][\"role\"] == \"assistant\" and (\n i == 0 or messages[i - 1][\"role\"] != \"user\"\n ):\n self._log_warn(\n {\n \"msg\": f\"Assistant message {messages[i]} not preceded by user message\"\n }\n )\n messages.insert(i, {\"role\": \"user\", \"content\": \"\"})\n\n # Merge adjacent messages from the same role\n merged_messages = []\n\n for role, group in groupby(messages, key=lambda x: x[\"role\"]):\n content = \" \".join(message[\"content\"] for message in group)\n merged_messages.append({\"role\": role, \"content\": content})\n\n messages = merged_messages\n\n # Ensure last message is a user message\n if messages[-1][\"role\"] != \"user\":\n self._log_warn({\"msg\": f\"Last message {messages[-1]} not a user message\"})\n messages.append({\"role\": \"user\", \"content\": \"\"})\n\n # generated_len = 0\n\n assert messages[-1][\"role\"] == \"user\"\n prompt = messages[-1][\"content\"]\n\n try:\n prompt, qr_data, qr_strength, prompt_strength, model = parse_input(prompt, self.conditioning_scale, self.default_cfg_scale)\n except Exception as e:\n yield self.text_event(text=f\"Error parsing input: {e}\")\n return\n\n if model == \"sdxl\":\n self.client.model = \"stable-diffusion-xl-1024-v1-0\"\n elif model == \"sdv1.5\":\n self.client.model = \"stable-diffusion-v1-5\"\n else:\n yield self.text_event(text=f\"Unknown model: {model}. Model must be one of 'sdxl' or 'sdv1.5'.\")\n return\n\n qr_image = gen_qr_code(qr_data)\n\n answer: Answer = await self.client.control_net_async(\n control_image=qr_image,\n control_net_name=\"qr\",\n conditioning_scale=qr_strength,\n prompt=prompt,\n cfg_scale=prompt_strength,\n sampler=None,\n steps=25,\n seed=0,\n safety_check=False,\n output_image_format=\"JPG\",\n # Add additional parameters here as necessary\n )\n\n end_t_inference = time.time()\n start_t_encode = time.time()\n\n if answer.finish_reason == \"CONTENT_FILTERED\":\n yield self.text_event(text=\"Potentially sensitive content detected\")\n return\n\n public_image_url = self._upload_image_to_gcs(\n answer.image, self.gcs_bucket_name\n )\n\n response_text = f\"\"\n\n end_t = time.time()\n elapsed_sec = end_t - start_t\n self._log_info(\n {\n \"severity\": \"INFO\",\n \"msg\": \"Request completed\",\n **query.dict(),\n \"prompt\": prompt,\n \"qr_data\": qr_data,\n \"qr_strength\": qr_strength,\n \"prompt_strength\": prompt_strength,\n \"response\": response_text,\n \"elapsed_sec\": elapsed_sec,\n \"elapsed_sec_inference\": end_t_inference - start_t,\n \"elapsed_sec_upload\": end_t - start_t_encode,\n }\n )\n yield PartialResponse(text=response_text)\n yield ServerSentEvent(event=\"done\")\n return\n except Exception as e:\n end_t = time.time()\n log_error(\n {\n \"severity\": \"ERROR\",\n \"msg\": \"Invalid request\",\n \"error\": \"\\n\".join(traceback.format_exception(e)),\n \"elapsed_sec\": end_t - start_t,\n **query.dict(),\n }\n )\n if \"prompt is too long\" in str(e):\n error_type = \"user_message_too_long\"\n else:\n error_type = None\n yield ErrorResponse(allow_retry=False, error_type=error_type, text=str(e))\n return\n finally:\n fireworks.client.api_key = orig_api_key\n\n # Function to upload a PIL Image to an S3 bucket with a presigned URL\n def _upload_image_to_s3_with_ttl(\n self, bucket_name, object_name, image: Image, expiration=600\n ):\n \"\"\"\n Upload a PIL Image to an S3 bucket with TTL by generating a 
presigned URL.\n\n :param bucket_name: String name of the bucket to which the image is uploaded.\n :param object_name: S3 object name. If not specified then file_name is used.\n :param image: PIL Image object to be uploaded.\n :param expiration: Time in seconds for the presigned URL to remain valid.\n \"\"\"\n # In-memory binary streams\n in_mem_file = io.BytesIO()\n\n # Save the PIL image to in-memory file as JPEG\n image.save(in_mem_file, format=\"JPEG\")\n in_mem_file.seek(0) # Reset file pointer to the beginning\n\n # Upload the image to S3\n # self.s3_client.upload_fileobj(in_mem_file, bucket_name, object_name)\n self.s3_client.put_object(\n Bucket=self.s3_bucket_name,\n Key=object_name,\n Body=in_mem_file,\n ContentType=\"image/jpeg\",\n )\n\n # Generate a presigned URL for the S3 object\n url = self.s3_client.generate_presigned_url(\n \"get_object\",\n Params={\"Bucket\": bucket_name, \"Key\": object_name},\n ExpiresIn=expiration,\n )\n\n return url\n\n def _upload_image_to_gcs(self, image: Image, bucket_name: str):\n \"\"\"Uploads a given PIL.Image to a GCS bucket.\"\"\"\n # Generate a (statistically) unique filename with a uuid4\n random_uuid = str(uuid.uuid4()).replace(\"-\", \"\")\n filename = f\"{random_uuid}.jpg\"\n\n # Initialize the GCS client\n client = storage.Client()\n bucket = client.get_bucket(bucket_name)\n\n # Convert the PIL.Image to bytes\n img_byte_arr = io.BytesIO()\n image.save(img_byte_arr, format=\"JPEG\")\n img_byte_arr = img_byte_arr.getvalue()\n\n # Create a new blob (i.e., object) in the bucket and upload the image bytes\n blob = bucket.blob(filename)\n blob.upload_from_string(img_byte_arr, content_type=f\"image/jpeg\")\n\n blob.make_public()\n\n # The public URL can be accessed with the `public_url` attribute\n public_url = blob.public_url\n\n return public_url\n\n def _download_image(self, image_url):\n # Send an HTTP GET request to the image URL\n response = requests.get(image_url)\n\n # Check if the request was successful\n if response.status_code == 200:\n # Read the image content into an in-memory bytes buffer\n image_bytes = io.BytesIO(response.content)\n\n # Use Pillow to open the image from the bytes buffer\n img = Image.open(image_bytes)\n\n return img\n else:\n # If the request failed, raise an HTTPError with the response\n response.raise_for_status()\n\n async def get_settings(self, setting: SettingsRequest) -> SettingsResponse:\n \"\"\"Override this to return non-standard settings.\"\"\"\n return SettingsResponse()\n\n async def on_feedback(self, feedback_request: ReportFeedbackRequest) -> None:\n \"\"\"Override this to record feedback from the user.\"\"\"\n pass\n\n async def on_error(self, error_request: ReportErrorRequest) -> None:\n \"\"\"Override this to record errors from the Poe server.\"\"\"\n log_error(\n {\n \"severity\": \"ERROR\",\n \"msg\": \"Error reported\",\n **error_request.dict(),\n }\n )"
},
{
"identifier": "UVICORN_LOGGING_CONFIG",
"path": "fireworks_poe_bot/logging.py",
"snippet": "UVICORN_LOGGING_CONFIG = copy.deepcopy(uvicorn.config.LOGGING_CONFIG)"
},
{
"identifier": "LoggingPlugin",
"path": "fireworks_poe_bot/plugin.py",
"snippet": "class LoggingPlugin(ABC):\n @abstractmethod\n def log_warn(self, payload: Dict[str, Any]):\n ...\n\n @abstractmethod\n def log_info(self, payload: Dict[str, Any]):\n ...\n\n @abstractmethod\n def log_error(self, payload: Dict[str, Any]):\n ..."
},
{
"identifier": "register_logging_plugin",
"path": "fireworks_poe_bot/plugin.py",
"snippet": "def register_logging_plugin(plugin: LoggingPlugin):\n _LOGGING_PLUGINS.append(plugin)"
},
{
"identifier": "BOT_PLUGINS",
"path": "fireworks_poe_bot/plugin.py",
"snippet": "BOT_PLUGINS: List[_BotPlugin] = []"
},
{
"identifier": "log_info",
"path": "fireworks_poe_bot/plugin.py",
"snippet": "@abstractmethod\ndef log_info(self, payload: Dict[str, Any]):\n ..."
},
{
"identifier": "make_app",
"path": "fireworks_poe_bot/fastapi_poe/base.py",
"snippet": "def make_app(\n bots: Dict[str, PoeBot],\n access_key: str = \"\",\n *,\n api_key: str = \"\",\n allow_without_key: bool = False,\n) -> FastAPI:\n \"\"\"Create an app object. Arguments are as for run().\"\"\"\n app = FastAPI()\n app.add_exception_handler(RequestValidationError, exception_handler)\n\n global auth_key\n auth_key = _verify_access_key(\n access_key=access_key, api_key=api_key, allow_without_key=allow_without_key\n )\n\n def find_bot(account: str, model: str) -> PoeBot:\n bot_fqn = f\"accounts/{account}/models/{model}\"\n if bot_fqn not in bots:\n raise HTTPException(status_code=404, detail=f\"Bot {bot_fqn} not found\")\n return bots[bot_fqn]\n\n @app.get(\"/\")\n async def index() -> Response:\n # Default endpoint for health checks\n return HTMLResponse(\"It works!\")\n\n @app.get(\"/accounts/{account}/models/{model}\")\n async def index(account: str, model: str) -> Response:\n bot = find_bot(account, model)\n\n url = \"https://poe.com/create_bot?server=1\"\n return HTMLResponse(\n \"<html><body><h1>FastAPI Poe bot server</h1><p>Congratulations! Your server\"\n \" is running. To connect it to Poe, create a bot at <a\"\n f' href=\"{url}\">{url}</a>.</p></body></html>'\n )\n\n @app.post(\"/accounts/{account}/models/{model}\")\n async def poe_post(\n account: str, model: str, request: Dict[str, Any], dict=Depends(auth_user)\n ) -> Response:\n bot = find_bot(account, model)\n\n if request[\"type\"] == \"query\":\n return EventSourceResponse(\n bot.handle_query(\n QueryRequest.parse_obj(\n {\n **request,\n \"access_key\": auth_key or \"<missing>\",\n \"api_key\": auth_key or \"<missing>\",\n }\n )\n )\n )\n elif request[\"type\"] == \"settings\":\n return await bot.handle_settings(SettingsRequest.parse_obj(request))\n elif request[\"type\"] == \"report_feedback\":\n return await bot.handle_report_feedback(\n ReportFeedbackRequest.parse_obj(request)\n )\n elif request[\"type\"] == \"report_error\":\n return await bot.handle_report_error(ReportErrorRequest.parse_obj(request))\n else:\n raise HTTPException(status_code=501, detail=\"Unsupported request type\")\n\n # Uncomment this line to print out request and response\n # app.add_middleware(LoggingMiddleware)\n return app"
}
] | from fireworks_poe_bot.fw_poe_text_bot import FireworksPoeTextBot
from fireworks_poe_bot.fw_poe_image_bot import FireworksPoeImageBot
from fireworks_poe_bot.fw_poe_qr_bot import FireworksPoeQRBot
from fireworks_poe_bot.logging import UVICORN_LOGGING_CONFIG
from fireworks_poe_bot.plugin import LoggingPlugin, register_logging_plugin, BOT_PLUGINS, log_info
from dataclasses import dataclass
from typing import Any, Dict
from .fastapi_poe import make_app
import argparse
import uvicorn
import logging
import os
import json | 9,919 |
@dataclass
class ServerArgs:
host: str = "0.0.0.0"
port: int = 80
config_file_path: str = "config.json"
environment: str = ""
deployment: str = "poe-omnibot"
|
@dataclass
class ServerArgs:
host: str = "0.0.0.0"
port: int = 80
config_file_path: str = "config.json"
environment: str = ""
deployment: str = "poe-omnibot"
| class PyLoggingPlugin(LoggingPlugin): | 4 | 2023-11-03 23:24:23+00:00 | 12k |
Fsoft-AIC/LSDM | run/train_sdm.py | [
{
"identifier": "count_parameters",
"path": "posa/posa_utils.py",
"snippet": "def count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)"
},
{
"identifier": "ProxDataset_txt",
"path": "posa/dataset.py",
"snippet": "class ProxDataset_txt(Dataset): # when jump_step=8, for a whole seq, dataset's max_frame is 165, max num_seg is 29\n def __init__(self, data_dir, fix_orientation=False, no_obj_classes=8, max_frame=220,\n ds_weights_path=\"posa/support_files/downsampled_weights.npy\", jump_step=8, step_multiplier=1, max_objs=8, pnt_size=1024, \n objs_data_dir='data/protext/objs', max_cats=13, **kwargs):\n '''\n data_dir: directory that stores processed PROXD dataset.\n fix_orientation: flag that specifies whether we always make the first pose in a motion sequence facing\n towards a canonical direction.\n no_obj_classes: number of contact object classes.\n max_frame: the maximum motion sequence length which the model accepts (after applying frame skipping).\n ds_weights_path: the saved downsampling matrix for downsampling body vertices.\n jump_step: for every jump_step frames, we only select the first frame for some sequence.\n step_multiplier: a dummy parameter used to control the number of examples seen in each epoch (You can\n ignore it if you don't know how to adjust it).\n '''\n self.data_dir = data_dir\n self.max_objs = max_objs\n self.pnt_size = pnt_size\n self.max_cats = max_cats\n \n # Setup handle case for dataset: 0 for training, 1 for testing\n is_train = self.data_dir.split('_')[1]\n self.handle = 0 if is_train == 'train' else 1\n self.objs_dir = objs_data_dir\n self.context_dir = os.path.join(data_dir, \"context\")\n self.reduced_verts_dir = os.path.join(data_dir, \"reduced_vertices\")\n self.seq_names = [f.split('.txt')[0] for f in os.listdir(self.context_dir)]\n\n # Setup reading object files and cases\n self._setup_static_objs()\n\n # Initialize for human sequences\n self.reduced_verts_dict = dict()\n self.context_dict = dict()\n\n self.total_frames = 0\n for seq_name in self.seq_names:\n self.reduced_verts_dict[seq_name] = torch.tensor(np.load(os.path.join(self.reduced_verts_dir, seq_name + \".npy\")), dtype=torch.float32)\n with open(os.path.join(self.context_dir, seq_name + \".txt\")) as f:\n text_prompt, given_objs, target_obj = f.readlines()\n text_prompt = text_prompt.strip('\\n')\n given_objs = given_objs.strip('\\n').split(' ')\n self.context_dict[seq_name] = (text_prompt, given_objs, target_obj)\n\n self.fix_orientation = fix_orientation\n self.no_obj_classes = no_obj_classes\n self.ds_weights_path = ds_weights_path\n self.ds_weights = None\n self.associated_joints = None\n if fix_orientation:\n self.ds_weights = torch.tensor(np.load(self.ds_weights_path))\n self.associated_joints = torch.argmax(self.ds_weights, dim=1)\n\n self.jump_step = jump_step\n self.step_multiplier = step_multiplier\n\n @property\n def _cat(self):\n return {\n \"chair\": 1,\n \"table\": 2,\n \"cabinet\": 3,\n \"sofa\": 4,\n \"bed\": 5,\n \"chest_of_drawers\": 6,\n \"chest\": 6,\n \"stool\": 7,\n \"tv_monitor\": 8,\n \"tv\": 8,\n \"lighting\": 9,\n \"shelving\": 10,\n \"seating\": 11,\n \"furniture\": 12,\n \"human\": 0,\n }\n\n def _setup_static_objs(self):\n self.scenes = os.listdir(self.objs_dir)\n self.objs = dict()\n self.cats = dict()\n for scene in self.scenes:\n self.objs[scene] = dict()\n self.cats[scene] = dict()\n \n objs_list = os.listdir(os.path.join(self.objs_dir, scene))\n for obj_file in objs_list:\n obj = obj_file[:-4]\n cat = obj.split('.')[0].split('_')[0]\n # Read vertices of objects\n with open(os.path.join(self.objs_dir, scene, obj_file), 'rb') as f:\n verts = np.load(f)\n self.objs[scene][obj] = verts\n self.cats[scene][obj] = self._cat[cat]\n \n def __len__(self):\n return 
len(self.seq_names)\n\n def __getitem__(self, idx):\n # seq_idx = torch.randint(len(self.seq_names), size=(1,))\n seq_idx = idx\n seq_name = self.seq_names[seq_idx]\n scene = seq_name.split('_')[0]\n all_objs = self.objs[scene]\n all_cats = self.cats[scene]\n text_prompt, given_objs, target_obj = self.context_dict[seq_name]\n human_verts = self.reduced_verts_dict[seq_name]\n\n # Initialize for objects, note that, the first object is human\n obj_verts = torch.zeros(self.max_objs+1, self.pnt_size, 3)\n obj_verts[0] = human_verts.clone().detach()\n obj_mask = torch.zeros(self.max_objs+1)\n obj_cats = torch.zeros(self.max_objs+1, self.max_cats)\n obj_cats[0][self._cat['human']] = 1\n for idx, obj in enumerate(given_objs):\n cat = obj.split('_')[0]\n obj_verts[idx+1] = torch.tensor(all_objs[obj])\n obj_mask[idx+1] = 1\n obj_cats[idx+1][self._cat[cat]] = 1\n\n # Retrieve information of target vertices\n target_verts = all_objs[target_obj]\n target_cat = target_obj.split('_')[0]\n target_num = self._cat[target_cat]\n target_cat = torch.zeros(self.max_cats)\n target_cat[target_num] = 1\n\n return obj_mask, obj_verts, obj_cats, target_verts, target_cat, text_prompt"
},
{
"identifier": "HUMANISE",
"path": "posa/dataset.py",
"snippet": "class HUMANISE(Dataset): # when jump_step=8, for a whole seq, dataset's max_frame is 165, max num_seg is 29\n def __init__(self, data_dir, fix_orientation=False, no_obj_classes=8, max_frame=220,\n ds_weights_path=\"posa/support_files/downsampled_weights.npy\", jump_step=8, step_multiplier=1, max_objs=8, pnt_size=1024, \n objs_data_dir='data/humanise/objs', max_cats=11, **kwargs):\n '''\n data_dir: directory that stores processed PROXD dataset.\n fix_orientation: flag that specifies whether we always make the first pose in a motion sequence facing\n towards a canonical direction.\n no_obj_classes: number of contact object classes.\n max_frame: the maximum motion sequence length which the model accepts (after applying frame skipping).\n ds_weights_path: the saved downsampling matrix for downsampling body vertices.\n jump_step: for every jump_step frames, we only select the first frame for some sequence.\n step_multiplier: a dummy parameter used to control the number of examples seen in each epoch (You can\n ignore it if you don't know how to adjust it).\n '''\n self.data_dir = data_dir\n self.max_objs = max_objs\n self.pnt_size = pnt_size\n self.max_cats = max_cats\n \n # Setup handle case for dataset: 0 for training, 1 for testing\n is_train = self.data_dir.split('/')[-1]\n self.handle = 0 if is_train == 'train' else 1\n self.objs_dir = objs_data_dir\n self.context_dir = os.path.join(data_dir, \"context\")\n self.reduced_verts_dir = os.path.join(data_dir, \"reduced_vertices\")\n self.seq_names = [f.split('.txt')[0] for f in os.listdir(self.context_dir)]\n\n # Setup reading object files and cases\n self._setup_static_objs()\n\n # Initialize for human sequences\n self.reduced_verts_dict = dict()\n self.context_dict = dict()\n\n self.total_frames = 0\n for seq_name in self.seq_names:\n self.reduced_verts_dict[seq_name] = torch.tensor(np.load(os.path.join(self.reduced_verts_dir, seq_name + \".npy\")), dtype=torch.float32)\n with open(os.path.join(self.context_dir, seq_name + \".txt\")) as f:\n text_prompt, given_objs, target_obj = f.readlines()\n text_prompt = text_prompt.strip('\\n')\n given_objs = given_objs.strip('\\n').split(' ')\n self.context_dict[seq_name] = (text_prompt, given_objs, target_obj)\n\n self.fix_orientation = fix_orientation\n self.no_obj_classes = no_obj_classes\n self.ds_weights_path = ds_weights_path\n self.ds_weights = None\n self.associated_joints = None\n if fix_orientation:\n self.ds_weights = torch.tensor(np.load(self.ds_weights_path))\n self.associated_joints = torch.argmax(self.ds_weights, dim=1)\n\n self.jump_step = jump_step\n self.step_multiplier = step_multiplier\n\n @property\n def _cat(self):\n \n return {\n \"bed\": 1,\t\t# bed\n \"sofa\": 2, \t\t# sofa\n \"table\": 3,\t\t# table\n \"door\": 4, \t\t# door\n \"desk\": 5,\t\t# desk\n \"refrigerator\": 6, \t\t# refrigerator\n \"chair\": 7,\n \"counter\": 8,\n \"bookshelf\": 9,\n \"cabinet\": 10,\n \"human\": 0\n }\n\n def _setup_static_objs(self):\n self.scenes = os.listdir(self.objs_dir)\n self.objs = dict()\n self.cats = dict()\n for scene in self.scenes:\n self.objs[scene] = dict()\n self.cats[scene] = dict()\n \n objs_list = os.listdir(os.path.join(self.objs_dir, scene))\n for obj_file in objs_list:\n obj = obj_file[:-4]\n cat = obj.split('_')[0]\n if cat in self._cat:\n # Read vertices of objects\n with open(os.path.join(self.objs_dir, scene, obj_file), 'rb') as f:\n verts = np.load(f)\n self.objs[scene][obj] = verts\n self.cats[scene][obj] = self._cat[cat]\n \n def __len__(self):\n return 
len(self.seq_names)\n\n def __getitem__(self, idx):\n \n # seq_idx = torch.randint(len(self.seq_names), size=(1,))\n seq_idx = idx\n seq_name = self.seq_names[seq_idx]\n scene = seq_name[:9] + '_00'\n all_objs = self.objs[scene]\n all_cats = self.cats[scene]\n text_prompt, given_objs, target_obj = self.context_dict[seq_name]\n human_verts = self.reduced_verts_dict[seq_name]\n\n # Initialize for objects, note that, the first object is human\n obj_verts = torch.zeros(self.max_objs+1, self.pnt_size, 3)\n obj_verts[0] = human_verts.clone().detach()\n obj_mask = torch.zeros(self.max_objs+1)\n obj_cats = torch.zeros(self.max_objs+1, self.max_cats)\n obj_cats[0][self._cat['human']] = 1\n for idx, obj in enumerate(given_objs):\n cat = obj.split('_')[0]\n obj_verts[idx+1] = torch.tensor(all_objs[obj])\n obj_mask[idx+1] = 1\n obj_cats[idx+1][self._cat[cat]] = 1\n\n # Retrieve information of target vertices\n target_verts = all_objs[target_obj]\n target_cat = target_obj.split('_')[0]\n target_num = self._cat[target_cat]\n target_cat = torch.zeros(self.max_cats)\n target_cat[target_num] = 1\n\n return obj_mask, obj_verts, obj_cats, target_verts, target_cat, text_prompt"
},
{
"identifier": "compute_recon_loss",
"path": "posa/general_utils.py",
"snippet": "def compute_recon_loss(gt_batch, pr_batch, mask=None, semantics_w=1.0, reduction='mean', **kwargs):\n batch_size, _, n_verts, _ = gt_batch.shape\n\n if mask is not None:\n reduction = 'none'\n # mask = mask.unsqueeze(-1).expand(-1, -1, n_verts)\n gt_batch = gt_batch[mask == 1].unsqueeze(0)\n pr_batch = pr_batch[mask == 1].unsqueeze(0)\n\n targets = gt_batch.argmax(dim=-1).type(torch.long)\n recon_loss_semantics = semantics_w * F.cross_entropy(pr_batch.permute(0, 3, 1, 2), targets, reduction=reduction)\n semantics_recon_acc = (targets == torch.argmax(pr_batch, dim=-1)).float()\n if mask is not None:\n # recon_loss_semantics *= mask\n # recon_loss_semantics = torch.sum(recon_loss_semantics) / (torch.sum(mask) * n_verts)\n recon_loss_semantics = torch.mean(recon_loss_semantics)\n # semantics_recon_acc *= mask\n # semantics_recon_acc = torch.sum(semantics_recon_acc) / (torch.sum(mask) * n_verts)\n semantics_recon_acc = torch.mean(semantics_recon_acc)\n else:\n semantics_recon_acc = torch.mean(semantics_recon_acc)\n\n return recon_loss_semantics, semantics_recon_acc"
},
{
"identifier": "compute_delta",
"path": "posa/general_utils.py",
"snippet": "def compute_delta(vertices_can, seg_len):\n assert vertices_can.dim() == 4, \"The dim of vertices_can must be 4!\"\n half_seg_len = seg_len // 2\n center_frame_verts = vertices_can[:, half_seg_len, :, :].unsqueeze(1)\n vertices_can = vertices_can - center_frame_verts\n vertices_can[:, half_seg_len, :, :] = center_frame_verts[:, 0, :, :]\n return vertices_can\n # test1 = torch.range(start=0, end=23).reshape(2, 2, 3, 2)\n # test2 = torch.ones(2, 1, 3, 2)\n # test2[1] = 2\n # result = test1 - test2\n # print(result)"
},
{
"identifier": "create_named_schedule_sampler",
"path": "diffusion/resample.py",
"snippet": "def create_named_schedule_sampler(name, diffusion):\n \"\"\"\n Create a ScheduleSampler from a library of pre-defined samplers.\n\n :param name: the name of the sampler.\n :param diffusion: the diffusion object to sample for.\n \"\"\"\n if name == \"uniform\":\n return UniformSampler(diffusion)\n elif name == \"loss-second-moment\":\n return LossSecondMomentResampler(diffusion)\n else:\n raise NotImplementedError(f\"unknown schedule sampler: {name}\")"
},
{
"identifier": "MixedPrecisionTrainer",
"path": "diffusion/fp16_util.py",
"snippet": "class MixedPrecisionTrainer:\n def __init__(\n self,\n *,\n model,\n use_fp16=False,\n fp16_scale_growth=1e-3,\n initial_lg_loss_scale=INITIAL_LOG_LOSS_SCALE,\n ):\n self.model = model\n self.use_fp16 = use_fp16\n self.fp16_scale_growth = fp16_scale_growth\n\n self.model_params = list(self.model.parameters())\n self.master_params = self.model_params\n self.param_groups_and_shapes = None\n self.lg_loss_scale = initial_lg_loss_scale\n\n if self.use_fp16:\n self.param_groups_and_shapes = get_param_groups_and_shapes(\n self.model.named_parameters()\n )\n self.master_params = make_master_params(self.param_groups_and_shapes)\n self.model.convert_to_fp16()\n\n def zero_grad(self):\n zero_grad(self.model_params)\n\n def backward(self, loss: th.Tensor):\n if self.use_fp16:\n loss_scale = 2 ** self.lg_loss_scale\n (loss * loss_scale).backward()\n else:\n loss.backward()\n\n def optimize(self, opt: th.optim.Optimizer):\n if self.use_fp16:\n return self._optimize_fp16(opt)\n else:\n return self._optimize_normal(opt)\n\n def _optimize_fp16(self, opt: th.optim.Optimizer):\n logger.logkv_mean(\"lg_loss_scale\", self.lg_loss_scale)\n model_grads_to_master_grads(self.param_groups_and_shapes, self.master_params)\n grad_norm, param_norm = self._compute_norms(grad_scale=2 ** self.lg_loss_scale)\n if check_overflow(grad_norm):\n self.lg_loss_scale -= 1\n logger.log(f\"Found NaN, decreased lg_loss_scale to {self.lg_loss_scale}\")\n zero_master_grads(self.master_params)\n return False\n\n logger.logkv_mean(\"grad_norm\", grad_norm)\n logger.logkv_mean(\"param_norm\", param_norm)\n\n self.master_params[0].grad.mul_(1.0 / (2 ** self.lg_loss_scale))\n opt.step()\n zero_master_grads(self.master_params)\n master_params_to_model_params(self.param_groups_and_shapes, self.master_params)\n self.lg_loss_scale += self.fp16_scale_growth\n return True\n\n def _optimize_normal(self, opt: th.optim.Optimizer):\n grad_norm, param_norm = self._compute_norms()\n logger.logkv_mean(\"grad_norm\", grad_norm)\n logger.logkv_mean(\"param_norm\", param_norm)\n opt.step()\n return True\n\n def _compute_norms(self, grad_scale=1.0):\n grad_norm = 0.0\n param_norm = 0.0\n for p in self.master_params:\n with th.no_grad():\n param_norm += th.norm(p, p=2, dtype=th.float32).item() ** 2\n if p.grad is not None:\n grad_norm += th.norm(p.grad, p=2, dtype=th.float32).item() ** 2\n return np.sqrt(grad_norm) / grad_scale, np.sqrt(param_norm)\n\n def master_params_to_state_dict(self, master_params):\n return master_params_to_state_dict(\n self.model, self.param_groups_and_shapes, master_params, self.use_fp16\n )\n\n def state_dict_to_master_params(self, state_dict):\n return state_dict_to_master_params(self.model, state_dict, self.use_fp16)"
},
{
"identifier": "create_model_and_diffusion",
"path": "util/model_util.py",
"snippet": "def create_model_and_diffusion(datatype):\n # model = SceneDiffusionModel(**get_model_args(args, data))\n if datatype == \"proxd\":\n model = SceneDiffusionModel(**get_default_model_proxd())\n else:\n model = SceneDiffusionModel(**get_default_model_humanise())\n diffusion = create_gaussian_diffusion(get_default_diffusion())\n return model, diffusion"
}
] | import os
import functools
import os.path as osp
import math
import argparse
import time
import numpy as np
import torch
import torch.nn.functional as F
from tqdm import tqdm
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader
from torch.optim import AdamW
from pytorch3d.loss import chamfer_distance
from posa.posa_utils import count_parameters
from posa.dataset import ProxDataset_txt, HUMANISE
from posa.general_utils import compute_recon_loss, compute_delta
from diffusion.resample import create_named_schedule_sampler
from diffusion.fp16_util import MixedPrecisionTrainer
from util.model_util import create_model_and_diffusion | 7,206 | skip_timesteps=0, # 0 is the default value - i.e. don't skip any step
init_image=None,
progress=False,
dump_steps=None,
noise=None,
const_noise=False,
                # when experimenting with guidance_scale we want to neutralize the effect of noise on generation
)
# pr_cf: (bs, seg_len, 655, 43), mu: (bs, 256), logvar: (bs, 256)
# z = torch.tensor(np.random.normal(0, 1, (max_frame, 256)).astype(np.float32)).to(device)
# posa_out = model.posa(z, verts_can)
pr_pnts = sample
gt_pnts = target_obj
recon_loss_semantics = ((pr_pnts-gt_pnts)**2).mean()
cfd, cfd_normals = chamfer_distance(pr_pnts, gt_pnts.float().to(device))
# Calculate for categorical
pred_cat = model.saved_cat
pred_cat = pred_cat.squeeze(1)
pred_cat = torch.argmax(pred_cat, dim=1)
target_cat = torch.argmax(target_cat, dim=1)
acc = (pred_cat==target_cat).sum().item()
# recon_loss_semantics, semantics_recon_acc = compute_recon_loss(gt_cf, pr_cf, mask=mask, **args_dict)
total_recon_loss_semantics += recon_loss_semantics
total_cfd += cfd
total_acc += acc
n_steps += 1
total_recon_loss_semantics /= n_steps
total_cfd /= n_steps
total_acc /= n_steps
print(n_steps)
writer.add_scalar('recon_loss_semantics/validate', total_recon_loss_semantics, e)
writer.add_scalar('total_cfd/validate', total_cfd, e)
writer.add_scalar('total_acc/validate', total_acc, e)
print(
'====>Recon_loss_semantics = {:.4f} , Chamfer distance = {:.4f}, Category acc = {:.4f}'.format(
total_recon_loss_semantics, total_cfd, total_acc))
return total_recon_loss_semantics, total_cfd, total_acc
if __name__ == '__main__':
# torch.manual_seed(0)
print(torch.version.cuda)
# torch.multiprocessing.set_start_method('spawn')
parser = argparse.ArgumentParser(description="")
parser.add_argument("--train_data_dir", type=str, default="data/proxd_train",
help="path to POSA_temp dataset dir")
parser.add_argument("--valid_data_dir", type=str, default="data/proxd_valid",
help="path to POSA_temp dataset dir")
parser.add_argument("--load_ckpt", type=str, default=None,
help="load a checkpoint as the continue point for training")
parser.add_argument("--posa_path", type=str, default="training/posa/model_ckpt/best_model_recon_acc.pt")
parser.add_argument("--out_dir", type=str, default="training/", help="Folder that stores checkpoints and training logs")
parser.add_argument("--experiment", type=str, default="default_experiment",
help="Experiment name. Checkpoints and training logs will be saved in out_dir/experiment folder.")
parser.add_argument("--save_interval", type=int, default=50, help="Epoch interval for saving model checkpoints.")
parser.add_argument("--lr", type=float, default=1e-3)
parser.add_argument("--epochs", type=int, default=1000)
parser.add_argument('--fix_ori', dest='fix_ori', action='store_const', const=True, default=False,
help="fix orientation of each segment with the rotation calculated from the first frame")
parser.add_argument("--encoder_mode", type=int, default=1,
help="Encoder mode (different number represents different variants of encoder)")
parser.add_argument("--decoder_mode", type=int, default=1,
help="Decoder mode (different number represents different variants of decoder)")
parser.add_argument("--n_layer", type=int, default=3, help="number of layers in transformer")
parser.add_argument("--n_head", type=int, default=4, help="number of heads in transformer")
parser.add_argument("--dim_ff", type=int, default=512, help="dimension of hidden layers in positionwise MLP in the transformer")
parser.add_argument("--f_vert", type=int, default=64, help="dimension of the embeddings for body vertices")
parser.add_argument("--num_workers", type=int, default=0, help="number of workers for dataloader")
parser.add_argument("--jump_step", type=int, default=8, help="frame skip size for each input motion sequence")
parser.add_argument("--max_frame", type=int, default=256, help="The maximum length of motion sequence (after frame skipping) which model accepts.")
parser.add_argument("--eval_epochs", type=int, default=10, help="The number of epochs that we periodically evalute the model.")
parser.add_argument("--datatype", type=str, default="proxd", help="Dataset type indicator: PRO-teXt or HUMANISE.")
args = parser.parse_args()
args_dict = vars(args)
# Parse arguments
train_data_dir = args_dict['train_data_dir']
valid_data_dir = args_dict['valid_data_dir']
load_ckpt = args_dict['load_ckpt']
save_interval = args_dict['save_interval']
out_dir = args_dict['out_dir']
experiment = args_dict['experiment']
lr = args_dict['lr']
epochs = args_dict['epochs']
fix_ori = args_dict['fix_ori']
encoder_mode = args_dict['encoder_mode']
decoder_mode = args_dict['decoder_mode']
n_layer = args_dict['n_layer']
n_head = args_dict['n_head']
num_workers = args_dict['num_workers']
jump_step = args_dict['jump_step']
max_frame = args_dict['max_frame']
dim_ff = args_dict['dim_ff']
f_vert = args_dict['f_vert']
posa_path = args_dict['posa_path']
eval_epochs = args_dict['eval_epochs']
datatype = args_dict['datatype']
save_ckpt_dir = os.path.join(out_dir, experiment, "model_ckpt")
log_dir = os.path.join(out_dir, experiment, "tb_log")
os.makedirs(save_ckpt_dir, exist_ok=True)
device = torch.device("cuda")
dtype = torch.float32
kl_w = 0.5
if datatype == "proxd":
train_dataset = ProxDataset_txt(train_data_dir, max_frame=max_frame, fix_orientation=fix_ori,
step_multiplier=1, jump_step=jump_step)
train_data_loader = DataLoader(train_dataset, batch_size=6, shuffle=True, num_workers=num_workers)
valid_dataset = ProxDataset_txt(valid_data_dir, max_frame=max_frame, fix_orientation=fix_ori,
step_multiplier=1, jump_step=jump_step)
valid_data_loader = DataLoader(valid_dataset, batch_size=6, shuffle=True, num_workers=num_workers)
else:
|
"""
Running sample:
python train_contactformer.py --train_data_dir ../data/proxd_train --valid_data_dir ../data/proxd_valid --fix_ori --epochs 1000 --jump_step 8
"""
def train():
# Create diffusion sampler, optimizer, and trainer
schedule_sampler_type = 'uniform'
schedule_sampler = create_named_schedule_sampler(schedule_sampler_type, diffusion)
use_fp16 = False
fp16_scale_growth = 1e-3
mp_trainer = MixedPrecisionTrainer(
model=model,
use_fp16=use_fp16,
fp16_scale_growth=fp16_scale_growth,
)
optimizer = AdamW(
mp_trainer.master_params, lr=lr
)
model.train()
torch.autograd.set_detect_anomaly(True)
total_recon_loss_semantics = 0
total_semantics_recon_acc = 0
total_train_loss = 0
n_steps = 0
for mask, given_objs, given_cats, target_obj, target_cat, y in tqdm(train_data_loader):
# Initialize params of the training batch
# verts_can: (bs, seg_len, Nverts, 3), contacts_s: (bs, seg_len, Nverts, 8)
mask = mask.to(device)
given_objs = given_objs.to(device)
given_cats = given_cats.to(device)
target_obj = target_obj.to(device)
target_cat = target_cat.to(device)
t, weights = schedule_sampler.sample(target_obj.shape[0], device)
# Initialize the optimizer's step
mp_trainer.zero_grad()
# Calculate loss
compute_losses = functools.partial(
diffusion.training_losses,
model,
target_obj, # [bs, ch, image_size, image_size]
mask,
t, # [bs](int) sampled timesteps
given_objs,
given_cats,
target_cat,
y=y,
)
losses = compute_losses()
loss = (losses["loss"] * weights).mean()
total_train_loss += loss
# Backward loss
mp_trainer.backward(loss)
mp_trainer.optimize(optimizer)
# Schedule learning rate
n_steps += 1
# Logging the training epoch
# pr_cf: (bs, seg_len, 655, 8), mu: (bs, 256), logvar: (bs, 256)
# pr_cf = sample
# recon_loss_semantics, semantics_recon_acc = compute_recon_loss(gt_cf, pr_cf, mask=mask, **args_dict)
# total_recon_loss_semantics += recon_loss_semantics.item()
# total_semantics_recon_acc += semantics_recon_acc.item()
# total_train_loss += loss.item()
# total_recon_loss_semantics /= n_steps
total_train_loss /= n_steps
# total_semantics_recon_acc /= n_steps
# writer.add_scalar('recon_loss_semantics/train', total_recon_loss_semantics, e)
# writer.add_scalar('total_semantics_recon_acc/train', total_semantics_recon_acc, e)
writer.add_scalar('total/train_total_loss', total_train_loss, e)
print('====> Total_train_loss: {:.4f}'.format(total_train_loss))
return total_train_loss
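
# --- Hedged sketch (added for illustration; not part of the original script) ---
# This excerpt does not show the epoch loop that drives train()/validate().
# A plausible driver, using only names that appear in this file (`e` is read as a
# module-level global by both helpers, plus `epochs`, `eval_epochs`,
# `save_interval`, `save_ckpt_dir`, `model`), could look like the function below;
# the checkpoint filename is an assumption.
def _example_epoch_driver():
    global e
    for e in range(epochs):
        train()
        if (e + 1) % eval_epochs == 0:
            validate()
        if (e + 1) % save_interval == 0:
            torch.save(model.state_dict(), osp.join(save_ckpt_dir, 'epoch_{}.pt'.format(e + 1)))
# -------------------------------------------------------------------------------
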
def validate():
use_ddim = False # FIXME - hardcoded
clip_denoised = False # FIXME - hardcoded
model.eval()
with torch.no_grad():
sample_fn = (
diffusion.p_sample_loop if not use_ddim else diffusion.ddim_sample_loop
)
total_recon_loss_semantics = 0
total_cfd = 0
total_acc = 0
n_steps = 0
for mask, given_objs, given_cats, target_obj, target_cat, y in tqdm(valid_data_loader):
# verts_can: (bs, seg_len, Nverts, 3), contacts: (bs, seg_len, Nverts, 1), contacts_s: (bs, seg_len, Nverts, 42)
mask = mask.to(device)
given_objs = given_objs.to(device)
given_cats = given_cats.to(device)
target_obj = target_obj.to(device)
target_cat = target_cat.to(device)
sample = sample_fn(
model,
target_obj.shape,
mask,
given_objs,
given_cats,
y=y,
clip_denoised=clip_denoised,
model_kwargs=None,
skip_timesteps=0, # 0 is the default value - i.e. don't skip any step
init_image=None,
progress=False,
dump_steps=None,
noise=None,
const_noise=False,
                # when experimenting with guidance_scale we want to neutralize the effect of noise on generation
)
# pr_cf: (bs, seg_len, 655, 43), mu: (bs, 256), logvar: (bs, 256)
# z = torch.tensor(np.random.normal(0, 1, (max_frame, 256)).astype(np.float32)).to(device)
# posa_out = model.posa(z, verts_can)
pr_pnts = sample
gt_pnts = target_obj
recon_loss_semantics = ((pr_pnts-gt_pnts)**2).mean()
cfd, cfd_normals = chamfer_distance(pr_pnts, gt_pnts.float().to(device))
# Calculate for categorical
pred_cat = model.saved_cat
pred_cat = pred_cat.squeeze(1)
pred_cat = torch.argmax(pred_cat, dim=1)
target_cat = torch.argmax(target_cat, dim=1)
acc = (pred_cat==target_cat).sum().item()
# recon_loss_semantics, semantics_recon_acc = compute_recon_loss(gt_cf, pr_cf, mask=mask, **args_dict)
total_recon_loss_semantics += recon_loss_semantics
total_cfd += cfd
total_acc += acc
n_steps += 1
total_recon_loss_semantics /= n_steps
total_cfd /= n_steps
total_acc /= n_steps
print(n_steps)
writer.add_scalar('recon_loss_semantics/validate', total_recon_loss_semantics, e)
writer.add_scalar('total_cfd/validate', total_cfd, e)
writer.add_scalar('total_acc/validate', total_acc, e)
print(
'====>Recon_loss_semantics = {:.4f} , Chamfer distance = {:.4f}, Category acc = {:.4f}'.format(
total_recon_loss_semantics, total_cfd, total_acc))
return total_recon_loss_semantics, total_cfd, total_acc
if __name__ == '__main__':
# torch.manual_seed(0)
print(torch.version.cuda)
# torch.multiprocessing.set_start_method('spawn')
parser = argparse.ArgumentParser(description="")
parser.add_argument("--train_data_dir", type=str, default="data/proxd_train",
help="path to POSA_temp dataset dir")
parser.add_argument("--valid_data_dir", type=str, default="data/proxd_valid",
help="path to POSA_temp dataset dir")
parser.add_argument("--load_ckpt", type=str, default=None,
help="load a checkpoint as the continue point for training")
parser.add_argument("--posa_path", type=str, default="training/posa/model_ckpt/best_model_recon_acc.pt")
parser.add_argument("--out_dir", type=str, default="training/", help="Folder that stores checkpoints and training logs")
parser.add_argument("--experiment", type=str, default="default_experiment",
help="Experiment name. Checkpoints and training logs will be saved in out_dir/experiment folder.")
parser.add_argument("--save_interval", type=int, default=50, help="Epoch interval for saving model checkpoints.")
parser.add_argument("--lr", type=float, default=1e-3)
parser.add_argument("--epochs", type=int, default=1000)
parser.add_argument('--fix_ori', dest='fix_ori', action='store_const', const=True, default=False,
help="fix orientation of each segment with the rotation calculated from the first frame")
parser.add_argument("--encoder_mode", type=int, default=1,
help="Encoder mode (different number represents different variants of encoder)")
parser.add_argument("--decoder_mode", type=int, default=1,
help="Decoder mode (different number represents different variants of decoder)")
parser.add_argument("--n_layer", type=int, default=3, help="number of layers in transformer")
parser.add_argument("--n_head", type=int, default=4, help="number of heads in transformer")
parser.add_argument("--dim_ff", type=int, default=512, help="dimension of hidden layers in positionwise MLP in the transformer")
parser.add_argument("--f_vert", type=int, default=64, help="dimension of the embeddings for body vertices")
parser.add_argument("--num_workers", type=int, default=0, help="number of workers for dataloader")
parser.add_argument("--jump_step", type=int, default=8, help="frame skip size for each input motion sequence")
parser.add_argument("--max_frame", type=int, default=256, help="The maximum length of motion sequence (after frame skipping) which model accepts.")
parser.add_argument("--eval_epochs", type=int, default=10, help="The number of epochs that we periodically evalute the model.")
parser.add_argument("--datatype", type=str, default="proxd", help="Dataset type indicator: PRO-teXt or HUMANISE.")
args = parser.parse_args()
args_dict = vars(args)
# Parse arguments
train_data_dir = args_dict['train_data_dir']
valid_data_dir = args_dict['valid_data_dir']
load_ckpt = args_dict['load_ckpt']
save_interval = args_dict['save_interval']
out_dir = args_dict['out_dir']
experiment = args_dict['experiment']
lr = args_dict['lr']
epochs = args_dict['epochs']
fix_ori = args_dict['fix_ori']
encoder_mode = args_dict['encoder_mode']
decoder_mode = args_dict['decoder_mode']
n_layer = args_dict['n_layer']
n_head = args_dict['n_head']
num_workers = args_dict['num_workers']
jump_step = args_dict['jump_step']
max_frame = args_dict['max_frame']
dim_ff = args_dict['dim_ff']
f_vert = args_dict['f_vert']
posa_path = args_dict['posa_path']
eval_epochs = args_dict['eval_epochs']
datatype = args_dict['datatype']
save_ckpt_dir = os.path.join(out_dir, experiment, "model_ckpt")
log_dir = os.path.join(out_dir, experiment, "tb_log")
os.makedirs(save_ckpt_dir, exist_ok=True)
device = torch.device("cuda")
dtype = torch.float32
kl_w = 0.5
if datatype == "proxd":
train_dataset = ProxDataset_txt(train_data_dir, max_frame=max_frame, fix_orientation=fix_ori,
step_multiplier=1, jump_step=jump_step)
train_data_loader = DataLoader(train_dataset, batch_size=6, shuffle=True, num_workers=num_workers)
valid_dataset = ProxDataset_txt(valid_data_dir, max_frame=max_frame, fix_orientation=fix_ori,
step_multiplier=1, jump_step=jump_step)
valid_data_loader = DataLoader(valid_dataset, batch_size=6, shuffle=True, num_workers=num_workers)
else: | train_dataset = HUMANISE(train_data_dir, max_frame=max_frame, fix_orientation=fix_ori, | 2 | 2023-11-06 07:55:51+00:00 | 12k |
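The LSDM row above is truncated at the "else:" branch, and its next_line column confirms that the non-PRO-teXt path begins with "train_dataset = HUMANISE(...)". As a hedged sketch (only that first line is confirmed; the rest is an assumption that mirrors the PRO-teXt branch and the HUMANISE constructor quoted in the row's context), the HUMANISE loaders would plausibly be built like this:

from torch.utils.data import DataLoader

from posa.dataset import HUMANISE


def build_humanise_loaders(train_data_dir, valid_data_dir, max_frame, fix_ori, jump_step,
                           batch_size=6, num_workers=0):
    """Mirror of the PRO-teXt branch above, but with the HUMANISE dataset (sketch)."""
    train_dataset = HUMANISE(train_data_dir, max_frame=max_frame, fix_orientation=fix_ori,
                             step_multiplier=1, jump_step=jump_step)
    valid_dataset = HUMANISE(valid_data_dir, max_frame=max_frame, fix_orientation=fix_ori,
                             step_multiplier=1, jump_step=jump_step)
    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers)
    valid_loader = DataLoader(valid_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers)
    return train_loader, valid_loader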
Harvard-Ophthalmology-AI-Lab/FairSeg | SAMed/segment_anything/build_sam.py | [
{
"identifier": "Sam",
"path": "SAMed/segment_anything/modeling/sam.py",
"snippet": "class Sam(nn.Module):\n mask_threshold: float = 0.0\n image_format: str = \"RGB\"\n\n def __init__(\n self,\n image_encoder: ImageEncoderViT,\n prompt_encoder: PromptEncoder,\n mask_decoder: MaskDecoder,\n pixel_mean: List[float] = [123.675, 116.28, 103.53],\n pixel_std: List[float] = [58.395, 57.12, 57.375],\n ) -> None:\n \"\"\"\n SAM predicts object masks from an image and input prompts.\n\n Arguments:\n image_encoder (ImageEncoderViT): The backbone used to encode the\n image into image embeddings that allow for efficient mask prediction.\n prompt_encoder (PromptEncoder): Encodes various types of input prompts.\n mask_decoder (MaskDecoder): Predicts masks from the image embeddings\n and encoded prompts.\n pixel_mean (list(float)): Mean values for normalizing pixels in the input image.\n pixel_std (list(float)): Std values for normalizing pixels in the input image.\n \"\"\"\n super().__init__()\n self.image_encoder = image_encoder\n self.prompt_encoder = prompt_encoder\n self.mask_decoder = mask_decoder\n self.register_buffer(\"pixel_mean\", torch.Tensor(pixel_mean).view(-1, 1, 1), False)\n self.register_buffer(\"pixel_std\", torch.Tensor(pixel_std).view(-1, 1, 1), False)\n\n @property\n def device(self) -> Any:\n return self.pixel_mean.device\n\n def forward(self, batched_input, multimask_output, image_size):\n if isinstance(batched_input, list):\n outputs = self.forward_test(batched_input, multimask_output)\n else:\n outputs = self.forward_train(batched_input, multimask_output, image_size)\n return outputs\n\n def forward_train(self, batched_input, multimask_output, image_size):\n input_images = self.preprocess(batched_input)\n image_embeddings = self.image_encoder(input_images)\n sparse_embeddings, dense_embeddings = self.prompt_encoder(\n points=None, boxes=None, masks=None\n )\n low_res_masks, iou_predictions = self.mask_decoder(\n image_embeddings=image_embeddings,\n image_pe=self.prompt_encoder.get_dense_pe(),\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n multimask_output=multimask_output\n )\n masks = self.postprocess_masks(\n low_res_masks,\n input_size=(image_size, image_size),\n original_size=(image_size, image_size)\n )\n outputs = {\n 'masks': masks,\n 'iou_predictions': iou_predictions,\n 'low_res_logits': low_res_masks\n }\n return outputs\n\n @torch.no_grad()\n def forward_test(\n self,\n batched_input: List[Dict[str, Any]],\n multimask_output: bool,\n ) -> List[Dict[str, torch.Tensor]]:\n \"\"\"\n Predicts masks end-to-end from provided images and prompts.\n If prompts are not known in advance, using SamPredictor is\n recommended over calling the model directly.\n\n Arguments:\n batched_input (list(dict)): A list over input images, each a\n dictionary with the following keys. A prompt key can be\n excluded if it is not present.\n 'image': The image as a torch tensor in 3xHxW format,\n already transformed for input to the model.\n 'original_size': (tuple(int, int)) The original size of\n the image before transformation, as (H, W).\n 'point_coords': (torch.Tensor) Batched point prompts for\n this image, with shape BxNx2. 
Already transformed to the\n input frame of the model.\n 'point_labels': (torch.Tensor) Batched labels for point prompts,\n with shape BxN.\n 'boxes': (torch.Tensor) Batched box inputs, with shape Bx4.\n Already transformed to the input frame of the model.\n 'mask_inputs': (torch.Tensor) Batched mask inputs to the model,\n in the form Bx1xHxW.\n multimask_output (bool): Whether the model should predict multiple\n disambiguating masks, or return a single mask.\n\n Returns:\n (list(dict)): A list over input images, where each element is\n as dictionary with the following keys.\n 'masks': (torch.Tensor) Batched binary mask predictions,\n with shape BxCxHxW, where B is the number of input promts,\n C is determiend by multimask_output, and (H, W) is the\n original size of the image.\n 'iou_predictions': (torch.Tensor) The model's predictions\n of mask quality, in shape BxC.\n 'low_res_logits': (torch.Tensor) Low resolution logits with\n shape BxCxHxW, where H=W=256. Can be passed as mask input\n to subsequent iterations of prediction.\n \"\"\"\n input_images = torch.stack([self.preprocess(x[\"image\"]) for x in batched_input], dim=0)\n image_embeddings = self.image_encoder(input_images)\n\n outputs = []\n for image_record, curr_embedding in zip(batched_input, image_embeddings):\n if \"point_coords\" in image_record:\n points = (image_record[\"point_coords\"], image_record[\"point_labels\"])\n else:\n points = None\n sparse_embeddings, dense_embeddings = self.prompt_encoder(\n points=points,\n boxes=image_record.get(\"boxes\", None),\n masks=image_record.get(\"mask_inputs\", None),\n )\n low_res_masks, iou_predictions = self.mask_decoder(\n image_embeddings=curr_embedding.unsqueeze(0),\n image_pe=self.prompt_encoder.get_dense_pe(),\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n multimask_output=multimask_output,\n )\n masks = self.postprocess_masks(\n low_res_masks,\n input_size=image_record[\"image\"].shape[-2:],\n original_size=image_record[\"original_size\"],\n )\n masks = masks > self.mask_threshold\n outputs.append(\n {\n \"masks\": masks,\n \"iou_predictions\": iou_predictions,\n \"low_res_logits\": low_res_masks,\n }\n )\n return outputs\n\n def postprocess_masks(\n self,\n masks: torch.Tensor,\n input_size: Tuple[int, ...],\n original_size: Tuple[int, ...],\n ) -> torch.Tensor:\n \"\"\"\n Remove padding and upscale masks to the original image size.\n\n Arguments:\n masks (torch.Tensor): Batched masks from the mask_decoder,\n in BxCxHxW format.\n input_size (tuple(int, int)): The size of the image input to the\n model, in (H, W) format. Used to remove padding.\n original_size (tuple(int, int)): The original size of the image\n before resizing for input to the model, in (H, W) format.\n\n Returns:\n (torch.Tensor): Batched masks in BxCxHxW format, where (H, W)\n is given by original_size.\n \"\"\"\n masks = F.interpolate(\n masks,\n (self.image_encoder.img_size, self.image_encoder.img_size),\n mode=\"bilinear\",\n align_corners=False,\n )\n masks = masks[..., : input_size[0], : input_size[1]]\n masks = F.interpolate(masks, original_size, mode=\"bilinear\", align_corners=False)\n return masks\n\n def preprocess(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Normalize pixel values and pad to a square input.\"\"\"\n # Normalize colors\n x = (x - self.pixel_mean) / self.pixel_std\n\n # Pad\n h, w = x.shape[-2:]\n padh = self.image_encoder.img_size - h\n padw = self.image_encoder.img_size - w\n x = F.pad(x, (0, padw, 0, padh))\n return x"
},
{
"identifier": "ImageEncoderViT",
"path": "SAMed/segment_anything/modeling/image_encoder.py",
"snippet": "class ImageEncoderViT(nn.Module):\n def __init__(\n self,\n img_size: int = 1024,\n patch_size: int = 16,\n in_chans: int = 3,\n embed_dim: int = 768,\n depth: int = 12,\n num_heads: int = 12,\n mlp_ratio: float = 4.0,\n out_chans: int = 256,\n qkv_bias: bool = True,\n norm_layer: Type[nn.Module] = nn.LayerNorm,\n act_layer: Type[nn.Module] = nn.GELU,\n use_abs_pos: bool = True,\n use_rel_pos: bool = False,\n rel_pos_zero_init: bool = True,\n window_size: int = 0,\n global_attn_indexes: Tuple[int, ...] = (),\n ) -> None:\n \"\"\"\n Args:\n img_size (int): Input image size.\n patch_size (int): Patch size.\n in_chans (int): Number of input image channels.\n embed_dim (int): Patch embedding dimension.\n depth (int): Depth of ViT.\n num_heads (int): Number of attention heads in each ViT block.\n mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.\n qkv_bias (bool): If True, add a learnable bias to query, key, value.\n norm_layer (nn.Module): Normalization layer.\n act_layer (nn.Module): Activation layer.\n use_abs_pos (bool): If True, use absolute positional embeddings.\n use_rel_pos (bool): If True, add relative positional embeddings to the attention map.\n rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.\n window_size (int): Window size for window attention blocks.\n global_attn_indexes (list): Indexes for blocks using global attention.\n \"\"\"\n super().__init__()\n self.img_size = img_size\n\n self.patch_embed = PatchEmbed(\n kernel_size=(patch_size, patch_size),\n stride=(patch_size, patch_size),\n in_chans=in_chans,\n embed_dim=embed_dim,\n )\n\n self.pos_embed: Optional[nn.Parameter] = None\n if use_abs_pos:\n # Initialize absolute positional embedding with pretrain image size.\n self.pos_embed = nn.Parameter(\n torch.zeros(1, img_size // patch_size, img_size // patch_size, embed_dim)\n )\n\n self.blocks = nn.ModuleList()\n for i in range(depth):\n block = Block(\n dim=embed_dim,\n num_heads=num_heads,\n mlp_ratio=mlp_ratio,\n qkv_bias=qkv_bias,\n norm_layer=norm_layer,\n act_layer=act_layer,\n use_rel_pos=use_rel_pos,\n rel_pos_zero_init=rel_pos_zero_init,\n window_size=window_size if i not in global_attn_indexes else 0,\n input_size=(img_size // patch_size, img_size // patch_size),\n )\n self.blocks.append(block)\n\n self.neck = nn.Sequential(\n nn.Conv2d(\n embed_dim,\n out_chans,\n kernel_size=1,\n bias=False,\n ),\n LayerNorm2d(out_chans),\n nn.Conv2d(\n out_chans,\n out_chans,\n kernel_size=3,\n padding=1,\n bias=False,\n ),\n LayerNorm2d(out_chans),\n )\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n x = self.patch_embed(x) # pre embed: [1, 3, 1024, 1024], post embed: [1, 64, 64, 768]\n if self.pos_embed is not None:\n x = x + self.pos_embed\n\n for blk in self.blocks:\n x = blk(x)\n\n x = self.neck(x.permute(0, 3, 1, 2)) # [b, c, h, w], [1, 256, 64, 64]\n\n return x"
},
{
"identifier": "MaskDecoder",
"path": "SAMed/segment_anything/modeling/mask_decoder.py",
"snippet": "class MaskDecoder(nn.Module):\n def __init__(\n self,\n *,\n transformer_dim: int,\n transformer: nn.Module,\n num_multimask_outputs: int = 3,\n activation: Type[nn.Module] = nn.GELU,\n iou_head_depth: int = 3,\n iou_head_hidden_dim: int = 256,\n ) -> None:\n \"\"\"\n Predicts masks given an image and prompt embeddings, using a\n tranformer architecture.\n\n Arguments:\n transformer_dim (int): the channel dimension of the transformer\n transformer (nn.Module): the transformer used to predict masks\n num_multimask_outputs (int): the number of masks to predict\n when disambiguating masks\n activation (nn.Module): the type of activation to use when\n upscaling masks\n iou_head_depth (int): the depth of the MLP used to predict\n mask quality\n iou_head_hidden_dim (int): the hidden dimension of the MLP\n used to predict mask quality\n \"\"\"\n super().__init__()\n self.transformer_dim = transformer_dim\n self.transformer = transformer\n\n self.num_multimask_outputs = num_multimask_outputs\n\n self.iou_token = nn.Embedding(1, transformer_dim)\n self.num_mask_tokens = num_multimask_outputs + 1\n self.mask_tokens = nn.Embedding(self.num_mask_tokens, transformer_dim)\n\n self.output_upscaling = nn.Sequential(\n nn.ConvTranspose2d(transformer_dim, transformer_dim // 4, kernel_size=2, stride=2),\n LayerNorm2d(transformer_dim // 4),\n activation(),\n nn.ConvTranspose2d(transformer_dim // 4, transformer_dim // 8, kernel_size=2, stride=2),\n activation(),\n )\n self.output_hypernetworks_mlps = nn.ModuleList(\n [\n MLP(transformer_dim, transformer_dim, transformer_dim // 8, 3)\n for i in range(self.num_mask_tokens)\n ]\n )\n\n self.iou_prediction_head = MLP(\n transformer_dim, iou_head_hidden_dim, self.num_mask_tokens, iou_head_depth\n )\n\n def forward(\n self,\n image_embeddings: torch.Tensor,\n image_pe: torch.Tensor,\n sparse_prompt_embeddings: torch.Tensor,\n dense_prompt_embeddings: torch.Tensor,\n multimask_output: bool,\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Predict masks given image and prompt embeddings.\n\n Arguments:\n image_embeddings (torch.Tensor): the embeddings from the image encoder\n image_pe (torch.Tensor): positional encoding with the shape of image_embeddings\n sparse_prompt_embeddings (torch.Tensor): the embeddings of the points and boxes\n dense_prompt_embeddings (torch.Tensor): the embeddings of the mask inputs\n multimask_output (bool): Whether to return multiple masks or a single\n mask.\n\n Returns:\n torch.Tensor: batched predicted masks\n torch.Tensor: batched predictions of mask quality\n \"\"\"\n masks, iou_pred = self.predict_masks(\n image_embeddings=image_embeddings,\n image_pe=image_pe,\n sparse_prompt_embeddings=sparse_prompt_embeddings,\n dense_prompt_embeddings=dense_prompt_embeddings,\n )\n\n # Select the correct mask or masks for output\n # if multimask_output:\n # mask_slice = slice(1, None)\n # else:\n # mask_slice = slice(0, 1)\n # masks = masks[:, mask_slice, :, :]\n # iou_pred = iou_pred[:, mask_slice]\n\n # Prepare output\n return masks, iou_pred\n\n def predict_masks(\n self,\n image_embeddings: torch.Tensor,\n image_pe: torch.Tensor,\n sparse_prompt_embeddings: torch.Tensor,\n dense_prompt_embeddings: torch.Tensor,\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Predicts masks. 
See 'forward' for more details.\"\"\"\n # Concatenate output tokens\n output_tokens = torch.cat([self.iou_token.weight, self.mask_tokens.weight], dim=0)\n output_tokens = output_tokens.unsqueeze(0).expand(sparse_prompt_embeddings.size(0), -1, -1)\n tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=1)\n\n # Expand per-image data in batch direction to be per-mask\n src = torch.repeat_interleave(image_embeddings, tokens.shape[0], dim=0)\n src = src + dense_prompt_embeddings\n pos_src = torch.repeat_interleave(image_pe, tokens.shape[0], dim=0)\n b, c, h, w = src.shape\n\n # Run the transformer\n hs, src = self.transformer(src, pos_src, tokens)\n iou_token_out = hs[:, 0, :]\n mask_tokens_out = hs[:, 1 : (1 + self.num_mask_tokens), :]\n\n # Upscale mask embeddings and predict masks using the mask tokens\n src = src.transpose(1, 2).view(b, c, h, w)\n upscaled_embedding = self.output_upscaling(src)\n hyper_in_list: List[torch.Tensor] = []\n for i in range(self.num_mask_tokens):\n hyper_in_list.append(self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :]))\n hyper_in = torch.stack(hyper_in_list, dim=1) # [b, c, token_num]\n\n b, c, h, w = upscaled_embedding.shape # [h, token_num, h, w]\n masks = (hyper_in @ upscaled_embedding.view(b, c, h * w)).view(b, -1, h, w) # [1, 4, 256, 256], 256 = 4 * 64, the size of image embeddings\n\n # Generate mask quality predictions\n iou_pred = self.iou_prediction_head(iou_token_out)\n\n return masks, iou_pred"
},
{
"identifier": "PromptEncoder",
"path": "SAMed/segment_anything/modeling/prompt_encoder.py",
"snippet": "class PromptEncoder(nn.Module):\n def __init__(\n self,\n embed_dim: int,\n image_embedding_size: Tuple[int, int],\n input_image_size: Tuple[int, int],\n mask_in_chans: int,\n activation: Type[nn.Module] = nn.GELU,\n ) -> None:\n \"\"\"\n Encodes prompts for input to SAM's mask decoder.\n\n Arguments:\n embed_dim (int): The prompts' embedding dimension\n image_embedding_size (tuple(int, int)): The spatial size of the\n image embedding, as (H, W).\n input_image_size (int): The padded size of the image as input\n to the image encoder, as (H, W).\n mask_in_chans (int): The number of hidden channels used for\n encoding input masks.\n activation (nn.Module): The activation to use when encoding\n input masks.\n \"\"\"\n super().__init__()\n self.embed_dim = embed_dim\n self.input_image_size = input_image_size\n self.image_embedding_size = image_embedding_size\n self.pe_layer = PositionEmbeddingRandom(embed_dim // 2)\n\n self.num_point_embeddings: int = 4 # pos/neg point + 2 box corners\n point_embeddings = [nn.Embedding(1, embed_dim) for i in range(self.num_point_embeddings)]\n self.point_embeddings = nn.ModuleList(point_embeddings)\n self.not_a_point_embed = nn.Embedding(1, embed_dim)\n\n self.mask_input_size = (4 * image_embedding_size[0], 4 * image_embedding_size[1])\n self.mask_downscaling = nn.Sequential(\n nn.Conv2d(1, mask_in_chans // 4, kernel_size=2, stride=2),\n LayerNorm2d(mask_in_chans // 4),\n activation(),\n nn.Conv2d(mask_in_chans // 4, mask_in_chans, kernel_size=2, stride=2),\n LayerNorm2d(mask_in_chans),\n activation(),\n nn.Conv2d(mask_in_chans, embed_dim, kernel_size=1),\n ) # downsample to 1/4\n self.no_mask_embed = nn.Embedding(1, embed_dim)\n\n def get_dense_pe(self) -> torch.Tensor:\n \"\"\"\n Returns the positional encoding used to encode point prompts,\n applied to a dense set of points the shape of the image encoding.\n\n Returns:\n torch.Tensor: Positional encoding with shape\n 1x(embed_dim)x(embedding_h)x(embedding_w)\n \"\"\"\n return self.pe_layer(self.image_embedding_size).unsqueeze(0)\n\n def _embed_points(\n self,\n points: torch.Tensor,\n labels: torch.Tensor,\n pad: bool,\n ) -> torch.Tensor:\n \"\"\"Embeds point prompts.\"\"\"\n points = points + 0.5 # Shift to center of pixel\n if pad:\n padding_point = torch.zeros((points.shape[0], 1, 2), device=points.device)\n padding_label = -torch.ones((labels.shape[0], 1), device=labels.device)\n points = torch.cat([points, padding_point], dim=1)\n labels = torch.cat([labels, padding_label], dim=1)\n point_embedding = self.pe_layer.forward_with_coords(points, self.input_image_size)\n point_embedding[labels == -1] = 0.0\n point_embedding[labels == -1] += self.not_a_point_embed.weight\n point_embedding[labels == 0] += self.point_embeddings[0].weight\n point_embedding[labels == 1] += self.point_embeddings[1].weight\n return point_embedding\n\n def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor:\n \"\"\"Embeds box prompts.\"\"\"\n boxes = boxes + 0.5 # Shift to center of pixel\n coords = boxes.reshape(-1, 2, 2)\n corner_embedding = self.pe_layer.forward_with_coords(coords, self.input_image_size)\n corner_embedding[:, 0, :] += self.point_embeddings[2].weight\n corner_embedding[:, 1, :] += self.point_embeddings[3].weight\n return corner_embedding\n\n def _embed_masks(self, masks: torch.Tensor) -> torch.Tensor:\n \"\"\"Embeds mask inputs.\"\"\"\n mask_embedding = self.mask_downscaling(masks)\n return mask_embedding\n\n def _get_batch_size(\n self,\n points: Optional[Tuple[torch.Tensor, torch.Tensor]],\n 
boxes: Optional[torch.Tensor],\n masks: Optional[torch.Tensor],\n ) -> int:\n \"\"\"\n Gets the batch size of the output given the batch size of the input prompts.\n \"\"\"\n if points is not None:\n return points[0].shape[0]\n elif boxes is not None:\n return boxes.shape[0]\n elif masks is not None:\n return masks.shape[0]\n else:\n return 1\n\n def _get_device(self) -> torch.device:\n return self.point_embeddings[0].weight.device\n\n def forward(\n self,\n points: Optional[Tuple[torch.Tensor, torch.Tensor]],\n boxes: Optional[torch.Tensor],\n masks: Optional[torch.Tensor],\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Embeds different types of prompts, returning both sparse and dense\n embeddings.\n\n Arguments:\n points (tuple(torch.Tensor, torch.Tensor) or none): point coordinates\n and labels to embed.\n boxes (torch.Tensor or none): boxes to embed\n masks (torch.Tensor or none): masks to embed\n\n Returns:\n torch.Tensor: sparse embeddings for the points and boxes, with shape\n BxNx(embed_dim), where N is determined by the number of input points\n and boxes.\n torch.Tensor: dense embeddings for the masks, in the shape\n Bx(embed_dim)x(embed_H)x(embed_W)\n \"\"\"\n bs = self._get_batch_size(points, boxes, masks)\n sparse_embeddings = torch.empty((bs, 0, self.embed_dim), device=self._get_device())\n if points is not None:\n coords, labels = points\n point_embeddings = self._embed_points(coords, labels, pad=(boxes is None))\n sparse_embeddings = torch.cat([sparse_embeddings, point_embeddings], dim=1)\n if boxes is not None:\n box_embeddings = self._embed_boxes(boxes)\n sparse_embeddings = torch.cat([sparse_embeddings, box_embeddings], dim=1)\n\n if masks is not None:\n dense_embeddings = self._embed_masks(masks)\n else:\n dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand(\n bs, -1, self.image_embedding_size[0], self.image_embedding_size[1]\n )\n\n return sparse_embeddings, dense_embeddings"
},
{
"identifier": "TwoWayTransformer",
"path": "SAMed/segment_anything/modeling/transformer.py",
"snippet": "class TwoWayTransformer(nn.Module):\n def __init__(\n self,\n depth: int,\n embedding_dim: int,\n num_heads: int,\n mlp_dim: int,\n activation: Type[nn.Module] = nn.ReLU,\n attention_downsample_rate: int = 2,\n ) -> None:\n \"\"\"\n A transformer decoder that attends to an input image using\n queries whose positional embedding is supplied.\n\n Args:\n depth (int): number of layers in the transformer\n embedding_dim (int): the channel dimension for the input embeddings\n num_heads (int): the number of heads for multihead attention. Must\n divide embedding_dim\n mlp_dim (int): the channel dimension internal to the MLP block\n activation (nn.Module): the activation to use in the MLP block\n \"\"\"\n super().__init__()\n self.depth = depth\n self.embedding_dim = embedding_dim\n self.num_heads = num_heads\n self.mlp_dim = mlp_dim\n self.layers = nn.ModuleList()\n\n for i in range(depth):\n self.layers.append(\n TwoWayAttentionBlock(\n embedding_dim=embedding_dim,\n num_heads=num_heads,\n mlp_dim=mlp_dim,\n activation=activation,\n attention_downsample_rate=attention_downsample_rate,\n skip_first_layer_pe=(i == 0),\n )\n )\n\n self.final_attn_token_to_image = Attention(\n embedding_dim, num_heads, downsample_rate=attention_downsample_rate\n )\n self.norm_final_attn = nn.LayerNorm(embedding_dim)\n\n def forward(\n self,\n image_embedding: Tensor,\n image_pe: Tensor,\n point_embedding: Tensor,\n ) -> Tuple[Tensor, Tensor]:\n \"\"\"\n Args:\n image_embedding (torch.Tensor): image to attend to. Should be shape\n B x embedding_dim x h x w for any h and w.\n image_pe (torch.Tensor): the positional encoding to add to the image. Must\n have the same shape as image_embedding.\n point_embedding (torch.Tensor): the embedding to add to the query points.\n Must have shape B x N_points x embedding_dim for any N_points.\n\n Returns:\n torch.Tensor: the processed point_embedding\n torch.Tensor: the processed image_embedding\n \"\"\"\n # BxCxHxW -> BxHWxC == B x N_image_tokens x C\n bs, c, h, w = image_embedding.shape\n image_embedding = image_embedding.flatten(2).permute(0, 2, 1)\n image_pe = image_pe.flatten(2).permute(0, 2, 1)\n\n # Prepare queries\n queries = point_embedding\n keys = image_embedding\n\n # Apply transformer blocks and final layernorm\n for layer in self.layers:\n queries, keys = layer(\n queries=queries,\n keys=keys,\n query_pe=point_embedding,\n key_pe=image_pe,\n )\n\n # Apply the final attenion layer from the points to the image\n q = queries + point_embedding\n k = keys + image_pe\n attn_out = self.final_attn_token_to_image(q=q, k=k, v=keys)\n queries = queries + attn_out\n queries = self.norm_final_attn(queries)\n\n return queries, keys"
}
] | import torch
from torch.nn import functional as F
from icecream import ic
from functools import partial
from .modeling import ImageEncoderViT, MaskDecoder, PromptEncoder, Sam, TwoWayTransformer | 7,562 | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
def build_sam_vit_h(image_size, num_classes, pixel_mean=[123.675, 116.28, 103.53], pixel_std=[58.395, 57.12, 57.375],
checkpoint=None):
return _build_sam(
encoder_embed_dim=1280,
encoder_depth=32,
encoder_num_heads=16,
encoder_global_attn_indexes=[7, 15, 23, 31],
checkpoint=checkpoint,
num_classes=num_classes,
image_size=image_size,
pixel_mean=pixel_mean,
pixel_std=pixel_std
)
build_sam = build_sam_vit_h
def build_sam_vit_l(image_size, num_classes, pixel_mean=[123.675, 116.28, 103.53], pixel_std=[58.395, 57.12, 57.375],
checkpoint=None):
return _build_sam(
encoder_embed_dim=1024,
encoder_depth=24,
encoder_num_heads=16,
encoder_global_attn_indexes=[5, 11, 17, 23],
checkpoint=checkpoint,
num_classes=num_classes,
image_size=image_size,
pixel_mean=pixel_mean,
pixel_std=pixel_std
)
def build_sam_vit_b(image_size, num_classes, pixel_mean=[123.675, 116.28, 103.53], pixel_std=[58.395, 57.12, 57.375],
checkpoint=None):
return _build_sam(
encoder_embed_dim=768,
encoder_depth=12,
encoder_num_heads=12,
encoder_global_attn_indexes=[2, 5, 8, 11],
        # adopt global attention at the [3, 6, 9, 12] transformer layers (1-indexed); use window attention in the other layers
checkpoint=checkpoint,
num_classes=num_classes,
image_size=image_size,
pixel_mean=pixel_mean,
pixel_std=pixel_std
)
sam_model_registry = {
"default": build_sam_vit_h,
"vit_h": build_sam_vit_h,
"vit_l": build_sam_vit_l,
"vit_b": build_sam_vit_b,
}
def _build_sam(
encoder_embed_dim,
encoder_depth,
encoder_num_heads,
encoder_global_attn_indexes,
num_classes,
image_size,
pixel_mean,
pixel_std,
checkpoint=None,
):
prompt_embed_dim = 256
image_size = image_size
vit_patch_size = 16
image_embedding_size = image_size // vit_patch_size # Divide by 16 here
sam = Sam(
image_encoder=ImageEncoderViT(
depth=encoder_depth,
embed_dim=encoder_embed_dim,
img_size=image_size,
mlp_ratio=4,
norm_layer=partial(torch.nn.LayerNorm, eps=1e-6),
num_heads=encoder_num_heads,
patch_size=vit_patch_size,
qkv_bias=True,
use_rel_pos=True,
global_attn_indexes=encoder_global_attn_indexes,
window_size=14,
out_chans=prompt_embed_dim,
),
| # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
def build_sam_vit_h(image_size, num_classes, pixel_mean=[123.675, 116.28, 103.53], pixel_std=[58.395, 57.12, 57.375],
checkpoint=None):
return _build_sam(
encoder_embed_dim=1280,
encoder_depth=32,
encoder_num_heads=16,
encoder_global_attn_indexes=[7, 15, 23, 31],
checkpoint=checkpoint,
num_classes=num_classes,
image_size=image_size,
pixel_mean=pixel_mean,
pixel_std=pixel_std
)
build_sam = build_sam_vit_h
def build_sam_vit_l(image_size, num_classes, pixel_mean=[123.675, 116.28, 103.53], pixel_std=[58.395, 57.12, 57.375],
checkpoint=None):
return _build_sam(
encoder_embed_dim=1024,
encoder_depth=24,
encoder_num_heads=16,
encoder_global_attn_indexes=[5, 11, 17, 23],
checkpoint=checkpoint,
num_classes=num_classes,
image_size=image_size,
pixel_mean=pixel_mean,
pixel_std=pixel_std
)
def build_sam_vit_b(image_size, num_classes, pixel_mean=[123.675, 116.28, 103.53], pixel_std=[58.395, 57.12, 57.375],
checkpoint=None):
return _build_sam(
encoder_embed_dim=768,
encoder_depth=12,
encoder_num_heads=12,
encoder_global_attn_indexes=[2, 5, 8, 11],
# adopt global attention at [3, 6, 9, 12] transform layer, else window attention layer
checkpoint=checkpoint,
num_classes=num_classes,
image_size=image_size,
pixel_mean=pixel_mean,
pixel_std=pixel_std
)
sam_model_registry = {
"default": build_sam_vit_h,
"vit_h": build_sam_vit_h,
"vit_l": build_sam_vit_l,
"vit_b": build_sam_vit_b,
}
def _build_sam(
encoder_embed_dim,
encoder_depth,
encoder_num_heads,
encoder_global_attn_indexes,
num_classes,
image_size,
pixel_mean,
pixel_std,
checkpoint=None,
):
prompt_embed_dim = 256
image_size = image_size
vit_patch_size = 16
image_embedding_size = image_size // vit_patch_size # Divide by 16 here
sam = Sam(
image_encoder=ImageEncoderViT(
depth=encoder_depth,
embed_dim=encoder_embed_dim,
img_size=image_size,
mlp_ratio=4,
norm_layer=partial(torch.nn.LayerNorm, eps=1e-6),
num_heads=encoder_num_heads,
patch_size=vit_patch_size,
qkv_bias=True,
use_rel_pos=True,
global_attn_indexes=encoder_global_attn_indexes,
window_size=14,
out_chans=prompt_embed_dim,
), | prompt_encoder=PromptEncoder( | 3 | 2023-11-03 17:05:40+00:00 | 12k |
microsoft/PLEX | PLEX/util/evaluators.py | [
{
"identifier": "DEFAULT_CAM",
"path": "PLEX/envs/environments.py",
"snippet": "DEFAULT_CAM = {'robosuite': 'agentview',\n 'metaworld': 'corner',\n 'd4rl': None}"
},
{
"identifier": "RobosuiteEnv",
"path": "PLEX/envs/environments.py",
"snippet": "class RobosuiteEnv(gym.Env):\n # If env_meta is specified, image_size and postprocess_visual_obs will be ignored.\n def __init__(self, task, use_normalized_reward, full_state_mode, env_meta=None, render_gpu_device_id=-1,\n camera_names=None, image_size=84, postprocess_visual_obs=True):\n self.full_state_mode = full_state_mode\n if self.full_state_mode:\n raise NotImplementedError\n if use_normalized_reward:\n raise NotImplementedError\n if isinstance(camera_names, str):\n self.camera_names = camera_names.split(',')\n elif type(camera_names) in {list, tuple}:\n self.camera_names = list(camera_names)\n else:\n raise ValueError(f\"camera_names should be str or list of str, but got {camera_names}\")\n\n if env_meta is not None:\n env_meta = deepcopy(env_meta)\n env_meta['env_kwargs']['camera_names'] = self.camera_names\n self._env = EnvUtils.create_env_from_metadata(\n env_meta=env_meta,\n env_name=task.name,\n use_image_obs=True\n )\n else:\n from robomimic.envs.env_robosuite import EnvRobosuite\n from robosuite.controllers import load_controller_config\n self._env = EnvRobosuite(\n env_name=task.name,\n render=False,\n render_offscreen=True,\n use_image_obs=True,\n postprocess_visual_obs=postprocess_visual_obs,\n # ---------------------- kwargs -------------------------\n render_gpu_device_id=render_gpu_device_id,\n camera_names=self.camera_names,\n camera_widths=image_size,\n camera_heights=image_size,\n camera_depths=[False] * len(self.camera_names),\n robots=task.robot,\n # Even if use_sparse_reward it True, we will let the native env. return its native reward and convert it to\n # the particular kind of sparse reward we need on the fly.\n reward_shaping=True,\n controller_configs = load_controller_config(default_controller=\"OSC_POSE\")\n # Explicitly add more params from here: https://robosuite.ai/docs/modules/environments.html ?\n )\n\n self.use_sparse_reward = False\n self.image_size = image_size\n\n obs = self.reset()\n self.proprio_dim = obs['proprio'].shape[0]\n self.obs_dims = obs[f'{self.camera_names[0]}_image'].shape\n\n self.action_dim = self._env.action_dimension\n\n def _format_obs(self, obs):\n # ASSUMPTION: The images tensor entries are scaled to [0, 1]. The tensor is in the CWH format and has the correct size (3 x self.image_size x self.image_size)\n # The assumption is enforced by Robomimic under the hood thanks to the init_obs_preprocessing(.) call in experiment.py.\n ret = {'proprio': compose_proprio(obs, keys=PROPRIO_KEYS['robosuite'])}\n for cam_name in self.camera_names:\n key = f'{cam_name}_image'\n ret[key] = obs[key]\n return ret\n\n def reset(self):\n obs = self._env.reset()\n return self._format_obs(obs)\n\n def step(self, action):\n obs, reward, done, info = self._env.step(action)\n info['success'] = self._env.is_success()['task']\n if self.use_sparse_reward:\n reward = sparse_reward(info['success'])\n return self._format_obs(obs), reward, done, info"
},
{
"identifier": "MetaWorldEnv",
"path": "PLEX/envs/environments.py",
"snippet": "class MetaWorldEnv(gym.Env):\n def __init__(self, task, use_normalized_reward, full_state_mode, env_meta=None, steps_at_goal=1, render_gpu_device_id=-1,\n camera_name=None, image_size=84, train_distrib=False):\n self.use_sparse_reward = False\n self.full_state_mode = full_state_mode\n self._env = get_mw_env(task_name=task.name,\n cam_height=image_size,\n cam_width=image_size,\n cam_name=(camera_name if not self.full_state_mode else None), # This turns off rendering when we aren't using image obs.\n goal_cost_reward=self.use_sparse_reward,\n # We include 'proprios' in ('states', 'proprios') only for compatibility.\n obs_types=(('images', 'proprio_states') if not self.full_state_mode else ('states', 'proprio_states')),\n fix_task_sequence=False,\n steps_at_goal=steps_at_goal,\n stop_at_goal=True,\n train_distrib=train_distrib,\n use_normalized_reward=use_normalized_reward)\n self.camera_name = camera_name\n self.image_size = image_size\n\n obs = self.reset()\n if not self.full_state_mode:\n self.obs_dims = obs[f'{self.camera_name}_image'].shape\n self.proprio_dim = obs['proprio'].shape[0]\n else:\n self.obs_dims = obs['state'].shape[0]\n # There will be no proprio states. self.proprio_dim = 1 just keeps code from breaking.\n self.proprio_dim = 1\n self.action_dim = 4\n\n\n def _format_obs(self, obs):\n # process_obs ensures that image tensor entries are scaled to [0, 1], the image is in the CWH format, and has the correct size (3 x self.image_size x self.image_size).\n # An init_obs_preprocessing(.) call in experiment.py sets all the relevant parameters for process_obs.\n if not self.full_state_mode:\n ret = {'proprio': obs['proprio_state'],\n f'{self.camera_name}_image': process_obs(obs['image'], 'rgb')}\n else:\n ret = {'state': obs['state']}\n return ret\n\n\n def reset(self):\n obs = self._env.reset()\n return self._format_obs(obs)\n\n\n def step(self, action):\n obs, reward, done, info = self._env.step(action)\n return self._format_obs(obs), reward, done, info"
},
{
"identifier": "d4rlEnv",
"path": "PLEX/envs/environments.py",
"snippet": "class d4rlEnv(gym.Env):\n def __init__(self, task, full_state_mode):\n import d4rl\n\n self.full_state_mode = full_state_mode\n assert self.full_state_mode, \"D4RL envs are to be used only in full-state mode.\"\n self._env = gym.make(task.name)\n obs = self.reset()\n self.obs_dims = self._env.observation_space.shape[0]\n self.proprio_dim = 1\n self.action_dim = self._env.action_space.shape[0]\n\n def _format_obs(self, obs):\n ret = {'state': obs}\n return ret\n\n\n def reset(self):\n obs = self._env.reset()\n return self._format_obs(obs)\n\n\n def step(self, action):\n obs, reward, done, info = self._env.step(action)\n return self._format_obs(obs), reward, done, info"
},
{
"identifier": "init_obs_preprocessing",
"path": "PLEX/envs/environments.py",
"snippet": "def init_obs_preprocessing(camera_names, target_img_size):\n def resize_img(obs):\n return process_image(obs, target_img_size)\n def resize_depth_map(obs):\n return process_depth_map(obs, target_img_size)\n ImageModality.set_obs_processor(resize_img)\n DepthModality.set_obs_processor(resize_depth_map)\n rgb_obs = [\"{}_image\".format(camera) for camera in camera_names]\n depth_obs = [\"{}_depth\".format(camera) for camera in camera_names]\n obs_modality_specs = {\n \"obs\": {\n \"low_dim\": PROPRIO_KEYS['robosuite'],\n \"rgb\": rgb_obs,\n \"depth\": depth_obs\n },\n \"goal\": {\n \"low_dim\": PROPRIO_KEYS['robosuite'],\n \"rgb\": rgb_obs,\n \"depth\": depth_obs\n }\n }\n\n initialize_obs_utils_with_obs_specs(obs_modality_specs)"
},
{
"identifier": "unprocess_image",
"path": "PLEX/envs/environments.py",
"snippet": "def unprocess_image(image):\n assert image.ndim == 3\n processed = deepcopy(image)\n # At this point, the image array is still in the CHW format.\n processed = ImageModality._default_obs_unprocessor(processed)\n # Now the image array is in the HWC format and scaled back to [0, 255]\n return processed"
},
{
"identifier": "PLEX",
"path": "PLEX/models/trajectory_models/plex.py",
"snippet": "class PLEX(TrajectoryModel):\n def __init__(\n self,\n camera_names, obs_dims,\n proprio_dim, act_dim,\n hidden_dim,\n relative_position_encodings,\n future_step=1,\n obs_pred_gpt2_kwargs={},\n inv_d_pred_gpt2_kwargs={},\n **kwargs\n ):\n super().__init__(camera_names, obs_dims, proprio_dim, act_dim, hidden_dim, **kwargs)\n\n # Create separately trainable positional embeddings and LayerNorms for the observational and the inverse dynamics transformer.\n self.relative_position_encodings = relative_position_encodings\n obs_pred_gpt2_kwargs['relative_position_encodings'] = relative_position_encodings\n inv_d_pred_gpt2_kwargs['relative_position_encodings'] = relative_position_encodings\n\n self.obs_tr_history_len = obs_pred_gpt2_kwargs['K']\n self.inv_d_tr_history_len = inv_d_pred_gpt2_kwargs['K']\n\n if not self.relative_position_encodings:\n self.embed_obs_tr_timestep = nn.Embedding(self.obs_tr_history_len, hidden_dim)\n self.embed_inv_d_tr_timestep = nn.Embedding(self.inv_d_tr_history_len, hidden_dim)\n\n self.embed_obs_tr_ln = nn.LayerNorm(hidden_dim)\n self.embed_inv_d_ln = nn.LayerNorm(hidden_dim)\n\n self.n_obs_tr_components = 2 # namely: target returns and image observations\n # One extra position is for the context embedding.\n n_obs_tr_positions = 1 + self.obs_tr_history_len * self.n_obs_tr_components\n obs_tr_config = transformers.GPT2Config(\n vocab_size=1, # doesn't matter -- we don't use the vocab\n n_positions=n_obs_tr_positions,\n n_ctx=n_obs_tr_positions,\n n_embd=hidden_dim,\n **obs_pred_gpt2_kwargs\n )\n self.obs_transformer = GPT2Model(obs_tr_config)\n\n self.n_inv_d_tr_components = 3 # namely: integrated observations (image obs. embeddings + proprios combined), image obs predictions, and actions\n n_inv_d_transformer_positions = self.inv_d_tr_history_len * self.n_inv_d_tr_components\n inv_d_transformer_config = transformers.GPT2Config(\n vocab_size=1, # doesn't matter -- we don't use the vocab\n n_positions=n_inv_d_transformer_positions,\n n_ctx=n_inv_d_transformer_positions,\n n_embd=hidden_dim,\n **inv_d_pred_gpt2_kwargs\n )\n self.inv_d_transformer = GPT2Model(inv_d_transformer_config)\n\n self.future_step = future_step\n\n # NOTE: currently, using the Gaussian head-based stochastic prediction of observation latents doesn't work very well.\n # Therefore, we'll use deterministic prediction of observation latents instead.\n self.deterministic_future_obs_emb_predictions = True\n if not self.deterministic_future_obs_emb_predictions:\n self.predict_future = GaussianHead(\n input_dim=hidden_dim, output_dim=hidden_dim,\n std_bounds=self.std_bounds,\n hidden_dim=hidden_dim\n )\n\n\n def _get_tunables(self, image_encoder_tune_style='all', obs_pred_transformer_tune_style='all', inv_d_pred_transformer_tune_style='all'):\n tunables = super()._get_tunables(image_encoder_tune_style)\n\n #\n # Handle the tunables of the observation prediction transformer.\n #\n if not self.deterministic_future_obs_emb_predictions and obs_pred_transformer_tune_style != 'none':\n tunables.append(self.predict_future)\n\n if obs_pred_transformer_tune_style == 'all':\n tunables.extend([\n self.embed_obs_tr_ln,\n self.return_encoder,\n self.obs_transformer\n ])\n\n if self.impute_style == 'trainable':\n tunables.extend([\n getattr(self, f'missing_{x}_embedding') for x in [\n 'context', 'image', 'return'\n ]\n ])\n\n if not self.relative_position_encodings:\n tunables.append(self.embed_obs_tr_timestep) # Only for absolute position encodings.\n\n elif obs_pred_transformer_tune_style == 
'last_block':\n # Fine-tune the last block of the transformer\n tunables.extend([\n self.obs_transformer.h[-1],\n self.obs_transformer.ln_f\n ])\n elif obs_pred_transformer_tune_style == 'linear_probe':\n # Only tune the predict_* networks\n pass\n elif obs_pred_transformer_tune_style == 'none':\n # Tune nothing -- no parameters got included\n pass\n else:\n raise ValueError(f'Invalid transformer_tune_style: {obs_pred_transformer_tune_style}')\n\n #\n # Handle the tunables of the inverse dynamics prediction transformer.\n #\n if inv_d_pred_transformer_tune_style != 'none':\n tunables.append(self.predict_action)\n\n if inv_d_pred_transformer_tune_style == 'all':\n tunables.extend([\n self.embed_inv_d_ln,\n self.proprio_encoder,\n self.action_encoder,\n self.image_and_proprio_emb_combiner,\n self.inv_d_transformer\n ])\n\n if self.impute_style == 'trainable':\n tunables.extend([\n getattr(self, f'missing_{x}_embedding') for x in [\n 'proprio', 'action'\n ]\n ])\n\n if not self.relative_position_encodings:\n tunables.append(self.embed_inv_d_tr_timestep) # Only for absolute position encodings.\n\n elif inv_d_pred_transformer_tune_style == 'last_block':\n # Fine-tune the last block of the transformer\n tunables.extend([\n self.inv_d_transformer.h[-1],\n self.inv_d_transformer.ln_f\n ])\n elif inv_d_pred_transformer_tune_style == 'linear_probe':\n # Only tune the predict_* networks\n pass\n elif inv_d_pred_transformer_tune_style == 'none':\n # Tune nothing -- no parameters got included\n pass\n else:\n raise ValueError(f'Invalid transformer_tune_style: {inv_d_pred_transformer_tune_style}')\n\n return tunables\n\n\n def _stack_inputs_and_masks(self, n_tr_input_components, inputs, mask, seq_length, batch_size, hidden_dim):\n assert len(inputs) == n_tr_input_components\n total_seq_length = len(inputs) * seq_length\n stacked_inputs = torch.stack(inputs, dim=1)\\\n .permute(0, 2, 1, 3)\\\n .reshape(batch_size, total_seq_length, hidden_dim) # [B, N-1, NS]\n\n # To make the attention mask fit the stacked inputs, have to stack it as well\n stacked_mask = torch.stack(\n [mask for _ in range(len(inputs))], dim=1\n ).permute(0, 2, 1).reshape(batch_size, total_seq_length)\n return stacked_inputs, stacked_mask\n\n\n def _predict_obs(self, context_embeddings, returns_embeddings, current_image_obs_embeddings, mask, seq_length, batch_size):\n stacked_obs_tr_inputs, stacked_obs_tr_mask = self._stack_inputs_and_masks(self.n_obs_tr_components,\n [returns_embeddings, current_image_obs_embeddings],\n mask,\n seq_length,\n batch_size,\n self.hidden_dim)\n # Account for context conditioning for the observation prediction transformer\n stacked_obs_tr_inputs = torch.cat([\n context_embeddings.unsqueeze(1),\n stacked_obs_tr_inputs\n ], dim=1) # [B, N, NS]\n stacked_obs_tr_inputs = self.embed_obs_tr_ln(stacked_obs_tr_inputs) # [B, N, NS]\n\n stacked_obs_tr_mask = torch.cat([\n torch.ones(batch_size, 1, device=stacked_obs_tr_mask.device),\n stacked_obs_tr_mask\n ], dim=1)\n\n # We feed the input embeddings (not word indices as in NLP) to the observation prediciton model.\n obs_tr_outputs = self.obs_transformer(\n inputs_embeds=stacked_obs_tr_inputs,\n attention_mask=stacked_obs_tr_mask\n )\n x_obs_tr = obs_tr_outputs['last_hidden_state']\n\n # Ignore first hidden state (corresponding to context)\n x_obs_tr = x_obs_tr[:,1:,:]\n\n # reshape x so that the second dimension corresponds to the original\n # returns-to-go (0), or observations (1); i.e. 
x[:,1,t] is the token for s_t\n x_obs_tr = x_obs_tr.reshape(batch_size, seq_length, self.n_obs_tr_components, self.hidden_dim).permute(0, 2, 1, 3)\n\n # Get predictions\n\n # For each time step, the observation prediction transformer outputs two latent states:\n # the first for return-to-go, the other for the state distribution parameters.\n predicted_obs_pos_idx = self.n_obs_tr_components - 1\n if not self.deterministic_future_obs_emb_predictions:\n future_image_obs_emb_distr = self.predict_future(x_obs_tr[:,predicted_obs_pos_idx])\n pred_future_image_obs_embeddings = future_image_obs_emb_distr.rsample()\n else:\n future_image_obs_emb_distr = None\n pred_future_image_obs_embeddings = x_obs_tr[:,predicted_obs_pos_idx]\n\n return pred_future_image_obs_embeddings, future_image_obs_emb_distr\n\n\n def _predict_actions(self, integrated_obs_embeddings, future_image_obs_emb, action_embeddings, mask, seq_length, batch_size):\n stacked_inv_d_inputs, stacked_inv_d_mask = self._stack_inputs_and_masks(self.n_inv_d_tr_components,\n [integrated_obs_embeddings, future_image_obs_emb, action_embeddings],\n mask,\n seq_length,\n batch_size,\n self.hidden_dim)\n\n inv_d_tr_outputs = self.inv_d_transformer(\n inputs_embeds=stacked_inv_d_inputs,\n attention_mask=stacked_inv_d_mask\n )\n x_inv_d_tr = inv_d_tr_outputs['last_hidden_state']\n\n # reshape x so that the second dimension corresponds to the original\n # observations (0), or actions (1); i.e. x[:,0,t] is the token for s_t\n x_inv_d_tr = x_inv_d_tr.reshape(batch_size, seq_length, self.n_inv_d_tr_components, self.hidden_dim).permute(0, 2, 1, 3)\n\n # For each time step, the inverse dynamics prediction transformer outputs three latent states, the last of which corresponds\n # to the action (see the call to self._stack_inputs_and_masks above). We want to predict that last component using all the data\n # that comes before it.\n predicted_action_pos_idx = self.n_inv_d_tr_components - 2\n pred_future_pred_actions = self.predict_action(x_inv_d_tr[:,predicted_action_pos_idx])\n return pred_future_pred_actions\n\n\n def forward(self, context, images, proprios, actions, rewards, returns_to_go, timesteps, mask, compute_pred_obs=True, compute_pred_future_actions=True, compute_known_future_actions=False, eval_mode=False):\n batch_dims = images[self.camera_names[0]].shape[:2]\n obs_tr_batch_size, seq_length = batch_dims\n batch_increase_ratio = self.obs_tr_history_len // self.inv_d_tr_history_len\n inv_d_batch_size = obs_tr_batch_size * batch_increase_ratio\n\n # NOTE: During training, the length of trajectory sequences that are fed to this method is (obs_pred.K + lookahead).\n # During evaluation, it is just obs_pred.K. So, we need to let this method's logic know about this, as below.\n if eval_mode:\n k = 0\n else:\n k = self.future_step\n seq_length -= k\n assert seq_length == self.obs_tr_history_len\n\n #\n # ******* STEP 1: Embed all the inputs to the model. 
*******\n #\n\n image_obs_embeddings = self.embed_image_observations({f'{cam}_image': images[cam] for cam in images.keys()}, batch_dims)\n prop_embeddings = self.embed_proprio(proprios, batch_dims)\n integrated_obs_embeddings = self.image_and_proprio_emb_combiner(torch.cat([image_obs_embeddings, prop_embeddings], dim=-1))\n action_embeddings = self.embed_action(actions, batch_dims)\n returns_embeddings = self.embed_return(returns_to_go, batch_dims)\n\n # Save for later\n orig_image_obs_embeddings = image_obs_embeddings[:,k:].detach()\n\n passthrough_current_image_obs_embeddings = image_obs_embeddings[:,:self.obs_tr_history_len]\n stopgrad_current_image_obs_embeddings = image_obs_embeddings[:,:self.obs_tr_history_len].detach()\n\n known_future_image_obs_embeddings = image_obs_embeddings[:,k:].reshape(inv_d_batch_size, self.inv_d_tr_history_len, self.hidden_dim)\n\n image_obs_embeddings = image_obs_embeddings[:,:self.obs_tr_history_len]\n prop_embeddings = prop_embeddings[:,:self.obs_tr_history_len]\n returns_embeddings = returns_embeddings[:,:self.obs_tr_history_len]\n integrated_obs_embeddings = integrated_obs_embeddings[:,:self.obs_tr_history_len].reshape(inv_d_batch_size, self.inv_d_tr_history_len, self.hidden_dim)\n action_embeddings = action_embeddings[:,:self.obs_tr_history_len].reshape(inv_d_batch_size, self.inv_d_tr_history_len, self.hidden_dim)\n\n # Masks for each model\n mask_prefix = mask[:,:self.obs_tr_history_len]\n inv_d_mask = mask_prefix.reshape(inv_d_batch_size, self.inv_d_tr_history_len)\n\n assert np.prod(passthrough_current_image_obs_embeddings.shape) == np.prod(known_future_image_obs_embeddings.shape)\n assert np.prod(stopgrad_current_image_obs_embeddings.shape) == np.prod(known_future_image_obs_embeddings.shape)\n\n if not self.relative_position_encodings:\n # Shift embeddings by position embedding\n # Obs. prediction and inverse dynamics prediction transformers potentially have their own position embeddings\n position_embeddings_for_obs_tr = self.embed_obs_tr_timestep(\n torch.arange(self.obs_tr_history_len, device=self.embed_obs_tr_timestep.weight.device))\n position_embeddings_for_obs_tr = torch.tile(position_embeddings_for_obs_tr, (obs_tr_batch_size, 1, 1))\n\n # Image obs. embeddings and returns will be fed only into the obs. prediction transformer.\n passthrough_current_image_obs_embeddings = passthrough_current_image_obs_embeddings.to(position_embeddings_for_obs_tr.device) + position_embeddings_for_obs_tr\n stopgrad_current_image_obs_embeddings = stopgrad_current_image_obs_embeddings.to(position_embeddings_for_obs_tr.device) + position_embeddings_for_obs_tr\n returns_embeddings = returns_embeddings.to(position_embeddings_for_obs_tr.device) + position_embeddings_for_obs_tr\n\n position_embeddings_for_inv_d_tr = self.embed_inv_d_tr_timestep(\n torch.arange(self.inv_d_tr_history_len, device=self.embed_inv_d_tr_timestep.weight.device))\n position_embeddings_for_inv_d_tr = torch.tile(position_embeddings_for_inv_d_tr, (inv_d_batch_size, 1, 1))\n\n # Integrated observations and actions will be fed only into the inv.d. 
transformer\n integrated_obs_embeddings = integrated_obs_embeddings.to(position_embeddings_for_inv_d_tr.device) + position_embeddings_for_inv_d_tr\n # NOTE: the future image observation embeddings aren't integrated with proprios, because predicting inverse dynamics from known current\n # and future proprio would be too easy and woudn't need to rely on the future image observation embeddings.\n known_future_image_obs_embeddings = known_future_image_obs_embeddings.to(position_embeddings_for_inv_d_tr.device) + position_embeddings_for_inv_d_tr\n action_embeddings = action_embeddings.to(position_embeddings_for_inv_d_tr.device) + position_embeddings_for_inv_d_tr\n\n #\n # ******* STEP 2: Use the observation prediction transformer to predict the observation embeddings. *******\n #\n\n # NOTE: this prediction makes sense only for trajectories with a task/context, since without one it's impossible to\n # reasonably predict the next observation. But we compute the predictions anyway and let the compute_losses(.) method ignore\n # these predictions during loss computation if needed.\n\n # For the obs. prediction transformer, we make the sequence look like (C, R_1, o_1, R_2, o_2, ...)\n if (compute_pred_future_actions and (actions is not None) and (context is not None)) or (compute_pred_obs and (context is not None)):\n context_embeddings = self.embed_context({f'{cam}_image': context[cam] for cam in context.keys()} if context is not None else None, batch_dims)\n passthrough_context_embeddings = context_embeddings\n stopgrad_context_embeddings = context_embeddings.detach()\n pred_future_image_obs_embeddings_from_passthrough_obs, _ = self._predict_obs(passthrough_context_embeddings, returns_embeddings, passthrough_current_image_obs_embeddings, mask_prefix, self.obs_tr_history_len, obs_tr_batch_size)\n pred_future_image_obs_embeddings_from_stopgrad_obs, future_image_obs_emb_distr_from_stopgrad_obs = self._predict_obs(stopgrad_context_embeddings, returns_embeddings, stopgrad_current_image_obs_embeddings, mask_prefix, self.obs_tr_history_len, obs_tr_batch_size)\n\n else:\n pred_future_image_obs_embeddings_from_passthrough_obs = None\n pred_future_image_obs_embeddings_from_stopgrad_obs = None\n future_image_obs_emb_distr_from_stopgrad_obs = None\n\n #\n # ******* STEP 3: Predict inverse dynamics, possibly in two ways. *******\n #\n\n # For the inv. dynamics prediction transformer, we make the sequence look like (int_o_1, pred_img_o_2, a_1, int_o_2, pred_img_o_3, a_2, ...)\n # Here, int_o_X are the embeddings of combined image-proprio observations, and pred_img_o_(X+1) are the predicted embeddings\n # of the next image observation. During learning, latter can be obtained either from STEP 2 or from the image_obs_embeddings array\n # *shifted by 1 position*. In this case, Presumably, the original image observation sequence contains 1 more entry than the action array.\n #\n # NOTE that the sequence doesn't contain a task specification C, since inverse dynamics should be task-agnostic.\n #\n # NOTE: We drop the last element of each input sequence before reshaping the inputs and passing them to the\n # inverse dynamics transformer. This is because the last action in each input sequence can't be predicted,\n # reliably, since we don't have the ground truth for the following observation, we omit this action from the\n # sequence.\n\n # NOTE: perhaps we shouldn't include predicted observations into the history (shaping the input as (int_o_1, pred_img_o_2, a_1, int_o_2, pred_img_o_3, a_2, ... 
) includes them).\n # It makes the history long for no good reason (just due to including past predictions, which don't add any information), potentially making the model the model \"used to\" the\n # fact that predictions carry no extra info and making it largely ignore the prediction of the latest observation latent, which is actually crucial for making the correct action prediction.\n #\n if compute_pred_future_actions and (actions is not None):\n # If compute_pred_future_actions, this means we are doing inference. At inference/execution time, we don't have future observations\n # available to us, and therefore *must* rely on those predicted in STEP 2.\n assert pred_future_image_obs_embeddings_from_passthrough_obs is not None\n pred_future_image_obs_embeddings_from_passthrough_obs = pred_future_image_obs_embeddings_from_passthrough_obs.reshape(inv_d_batch_size, self.inv_d_tr_history_len, self.hidden_dim)\n # Remember to add position encodings as appropriate\n if not self.relative_position_encodings:\n pred_future_image_obs_embeddings_from_passthrough_obs + position_embeddings_for_inv_d_tr\n\n pred_future_pred_actions = self._predict_actions(integrated_obs_embeddings,\n ### For passing zeros instead of target vector\n #torch.zeros_like(pred_future_image_obs_embeddings_from_passthrough_obs),\n ### For passing goal instead of target vector\n #torch.tile(passthrough_context_embeddings, (30, 1, 1)).reshape(pred_future_image_obs_embeddings_from_passthrough_obs.shape),\n pred_future_image_obs_embeddings_from_passthrough_obs,\n action_embeddings,\n inv_d_mask,\n self.inv_d_tr_history_len,\n inv_d_batch_size)\n else:\n pred_future_pred_actions = None\n\n if compute_known_future_actions and (actions is not None):\n # If compute_loss, then we are doing learning. 
During learning, we know the actual future observation for each step in\n # the training trajectories, so we can use it to infer the actions.\n known_future_pred_actions = self._predict_actions(integrated_obs_embeddings,\n known_future_image_obs_embeddings,\n action_embeddings,\n inv_d_mask,\n self.inv_d_tr_history_len,\n inv_d_batch_size)\n else:\n known_future_pred_actions = None\n\n return (\n pred_future_pred_actions,\n known_future_pred_actions,\n orig_image_obs_embeddings,\n (future_image_obs_emb_distr_from_stopgrad_obs if not self.deterministic_future_obs_emb_predictions else pred_future_image_obs_embeddings_from_stopgrad_obs)\n )\n\n\n def compute_losses(self, forward_outputs, actions, contextual, mask):\n # Include superclass's losses\n losses = super().compute_losses(forward_outputs, actions, contextual, mask)\n\n # Unpack model outputs into local vars\n pred_future_action_preds, grounded_action_preds, target_obs_embeddings, future_obs_distr_from_stopgrad_obs = forward_outputs\n\n batch_size, actual_seq_length = target_obs_embeddings.shape[:2]\n assert actual_seq_length == self.obs_tr_history_len\n obs_mask = mask[:,:self.obs_tr_history_len]\n\n if actions is not None:\n target_actions = actions[:,:self.obs_tr_history_len]\n if grounded_action_preds is not None:\n mask__reshaped_for_predictions = obs_mask.reshape(grounded_action_preds.shape[0], -1)\n target_actions__reshaped_for_predictions = target_actions.reshape(grounded_action_preds.shape[0], grounded_action_preds.shape[1], -1)\n losses['grounded_inverse_dynamics'] = _action_loss(grounded_action_preds,\n target_actions__reshaped_for_predictions,\n mask__reshaped_for_predictions)\n if contextual and pred_future_action_preds is not None:\n # Action prediction based on predicted observations makes sense only for contextual trajectories\n # because without a context/task, observations can't be reasonably predicted.\n mask__reshaped_for_predictions = obs_mask.reshape(pred_future_action_preds.shape[0], -1)\n target_actions__reshaped_for_predictions = target_actions.reshape(pred_future_action_preds.shape[0], pred_future_action_preds.shape[1], -1)\n\n if pred_future_action_preds is not None:\n losses['predicted_inverse_dynamics'] = _action_loss(pred_future_action_preds,\n target_actions__reshaped_for_predictions,\n mask__reshaped_for_predictions)\n\n # Predict embedding k steps into the future.\n #\n # As with inverse dynamics computation based on predicted observations, observation prediction loss itself makes sense\n # only for contextual trajectories.\n if contextual:\n future_mask = obs_mask.bool()\n # NOTE: Here, we stop-grad the computed observation embeddings so that backpropagation affects only\n # the observation embedding prediction model, not the observation encoders. If we allow observation\n # encoders to be updated as well, the observation embeddings may eventually collapse due to\n # updates on observation-only batches. 
On observation-action batches, the encoders get updated anyway\n # thanks to backpropagation from the inverse dynamics.\n\n if not self.deterministic_future_obs_emb_predictions:\n future_embeddings = target_obs_embeddings[future_mask].detach()\n sliced_future_distr = slice_dist(future_obs_distr_from_stopgrad_obs, (slice(batch_size), slice(self.obs_tr_history_len)))\n masked_future_distr = slice_dist(sliced_future_distr, future_mask)\n future_log_probs = masked_future_distr.log_prob(future_embeddings)\n losses['future_prediction'] = -future_log_probs.mean()\n else:\n future_embeddings = target_obs_embeddings.detach()\n unmasked_losses = torch.mean((future_obs_distr_from_stopgrad_obs - future_embeddings)**2, dim=-1)\n assert unmasked_losses.shape == future_mask.shape\n selected_losses = unmasked_losses[future_mask]\n losses['future_prediction'] = selected_losses.mean()\n\n return losses"
},
{
"identifier": "setup_context_sampler",
"path": "PLEX/util/data.py",
"snippet": "def setup_context_sampler(style):\n def get_context(traj, start, end):\n # For simplicity, all images from a given time step will serve as context.\n traj_len = traj['len']\n\n if 'success' in style:\n success_indices = np.nonzero(traj['success'][:traj_len] == True)[0]\n if len(success_indices) == 0:\n if not globals.full_state_mode:\n traj = traj['load_images'](traj, start_idx=0, end_idx=1)\n images = traj['image']\n return {cam: np.zeros_like(images[cam][0]) for cam in images.keys()}, False\n else:\n states = traj['full_state']\n return np.zeros_like(states[0]), False\n elif style == 'first-success':\n chosen_idx = success_indices[0]\n elif style == 'random-success':\n chosen_idx = random.choice(success_indices)\n else:\n raise NotImplementedError\n elif style.startswith('random-next-'):\n window_len = int(style[12:])\n high = min(end + window_len, traj_len)\n chosen_idx = np.random.randint(end - 1, high)\n elif style.startswith('random-last-'):\n window_len = int(style[12:])\n chosen_idx = np.random.randint(traj_len - window_len, traj_len)\n elif style == 'blank':\n if not globals.full_state_mode:\n images = traj['load_images'](traj, start_idx=0, end_idx=1)['image']\n return {cam: np.zeros_like(images[cam][0]) for cam in images.keys()}, True\n else:\n states = traj['full_state']\n return np.zeros_like(states[0]),True\n else:\n raise NotImplementedError\n\n if not globals.full_state_mode:\n images = traj['load_images'](traj, start_idx=chosen_idx, end_idx=chosen_idx+1)['image']\n else:\n states = traj['full_state']\n\n return {cam: images[cam][chosen_idx].astype(np.float32) for cam in images.keys()} if not globals.full_state_mode else states[chosen_idx], True\n return get_context"
},
{
"identifier": "setup_batch_sampler",
"path": "PLEX/util/data.py",
"snippet": "def setup_batch_sampler(dataset, context_style, cmdline_args, device):\n context_fn = setup_context_sampler(context_style) if dataset.contextual else lambda *args, **kwargs: None\n return lambda batch_size, target_frame_rate, pad_frame_gaps: dataset.sample_batch(batch_size,\n target_frame_rate,\n pad_frame_gaps,\n max_len=((cmdline_args['obs_pred.K'] + cmdline_args['future_step']) if cmdline_args['model'] == 'PLEX' else cmdline_args['K']),\n get_context=context_fn,\n discount=cmdline_args['discount'],\n device=device,\n context_from_same_traj=cmdline_args['context_from_same_traj'])"
},
{
"identifier": "discount_cumsum",
"path": "PLEX/util/data.py",
"snippet": "def discount_cumsum(x, is_successful, gamma):\n global MIN_REWARD\n global MAX_REWARD\n discount_cumsum = np.zeros_like(x)\n # To deal with trajectories of different lenghths, we pretend that all trajectories are infinitely long\n # and define the discounted cumulative sum as\n # discount_cumsum[-1] = max_reward / (1. - gamma) # pretend last state is absorbing\n # for trajectories that reached the goal.\n #\n # For trajectories that timed out, we don't know the right value to infinitely \"extend\" them. Ideally,\n # it should be the min_reward / (1 - gamma), but we generally don't know what min_reward is for a given environment\n # and need to estimate it empirically from all of this environment's loaded data.\n discount_cumsum[-1] = x[-1] + (MAX_REWARD / (1. - gamma) if is_successful else MIN_REWARD / (1. - gamma))\n for t in reversed(range(x.shape[0]-1)):\n discount_cumsum[t] = x[t] + gamma * discount_cumsum[t+1]\n return discount_cumsum"
},
{
"identifier": "parse_comma_sep_param_value",
"path": "PLEX/util/misc.py",
"snippet": "def parse_comma_sep_param_value(comma_sep_param_value_str):\n param_values = [param_value.strip() for param_value in comma_sep_param_value_str.split(',')]\n return param_values"
},
{
"identifier": "construct_rewards",
"path": "PLEX/util/misc.py",
"snippet": "def construct_rewards(original_rewards, successes, reward_type):\n if reward_type == 'sparse':\n rewards = np.asarray([sparse_reward(r) for r in successes])\n elif reward_type == 'native':\n rewards = original_rewards\n elif reward_type == 'negative':\n rewards = -original_rewards\n elif reward_type == 'zero':\n rewards = np.zeros_like(original_rewards)\n elif reward_type == 'random':\n rewards = np.random.rand(*original_rewards.shape)\n else:\n raise NotImplementedError\n return rewards"
},
{
"identifier": "setup_trainer",
"path": "PLEX/util/misc.py",
"snippet": "def setup_trainer(batch_sampler, lr, eval_fns, model, trainable_params, cmdline_args):\n optimizer = torch.optim.AdamW(\n trainable_params,\n lr=lr,\n weight_decay=cmdline_args['weight_decay'],\n )\n scheduler = torch.optim.lr_scheduler.LambdaLR(\n optimizer,\n lambda steps: min((steps+1)/cmdline_args['warmup_steps'], 1)\n )\n\n # Model-specific loss weights\n if cmdline_args['model'] == 'DT' or cmdline_args['model'] == 'MLP':\n loss_weights = {\n 'action': 1.0\n }\n elif cmdline_args['model'] == 'PLEX':\n loss_weights = {\n # This is the task-conditioned latent state prediction loss weight.\n # It should be 1.0 for PL pretraining and 0.0 for EX pretraining (since EX pretraining uses\n # task-agnostic data that makes task-conditioned latent state prediction impossible).\n # It should be 1.0 for target-task finetuning as well.\n 'future_prediction': cmdline_args['future_prediction_loss_weight']\n }\n # The EX part of PLEX (i.e., inversed dynamics -- action prediction based on the current and a future latent state)\n # can be trained using the future latent state of the training trajectory *or* the future latent state\n # predicted by the PL part of PLEX (the latent state predictor).\n # If we care about the former, we set grounded_inverse_dynamics_loss_weight = 1 and predicted_inverse_dynamics_loss_weight = 0.\n # If we care about the latter, then vice versa. In either case,\n # predicted_inverse_dynamics_loss_weight = 1 - grounded_inverse_dynamics_loss_weight.\n #\n # Namely, for EX pretraining we set grounded_inverse_dynamics_loss_weight = 1, because\n # the latent state predictor (PL) is unavailable at the time when EX is being pretrained.\n #\n # For PL pretraining, grounded_inverse_dynamics_loss_weight doesn't matter, because during PL pretraining\n # the inverse dynamics precictor (EX) is frozen and isn't affected by training, and the inverse dynamics\n # losses, in turn, don't affect the PL component of PLEX.\n #\n # For target-task finetuning of PLEX, we set predicted_inverse_dynamics_loss_weight = 1, because we want to adapt the\n # PL and EX components of PLEX to work together.\n for which in ['predicted', 'grounded']:\n key = f'{which}_inverse_dynamics'\n loss_weights[key] = cmdline_args[f'{key}_loss_weight']\n else:\n raise NotImplementedError\n\n return Trainer(\n model=model,\n optimizer=optimizer,\n get_batch=batch_sampler,\n batch_size=cmdline_args['batch_size'],\n target_frame_rate=cmdline_args['target_frame_rate'],\n pad_frame_gaps=cmdline_args['pad_frame_gaps'],\n scheduler=scheduler,\n loss_weights=loss_weights,\n eval_fns=eval_fns,\n )"
}
] | import os
import time
import math
import random
import numpy as np
import torch
import torch.multiprocessing as mp
import robomimic.utils.obs_utils as ObsUtils
import cv2
import PLEX.util.globals as globals
import d4rl
import moviepy.editor as mpy
from copy import deepcopy
from PLEX.envs.environments import DEFAULT_CAM, RobosuiteEnv, MetaWorldEnv, d4rlEnv, init_obs_preprocessing, unprocess_image
from PLEX.models.trajectory_models.plex import PLEX
from PLEX.util.data import setup_context_sampler, setup_batch_sampler, discount_cumsum
from PLEX.util.misc import parse_comma_sep_param_value, construct_rewards, setup_trainer | 10,243 |
def evaluate_episode(
task,
model,
ep_id,
use_normalized_reward=False,
reward_type='native',
env_meta=None,
min_time_at_goal_for_success=5,
camera_names=None,
image_size=84,
device='cuda',
max_ep_len=500,
discount=1.,
full_state_mode=False,
context=None,
target_return=None,
record_camera=None,
write_individual_images=False,
record_traj_dir=None
):
if not full_state_mode:
# Make sure ObsUtils is set up (each process has to run this once)
#
# Actually, do we need this, given that it's done by the top-level module?
# Presumably, it doesn't hurt...
if ObsUtils.OBS_KEYS_TO_MODALITIES is None:
init_obs_preprocessing(camera_names, image_size)
image_obs_list = []
if task.dataset_type in {'robosuite', 'robomimic'}:
        # Choosing a GPU for each episode in this way prevents all evaluation env instances from running on the same GPU and potentially causing an OOM error.
render_device = ep_id % torch.cuda.device_count() if device == 'cuda' else -1
if env_meta is not None and 'robosuite' in env_meta:
env_meta['robosuite']['env_kwargs']['render_gpu_device_id'] = render_device
env = RobosuiteEnv(task, use_normalized_reward, full_state_mode,
env_meta=(env_meta['robosuite'] if env_meta is not None and 'robosuite' in env_meta else None),
render_gpu_device_id=render_device,
camera_names=camera_names,
image_size=image_size)
elif task.dataset_type == 'metaworld':
env = MetaWorldEnv(task, use_normalized_reward, full_state_mode,
env_meta=env_meta['metaworld'] if env_meta is not None and 'metaworld' in env_meta else None,
steps_at_goal=min_time_at_goal_for_success,
#render_gpu_device_id=render_device,
camera_name=camera_names[0],
image_size=image_size)
elif task.dataset_type == 'd4rl':
|
def evaluate_episode(
task,
model,
ep_id,
use_normalized_reward=False,
reward_type='native',
env_meta=None,
min_time_at_goal_for_success=5,
camera_names=None,
image_size=84,
device='cuda',
max_ep_len=500,
discount=1.,
full_state_mode=False,
context=None,
target_return=None,
record_camera=None,
write_individual_images=False,
record_traj_dir=None
):
if not full_state_mode:
# Make sure ObsUtils is set up (each process has to run this once)
#
# Actually, do we need this, given that it's done by the top-level module?
# Presumably, it doesn't hurt...
if ObsUtils.OBS_KEYS_TO_MODALITIES is None:
init_obs_preprocessing(camera_names, image_size)
image_obs_list = []
if task.dataset_type in {'robosuite', 'robomimic'}:
        # Choosing a GPU for each episode in this way prevents all evaluation env instances from running on the same GPU and potentially causing an OOM error.
render_device = ep_id % torch.cuda.device_count() if device == 'cuda' else -1
if env_meta is not None and 'robosuite' in env_meta:
env_meta['robosuite']['env_kwargs']['render_gpu_device_id'] = render_device
env = RobosuiteEnv(task, use_normalized_reward, full_state_mode,
env_meta=(env_meta['robosuite'] if env_meta is not None and 'robosuite' in env_meta else None),
render_gpu_device_id=render_device,
camera_names=camera_names,
image_size=image_size)
elif task.dataset_type == 'metaworld':
env = MetaWorldEnv(task, use_normalized_reward, full_state_mode,
env_meta=env_meta['metaworld'] if env_meta is not None and 'metaworld' in env_meta else None,
steps_at_goal=min_time_at_goal_for_success,
#render_gpu_device_id=render_device,
camera_name=camera_names[0],
image_size=image_size)
elif task.dataset_type == 'd4rl': | env = d4rlEnv(task, full_state_mode) | 3 | 2023-11-06 09:38:09+00:00 | 12k |
Giftify-Bot/Giftify-Bot | models/giveaways.py | [
{
"identifier": "ChannelConfig",
"path": "models/giveaway_settings.py",
"snippet": "class ChannelConfig:\n \"\"\"Represents the configuration settings for a channel.\n\n Attributes\n ----------\n channel: Union[discord.TextChannel, discord.CategoryChannel]\n The channel associated with the config.\n guild: discord.Guild\n The guild to which the channel belongs.\n required_roles: List[discord.Role]\n The list of default required roles.\n blacklisted_roles: List[discord.Role]\n The list of default blacklisted roles.\n bypass_roles: List[discord.Role]\n The list of default bypass_roles.\n multiplier_roles: Dict[discord.Role, int]\n The role and number of multiplier_roles entries mapping.\n ping: Optional[discord.Role]\n The default ping role for some channel.\n \"\"\"\n\n __slots__: Tuple[str, ...] = (\n \"channel\",\n \"guild\",\n \"required_roles\",\n \"blacklisted_roles\",\n \"bypass_roles\",\n \"multiplier_roles\",\n \"ping\",\n )\n\n def __init__(\n self,\n channel: Union[discord.TextChannel, discord.CategoryChannel],\n guild: discord.Guild,\n *,\n required_roles: List[discord.Role],\n blacklisted_roles: List[discord.Role],\n bypass_roles: List[discord.Role],\n multiplier_roles: Dict[discord.Role, int],\n ping: Optional[discord.Role] = None,\n ):\n self.channel = channel\n self.guild = guild\n self.required_roles = required_roles\n self.blacklisted_roles = blacklisted_roles\n self.bypass_roles = bypass_roles\n self.multiplier_roles = multiplier_roles\n self.ping = ping\n\n def __repr__(self):\n return f\"<ChannelConfig channel={self.channel!r}>\"\n\n @classmethod\n def from_data(\n cls,\n guild: discord.Guild,\n data: asyncpg.Record,\n ) -> Optional[\"ChannelConfig\"]:\n \"\"\"Create a ChannelConfig object from given data.\n\n Parameters\n ----------\n guild: discord.Guild\n The guild to which the channel belongs.\n value: Any\n The new value for the column.\n\n Returns\n -------\n ChannelConfig\n The updated `ChannelConfig` instance.\n \"\"\"\n\n data = dict(data)\n\n # We do not need these\n channel_id = data.pop(\"channel\")\n channel = guild.get_channel(channel_id)\n if channel is None:\n return\n\n assert isinstance(channel, (discord.TextChannel, discord.CategoryChannel))\n\n data[\"ping\"] = guild.get_role(data[\"ping\"])\n data[\"required_roles\"] = [\n guild.get_role(role) for role in data[\"required_roles\"] if role is not None\n ]\n data[\"blacklisted_roles\"] = [\n guild.get_role(role)\n for role in data[\"blacklisted_roles\"]\n if role is not None\n ]\n data[\"bypass_roles\"] = [\n guild.get_role(role) for role in data[\"bypass_roles\"] if role is not None\n ]\n data[\"multiplier_roles\"] = {\n guild.get_role(role): multiplier_roles\n for role, multiplier_roles in data[\"multiplier_roles\"].items()\n if role is not None\n }\n\n data.pop(\"guild\")\n\n return cls(channel, guild, **data)\n\n async def update(\n self, column: str, value: Any, pool: asyncpg.Pool\n ) -> \"ChannelConfig\":\n \"\"\"Update the specified column with the provided value in the database.\n\n Parameters\n ----------\n column: str\n The column to be updated.\n value: Any\n The new value for the column.\n pool: asyncpg.Pool\n The database connection pool.\n\n Raises\n ------\n ValueError\n If the provided column is not a valid column name in `self.__slots__`.\n\n Returns\n -------\n ChannelConfig\n The updated `ChannelConfig` instance.\n \"\"\"\n if column not in self.__slots__:\n raise ValueError(f\"Invalid column: {column}\")\n\n setattr(self, column, value)\n\n if isinstance(value, list):\n value = [role.id for role in value if role is not None]\n elif isinstance(value, 
dict):\n value = {\n role.id: multiplier_roles\n for role, multiplier_roles in value.items()\n if role is not None\n }\n elif isinstance(value, discord.Role):\n value = value.id\n else:\n raise ValueError(\"Unknown type given.\")\n\n query = f\"\"\"INSERT INTO channel_configs (guild, channel, {column}) VALUES ($1, $2, $3)\n ON CONFLICT (guild, channel) DO\n UPDATE SET {column} = excluded.{column}\"\"\"\n\n await pool.execute(\n query,\n self.guild.id,\n self.channel.id,\n value,\n )\n\n return self\n\n @classmethod\n async def create(\n cls,\n guild: discord.Guild,\n channel: Union[discord.TextChannel, discord.CategoryChannel],\n pool: asyncpg.Pool,\n ) -> \"ChannelConfig\":\n query = \"\"\"INSERT INTO channel_configs (guild, channel) VALUES ($1, $2) RETURNING *\"\"\"\n\n record = await pool.fetchrow(\n query,\n guild.id,\n channel.id,\n )\n\n instance = cls.from_data(guild, record)\n assert instance is not None # Since we just created it.\n return instance\n\n @staticmethod\n async def delete(channel_id: int, guild_id: int, pool: asyncpg.Pool):\n \"\"\"Delete the current ChannelConfig object.\n\n Parameters\n ----------\n channel_id: int\n The ID of the channel.\n guild_id: int\n The ID of the guild.\n pool: asyncpg.Pool\n The database connection pool.\n \"\"\"\n\n query = \"\"\"DELETE FROM channel_configs\n WHERE guild = $ AND channel = $2\"\"\"\n\n await pool.execute(query, guild_id, channel_id)"
},
{
"identifier": "GuildConfig",
"path": "models/giveaway_settings.py",
"snippet": "class GuildConfig:\n \"\"\"Represents the configuration settings for a guild.\n\n Parameters\n ----------\n guild: discord.Guild\n The guild associated with the configuration.\n logging: Optional[discord.TextChannel]\n The logging text channel for the guild.\n ping: Optional[discord.Role]\n The role to ping for notifications.\n reaction: str\n The reaction used for giveaways.\n participants_reaction,: str\n The reaction used for giveaways participants button.\n required_roles: List[discord.Role]\n The default roles required to join giveaway.\n blacklisted_roles: List[discord.Role]\n The default roles blacklisted from joining a giveaway.\n bypass_roles: List[discord.Role]\n The roles that bypass_roles certain restrictions.\n multiplier_roles: Dict[discord.Role, int]\n The multiplier_roles points assigned to each role.\n managers: List[discord.Role]\n The roles with manager permissions.\n dm_winner: bool\n Whether to send a direct message to the winner.\n dm_host: bool\n Whether to send a direct message to the host.\n channel_settings: List[ChannelConfig]\n The settings for each channel.\n color: discord.Colour\n The color used for messages.\n button_style: discord.ButtonStyle\n The style of the button.\n end_message: str\n The message sent when a giveaway ends.\n reroll_message: str\n The message sent when a giveaway rerolls.\n dm_message: str\n The direct message sent to winner.\n dm_host_message: str\n The direct message sent to host.\n gw_header: str\n The header for the giveaway message.\n gw_end_header: str\n The header for the giveaway end.\n \"\"\"\n\n __slots__: Tuple[str, ...] = (\n \"guild\",\n \"logging\",\n \"ping\",\n \"reaction\",\n \"participants_reaction\",\n \"required_roles\",\n \"blacklisted_roles\",\n \"bypass_roles\",\n \"multiplier_roles\",\n \"managers\",\n \"dm_winner\",\n \"dm_host\",\n \"channel_settings\",\n \"color\",\n \"button_style\",\n \"end_message\",\n \"reroll_message\",\n \"dm_message\",\n \"dm_host_message\",\n \"gw_header\",\n \"gw_end_header\",\n )\n\n def __init__(\n self,\n guild: discord.Guild,\n *,\n logging: Optional[discord.TextChannel],\n ping: Optional[discord.Role],\n reaction: str,\n participants_reaction: str,\n required_roles: List[discord.Role],\n blacklisted_roles: List[discord.Role],\n bypass_roles: List[discord.Role],\n multiplier_roles: Dict[discord.Role, int],\n managers: List[discord.Role],\n dm_winner: bool,\n dm_host: bool,\n channel_settings: List[ChannelConfig],\n color: discord.Colour,\n button_style: discord.ButtonStyle,\n end_message: str,\n reroll_message: str,\n dm_message: str,\n dm_host_message: str,\n gw_header: str,\n gw_end_header: str,\n ):\n self.guild = guild\n self.logging = logging\n self.ping = ping\n self.reaction = reaction\n self.participants_reaction = participants_reaction\n self.required_roles = required_roles\n self.blacklisted_roles = blacklisted_roles\n self.bypass_roles = bypass_roles\n self.multiplier_roles = multiplier_roles\n self.managers = managers\n self.dm_winner = dm_winner\n self.dm_host = dm_host\n self.channel_settings = channel_settings\n self.color = color\n self.button_style = button_style\n self.end_message = end_message\n self.reroll_message = reroll_message\n self.dm_host_message = dm_host_message\n self.dm_message = dm_message\n self.gw_header = gw_header\n self.gw_end_header = gw_end_header\n\n def __repr__(self):\n return f\"<GuildConfig guild={self.guild!r}>\"\n\n @staticmethod\n async def _create_config(guild_id: int, pool: asyncpg.Pool) -> asyncpg.Record:\n return await 
pool.fetchrow(\n \"INSERT INTO configs (guild) VALUES ($1) RETURNING *\",\n guild_id,\n )\n\n @classmethod\n def _from_data(\n cls,\n guild: discord.Guild,\n data: asyncpg.Record,\n channel_data: List[asyncpg.Record],\n ) -> \"GuildConfig\":\n data = dict(data)\n data[\"color\"] = discord.Colour(data[\"color\"])\n\n data[\"logging\"] = guild.get_channel(data[\"logging\"])\n data[\"ping\"] = guild.get_role(data[\"ping\"])\n data[\"required_roles\"] = [\n guild.get_role(role) for role in data[\"required_roles\"] if role is not None\n ]\n data[\"blacklisted_roles\"] = [\n guild.get_role(role)\n for role in data[\"blacklisted_roles\"]\n if role is not None\n ]\n data[\"bypass_roles\"] = [\n guild.get_role(role) for role in data[\"bypass_roles\"] if role is None\n ]\n data[\"multiplier_roles\"] = {\n guild.get_role(role): multiplier\n for role, multiplier in data[\"multiplier_roles\"].items()\n if role is not None and multiplier > 1\n }\n data[\"managers\"] = [\n guild.get_role(role) for role in data[\"managers\"] if role is not None\n ]\n\n data[\"button_style\"] = discord.utils.get(\n discord.ButtonStyle, value=data[\"button_style\"]\n )\n\n data[\"channel_settings\"] = [\n channel_setting\n for record in channel_data\n if (channel_setting := ChannelConfig.from_data(guild, record))\n ]\n\n data.pop(\"guild\") # We do not need this.\n\n return cls(guild, **data)\n\n def to_dict(self) -> GuildConfigData:\n \"\"\"Converts this GuildConfig object into a dict.\"\"\"\n\n data = GuildConfigData(\n guild=self.guild.id,\n reaction=self.reaction,\n participants_reaction=self.participants_reaction,\n required_roles=[\n role.id for role in self.required_roles if role is not None\n ],\n blacklisted_roles=[\n role.id for role in self.blacklisted_roles if role is not None\n ],\n bypass_roles=[role.id for role in self.bypass_roles if role is not None],\n multiplier_roles={\n role.id: multiplier_roles\n for role, multiplier_roles in self.multiplier_roles.items()\n if role is not None\n },\n managers=[role.id for role in self.managers if role is not None],\n dm_winner=self.dm_winner,\n dm_host=self.dm_host,\n color=int(self.color),\n button_style=self.button_style.value,\n end_message=self.end_message,\n reroll_message=self.reroll_message,\n dm_message=self.dm_message,\n dm_host_message=self.dm_host_message,\n gw_header=self.gw_header,\n gw_end_header=self.gw_end_header,\n ) # type: ignore\n if self.logging:\n data[\"logging\"] = self.logging.id\n if self.ping:\n data[\"ping\"] = self.ping.id\n return data\n\n @classmethod\n async def fetch(cls, guild: discord.Guild, pool: asyncpg.Pool) -> \"GuildConfig\":\n \"\"\"Create a GuildConfig instance from data retrieved from a database.\n\n Parameters\n ----------\n guild: discord.Guild\n The discord guild.\n pool: asyncpg.Pool\n The database connection pool.\n\n Returns\n -------\n GuildConfig\n An instance of GuildConfig populated with the retrieved data.\n \"\"\"\n\n data = await pool.fetchrow(\"SELECT * FROM configs WHERE guild = $1\", guild.id)\n channel_data: List[asyncpg.Record] = await pool.fetch(\n \"SELECT * FROM channel_configs WHERE guild = $1\", guild.id\n )\n\n if not data:\n data: asyncpg.Record = await cls._create_config(guild.id, pool)\n\n return cls._from_data(guild, data, channel_data)\n\n async def update(\n self, column: str, value: Any, pool: asyncpg.Pool\n ) -> \"GuildConfig\":\n \"\"\"Update the specified column with the provided value in the database.\n\n Parameters\n ----------\n column: str\n The column to be updated.\n value: Any\n The new 
value for the column.\n pool: asyncpg.Pool\n The database connection pool.\n\n Raises\n ------\n ValueError\n If the provided column is not a valid column name in `self.__slots__`.\n\n Returns\n -------\n GuildConfig\n The updated `GuildConfig` instance.\n \"\"\"\n if column not in self.__slots__:\n raise ValueError(f\"Invalid column: {column}\")\n\n setattr(self, column, value)\n\n data = self.to_dict()\n\n columns = \", \".join(data.keys())\n placeholders = \", \".join([f\"${i+1}\" for i in range(len(data))])\n update_clause = \", \".join(\n [f\"{key} = EXCLUDED.{key}\" for key in data.keys() if key != \"guild\"]\n )\n\n query = f\"\"\"\n INSERT INTO configs ({columns}) \n VALUES ({placeholders})\n ON CONFLICT (guild) DO \n UPDATE SET {update_clause}\n \"\"\"\n\n values = list(data.values())\n await pool.execute(query, *values)\n return self\n\n @overload\n async def get_channel_config(\n self,\n channel: Union[discord.TextChannel, discord.CategoryChannel],\n create_if_not_exists: bool = True,\n pool: Optional[asyncpg.Pool] = None,\n ) -> ChannelConfig:\n ...\n\n @overload\n async def get_channel_config(\n self,\n channel: Union[discord.TextChannel, discord.CategoryChannel],\n create_if_not_exists: bool = False,\n pool: Optional[asyncpg.Pool] = None,\n ) -> Optional[ChannelConfig]:\n ...\n\n async def get_channel_config(\n self,\n channel: Union[discord.TextChannel, discord.CategoryChannel],\n create_if_not_exists: bool = True,\n pool: Optional[asyncpg.Pool] = None,\n ) -> Optional[ChannelConfig]:\n \"\"\"\n Retrieves the configuration for a specific channel.\n\n Parameters\n ----------\n channel: Union[discord.TextChannel, discord.CategoryChannel]\n The channel for which to retrieve the configuration.\n create_if_not_exists: Optional[bool]\n Whether to create a new configuration if it doesn't exist. Default is True.\n pool: Optional[asyncpg.Pool]\n The connection pool for interacting with the database.\n\n Returns\n -------\n Optional[ChannelConfig]\n The ChannelConfig object if it exists, or None if it doesn't exist and create_if_not_exists is set to False.\n\n Raises\n ------\n MaxChannelConfigCreationError\n If create_if_not_exists is True and the maximum number of channel configurations has already been reached.\n \"\"\"\n\n config = discord.utils.get(self.channel_settings, channel=channel)\n if config is not None:\n return config\n\n if create_if_not_exists:\n if len(self.channel_settings) >= 25:\n raise MaxChannelConfigCreationError()\n else:\n if pool:\n config = await ChannelConfig.create(channel.guild, channel, pool)\n self.channel_settings.append(config)\n return config\n\n return None"
},
{
"identifier": "GIFT_EMOJI",
"path": "utils/constants.py",
"snippet": "GIFT_EMOJI = \"<:GiftifyGift:1119664021914796125> \""
},
{
"identifier": "GiveawayError",
"path": "utils/exceptions.py",
"snippet": "class GiveawayError(Exception):\r\n \"\"\"Error raised in a giveaway.\"\"\"\r"
},
{
"identifier": "bold",
"path": "utils/functions.py",
"snippet": "def bold(message: str) -> str:\n return f\"**{message}**\""
},
{
"identifier": "safe_format",
"path": "utils/functions.py",
"snippet": "def safe_format(message: str, **kwargs) -> str:\n \"\"\"A poorly written format function.\"\"\"\n for key, value in kwargs.items():\n formatted_key = \"{\" + key + \"}\"\n message = message.replace(formatted_key, str(value))\n return message"
},
{
"identifier": "Interaction",
"path": "utils/tree.py",
"snippet": "class CommandTree(app_commands.CommandTree):\r\n async def on_error(\r\n self,\r\n interaction: Interaction,\r\n error: app_commands.AppCommandError,\r\n ) -> None:\r"
},
{
"identifier": "BaseView",
"path": "utils/view.py",
"snippet": "class BaseView(discord.ui.View):\r\n children: List[Union[discord.ui.Button, discord.ui.Select]]\r\n message: Optional[Union[discord.Message, discord.InteractionMessage]] = None\r\n author: Optional[Union[discord.Member, discord.User]] = None\r\n\r\n async def on_error(\r\n self, interaction: Interaction, error: Exception, item: discord.ui.Item\r\n ) -> None:\r\n if isinstance(error, GiveawayError):\r\n embed = discord.Embed(\r\n title=\"An error was raised while executing this command!\",\r\n description=f\"{WARN_EMOJI} {str(error)}\",\r\n color=discord.Colour.red(),\r\n )\r\n view = discord.ui.View()\r\n button = discord.ui.Button(\r\n label=\"Support\", url=\"https://discord.gg/GQSGChbEKz\"\r\n )\r\n view.add_item(button)\r\n\r\n await interaction.followup.send(embed=embed, view=view, ephemeral=True)\r\n elif isinstance(error, ButtonOnCooldown):\r\n embed = discord.Embed(\r\n title=\"Stop clicking the button too fast!\",\r\n description=f\"{WARN_EMOJI} You are clicking the button too fast. Please retry after {error.retry_after: .2f}s.\",\r\n color=discord.Colour.red(),\r\n )\r\n view = discord.ui.View()\r\n button = discord.ui.Button(\r\n label=\"Support\", url=\"https://discord.gg/GQSGChbEKz\"\r\n )\r\n view.add_item(button)\r\n\r\n await interaction.followup.send(embed=embed, view=view, ephemeral=True)\r\n else:\r\n if not isinstance(\r\n error, (discord.HTTPException, discord.errors.InteractionResponded)\r\n ):\r\n if not interaction.response.is_done():\r\n await interaction.response.defer(thinking=True, ephemeral=True)\r\n\r\n embed = discord.Embed(\r\n title=\"An error was raised while executing this command!\",\r\n description=f\"{WARN_EMOJI} An unknown error occurred, my developers have been notified about this error.\",\r\n color=discord.Colour.red(),\r\n )\r\n view = discord.ui.View()\r\n button = discord.ui.Button(\r\n label=\"Support\", url=\"https://discord.gg/GQSGChbEKz\"\r\n )\r\n view.add_item(button)\r\n\r\n await interaction.followup.send(embed=embed, view=view, ephemeral=True)\r\n sentry_sdk.capture_exception(error)\r\n return interaction.client.log_handler.log.exception(\r\n \"Exception occurred in the View:\\n\", exc_info=error\r\n )\r\n\r\n async def on_timeout(self) -> None:\r\n for item in self.children:\r\n if isinstance(item, (discord.ui.Button, discord.ui.Select)):\r\n item.disabled = True\r\n\r\n if self.message is not None:\r\n try:\r\n await self.message.edit(view=self)\r\n except Exception:\r\n pass\r"
},
{
"identifier": "GiveawayView",
"path": "utils/view.py",
"snippet": "class GiveawayView(BaseView):\r\n def __init__(\r\n self,\r\n reaction: str = GIVEAWAY_EMOJI,\r\n participants_reaction: str = PARTICIPANTS_EMOJI,\r\n button_style: discord.ButtonStyle = discord.ButtonStyle.blurple,\r\n *,\r\n participant_count: Optional[int] = None,\r\n disabled: bool = False,\r\n ):\r\n super().__init__(timeout=None)\r\n\r\n self.add_item(\r\n GiveawayButton(\r\n reaction,\r\n button_style,\r\n participant_count=participant_count,\r\n disabled=disabled,\r\n )\r\n )\r\n self.add_item(ParticipantsButton(reaction=participants_reaction))\r\n\r\n def key(interaction: Interaction):\r\n return interaction.user.id\r\n\r\n self.cooldown = commands.CooldownMapping.from_cooldown(3, 5, key)\r\n\r\n async def interaction_check(self, interaction: Interaction):\r\n if retry_after := self.cooldown.update_rate_limit(interaction):\r\n raise ButtonOnCooldown(retry_after)\r\n\r\n return await super().interaction_check(interaction)\r"
}
] | import contextlib
import datetime
import random
import asyncpg
import discord
from enum import Enum
from typing import TYPE_CHECKING, Dict, List, Optional
from models.giveaway_settings import ChannelConfig, GuildConfig
from utils.constants import GIFT_EMOJI
from utils.exceptions import GiveawayError
from utils.functions import bold, safe_format
from utils.tree import Interaction
from utils.view import BaseView, GiveawayView
from bot import Giftify | 7,253 | self.bypass_roles: List[int] = record["bypass_roles"] or []
self.multiplier_roles: Dict[int, int] = {
int(role): entries
for role, entries in record["multiplier_roles"].items()
if entries > 1
}
self.messages: Dict[int, int] = {
int(member): messages for member, messages in record["messages"].items()
}
self.messages_required: Optional[int] = record["messages_required"]
self.allowed_message_channels: Optional[List[int]] = record["messages_channel"]
self.amari: Optional[int] = record["amari"]
self.weekly_amari: Optional[int] = record["weekly_amari"]
def __eq__(self, other: "Giveaway") -> bool:
try:
return (
self.guild_id == other.guild_id
and self.channel_id == other.channel_id
and self.message_id == other.message_id
)
except AttributeError:
return False
def __hash__(self) -> int:
return hash((self.guild_id, self.channel_id, self.message_id))
def __repr__(self) -> str:
return f"<Giveaway guild_id={self.guild_id} channel_id={self.channel_id} message_id={self.message_id}>"
@property
def jump_to_giveaway(self) -> discord.ui.View:
url = f"https://discord.com/channels/{self.guild_id}/{self.channel_id}/{self.message_id}"
view = BaseView(timeout=None)
button = discord.ui.Button(label="Jump To Giveaway", url=url)
view.add_item(button)
return view
@staticmethod
def create_embed(
interaction: Interaction,
config: GuildConfig,
duration: datetime.datetime,
winners: int,
prize: str,
required_roles: Optional[List[discord.Role]] = None,
blacklisted_roles: Optional[List[discord.Role]] = None,
bypass_roles: Optional[List[discord.Role]] = None,
multiplier_roles: Optional[Dict[discord.Role, int]] = None,
messages_required: Optional[int] = None,
allowed_message_channels: Optional[List[discord.TextChannel]] = None,
amari: Optional[int] = None,
weekly_amari: Optional[int] = None,
donor: Optional[discord.Member] = None,
) -> discord.Embed:
assert interaction.guild is not None
description = f"Click the {config.reaction} button to join the giveaway!\n"
description += f"Hosted By: {interaction.user.mention}\n"
if donor:
description += f"Donor: {donor.mention}\n"
description += f"Ends: {discord.utils.format_dt(duration, style='R')} ({discord.utils.format_dt(duration, style='f')})\n"
embed = discord.Embed(
title=prize,
description=description,
colour=config.color,
timestamp=duration,
)
embed.set_footer(
text=f"{winners} winner(s) • Ends",
icon_url=interaction.guild.icon or interaction.client.user.display_avatar,
)
requirements = ""
if required_roles:
requirements += f"Required Roles: {', '.join(role.mention for role in required_roles if role is not None)}\n"
if bypass_roles:
requirements += f"Bypass Roles: {', '.join(role.mention for role in bypass_roles if role is not None)}\n"
if blacklisted_roles:
requirements += f"Blacklisted Roles: {', '.join(role.mention for role in blacklisted_roles if role is not None)}\n"
if messages_required:
requirements += (
f"Messages Required: **{messages_required}** message(s) (5s cooldown)\n"
)
if allowed_message_channels:
requirements += f"Allowed Channels: {', '.join(f'<#{c.id}>' for c in allowed_message_channels)}\n"
if amari:
requirements += f"Amari Level: {amari}\n"
if weekly_amari:
requirements += f"Weekly Amari: {weekly_amari} XP Points\n"
if requirements:
embed.add_field(name="Requirements", value=requirements, inline=False)
if multiplier_roles:
multiplier_roles_mention = "\n".join(
[
f"- {entry}x ・ {role.mention}"
for role, entry in multiplier_roles.items()
if role is not None
]
)
embed.add_field(
name="Bonus Entries", value=multiplier_roles_mention, inline=False
)
return embed
@classmethod
async def start(
cls,
interaction: Interaction,
duration: datetime.datetime,
winners: int,
prize: str,
config: GuildConfig,
| from __future__ import annotations
if TYPE_CHECKING:
class Giveaway:
"""
Represents a giveaway object.
Attributes
----------
bot: Giftify
The bot instance to handle the giveaway.
guild_id: int
The ID of the guild (server) where the giveaway is hosted.
channel_id: int
The ID of the channel where the giveaway is hosted.
message_id: int
The ID of the giveaway message.
extra_message_id: int
        The ID of the extra message sent with the giveaway.
host_id: int
The ID of the user hosting the giveaway.
donor_id: int
The ID of the user donating for the giveaway.
    prize: str
The prize of the giveaway.
winner_count: int
The number of winners for the giveaway.
winners: List[int]
        The IDs of the winners of the giveaway.
participants: List[int]
        The IDs of the participants in the giveaway.
ended: bool
Indicates whether the giveaway has ended.
ends: datetime.datetime
        The timestamp when the giveaway will end.
required_roles: List[int]
The list of role IDs required to participate in the giveaway.
blacklisted_roles: List[int]
The list of role IDs excluded from participating in the giveaway.
bypass_roles: List[int]
        The list of role IDs exempted from giveaway restrictions.
multiplier_roles: Optional[dict]
        A mapping of role ID to bonus-entry multiplier for the giveaway.
messages: Optional[dict]
        A mapping of member ID to message count, used for the message requirement.
messages_required: Optional[int]
The number of messages required to participate in the giveaway.
allowed_message_channels: Optional[List[int]]
        The IDs of the channels where the message count is tracked.
amari: Optional[int]
The required Amari XP to participate in the giveaway.
weekly_amari: Optional[int]
The required weekly Amari XP to participate in the giveaway.
"""
__slots__ = (
"bot",
"guild_id",
"channel_id",
"message_id",
"extra_message_id",
"prize",
"host_id",
"donor_id",
"winner_count",
"winners",
"participants",
"ended",
"ends",
"required_roles",
"blacklisted_roles",
"bypass_roles",
"multiplier_roles",
"messages",
"messages_required",
"allowed_message_channels",
"amari",
"weekly_amari",
)
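    # __slots__ keeps per-instance memory small and prevents accidental attribute creation.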
def __init__(self, *, bot: Giftify, record: asyncpg.Record):
self.bot = bot
self.guild_id: int = record["guild"]
self.channel_id: int = record["channel"]
self.message_id: int = record["message"]
self.extra_message_id: int = record["extra_message"]
self.prize: str = record["prize"]
self.host_id: int = record["host"]
self.donor_id: Optional[int] = record["donor"]
self.winner_count: int = record["winner_count"]
self.winners: List[int] = record["winners"]
self.participants: List[int] = record["participants"]
self.ended: bool = record["ended"]
self.ends: datetime.datetime = record["ends"]
self.required_roles: List[int] = record["required_roles"] or []
self.blacklisted_roles: List[int] = record["blacklisted_roles"] or []
self.bypass_roles: List[int] = record["bypass_roles"] or []
self.multiplier_roles: Dict[int, int] = {
int(role): entries
for role, entries in record["multiplier_roles"].items()
if entries > 1
}
self.messages: Dict[int, int] = {
int(member): messages for member, messages in record["messages"].items()
}
self.messages_required: Optional[int] = record["messages_required"]
self.allowed_message_channels: Optional[List[int]] = record["messages_channel"]
self.amari: Optional[int] = record["amari"]
self.weekly_amari: Optional[int] = record["weekly_amari"]
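    # A giveaway is identified by its (guild, channel, message) triple; __eq__ and __hash__
    # use it so instances can be deduplicated in sets and used as dict keys.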
def __eq__(self, other: "Giveaway") -> bool:
try:
return (
self.guild_id == other.guild_id
and self.channel_id == other.channel_id
and self.message_id == other.message_id
)
except AttributeError:
return False
def __hash__(self) -> int:
return hash((self.guild_id, self.channel_id, self.message_id))
def __repr__(self) -> str:
return f"<Giveaway guild_id={self.guild_id} channel_id={self.channel_id} message_id={self.message_id}>"
@property
def jump_to_giveaway(self) -> discord.ui.View:
url = f"https://discord.com/channels/{self.guild_id}/{self.channel_id}/{self.message_id}"
view = BaseView(timeout=None)
button = discord.ui.Button(label="Jump To Giveaway", url=url)
view.add_item(button)
return view
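    # Builds the public announcement embed; the requirement and bonus-entry fields are only
    # attached when the corresponding options are provided.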
@staticmethod
def create_embed(
interaction: Interaction,
config: GuildConfig,
duration: datetime.datetime,
winners: int,
prize: str,
required_roles: Optional[List[discord.Role]] = None,
blacklisted_roles: Optional[List[discord.Role]] = None,
bypass_roles: Optional[List[discord.Role]] = None,
multiplier_roles: Optional[Dict[discord.Role, int]] = None,
messages_required: Optional[int] = None,
allowed_message_channels: Optional[List[discord.TextChannel]] = None,
amari: Optional[int] = None,
weekly_amari: Optional[int] = None,
donor: Optional[discord.Member] = None,
) -> discord.Embed:
assert interaction.guild is not None
description = f"Click the {config.reaction} button to join the giveaway!\n"
description += f"Hosted By: {interaction.user.mention}\n"
if donor:
description += f"Donor: {donor.mention}\n"
description += f"Ends: {discord.utils.format_dt(duration, style='R')} ({discord.utils.format_dt(duration, style='f')})\n"
embed = discord.Embed(
title=prize,
description=description,
colour=config.color,
timestamp=duration,
)
embed.set_footer(
text=f"{winners} winner(s) • Ends",
icon_url=interaction.guild.icon or interaction.client.user.display_avatar,
)
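        # Collect the optional entry requirements into a single embed field, added only
        # when at least one requirement is set.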
requirements = ""
if required_roles:
requirements += f"Required Roles: {', '.join(role.mention for role in required_roles if role is not None)}\n"
if bypass_roles:
requirements += f"Bypass Roles: {', '.join(role.mention for role in bypass_roles if role is not None)}\n"
if blacklisted_roles:
requirements += f"Blacklisted Roles: {', '.join(role.mention for role in blacklisted_roles if role is not None)}\n"
if messages_required:
requirements += (
f"Messages Required: **{messages_required}** message(s) (5s cooldown)\n"
)
if allowed_message_channels:
requirements += f"Allowed Channels: {', '.join(f'<#{c.id}>' for c in allowed_message_channels)}\n"
if amari:
requirements += f"Amari Level: {amari}\n"
if weekly_amari:
requirements += f"Weekly Amari: {weekly_amari} XP Points\n"
if requirements:
embed.add_field(name="Requirements", value=requirements, inline=False)
if multiplier_roles:
multiplier_roles_mention = "\n".join(
[
f"- {entry}x ・ {role.mention}"
for role, entry in multiplier_roles.items()
if role is not None
]
)
embed.add_field(
name="Bonus Entries", value=multiplier_roles_mention, inline=False
)
return embed
@classmethod
async def start(
cls,
interaction: Interaction,
duration: datetime.datetime,
winners: int,
prize: str,
config: GuildConfig, | channel_config: Optional[ChannelConfig], | 0 | 2023-11-09 15:00:15+00:00 | 12k |
Zjy0401/CoCoFormer | model/CoCoFormer.py | [
{
"identifier": "get_device",
"path": "utilities/device.py",
"snippet": "def get_device():\n\n if((not USE_CUDA) or (TORCH_CUDA_DEVICE is None)):\n return TORCH_CPU_DEVICE\n else:\n return TORCH_CUDA_DEVICE"
},
{
"identifier": "PositionalEncoding",
"path": "model/positional_encoding.py",
"snippet": "class PositionalEncoding(nn.Module):\n\n def __init__(self, d_model, dropout=0.1, max_len=5000):\n super(PositionalEncoding, self).__init__()\n self.dropout = nn.Dropout(p=dropout)\n\n pe = torch.zeros(max_len, d_model)\n position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)\n div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))\n pe[:, 0::2] = torch.sin(position * div_term)\n pe[:, 1::2] = torch.cos(position * div_term)\n pe = pe.unsqueeze(0).transpose(0, 1)\n self.register_buffer('pe', pe)\n\n def forward(self, x):\n x = x + self.pe[:x.size(0), :]\n return self.dropout(x)"
},
{
"identifier": "TransformerEncoderRPR",
"path": "model/rpr.py",
"snippet": "class TransformerEncoderRPR(Module):\n\n def __init__(self, encoder_layer, num_layers, encoder_past, max_seq, c_max_seq, b_max_seq, norm=None):\n super(TransformerEncoderRPR, self).__init__()\n self.past_layers = _get_clones(encoder_past, 1)\n self.layers = _get_clones(encoder_layer, num_layers)\n self.num_layers = num_layers\n self.norm = norm\n self.max_seq = max_seq\n self.c_max_seq = c_max_seq\n self.b_max_seq = b_max_seq\n\n def forward(self, src, mask=None, src_key_padding_mask=None):\n\n args = parse_train_args()\n\n def generate_square_subsequent_mask(sz: int) -> Tensor:\n r\"\"\"Generate a square mask for the sequence. The masked positions are filled with float('-inf').\n Unmasked positions are filled with float(0.0).\n \"\"\"\n mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)\n mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))\n return mask\n\n x_n = src[:mask.shape[0], :, :]\n x_c = src[mask.shape[0]:mask.shape[0]+(src.shape[0] // 10 + 1), :, :]\n x_b = src[mask.shape[0]+(src.shape[0] // 10 + 1):, :, :]\n\n if args.gpu[0] != -1:\n mask_c = generate_square_subsequent_mask(x_c.shape[0]).cuda(device=args.gpu[0])\n mask_b = generate_square_subsequent_mask(x_b.shape[0]).cuda(device=args.gpu[0])\n mask_zero_c = torch.zeros(x_n.shape[0], x_c.shape[0]).cuda(device=args.gpu[0])\n mask_zero_b = torch.zeros(x_n.shape[0], x_b.shape[0]).cuda(device=args.gpu[0])\n else:\n mask_c = generate_square_subsequent_mask(x_c.shape[0]).cpu()\n mask_b = generate_square_subsequent_mask(x_b.shape[0]).cpu()\n mask_zero_c = torch.zeros(x_n.shape[0], x_c.shape[0]).cpu()\n mask_zero_b = torch.zeros(x_n.shape[0], x_b.shape[0]).cpu()\n\n mask_past_layer = torch.cat((mask, mask_zero_c, mask_zero_b), dim=1)\n\n # past layer of transformer\n output = self.past_layers[0](x_n, x_c, x_b, src_past_c_mask=mask_c, src_past_c_key_padding_mask=src_key_padding_mask,\n src_past_b_mask=mask_b, src_past_b_key_padding_mask=src_key_padding_mask,\n src_mask=mask_past_layer, src_key_padding_mask=src_key_padding_mask)\n\n # origin Transformer\n for i in range(1, self.num_layers):\n output = self.layers[i](output, src_mask=mask, src_key_padding_mask=src_key_padding_mask)\n\n if self.norm:\n output = self.norm(output)\n\n return output"
},
{
"identifier": "TransformerEncoderLayerRPR",
"path": "model/rpr.py",
"snippet": "class TransformerEncoderLayerRPR(Module):\n\n def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, er_len=None):\n super(TransformerEncoderLayerRPR, self).__init__()\n self.self_attn = MultiheadAttentionRPR(d_model, nhead, dropout=dropout, er_len=er_len)\n # Implementation of Feedforward model\n self.linear1 = Linear(d_model, dim_feedforward)\n self.dropout = Dropout(dropout)\n self.linear2 = Linear(dim_feedforward, d_model)\n\n self.norm1 = LayerNorm(d_model)\n self.norm2 = LayerNorm(d_model)\n self.dropout1 = Dropout(dropout)\n self.dropout2 = Dropout(dropout)\n\n def forward(self, src: Tensor, src_mask=None, src_key_padding_mask=None):\n src2 = self.self_attn(src, src, src, attn_mask=src_mask, key_padding_mask=src_key_padding_mask)[0]\n src = src + self.dropout1(src2)\n src = self.norm1(src)\n src2 = self.linear2(self.dropout(F.relu(self.linear1(src))))\n src = src + self.dropout2(src2)\n src = self.norm2(src)\n\n return src"
},
{
"identifier": "TransformerEncoderLayerRPR_",
"path": "model/rpr.py",
"snippet": "class TransformerEncoderLayerRPR_(Module):\n \"\"\"\n ----------\n The model of condition input\n ----------\n \"\"\"\n\n def __init__(self, cpast_layer_dmodel, cpast_layer_nhead, cpast_dim_forward,\n bpast_layer_dmodel, bpast_layer_nhead, bpast_dim_forward,\n d_model, nhead, dim_feedforward=2048, dropout=0.1, er_len=None):\n super(TransformerEncoderLayerRPR_, self).__init__()\n\n # past_layer of chord\n self.C_self_attn1 = MultiheadAttentionRPR(cpast_layer_dmodel, cpast_layer_nhead, dropout=dropout, er_len=er_len)\n # Implementation of Feedforward model\n self.C_linear1 = Linear(cpast_layer_dmodel, cpast_dim_forward)\n self.C_dropout = Dropout(dropout)\n self.C_linear2 = Linear(cpast_dim_forward, cpast_layer_dmodel)\n\n self.C_norm1 = LayerNorm(cpast_layer_dmodel)\n self.C_norm2 = LayerNorm(cpast_layer_dmodel)\n self.C_dropout1 = Dropout(dropout)\n self.C_dropout2 = Dropout(dropout)\n\n # Implementation of linear for kc and vc\n self.C_norm3 = LayerNorm(cpast_layer_dmodel)\n self.C_norm4 = LayerNorm(cpast_layer_dmodel)\n self.C_dropout3 = Dropout(dropout)\n self.C_dropout4 = Dropout(dropout)\n self.C_linear3 = Linear(cpast_layer_dmodel, cpast_layer_dmodel)\n self.C_linear4 = Linear(cpast_layer_dmodel, cpast_layer_dmodel)\n\n # past_layer of beat\n self.B_self_attn1 = MultiheadAttentionRPR(bpast_layer_dmodel, bpast_layer_nhead, dropout=dropout, er_len=er_len)\n # Implementation of Feedforward model\n self.B_linear1 = Linear(bpast_layer_dmodel, bpast_dim_forward)\n self.B_dropout = Dropout(dropout)\n self.B_linear2 = Linear(bpast_dim_forward, bpast_layer_dmodel)\n\n self.B_norm1 = LayerNorm(bpast_layer_dmodel)\n self.B_norm2 = LayerNorm(bpast_layer_dmodel)\n self.B_dropout1 = Dropout(dropout)\n self.B_dropout2 = Dropout(dropout)\n\n # Implementation of linear for kc and vc\n self.B_norm3 = LayerNorm(bpast_layer_dmodel)\n self.B_norm4 = LayerNorm(bpast_layer_dmodel)\n self.B_dropout3 = Dropout(dropout)\n self.B_dropout4 = Dropout(dropout)\n self.B_linear3 = Linear(bpast_layer_dmodel, bpast_layer_dmodel)\n self.B_linear4 = Linear(bpast_layer_dmodel, bpast_layer_dmodel)\n\n # normal encoder\n self.self_attn2 = MultiheadAttentionRPR(d_model, nhead, dropout=dropout)\n self.linear5 = Linear(d_model, dim_feedforward)\n self.dropout5 = Dropout(dropout)\n self.linear6 = Linear(dim_feedforward, d_model)\n\n self.norm5 = LayerNorm(d_model)\n self.norm6 = LayerNorm(d_model)\n self.dropout6 = Dropout(dropout)\n self.dropout7 = Dropout(dropout)\n\n def forward(self, src_n_past, src_c_past, src_b_past, src_past_c_mask=None, src_past_c_key_padding_mask=None,\n src_past_b_mask=None, src_past_b_key_padding_mask=None,\n src_mask=None, src_key_padding_mask=None):\n # past layer of chord:\n # calculate k_c,v_c first:\n src_C = src_c_past\n src_C_past2 = self.C_self_attn1(src_C, src_C, src_C, attn_mask=src_past_c_mask,\n key_padding_mask=src_past_c_key_padding_mask)[0]\n src_C = src_C + self.C_dropout1(src_C_past2)\n src_C = self.C_norm1(src_C)\n src_C_past2 = self.C_linear2(self.C_dropout(F.relu(self.C_linear1(src_C))))\n src_C = src_C + self.C_dropout2(src_C_past2)\n src_C = self.C_norm2(src_C)\n\n kc = self.C_norm3(self.C_dropout3(self.C_linear3(src_C)))\n vc = self.C_norm4(self.C_dropout4(self.C_linear4(src_C)))\n\n # calculate Beat_k and Beat_v:\n src_b = src_b_past\n src_b_past2 = self.B_self_attn1(src_b, src_b, src_b, attn_mask=src_past_b_mask,\n key_padding_mask=src_past_b_key_padding_mask)[0]\n src_b = src_b + self.B_dropout1(src_b_past2)\n src_b = self.B_norm1(src_b)\n src_b_past2 = 
self.B_linear2(self.B_dropout(F.relu(self.B_linear1(src_b))))\n src_b = src_b + self.B_dropout2(src_b_past2)\n src_b = self.C_norm2(src_b)\n\n kb = self.B_norm3(self.B_dropout3(self.B_linear3(src_b)))\n vb = self.B_norm4(self.B_dropout4(self.B_linear4(src_b)))\n\n # # layer0:\n k = torch.cat((src_n_past, kc, kb), dim=0)\n v = torch.cat((src_n_past, vc, vb), dim=0)\n src2 = self.self_attn2(src_n_past, k, v, attn_mask=src_mask, key_padding_mask=src_key_padding_mask)[0]\n # src2 = self.self_attn2(src, src, src, attn_mask=src_mask[:, :2048], key_padding_mask=src_key_padding_mask)[0]\n src = src_n_past + self.dropout5(src2)\n src = self.norm5(src)\n src2 = self.linear6(self.dropout6(F.relu(self.linear5(src2))))\n src = src + self.dropout7(src2)\n src = self.norm6(src)\n return src"
},
{
"identifier": "TransformerEncoderPastLayer",
"path": "model/rpr.py",
"snippet": "class TransformerEncoderPastLayer(Module):\n\n def __init__(self, cpast_layer_dmodel, cpast_layer_nhead, cpast_dim_forward,\n bpast_layer_dmodel, bpast_layer_nhead, bpast_dim_forward,\n d_model, nhead, dim_feedforward=2048, dropout=0.1):\n super(TransformerEncoderPastLayer, self).__init__()\n\n # past_layer of chord\n self.C_self_attn1 = MultiheadAttention(cpast_layer_dmodel, cpast_layer_nhead, dropout=dropout)\n # Implementation of Feedforward model\n self.C_linear1 = Linear(cpast_layer_dmodel, cpast_dim_forward)\n self.C_dropout = Dropout(dropout)\n self.C_linear2 = Linear(cpast_dim_forward, cpast_layer_dmodel)\n\n self.C_norm1 = LayerNorm(cpast_layer_dmodel)\n self.C_norm2 = LayerNorm(cpast_layer_dmodel)\n self.C_dropout1 = Dropout(dropout)\n self.C_dropout2 = Dropout(dropout)\n\n # Implementation of linear for kc and vc\n self.C_norm3 = LayerNorm(cpast_layer_dmodel)\n self.C_norm4 = LayerNorm(cpast_layer_dmodel)\n self.C_dropout3 = Dropout(dropout)\n self.C_dropout4 = Dropout(dropout)\n self.C_linear3 = Linear(cpast_layer_dmodel, cpast_layer_dmodel)\n self.C_linear4 = Linear(cpast_layer_dmodel, cpast_layer_dmodel)\n\n # past_layer of beat\n self.B_self_attn1 = MultiheadAttention(bpast_layer_dmodel, bpast_layer_nhead, dropout=dropout)\n # Implementation of Feedforward model\n self.B_linear1 = Linear(bpast_layer_dmodel, bpast_dim_forward)\n self.B_dropout = Dropout(dropout)\n self.B_linear2 = Linear(bpast_dim_forward, bpast_layer_dmodel)\n\n self.B_norm1 = LayerNorm(bpast_layer_dmodel)\n self.B_norm2 = LayerNorm(bpast_layer_dmodel)\n self.B_dropout1 = Dropout(dropout)\n self.B_dropout2 = Dropout(dropout)\n\n # Implementation of linear for kc and vc\n self.B_norm3 = LayerNorm(bpast_layer_dmodel)\n self.B_norm4 = LayerNorm(bpast_layer_dmodel)\n self.B_dropout3 = Dropout(dropout)\n self.B_dropout4 = Dropout(dropout)\n self.B_linear3 = Linear(bpast_layer_dmodel, bpast_layer_dmodel)\n self.B_linear4 = Linear(bpast_layer_dmodel, bpast_layer_dmodel)\n\n # normal encoder\n self.self_attn2 = MultiheadAttention(d_model, nhead, dropout=dropout)\n self.linear5 = Linear(d_model, dim_feedforward)\n self.dropout5 = Dropout(dropout)\n self.linear6 = Linear(dim_feedforward, d_model)\n\n self.norm5 = LayerNorm(d_model)\n self.norm6 = LayerNorm(d_model)\n self.dropout6 = Dropout(dropout)\n self.dropout7 = Dropout(dropout)\n\n def forward(self, src_n_past, src_c_past, src_b_past, src_past_c_mask=None, src_past_c_key_padding_mask=None,\n src_past_b_mask=None, src_past_b_key_padding_mask=None,\n src_mask=None, src_key_padding_mask=None):\n # past layer of chord:\n # calculate k_c,v_c first:\n src_C = src_c_past\n src_C_past2 = self.C_self_attn1(src_C, src_C, src_C, attn_mask=src_past_c_mask,\n key_padding_mask=src_past_c_key_padding_mask)[0]\n src_C = src_C + self.C_dropout1(src_C_past2)\n src_C = self.C_norm1(src_C)\n src_C_past2 = self.C_linear2(self.C_dropout(F.relu(self.C_linear1(src_C))))\n src_C = src_C + self.C_dropout2(src_C_past2)\n src_C = self.C_norm2(src_C)\n\n kc = self.C_norm3(self.C_dropout3(self.C_linear3(src_C)))\n vc = self.C_norm4(self.C_dropout4(self.C_linear4(src_C)))\n\n # calculate Beat_k and Beat_v:\n src_b = src_b_past\n src_b_past2 = self.B_self_attn1(src_b, src_b, src_b, attn_mask=src_past_b_mask,\n key_padding_mask=src_past_b_key_padding_mask)[0]\n src_b = src_b + self.B_dropout1(src_b_past2)\n src_b = self.B_norm1(src_b)\n src_b_past2 = self.B_linear2(self.B_dropout(F.relu(self.B_linear1(src_b))))\n src_b = src_b + self.B_dropout2(src_b_past2)\n src_b = 
self.C_norm2(src_b)\n\n kb = self.B_norm3(self.B_dropout3(self.B_linear3(src_b)))\n vb = self.B_norm4(self.B_dropout4(self.B_linear4(src_b)))\n\n # # layer0:\n k = torch.cat((src_n_past, kc, kb), dim=0)\n v = torch.cat((src_n_past, vc, vb), dim=0)\n src2 = self.self_attn2(src_n_past, k, v, attn_mask=src_mask, key_padding_mask=src_key_padding_mask)[0]\n # src2 = self.self_attn2(src, src, src, attn_mask=src_mask[:, :2048], key_padding_mask=src_key_padding_mask)[0]\n src = src_n_past + self.dropout5(src2)\n src = self.norm5(src)\n src2 = self.linear6(self.dropout6(F.relu(self.linear5(src2))))\n src = src + self.dropout7(src2)\n src = self.norm6(src)\n return src"
},
{
"identifier": "TransformerEncoderLayer",
"path": "model/rpr.py",
"snippet": "class TransformerEncoderLayer(Module):\n\n def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1):\n super(TransformerEncoderLayer, self).__init__()\n self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout)\n # Implementation of Feedforward model\n self.linear1 = Linear(d_model, dim_feedforward)\n self.dropout = Dropout(dropout)\n self.linear2 = Linear(dim_feedforward, d_model)\n\n self.norm1 = LayerNorm(d_model)\n self.norm2 = LayerNorm(d_model)\n self.dropout1 = Dropout(dropout)\n self.dropout2 = Dropout(dropout)\n\n def forward(self, src: Tensor, src_mask=None, src_key_padding_mask=None):\n # if src.size()[0] == src_mask.size()[0]:\n # src2 = self.self_attn(src, src, src, attn_mask=src_mask,\n # key_padding_mask=src_key_padding_mask)[0]\n # else:\n # src2 = src[:2048,:,:]\n # key = src[:2304,:,:]\n # value = torch.cat((src2, src[2304:2560,:,:]), dim=0)\n # src = src2\n # src2 = self.self_attn_2(src2,key,value,attn_mask=src_mask,\n # key_padding_mask=src_key_padding_mask)[0]\n\n src2 = self.self_attn(src, src, src, attn_mask=src_mask, key_padding_mask=src_key_padding_mask)[0]\n src = src + self.dropout1(src2)\n src = self.norm1(src)\n src2 = self.linear2(self.dropout(F.relu(self.linear1(src))))\n src = src + self.dropout2(src2)\n src = self.norm2(src)\n\n return src"
},
{
"identifier": "TransformerEncoder",
"path": "model/rpr.py",
"snippet": "class TransformerEncoder(Module):\n\n\n def __init__(self, encoder_layer, num_layers, encoder_past, max_seq, c_max_seq, b_max_seq, norm=None):\n super(TransformerEncoder, self).__init__()\n self.past_layers = _get_clones(encoder_past, 1)\n self.layers = _get_clones(encoder_layer, num_layers)\n self.num_layers = num_layers\n self.norm = norm\n self.max_seq = max_seq\n self.c_max_seq = c_max_seq\n self.b_max_seq = b_max_seq\n\n def forward(self, src, mask=None, src_key_padding_mask=None):\n\n args = parse_train_args()\n\n def generate_square_subsequent_mask(sz: int) -> Tensor:\n r\"\"\"Generate a square mask for the sequence. The masked positions are filled with float('-inf').\n Unmasked positions are filled with float(0.0).\n \"\"\"\n mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)\n mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))\n return mask\n\n\n x_n = src[:mask.shape[0], :, :]\n x_c = src[mask.shape[0]:mask.shape[0]+(src.shape[0] // 10 + 1), :, :]\n x_b = src[mask.shape[0]+(src.shape[0] // 10 + 1):, :, :]\n\n\n if args.gpu[0] != -1:\n mask_c = generate_square_subsequent_mask(x_c.shape[0]).cuda(device=args.gpu[0])\n mask_b = generate_square_subsequent_mask(x_b.shape[0]).cuda(device=args.gpu[0])\n mask_zero_c = torch.zeros(x_n.shape[0], x_c.shape[0]).cuda(device=args.gpu[0])\n mask_zero_b = torch.zeros(x_n.shape[0], x_b.shape[0]).cuda(device=args.gpu[0])\n else:\n mask_c = generate_square_subsequent_mask(x_c.shape[0]).cpu()\n mask_b = generate_square_subsequent_mask(x_b.shape[0]).cpu()\n mask_zero_c = torch.zeros(x_n.shape[0], x_c.shape[0]).cpu()\n mask_zero_b = torch.zeros(x_n.shape[0], x_b.shape[0]).cpu()\n\n mask_past_layer = torch.cat((mask, mask_zero_c, mask_zero_b), dim=1)\n\n # past layer of transformer\n output = self.past_layers[0](x_n, x_c, x_b, src_past_c_mask=mask_c, src_past_c_key_padding_mask=src_key_padding_mask,\n src_past_b_mask=mask_b, src_past_b_key_padding_mask=src_key_padding_mask,\n src_mask=mask_past_layer, src_key_padding_mask=src_key_padding_mask)\n\n # origin Transformer\n for i in range(1, self.num_layers):\n output = self.layers[i](output, src_mask=mask, src_key_padding_mask=src_key_padding_mask)\n\n # x2 = src[self.max_seq:, :, :]\n # mask2 = generate_square_subsequent_mask(x2.shape[0]).to(get_device())\n # out_past = self.past_layers(output, src_mask=mask2, src_key_padding_mask=src_key_padding_mask)\n\n if self.norm:\n output = self.norm(output)\n\n return output"
},
{
"identifier": "parse_train_args",
"path": "utilities/argument_funcs.py",
"snippet": "def parse_train_args():\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"-input_dir\", type=str, default=\"./dataset/dataset/JSF_SATB\", help=\"Folder of preprocessed and pickled midi files\")\n parser.add_argument(\"-output_dir\", type=str, default=\"./baseline_3loss\", help=\"Folder to save model weights. Saves one every epoch\")\n parser.add_argument(\"-weight_modulus\", type=int, default=1, help=\"How often to save epoch weights (ex: value of 10 means save every 10 epochs)\")\n parser.add_argument(\"-print_modulus\", type=int, default=1, help=\"How often to print train results for a batch (batch loss, learn rate, etc.)\")\n parser.add_argument(\"-word2event\", type=str, default='./dataset/word2event.pkl', help='word table location: *.pkl')\n parser.add_argument(\"-n_workers\", type=int, default=2, help=\"Number of threads for the dataloader\")\n parser.add_argument(\"--force_cpu\", action=\"store_true\", help=\"Forces model to run on a cpu even when gpu is available\")\n parser.add_argument(\"--gpu\", default=[2], nargs='+', type=int, help=\"For Multi-GPUs training\")\n parser.add_argument(\"--no_tensorboard\", action=\"store_true\", help=\"Turns off tensorboard result reporting\")\n parser.add_argument('--scheduled_sampling', default=False, help='False means use teacher forcing, True means use scheduled_sampling')\n parser.add_argument(\"--scheduled_sampling_change_ratio\", default=0.5, type=int, help='ratio about mix golden target with output')\n parser.add_argument(\"-continue_weights\", type=str, default=None, help=\"Model weights to continue training based on\")\n parser.add_argument(\"-continue_epoch\", type=int, default=None, help=\"Epoch the continue_weights model was at\")\n\n parser.add_argument(\"-lr\", type=float, default=None, help=\"Constant learn rate. Leave as None for a custom scheduler.\")\n parser.add_argument(\"-ce_smoothing\", type=float, default=None, help=\"Smoothing parameter for smoothed cross entropy loss (defaults to no smoothing)\")\n parser.add_argument(\"-batch_size\", type=int, default=2, help=\"Batch size per gpu to use\")\n parser.add_argument(\"-epochs\", type=int, default=300, help=\"Number of epochs to use\")\n\n parser.add_argument(\"-adv_train\", default=True, help='add discriminator loss')\n parser.add_argument(\"-only_Transformer\", default=False, help='use pure Transformer, default set to false, True only for test')\n parser.add_argument(\"-loss\", default=[0.4, 0.2, 0.8], nargs='+', type=float, help='weights of loss, the last element effect when adv train is True')\n\n parser.add_argument(\"--rpr\", action=\"store_true\", help=\"Use a modified Transformer for Relative Position Representations\")\n parser.add_argument(\"-max_sequence\", type=int, default=2048, help=\"Maximum midi sequence to consider\")\n parser.add_argument(\"-n_layers\", type=int, default=6, help=\"Number of decoder layers to use\")\n parser.add_argument(\"-num_heads\", type=int, default=8, help=\"Number of heads to use for multi-head attention\")\n parser.add_argument(\"-d_model\", type=int, default=512, help=\"Dimension of the model (output dim of embedding layers, etc.)\")\n\n parser.add_argument(\"-dim_feedforward\", type=int, default=1024, help=\"Dimension of the feedforward layer\")\n\n parser.add_argument(\"-dropout\", type=float, default=0.1, help=\"Dropout rate\")\n\n parser.add_argument(\"--metrics\", default=False, help=\"evaluate TER(token error rate)\")\n\n return parser.parse_args()"
},
{
"identifier": "print_train_args",
"path": "utilities/argument_funcs.py",
"snippet": "def print_train_args(args):\n\n print(SEPERATOR)\n print(\"input_dir:\", args.input_dir)\n print(\"output_dir:\", args.output_dir)\n print(\"weight_modulus:\", args.weight_modulus)\n print(\"print_modulus:\", args.print_modulus)\n print(\"\")\n print(\"n_workers:\", args.n_workers)\n print(\"force_cpu:\", args.force_cpu)\n print(\"tensorboard:\", not args.no_tensorboard)\n print(\"\")\n print(\"continue_weights:\", args.continue_weights)\n print(\"continue_epoch:\", args.continue_epoch)\n print(\"\")\n print(\"lr:\", args.lr)\n print(\"ce_smoothing:\", args.ce_smoothing)\n print(\"batch_size:\", args.batch_size)\n print(\"epochs:\", args.epochs)\n print(\"\")\n print(\"rpr:\", args.rpr)\n print(\"max_sequence:\", args.max_sequence)\n print(\"n_layers:\", args.n_layers)\n print(\"num_heads:\", args.num_heads)\n print(\"d_model:\", args.d_model)\n print(\"\")\n print(\"dim_feedforward:\", args.dim_feedforward)\n print(\"dropout:\", args.dropout)\n print(SEPERATOR)\n print(\"\")"
},
{
"identifier": "write_model_params",
"path": "utilities/argument_funcs.py",
"snippet": "def write_model_params(args, output_file):\n\n o_stream = open(output_file, \"w\")\n\n o_stream.write(\"rpr: \" + str(args.rpr) + \"\\n\")\n o_stream.write(\"lr: \" + str(args.lr) + \"\\n\")\n o_stream.write(\"ce_smoothing: \" + str(args.ce_smoothing) + \"\\n\")\n o_stream.write(\"batch_size: \" + str(args.batch_size) + \"\\n\")\n o_stream.write(\"max_sequence: \" + str(args.max_sequence) + \"\\n\")\n o_stream.write(\"n_layers: \" + str(args.n_layers) + \"\\n\")\n o_stream.write(\"num_heads: \" + str(args.num_heads) + \"\\n\")\n o_stream.write(\"d_model: \" + str(args.d_model) + \"\\n\")\n o_stream.write(\"dim_feedforward: \" + str(args.dim_feedforward) + \"\\n\")\n o_stream.write(\"dropout: \" + str(args.dropout) + \"\\n\")\n\n o_stream.close()"
}
] | import torch
import torch.nn as nn
import random
import pickle
from torch.nn.modules.normalization import LayerNorm
from utilities.constants import *
from utilities.device import get_device
from tqdm import tqdm
from .positional_encoding import PositionalEncoding
from .rpr import TransformerEncoderRPR, TransformerEncoderLayerRPR, TransformerEncoderLayerRPR_, \
TransformerEncoderPastLayer, TransformerEncoderLayer, TransformerEncoder
from typing import Dict, Iterable, Callable
from torch.nn.init import *
from utilities.argument_funcs import parse_train_args, print_train_args, write_model_params | 7,954 |
# MusicTransformer
class CoCoformer(nn.Module):
def __init__(self, word2event, event2word, n_layers=6, num_heads=8, d_model=512, dim_feedforward=1024,
dropout=0.1, max_sequence=2048, c_max_seq=256, b_max_seq=1024, rpr=False):
super(CoCoformer, self).__init__()
self.dummy = DummyDecoder()
self.nlayers = n_layers
self.nhead = num_heads
self.d_model = d_model
self.d_ff = dim_feedforward
self.dropout = dropout
self.max_seq = max_sequence
self.c_max_seq = c_max_seq
self.b_max_seq = b_max_seq
self.rpr = rpr
# word2event and event2word:
self.word2event = word2event
self.event2word = event2word
# past layer of chord
self.cpast_layer_dmodel = d_model
self.cpast_layer_nhead = 8
self.cpast_dim_forward = 256
self.cpast_layer_max_seq = 256
self.cpast_layer_nlayers = 1
# past layer of beats
self.bpast_layer_dmodel = d_model
self.bpast_layer_nhead = 8
self.bpast_dim_forward = 256
self.bpast_layer_max_seq = 1024
self.bpast_layer_nlayers = 1
# Input embedding
self.n_embedding = nn.Embedding(VOCAB_SIZE, self.d_model)
self.c_embedding = nn.Embedding(VOCAB_SIZE, self.cpast_layer_dmodel)
self.b_embedding = nn.Embedding(VOCAB_SIZE, self.bpast_layer_dmodel)
# Positional encoding
self.n_positional_encoding = PositionalEncoding(self.d_model, self.dropout, self.max_seq)
self.c_positional_encoding = PositionalEncoding(self.cpast_layer_dmodel, self.dropout, self.cpast_layer_max_seq)
self.b_positional_encoding = PositionalEncoding(self.bpast_layer_dmodel, self.dropout, self.bpast_layer_max_seq)
# Base transformer
if not self.rpr:
# To make a decoder-only transformer we need to use masked encoder layers
# Dummy decoder to essentially just return the encoder output
encoder_norm = LayerNorm(self.d_model)
encoder_past_layer = TransformerEncoderPastLayer(self.cpast_layer_dmodel, self.cpast_layer_nhead,
self.cpast_dim_forward, self.bpast_layer_dmodel,
self.bpast_layer_nhead, self.bpast_dim_forward,
self.d_model, self.nhead,
self.d_ff, self.dropout)
encoder_layer = TransformerEncoderLayer(self.d_model, self.nhead, self.d_ff, self.dropout)
encoder = TransformerEncoder(encoder_layer, self.nlayers, encoder_past_layer, self.max_seq, self.c_max_seq,
self.b_max_seq, encoder_norm)
self.transformer = nn.Transformer(
d_model=self.d_model, nhead=self.nhead, num_encoder_layers=self.nlayers,
num_decoder_layers=0, dropout=self.dropout, # activation=self.ff_activ,
dim_feedforward=self.d_ff, custom_encoder=encoder, custom_decoder=self.dummy
)
# RPR Transformer
elif self.rpr:
encoder_norm = LayerNorm(self.d_model)
encoder_layer = TransformerEncoderLayerRPR(self.d_model, self.nhead, self.d_ff, self.dropout,
er_len=self.max_seq)
encoder_past_layer = TransformerEncoderLayerRPR_(self.cpast_layer_dmodel, self.cpast_layer_nhead,
self.cpast_dim_forward, self.bpast_layer_dmodel,
self.bpast_layer_nhead, self.bpast_dim_forward,
self.d_model, self.nhead,
self.d_ff, self.dropout, er_len=self.max_seq)
encoder = TransformerEncoderRPR(encoder_layer, self.nlayers, encoder_past_layer, self.max_seq,
self.c_max_seq, self.b_max_seq, encoder_norm)
self.transformer = nn.Transformer(
d_model=self.d_model, nhead=self.nhead, num_encoder_layers=self.nlayers,
num_decoder_layers=0, dropout=self.dropout, # activation=self.ff_activ,
dim_feedforward=self.d_ff, custom_decoder=self.dummy, custom_encoder=encoder
)
# Final output is a softmaxed linear layer
# TODO: verify the size of linear
self.Norm1 = nn.LayerNorm(1024)
self.ReLU = nn.ReLU()
self.Norm2 = nn.LayerNorm(181)
self.Dropout = nn.Dropout(dropout)
self.transLinear = nn.Linear(256, 256)
self.Wout1 = nn.Linear(self.d_model, 1024)
self.Wout2 = nn.Linear(1024, 1024)
self.Wout3 = nn.Linear(1024, VOCAB_SIZE)
self.softmax = nn.Softmax(dim=-1)
def _reset_parameters(self):
r"""Initiate parameters in the transformer model."""
for p in self.parameters():
if p.dim() > 1:
xavier_uniform_(p)
# forward
def forward(self, x1, x2, x3, mask=True):
|
# MusicTransformer
class CoCoformer(nn.Module):
def __init__(self, word2event, event2word, n_layers=6, num_heads=8, d_model=512, dim_feedforward=1024,
dropout=0.1, max_sequence=2048, c_max_seq=256, b_max_seq=1024, rpr=False):
super(CoCoformer, self).__init__()
self.dummy = DummyDecoder()
self.nlayers = n_layers
self.nhead = num_heads
self.d_model = d_model
self.d_ff = dim_feedforward
self.dropout = dropout
self.max_seq = max_sequence
self.c_max_seq = c_max_seq
self.b_max_seq = b_max_seq
self.rpr = rpr
# word2event and event2word:
self.word2event = word2event
self.event2word = event2word
# past layer of chord
self.cpast_layer_dmodel = d_model
self.cpast_layer_nhead = 8
self.cpast_dim_forward = 256
self.cpast_layer_max_seq = 256
self.cpast_layer_nlayers = 1
# past layer of beats
self.bpast_layer_dmodel = d_model
self.bpast_layer_nhead = 8
self.bpast_dim_forward = 256
self.bpast_layer_max_seq = 1024
self.bpast_layer_nlayers = 1
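        # The chord and beat conditions are encoded by separate single-layer streams whose
        # keys/values are fused with the note stream in the first encoder layer
        # (TransformerEncoderPastLayer / TransformerEncoderLayerRPR_).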
# Input embedding
self.n_embedding = nn.Embedding(VOCAB_SIZE, self.d_model)
self.c_embedding = nn.Embedding(VOCAB_SIZE, self.cpast_layer_dmodel)
self.b_embedding = nn.Embedding(VOCAB_SIZE, self.bpast_layer_dmodel)
# Positional encoding
self.n_positional_encoding = PositionalEncoding(self.d_model, self.dropout, self.max_seq)
self.c_positional_encoding = PositionalEncoding(self.cpast_layer_dmodel, self.dropout, self.cpast_layer_max_seq)
self.b_positional_encoding = PositionalEncoding(self.bpast_layer_dmodel, self.dropout, self.bpast_layer_max_seq)
# Base transformer
if not self.rpr:
# To make a decoder-only transformer we need to use masked encoder layers
# Dummy decoder to essentially just return the encoder output
encoder_norm = LayerNorm(self.d_model)
encoder_past_layer = TransformerEncoderPastLayer(self.cpast_layer_dmodel, self.cpast_layer_nhead,
self.cpast_dim_forward, self.bpast_layer_dmodel,
self.bpast_layer_nhead, self.bpast_dim_forward,
self.d_model, self.nhead,
self.d_ff, self.dropout)
encoder_layer = TransformerEncoderLayer(self.d_model, self.nhead, self.d_ff, self.dropout)
encoder = TransformerEncoder(encoder_layer, self.nlayers, encoder_past_layer, self.max_seq, self.c_max_seq,
self.b_max_seq, encoder_norm)
self.transformer = nn.Transformer(
d_model=self.d_model, nhead=self.nhead, num_encoder_layers=self.nlayers,
num_decoder_layers=0, dropout=self.dropout, # activation=self.ff_activ,
dim_feedforward=self.d_ff, custom_encoder=encoder, custom_decoder=self.dummy
)
# RPR Transformer
elif self.rpr:
encoder_norm = LayerNorm(self.d_model)
encoder_layer = TransformerEncoderLayerRPR(self.d_model, self.nhead, self.d_ff, self.dropout,
er_len=self.max_seq)
encoder_past_layer = TransformerEncoderLayerRPR_(self.cpast_layer_dmodel, self.cpast_layer_nhead,
self.cpast_dim_forward, self.bpast_layer_dmodel,
self.bpast_layer_nhead, self.bpast_dim_forward,
self.d_model, self.nhead,
self.d_ff, self.dropout, er_len=self.max_seq)
encoder = TransformerEncoderRPR(encoder_layer, self.nlayers, encoder_past_layer, self.max_seq,
self.c_max_seq, self.b_max_seq, encoder_norm)
self.transformer = nn.Transformer(
d_model=self.d_model, nhead=self.nhead, num_encoder_layers=self.nlayers,
num_decoder_layers=0, dropout=self.dropout, # activation=self.ff_activ,
dim_feedforward=self.d_ff, custom_decoder=self.dummy, custom_encoder=encoder
)
# Final output is a softmaxed linear layer
# TODO: verify the size of linear
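        # Output head: d_model -> 1024 -> 1024 -> VOCAB_SIZE, softmax over the vocabulary dimension.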
self.Norm1 = nn.LayerNorm(1024)
self.ReLU = nn.ReLU()
self.Norm2 = nn.LayerNorm(181)
self.Dropout = nn.Dropout(dropout)
self.transLinear = nn.Linear(256, 256)
self.Wout1 = nn.Linear(self.d_model, 1024)
self.Wout2 = nn.Linear(1024, 1024)
self.Wout3 = nn.Linear(1024, VOCAB_SIZE)
self.softmax = nn.Softmax(dim=-1)
def _reset_parameters(self):
r"""Initiate parameters in the transformer model."""
for p in self.parameters():
if p.dim() > 1:
xavier_uniform_(p)
# forward
def forward(self, x1, x2, x3, mask=True):
| args = parse_train_args() | 8 | 2023-11-01 08:33:08+00:00 | 12k |
serl-robot/serl | serl/agents/vice/vice_learner.py | [
{
"identifier": "batched_random_crop",
"path": "serl/utils/augmentations.py",
"snippet": "def batched_random_crop(key, obs, pixel_key, padding=4):\n imgs = obs[pixel_key]\n keys = jax.random.split(key, imgs.shape[0])\n imgs = jax.vmap(random_crop, (0, 0, None))(keys, imgs, padding)\n return obs.copy(add_or_replace={pixel_key: imgs})"
},
{
"identifier": "SACLearner",
"path": "serl/agents/sac/sac_learner.py",
"snippet": "class SACLearner(Agent):\n critic: TrainState\n target_critic: TrainState\n temp: TrainState\n tau: float\n discount: float\n target_entropy: float\n num_qs: int = struct.field(pytree_node=False)\n num_min_qs: Optional[int] = struct.field(\n pytree_node=False\n ) # See M in RedQ https://arxiv.org/abs/2101.05982\n backup_entropy: bool = struct.field(pytree_node=False)\n\n @classmethod\n def create(\n cls,\n seed: int,\n observation_space: gym.Space,\n action_space: gym.Space,\n actor_lr: float = 3e-4,\n critic_lr: float = 3e-4,\n temp_lr: float = 3e-4,\n hidden_dims: Sequence[int] = (256, 256),\n discount: float = 0.99,\n tau: float = 0.005,\n num_qs: int = 2,\n num_min_qs: Optional[int] = None,\n critic_dropout_rate: Optional[float] = None,\n critic_layer_norm: bool = False,\n target_entropy: Optional[float] = None,\n init_temperature: float = 1.0,\n backup_entropy: bool = True,\n ):\n \"\"\"\n An implementation of the version of Soft-Actor-Critic described in https://arxiv.org/abs/1812.05905\n \"\"\"\n\n action_dim = action_space.shape[-1]\n observations = observation_space.sample()\n actions = action_space.sample()\n\n if target_entropy is None:\n target_entropy = -action_dim / 2\n\n rng = jax.random.PRNGKey(seed)\n rng, actor_key, critic_key, temp_key = jax.random.split(rng, 4)\n\n actor_base_cls = partial(MLP, hidden_dims=hidden_dims, activate_final=True)\n actor_def = TanhNormal(actor_base_cls, action_dim)\n actor_params = actor_def.init(actor_key, observations)[\"params\"]\n actor = TrainState.create(\n apply_fn=actor_def.apply,\n params=actor_params,\n tx=optax.adam(learning_rate=actor_lr),\n )\n\n critic_base_cls = partial(\n MLP,\n hidden_dims=hidden_dims,\n activate_final=True,\n dropout_rate=critic_dropout_rate,\n use_layer_norm=critic_layer_norm,\n )\n critic_cls = partial(StateActionValue, base_cls=critic_base_cls)\n critic_def = Ensemble(critic_cls, num=num_qs)\n critic_params = critic_def.init(critic_key, observations, actions)[\"params\"]\n critic = TrainState.create(\n apply_fn=critic_def.apply,\n params=critic_params,\n tx=optax.adam(learning_rate=critic_lr),\n )\n\n target_critic_def = Ensemble(critic_cls, num=num_min_qs or num_qs)\n target_critic = TrainState.create(\n apply_fn=target_critic_def.apply,\n params=critic_params,\n tx=optax.GradientTransformation(lambda _: None, lambda _: None),\n )\n\n temp_def = Temperature(init_temperature)\n temp_params = temp_def.init(temp_key)[\"params\"]\n temp = TrainState.create(\n apply_fn=temp_def.apply,\n params=temp_params,\n tx=optax.adam(learning_rate=temp_lr),\n )\n\n return cls(\n rng=rng,\n actor=actor,\n critic=critic,\n target_critic=target_critic,\n temp=temp,\n target_entropy=target_entropy,\n tau=tau,\n discount=discount,\n num_qs=num_qs,\n num_min_qs=num_min_qs,\n backup_entropy=backup_entropy,\n )\n\n def update_actor(self, batch: DatasetDict) -> Tuple[Agent, Dict[str, float]]:\n key, rng = jax.random.split(self.rng)\n key2, rng = jax.random.split(rng)\n\n def actor_loss_fn(actor_params) -> Tuple[jnp.ndarray, Dict[str, float]]:\n dist = self.actor.apply_fn({\"params\": actor_params}, batch[\"observations\"])\n actions = dist.sample(seed=key)\n log_probs = dist.log_prob(actions)\n qs = self.critic.apply_fn(\n {\"params\": self.critic.params},\n batch[\"observations\"],\n actions,\n True,\n rngs={\"dropout\": key2},\n ) # training=True\n q = qs.mean(axis=0)\n actor_loss = (\n log_probs * self.temp.apply_fn({\"params\": self.temp.params}) - q\n ).mean()\n return actor_loss, {\"actor_loss\": actor_loss, 
\"entropy\": -log_probs.mean()}\n\n grads, actor_info = jax.grad(actor_loss_fn, has_aux=True)(self.actor.params)\n actor = self.actor.apply_gradients(grads=grads)\n\n return self.replace(actor=actor, rng=rng), actor_info\n\n def update_temperature(self, entropy: float) -> Tuple[Agent, Dict[str, float]]:\n def temperature_loss_fn(temp_params):\n temperature = self.temp.apply_fn({\"params\": temp_params})\n temp_loss = temperature * (entropy - self.target_entropy).mean()\n return temp_loss, {\n \"temperature\": temperature,\n \"temperature_loss\": temp_loss,\n }\n\n grads, temp_info = jax.grad(temperature_loss_fn, has_aux=True)(self.temp.params)\n temp = self.temp.apply_gradients(grads=grads)\n\n return self.replace(temp=temp), temp_info\n\n def update_critic(self, batch: DatasetDict) -> Tuple[TrainState, Dict[str, float]]:\n\n dist = self.actor.apply_fn(\n {\"params\": self.actor.params}, batch[\"next_observations\"]\n )\n\n rng = self.rng\n\n key, rng = jax.random.split(rng)\n next_actions = dist.sample(seed=key)\n\n # Used only for REDQ.\n key, rng = jax.random.split(rng)\n target_params = subsample_ensemble(\n key, self.target_critic.params, self.num_min_qs, self.num_qs\n )\n\n key, rng = jax.random.split(rng)\n next_qs = self.target_critic.apply_fn(\n {\"params\": target_params},\n batch[\"next_observations\"],\n next_actions,\n True,\n rngs={\"dropout\": key},\n ) # training=True\n next_q = next_qs.min(axis=0)\n\n target_q = batch[\"rewards\"] + self.discount * batch[\"masks\"] * next_q\n\n if self.backup_entropy:\n next_log_probs = dist.log_prob(next_actions)\n target_q -= (\n self.discount\n * batch[\"masks\"]\n * self.temp.apply_fn({\"params\": self.temp.params})\n * next_log_probs\n )\n\n key, rng = jax.random.split(rng)\n\n def critic_loss_fn(critic_params) -> Tuple[jnp.ndarray, Dict[str, float]]:\n qs = self.critic.apply_fn(\n {\"params\": critic_params},\n batch[\"observations\"],\n batch[\"actions\"],\n True,\n rngs={\"dropout\": key},\n ) # training=True\n critic_loss = ((qs - target_q) ** 2).mean()\n return critic_loss, {\"critic_loss\": critic_loss, \"q\": qs.mean()}\n\n grads, info = jax.grad(critic_loss_fn, has_aux=True)(self.critic.params)\n critic = self.critic.apply_gradients(grads=grads)\n\n target_critic_params = optax.incremental_update(\n critic.params, self.target_critic.params, self.tau\n )\n target_critic = self.target_critic.replace(params=target_critic_params)\n\n return self.replace(critic=critic, target_critic=target_critic, rng=rng), info\n\n @partial(jax.jit, static_argnames=\"utd_ratio\")\n def update(self, batch: DatasetDict, utd_ratio: int):\n\n new_agent = self\n for i in range(utd_ratio):\n\n def slice(x):\n assert x.shape[0] % utd_ratio == 0\n batch_size = x.shape[0] // utd_ratio\n return x[batch_size * i : batch_size * (i + 1)]\n\n mini_batch = jax.tree_util.tree_map(slice, batch)\n new_agent, critic_info = new_agent.update_critic(mini_batch)\n\n new_agent, actor_info = new_agent.update_actor(mini_batch)\n new_agent, temp_info = new_agent.update_temperature(actor_info[\"entropy\"])\n\n return new_agent, {**actor_info, **critic_info, **temp_info}"
},
{
"identifier": "DrQLearner",
"path": "serl/agents/drq/drq_learner.py",
"snippet": "class DrQLearner(SACLearner):\n data_augmentation_fn: Callable = struct.field(pytree_node=False)\n\n @classmethod\n def create(\n cls,\n seed: int,\n observation_space: gym.Space,\n action_space: gym.Space,\n actor_lr: float = 3e-4,\n critic_lr: float = 3e-4,\n temp_lr: float = 3e-4,\n cnn_features: Sequence[int] = (32, 32, 32, 32),\n cnn_filters: Sequence[int] = (3, 3, 3, 3),\n cnn_strides: Sequence[int] = (2, 1, 1, 1),\n cnn_padding: str = \"VALID\",\n latent_dim: int = 50,\n encoder: str = \"d4pg\",\n hidden_dims: Sequence[int] = (256, 256),\n discount: float = 0.99,\n tau: float = 0.005,\n num_qs: int = 2,\n num_min_qs: Optional[int] = None,\n critic_dropout_rate: Optional[float] = None,\n critic_layer_norm: bool = False,\n target_entropy: Optional[float] = None,\n init_temperature: float = 1.0,\n backup_entropy: bool = True,\n pixel_keys: Tuple[str, ...] = (\"pixels\",),\n depth_keys: Tuple[str, ...] = (),\n ):\n \"\"\"\n An implementation of the version of Soft-Actor-Critic described in https://arxiv.org/abs/1812.05905\n \"\"\"\n\n action_dim = action_space.shape[-1]\n observations = observation_space.sample()\n actions = action_space.sample()\n\n if target_entropy is None:\n target_entropy = -action_dim / 2\n\n rng = jax.random.PRNGKey(seed)\n rng, actor_key, critic_key, temp_key = jax.random.split(rng, 4)\n\n if encoder == \"d4pg\":\n encoder_cls = partial(\n D4PGEncoder,\n features=cnn_features,\n filters=cnn_filters,\n strides=cnn_strides,\n padding=cnn_padding,\n )\n elif encoder == \"resnet\":\n encoder_cls = partial(ResNetV2Encoder, stage_sizes=(2, 2, 2, 2))\n\n actor_base_cls = partial(MLP, hidden_dims=hidden_dims, activate_final=True)\n actor_cls = partial(TanhNormal, base_cls=actor_base_cls, action_dim=action_dim)\n actor_def = PixelMultiplexer(\n encoder_cls=encoder_cls,\n network_cls=actor_cls,\n latent_dim=latent_dim,\n stop_gradient=True,\n pixel_keys=pixel_keys,\n depth_keys=depth_keys,\n )\n actor_params = actor_def.init(actor_key, observations)[\"params\"]\n actor = TrainState.create(\n apply_fn=actor_def.apply,\n params=actor_params,\n tx=optax.adam(learning_rate=actor_lr),\n )\n\n critic_base_cls = partial(\n MLP,\n hidden_dims=hidden_dims,\n activate_final=True,\n dropout_rate=critic_dropout_rate,\n use_layer_norm=critic_layer_norm,\n )\n critic_cls = partial(StateActionValue, base_cls=critic_base_cls)\n critic_cls = partial(Ensemble, net_cls=critic_cls, num=num_qs)\n critic_def = PixelMultiplexer(\n encoder_cls=encoder_cls,\n network_cls=critic_cls,\n latent_dim=latent_dim,\n pixel_keys=pixel_keys,\n depth_keys=depth_keys,\n )\n critic_params = critic_def.init(critic_key, observations, actions)[\"params\"]\n critic = TrainState.create(\n apply_fn=critic_def.apply,\n params=critic_params,\n tx=optax.adam(learning_rate=critic_lr),\n )\n target_critic = TrainState.create(\n apply_fn=critic_def.apply,\n params=critic_params,\n tx=optax.GradientTransformation(lambda _: None, lambda _: None),\n )\n\n temp_def = Temperature(init_temperature)\n temp_params = temp_def.init(temp_key)[\"params\"]\n temp = TrainState.create(\n apply_fn=temp_def.apply,\n params=temp_params,\n tx=optax.adam(learning_rate=temp_lr),\n )\n\n def data_augmentation_fn(rng, observations):\n for pixel_key, depth_key in zip_longest(pixel_keys, depth_keys):\n key, rng = jax.random.split(rng)\n observations = batched_random_crop(key, observations, pixel_key)\n if depth_key is not None:\n observations = batched_random_crop(key, observations, depth_key)\n return observations\n\n return cls(\n 
rng=rng,\n actor=actor,\n critic=critic,\n target_critic=target_critic,\n temp=temp,\n target_entropy=target_entropy,\n tau=tau,\n discount=discount,\n num_qs=num_qs,\n num_min_qs=num_min_qs,\n backup_entropy=backup_entropy,\n data_augmentation_fn=data_augmentation_fn,\n )\n\n @partial(jax.jit, static_argnames=(\"utd_ratio\", \"pixel_keys\"))\n def update(self, batch: DatasetDict, utd_ratio: int, pixel_keys=(\"pixels\",)):\n '''\n Update the agent's parameters (actor and critic) using the batch of data from the replay buffer.\n We apply data augmentation to both observations and next_observation,\n then we share the encoder params between actor and critic.\n\n :param batch: a batch of data from the replay buffer, a dataset dict\n :param utd_ratio: the number of times to update the critic for each update of the actor\n :param pixel_keys: pixel keys to apply data augmentation to\n :return: the updated agent and the update info dict\n '''\n new_agent = self\n\n if pixel_keys[0] not in batch[\"next_observations\"]:\n batch = _unpack(batch)\n\n actor = _share_encoder(source=new_agent.critic, target=new_agent.actor)\n new_agent = new_agent.replace(actor=actor)\n\n rng, key = jax.random.split(new_agent.rng)\n observations = self.data_augmentation_fn(key, batch[\"observations\"])\n rng, key = jax.random.split(rng)\n next_observations = self.data_augmentation_fn(key, batch[\"next_observations\"])\n\n batch = batch.copy(\n add_or_replace={\n \"observations\": observations,\n \"next_observations\": next_observations,\n }\n )\n new_agent = new_agent.replace(rng=rng)\n return SACLearner.update(new_agent, batch, utd_ratio)"
},
{
"identifier": "Temperature",
"path": "serl/agents/sac/temperature.py",
"snippet": "class Temperature(nn.Module):\n initial_temperature: float = 1.0\n\n @nn.compact\n def __call__(self) -> jnp.ndarray:\n log_temp = self.param(\n \"log_temp\",\n init_fn=lambda key: jnp.full((), jnp.log(self.initial_temperature)),\n )\n return jnp.exp(log_temp)"
},
{
"identifier": "DatasetDict",
"path": "serl/data/dataset.py",
"snippet": "def _check_lengths(dataset_dict: DatasetDict, dataset_len: Optional[int] = None) -> int:\ndef _subselect(dataset_dict: DatasetDict, index: np.ndarray) -> DatasetDict:\ndef _sample(\n dataset_dict: Union[np.ndarray, DatasetDict], indx: np.ndarray\n) -> DatasetDict:\n def __init__(self, dataset_dict: DatasetDict, seed: Optional[int] = None):\n def np_random(self) -> np.random.RandomState:\n def seed(self, seed: Optional[int] = None) -> list:\n def __len__(self) -> int:\n def sample(\n self,\n batch_size: int,\n keys: Optional[Iterable[str]] = None,\n indx: Optional[np.ndarray] = None,\n ) -> frozen_dict.FrozenDict:\n def sample_jax(self, batch_size: int, keys: Optional[Iterable[str]] = None):\n def _sample_jax(rng, src, max_indx: int):\n def split(self, ratio: float) -> Tuple[\"Dataset\", \"Dataset\"]:\n def _trajectory_boundaries_and_returns(self) -> Tuple[list, list, list]:\n def filter(\n self, take_top: Optional[float] = None, threshold: Optional[float] = None\n ):\n def normalize_returns(self, scaling: float = 1000):\nclass Dataset(object):"
},
{
"identifier": "TanhNormal",
"path": "serl/distributions/tanh_normal.py",
"snippet": "class Normal(nn.Module):\n def __call__(self, inputs, *args, **kwargs) -> tfd.Distribution:"
},
{
"identifier": "Ensemble",
"path": "serl/networks/ensemble.py",
"snippet": "class Ensemble(nn.Module):\n net_cls: Type[nn.Module]\n num: int = 2\n\n @nn.compact\n def __call__(self, *args):\n ensemble = nn.vmap(\n self.net_cls,\n variable_axes={\"params\": 0},\n split_rngs={\"params\": True, \"dropout\": True},\n in_axes=None,\n out_axes=0,\n axis_size=self.num,\n )\n return ensemble()(*args)"
},
{
"identifier": "MLP",
"path": "serl/networks/mlp.py",
"snippet": "class MLP(nn.Module):\n hidden_dims: Sequence[int]\n activations: Callable[[jnp.ndarray], jnp.ndarray] = nn.relu\n activate_final: bool = False\n use_layer_norm: bool = False\n scale_final: Optional[float] = None\n dropout_rate: Optional[float] = None\n spectral_norm: bool = False\n\n @nn.compact\n def __call__(self, x: jnp.ndarray, training: bool = False) -> jnp.ndarray:\n\n for i, size in enumerate(self.hidden_dims):\n if i + 1 == len(self.hidden_dims) and self.scale_final is not None:\n x = nn.Dense(size, kernel_init=default_init(self.scale_final))(x)\n else:\n x = nn.Dense(size, kernel_init=default_init())(x)\n\n if i + 1 < len(self.hidden_dims) or self.activate_final:\n if self.dropout_rate is not None and self.dropout_rate > 0:\n x = nn.Dropout(rate=self.dropout_rate)(\n x, deterministic=not training\n )\n if self.use_layer_norm:\n x = nn.LayerNorm()(x)\n x = self.activations(x)\n return x"
},
{
"identifier": "PixelMultiplexer",
"path": "serl/networks/pixel_multiplexer.py",
"snippet": "class PixelMultiplexer(nn.Module):\n encoder_cls: Type[nn.Module]\n network_cls: Type[nn.Module]\n latent_dim: int\n stop_gradient: bool = False\n pixel_keys: Tuple[str, ...] = (\"pixels\",)\n depth_keys: Tuple[str, ...] = ()\n\n @nn.compact\n def __call__(\n self,\n observations: Union[FrozenDict, Dict],\n actions: Optional[jnp.ndarray] = None,\n training: bool = False,\n ) -> jnp.ndarray:\n observations = FrozenDict(observations)\n image_obs, state_obs = observations.pop(\"state\")\n reshape_img = lambda x: x.reshape(*x.shape[:-2], -1) / 255.0\n image_obs = jax.tree_map(reshape_img, image_obs)\n\n x = self.encoder_cls(name=f\"image_encoder\")(image_obs, training)\n if self.stop_gradient:\n # We do not update conv layers with policy gradients.\n x = jax.lax.stop_gradient(x)\n x = nn.Dense(512, kernel_init=default_init())(x)\n x = nn.LayerNorm()(x)\n x = nn.tanh(x)\n\n if \"state\" in observations:\n y = nn.Dense(self.latent_dim, kernel_init=default_init())(\n observations[\"state\"]\n )\n y = nn.LayerNorm()(y)\n y = nn.tanh(y)\n\n x = jnp.concatenate([x, y], axis=-1)\n\n if actions is None:\n return self.network_cls()(x, training)\n else:\n return self.network_cls()(x, actions, training)"
},
{
"identifier": "StateActionValue",
"path": "serl/networks/state_action_value.py",
"snippet": "class StateActionValue(nn.Module):\n base_cls: nn.Module\n\n @nn.compact\n def __call__(\n self, observations: jnp.ndarray, actions: jnp.ndarray, *args, **kwargs\n ) -> jnp.ndarray:\n inputs = jnp.concatenate([observations, actions], axis=-1)\n outputs = self.base_cls()(inputs, *args, **kwargs)\n\n value = nn.Dense(1, kernel_init=default_init())(outputs)\n\n return jnp.squeeze(value, -1)"
},
{
"identifier": "TwoD4PGEncoder",
"path": "serl/networks/encoders/two_d4pg_encoder.py",
"snippet": "class TwoD4PGEncoder(nn.Module):\n features: Sequence[int] = (32, 32, 32, 32)\n filters: Sequence[int] = (2, 1, 1, 1)\n strides: Sequence[int] = (2, 1, 1, 1)\n padding: str = \"VALID\"\n activations: Callable[[jnp.ndarray], jnp.ndarray] = nn.relu\n\n @nn.compact\n def __call__(self, x: jnp.ndarray, training=False) -> jnp.ndarray:\n assert len(self.features) == len(self.strides)\n\n processed_tensors = []\n reshape = False\n\n # Loop through all the tensors in the input FrozenDict\n for key, tensor in x.items():\n # Expand dimensions if they are 3\n if tensor.ndim == 3:\n tensor = tensor[None, ...]\n reshape = True\n\n # Apply Conv layers\n for features, filter_, stride in zip(self.features, self.filters, self.strides):\n tensor = nn.Conv(\n features,\n kernel_size=(filter_, filter_),\n strides=(stride, stride),\n kernel_init=default_init(),\n padding=self.padding,\n )(tensor)\n tensor = self.activations(tensor)\n\n tensor = SpatialLearnedEmbeddings(*(tensor.shape[1:]), 8)(tensor)\n processed_tensors.append(tensor)\n\n # Concatenate all processed tensors along the last axis\n concatenated_tensor = jnp.concatenate(processed_tensors, axis=-1)\n\n # Reshape if original tensors were 3D\n if reshape:\n concatenated_tensor = concatenated_tensor.reshape(-1)\n\n return concatenated_tensor"
},
{
"identifier": "MobileNetEncoder",
"path": "serl/networks/encoders/mobilenet_encoder.py",
"snippet": "class MobileNetEncoder(nn.Module):\n mobilenet: Callable[..., Callable]\n params: FrozenDict\n stop_gradient: bool = False\n\n @nn.compact\n def __call__(self, x: jnp.ndarray, training=False, divide_by=False, reshape=False) -> jnp.ndarray:\n '''\n encode an image using the mobilenet encoder\n TODO: it should work for all pretrained encoders, not just mobilenet.\n\n :param x: input image\n :param training: whether the network is in training mode\n :param divide_by: whether to divide the image by 255\n :param reshape: whether to reshape the image before passing into encoder\n :return: the encoded image\n '''\n\n mean = jnp.array((0.485, 0.456, 0.406))[None, ...]\n std = jnp.array((0.229, 0.224, 0.225))[None, ...]\n\n if reshape:\n x = jnp.reshape(x, (*x.shape[:-2], -1))\n\n if divide_by:\n x = x.astype(jnp.float32) / 255.0\n x = (x - mean) / std\n\n if x.ndim == 3:\n x = x[None, ...]\n x = self.mobilenet.apply(self.params, x, mutable=False, training=False)\n elif x.ndim == 4:\n x = self.mobilenet.apply(self.params, x, mutable=False, training=False)\n else:\n raise NotImplementedError('ndim is not 3 or 4')\n\n if self.stop_gradient:\n x = jax.lax.stop_gradient(x)\n\n return x"
},
{
"identifier": "TwoMobileNetEncoder",
"path": "serl/networks/encoders/two_mobilenet_encoder.py",
"snippet": "class TwoMobileNetEncoder(nn.Module):\n mobilenet: nn.Module\n params: FrozenDict\n dropout_rate: float = 0.1\n\n @nn.compact\n def __call__(self, x: FrozenDict[str, jnp.ndarray], training=False) -> jnp.ndarray:\n processed_tensors = []\n reshape = False\n mean = jnp.array((0.485, 0.456, 0.406))[None, ...]\n std = jnp.array((0.229, 0.224, 0.225))[None, ...]\n\n # Loop through all the tensors in the input FrozenDict\n for key, tensor in x.items():\n # Expand dimensions if they are 3\n if tensor.ndim == 3:\n tensor = tensor[None, ...]\n reshape = True\n\n # Apply mobilenet\n tensor = (tensor - mean) / std # normalize using ImageNet stats\n tensor = self.mobilenet.apply(self.params, tensor, training=False)\n # Apply SpatialLearnedEmbeddings and Dropout\n tensor = SpatialLearnedEmbeddings(*(tensor.shape[1:]), 8)(tensor)\n tensor = nn.Dropout(self.dropout_rate)(tensor, deterministic=not training)\n\n processed_tensors.append(tensor)\n\n # Concatenate all processed tensors along the last axis\n concatenated_tensor = jnp.concatenate(processed_tensors, axis=-1)\n\n # Reshape if original tensors were 3D\n if reshape:\n concatenated_tensor = concatenated_tensor.reshape(-1)\n\n return concatenated_tensor"
},
{
"identifier": "EncodedEncoder",
"path": "serl/networks/encoded_encoder.py",
"snippet": "class EncodedEncoder(nn.Module):\n network_cls: Type[nn.Module]\n latent_dim: int\n stop_gradient: bool = False\n pixel_key: str = \"pixels\"\n dropout_rate: float = 0.1\n\n @nn.compact\n def __call__(\n self,\n observations: Union[FrozenDict, Dict],\n training: bool = False,\n ) -> jnp.ndarray:\n observations = FrozenDict(observations)\n x = observations[self.pixel_key]\n\n if x.ndim == 3:\n x = x[None, :]\n\n x = SpatialLearnedEmbeddings(*(x.shape[1:]), 8)(x)\n x = nn.Dropout(self.dropout_rate)(x, deterministic=not training)\n\n if x.shape[0] == 1:\n x = x.reshape(-1)\n else:\n x = x.reshape((x.shape[0], -1))\n\n if self.stop_gradient:\n # We do not update conv layers with policy gradients.\n x = jax.lax.stop_gradient(x)\n\n x = nn.Dense(512, kernel_init=default_init())(x)\n x = nn.LayerNorm()(x)\n x = nn.tanh(x)\n\n return self.network_cls()(x, training)"
},
{
"identifier": "OneDimOutput",
"path": "serl/networks/one_d_output.py",
"snippet": "class OneDimOutput(nn.Module):\n base_cls: nn.Module\n\n @nn.compact\n def __call__(\n self, observations: jnp.ndarray, *args, **kwargs\n ) -> jnp.ndarray:\n if self.base_cls:\n outputs = self.base_cls()(observations, *args, **kwargs)\n else:\n outputs = observations\n\n value = nn.Dense(1, kernel_init=default_init())(outputs)\n return jnp.squeeze(value, -1)"
},
{
"identifier": "_unpack",
"path": "serl/utils/commons.py",
"snippet": "def _unpack(batch: DatasetDict):\n '''\n Helps to minimize CPU to GPU transfer.\n Assuming that if next_observation is missing, it's combined with observation:\n\n :param batch: a batch of data from the replay buffer, a dataset dict\n :return: a batch of unpacked data, a dataset dict\n '''\n\n for pixel_key in batch[\"observations\"].keys():\n if pixel_key not in batch[\"next_observations\"]:\n obs_pixels = batch[\"observations\"][pixel_key][..., :-1]\n next_obs_pixels = batch[\"observations\"][pixel_key][..., 1:]\n\n obs = batch[\"observations\"].copy(add_or_replace={pixel_key: obs_pixels})\n next_obs = batch[\"next_observations\"].copy(\n add_or_replace={pixel_key: next_obs_pixels}\n )\n batch = batch.copy(\n add_or_replace={\"observations\": obs, \"next_observations\": next_obs}\n )\n\n return batch"
},
{
"identifier": "_share_encoder",
"path": "serl/utils/commons.py",
"snippet": "def _share_encoder(source, target):\n '''\n Share encoder params between source and target:\n \n :param source: the source network, TrainState\n :param target: the target network, TrainState\n '''\n\n replacers = {}\n for k, v in source.params.items():\n if \"encoder\" in k:\n replacers[k] = v\n\n # e.g., Use critic conv layers in actor:\n new_params = target.params.copy(add_or_replace=replacers)\n return target.replace(params=new_params)"
}
] | from functools import partial
from itertools import zip_longest
from typing import Callable, Dict, Optional, Sequence, Tuple, OrderedDict
from collections import OrderedDict
from jax import numpy as jnp
from flax import struct
from flax.core import FrozenDict, freeze
from flax.training.train_state import TrainState
from serl.utils.augmentations import batched_random_crop
from serl.agents.sac.sac_learner import SACLearner
from serl.agents.drq.drq_learner import DrQLearner
from serl.agents.sac.temperature import Temperature
from serl.data.dataset import DatasetDict
from serl.distributions import TanhNormal
from serl.networks import MLP, Ensemble, PixelMultiplexer, StateActionValue
from serl.networks.encoders import TwoMobileNetEncoder, MobileNetEncoder, TwoD4PGEncoder
from serl.networks.encoded_encoder import EncodedEncoder
from serl.networks.one_d_output import OneDimOutput
from serl.utils.commons import _unpack, _share_encoder
from jeffnet.linen import create_model, EfficientNet
import gym
import jax
import optax
import flax.linen as nn | 8,215 | """Implementations of algorithms for continuous control."""
class VICELearner(DrQLearner):
    vice_classifiers: OrderedDict[str, TrainState]
    vice_label_smoothing: float
    vice_goal_pool: jnp.ndarray
    vice_encoder: TrainState
    vice_encoder_params: FrozenDict

    @classmethod
    def create(
        cls,
        seed: int,
        observation_space: gym.Space,
        action_space: gym.Space,
        actor_lr: float = 3e-4,
        critic_lr: float = 3e-4,
        vice_lr: float = 3e-4,
        temp_lr: float = 3e-4,
        cnn_features: Sequence[int] = (32, 32, 32, 32),
        cnn_filters: Sequence[int] = (3, 3, 3, 3),
        cnn_strides: Sequence[int] = (2, 1, 1, 1),
        cnn_padding: str = "VALID",
        latent_dim: int = 50,
        encoder: str = "d4pg",
        hidden_dims: Sequence[int] = (256, 256),
        discount: float = 0.99,
        tau: float = 0.005,
        num_qs: int = 2,
        num_min_qs: Optional[int] = None,
        critic_dropout_rate: Optional[float] = None,
        vice_dropout_rate: Optional[float] = None,
        vice_label_smoothing: float = 0.1,
        critic_layer_norm: bool = False,
        target_entropy: Optional[float] = None,
        init_temperature: float = 1.0,
        backup_entropy: bool = True,
        pixel_keys: Tuple[str, ...] = ("pixels",),
        depth_keys: Tuple[str, ...] = (),
        vice_goal_pool: jnp.ndarray = None
    ):
        """
        An implementation of the version of Soft-Actor-Critic described in https://arxiv.org/abs/1812.05905
        """
        action_dim = action_space.shape[-1]
        observations = observation_space.sample()
        actions = action_space.sample()

        if target_entropy is None:
            target_entropy = -action_dim

        rng = jax.random.PRNGKey(seed)
        rng, actor_key, critic_key, temp_key, vice_encoder_key = jax.random.split(rng, 5)
        rng_vice_keys = jax.random.split(rng, 1 + len(pixel_keys))
        rng, vice_keys = rng_vice_keys[0], rng_vice_keys[1:]

        if encoder == "d4pg":
            encoder_cls = partial(
                TwoD4PGEncoder,
                features=cnn_features,
                filters=cnn_filters,
                strides=cnn_strides,
                padding=cnn_padding,
            )
        elif encoder == "resnet":
            raise NotImplementedError
        elif encoder == "mobilenet":
            MobileNet, mobilenet_variables = create_model('tf_mobilenetv3_large_100', pretrained=True)
            encoder_cls = partial(TwoMobileNetEncoder, mobilenet=MobileNet, params=mobilenet_variables)

        actor_base_cls = partial(MLP, hidden_dims=hidden_dims, activate_final=True)
        actor_cls = partial(TanhNormal, base_cls=actor_base_cls, action_dim=action_dim)
| """Implementations of algorithms for continuous control."""
class VICELearner(DrQLearner):
    vice_classifiers: OrderedDict[str, TrainState]
    vice_label_smoothing: float
    vice_goal_pool: jnp.ndarray
    vice_encoder: TrainState
    vice_encoder_params: FrozenDict

    @classmethod
    def create(
        cls,
        seed: int,
        observation_space: gym.Space,
        action_space: gym.Space,
        actor_lr: float = 3e-4,
        critic_lr: float = 3e-4,
        vice_lr: float = 3e-4,
        temp_lr: float = 3e-4,
        cnn_features: Sequence[int] = (32, 32, 32, 32),
        cnn_filters: Sequence[int] = (3, 3, 3, 3),
        cnn_strides: Sequence[int] = (2, 1, 1, 1),
        cnn_padding: str = "VALID",
        latent_dim: int = 50,
        encoder: str = "d4pg",
        hidden_dims: Sequence[int] = (256, 256),
        discount: float = 0.99,
        tau: float = 0.005,
        num_qs: int = 2,
        num_min_qs: Optional[int] = None,
        critic_dropout_rate: Optional[float] = None,
        vice_dropout_rate: Optional[float] = None,
        vice_label_smoothing: float = 0.1,
        critic_layer_norm: bool = False,
        target_entropy: Optional[float] = None,
        init_temperature: float = 1.0,
        backup_entropy: bool = True,
        pixel_keys: Tuple[str, ...] = ("pixels",),
        depth_keys: Tuple[str, ...] = (),
        vice_goal_pool: jnp.ndarray = None
    ):
        """
        An implementation of the version of Soft-Actor-Critic described in https://arxiv.org/abs/1812.05905
        """
        action_dim = action_space.shape[-1]
        observations = observation_space.sample()
        actions = action_space.sample()

        if target_entropy is None:
            target_entropy = -action_dim

        rng = jax.random.PRNGKey(seed)
        rng, actor_key, critic_key, temp_key, vice_encoder_key = jax.random.split(rng, 5)
        rng_vice_keys = jax.random.split(rng, 1 + len(pixel_keys))
        rng, vice_keys = rng_vice_keys[0], rng_vice_keys[1:]

        if encoder == "d4pg":
            encoder_cls = partial(
                TwoD4PGEncoder,
                features=cnn_features,
                filters=cnn_filters,
                strides=cnn_strides,
                padding=cnn_padding,
            )
        elif encoder == "resnet":
            raise NotImplementedError
        elif encoder == "mobilenet":
            MobileNet, mobilenet_variables = create_model('tf_mobilenetv3_large_100', pretrained=True)
            encoder_cls = partial(TwoMobileNetEncoder, mobilenet=MobileNet, params=mobilenet_variables)

        actor_base_cls = partial(MLP, hidden_dims=hidden_dims, activate_final=True)
        actor_cls = partial(TanhNormal, base_cls=actor_base_cls, action_dim=action_dim) | actor_def = PixelMultiplexer( | 8 | 2023-11-02 23:32:24+00:00 | 12k
tiendatnguyen-vision/Orbit-symmetrize | RotatedMNIST/LPS/emlp-pytorch/emlp_pytorch/reps/representation.py | [
{
"identifier": "Group",
"path": "RotatedMNIST/LPS/emlp-pytorch/emlp_pytorch/groups.py",
"snippet": "class Group(nn.Module):\n \"\"\" Abstract Group Object which new groups should inherit from. \"\"\"\n\n def __init__(self):\n super().__init__()\n self.lie_algebra = NotImplemented # The continuous generators\n self.discrete_generators = NotImplemented # The discrete generators\n self.z_scale = None # For scale noise for sampling elements\n self.is_orthogonal = None\n self.is_permutation = None\n self.d = NotImplemented # The dimension of the base representation\n self.device = torch.device('cpu')\n self.args = None\n\n def init(self, *args):\n \"\"\" Initialize the group object. \"\"\"\n # get the dimension of the base group representation\n if self.d is NotImplemented:\n if (self.lie_algebra is not NotImplemented) and \\\n len(self.lie_algebra) > 0:\n self.d = self.lie_algebra[0].size(-1)\n if (self.discrete_generators is not NotImplemented) and \\\n len(self.discrete_generators) > 0:\n self.d = self.discrete_generators[0].size(-1)\n\n self.args = args\n\n if self.lie_algebra is NotImplemented:\n self.lie_algebra = torch.zeros((0, self.d, self.d), device=self.device)\n if self.discrete_generators is NotImplemented:\n self.discrete_generators = torch.zeros((0, self.d, self.d), device=self.device)\n\n self.to(self.device)\n\n # set orthogonal flag automatically if not specified\n if self.is_permutation:\n self.is_orthogonal = True\n if self.is_orthogonal is None:\n self.is_orthogonal = True\n if len(self.lie_algebra) != 0:\n Id = torch.eye(self.d, device=self.device)\n A_dense = torch.stack([[email protected](Ai.dtype) for Ai in self.lie_algebra])\n self.is_orthogonal &= rel_err(-A_dense.transpose(2, 1), A_dense) < 1e-6\n if len(self.discrete_generators) != 0:\n Id = torch.eye(self.d, device=self.device)\n h_dense = torch.stack([[email protected](hi.dtype) for hi in self.discrete_generators])\n self.is_orthogonal &= rel_err(h_dense.transpose(2, 1)@h_dense, Id[None]) < 1e-6\n\n # set regular flag automatically if not specified\n if self.is_orthogonal and (self.is_permutation is None):\n self.is_permutation = True\n # no infinitesmal generators and all rows have one 1\n self.is_permutation &= (len(self.lie_algebra) == 0)\n if len(self.discrete_generators) != 0:\n Id = torch.eye(self.d, device=self.device)\n h_dense = torch.stack([[email protected](hi.dtype) for hi in self.discrete_generators])\n self.is_permutation &= (((h_dense-1).abs()<1e-6).long().sum(-1) == 1).all()\n\n def exp(self, A):\n \"\"\" Matrix exponential \"\"\"\n return torch.linalg.matrix_exp(A)\n\n def num_constraints(self):\n \"\"\" Number of constraints to solve for the group \"\"\"\n return len(self.lie_algebra)+len(self.discrete_generators)\n\n def sample(self):\n \"\"\"Draw a sample from the group (not necessarily Haar measure)\"\"\"\n return self.samples(1)[0]\n\n def samples(self, N):\n \"\"\" Draw N samples from the group (not necessarily Haar measure)\"\"\"\n Id = torch.eye(self.d, device=self.device)\n A_dense = torch.stack([[email protected](Ai.dtype) for Ai in self.lie_algebra]) \\\n if len(self.lie_algebra) \\\n else torch.zeros((0, self.d, self.d), device=self.device)\n h_dense = torch.stack([[email protected](hi.dtype) for hi in self.discrete_generators]) \\\n if len(self.discrete_generators) \\\n else torch.zeros((0, self.d, self.d), device=self.device)\n z = torch.randn(N, A_dense.size(0), device=self.device)\n if self.z_scale is not None:\n z *= self.z_scale\n k = torch.randint(-MAX_POWER, MAX_POWER+1, (N, h_dense.size(0), 3), device=self.device)\n return noise2samples(z, k, A_dense, 
h_dense)\n\n def check_valid_group_elems(self, g):\n \"\"\" Check that the group elements are valid \"\"\"\n return True\n\n def __str__(self):\n return repr(self)\n\n def __repr__(self):\n outstr = f\"{self.__class__}\"\n if self.args:\n outstr += '('+''.join(repr(arg) for arg in self.args)+')'\n return outstr\n\n def __eq__(self, G2): # TODO: more permissive by checking that spans are equal?\n return repr(self) == repr(G2)\n\n def __hash__(self):\n return hash(repr(self))\n\n def __lt__(self, other):\n \"\"\" For sorting purposes only \"\"\"\n return hash(self) < hash(other)\n\n def __mul__(self, other):\n return DirectProduct(self, other)\n\n def forward(self):\n \"\"\" Forward method, unused. \"\"\"\n return None\n\n def to(self, *args, **kwargs):\n \"\"\" Move the group to the specified device \"\"\"\n if isinstance(self.lie_algebra, torch.Tensor):\n self.lie_algebra = self.lie_algebra.to(*args, **kwargs)\n elif isinstance(self.lie_algebra, list):\n self.lie_algebra = [Ai.to(*args, **kwargs) for Ai in self.lie_algebra]\n if isinstance(self.discrete_generators, torch.Tensor):\n self.discrete_generators = self.discrete_generators.to(*args, **kwargs)\n elif isinstance(self.discrete_generators, list):\n self.discrete_generators = [hi.to(*args, **kwargs) for hi in self.discrete_generators]\n if self.z_scale is not None:\n self.z_scale = self.z_scale.to(*args, **kwargs)\n self.device = torch.empty(0).to(*args, **kwargs).device\n return self"
},
{
"identifier": "LinearOperator",
"path": "RotatedMNIST/LPS/emlp-pytorch/emlp_pytorch/reps/linear_operator_base.py",
"snippet": "class LinearOperator(nn.Module):\n \"\"\" Common interface for performing matrix vector products\n Many iterative methods (e.g. cg, gmres) do not need to know the\n individual entries of a matrix to solve a linear system A*x=b.\n Such solvers only require the computation of matrix vector\n products, A*v where v is a dense vector. This class serves as\n an abstract interface between iterative solvers and matrix-like\n objects.\n To construct a concrete LinearOperator, either pass appropriate\n callables to the constructor of this class, or subclass it.\n A subclass must implement either one of the methods ``_matvec``\n and ``_matmat``, and the attributes/properties ``shape`` (pair of\n integers) and ``dtype`` (may be None). It may call the ``__init__``\n on this class to have these attributes validated. Implementing\n ``_matvec`` automatically implements ``_matmat`` (using a naive\n algorithm) and vice-versa.\n Optionally, a subclass may implement ``_rmatvec`` or ``_adjoint``\n to implement the Hermitian adjoint (conjugate transpose). As with\n ``_matvec`` and ``_matmat``, implementing either ``_rmatvec`` or\n ``_adjoint`` implements the other automatically. Implementing\n ``_adjoint`` is preferable; ``_rmatvec`` is mostly there for\n backwards compatibility.\n Parameters\n ----------\n shape : tuple\n Matrix dimensions (M, N).\n matvec : callable f(v)\n Returns returns A * v.\n rmatvec : callable f(v)\n Returns A^H * v, where A^H is the conjugate transpose of A.\n matmat : callable f(V)\n Returns A * V, where V is a dense matrix with dimensions (N, K).\n dtype : dtype\n Data type of the matrix.\n rmatmat : callable f(V)\n Returns A^H * V, where V is a dense matrix with dimensions (M, K).\n Attributes\n ----------\n args : tuple\n For linear operators describing products etc. of other linear\n operators, the operands of the binary operation.\n ndim : int\n Number of dimensions (this is always 2)\n See Also\n --------\n aslinearoperator : Construct LinearOperators\n Notes\n -----\n The user-defined matvec() function must properly handle the case\n where v has shape (N,) as well as the (N,1) case. The shape of\n the return type is handled internally by LinearOperator.\n LinearOperator instances can also be multiplied, added with each\n other and exponentiated, all lazily: the result of these operations\n is always a new, composite LinearOperator, that defers linear\n operations to the original operators and combines the results.\n More details regarding how to subclass a LinearOperator and several\n examples of concrete LinearOperator instances can be found in the\n external project `PyLops <https://pylops.readthedocs.io>`_.\n Examples\n --------\n >>> def mv(v):\n ... 
return torch.tensor([2*v[0], 3*v[1]])\n ...\n >>> A = LinearOperator((2,2), matvec=mv)\n >>> A\n <2x2 _CustomLinearOperator with dtype=float64>\n >>> A.matvec(torch.ones(2))\n tensor([ 2., 3.])\n >>> A * torch.ones(2)\n tensor([ 2., 3.])\n \"\"\"\n\n def __new__(cls, *args, **kwargs):\n if cls is LinearOperator:\n # Operate as _CustomLinearOperator factory.\n return super(LinearOperator, cls).__new__(_CustomLinearOperator)\n\n obj = super(LinearOperator, cls).__new__(cls)\n if (type(obj)._matvec == LinearOperator._matvec\n and type(obj)._matmat == LinearOperator._matmat):\n warnings.warn(\"LinearOperator subclass should implement\"\n \" at least one of _matvec and _matmat.\",\n category=RuntimeWarning, stacklevel=2)\n return obj\n\n def __init__(self):\n super().__init__()\n self.ndim = 2\n self.dtype = None\n self.shape = None\n self.device = None\n\n def init(self, dtype, shape, device):\n \"\"\" Initialize this LinearOperator.\n To be called by subclasses. ``dtype`` may be None; ``shape`` should\n be convertible to a length-2 tuple.\n Called from subclasses at the end of the __init__ routine.\n \"\"\"\n if dtype is None:\n dtype = torch.float # force float 32\n else:\n if not isinstance(dtype, torch.dtype):\n dtype = torch_dtype(dtype)\n\n shape = tuple(shape)\n if not isshape(shape):\n raise ValueError(f\"invalid shape {(shape,)} (must be 2-d)\")\n\n self.dtype = dtype\n self.shape = torch.Size(shape)\n self.device = torch_device(device)\n\n def size(self, dim=None):\n \"\"\" Return the size of this LinearOperator.\n This is a synonym for ``shape``.\n \"\"\"\n return self.shape if dim is None else self.shape[dim]\n\n def _matmat(self, V):\n \"\"\" Default matrix-matrix multiplication handler.\n Falls back on the user-defined _matvec method, so defining that will\n define matrix multiplication (though in a very suboptimal way).\n \"\"\"\n return torch.hstack([self.matvec(col.reshape(-1, 1)) for col in V.T])\n\n def _matvec(self, v):\n \"\"\" Default matrix-vector multiplication handler.\n If self is a linear operator of shape (M, N), then this method will\n be called on a shape (N,) or (N, 1) ndarray, and should return a\n shape (M,) or (M, 1) ndarray.\n This default implementation falls back on _matmat, so defining that\n will define matrix-vector multiplication as well.\n \"\"\"\n return self.matmat(v.reshape(-1, 1))\n\n def matvec(self, v):\n \"\"\" Matrix-vector multiplication.\n Performs the operation y=A*v where A is an MxN linear\n operator and v is a column vector or 1-d array.\n Parameters\n ----------\n v : {matrix, ndarray}\n An array with shape (N,) or (N,1).\n Returns\n -------\n y : {matrix, ndarray}\n A matrix or ndarray with shape (M,) or (M,1) depending\n on the type and shape of the x argument.\n Notes\n -----\n This matvec wraps the user-specified matvec routine or overridden\n _matvec method to ensure that y has the correct shape and type.\n \"\"\"\n M, N = self.shape\n if v.shape != (N,) and v.shape != (N, 1):\n raise ValueError('dimension mismatch')\n\n y = self._matvec(v)\n\n if v.ndim == 1:\n y = y.reshape(M)\n elif v.ndim == 2:\n y = y.reshape(M, 1)\n else:\n raise ValueError('invalid shape returned by user-defined matvec()')\n\n return y\n\n def rmatvec(self, v):\n \"\"\" Adjoint matrix-vector multiplication.\n Performs the operation y = A^H * v where A is an MxN linear\n operator and v is a column vector or 1-d array.\n Parameters\n ----------\n v : {matrix, ndarray}\n An array with shape (M,) or (M,1).\n Returns\n -------\n y : {matrix, ndarray}\n A matrix or 
ndarray with shape (N,) or (N,1) depending\n on the type and shape of the v argument.\n Notes\n -----\n This rmatvec wraps the user-specified rmatvec routine or overridden\n _rmatvec method to ensure that y has the correct shape and type.\n \"\"\"\n M, N = self.shape\n\n if v.shape != (M,) and v.shape != (M, 1):\n raise ValueError('dimension mismatch')\n\n y = self._rmatvec(v)\n\n if v.ndim == 1:\n y = y.reshape(N)\n elif v.ndim == 2:\n y = y.reshape(N, 1)\n else:\n raise ValueError('invalid shape returned by user-defined rmatvec()')\n\n return y\n\n def _rmatvec(self, v):\n \"\"\" Default implementation of _rmatvec; defers to adjoint. \"\"\"\n if type(self)._adjoint == LinearOperator._adjoint:\n # _adjoint not overridden, prevent infinite recursion\n raise NotImplementedError\n return self.H().matvec(v)\n\n def matmat(self, V):\n \"\"\" Matrix-matrix multiplication.\n Performs the operation y=A*V where A is an MxN linear\n operator and V dense N*K matrix or ndarray.\n Parameters\n ----------\n V : {matrix, ndarray}\n An array with shape (N,K).\n Returns\n -------\n Y : {matrix, ndarray}\n A matrix or ndarray with shape (M,K) depending on\n the type of the V argument.\n Notes\n -----\n This matmat wraps any user-specified matmat routine or overridden\n _matmat method to ensure that y has the correct type.\n \"\"\"\n if V.ndim != 2:\n raise ValueError(f'expected 2-d ndarray or matrix, not {V.ndim}-d')\n\n if V.size(0) != self.size(1):\n raise ValueError(f'dimension mismatch: {self.shape}, {V.shape}')\n\n Y = self._matmat(V)\n return Y\n\n def rmatmat(self, V):\n \"\"\" Adjoint matrix-matrix multiplication.\n Performs the operation y = A^H * V where A is an MxN linear\n operator and V is a column vector or 1-d array, or 2-d array.\n The default implementation defers to the adjoint.\n Parameters\n ----------\n V : {matrix, ndarray}\n A matrix or 2D array.\n Returns\n -------\n Y : {matrix, ndarray}\n A matrix or 2D array depending on the type of the input.\n Notes\n -----\n This rmatmat wraps the user-specified rmatmat routine.\n \"\"\"\n if V.ndim != 2:\n raise ValueError(f'expected 2-d matrix, not {V.ndim}-d')\n\n if V.size(0) != self.size(0):\n raise ValueError(f'dimension mismatch: {self.shape}, {V.shape}')\n\n Y = self._rmatmat(V)\n return Y\n\n def _rmatmat(self, V):\n \"\"\" Default implementation of _rmatmat defers to rmatvec or adjoint. \"\"\"\n if type(self)._adjoint == LinearOperator._adjoint:\n return torch.hstack([self.rmatvec(col.reshape(-1, 1)) for col in V.T])\n return self.H().matmat(V)\n\n def forward(self, v):\n \"\"\" Matrix-vector or matrix-matrix multiplication. 
\"\"\"\n return self*v\n\n def __mul__(self, v):\n return self.dot(v)\n\n def dot(self, v):\n \"\"\" Matrix-matrix or matrix-vector multiplication.\n Parameters\n ----------\n v : array_like\n 1-d or 2-d array, representing a vector or matrix.\n Returns\n -------\n Av : array\n 1-d or 2-d array (depending on the shape of x) that represents\n the result of applying this linear operator on x.\n \"\"\"\n if isinstance(v, LinearOperator):\n return _ProductLinearOperator(self, v)\n if torch.is_tensor(v):\n if v.ndim == 0:\n return _ScaledLinearOperator(self, v)\n if v.ndim == 1 or v.ndim == 2 and v.size(1) == 1:\n return self.matvec(v)\n if v.ndim == 2:\n return self.matmat(v)\n raise ValueError(f'expected 1-d or 2-d array or matrix, got {v}')\n\n def __matmul__(self, other):\n if isscalar(other):\n raise ValueError(\"Scalar operands are not allowed, use '*' instead\")\n return self.__mul__(other)\n\n def __rmatmul__(self, other):\n if isscalar(other):\n raise ValueError(\"Scalar operands are not allowed, use '*' instead\")\n return self.__rmul__(other)\n\n def __rmul__(self, x):\n if isscalar(x):\n return _ScaledLinearOperator(self, x)\n return NotImplemented\n\n def __pow__(self, p):\n if isscalar(p):\n return _PowerLinearOperator(self, p)\n return NotImplemented\n\n def __add__(self, x):\n if isinstance(x, LinearOperator):\n return _SumLinearOperator(self, x)\n if torch.is_tensor(x) and x.ndim == 2:\n return _SumLinearOperator(self, Lazy(x))\n return NotImplemented\n\n def __radd__(self, x):\n return self.__add__(x)\n\n def __neg__(self):\n return _ScaledLinearOperator(self, -1)\n\n def __sub__(self, x):\n return self.__add__(-x)\n\n def __repr__(self):\n M, N = self.shape\n if self.dtype is None:\n dtype = 'unspecified dtype'\n else:\n dtype = 'dtype=' + str(self.dtype)\n\n return f'<{M}x{N} {self.__class__.__name__} with {dtype}>'\n\n def adjoint(self):\n \"\"\" Hermitian adjoint.\n Returns the Hermitian adjoint of self, aka the Hermitian\n conjugate or Hermitian transpose. For a complex matrix, the\n Hermitian adjoint is equal to the conjugate transpose.\n Can be abbreviated self.H instead of self.adjoint().\n Returns\n -------\n A_H : LinearOperator\n Hermitian adjoint of self.\n \"\"\"\n return self._adjoint()\n\n def H(self):\n \"\"\" Hermitian adjoint. \"\"\"\n return self.adjoint()\n\n def transpose(self):\n \"\"\" Transpose this linear operator.\n Returns a LinearOperator that represents the transpose of this one.\n Can be abbreviated self.T instead of self.transpose().\n \"\"\"\n return self._transpose()\n\n def t(self):\n \"\"\" Transpose this linear operator. \"\"\"\n return self.transpose()\n\n def _adjoint(self):\n \"\"\" Default implementation of _adjoint; defers to rmatvec. \"\"\"\n return _AdjointLinearOperator(self)\n\n def _transpose(self):\n \"\"\" Default implementation of _transpose; defers to rmatvec + conj\"\"\"\n return _TransposedLinearOperator(self)\n\n def invt(self):\n \"\"\" Default implementation of inverse transpose; defers to inv + T \"\"\"\n return (self ** -1).transpose()\n\n def to_dense(self):\n \"\"\" Default implementation of to_dense which produces the dense\n matrix corresponding to the given lazy matrix. Defaults to\n multiplying by the identity \"\"\"\n return [email protected](self.size(-1), device=self.device)\n\n def to(self, device):\n \"\"\" Move this linear operator to a new device. \"\"\"\n self.device = torch.empty(0).to(device).device\n return self"
},
{
"identifier": "ConcatLazy",
"path": "RotatedMNIST/LPS/emlp-pytorch/emlp_pytorch/reps/linear_operators.py",
"snippet": "class ConcatLazy(LinearOperator):\n \"\"\" Produces a linear operator equivalent to concatenating\n a collection of matrices Ms along axis=0 \"\"\"\n\n def __init__(self, Ms):\n super().__init__()\n self.Ms = Ms\n assert all(M.size(0) == Ms[0].size(0) for M in Ms),\\\n f\"Trying to concatenate matrices of different sizes {[M.shape for M in Ms]}\"\n shape = (sum(M.size(0) for M in Ms), Ms[0].size(1))\n device = get_device(Ms)\n self.init(None, shape, device)\n self.to(self.device)\n\n def _matmat(self, V):\n return torch.cat([M@V for M in self.Ms])\n\n def _rmatmat(self, V):\n Vs = torch.chunk(V, len(self.Ms))\n return sum(Mi.t()@Vi for Mi, Vi in zip(self.Ms, Vs))\n\n def to_dense(self):\n dense_Ms = [M.to_dense() if isinstance(M, LinearOperator) else M for M in self.Ms]\n return torch.cat(dense_Ms)\n\n def to(self, device):\n self.Ms = [M.to(device) for M in self.Ms]\n self.device = torch.empty(0).to(device).device\n return self"
},
{
"identifier": "I",
"path": "RotatedMNIST/LPS/emlp-pytorch/emlp_pytorch/reps/linear_operators.py",
"snippet": "class I(LinearOperator):\n \"\"\" Identity operator. \"\"\"\n\n def __init__(self, d, device=None):\n super().__init__()\n shape = (d, d)\n self.init(None, shape, device)\n\n def _matmat(self, V): # (c,k)\n return V\n\n def _matvec(self, v):\n return v\n\n def _adjoint(self):\n return self\n\n def invt(self):\n return self"
},
{
"identifier": "lazify",
"path": "RotatedMNIST/LPS/emlp-pytorch/emlp_pytorch/reps/linear_operators.py",
"snippet": "def lazify(x):\n \"\"\" Convert a tensor LinearOperator. \"\"\"\n if isinstance(x, LinearOperator):\n return x\n if torch.is_tensor(x):\n return Lazy(x)\n raise NotImplementedError"
},
{
"identifier": "densify",
"path": "RotatedMNIST/LPS/emlp-pytorch/emlp_pytorch/reps/linear_operators.py",
"snippet": "def densify(x):\n \"\"\" Convert a LinearOperator to a dense tensor. \"\"\"\n if isinstance(x, LinearOperator):\n return x.to_dense()\n if torch.is_tensor(x):\n return x\n raise NotImplementedError"
},
{
"identifier": "LazyJVP",
"path": "RotatedMNIST/LPS/emlp-pytorch/emlp_pytorch/reps/linear_operators.py",
"snippet": "class LazyJVP(LinearOperator):\n \"\"\" Lazy Jacobian-vector product. \"\"\"\n\n def __init__(self, operator_fn, X, TX):\n super().__init__()\n self.operator_fn = operator_fn\n self.X = X\n self.TX = TX\n self.init(torch.float, operator_fn(X).shape, X.device)\n self.to(self.device)\n\n def vjp(self, v):\n \"\"\" Computes the vector-Jacobian product \"\"\"\n return torch.autograd.functional.jvp(\n lambda x: self.operator_fn(x)@v, [self.X], [self.TX])[1]\n\n def vjp_T(self, v):\n \"\"\" Computes the vector-Jacobian product \"\"\"\n return torch.autograd.functional.jvp(\n lambda x: self.operator_fn(x).t()@v, [self.X], [self.TX])[1]\n\n def _matmat(self, V):\n return self.vjp(V)\n\n def _matvec(self, v):\n return self.vjp(v)\n\n def _rmatmat(self, V):\n return self.vjp_T(V)\n\n def to(self, device):\n self.X = self.X.to(device)\n self.TX = self.TX.to(device)\n self.device = self.X.device\n return self"
},
{
"identifier": "LazyPerm",
"path": "RotatedMNIST/LPS/emlp-pytorch/emlp_pytorch/reps/linear_operators.py",
"snippet": "class LazyPerm(LinearOperator):\n \"\"\" Lazy permutation. \"\"\"\n\n def __init__(self, perm):\n super().__init__()\n self.perm = perm\n shape = (len(perm), len(perm))\n self.init(None, shape, perm.device)\n\n def _matmat(self, V):\n return V[self.perm]\n\n def _matvec(self, v):\n return v[self.perm]\n\n def _adjoint(self):\n return LazyPerm(torch.argsort(self.perm))\n\n def invt(self):\n return self\n\n def to(self, device):\n self.perm = self.perm.to(device)\n self.device = self.perm.device\n return self"
},
{
"identifier": "LazyDirectSum",
"path": "RotatedMNIST/LPS/emlp-pytorch/emlp_pytorch/reps/linear_operators.py",
"snippet": "class LazyDirectSum(LinearOperator):\n \"\"\" Lazy direct sum. \"\"\"\n\n def __init__(self, Ms, multiplicities=None):\n super().__init__()\n self.Ms = Ms\n self.multiplicities = [1 for _ in Ms] if multiplicities is None else multiplicities\n shape = (sum(Mi.size(0)*c for Mi, c in zip(Ms, multiplicities)),\n sum(Mi.size(0)*c for Mi, c in zip(Ms, multiplicities)))\n device = get_device(Ms)\n self.init(None, shape, device)\n self.to(self.device)\n\n def _matvec(self, v):\n return lazy_direct_matmat(v, self.Ms, self.multiplicities)\n\n def _matmat(self, V): # (n,k)\n return lazy_direct_matmat(V, self.Ms, self.multiplicities)\n\n def _adjoint(self):\n return LazyDirectSum([Mi.t() for Mi in self.Ms])\n\n def invt(self):\n return LazyDirectSum([M.invt() for M in self.Ms])\n\n def to_dense(self):\n Ms_all = [M for M, c in zip(self.Ms, self.multiplicities)\n for _ in range(c)]\n Ms_all = [Mi.to_dense() if isinstance(Mi, LinearOperator)\n else Mi for Mi in Ms_all]\n return torch.block_diag(*Ms_all)\n\n def to(self, device):\n self.Ms = [M.to(device) for M in self.Ms]\n self.device = torch.empty(0).to(device).device\n return self"
},
{
"identifier": "LazyKron",
"path": "RotatedMNIST/LPS/emlp-pytorch/emlp_pytorch/reps/linear_operators.py",
"snippet": "class LazyKron(LinearOperator):\n \"\"\" Lazy tensor product. \"\"\"\n\n def __init__(self, Ms):\n super().__init__()\n self.Ms = Ms\n shape = product([Mi.size(0) for Mi in Ms]), product([Mi.size(1) for Mi in Ms])\n device = get_device(Ms)\n self.init(None, shape, device)\n self.to(self.device)\n\n def _matvec(self, v):\n return self._matmat(v).reshape(-1)\n\n def _matmat(self, V):\n eV = V.reshape(*[Mi.size(-1) for Mi in self.Ms], -1)\n for i, M in enumerate(self.Ms):\n eV_front = torch.movedim(eV, i, 0)\n MeV_front = (M@eV_front.reshape(M.size(-1), -1)).reshape(M.size(0), *eV_front.shape[1:])\n eV = torch.movedim(MeV_front, 0, i)\n return eV.reshape(self.size(0), eV.size(-1))\n\n def _adjoint(self):\n return LazyKron([Mi.t() for Mi in self.Ms])\n\n def invt(self):\n return LazyKron([M.invt() for M in self.Ms])\n\n def to_dense(self):\n self.to(self.device)\n Ms = [M.to_dense() if isinstance(M, LinearOperator) else M for M in self.Ms]\n return reduce(torch.kron, Ms)\n\n def __new__(cls, Ms):\n if len(Ms) == 1:\n return Ms[0]\n return super().__new__(cls)\n\n def to(self, device):\n self.Ms = [M.to(device) for M in self.Ms]\n self.device = torch.empty(0).to(device).device\n return self"
},
{
"identifier": "LazyKronsum",
"path": "RotatedMNIST/LPS/emlp-pytorch/emlp_pytorch/reps/linear_operators.py",
"snippet": "class LazyKronsum(LinearOperator):\n \"\"\" Lazy tensor sum. \"\"\"\n\n def __init__(self, Ms):\n super().__init__()\n self.Ms = Ms\n shape = product([Mi.size(0) for Mi in Ms]), product([Mi.size(1) for Mi in Ms])\n dtype = torch.float\n device = get_device(Ms)\n self.init(dtype, shape, device)\n self.to(self.device)\n\n def _matvec(self, v):\n return self._matmat(v).reshape(-1)\n\n def _matmat(self, V):\n eV = V.reshape(*[Mi.size(-1) for Mi in self.Ms], -1)\n out = 0*eV\n for i, M in enumerate(self.Ms):\n eV_front = torch.movedim(eV, i, 0)\n M, eV_front = dtype_cast(M, eV_front)\n MeV_front = (M@eV_front.reshape(M.size(-1), -1)).reshape(M.size(0), *eV_front.shape[1:])\n out, MeV_front = dtype_cast(out, MeV_front)\n out += torch.movedim(MeV_front, 0, i)\n return out.reshape(self.size(0), eV.size(-1))\n\n def _adjoint(self):\n return LazyKronsum([Mi.t() for Mi in self.Ms])\n\n def to_dense(self):\n Ms = [M.to_dense() if isinstance(M, LinearOperator) else M for M in self.Ms]\n return reduce(kronsum, Ms)\n\n def __new__(cls, Ms):\n if len(Ms) == 1:\n return Ms[0]\n return super().__new__(cls)\n\n # could also be implemented as follows,\n # but fusing the sum into a single linearOperator is faster\n # def lazy_kronsum(Ms):\n # n = len(Ms)\n # lprod = np.cumprod([1]+[mi.size(-1) for mi in Ms])\n # rprod = np.cumprod([1]+[mi.size(-1) for mi in reversed(Ms)])[::-1]\n # return reduce(lambda a,b: a+b,[lazy_kron([I(lprod[i]),Mi,I(rprod[i+1])])\n # for i,Mi in enumerate(Ms)])\n\n def to(self, device):\n self.Ms = [M.to(device) for M in self.Ms]\n self.device = torch.empty(0).to(device).device\n return self"
},
{
"identifier": "lazy_direct_matmat",
"path": "RotatedMNIST/LPS/emlp-pytorch/emlp_pytorch/reps/linear_operators.py",
"snippet": "def lazy_direct_matmat(v, Ms, mults):\n \"\"\" Computes the matrix-vector product of a direct sum of matrices\n with a vector. \"\"\"\n k = v.size(1) if len(v.shape) > 1 else 1\n i = 0\n y = []\n for M, multiplicity in zip(Ms, mults):\n i_end = i+multiplicity*M.size(-1)\n elems = M@v[i:i_end][None].reshape(k*multiplicity, M.size(-1)).t()\n y.append(elems.t().reshape(k, multiplicity*M.size(0)).t())\n i = i_end\n y = torch.cat(y) # concatenate over rep axis\n return y"
},
{
"identifier": "product",
"path": "RotatedMNIST/LPS/emlp-pytorch/emlp_pytorch/reps/linear_operators.py",
"snippet": "def product(c):\n \"\"\" Product of a list of numbers. \"\"\"\n return reduce(lambda a, b: a*b, c)"
},
{
"identifier": "orthogonal_complement",
"path": "RotatedMNIST/LPS/emlp-pytorch/emlp_pytorch/reps/utils.py",
"snippet": "def orthogonal_complement(proj):\n \"\"\" Computes the orthogonal complement to a given matrix proj\"\"\"\n _, S, Vh = torch.linalg.svd(proj, full_matrices=True)\n rank = (S > 1e-5).sum()\n return Vh[rank:].conj().t()"
},
{
"identifier": "krylov_constraint_solve",
"path": "RotatedMNIST/LPS/emlp-pytorch/emlp_pytorch/reps/utils.py",
"snippet": "def krylov_constraint_solve(C, tol=1e-5):\n \"\"\" Computes the solution basis Q for the linear constraint CQ=0 and QᵀQ=I\n up to specified tolerance with C expressed as a LinearOperator. \"\"\"\n r = 5\n if C.size(0)*r*2 > 2e9:\n raise RuntimeError(f\"Solns for contraints {C.shape} too large to fit in memory\")\n found_rank = 5\n while found_rank == r:\n r *= 2 # Iterative doubling of rank until large enough to include the full solution space\n if C.size(0)*r > 2e9:\n logging.error(\"Hit memory limits, switching to \"\n \"sample equivariant subspace of size %r\", found_rank)\n break\n Q = krylov_constraint_solve_upto_r(C, r, tol)\n found_rank = Q.size(-1)\n return Q"
},
{
"identifier": "get_device",
"path": "RotatedMNIST/LPS/emlp-pytorch/emlp_pytorch/reps/utils.py",
"snippet": "def get_device(operators, devices=None):\n \"\"\" Returns the device of the first operator that has a device attribute. \"\"\"\n if devices is None:\n devices = []\n for obj in operators:\n if obj is not None and hasattr(obj, 'device') and obj.device.type != 'cpu':\n return obj.device\n return torch.device('cpu')"
}
] | import math
import logging
import itertools
import torch
from functools import lru_cache as cache, reduce
from collections import defaultdict
from plum import dispatch
from torch import nn
from ..groups import Group
from .linear_operator_base import LinearOperator
from .linear_operators import ConcatLazy, I, lazify, densify, LazyJVP, LazyPerm, \
LazyDirectSum, LazyKron, LazyKronsum, lazy_direct_matmat, product
from .utils import orthogonal_complement, krylov_constraint_solve, get_device | 9,066 | """ The base Representation class. """
class Rep(nn.Module):
    """ The base Representation class. Representation objects formalize the vector space V
    on which the group acts, the group representation matrix ρ(g), and the Lie Algebra
    representation dρ(A) in a single object. Representations act as types for vectors coming
    from V. These types can be manipulated and transformed with the built in operators
    ⊕,⊗,dual, as well as incorporating custom representations. Representation objects should
    be immutable.
    At minimum, new representations need to implement ``rho``, ``__str__``."""

    def __init__(self):
        super().__init__()
        self.is_permutation = False
        self._size = None
        self.G = None

    def rho(self, M):
        """ Group representation of the matrix M of shape (d,d)"""
        raise NotImplementedError

    def drho(self, A):
        """ Lie Algebra representation of the matrix A of shape (d,d)"""
        In = torch.eye(A.size(0), dtype=A.dtype, device=A.device)
| """ The base Representation class. """
class Rep(nn.Module):
    """ The base Representation class. Representation objects formalize the vector space V
    on which the group acts, the group representation matrix ρ(g), and the Lie Algebra
    representation dρ(A) in a single object. Representations act as types for vectors coming
    from V. These types can be manipulated and transformed with the built in operators
    ⊕,⊗,dual, as well as incorporating custom representations. Representation objects should
    be immutable.
    At minimum, new representations need to implement ``rho``, ``__str__``."""

    def __init__(self):
        super().__init__()
        self.is_permutation = False
        self._size = None
        self.G = None

    def rho(self, M):
        """ Group representation of the matrix M of shape (d,d)"""
        raise NotImplementedError

    def drho(self, A):
        """ Lie Algebra representation of the matrix A of shape (d,d)"""
        In = torch.eye(A.size(0), dtype=A.dtype, device=A.device) | return LazyJVP(self.rho, In, A) | 6 | 2023-11-01 07:19:02+00:00 | 12k
xenxxxx/BitPay-Crypto-Signal-Trading-Bot | tests/conftest.py | [
{
"identifier": "leverage_trade",
"path": "tests/conftest_trades.py",
"snippet": "def leverage_trade(fee):\n \"\"\"\n 5 hour short limit trade on kraken\n\n Short trade\n fee: 0.25% base\n interest_rate: 0.05% per day\n open_rate: 0.123 base\n close_rate: 0.128 base\n amount: 615 crypto\n stake_amount: 15.129 base\n borrowed: 60.516 base\n leverage: 5\n hours: 5\n interest: borrowed * interest_rate * ceil(1 + hours/4)\n = 60.516 * 0.0005 * ceil(1 + 5/4) = 0.090774 base\n open_value: (amount * open_rate) + (amount * open_rate * fee)\n = (615.0 * 0.123) + (615.0 * 0.123 * 0.0025)\n = 75.83411249999999\n\n close_value: (amount_closed * close_rate) - (amount_closed * close_rate * fee) - interest\n = (615.0 * 0.128) - (615.0 * 0.128 * 0.0025) - 0.090774\n = 78.432426\n total_profit = close_value - open_value\n = 78.432426 - 75.83411249999999\n = 2.5983135000000175\n total_profit_percentage = ((close_value/open_value)-1) * leverage\n = ((78.432426/75.83411249999999)-1) * 5\n = 0.1713156134055116\n \"\"\"\n trade = Trade(\n pair='DOGE/BTC',\n stake_amount=15.129,\n amount=615.0,\n leverage=5.0,\n amount_requested=615.0,\n fee_open=fee.return_value,\n fee_close=fee.return_value,\n open_rate=0.123,\n close_rate=0.128,\n close_profit=0.1713156134055116,\n close_profit_abs=2.5983135000000175,\n exchange='kraken',\n is_open=False,\n strategy='DefaultStrategy',\n timeframe=5,\n exit_reason='sell_signal',\n open_date=datetime.now(tz=timezone.utc) - timedelta(minutes=300),\n close_date=datetime.now(tz=timezone.utc),\n interest_rate=0.0005\n )\n o = Order.parse_from_ccxt_object(leverage_order(), 'DOGE/BTC', 'sell')\n trade.orders.append(o)\n o = Order.parse_from_ccxt_object(leverage_order_sell(), 'DOGE/BTC', 'sell')\n trade.orders.append(o)\n return trade"
},
{
"identifier": "mock_trade_1",
"path": "tests/conftest_trades.py",
"snippet": "def mock_trade_1(fee, is_short: bool):\n trade = Trade(\n pair='ETH/BTC',\n stake_amount=0.001,\n amount=123.0,\n amount_requested=123.0,\n fee_open=fee.return_value,\n fee_close=fee.return_value,\n is_open=True,\n open_date=datetime.now(tz=timezone.utc) - timedelta(minutes=17),\n open_rate=0.123,\n exchange='binance',\n strategy='StrategyTestV3',\n timeframe=5,\n is_short=is_short\n )\n o = Order.parse_from_ccxt_object(mock_order_1(is_short), 'ETH/BTC', entry_side(is_short))\n trade.orders.append(o)\n return trade"
},
{
"identifier": "mock_trade_2",
"path": "tests/conftest_trades.py",
"snippet": "def mock_trade_2(fee, is_short: bool):\n \"\"\"\n Closed trade...\n \"\"\"\n trade = Trade(\n pair='ETC/BTC',\n stake_amount=0.001,\n amount=123.0,\n amount_requested=123.0,\n fee_open=fee.return_value,\n fee_close=fee.return_value,\n open_rate=0.123,\n close_rate=0.128,\n close_profit=-0.005 if is_short else 0.005,\n close_profit_abs=-0.005584127 if is_short else 0.000584127,\n exchange='binance',\n is_open=False,\n strategy='StrategyTestV3',\n timeframe=5,\n enter_tag='TEST1',\n exit_reason='sell_signal',\n open_date=datetime.now(tz=timezone.utc) - timedelta(minutes=20),\n close_date=datetime.now(tz=timezone.utc) - timedelta(minutes=2),\n is_short=is_short\n )\n o = Order.parse_from_ccxt_object(mock_order_2(is_short), 'ETC/BTC', entry_side(is_short))\n trade.orders.append(o)\n o = Order.parse_from_ccxt_object(mock_order_2_sell(is_short), 'ETC/BTC', exit_side(is_short))\n trade.orders.append(o)\n return trade"
},
{
"identifier": "mock_trade_3",
"path": "tests/conftest_trades.py",
"snippet": "def mock_trade_3(fee, is_short: bool):\n \"\"\"\n Closed trade\n \"\"\"\n trade = Trade(\n pair='XRP/BTC',\n stake_amount=0.001,\n amount=123.0,\n amount_requested=123.0,\n fee_open=fee.return_value,\n fee_close=fee.return_value,\n open_rate=0.05,\n close_rate=0.06,\n close_profit=-0.01 if is_short else 0.01,\n close_profit_abs=-0.001155 if is_short else 0.000155,\n exchange='binance',\n is_open=False,\n strategy='StrategyTestV3',\n timeframe=5,\n exit_reason='roi',\n open_date=datetime.now(tz=timezone.utc) - timedelta(minutes=20),\n close_date=datetime.now(tz=timezone.utc),\n is_short=is_short\n )\n o = Order.parse_from_ccxt_object(mock_order_3(is_short), 'XRP/BTC', entry_side(is_short))\n trade.orders.append(o)\n o = Order.parse_from_ccxt_object(mock_order_3_sell(is_short), 'XRP/BTC', exit_side(is_short))\n trade.orders.append(o)\n return trade"
},
{
"identifier": "mock_trade_4",
"path": "tests/conftest_trades.py",
"snippet": "def mock_trade_4(fee, is_short: bool):\n \"\"\"\n Simulate prod entry\n \"\"\"\n trade = Trade(\n pair='ETC/BTC',\n stake_amount=0.001,\n amount=123.0,\n amount_requested=124.0,\n fee_open=fee.return_value,\n fee_close=fee.return_value,\n open_date=datetime.now(tz=timezone.utc) - timedelta(minutes=14),\n is_open=True,\n open_rate=0.123,\n exchange='binance',\n strategy='StrategyTestV3',\n timeframe=5,\n is_short=is_short,\n stop_loss_pct=0.10\n )\n o = Order.parse_from_ccxt_object(mock_order_4(is_short), 'ETC/BTC', entry_side(is_short))\n trade.orders.append(o)\n return trade"
},
{
"identifier": "mock_trade_5",
"path": "tests/conftest_trades.py",
"snippet": "def mock_trade_5(fee, is_short: bool):\n \"\"\"\n Simulate prod entry with stoploss\n \"\"\"\n trade = Trade(\n pair='XRP/BTC',\n stake_amount=0.001,\n amount=123.0,\n amount_requested=124.0,\n fee_open=fee.return_value,\n fee_close=fee.return_value,\n open_date=datetime.now(tz=timezone.utc) - timedelta(minutes=12),\n is_open=True,\n open_rate=0.123,\n exchange='binance',\n strategy='SampleStrategy',\n enter_tag='TEST1',\n stoploss_order_id=f'prod_stoploss_{direc(is_short)}_3455',\n timeframe=5,\n is_short=is_short,\n stop_loss_pct=0.10,\n )\n o = Order.parse_from_ccxt_object(mock_order_5(is_short), 'XRP/BTC', entry_side(is_short))\n trade.orders.append(o)\n o = Order.parse_from_ccxt_object(mock_order_5_stoploss(is_short), 'XRP/BTC', 'stoploss')\n trade.orders.append(o)\n return trade"
},
{
"identifier": "mock_trade_6",
"path": "tests/conftest_trades.py",
"snippet": "def mock_trade_6(fee, is_short: bool):\n \"\"\"\n Simulate prod entry with open exit order\n \"\"\"\n trade = Trade(\n pair='LTC/BTC',\n stake_amount=0.001,\n amount=2.0,\n amount_requested=2.0,\n open_date=datetime.now(tz=timezone.utc) - timedelta(minutes=5),\n fee_open=fee.return_value,\n fee_close=fee.return_value,\n is_open=True,\n open_rate=0.15,\n exchange='binance',\n strategy='SampleStrategy',\n enter_tag='TEST2',\n timeframe=5,\n is_short=is_short\n )\n o = Order.parse_from_ccxt_object(mock_order_6(is_short), 'LTC/BTC', entry_side(is_short))\n trade.orders.append(o)\n o = Order.parse_from_ccxt_object(mock_order_6_sell(is_short), 'LTC/BTC', exit_side(is_short))\n trade.orders.append(o)\n return trade"
},
{
"identifier": "short_trade",
"path": "tests/conftest_trades.py",
"snippet": "def short_trade(fee):\n \"\"\"\n 10 minute short limit trade on binance\n\n Short trade\n fee: 0.25% base\n interest_rate: 0.05% per day\n open_rate: 0.123 base\n close_rate: 0.128 base\n amount: 123.0 crypto\n stake_amount: 15.129 base\n borrowed: 123.0 crypto\n time-periods: 10 minutes(rounds up to 1/24 time-period of 1 day)\n interest: borrowed * interest_rate * time-periods\n = 123.0 * 0.0005 * 1/24 = 0.0025625 crypto\n open_value: (amount * open_rate) - (amount * open_rate * fee)\n = (123 * 0.123) - (123 * 0.123 * 0.0025)\n = 15.091177499999999\n amount_closed: amount + interest = 123 + 0.0025625 = 123.0025625\n close_value: (amount_closed * close_rate) + (amount_closed * close_rate * fee)\n = (123.0025625 * 0.128) + (123.0025625 * 0.128 * 0.0025)\n = 15.78368882\n total_profit = open_value - close_value\n = 15.091177499999999 - 15.78368882\n = -0.6925113200000013\n total_profit_percentage = total_profit / stake_amount\n = -0.6925113200000013 / 15.129\n = -0.04577376693766946\n\n \"\"\"\n trade = Trade(\n pair='ETC/BTC',\n stake_amount=15.129,\n amount=123.0,\n amount_requested=123.0,\n fee_open=fee.return_value,\n fee_close=fee.return_value,\n open_rate=0.123,\n # close_rate=0.128,\n # close_profit=-0.04577376693766946,\n # close_profit_abs=-0.6925113200000013,\n exchange='binance',\n is_open=True,\n strategy='DefaultStrategy',\n timeframe=5,\n exit_reason='sell_signal',\n open_date=datetime.now(tz=timezone.utc) - timedelta(minutes=20),\n # close_date=datetime.now(tz=timezone.utc) - timedelta(minutes=2),\n is_short=True\n )\n o = Order.parse_from_ccxt_object(short_order(), 'ETC/BTC', 'sell')\n trade.orders.append(o)\n o = Order.parse_from_ccxt_object(exit_short_order(), 'ETC/BTC', 'sell')\n trade.orders.append(o)\n return trade"
},
{
"identifier": "mock_trade_usdt_1",
"path": "tests/conftest_trades_usdt.py",
"snippet": "def mock_trade_usdt_1(fee, is_short: bool):\n \"\"\"\n Simulate prod entry with open sell order\n \"\"\"\n trade = Trade(\n pair='LTC/USDT',\n stake_amount=20.0,\n amount=2.0,\n amount_requested=2.0,\n open_date=datetime.now(tz=timezone.utc) - timedelta(days=2, minutes=20),\n close_date=datetime.now(tz=timezone.utc) - timedelta(days=2, minutes=5),\n fee_open=fee.return_value,\n fee_close=fee.return_value,\n is_open=False,\n open_rate=10.0,\n close_rate=8.0,\n close_profit=-0.2,\n close_profit_abs=-4.09,\n exchange='binance',\n strategy='SampleStrategy',\n timeframe=5,\n is_short=is_short,\n )\n o = Order.parse_from_ccxt_object(mock_order_usdt_1(is_short), 'LTC/USDT', entry_side(is_short))\n trade.orders.append(o)\n o = Order.parse_from_ccxt_object(mock_order_usdt_1_exit(is_short),\n 'LTC/USDT', exit_side(is_short))\n trade.orders.append(o)\n return trade"
},
{
"identifier": "mock_trade_usdt_2",
"path": "tests/conftest_trades_usdt.py",
"snippet": "def mock_trade_usdt_2(fee, is_short: bool):\n \"\"\"\n Closed trade...\n \"\"\"\n trade = Trade(\n pair='NEO/USDT',\n stake_amount=200.0,\n amount=100.0,\n amount_requested=100.0,\n fee_open=fee.return_value,\n fee_close=fee.return_value,\n open_rate=2.0,\n close_rate=2.05,\n close_profit=0.05,\n close_profit_abs=3.9875,\n exchange='binance',\n is_open=False,\n strategy='StrategyTestV2',\n timeframe=5,\n enter_tag='TEST1',\n exit_reason='exit_signal',\n open_date=datetime.now(tz=timezone.utc) - timedelta(minutes=20),\n close_date=datetime.now(tz=timezone.utc) - timedelta(minutes=2),\n is_short=is_short,\n )\n o = Order.parse_from_ccxt_object(mock_order_usdt_2(is_short), 'NEO/USDT', entry_side(is_short))\n trade.orders.append(o)\n o = Order.parse_from_ccxt_object(\n mock_order_usdt_2_exit(is_short), 'NEO/USDT', exit_side(is_short))\n trade.orders.append(o)\n return trade"
},
{
"identifier": "mock_trade_usdt_3",
"path": "tests/conftest_trades_usdt.py",
"snippet": "def mock_trade_usdt_3(fee, is_short: bool):\n \"\"\"\n Closed trade\n \"\"\"\n trade = Trade(\n pair='XRP/USDT',\n stake_amount=30.0,\n amount=30.0,\n amount_requested=30.0,\n fee_open=fee.return_value,\n fee_close=fee.return_value,\n open_rate=1.0,\n close_rate=1.1,\n close_profit=0.1,\n close_profit_abs=2.8425,\n exchange='binance',\n is_open=False,\n strategy='StrategyTestV2',\n timeframe=5,\n enter_tag='TEST3',\n exit_reason='roi',\n open_date=datetime.now(tz=timezone.utc) - timedelta(minutes=20),\n close_date=datetime.now(tz=timezone.utc),\n is_short=is_short,\n )\n o = Order.parse_from_ccxt_object(mock_order_usdt_3(is_short), 'XRP/USDT', entry_side(is_short))\n trade.orders.append(o)\n o = Order.parse_from_ccxt_object(mock_order_usdt_3_exit(is_short),\n 'XRP/USDT', exit_side(is_short))\n trade.orders.append(o)\n return trade"
},
{
"identifier": "mock_trade_usdt_4",
"path": "tests/conftest_trades_usdt.py",
"snippet": "def mock_trade_usdt_4(fee, is_short: bool):\n \"\"\"\n Simulate prod entry\n \"\"\"\n trade = Trade(\n pair='NEO/USDT',\n stake_amount=20.0,\n amount=10.0,\n amount_requested=10.01,\n fee_open=fee.return_value,\n fee_close=fee.return_value,\n open_date=datetime.now(tz=timezone.utc) - timedelta(minutes=14),\n is_open=True,\n open_rate=2.0,\n exchange='binance',\n strategy='StrategyTestV2',\n timeframe=5,\n is_short=is_short,\n )\n o = Order.parse_from_ccxt_object(mock_order_usdt_4(is_short), 'NEO/USDT', entry_side(is_short))\n trade.orders.append(o)\n return trade"
},
{
"identifier": "mock_trade_usdt_5",
"path": "tests/conftest_trades_usdt.py",
"snippet": "def mock_trade_usdt_5(fee, is_short: bool):\n \"\"\"\n Simulate prod entry with stoploss\n \"\"\"\n trade = Trade(\n pair='XRP/USDT',\n stake_amount=20.0,\n amount=10.0,\n amount_requested=10.01,\n fee_open=fee.return_value,\n fee_close=fee.return_value,\n open_date=datetime.now(tz=timezone.utc) - timedelta(minutes=12),\n is_open=True,\n open_rate=2.0,\n exchange='binance',\n strategy='SampleStrategy',\n stoploss_order_id=f'prod_stoploss_3455_{direc(is_short)}',\n timeframe=5,\n is_short=is_short,\n )\n o = Order.parse_from_ccxt_object(mock_order_usdt_5(is_short), 'XRP/USDT', entry_side(is_short))\n trade.orders.append(o)\n o = Order.parse_from_ccxt_object(mock_order_usdt_5_stoploss(is_short), 'XRP/USDT', 'stoploss')\n trade.orders.append(o)\n return trade"
},
{
"identifier": "mock_trade_usdt_6",
"path": "tests/conftest_trades_usdt.py",
"snippet": "def mock_trade_usdt_6(fee, is_short: bool):\n \"\"\"\n Simulate prod entry with open sell order\n \"\"\"\n trade = Trade(\n pair='LTC/USDT',\n stake_amount=20.0,\n amount=2.0,\n amount_requested=2.0,\n open_date=datetime.now(tz=timezone.utc) - timedelta(minutes=5),\n fee_open=fee.return_value,\n fee_close=fee.return_value,\n is_open=True,\n open_rate=10.0,\n exchange='binance',\n strategy='SampleStrategy',\n timeframe=5,\n is_short=is_short,\n )\n o = Order.parse_from_ccxt_object(mock_order_usdt_6(is_short), 'LTC/USDT', entry_side(is_short))\n trade.orders.append(o)\n o = Order.parse_from_ccxt_object(mock_order_usdt_6_exit(is_short),\n 'LTC/USDT', exit_side(is_short))\n trade.orders.append(o)\n return trade"
},
{
"identifier": "mock_trade_usdt_7",
"path": "tests/conftest_trades_usdt.py",
"snippet": "def mock_trade_usdt_7(fee, is_short: bool):\n trade = Trade(\n pair='ADA/USDT',\n stake_amount=20.0,\n amount=10.0,\n amount_requested=10.0,\n fee_open=fee.return_value,\n fee_close=fee.return_value,\n is_open=True,\n open_date=datetime.now(tz=timezone.utc) - timedelta(minutes=17),\n open_rate=2.0,\n exchange='binance',\n strategy='StrategyTestV2',\n timeframe=5,\n is_short=is_short,\n )\n o = Order.parse_from_ccxt_object(mock_order_usdt_7(is_short), 'ADA/USDT', entry_side(is_short))\n trade.orders.append(o)\n return trade"
}
] | import json
import logging
import re
import numpy as np
import pandas as pd
import pytest
import builtins
from copy import deepcopy
from datetime import timedelta
from pathlib import Path
from typing import Optional
from unittest.mock import MagicMock, Mock, PropertyMock
from freqtrade import constants
from freqtrade.commands import Arguments
from freqtrade.data.converter import ohlcv_to_dataframe, trades_list_to_df
from freqtrade.edge import PairInfo
from freqtrade.enums import CandleType, MarginMode, RunMode, SignalDirection, TradingMode
from freqtrade.exchange import Exchange
from freqtrade.exchange.exchange import timeframe_to_minutes
from freqtrade.freqtradebot import FreqtradeBot
from freqtrade.persistence import LocalTrade, Order, Trade, init_db
from freqtrade.resolvers import ExchangeResolver
from freqtrade.util import dt_ts
from freqtrade.util.datetime_helpers import dt_now
from freqtrade.worker import Worker
from tests.conftest_trades import (leverage_trade, mock_trade_1, mock_trade_2, mock_trade_3,
mock_trade_4, mock_trade_5, mock_trade_6, short_trade)
from tests.conftest_trades_usdt import (mock_trade_usdt_1, mock_trade_usdt_2, mock_trade_usdt_3,
mock_trade_usdt_4, mock_trade_usdt_5, mock_trade_usdt_6,
mock_trade_usdt_7) | 7,390 | """
patch_freqtradebot(mocker, config)
return FreqtradeBot(config)
def get_patched_worker(mocker, config) -> Worker:
"""
This function patches _init_modules() to not call dependencies
:param mocker: a Mocker object to apply patches
:param config: Config to pass to the bot
:return: Worker
"""
patch_freqtradebot(mocker, config)
return Worker(args=None, config=config)
def patch_get_signal(
freqtrade: FreqtradeBot,
enter_long=True,
exit_long=False,
enter_short=False,
exit_short=False,
enter_tag: Optional[str] = None,
exit_tag: Optional[str] = None,
) -> None:
"""
    :param freqtrade: FreqtradeBot instance whose strategy signal methods are patched
:return: None
"""
    # returns (Signal-direction, signal_name)
def patched_get_entry_signal(*args, **kwargs):
direction = None
if enter_long and not any([exit_long, enter_short]):
direction = SignalDirection.LONG
if enter_short and not any([exit_short, enter_long]):
direction = SignalDirection.SHORT
return direction, enter_tag
freqtrade.strategy.get_entry_signal = patched_get_entry_signal
def patched_get_exit_signal(pair, timeframe, dataframe, is_short):
if is_short:
return enter_short, exit_short, exit_tag
else:
return enter_long, exit_long, exit_tag
# returns (enter, exit)
freqtrade.strategy.get_exit_signal = patched_get_exit_signal
freqtrade.exchange.refresh_latest_ohlcv = lambda p: None
def create_mock_trades(fee, is_short: Optional[bool] = False, use_db: bool = True):
"""
Create some fake trades ...
:param is_short: Optional bool, None creates a mix of long and short trades.
"""
def add_trade(trade):
if use_db:
Trade.session.add(trade)
else:
LocalTrade.add_bt_trade(trade)
is_short1 = is_short if is_short is not None else True
is_short2 = is_short if is_short is not None else False
# Simulate dry_run entries
trade = mock_trade_1(fee, is_short1)
add_trade(trade)
trade = mock_trade_2(fee, is_short1)
add_trade(trade)
trade = mock_trade_3(fee, is_short2)
add_trade(trade)
trade = mock_trade_4(fee, is_short2)
add_trade(trade)
trade = mock_trade_5(fee, is_short2)
add_trade(trade)
trade = mock_trade_6(fee, is_short1)
add_trade(trade)
if use_db:
Trade.commit()
def create_mock_trades_with_leverage(fee, use_db: bool = True):
"""
Create some fake trades ...
"""
if use_db:
Trade.session.rollback()
def add_trade(trade):
if use_db:
Trade.session.add(trade)
else:
LocalTrade.add_bt_trade(trade)
# Simulate dry_run entries
trade = mock_trade_1(fee, False)
add_trade(trade)
trade = mock_trade_2(fee, False)
add_trade(trade)
trade = mock_trade_3(fee, False)
add_trade(trade)
trade = mock_trade_4(fee, False)
add_trade(trade)
trade = mock_trade_5(fee, False)
add_trade(trade)
trade = mock_trade_6(fee, False)
add_trade(trade)
| # pragma pylint: disable=missing-docstring
logging.getLogger('').setLevel(logging.INFO)
# Do not mask numpy errors as warnings that no one reads; raise the exception
np.seterr(all='raise')
CURRENT_TEST_STRATEGY = 'StrategyTestV3'
TRADE_SIDES = ('long', 'short')
EXMS = 'freqtrade.exchange.exchange.Exchange'
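# Register the --longrun pytest option so the slow ccxt-compat tests are opt-in.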
def pytest_addoption(parser):
parser.addoption('--longrun', action='store_true', dest="longrun",
default=False, help="Enable long-run tests (ccxt compat)")
def pytest_configure(config):
config.addinivalue_line(
"markers", "longrun: mark test that is running slowly and should not be run regularily"
)
if not config.option.longrun:
setattr(config.option, 'markexpr', 'not longrun')
def log_has(line, logs):
"""Check if line is found on some caplog's message."""
return any(line == message for message in logs.messages)
def log_has_when(line, logs, when):
"""Check if line is found in caplog's messages during a specified stage"""
return any(line == message.message for message in logs.get_records(when))
def log_has_re(line, logs):
"""Check if line matches some caplog's message."""
return any(re.match(line, message) for message in logs.messages)
def num_log_has(line, logs):
"""Check how many times line is found in caplog's messages."""
return sum(line == message for message in logs.messages)
def num_log_has_re(line, logs):
"""Check how many times line matches caplog's messages."""
return sum(bool(re.match(line, message)) for message in logs.messages)
def get_args(args):
return Arguments(args).get_parsed_arg()
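# Build a deterministic (seeded) random OHLCV dataframe for use in tests.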
def generate_test_data(timeframe: str, size: int, start: str = '2020-07-05'):
np.random.seed(42)
tf_mins = timeframe_to_minutes(timeframe)
base = np.random.normal(20, 2, size=size)
date = pd.date_range(start, periods=size, freq=f'{tf_mins}min', tz='UTC')
df = pd.DataFrame({
'date': date,
'open': base,
'high': base + np.random.normal(2, 1, size=size),
'low': base - np.random.normal(2, 1, size=size),
'close': base + np.random.normal(0, 1, size=size),
'volume': np.random.normal(200, size=size)
}
)
df = df.dropna()
return df
def generate_test_data_raw(timeframe: str, size: int, start: str = '2020-07-05'):
""" Generates data in the ohlcv format used by ccxt """
df = generate_test_data(timeframe, size, start)
df['date'] = df.loc[:, 'date'].view(np.int64) // 1000 // 1000
return list(list(x) for x in zip(*(df[x].values.tolist() for x in df.columns)))
# Source: https://stackoverflow.com/questions/29881236/how-to-mock-asyncio-coroutines
# TODO: This should be replaced with AsyncMock once support for python 3.7 is dropped.
def get_mock_coro(return_value=None, side_effect=None):
async def mock_coro(*args, **kwargs):
if side_effect:
if isinstance(side_effect, list):
effect = side_effect.pop(0)
else:
effect = side_effect
if isinstance(effect, Exception):
raise effect
if callable(effect):
return effect(*args, **kwargs)
return effect
else:
return return_value
return Mock(wraps=mock_coro)
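# Patch config loading so tests receive the given config dict instead of reading a file.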
def patched_configuration_load_config_file(mocker, config) -> None:
mocker.patch(
'freqtrade.configuration.load_config.load_config_file',
lambda *args, **kwargs: config
)
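# Patch Exchange internals (validation, markets, ccxt init) so tests never contact a real exchange.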
def patch_exchange(
mocker,
api_mock=None,
id='binance',
mock_markets=True,
mock_supported_modes=True
) -> None:
mocker.patch(f'{EXMS}._load_async_markets', return_value={})
mocker.patch(f'{EXMS}.validate_config', MagicMock())
mocker.patch(f'{EXMS}.validate_timeframes', MagicMock())
mocker.patch(f'{EXMS}.id', PropertyMock(return_value=id))
mocker.patch(f'{EXMS}.name', PropertyMock(return_value=id.title()))
mocker.patch(f'{EXMS}.precisionMode', PropertyMock(return_value=2))
if mock_markets:
if isinstance(mock_markets, bool):
mock_markets = get_markets()
mocker.patch(f'{EXMS}.markets', PropertyMock(return_value=mock_markets))
if mock_supported_modes:
mocker.patch(
f'freqtrade.exchange.{id}.{id.capitalize()}._supported_trading_mode_margin_pairs',
PropertyMock(return_value=[
(TradingMode.MARGIN, MarginMode.CROSS),
(TradingMode.MARGIN, MarginMode.ISOLATED),
(TradingMode.FUTURES, MarginMode.CROSS),
(TradingMode.FUTURES, MarginMode.ISOLATED)
])
)
if api_mock:
mocker.patch(f'{EXMS}._init_ccxt', return_value=api_mock)
else:
mocker.patch(f'{EXMS}._init_ccxt', MagicMock())
mocker.patch(f'{EXMS}.timeframes', PropertyMock(
return_value=['5m', '15m', '1h', '1d']))
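# Build an Exchange instance on top of the patched mocks, falling back to the base Exchange
# class if the exchange-specific resolver import fails.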
def get_patched_exchange(mocker, config, api_mock=None, id='binance',
mock_markets=True, mock_supported_modes=True) -> Exchange:
patch_exchange(mocker, api_mock, id, mock_markets, mock_supported_modes)
config['exchange']['name'] = id
try:
exchange = ExchangeResolver.load_exchange(config, load_leverage_tiers=True)
except ImportError:
exchange = Exchange(config)
return exchange
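# Patch the wallet to always report the given free balance.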
def patch_wallet(mocker, free=999.9) -> None:
mocker.patch('freqtrade.wallets.Wallets.get_free', MagicMock(
return_value=free
))
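# Patch whitelist refresh to simply return the configured pair whitelist.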
def patch_whitelist(mocker, conf) -> None:
mocker.patch('freqtrade.freqtradebot.FreqtradeBot._refresh_active_whitelist',
MagicMock(return_value=conf['exchange']['pair_whitelist']))
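# Patch Edge with two pre-computed pair results and a mocked calculate().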
def patch_edge(mocker) -> None:
# "ETH/BTC",
# "LTC/BTC",
# "XRP/BTC",
# "NEO/BTC"
mocker.patch('freqtrade.edge.Edge._cached_pairs', mocker.PropertyMock(
return_value={
'NEO/BTC': PairInfo(-0.20, 0.66, 3.71, 0.50, 1.71, 10, 25),
'LTC/BTC': PairInfo(-0.21, 0.66, 3.71, 0.50, 1.71, 11, 20),
}
))
mocker.patch('freqtrade.edge.Edge.calculate', MagicMock(return_value=True))
# Functions for recurrent object patching
def patch_freqtradebot(mocker, config) -> None:
"""
    This function patches _init_modules() to not call dependencies
:param mocker: a Mocker object to apply patches
:param config: Config to pass to the bot
:return: None
"""
mocker.patch('freqtrade.freqtradebot.RPCManager', MagicMock())
patch_exchange(mocker)
mocker.patch('freqtrade.freqtradebot.RPCManager._init', MagicMock())
mocker.patch('freqtrade.freqtradebot.RPCManager.send_msg', MagicMock())
patch_whitelist(mocker, config)
mocker.patch('freqtrade.freqtradebot.ExternalMessageConsumer')
mocker.patch('freqtrade.configuration.config_validation._validate_consumers')
def get_patched_freqtradebot(mocker, config) -> FreqtradeBot:
"""
This function patches _init_modules() to not call dependencies
:param mocker: a Mocker object to apply patches
:param config: Config to pass to the bot
:return: FreqtradeBot
"""
patch_freqtradebot(mocker, config)
return FreqtradeBot(config)
def get_patched_worker(mocker, config) -> Worker:
"""
This function patches _init_modules() to not call dependencies
:param mocker: a Mocker object to apply patches
:param config: Config to pass to the bot
:return: Worker
"""
patch_freqtradebot(mocker, config)
return Worker(args=None, config=config)
def patch_get_signal(
freqtrade: FreqtradeBot,
enter_long=True,
exit_long=False,
enter_short=False,
exit_short=False,
enter_tag: Optional[str] = None,
exit_tag: Optional[str] = None,
) -> None:
"""
    :param freqtrade: FreqtradeBot instance whose strategy signal methods are patched
:return: None
"""
    # returns (Signal-direction, signal_name)
def patched_get_entry_signal(*args, **kwargs):
direction = None
if enter_long and not any([exit_long, enter_short]):
direction = SignalDirection.LONG
if enter_short and not any([exit_short, enter_long]):
direction = SignalDirection.SHORT
return direction, enter_tag
freqtrade.strategy.get_entry_signal = patched_get_entry_signal
def patched_get_exit_signal(pair, timeframe, dataframe, is_short):
if is_short:
return enter_short, exit_short, exit_tag
else:
return enter_long, exit_long, exit_tag
# returns (enter, exit)
freqtrade.strategy.get_exit_signal = patched_get_exit_signal
freqtrade.exchange.refresh_latest_ohlcv = lambda p: None
def create_mock_trades(fee, is_short: Optional[bool] = False, use_db: bool = True):
"""
Create some fake trades ...
:param is_short: Optional bool, None creates a mix of long and short trades.
"""
def add_trade(trade):
if use_db:
Trade.session.add(trade)
else:
LocalTrade.add_bt_trade(trade)
is_short1 = is_short if is_short is not None else True
is_short2 = is_short if is_short is not None else False
# Simulate dry_run entries
trade = mock_trade_1(fee, is_short1)
add_trade(trade)
trade = mock_trade_2(fee, is_short1)
add_trade(trade)
trade = mock_trade_3(fee, is_short2)
add_trade(trade)
trade = mock_trade_4(fee, is_short2)
add_trade(trade)
trade = mock_trade_5(fee, is_short2)
add_trade(trade)
trade = mock_trade_6(fee, is_short1)
add_trade(trade)
if use_db:
Trade.commit()
def create_mock_trades_with_leverage(fee, use_db: bool = True):
"""
Create some fake trades ...
"""
if use_db:
Trade.session.rollback()
def add_trade(trade):
if use_db:
Trade.session.add(trade)
else:
LocalTrade.add_bt_trade(trade)
# Simulate dry_run entries
trade = mock_trade_1(fee, False)
add_trade(trade)
trade = mock_trade_2(fee, False)
add_trade(trade)
trade = mock_trade_3(fee, False)
add_trade(trade)
trade = mock_trade_4(fee, False)
add_trade(trade)
trade = mock_trade_5(fee, False)
add_trade(trade)
trade = mock_trade_6(fee, False)
add_trade(trade)
| trade = short_trade(fee) | 7 | 2023-11-07 18:46:03+00:00 | 12k |
awslabs/optimizing-multitask-training-through-dynamic-pipelines | scripts/simulation/compare_batching_methods.py | [
{
"identifier": "ProfileBasedCostModelWithRC",
"path": "dynapipe/data_opt/cost_models.py",
"snippet": "class ProfileBasedCostModelWithRC(object):\n \"\"\"\n Wrapper class for multiple ProfileBasedCostModel objects, one for each\n tensor parallel degree and recomputation method.\n \"\"\"\n\n def __init__(\n self,\n profile_paths=None,\n _serialized_cms: Optional[Dict[Tuple[int, str], bytes]] = None,\n ) -> None:\n self.cost_models: dict[str, ProfileBasedCostModel] = {}\n if _serialized_cms is not None:\n for cm_key, serialized_cm in _serialized_cms.items():\n self.cost_models[cm_key] = ProfileBasedCostModel.deserialize(\n serialized_cm\n )\n return\n if not isinstance(profile_paths, list):\n # profile_paths is a dir\n assert os.path.isdir(profile_paths), (\n f\"Profile path {profile_paths} is not a directory \"\n \"or list of paths\"\n )\n profile_paths = [\n os.path.join(profile_paths, x)\n for x in os.listdir(profile_paths)\n if x.startswith(\"microbench\") and x.endswith(\"txt\")\n ]\n # separate paths by cost model key (tp_size, rc_type)\n self.per_key_profile_paths = defaultdict(list)\n for path in profile_paths:\n cm_key = self._parse_cm_key(path)\n self.per_key_profile_paths[cm_key].append(path)\n for cm_key, paths in self.per_key_profile_paths.items():\n self.cost_models[cm_key] = ProfileBasedCostModel(paths)\n\n def _parse_cm_key(self, filename):\n basename = os.path.basename(filename)\n if \"rc_full_uniform\" in basename:\n rc_type = \"full\"\n elif \"rc_selective\" in basename:\n rc_type = \"selective\"\n else:\n rc_type = \"none\"\n tp_size = int(basename.split(\"_\")[1][2:])\n return tp_size, rc_type\n\n def _check_valid_cm_key(self, cm_key):\n assert (\n cm_key in self.cost_models\n ), f\"Key {cm_key} not recorded in profile.\"\n\n def is_valid_stage(self, tp_size, rc_type, stage):\n self._check_valid_cm_key((tp_size, rc_type))\n return self.cost_models[(tp_size, rc_type)].is_valid_stage(stage)\n\n def valid_stages(self, tp_size, rc_type):\n self._check_valid_cm_key((tp_size, rc_type))\n return self.cost_models[(tp_size, rc_type)].valid_stages()\n\n def supported_sequence_lengths(self, tp_size, rc_type, stage):\n self._check_valid_cm_key((tp_size, rc_type))\n return self.cost_models[(tp_size, rc_type)].supported_sequence_lengths(\n stage\n )\n\n def get_cost(\n self,\n tp_size,\n rc_type,\n stage,\n seq_len,\n mbs,\n ):\n \"\"\"Select the corresponding cost model based on TP degree and\n recomputation type and get the computation cost.\n \"\"\"\n self._check_valid_cm_key((tp_size, rc_type))\n return self.cost_models[(tp_size, rc_type)].get_cost(\n stage, seq_len, mbs\n )\n\n def get_stored_activation(self, tp_size, rc_type, stage, seq_len, mbs):\n \"\"\"Select the corresponding cost model based on TP degree and\n recomputation type and get the stored activation.\n \"\"\"\n self._check_valid_cm_key((tp_size, rc_type))\n return self.cost_models[(tp_size, rc_type)].get_stored_activation(\n stage, seq_len, mbs\n )\n\n def get_peak_activation(self, tp_size, rc_type, stage, seq_len, mbs):\n \"\"\"Select the corresponding cost model based on TP degree and\n recomputation type and get the peak activation.\n \"\"\"\n self._check_valid_cm_key((tp_size, rc_type))\n return self.cost_models[(tp_size, rc_type)].get_peak_activation(\n stage, seq_len, mbs\n )\n\n def get_model_state(\n self,\n tp_size,\n rc_type,\n stage,\n n_shards=1,\n zero_stage=0,\n param_factor=None,\n ):\n \"\"\"Select the corresponding cost model based on TP degree and\n recomputation type and get the model state.\n \"\"\"\n self._check_valid_cm_key((tp_size, rc_type))\n return self.cost_models[(tp_size, 
rc_type)].get_model_state(\n stage,\n n_shards=n_shards,\n zero_stage=zero_stage,\n param_factor=param_factor,\n )\n\n def get_raw_cost_model(self, tp_size, rc_type):\n \"\"\"Get the raw cost model for the given TP degree and recomputation\n type.\n \"\"\"\n self._check_valid_cm_key((tp_size, rc_type))\n return self.cost_models[(tp_size, rc_type)]\n\n def save(self, path):\n serialized_dict = {}\n for cm_key, cost_model in self.cost_models.items():\n serialized_dict[cm_key] = cost_model.serialize()\n with open(path, \"wb\") as f:\n pickle.dump(serialized_dict, f)\n\n @classmethod\n def load(cls, path):\n with open(path, \"rb\") as f:\n serialized_dict = pickle.load(f)\n return cls(_serialized_cms=serialized_dict)"
},
{
"identifier": "DataAssignmentOptimizer",
"path": "dynapipe/data_opt/optimizer.py",
"snippet": "class DataAssignmentOptimizer(object):\n \"\"\"Data assignment optimizer.\n\n Optimizes the assignment of a mini-batch of data into micro-batches.\n \"\"\"\n\n def __init__(\n self,\n cost_model: ProfileBasedCostModelWithRC,\n model_spec: TransformerModelSpec,\n n_executors: int,\n n_layers_per_stage: int,\n n_chunks_per_device: int = 1,\n dp_size: int = 1,\n tp_size: int = 1,\n zero_stage: int = 0,\n device_memory_limit: float = float(\"inf\"),\n round_seqlen_multiple=8,\n per_mb_memory_fraction=None,\n len_pack_sep_tokens=1,\n len_decoder_additional_tokens=2,\n seqlen_offset=0,\n ):\n \"\"\"Optimizer for assigning data samples into micro-batches.\n cost_model: cost model for the model used\n model_spec: model specification\n n_executors: number of stages of the pipelined model\n n_layers_per_stage: number of layers per each pipeline stage\n n_chunks_per_device: number of chunks per device\n (> 1 indicating interleaved schedule)\n dp_size: data parallelism degree\n tp_size: tensor parallelism degree\n zero_stage: stage of ZeRO optimizer\n device_memory_limit: memory limit in MB (MegaBytes)\n round_seqlen_multiple: always round sequence length to multiple of\n this number, required for some kernels\n default: 8\n len_pack_sep_tokens: number of tokens used to separate samples in the\n packed sequence, only used when enable_packing\n is True during optimization.\n len_decoder_additional_tokens: number of additional tokens added to\n the decoder sequence length other than\n the target sequence, e.g. <bos>, <eos>\n seqlen_offset: should be set 1 for decoder only models, whose input\n and target sequences are data sequence length - 1\n 0 for encoder-decoder models.\n \"\"\"\n self.cost_model = cost_model\n self.n_executors = n_executors\n self.n_layers_per_stage = n_layers_per_stage\n # create memory model\n self.model_spec = model_spec\n self.memory_limit = device_memory_limit\n self.dp_size = dp_size\n self.tp_size = tp_size\n self.zero_stage = zero_stage\n self.round_seqlen_multiple = round_seqlen_multiple\n self.len_pack_sep_tokens = len_pack_sep_tokens\n self.len_decoder_additional_tokens = len_decoder_additional_tokens\n self.n_chunks_per_device = n_chunks_per_device\n self.per_mb_memory_fraction = per_mb_memory_fraction\n self.seqlen_offset = seqlen_offset\n\n def _round_seqlen(self, seqlen, decoder=False):\n if decoder:\n seqlen += self.len_decoder_additional_tokens\n seqlen -= self.seqlen_offset\n return (\n (seqlen + self.round_seqlen_multiple - 1)\n // self.round_seqlen_multiple\n * self.round_seqlen_multiple\n + self.seqlen_offset\n )\n\n def _solve_sample_order_tsp_problem(\n self,\n sample_sequence_lengths,\n decoder_sample_sequence_lengths,\n bottleneck_tsp=True,\n dist_function=\"sum\",\n use_clustering=True,\n distance_threshold=16,\n ):\n \"\"\"Solve the TSP problem to determine the sample order.\"\"\"\n if dist_function == \"sum\":\n\n def _f_dist(x, y):\n return abs(int(x[0]) - int(y[0])) + abs(int(x[1]) - int(y[1]))\n\n elif dist_function == \"max\":\n\n def _f_dist(x, y):\n return max(\n abs(int(x[0]) - int(y[0])), abs(int(x[1]) - int(y[1]))\n )\n\n elif dist_function == \"square\":\n\n def _f_dist(x, y):\n return (int(x[0]) - int(y[0])) ** 2 + (\n int(x[1]) - int(y[1])\n ) ** 2\n\n else:\n raise ValueError(\n \"Unknown distance function: {}\".format(dist_function)\n )\n\n def _get_distance_matrix(points):\n # add a dummy point at the beginning\n # to transform it into an open TSP problem\n distance_matrix = [[0] * (len(points) + 1)]\n for x in points:\n row = 
[0]\n for y in points:\n row.append(_f_dist(x, y))\n distance_matrix.append(row)\n return distance_matrix\n\n input_points = list(\n zip(sample_sequence_lengths, decoder_sample_sequence_lengths)\n )\n if use_clustering:\n vectors_np = np.array(input_points)\n clustering = AgglomerativeClustering(\n n_clusters=None,\n distance_threshold=distance_threshold,\n linkage=\"complete\",\n ).fit(vectors_np)\n labels = clustering.labels_\n n_clusters = max(labels) + 1\n cluster_to_samples = [[] for _ in range(n_clusters)]\n cluster_to_data = [[] for _ in range(n_clusters)]\n for sample_idx, label in enumerate(labels):\n cluster_to_samples[label].append(sample_idx)\n cluster_to_data[label].append(input_points[sample_idx])\n # compute cluster centroids\n cluster_to_center = [None] * n_clusters\n for cluster_label, data in enumerate(cluster_to_data):\n cluster_to_center[cluster_label] = tuple(np.mean(data, axis=0))\n # compute tsp for cluster centroids\n distance_matrix = np.array(_get_distance_matrix(cluster_to_center))\n permutation = list(\n np.array(\n elkai.solve_int_matrix(\n distance_matrix, 1, bottleneck=bottleneck_tsp\n )\n )\n - 1\n )[1:]\n # reconstruct orig order\n result = []\n for cluster_label in permutation:\n result += cluster_to_samples[cluster_label]\n # sanity check result is a valid permutation\n assert sorted(result) == list(range(len(result)))\n return result\n\n distance_matrix = np.array(_get_distance_matrix(input_points))\n permutation = list(\n np.array(\n elkai.solve_int_matrix(\n distance_matrix, 1, bottleneck=bottleneck_tsp\n )\n )\n - 1\n )[1:]\n return permutation\n\n def _pack(\n self,\n sequence: list,\n current_enc_length,\n current_dec_length,\n target_enc_length,\n target_dec_length,\n next_idx,\n samples_with_ids,\n consumed,\n ):\n for j in range(next_idx, len(samples_with_ids)):\n if consumed[j]:\n continue\n (\n seqlen_to_pack,\n dec_seqlen_to_pack,\n sample_id_to_pack,\n ) = samples_with_ids[j]\n if (\n current_enc_length + seqlen_to_pack <= target_enc_length\n and current_dec_length + dec_seqlen_to_pack\n <= target_dec_length\n ):\n sequence.append(sample_id_to_pack)\n current_enc_length += seqlen_to_pack\n current_dec_length += dec_seqlen_to_pack\n consumed[j] = True\n return current_enc_length, current_dec_length\n\n def _uniform_partition(self, samples_with_ids, microbatch_size):\n max_sequence_length = max([x[0] for x in samples_with_ids])\n max_decoder_sequence_length = max([x[1] for x in samples_with_ids])\n\n # round sequence length to multiple of round_seqlen_multiple\n max_sequence_length = self._round_seqlen(max_sequence_length)\n max_decoder_sequence_length = self._round_seqlen(\n max_decoder_sequence_length, decoder=True\n )\n # pack all sequences into fixed sequence length\n target_src_seqlen = max_sequence_length\n target_tgt_seqlen = (\n max_decoder_sequence_length - self.len_decoder_additional_tokens\n )\n consumed = [False] * len(samples_with_ids)\n sequences = []\n for seqlen, dec_seqlen, idx in samples_with_ids:\n if consumed[idx]:\n continue\n curr_sequence = []\n curr_sequence_seqlen = seqlen\n curr_sequence_dec_seqlen = dec_seqlen\n curr_sequence.append(idx)\n curr_sequence_seqlen, curr_sequence_dec_seqlen = self._pack(\n curr_sequence,\n curr_sequence_seqlen,\n curr_sequence_dec_seqlen,\n target_src_seqlen,\n target_tgt_seqlen,\n idx + 1,\n samples_with_ids,\n consumed,\n )\n sequences.append(curr_sequence)\n consumed[idx] = True\n # divide sequences into microbatches\n microbatches = []\n for i in range(0, len(sequences), 
microbatch_size):\n microbatches.append(sequences[i : i + microbatch_size])\n return microbatches\n\n def _token_based_partition(self, samples_with_ids, microbatch_tokens):\n microbatches = []\n current_microbatch_tokens = 0\n current_microbatch = []\n for seqlen, dec_seqlen, idx in samples_with_ids:\n rounded_seqlen = self._round_seqlen(seqlen)\n rounded_dec_seqlen = self._round_seqlen(dec_seqlen, decoder=True)\n if (\n current_microbatch_tokens + rounded_seqlen + rounded_dec_seqlen\n > microbatch_tokens\n ):\n if len(current_microbatch) > 0:\n microbatches.append(current_microbatch.copy())\n current_microbatch = []\n current_microbatch_tokens = 0\n current_microbatch.append([idx])\n current_microbatch_tokens += seqlen + dec_seqlen\n if len(current_microbatch) > 0:\n microbatches.append(current_microbatch)\n return microbatches\n\n def _subset_partition(self, micro_batch_costs):\n # partition the microbatches into subsets\n # create a mapping from microbatch index to its cost\n mb_cost_map = {}\n for i, mb in enumerate(micro_batch_costs):\n mb_cost_map[i] = mb\n return prtpy.partition(\n algorithm=prtpy.partitioning.kk,\n numbins=self.dp_size,\n items=mb_cost_map,\n )\n\n def generate_microbatches(\n self,\n sample_sequence_lengths,\n available_rc_types=None,\n decoder_sample_sequence_lengths=None,\n disable_tsp=False,\n bottleneck_tsp=False,\n tsp_dist_function=\"sum\",\n tsp_use_clustering=True,\n tsp_cluster_distance_threshold=16,\n partition_method=\"dp\",\n uniform_partition_batch_size=None,\n token_based_partition_mb_tokens=None,\n enable_packing=False,\n ):\n if available_rc_types is None:\n available_rc_types = [\"none\", \"selective\", \"full\"]\n if (\n self.n_chunks_per_device > 1\n and decoder_sample_sequence_lengths is None\n ):\n raise ValueError(\n \"Interleaved schedule with non-encoder-decoder models \"\n \"are not supported yet.\"\n )\n # stage 1: determine the order of samples\n if decoder_sample_sequence_lengths is None:\n samples_with_ids = [\n (seqlen, 0, i)\n for i, seqlen in enumerate(sample_sequence_lengths)\n ]\n # single sequence, sorting suffices\n samples_with_ids.sort(reverse=True)\n else:\n if partition_method == \"uniform\":\n assert uniform_partition_batch_size is not None, (\n \"uniform_partition_batch_size must be specified \"\n \"when partition_method is 'uniform'\"\n )\n # uniform partitioning, don't need to solve TSP\n samples_with_ids = [\n (seqlen, dec_seqlen, i)\n for i, (seqlen, dec_seqlen) in enumerate(\n zip(\n sample_sequence_lengths,\n decoder_sample_sequence_lengths,\n )\n )\n ]\n else:\n # multiple sequences, use TSP or 2 level sorting\n # to find the optimal order\n if disable_tsp:\n samples_with_ids = [\n (seqlen, dec_seqlen, i)\n for i, (seqlen, dec_seqlen) in enumerate(\n zip(\n sample_sequence_lengths,\n decoder_sample_sequence_lengths,\n )\n )\n ]\n # sort first by encoder sequence length, then by decoder\n samples_with_ids.sort(reverse=True)\n else:\n permutation = self._solve_sample_order_tsp_problem(\n sample_sequence_lengths,\n decoder_sample_sequence_lengths,\n bottleneck_tsp=bottleneck_tsp,\n dist_function=tsp_dist_function,\n use_clustering=tsp_use_clustering,\n distance_threshold=tsp_cluster_distance_threshold,\n )\n samples_with_ids = [\n (\n sample_sequence_lengths[i],\n decoder_sample_sequence_lengths[i],\n int(i),\n )\n for i in permutation\n ]\n # stage 2: splitting and packing\n # we first calculate the model states memory and subtract it\n # from the memory limit\n # We assume that GPU0 is the bottleneck GPU, which holds 
Embedding\n # and Encoder of the model if not interleaved, and holds Embedding,\n # Encoder and Decoder of the model if interleaved.\n # rc_type doesn't matter here\n model_states_memory = self.cost_model.get_model_state(\n self.tp_size,\n \"none\",\n \"Embedding\",\n n_shards=self.dp_size,\n zero_stage=self.zero_stage,\n )\n encoder_model_state = self.cost_model.get_model_state(\n self.tp_size,\n \"none\",\n \"Encoder\",\n n_shards=self.dp_size,\n zero_stage=self.zero_stage,\n )\n if decoder_sample_sequence_lengths is not None:\n decoder_model_state = self.cost_model.get_model_state(\n self.tp_size,\n \"none\",\n \"Decoder\",\n n_shards=self.dp_size,\n zero_stage=self.zero_stage,\n )\n else:\n decoder_model_state = 0\n if self.n_chunks_per_device == 1:\n # not interleaved\n layer_states = max(encoder_model_state, decoder_model_state)\n else:\n # interleaved\n layer_states = encoder_model_state + decoder_model_state\n layer_states = layer_states * self.n_chunks_per_device / 2\n layer_states *= self.n_layers_per_stage\n model_states_memory += layer_states\n available_memory = self.memory_limit - model_states_memory\n\n if (\n self.per_mb_memory_fraction is not None\n and self.per_mb_memory_fraction > 0\n ):\n preferred_memory_limit = (\n self.per_mb_memory_fraction * available_memory\n )\n else:\n preferred_memory_limit = available_memory / self.n_executors\n for memory_type, memory_limit in [\n (\"preferred\", preferred_memory_limit),\n (\"available\", available_memory),\n ]:\n # first try to find a partition that do not need special schedule\n # if not found, only make sure that each single microbatch\n # fits in memory\n for rc_type in available_rc_types:\n if partition_method == \"dp\":\n # use dynamic programming to find optimal\n # sequential partition\n (\n objective_value,\n microbatches,\n microbatch_costs,\n ) = cpp_consecutive_partition_dp(\n self.cost_model.get_raw_cost_model(\n self.tp_size, rc_type\n ),\n self.n_executors,\n self.n_chunks_per_device,\n self.n_layers_per_stage,\n self.dp_size,\n memory_limit,\n available_memory,\n samples_with_ids,\n enable_packing=enable_packing,\n round_seqlen_multiple=self.round_seqlen_multiple,\n len_pack_sep_tokens=self.len_pack_sep_tokens,\n len_decoder_additional_tokens=self.len_decoder_additional_tokens, # noqa\n )\n elif partition_method == \"token_based\":\n assert token_based_partition_mb_tokens is not None, (\n \"token_based_partition_mb_tokens must be specified \"\n \"when partition_method is 'token_based'\"\n )\n # token based partitioning\n microbatches = self._token_based_partition(\n samples_with_ids, token_based_partition_mb_tokens\n )\n # dummy objective value, not used\n objective_value = (\n 0,\n 0,\n 0,\n [0] * len(microbatches),\n [0] * len(microbatches),\n )\n # dummy microbatch costs\n microbatch_costs = [0] * len(microbatches)\n elif partition_method == \"uniform\":\n microbatches = self._uniform_partition(\n samples_with_ids, uniform_partition_batch_size\n )\n # dummy objective value, not used\n objective_value = (\n 0,\n 0,\n 0,\n [0] * len(microbatches),\n [0] * len(microbatches),\n )\n # dummy microbatch costs\n microbatch_costs = [0] * len(microbatches)\n else:\n raise ValueError(\n \"unknown partition method: {}\".format(partition_method)\n )\n if math.isinf(objective_value[0]) or math.isnan(\n objective_value[0]\n ):\n # memory limit is too small\n continue\n # sanity check microbatches:\n # make sure that each index appears once and only once\n all_indices = set()\n for mb in microbatches:\n for sample in mb:\n 
for index in sample:\n assert (\n index not in all_indices\n ), \"index {} appears more than once\".format(index)\n all_indices.add(index)\n assert sorted(list(all_indices)) == list(\n range(len(samples_with_ids))\n ), (\n \"not all indices appear in microbatches: \"\n \"{} v.s. {}. Input seqlens: {}, target seqlens: {}\".format(\n len(all_indices),\n len(samples_with_ids),\n sample_sequence_lengths,\n decoder_sample_sequence_lengths,\n )\n )\n # partition microbatches into subsets, each for one data\n # parallel group\n if self.dp_size > 1:\n partitioned_microbatch_ids = self._subset_partition(\n microbatch_costs\n )\n partitioned_microbatches = []\n for mb_ids in partitioned_microbatch_ids:\n partitioned_microbatches.append(\n [microbatches[i] for i in sorted(mb_ids)]\n )\n else:\n partitioned_microbatches = [microbatches]\n return (\n objective_value,\n partitioned_microbatches,\n memory_type,\n rc_type,\n (available_memory, model_states_memory, memory_limit),\n )\n # no feasible microbatch split found\n return None, None, None, None, None"
},
{
"identifier": "TransformerModelSpec",
"path": "dynapipe/model.py",
"snippet": "class TransformerModelSpec:\n # Default setting:\n # * mlp_hidden_size = 4x hidden_dim\n # * kv_channels = hidden_dim // num_attn_heads\n # * use FP16 mixed precision training with Adam optimizer.\n n_encoder_layers: int\n n_decoder_layers: int\n hidden_dim: int\n num_attn_heads: int\n mlp_hidden_dim: Union[None, int] = None\n kv_channels: Union[None, int] = None\n bytes_per_element: int = 2\n optimizer_state_multiplier: int = 12\n\n def __post_init__(self):\n if self.mlp_hidden_dim is None:\n # if not specified, use the 4x hidden dim as it is the norm\n self.mlp_hidden_dim = self.hidden_dim * 4\n if self.kv_channels is None:\n # if not specified, use the hidden_dim // num_attn_heads\n assert self.hidden_dim % self.num_attn_heads == 0\n self.kv_channels = self.hidden_dim // self.num_attn_heads\n\n def serialize(self) -> bytes:\n def _serialize_int(x: int):\n return x.to_bytes(4, \"little\")\n\n return b\"\".join(\n [\n _serialize_int(x)\n for x in [\n self.n_encoder_layers,\n self.n_decoder_layers,\n self.hidden_dim,\n self.num_attn_heads,\n self.mlp_hidden_dim,\n self.kv_channels,\n self.bytes_per_element,\n self.optimizer_state_multiplier,\n ]\n ]\n )\n\n @classmethod\n def deserialize(cls, data: bytes):\n def _deserialize_int(data: bytes):\n return int.from_bytes(data, \"little\")\n\n return cls(\n *[_deserialize_int(data[i * 4 : (i + 1) * 4]) for i in range(8)]\n )"
}
] | import argparse
import math
import jsonlines
import numpy as np
import pickle
from multiprocessing import Pool
from typing import Optional
from tqdm import tqdm
from dynapipe.data_opt.cost_models import ProfileBasedCostModelWithRC
from dynapipe.data_opt.optimizer import DataAssignmentOptimizer
from dynapipe.model import TransformerModelSpec | 8,779 | global_batch,
method,
model,
mbs=None,
dataopt: Optional[DataAssignmentOptimizer] = None,
):
if method == "none":
# no micro-batching, directly pad to max sequence length
batch_np = np.array(global_batch)
max_input_seqlen = np.max(batch_np[:, 0])
max_target_seqlen = np.max(batch_np[:, 1])
mbs = len(global_batch)
return [(mbs, max_input_seqlen, max_target_seqlen)], "none"
elif method == "dynamic":
enc_seqlens = [x[0] for x in global_batch]
dec_seqlens = [x[1] for x in global_batch]
out = dataopt.generate_microbatches(
enc_seqlens,
decoder_sample_sequence_lengths=dec_seqlens
if model == "t5"
else None,
bottleneck_tsp=False,
partition_method="dp",
enable_packing=False,
tsp_dist_function="sum",
)
if out[0] is None:
with open("dataopt.pkl", "wb") as f:
pickle.dump(dataopt, f)
with open("global_batch.pkl", "wb") as f:
pickle.dump(global_batch, f)
return None, None
(
objective_values,
microbatches,
memory_type,
rc_type,
(avail_mem, model_state, per_mb_memory_limit),
) = out
micro_batch_shapes = []
assert len(microbatches) == 1
for microbatch in microbatches[0]:
mbs = len(microbatch)
max_enc_seqlen = 0
max_dec_seqlen = 0
for sequence in microbatch:
assert len(sequence) == 1
enc_seqlen = enc_seqlens[sequence[0]]
dec_seqlen = dec_seqlens[sequence[0]]
max_enc_seqlen = max(max_enc_seqlen, enc_seqlen)
max_dec_seqlen = max(max_dec_seqlen, dec_seqlen)
micro_batch_shapes.append((mbs, max_enc_seqlen, max_dec_seqlen))
return micro_batch_shapes, rc_type
elif method in ["fixed_mbs", "packing"]:
assert mbs is not None
microbatches = []
sorted_batch = sorted(global_batch, reverse=True)
for i in range(0, len(sorted_batch), mbs):
batch_np = np.array(sorted_batch[i : i + mbs])
max_input_seqlen = np.max(batch_np[:, 0])
max_target_seqlen = np.max(batch_np[:, 1])
microbatches.append(
(len(batch_np), max_input_seqlen, max_target_seqlen)
)
return microbatches, "none"
elif method == "fixed_tokens":
assert mbs is not None
enc_seqlens = [x[0] for x in global_batch]
dec_seqlens = [x[1] for x in global_batch]
out = dataopt.generate_microbatches(
enc_seqlens,
decoder_sample_sequence_lengths=dec_seqlens
            if model == "t5"
else None,
bottleneck_tsp=False,
partition_method="token_based",
enable_packing=False,
token_based_partition_mb_tokens=mbs,
tsp_dist_function="sum",
)
(
objective_values,
microbatches,
memory_type,
rc_type,
(avail_mem, model_state, per_mb_memory_limit),
) = out
if out[0] is None:
return None, None
micro_batch_shapes = []
assert len(microbatches) == 1
for microbatch in microbatches[0]:
mbs = len(microbatch)
max_enc_seqlen = 0
max_dec_seqlen = 0
for sequence in microbatch:
assert len(sequence) == 1
enc_seqlen = enc_seqlens[sequence[0]]
dec_seqlen = dec_seqlens[sequence[0]]
max_enc_seqlen = max(max_enc_seqlen, enc_seqlen)
max_dec_seqlen = max(max_dec_seqlen, dec_seqlen)
micro_batch_shapes.append((mbs, max_enc_seqlen, max_dec_seqlen))
return micro_batch_shapes, rc_type
else:
raise ValueError(
"Unsupported micro-batching method: {}".format(method)
)
def count_tokens(microbatches):
total_tokens = 0
for mbs, enc_seqlen, dec_seqlen in microbatches:
total_tokens += mbs * (enc_seqlen + dec_seqlen)
return total_tokens
def get_execution_time_and_memory(
microbatches,
rc_type,
| # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
def parse_args():
parser = argparse.ArgumentParser("Compare batching methods")
parser.add_argument(
"-t",
"--method",
type=str,
choices=["none", "packing", "dynamic", "fixed_mbs", "fixed_tokens"],
required=True,
help="Micro-batching method to use.",
)
parser.add_argument(
"-s",
"--max-seqlen-range",
type=str,
default="2048",
help="Range of maximum sequence length to simulate. "
"Format as comma separated list of integers.",
)
parser.add_argument(
"-di",
"--input-dataset",
type=str,
required=True,
help="Path to a Megatron-LM processed indexfile, "
"which records the sequence length of samples in npy "
"format. For input sequences.",
)
parser.add_argument(
"-dt",
"--target-dataset",
type=str,
required=True,
help="Dataset path for target sequences.",
)
parser.add_argument(
"-c",
"--cost-model",
type=str,
required=True,
help="Path to a cost model file, needed for dynamic " " batching.",
)
parser.add_argument(
"-m",
"--model",
type=str,
required=True,
choices=["gpt", "t5"],
help="Model to use.",
)
parser.add_argument(
"-g",
"--global-batch-size",
type=int,
default=65536,
help="Global batch size.",
)
parser.add_argument(
"-o",
"--output",
type=str,
default="compare_batching_methods.jsonl",
help="Output file.",
)
parser.add_argument(
"-ml",
"--mem-limit",
type=float,
default=float("inf"),
help="Memory limit for the data assignment optimizer.",
)
parser.add_argument(
"-ppr",
"--pp-degree-range",
type=str,
default="1",
help="Range of pipeline stages to simulate.",
)
parser.add_argument(
"-tpd",
"--tp-degree",
type=int,
default=1,
help="TP degree to simulate.",
)
parser.add_argument(
"-p",
"--num-processes",
type=int,
default=64,
help="Number of processes to use.",
)
args = parser.parse_args()
args.max_seqlen_range = [int(x) for x in args.max_seqlen_range.split(",")]
args.pp_degree_range = [int(x) for x in args.pp_degree_range.split(",")]
return args
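# Powers of two up to n (inclusive); used to enumerate candidate micro-batch sizes and token budgets.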
def get_powers_of_2_up_to(n):
return [2**i for i in range(math.floor(math.log2(n)) + 1)]
def get_candidate_mbs(maxn=512):
return get_powers_of_2_up_to(maxn)
def get_candidate_tokens(maxn=65536):
return [x for x in get_powers_of_2_up_to(maxn) if x >= 32]
def get_sequence_lengths(dataset_path, max_seqlen):
"""Get the sequence lengths from a Megatron-LM processed dataset."""
with open(dataset_path, "rb") as f:
dataset = np.load(f)
# dataset contains 3 columns: [start_id, end_id, sequence_length]
# we only need the sequence length
return np.clip(dataset[:, 2], 1, max_seqlen).astype(np.int32)[:100000]
def get_global_batches(input_seqlens, target_seqlens, gbs=65536):
"""Get the number of global batches for a given global batch size."""
global_batches = []
current_batch = []
current_batch_size = 0
for input_seqlen, target_seqlen in zip(input_seqlens, target_seqlens):
if current_batch_size + input_seqlen + target_seqlen > gbs:
global_batches.append(current_batch.copy())
current_batch = []
current_batch_size = 0
current_batch.append((input_seqlen, target_seqlen))
current_batch_size += input_seqlen + target_seqlen
if current_batch:
global_batches.append(current_batch.copy())
return global_batches
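# Transformer spec scaled with the pipeline-parallel degree (GPT uses no decoder layers in the spec; T5 is encoder-decoder).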
def get_model_spec(pp_degree, model="gpt"):
if model == "gpt":
return TransformerModelSpec(4 * pp_degree, 0, 4096, 32, 16384, 128)
elif model == "t5":
return TransformerModelSpec(
2 * pp_degree, 2 * pp_degree, 1024, 128, 65536, 128
)
else:
raise ValueError("Unsupported model: {}".format(model))
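# Construct a DataAssignmentOptimizer for the given pipeline/tensor-parallel setup and memory limit.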
def get_dataopt(
pp_degree, cost_model, model="gpt", memlimit=float("inf"), tp_degree=1
):
num_stages = pp_degree
model_spec = get_model_spec(pp_degree, model)
zero_stage = 0
n_layers_per_stage = 4
dp_size = 1
dataopt = DataAssignmentOptimizer(
cost_model,
model_spec,
num_stages,
n_layers_per_stage,
n_chunks_per_device=1,
dp_size=dp_size,
tp_size=tp_degree,
zero_stage=zero_stage,
device_memory_limit=memlimit,
seqlen_offset=1 if model == "gpt" else 0,
)
return dataopt
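# Greedily pack consecutive samples into fixed-length sequences of max_seqlen tokens
# (the packed decoder length is kept at 0 for GPT).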
def pack_sequences(enc_seqlens, dec_seqlens, max_seqlen, model):
current_enc_seq_len = 0
current_dec_seq_len = 0
packed_enc_seqlens = []
packed_dec_seqlens = []
for enc_seqlen, dec_seqlen in zip(enc_seqlens, dec_seqlens):
if (
current_enc_seq_len + enc_seqlen > max_seqlen
or current_dec_seq_len + dec_seqlen > max_seqlen
):
packed_enc_seqlens.append(max_seqlen)
if model == "gpt":
packed_dec_seqlens.append(0)
else:
packed_dec_seqlens.append(max_seqlen)
current_enc_seq_len = 0
current_dec_seq_len = 0
current_enc_seq_len += enc_seqlen
current_dec_seq_len += dec_seqlen
if current_enc_seq_len > 0:
packed_enc_seqlens.append(max_seqlen)
if model == "gpt":
packed_dec_seqlens.append(0)
else:
packed_dec_seqlens.append(max_seqlen)
return packed_enc_seqlens, packed_dec_seqlens
def get_microbatches(
global_batch,
method,
model,
mbs=None,
dataopt: Optional[DataAssignmentOptimizer] = None,
):
if method == "none":
# no micro-batching, directly pad to max sequence length
batch_np = np.array(global_batch)
max_input_seqlen = np.max(batch_np[:, 0])
max_target_seqlen = np.max(batch_np[:, 1])
mbs = len(global_batch)
return [(mbs, max_input_seqlen, max_target_seqlen)], "none"
elif method == "dynamic":
enc_seqlens = [x[0] for x in global_batch]
dec_seqlens = [x[1] for x in global_batch]
out = dataopt.generate_microbatches(
enc_seqlens,
decoder_sample_sequence_lengths=dec_seqlens
if model == "t5"
else None,
bottleneck_tsp=False,
partition_method="dp",
enable_packing=False,
tsp_dist_function="sum",
)
if out[0] is None:
with open("dataopt.pkl", "wb") as f:
pickle.dump(dataopt, f)
with open("global_batch.pkl", "wb") as f:
pickle.dump(global_batch, f)
return None, None
(
objective_values,
microbatches,
memory_type,
rc_type,
(avail_mem, model_state, per_mb_memory_limit),
) = out
micro_batch_shapes = []
assert len(microbatches) == 1
for microbatch in microbatches[0]:
mbs = len(microbatch)
max_enc_seqlen = 0
max_dec_seqlen = 0
for sequence in microbatch:
assert len(sequence) == 1
enc_seqlen = enc_seqlens[sequence[0]]
dec_seqlen = dec_seqlens[sequence[0]]
max_enc_seqlen = max(max_enc_seqlen, enc_seqlen)
max_dec_seqlen = max(max_dec_seqlen, dec_seqlen)
micro_batch_shapes.append((mbs, max_enc_seqlen, max_dec_seqlen))
return micro_batch_shapes, rc_type
elif method in ["fixed_mbs", "packing"]:
assert mbs is not None
microbatches = []
sorted_batch = sorted(global_batch, reverse=True)
for i in range(0, len(sorted_batch), mbs):
batch_np = np.array(sorted_batch[i : i + mbs])
max_input_seqlen = np.max(batch_np[:, 0])
max_target_seqlen = np.max(batch_np[:, 1])
microbatches.append(
(len(batch_np), max_input_seqlen, max_target_seqlen)
)
return microbatches, "none"
elif method == "fixed_tokens":
assert mbs is not None
enc_seqlens = [x[0] for x in global_batch]
dec_seqlens = [x[1] for x in global_batch]
out = dataopt.generate_microbatches(
enc_seqlens,
decoder_sample_sequence_lengths=dec_seqlens
            if model == "t5"
else None,
bottleneck_tsp=False,
partition_method="token_based",
enable_packing=False,
token_based_partition_mb_tokens=mbs,
tsp_dist_function="sum",
)
(
objective_values,
microbatches,
memory_type,
rc_type,
(avail_mem, model_state, per_mb_memory_limit),
) = out
if out[0] is None:
return None, None
micro_batch_shapes = []
assert len(microbatches) == 1
for microbatch in microbatches[0]:
mbs = len(microbatch)
max_enc_seqlen = 0
max_dec_seqlen = 0
for sequence in microbatch:
assert len(sequence) == 1
enc_seqlen = enc_seqlens[sequence[0]]
dec_seqlen = dec_seqlens[sequence[0]]
max_enc_seqlen = max(max_enc_seqlen, enc_seqlen)
max_dec_seqlen = max(max_dec_seqlen, dec_seqlen)
micro_batch_shapes.append((mbs, max_enc_seqlen, max_dec_seqlen))
return micro_batch_shapes, rc_type
else:
raise ValueError(
"Unsupported micro-batching method: {}".format(method)
)
def count_tokens(microbatches):
total_tokens = 0
for mbs, enc_seqlen, dec_seqlen in microbatches:
total_tokens += mbs * (enc_seqlen + dec_seqlen)
return total_tokens
def get_execution_time_and_memory(
microbatches,
rc_type, | cost_model: ProfileBasedCostModelWithRC, | 0 | 2023-11-08 07:58:20+00:00 | 12k |
apple/ml-reed | reed/data/preference_dataset.py | [
{
"identifier": "TrajectoryReplayBuffer",
"path": "BPref/replay_buffer.py",
"snippet": "class TrajectoryReplayBuffer:\n \"\"\"\n Buffer to store trajectories of environment transitions. Unlike ReplayBuffer, which stores all transitions in a\n flat manner, transitions are sorted by trajectory. Each trajectory corresponds to an episode.\n \"\"\"\n _RELABEL_BATCH_SIZE = 256\n\n def __init__(self, capacity: int, device: torch.device, window: int = 1, num_envs: t.Optional[int] = None,\n image_observations: t.Optional[t.Union[int, np.ndarray]] = None):\n \"\"\"\n Args:\n capacity: the number of trajectories to hold in memory\n device: the device sampled transitions should be put on\n window: no idea - part of the original code and is used in add_batch(...) which has not yet been refactored\n num_envs: the number of environment instances used to train the policy. Only needs to be specified when the\n number is >1. Some algorithms train on multiple instances of an environment at once, e.g. PPO.\n Not currently used, but not yet removed because we have not tested with an algorithm that needs\n multiple environment instances.\n image_observations: (default = false) whether to collect image observations in addition to state\n observations. This is helpful to use when the policy is trained on the state, but you\n want to visualize the trajectories or the reward model is trained on images.\n\n \"\"\"\n self.capacity = capacity\n self.device = device\n\n self.observations: t.Optional[np.ndarray] = None\n self.actions: t.Optional[np.ndarray] = None\n self.rewards: t.Optional[np.ndarray] = None\n self.not_dones: t.Optional[np.ndarray] = None\n self.not_dones_no_max: t.Optional[np.ndarray] = None\n self.trajectory_lengths: t.List = []\n self.window = window\n self.env_rewards: t.Optional[np.ndarray] = None\n self.image_observations: t.Optional[np.ndarray] = None\n # track whether to collect image observations - when not None, specifies the dimensions of the images\n self._collect_image_observations = image_observations\n\n # track the trajectories as a list of Trajectory\n self.trajectories: t.List[Trajectory] = []\n\n self.idx = 0\n self.last_save = 0\n self.full = False\n\n def __len__(self):\n return np.sum(self.trajectory_lengths) - len(self.trajectory_lengths)\n\n def __getitem__(self, flat_indx: t.Union[int, t.Tuple[int, int], t.List[int]]) -> TRANSITION:\n \"\"\"\n Get the transition at the given index\n\n Args:\n flat_indx: the index assuming transitions are stored flat instead of nested in trajectories\n - when an integer is specified, a single transition is retrieved\n - when a tuple of integers is given, a slice is retrieved as if the transitions are stored flat\n\n Returns:\n current observation\n action\n reward\n next observation\n whether the episode ended\n whether the episode ended without reaching max steps\n image version of current observation (optional)\n \"\"\"\n if isinstance(flat_indx, int) or isinstance(flat_indx, np.int64):\n traj_indx, trans_indx = self._flat_indx_to_trajectory_index(flat_indx)\n # check we are grabbing from a trajectory currently being accumulated\n # When the done signal is given, the current trajectory being accumulated is converted to a trajectory,\n # is added to the list of trajectories, and the values used to accumulate the next trajectory are set to\n # done. The next trajectory is not started until the call to add(...) after the done signal is received.\n # Therefore, we need to check whether the trajectory to pull from is actually the last completed trajectory\n # prior to starting a new trajectory. 
This is why we compare the length of the lists containing trajectory\n # lengths and the list containing the trajectories.\n if (traj_indx == len(self.trajectory_lengths) - 1\n and len(self.trajectory_lengths) > len(self.trajectories)):\n # we need to grab from the trajectory currently being populated\n return (self.observations[trans_indx].astype(np.float32), self.actions[trans_indx].astype(np.float32),\n self.rewards[trans_indx].astype(np.float32), self.observations[trans_indx + 1].astype(np.float32),\n self.not_dones[trans_indx].astype(np.float32),\n self.not_dones_no_max[trans_indx].astype(np.float32),\n (self.env_rewards[trans_indx].astype(np.float32)\n if self.env_rewards is not None\n else None),\n ((self.image_observations[trans_indx].astype(np.float32))\n if self.image_observations is not None\n else None),\n ((self.image_observations[trans_indx+1].astype(np.float32))\n if self.image_observations is not None\n else None))\n else:\n # grab from a previously completed trajectory\n transition: Transition = self.trajectories[traj_indx][trans_indx]\n return (transition.observation.astype(np.float32), transition.action.astype(np.float32),\n transition.reward.astype(np.float32), transition.next_observation.astype(np.float32),\n transition.not_done.astype(np.float32), transition.not_done_no_max.astype(np.float32),\n transition.env_reward.astype(np.float32),\n (transition.image_observation.astype(np.float32)\n if transition.image_observation is not None\n else None),\n (transition.next_image_observation.astype(np.float32)\n if transition.next_image_observation is not None\n else None))\n elif isinstance(flat_indx, t.List):\n observations = []\n actions = []\n rewards = []\n next_observations = []\n not_dones = []\n not_dones_no_max = []\n env_rewards = []\n image_observations = []\n next_image_observations = []\n for indx in flat_indx:\n observation, action, reward, next_observation, not_done, not_done_no_max, env_reward, image_observation, next_image_observation = self[indx]\n observations.append(observation)\n actions.append(action)\n rewards.append(reward)\n next_observations.append(next_observation)\n not_dones.append(not_done)\n not_dones_no_max.append(not_done_no_max)\n if env_reward is not None:\n env_rewards.append(env_reward)\n if image_observation is not None:\n image_observations.append(image_observation)\n if next_image_observation is not None:\n next_image_observations.append(next_image_observation)\n return (np.asarray(observations, dtype=np.float32), np.asarray(actions, dtype=np.float32),\n np.asarray(rewards, dtype=np.float32), np.asarray(next_observations, dtype=np.float32),\n np.asarray(not_dones, dtype=np.float32), np.asarray(not_dones_no_max, dtype=np.float32),\n (np.asarray(env_rewards, dtype=np.float32) if len(env_rewards) > 0 else None),\n (np.asarray(image_observations, dtype=np.float32) if self._collect_image_observations else None),\n (np.asarray(next_image_observations, dtype=np.float32) if self._collect_image_observations else None))\n else:\n # get the locations of the start and end transitions\n start_traj_indx, start_trans_indx = self._flat_indx_to_trajectory_index(flat_indx[0])\n end_traj_indx, end_trans_indx = self._flat_indx_to_trajectory_index(flat_indx[1])\n # check that we are not spanning trajectories\n if start_traj_indx == end_traj_indx:\n # grab the sub-trajectory\n sub_trajectory = self.trajectories[start_traj_indx][tuple((start_trans_indx, end_trans_indx))]\n else:\n # grab what remains of the trajectory\n end_trans_indx = 
len(self.trajectories[start_traj_indx]) - 1\n sub_trajectory = self.trajectories[start_traj_indx][tuple((start_trans_indx, end_trans_indx))]\n return (sub_trajectory.initial_observations,\n sub_trajectory.actions,\n sub_trajectory.rewards,\n sub_trajectory.next_observations,\n sub_trajectory.not_dones,\n sub_trajectory.not_dones_no_max,\n sub_trajectory.env_rewards,\n (sub_trajectory.initial_image_observations\n if sub_trajectory.initial_image_observations is not None\n else None),\n (sub_trajectory.next_image_observations\n if sub_trajectory.next_image_observations is not None\n else None))\n\n @property\n def trajectory_count(self) -> int:\n \"\"\"\n The number of trajectories in the buffer\n \"\"\"\n return len(self.trajectories)\n\n @property\n def all_not_dones(self) -> np.ndarray:\n \"\"\"\n Rewards from the state-action pairs from all trajectories and all transitions, where the action was taken in the state\n \"\"\"\n return np.concatenate([np.expand_dims(traj.not_dones, axis=0) for traj in self.trajectories], axis=0)\n\n @property\n def all_rewards(self) -> np.ndarray:\n \"\"\"\n Rewards from the state-action pairs from all trajectories and all transitions, where the action was taken in the state\n \"\"\"\n return np.concatenate([np.expand_dims(traj.rewards, axis=0) for traj in self.trajectories], axis=0)\n\n @property\n def all_environment_rewards(self) -> np.ndarray:\n \"\"\"\n Environment rewards from all trajectories and all transitions\n \"\"\"\n return np.concatenate([np.expand_dims(traj.rewards, axis=0) for traj in self.trajectories], axis=0)\n\n @property\n def all_initial_image_observations(self) -> np.ndarray:\n \"\"\"\n Image observations from the state-action pairs from all trajectories and all transitions, where the action was taken in the state\n \"\"\"\n return np.concatenate([np.expand_dims(traj.initial_image_observations, axis=0)\n for traj in self.trajectories],\n axis=0)\n\n @property\n def all_next_image_observations(self) -> np.ndarray:\n \"\"\"\n Image observations from the state-action pairs from all trajectories and all transitions,\n\n The result of a transition\n \"\"\"\n return np.concatenate([np.expand_dims(traj.next_image_observations, axis=0)\n for traj in self.trajectories],\n axis=0)\n\n @property\n def all_initial_observations(self) -> np.ndarray:\n \"\"\"\n observations from the state-action pairs from all trajectories and all transitions, where the action was taken in the state\n \"\"\"\n return np.concatenate([np.expand_dims(traj.initial_observations, axis=0) for traj in self.trajectories], axis=0)\n\n @property\n def all_next_observations(self) -> np.ndarray:\n \"\"\"\n Observations from the state-action pairs from all trajectories and all transitions\n\n The result of a transition\n \"\"\"\n return np.concatenate([np.expand_dims(traj.next_observations, axis=0) for traj in self.trajectories], axis=0)\n\n @property\n def all_actions(self) -> np.ndarray:\n \"\"\"\n Actions from the state-action pairs from all trajectories and all transitions\n \"\"\"\n return np.concatenate([np.expand_dims(traj.actions, axis=0) for traj in self.trajectories], axis=0)\n\n def _flat_indx_to_trajectory_index(self, flat_indx: int) -> t.Tuple[int, int]:\n \"\"\"\n Converts an index that assumes the transitions are flat to a trajectory and transition (w/in trajectory) index\n\n Args:\n flat_indx: the index assuming transitions are stored flat\n\n Returns:\n the index of the trajectory containing the transition\n the index of the transition within the trajectory\n 
\"\"\"\n # need to figure out which transition indices are stored in which trajectories\n transition_cumulative_sum = np.cumsum(self.trajectory_lengths)\n # the trajectory containing the transition is at the first index where the cumulative sum of transitions is\n # less than the transition index\n target_trajectory_indx = int(np.argmax(flat_indx < transition_cumulative_sum))\n # get the transition's index within the trajectory as the different between the flat index and the cumulative\n # sum at the previous trajectory - tells us how far into the target trajectory the transition is\n if target_trajectory_indx == 0:\n transition_trajectory_indx = flat_indx\n else:\n transition_trajectory_indx = flat_indx - transition_cumulative_sum[target_trajectory_indx - 1]\n return target_trajectory_indx, transition_trajectory_indx\n\n def _add_transition(self, observation: np.ndarray, action: np.ndarray, reward: float, done: t.Union[float, bool],\n done_no_max: t.Union[float, bool],\n env_reward: t.Optional[float] = None, image_observations: t.Optional[np.ndarray] = None):\n \"\"\"\n Track the transition and update the length of the trajectory currently being accumulated\n\n Args:\n observation: the current observation\n action: the action taken in the current state\n reward: the reward associated with the last state-action pait\n done: whether the last action completed an episode\n done_no_max: whether the last action completed an episode without reaching the maximum allowed steps\n env_reward: (optional) the reward given by the environment - stored and used to train the preference-learned\n reward model when learning from synthetic feedback\n image_observations: (optional) image-based observation -> should not be given is observations is also an image. This\n should be used when you want to accumulate images separately from policy training.\n \"\"\"\n self.observations = np.concatenate([self.observations, np.expand_dims(observation, axis=0)], axis=0)\n self.actions = np.concatenate([self.actions, np.expand_dims(action, axis=0)], axis=0)\n self.rewards = np.concatenate([self.rewards, np.asarray(reward).reshape(1, 1)], axis=0)\n if type(done) is float:\n self.not_dones = np.concatenate([self.not_dones,\n np.asarray(not done, dtype=np.float32).reshape(1, 1)], axis=0)\n self.not_dones_no_max = np.concatenate([self.not_dones_no_max,\n np.asarray(not done_no_max, dtype=np.float32).reshape(1, 1)],\n axis=0)\n else:\n self.not_dones = np.concatenate([self.not_dones,\n np.asarray(~done, dtype=np.float32).reshape(1, 1)], axis=0)\n self.not_dones_no_max = np.concatenate([self.not_dones_no_max,\n np.asarray(~done_no_max, dtype=np.float32).reshape(1, 1)],\n axis=0)\n\n self.trajectory_lengths[-1] += 1\n if env_reward is not None:\n self.env_rewards = np.concatenate([self.env_rewards,\n np.asarray(env_reward, dtype=np.float32).reshape(1, 1)], axis=0)\n\n if image_observations is not None and self._collect_image_observations:\n self.image_observations = np.concatenate([self.image_observations, np.expand_dims(image_observations, axis=0)], axis=0)\n\n def _start_trajectory(self, observation: np.ndarray,\n action: np.ndarray,\n reward: float,\n done: t.Union[float, bool],\n done_no_max: t.Union[float, bool],\n env_reward: t.Optional[float] = None,\n image_observations: t.Optional[np.ndarray] = None):\n \"\"\"\n Start a new trajectory and track the transition\n\n Args:\n observation: the current observation\n action: the action taken in the current state\n reward: the reward associated with the last state-action pait\n 
done: whether the last action completed an episode\n done_no_max: whether the last action completed an episode without reaching the maximum allowed steps\n env_reward: (optional) the reward given by the environment - stored and used to train the preference-learned\n reward model when learning from synthetic feedback\n image_observations: (optional) image-based observation -> should not be given is observations is also an image. This\n should be used when you want to accumulate images separately from policy training.\n \"\"\"\n self.observations = np.expand_dims(observation, axis=0).astype(dtype=np.float32)\n self.actions = np.expand_dims(action, axis=0).astype(dtype=np.float32)\n self.rewards = np.asarray(reward, dtype=np.float32).reshape(1, 1)\n if type(done) is float:\n self.not_dones = np.asarray(not done, dtype=np.float32).reshape(1, 1)\n self.not_dones_no_max = np.asarray(not done_no_max, dtype=np.float32).reshape(1, 1)\n else:\n self.not_dones = np.asarray(~done, dtype=np.float32).reshape(1, 1)\n self.not_dones_no_max = np.asarray(~done_no_max, dtype=np.float32).reshape(1, 1)\n\n self.trajectory_lengths.append(1)\n\n if env_reward is not None:\n self.env_rewards = np.asarray(env_reward, dtype=np.float32).reshape(1, 1)\n\n if image_observations is not None and self._collect_image_observations:\n self.image_observations = np.expand_dims(image_observations, axis=0).astype(dtype=np.float32)\n\n def add(self, observation, action, reward, next_observation, done, done_no_max,\n env_reward: t.Optional[float] = None, image_observation: t.Optional[np.ndarray] = None,\n image_next_observation: t.Optional[np.ndarray] = None):\n \"\"\"\n Args:\n observation: the current observation\n action: the action taken in the current state\n reward: the reward associated with the last state-action pait\n next_observation: only used when an episode is completed to ensure the last observation is captured\n done: whether the last action completed an episode\n done_no_max: whether the last action completed an episode without reaching the maximum allowed steps\n env_reward: (optional) the reward given by the environment - stored and used to train the preference-learned\n reward model when learning from synthetic feedback\n image_observation: (optional) image-based observation -> should not be given is observations is also an image. This\n should be used when you want to accumulate images separately from policy training.\n image_next_observation: (optional) the image-based next observation -> should not be given when next_observation is also\n and image. 
This should be used when you want to accumulate the images separately from the\n trained policy.\n \"\"\"\n if self.observations is None:\n self._start_trajectory(observation, action, reward, done, done_no_max, env_reward, image_observation)\n elif done:\n self._add_transition(observation, action, reward, done, done_no_max, env_reward, image_observation)\n # the episode has ended, so we need to track the next observation\n self.observations = np.concatenate([self.observations, np.expand_dims(next_observation, axis=0)], axis=0)\n if image_next_observation is not None:\n self.image_observations = np.concatenate([self.image_observations,\n np.expand_dims(image_next_observation, axis=0)], axis=0)\n # create the trajectory\n self.trajectories.append(Trajectory(self.observations.astype(dtype=np.float32),\n (self.image_observations.astype(dtype=np.float32)\n if self.image_observations is not None\n else None),\n actions=self.actions.astype(dtype=np.float32),\n rewards=self.rewards.astype(dtype=np.float32),\n not_dones=self.not_dones.astype(dtype=np.float32),\n not_dones_no_max=self.not_dones_no_max.astype(dtype=np.float32),\n env_rewards=self.env_rewards.astype(dtype=np.float32)))\n # check if the inclusion of the just completed trajectory puts the buffer at capacity\n # if it does, remove the first trajectory as this is a FIFO buffer\n if np.sum(self.trajectory_lengths) >= self.capacity:\n self.trajectories = self.trajectories[1:]\n self.trajectory_lengths = self.trajectory_lengths[1:]\n self.observations = None\n self.actions = None\n self.rewards = None\n self.not_dones = None\n self.not_dones_no_max = None\n self.env_rewards = None\n self.image_observations = None\n else:\n self._add_transition(observation, action, reward, done, done_no_max, env_reward, image_observation)\n\n self.idx = (self.idx + 1) % self.capacity\n self.full = self.full or self.idx == 0\n\n def relabel_with_predictor(self, predictor, state_action_formatter: PreProcessInference):\n \"\"\"\n Relabel the rewards stored in the replay buffer using the given predictor\n\n Args:\n predictor: network that will consume state-action pairs and assign a reward\n state_action_formatter: formats the states and actions for consumption by the reward model\n \"\"\"\n print(\"Relabelling the replay buffer with the updated reward model.\")\n for trajectory in self.trajectories:\n # the number of batches to run through the model\n total_iter = int(len(trajectory) / self._RELABEL_BATCH_SIZE)\n # handle the case where we have more transitions than is evenly divisible by the batch size\n if len(trajectory) > self._RELABEL_BATCH_SIZE * total_iter:\n total_iter += 1\n # collect and process each batch to be passed through predictor\n for index in range(total_iter):\n start_indx = index * self._RELABEL_BATCH_SIZE\n # make sure we don't have an end index that is after the end of the trajectory\n end_indx = min((index + 1) * self._RELABEL_BATCH_SIZE, len(trajectory))\n\n # pull out the actions from the transitions that will be relabelled\n actions = trajectory.actions[start_indx:end_indx]\n # we need to handle the case where the reward model operates off of images\n if predictor.image_observations:\n observations = trajectory.all_image_observations[start_indx:end_indx]\n else:\n observations = trajectory.all_observations[start_indx:end_indx]\n formatted_state_action = state_action_formatter.format_state_action(observations, actions, batch_sa=True)\n pred_reward = predictor.r_hat_batch(formatted_state_action)\n # update the rewards assigned to the 
transitions\n trajectory.rewards[start_indx:end_indx] = pred_reward\n\n def sample(self, batch_size: int):\n indxs = list(np.random.randint(0, np.sum(self.trajectory_lengths) - 1, size=batch_size))\n observations, actions, rewards, next_observations, not_dones, not_dones_no_max, env_rewards, image_observations, next_image_observations = self[indxs]\n observations = torch.as_tensor(observations, device=self.device).float()\n actions = torch.as_tensor(actions, device=self.device)\n rewards = torch.as_tensor(rewards, device=self.device)\n next_observations = torch.as_tensor(next_observations, device=self.device).float()\n not_dones = torch.as_tensor(not_dones, device=self.device)\n not_dones_no_max = torch.as_tensor(not_dones_no_max, device=self.device)\n env_rewards = torch.as_tensor(env_rewards, device=self.device)\n image_observations = (torch.as_tensor(image_observations, device=self.device).float() if self._collect_image_observations else None)\n next_image_observations = (torch.as_tensor(next_image_observations, device=self.device).float() if self._collect_image_observations else None)\n return observations, actions, rewards, next_observations, not_dones, not_dones_no_max, env_rewards, image_observations, next_image_observations\n\n def sample_state_ent(self, batch_size: int):\n observations, actions, rewards, next_observations, not_dones, not_dones_no_max, _, _, _ = self.sample(batch_size)\n full_observation = torch.as_tensor(np.concatenate([traj.all_observations for traj in self.trajectories], axis=0),\n device=self.device)\n return observations, full_observation, actions, rewards, next_observations, not_dones, not_dones_no_max\n\n def save(self, out_directory: Path, env_id: str, step: int):\n \"\"\"\n Save the replay buffer to disk as a npz archive\n Args:\n out_directory: location where replay buffer will be saved\n env_id: the environment within which the data was generated\n step: the number of policy training steps taken to produce this dataset\n \"\"\"\n # create the ZipFile object\n zip_obj = ZipFile(out_directory / f\"{env_id}_replay_buffer_{step}.zip\", \"w\")\n\n # write each trajectory file to disk and to the zip archive\n for traj_id, trajectory in enumerate(self.trajectories):\n trajectory.save(out_directory / f\"{traj_id}.npz\")\n zip_obj.write(out_directory / f\"{traj_id}.npz\")\n # close the Zip File\n zip_obj.close()\n\n @staticmethod\n def from_directory(directory_path: Path,\n device: torch.device = 'cuda') -> \"TrajectoryReplayBuffer\":\n \"\"\"\n Create a TrajectoryReplay buffer from a directory of npz archive trajectories\n\n Args:\n directory_path: the location of the npz_archive on disk\n device: the device sampled transitions should be pushed to\n Returns:\n populated trajectory replay buffer\n \"\"\"\n # accumulate the trajectories\n trajectories = []\n trajectory_lengths = []\n # determine how many transitions are in the replay buffer\n capacity = 0\n # load each trajectory from disk\n for traj_filename in directory_path.iterdir():\n # we only load data from npz archives, so we need to skip anything else\n if not traj_filename.suffix == \".npz\": continue\n # load the trajectory from disk\n traj = Trajectory.from_npz(traj_filename)\n # track the trajectory\n trajectories.append(traj)\n # track the trajectory's length\n trajectory_lengths.append(len(traj))\n # track the trajectory's length\n capacity += len(traj)\n # create the buffer\n _buffer = TrajectoryReplayBuffer(capacity=capacity, device=device)\n # add the trajectories to the buffer\n 
_buffer.trajectories = trajectories\n _buffer.trajectory_lengths = trajectory_lengths\n\n return _buffer"
},
{
"identifier": "PreProcessInference",
"path": "reed/data/preprocess_images.py",
"snippet": "class PreProcessInference:\n \"\"\"\n Preprocess the data for inference by the reward, SSC, and SFC models\n \"\"\"\n def __init__(self,\n image_observations: bool = False,\n grayscale_images: bool = True,\n normalize_images: bool = True,\n environment_id: str = \"dmc\"):\n \"\"\"\n Args:\n image_observations: whether the observations are images\n grayscale_images: whether images observations should be in grayscale\n normalize_images: whether the image observations should be normalized\n environment_id: the environment from which the data is coming\n \"\"\"\n self.image_observations = image_observations\n self.grayscale_images = grayscale_images\n self.normalize_images = normalize_images\n self.environment_id = environment_id\n\n @staticmethod\n def _channel_first_to_last(observation: np.ndarray,\n batch_states: bool = False,\n by_trajectory: bool = False) -> np.ndarray:\n \"\"\"\n Move the channel from the first dimension to the last dimension\n \"\"\"\n if batch_states and by_trajectory:\n return np.transpose(observation, (0, 1, 3, 4, 2))\n elif batch_states:\n return np.transpose(observation, (0, 2, 3, 1))\n else:\n return np.transpose(observation, (1, 2, 0))\n\n @staticmethod\n def _channel_last_to_first(observation: np.ndarray, batch_states: bool = False,\n by_trajectory: bool = False) -> np.ndarray:\n \"\"\"\n Move the channel from the last dimension to the first dimension\n Args:\n observation: the state observations\n batch_states: whether a batch of state is to be processed\n by_trajectory: whether the batch of states is structured by trajectory -> should only be\n True when batch_sa=True\n Returns:\n the image with the channel dimension moved from first to last\n \"\"\"\n # permute the input so that the channels are in the first dimension of the images\n if batch_states and by_trajectory:\n return np.transpose(observation, (0, 1, 4, 2, 3))\n elif batch_states:\n return np.transpose(observation, (0, 3, 1, 2))\n else:\n # permute the input so that the channels are in the first dimension\n obs = np.transpose(observation, (2, 0, 1))\n # add a dimension along the front for concatenation into the buffer\n return np.expand_dims(obs, axis=0)\n\n def format_state(self, obs: np.ndarray, batch_states: bool = False,\n by_trajectory: bool = False, channel_first: bool = False) -> np.ndarray:\n \"\"\"\n Args:\n obs: the state observations\n batch_states: whether a batch of state is to be processed\n by_trajectory: whether the batch of states is structured by trajectory -> should only be\n True when batch_sa=True\n channel_first: whether the channel dimension is first when the observations are images.\n Returns:\n the state-action pairs as a single array\n \"\"\"\n if self.image_observations:\n if channel_first:\n # move the channel dimension from first to last to avoid a bunch of logic in our formatting methods\n # that handles variable locations for the channel dimension\n obs = self._channel_first_to_last(observation=obs,\n batch_states=batch_states,\n by_trajectory=by_trajectory)\n if self.grayscale_images:\n obs = _to_grayscale(observation=obs)\n if self.normalize_images:\n # TODO: add normalization based on pixel mean and standard deviation instead of scaling 0 to 1\n obs = np.divide(obs, 255.)\n # move the channel dimension from first to last\n return self._channel_last_to_first(observation=obs, batch_states=batch_states, by_trajectory=by_trajectory)\n\n else:\n return obs.reshape(1, obs.shape[1:]) if batch_states else obs.reshape(1, obs.shape[0])\n\n def 
format_state_action(self, obs: np.ndarray, act: np.ndarray,\n batch_sa: bool = False, by_trajectory: bool = False,\n channel_first: bool = False) -> np.ndarray:\n \"\"\"\n Args:\n obs: the state observations\n act: the actions associated with each state observation\n batch_sa: whether a batch of state-action pairs is to be processed\n by_trajectory: whether the batch of state-action pairs is structured by trajectory -> should only be\n True when batch_sa=True\n channel_first: whether the channel dimension is first when the observations are images.\n Returns:\n the state-action pairs as a single array\n \"\"\"\n if self.image_observations:\n if channel_first:\n # move the channel dimension from first to last to avoid a bunch of logic in our formatting methods\n # that handles variable locations for the channel dimension\n obs = self._channel_first_to_last(observation=obs,\n batch_states=batch_sa,\n by_trajectory=by_trajectory)\n if self.grayscale_images:\n obs = _to_grayscale(observation=obs)\n if self.normalize_images:\n # TODO: add normalization based on pixel mean and standard deviation instead of scaling 0 to 1\n obs = np.divide(obs, 255.)\n\n # get the dimensions of the image\n obs_dim = obs.shape[-3:]\n assert len(obs_dim) == 3\n # add the actions to the image channels and permute the input so that the channels are in the first\n # dimension of the images\n if batch_sa and by_trajectory:\n repeated_actions = np.tile(act.reshape((act.shape[0], act.shape[1], 1, 1, act.shape[-1])),\n (1, 1, obs_dim[0], obs_dim[1], 1))\n elif batch_sa:\n repeated_actions = np.tile(act.reshape((act.shape[0], 1, 1, act.shape[-1])),\n (1, obs_dim[0], obs_dim[1], 1))\n else:\n repeated_actions = np.tile(act.reshape((1, 1, -1)), (obs_dim[0], obs_dim[1], 1))\n sa_t = np.concatenate((obs, repeated_actions), axis=-1)\n return self._channel_last_to_first(sa_t, batch_states=batch_sa, by_trajectory=by_trajectory)\n else:\n sa_t = np.concatenate([obs, act], axis=-1)\n if batch_sa:\n return sa_t\n else:\n return sa_t.reshape(1, -1)"
}
] | import typing as t
import time
import shutil
import yaml
import numpy as np
import torch
import torch.nn.functional as F
from pathlib import Path
from zipfile import ZipFile
from BPref.replay_buffer import TrajectoryReplayBuffer
from reed.data.preprocess_images import PreProcessInference | 9,365 | batch_size = 100
with torch.no_grad():
total_dists = []
for full_idx in range(len(obs) // batch_size + 1):
full_start = full_idx * batch_size
if full_start < len(obs):
full_end = (full_idx + 1) * batch_size
dists = []
for idx in range(len(full_obs) // batch_size + 1):
start = idx * batch_size
if start < len(full_obs):
end = (idx + 1) * batch_size
dist = torch.norm(
obs[full_start:full_end, None, :].to(device) - full_obs[None, start:end, :].to(device),
dim=-1, p=2
)
dists.append(dist)
dists = torch.cat(dists, dim=1)
small_dists = torch.min(dists, dim=1).values
total_dists.append(small_dists)
total_dists = torch.cat(total_dists)
return total_dists.unsqueeze(1)
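# NOTE: compute_smallest_dist returns, for each row of obs, the L2 distance to its nearest neighbour in full_obs (shape [len(obs), 1])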
class _PreferenceLabeller:
def __init__(self, label_margin: float = 0.0, teacher_beta: float = -1, teacher_gamma: float = 1,
teacher_eps_mistake: float = 0, teacher_eps_skip: float = 0, teacher_eps_equal: float = 0):
"""
Assigns preference labels to the trajectory pairs following the strategy specified by the parameters
Args:
label_margin: margin used when constructing soft preference labels (label_target = 1 - 2 * label_margin)
teacher_beta: rationality coefficient for the Bradley-Terry preference model; values <= 0 give a perfectly rational teacher
teacher_gamma: used to determine how much influence each reward has on the preference label based on
order within the trajectory. Used to compute the return
teacher_eps_mistake: the frequency with which the teacher assigns an incorrect label
teacher_eps_skip: the frequency with which the teacher does not assign a label
teacher_eps_equal: the maximum difference between trajectory returns for the two trajectories to be labelled
as equally preferred
"""
self.teacher_beta = teacher_beta
self.teacher_gamma = teacher_gamma
self.teacher_eps_mistake = teacher_eps_mistake
self.teacher_eps_skip = teacher_eps_skip
self.teacher_eps_equal = teacher_eps_equal
self.teacher_thres_skip = 0
self.teacher_thres_equal = 0
self.label_margin = label_margin
self.label_target = 1 - 2 * self.label_margin
def get_label(self, sa_t_1, sa_t_2, r_t_1, r_t_2):
"""
For each trajectory pair, assign a preference label
Assigning a preference label can involve not labelling a trajectory pair, in which case the trajectory pair
is removed from trajectories one and trajectories two
Args:
sa_t_1: the state-action pairs from trajectories one
sa_t_2: the state-action pairs from trajectories two
r_t_1: the reward per transition in the trajectories one
r_t_2: the reward per transition in the trajectories two
"""
sum_r_t_1 = np.sum(r_t_1, axis=1)
sum_r_t_2 = np.sum(r_t_2, axis=1)
# skip the query
if self.teacher_thres_skip > 0:
max_r_t = np.maximum(sum_r_t_1, sum_r_t_2)
max_index = (max_r_t > self.teacher_thres_skip).reshape(-1)
if sum(max_index) == 0:
return None, None, None, None, []
sa_t_1 = sa_t_1[max_index]
sa_t_2 = sa_t_2[max_index]
r_t_1 = r_t_1[max_index]
r_t_2 = r_t_2[max_index]
sum_r_t_1 = np.sum(r_t_1, axis=1)
sum_r_t_2 = np.sum(r_t_2, axis=1)
# equally preferable
margin_index = (np.abs(sum_r_t_1 - sum_r_t_2) < self.teacher_thres_equal).reshape(-1)
# perfectly rational
seg_size = r_t_1.shape[1]
temp_r_t_1 = r_t_1.copy()
temp_r_t_2 = r_t_2.copy()
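# discount earlier rewards: transition j in a segment of length seg_size ends up weighted by teacher_gamma ** (seg_size - 1 - j), so later transitions count more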
for index in range(seg_size - 1):
temp_r_t_1[:, :index + 1] *= self.teacher_gamma
temp_r_t_2[:, :index + 1] *= self.teacher_gamma
sum_r_t_1 = np.sum(temp_r_t_1, axis=1)
sum_r_t_2 = np.sum(temp_r_t_2, axis=1)
rational_labels = 1 * (sum_r_t_1 < sum_r_t_2)
if self.teacher_beta > 0: # Bradley-Terry rational model
r_hat = torch.cat([torch.Tensor(sum_r_t_1),
torch.Tensor(sum_r_t_2)], dim=-1)
r_hat = r_hat * self.teacher_beta
ent = F.softmax(r_hat, dim=-1)[:, 1]
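# ent is the Bradley-Terry probability that the second segment is preferred; stochastic labels are sampled from it below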
labels = torch.bernoulli(ent).int().numpy().reshape(-1, 1)
else:
labels = rational_labels
# making a mistake
len_labels = labels.shape[0]
rand_num = np.random.rand(len_labels)
noise_index = rand_num <= self.teacher_eps_mistake
labels[noise_index] = 1 - labels[noise_index]
# equally preferable
labels[margin_index] = -1
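# label semantics: 1 -> segment two preferred, 0 -> segment one preferred, -1 -> (nearly) equally preferred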
return sa_t_1, sa_t_2, r_t_1, r_t_2, labels
class PreferenceDataset:
def __init__(self, observation_dim: t.Union[t.Tuple, int], action_dim: t.Union[t.Tuple, int], capacity: int,
size_segment: int, out_path: Path, image_observations: bool, grayscale_images: bool,
| #
# For licensing see accompanying LICENSE file.
# Copyright (C) 2023 Apple Inc. All Rights Reserved.
#
PREFERENCE_TRIPLET = t.Tuple[np.ndarray, np.ndarray, np.ndarray]
PREFERENCE_TRIPLET_BATCH = t.Tuple[np.ndarray, np.ndarray, np.ndarray]
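# greedy k-center (coreset) selection: each iteration adds the candidate from obs that is farthest from the points already in full_obs plus the previously selected candidates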
def KCenterGreedy(obs, full_obs, num_new_sample, device: torch.device):
selected_index = []
current_index = list(range(obs.shape[0]))
new_obs = obs
new_full_obs = full_obs
start_time = time.time()
for count in range(num_new_sample):
dist = compute_smallest_dist(new_obs, new_full_obs, device)
max_index = torch.argmax(dist)
max_index = max_index.item()
if count == 0:
selected_index.append(max_index)
else:
selected_index.append(current_index[max_index])
current_index = current_index[0:max_index] + current_index[max_index + 1:]
new_obs = obs[current_index]
new_full_obs = np.concatenate([
full_obs,
obs[selected_index]],
axis=0)
return selected_index
def compute_smallest_dist(obs, full_obs, device: torch.device):
obs = torch.from_numpy(obs).float()
full_obs = torch.from_numpy(full_obs).float()
batch_size = 100
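# compute pairwise L2 distances in chunks of batch_size so the full [len(obs), len(full_obs)] distance matrix is never materialised on the GPU at once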
with torch.no_grad():
total_dists = []
for full_idx in range(len(obs) // batch_size + 1):
full_start = full_idx * batch_size
if full_start < len(obs):
full_end = (full_idx + 1) * batch_size
dists = []
for idx in range(len(full_obs) // batch_size + 1):
start = idx * batch_size
if start < len(full_obs):
end = (idx + 1) * batch_size
dist = torch.norm(
obs[full_start:full_end, None, :].to(device) - full_obs[None, start:end, :].to(device),
dim=-1, p=2
)
dists.append(dist)
dists = torch.cat(dists, dim=1)
small_dists = torch.min(dists, dim=1).values
total_dists.append(small_dists)
total_dists = torch.cat(total_dists)
return total_dists.unsqueeze(1)
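# NOTE: compute_smallest_dist returns, for each row of obs, the L2 distance to its nearest neighbour in full_obs (shape [len(obs), 1])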
class _PreferenceLabeller:
def __init__(self, label_margin: float = 0.0, teacher_beta: float = -1, teacher_gamma: float = 1,
teacher_eps_mistake: float = 0, teacher_eps_skip: float = 0, teacher_eps_equal: float = 0):
"""
Assigns preference labels to the trajectory pairs following the strategy specified by the parameters
Args:
label_margin: margin used when constructing soft preference labels (label_target = 1 - 2 * label_margin)
teacher_beta: rationality coefficient for the Bradley-Terry preference model; values <= 0 give a perfectly rational teacher
teacher_gamma: used to determine how much influence each reward has on the preference label based on
order within the trajectory. Used to compute the return
teacher_eps_mistake: the frequency with which the teacher assigns an incorrect label
teacher_eps_skip: the frequency with which the teacher does not assign a label
teacher_eps_equal: the maximum difference between trajectory returns for the two trajectories to be labelled
as equally preferred
"""
self.teacher_beta = teacher_beta
self.teacher_gamma = teacher_gamma
self.teacher_eps_mistake = teacher_eps_mistake
self.teacher_eps_skip = teacher_eps_skip
self.teacher_eps_equal = teacher_eps_equal
self.teacher_thres_skip = 0
self.teacher_thres_equal = 0
self.label_margin = label_margin
self.label_target = 1 - 2 * self.label_margin
def get_label(self, sa_t_1, sa_t_2, r_t_1, r_t_2):
"""
For each trajectory pair, assign a preference label
Assigning a preference label can involve not labelling a trajectory pair, in which case the trajectory pair
is removed from trajectories one and trajectories two
Args:
sa_t_1: the state-action pairs from trajectories one
sa_t_2: the state-action pairs from trajectories two
r_t_1: the reward per transition in the trajectories one
r_t_2: the reward per transition in the trajectories two
"""
sum_r_t_1 = np.sum(r_t_1, axis=1)
sum_r_t_2 = np.sum(r_t_2, axis=1)
# skip the query
if self.teacher_thres_skip > 0:
max_r_t = np.maximum(sum_r_t_1, sum_r_t_2)
max_index = (max_r_t > self.teacher_thres_skip).reshape(-1)
if sum(max_index) == 0:
return None, None, None, None, []
sa_t_1 = sa_t_1[max_index]
sa_t_2 = sa_t_2[max_index]
r_t_1 = r_t_1[max_index]
r_t_2 = r_t_2[max_index]
sum_r_t_1 = np.sum(r_t_1, axis=1)
sum_r_t_2 = np.sum(r_t_2, axis=1)
# equally preferable
margin_index = (np.abs(sum_r_t_1 - sum_r_t_2) < self.teacher_thres_equal).reshape(-1)
# perfectly rational
seg_size = r_t_1.shape[1]
temp_r_t_1 = r_t_1.copy()
temp_r_t_2 = r_t_2.copy()
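# discount earlier rewards: transition j in a segment of length seg_size ends up weighted by teacher_gamma ** (seg_size - 1 - j), so later transitions count more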
for index in range(seg_size - 1):
temp_r_t_1[:, :index + 1] *= self.teacher_gamma
temp_r_t_2[:, :index + 1] *= self.teacher_gamma
sum_r_t_1 = np.sum(temp_r_t_1, axis=1)
sum_r_t_2 = np.sum(temp_r_t_2, axis=1)
rational_labels = 1 * (sum_r_t_1 < sum_r_t_2)
if self.teacher_beta > 0: # Bradley-Terry rational model
r_hat = torch.cat([torch.Tensor(sum_r_t_1),
torch.Tensor(sum_r_t_2)], dim=-1)
r_hat = r_hat * self.teacher_beta
ent = F.softmax(r_hat, dim=-1)[:, 1]
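# ent is the Bradley-Terry probability that the second segment is preferred; stochastic labels are sampled from it below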
labels = torch.bernoulli(ent).int().numpy().reshape(-1, 1)
else:
labels = rational_labels
# making a mistake
len_labels = labels.shape[0]
rand_num = np.random.rand(len_labels)
noise_index = rand_num <= self.teacher_eps_mistake
labels[noise_index] = 1 - labels[noise_index]
# equally preferable
labels[margin_index] = -1
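# label semantics: 1 -> segment two preferred, 0 -> segment one preferred, -1 -> (nearly) equally preferred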
return sa_t_1, sa_t_2, r_t_1, r_t_2, labels
class PreferenceDataset:
def __init__(self, observation_dim: t.Union[t.Tuple, int], action_dim: t.Union[t.Tuple, int], capacity: int,
size_segment: int, out_path: Path, image_observations: bool, grayscale_images: bool, | collect_image_pref_dataset: bool, state_action_formatter: PreProcessInference, | 1 | 2023-11-06 23:14:20+00:00 | 12k |
ApolloAuto/apollo-model-yolox | yolox/models/yolox.py | [
{
"identifier": "YOLOXHead",
"path": "yolox/models/yolo_head.py",
"snippet": "class YOLOXHead(nn.Module):\n def __init__(\n self,\n num_classes,\n width=1.0,\n strides=[8, 16, 32],\n in_channels=[256, 512, 1024],\n act=\"silu\",\n depthwise=False,\n ):\n \"\"\"\n Args:\n act (str): activation type of conv. Defalut value: \"silu\".\n depthwise (bool): whether apply depthwise conv in conv branch. Defalut value: False.\n \"\"\"\n super().__init__()\n\n self.num_classes = num_classes\n self.decode_in_inference = True # for deploy, set to False\n\n self.cls_convs = nn.ModuleList()\n self.reg_convs = nn.ModuleList()\n self.cls_preds = nn.ModuleList()\n self.reg_preds = nn.ModuleList()\n self.obj_preds = nn.ModuleList()\n self.stems = nn.ModuleList()\n Conv = DWConv if depthwise else BaseConv\n\n for i in range(len(in_channels)):\n self.stems.append(\n BaseConv(\n in_channels=int(in_channels[i] * width),\n out_channels=int(256 * width),\n ksize=1,\n stride=1,\n act=act,\n )\n )\n self.cls_convs.append(\n nn.Sequential(\n *[\n Conv(\n in_channels=int(256 * width),\n out_channels=int(256 * width),\n ksize=3,\n stride=1,\n act=act,\n ),\n Conv(\n in_channels=int(256 * width),\n out_channels=int(256 * width),\n ksize=3,\n stride=1,\n act=act,\n ),\n ]\n )\n )\n self.reg_convs.append(\n nn.Sequential(\n *[\n Conv(\n in_channels=int(256 * width),\n out_channels=int(256 * width),\n ksize=3,\n stride=1,\n act=act,\n ),\n Conv(\n in_channels=int(256 * width),\n out_channels=int(256 * width),\n ksize=3,\n stride=1,\n act=act,\n ),\n ]\n )\n )\n self.cls_preds.append(\n nn.Conv2d(\n in_channels=int(256 * width),\n out_channels=self.num_classes,\n kernel_size=1,\n stride=1,\n padding=0,\n )\n )\n self.reg_preds.append(\n nn.Conv2d(\n in_channels=int(256 * width),\n out_channels=4,\n kernel_size=1,\n stride=1,\n padding=0,\n )\n )\n self.obj_preds.append(\n nn.Conv2d(\n in_channels=int(256 * width),\n out_channels=1,\n kernel_size=1,\n stride=1,\n padding=0,\n )\n )\n\n self.use_l1 = False\n self.l1_loss = nn.L1Loss(reduction=\"none\")\n self.bcewithlog_loss = nn.BCEWithLogitsLoss(reduction=\"none\")\n self.iou_loss = IOUloss(reduction=\"none\")\n self.strides = strides\n self.grids = [torch.zeros(1)] * len(in_channels)\n\n def initialize_biases(self, prior_prob):\n for conv in self.cls_preds:\n b = conv.bias.view(1, -1)\n b.data.fill_(-math.log((1 - prior_prob) / prior_prob))\n conv.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)\n\n for conv in self.obj_preds:\n b = conv.bias.view(1, -1)\n b.data.fill_(-math.log((1 - prior_prob) / prior_prob))\n conv.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)\n\n def forward(self, xin, labels=None, imgs=None):\n outputs = []\n origin_preds = []\n x_shifts = []\n y_shifts = []\n expanded_strides = []\n\n for k, (cls_conv, reg_conv, stride_this_level, x) in enumerate(\n zip(self.cls_convs, self.reg_convs, self.strides, xin)\n ):\n x = self.stems[k](x)\n cls_x = x\n reg_x = x\n\n cls_feat = cls_conv(cls_x)\n cls_output = self.cls_preds[k](cls_feat)\n\n reg_feat = reg_conv(reg_x)\n reg_output = self.reg_preds[k](reg_feat)\n obj_output = self.obj_preds[k](reg_feat)\n\n if self.training:\n output = torch.cat([reg_output, obj_output, cls_output], 1)\n output, grid = self.get_output_and_grid(\n output, k, stride_this_level, xin[0].type()\n )\n x_shifts.append(grid[:, :, 0])\n y_shifts.append(grid[:, :, 1])\n expanded_strides.append(\n torch.zeros(1, grid.shape[1])\n .fill_(stride_this_level)\n .type_as(xin[0])\n )\n if self.use_l1:\n batch_size = reg_output.shape[0]\n hsize, wsize = reg_output.shape[-2:]\n reg_output = 
reg_output.view(\n batch_size, 1, 4, hsize, wsize\n )\n reg_output = reg_output.permute(0, 1, 3, 4, 2).reshape(\n batch_size, -1, 4\n )\n origin_preds.append(reg_output.clone())\n\n else:\n output = torch.cat(\n [reg_output, obj_output.sigmoid(), cls_output.sigmoid()], 1\n )\n\n outputs.append(output)\n\n if self.training:\n return self.get_losses(\n imgs,\n x_shifts,\n y_shifts,\n expanded_strides,\n labels,\n torch.cat(outputs, 1),\n origin_preds,\n dtype=xin[0].dtype,\n )\n else:\n self.hw = [x.shape[-2:] for x in outputs]\n # [batch, n_anchors_all, 85]\n outputs = torch.cat(\n [x.flatten(start_dim=2) for x in outputs], dim=2\n ).permute(0, 2, 1)\n if self.decode_in_inference:\n return self.decode_outputs(outputs, dtype=xin[0].type())\n else:\n return outputs\n\n def get_output_and_grid(self, output, k, stride, dtype):\n grid = self.grids[k]\n\n batch_size = output.shape[0]\n n_ch = 5 + self.num_classes\n hsize, wsize = output.shape[-2:]\n if grid.shape[2:4] != output.shape[2:4]:\n yv, xv = meshgrid([torch.arange(hsize), torch.arange(wsize)])\n grid = torch.stack((xv, yv), 2).view(1, 1, hsize, wsize, 2).type(dtype)\n self.grids[k] = grid\n\n output = output.view(batch_size, 1, n_ch, hsize, wsize)\n output = output.permute(0, 1, 3, 4, 2).reshape(\n batch_size, hsize * wsize, -1\n )\n grid = grid.view(1, -1, 2)\n output[..., :2] = (output[..., :2] + grid) * stride\n output[..., 2:4] = torch.exp(output[..., 2:4]) * stride\n return output, grid\n\n def decode_outputs(self, outputs, dtype):\n grids = []\n strides = []\n for (hsize, wsize), stride in zip(self.hw, self.strides):\n yv, xv = meshgrid([torch.arange(hsize), torch.arange(wsize)])\n grid = torch.stack((xv, yv), 2).view(1, -1, 2)\n grids.append(grid)\n shape = grid.shape[:2]\n strides.append(torch.full((*shape, 1), stride))\n\n grids = torch.cat(grids, dim=1).type(dtype)\n strides = torch.cat(strides, dim=1).type(dtype)\n\n outputs = torch.cat([\n (outputs[..., 0:2] + grids) * strides,\n torch.exp(outputs[..., 2:4]) * strides,\n outputs[..., 4:]\n ], dim=-1)\n return outputs\n\n def get_losses(\n self,\n imgs,\n x_shifts,\n y_shifts,\n expanded_strides,\n labels,\n outputs,\n origin_preds,\n dtype,\n ):\n bbox_preds = outputs[:, :, :4] # [batch, n_anchors_all, 4]\n obj_preds = outputs[:, :, 4:5] # [batch, n_anchors_all, 1]\n cls_preds = outputs[:, :, 5:] # [batch, n_anchors_all, n_cls]\n\n # calculate targets\n nlabel = (labels.sum(dim=2) > 0).sum(dim=1) # number of objects\n\n total_num_anchors = outputs.shape[1]\n x_shifts = torch.cat(x_shifts, 1) # [1, n_anchors_all]\n y_shifts = torch.cat(y_shifts, 1) # [1, n_anchors_all]\n expanded_strides = torch.cat(expanded_strides, 1)\n if self.use_l1:\n origin_preds = torch.cat(origin_preds, 1)\n\n cls_targets = []\n reg_targets = []\n l1_targets = []\n obj_targets = []\n fg_masks = []\n\n num_fg = 0.0\n num_gts = 0.0\n\n for batch_idx in range(outputs.shape[0]):\n num_gt = int(nlabel[batch_idx])\n num_gts += num_gt\n if num_gt == 0:\n cls_target = outputs.new_zeros((0, self.num_classes))\n reg_target = outputs.new_zeros((0, 4))\n l1_target = outputs.new_zeros((0, 4))\n obj_target = outputs.new_zeros((total_num_anchors, 1))\n fg_mask = outputs.new_zeros(total_num_anchors).bool()\n else:\n gt_bboxes_per_image = labels[batch_idx, :num_gt, 1:5]\n gt_classes = labels[batch_idx, :num_gt, 0]\n bboxes_preds_per_image = bbox_preds[batch_idx]\n\n try:\n (\n gt_matched_classes,\n fg_mask,\n pred_ious_this_matching,\n matched_gt_inds,\n num_fg_img,\n ) = self.get_assignments( # noqa\n batch_idx,\n 
num_gt,\n gt_bboxes_per_image,\n gt_classes,\n bboxes_preds_per_image,\n expanded_strides,\n x_shifts,\n y_shifts,\n cls_preds,\n obj_preds,\n )\n except RuntimeError as e:\n # TODO: the string might change, consider a better way\n if \"CUDA out of memory. \" not in str(e):\n raise # RuntimeError might not caused by CUDA OOM\n\n logger.error(\n \"OOM RuntimeError is raised due to the huge memory cost during label assignment. \\\n CPU mode is applied in this batch. If you want to avoid this issue, \\\n try to reduce the batch size or image size.\"\n )\n torch.cuda.empty_cache()\n (\n gt_matched_classes,\n fg_mask,\n pred_ious_this_matching,\n matched_gt_inds,\n num_fg_img,\n ) = self.get_assignments( # noqa\n batch_idx,\n num_gt,\n gt_bboxes_per_image,\n gt_classes,\n bboxes_preds_per_image,\n expanded_strides,\n x_shifts,\n y_shifts,\n cls_preds,\n obj_preds,\n \"cpu\",\n )\n\n torch.cuda.empty_cache()\n num_fg += num_fg_img\n\n cls_target = F.one_hot(\n gt_matched_classes.to(torch.int64), self.num_classes\n ) * pred_ious_this_matching.unsqueeze(-1)\n obj_target = fg_mask.unsqueeze(-1)\n reg_target = gt_bboxes_per_image[matched_gt_inds]\n if self.use_l1:\n l1_target = self.get_l1_target(\n outputs.new_zeros((num_fg_img, 4)),\n gt_bboxes_per_image[matched_gt_inds],\n expanded_strides[0][fg_mask],\n x_shifts=x_shifts[0][fg_mask],\n y_shifts=y_shifts[0][fg_mask],\n )\n\n cls_targets.append(cls_target)\n reg_targets.append(reg_target)\n obj_targets.append(obj_target.to(dtype))\n fg_masks.append(fg_mask)\n if self.use_l1:\n l1_targets.append(l1_target)\n\n cls_targets = torch.cat(cls_targets, 0)\n reg_targets = torch.cat(reg_targets, 0)\n obj_targets = torch.cat(obj_targets, 0)\n fg_masks = torch.cat(fg_masks, 0)\n if self.use_l1:\n l1_targets = torch.cat(l1_targets, 0)\n\n num_fg = max(num_fg, 1)\n loss_iou = (\n self.iou_loss(bbox_preds.view(-1, 4)[fg_masks], reg_targets)\n ).sum() / num_fg\n loss_obj = (\n self.bcewithlog_loss(obj_preds.view(-1, 1), obj_targets)\n ).sum() / num_fg\n loss_cls = (\n self.bcewithlog_loss(\n cls_preds.view(-1, self.num_classes)[fg_masks], cls_targets\n )\n ).sum() / num_fg\n if self.use_l1:\n loss_l1 = (\n self.l1_loss(origin_preds.view(-1, 4)[fg_masks], l1_targets)\n ).sum() / num_fg\n else:\n loss_l1 = 0.0\n\n reg_weight = 5.0\n loss = reg_weight * loss_iou + loss_obj + loss_cls + loss_l1\n\n return (\n loss,\n reg_weight * loss_iou,\n loss_obj,\n loss_cls,\n loss_l1,\n num_fg / max(num_gts, 1),\n )\n\n def get_l1_target(self, l1_target, gt, stride, x_shifts, y_shifts, eps=1e-8):\n l1_target[:, 0] = gt[:, 0] / stride - x_shifts\n l1_target[:, 1] = gt[:, 1] / stride - y_shifts\n l1_target[:, 2] = torch.log(gt[:, 2] / stride + eps)\n l1_target[:, 3] = torch.log(gt[:, 3] / stride + eps)\n return l1_target\n\n @torch.no_grad()\n def get_assignments(\n self,\n batch_idx,\n num_gt,\n gt_bboxes_per_image,\n gt_classes,\n bboxes_preds_per_image,\n expanded_strides,\n x_shifts,\n y_shifts,\n cls_preds,\n obj_preds,\n mode=\"gpu\",\n ):\n\n if mode == \"cpu\":\n print(\"-----------Using CPU for the Current Batch-------------\")\n gt_bboxes_per_image = gt_bboxes_per_image.cpu().float()\n bboxes_preds_per_image = bboxes_preds_per_image.cpu().float()\n gt_classes = gt_classes.cpu().float()\n expanded_strides = expanded_strides.cpu().float()\n x_shifts = x_shifts.cpu()\n y_shifts = y_shifts.cpu()\n\n fg_mask, geometry_relation = self.get_geometry_constraint(\n gt_bboxes_per_image,\n expanded_strides,\n x_shifts,\n y_shifts,\n )\n \n # NOTE: Fix `selected index k out of 
range`\n npa: int = fg_mask.sum().item() # number of positive anchors\n if npa == 0:\n gt_matched_classes = torch.zeros(0, device=fg_mask.device).long()\n pred_ious_this_matching = torch.rand(0, device=fg_mask.device)\n matched_gt_inds = gt_matched_classes\n num_fg = npa\n\n if mode == \"cpu\":\n gt_matched_classes = gt_matched_classes.cuda()\n fg_mask = fg_mask.cuda()\n pred_ious_this_matching = pred_ious_this_matching.cuda()\n matched_gt_inds = matched_gt_inds.cuda()\n num_fg = num_fg.cuda()\n\n return (\n gt_matched_classes,\n fg_mask,\n pred_ious_this_matching,\n matched_gt_inds,\n num_fg,\n )\n \n bboxes_preds_per_image = bboxes_preds_per_image[fg_mask]\n cls_preds_ = cls_preds[batch_idx][fg_mask]\n obj_preds_ = obj_preds[batch_idx][fg_mask]\n num_in_boxes_anchor = bboxes_preds_per_image.shape[0]\n\n if mode == \"cpu\":\n gt_bboxes_per_image = gt_bboxes_per_image.cpu()\n bboxes_preds_per_image = bboxes_preds_per_image.cpu()\n\n pair_wise_ious = bboxes_iou(gt_bboxes_per_image, bboxes_preds_per_image, False)\n\n gt_cls_per_image = (\n F.one_hot(gt_classes.to(torch.int64), self.num_classes)\n .float()\n )\n pair_wise_ious_loss = -torch.log(pair_wise_ious + 1e-8)\n\n if mode == \"cpu\":\n cls_preds_, obj_preds_ = cls_preds_.cpu(), obj_preds_.cpu()\n\n with torch.cuda.amp.autocast(enabled=False):\n cls_preds_ = (\n cls_preds_.float().sigmoid_() * obj_preds_.float().sigmoid_()\n ).sqrt()\n pair_wise_cls_loss = F.binary_cross_entropy(\n cls_preds_.unsqueeze(0).repeat(num_gt, 1, 1),\n gt_cls_per_image.unsqueeze(1).repeat(1, num_in_boxes_anchor, 1),\n reduction=\"none\"\n ).sum(-1)\n del cls_preds_\n\n cost = (\n pair_wise_cls_loss\n + 3.0 * pair_wise_ious_loss\n + float(1e6) * (~geometry_relation)\n )\n\n (\n num_fg,\n gt_matched_classes,\n pred_ious_this_matching,\n matched_gt_inds,\n ) = self.simota_matching(cost, pair_wise_ious, gt_classes, num_gt, fg_mask)\n del pair_wise_cls_loss, cost, pair_wise_ious, pair_wise_ious_loss\n\n if mode == \"cpu\":\n gt_matched_classes = gt_matched_classes.cuda()\n fg_mask = fg_mask.cuda()\n pred_ious_this_matching = pred_ious_this_matching.cuda()\n matched_gt_inds = matched_gt_inds.cuda()\n\n return (\n gt_matched_classes,\n fg_mask,\n pred_ious_this_matching,\n matched_gt_inds,\n num_fg,\n )\n\n def get_geometry_constraint(\n self, gt_bboxes_per_image, expanded_strides, x_shifts, y_shifts,\n ):\n \"\"\"\n Calculate whether the center of an object is located in a fixed range of\n an anchor. This is used to avert inappropriate matching. 
It can also reduce\n the number of candidate anchors so that the GPU memory is saved.\n \"\"\"\n expanded_strides_per_image = expanded_strides[0]\n x_centers_per_image = ((x_shifts[0] + 0.5) * expanded_strides_per_image).unsqueeze(0)\n y_centers_per_image = ((y_shifts[0] + 0.5) * expanded_strides_per_image).unsqueeze(0)\n\n # in fixed center\n center_radius = 1.5\n center_dist = expanded_strides_per_image.unsqueeze(0) * center_radius\n gt_bboxes_per_image_l = (gt_bboxes_per_image[:, 0:1]) - center_dist\n gt_bboxes_per_image_r = (gt_bboxes_per_image[:, 0:1]) + center_dist\n gt_bboxes_per_image_t = (gt_bboxes_per_image[:, 1:2]) - center_dist\n gt_bboxes_per_image_b = (gt_bboxes_per_image[:, 1:2]) + center_dist\n\n c_l = x_centers_per_image - gt_bboxes_per_image_l\n c_r = gt_bboxes_per_image_r - x_centers_per_image\n c_t = y_centers_per_image - gt_bboxes_per_image_t\n c_b = gt_bboxes_per_image_b - y_centers_per_image\n center_deltas = torch.stack([c_l, c_t, c_r, c_b], 2)\n is_in_centers = center_deltas.min(dim=-1).values > 0.0\n anchor_filter = is_in_centers.sum(dim=0) > 0\n geometry_relation = is_in_centers[:, anchor_filter]\n\n return anchor_filter, geometry_relation\n\n def simota_matching(self, cost, pair_wise_ious, gt_classes, num_gt, fg_mask):\n matching_matrix = torch.zeros_like(cost, dtype=torch.uint8)\n\n n_candidate_k = min(10, pair_wise_ious.size(1))\n # close augmention like mosaic will core when dt equals 0\n # https://github.com/Megvii-BaseDetection/YOLOX/issues/778\n topk_ious, _ = torch.topk(pair_wise_ious, n_candidate_k, dim=1)\n dynamic_ks = torch.clamp(topk_ious.sum(1).int(), min=1)\n for gt_idx in range(num_gt):\n _, pos_idx = torch.topk(\n cost[gt_idx], k=dynamic_ks[gt_idx], largest=False\n )\n matching_matrix[gt_idx][pos_idx] = 1\n\n del topk_ious, dynamic_ks, pos_idx\n\n anchor_matching_gt = matching_matrix.sum(0)\n # deal with the case that one anchor matches multiple ground-truths\n if anchor_matching_gt.max() > 1:\n multiple_match_mask = anchor_matching_gt > 1\n _, cost_argmin = torch.min(cost[:, multiple_match_mask], dim=0)\n matching_matrix[:, multiple_match_mask] *= 0\n matching_matrix[cost_argmin, multiple_match_mask] = 1\n fg_mask_inboxes = anchor_matching_gt > 0\n num_fg = fg_mask_inboxes.sum().item()\n\n fg_mask[fg_mask.clone()] = fg_mask_inboxes\n\n matched_gt_inds = matching_matrix[:, fg_mask_inboxes].argmax(0)\n gt_matched_classes = gt_classes[matched_gt_inds]\n\n pred_ious_this_matching = (matching_matrix * pair_wise_ious).sum(0)[\n fg_mask_inboxes\n ]\n return num_fg, gt_matched_classes, pred_ious_this_matching, matched_gt_inds\n\n def visualize_assign_result(self, xin, labels=None, imgs=None, save_prefix=\"assign_vis_\"):\n # original forward logic\n outputs, x_shifts, y_shifts, expanded_strides = [], [], [], []\n # TODO: use forward logic here.\n\n for k, (cls_conv, reg_conv, stride_this_level, x) in enumerate(\n zip(self.cls_convs, self.reg_convs, self.strides, xin[0])\n ):\n x = self.stems[k](x)\n cls_x = x\n reg_x = x\n\n cls_feat = cls_conv(cls_x)\n cls_output = self.cls_preds[k](cls_feat)\n reg_feat = reg_conv(reg_x)\n reg_output = self.reg_preds[k](reg_feat)\n obj_output = self.obj_preds[k](reg_feat)\n\n output = torch.cat([reg_output, obj_output, cls_output], 1)\n output, grid = self.get_output_and_grid(output, k, stride_this_level, xin[0][0].type())\n x_shifts.append(grid[:, :, 0])\n y_shifts.append(grid[:, :, 1])\n expanded_strides.append(\n torch.full((1, grid.shape[1]), stride_this_level).type_as(xin[0][0])\n )\n outputs.append(output)\n\n 
outputs = torch.cat(outputs, 1)\n bbox_preds = outputs[:, :, :4] # [batch, n_anchors_all, 4]\n obj_preds = outputs[:, :, 4:5] # [batch, n_anchors_all, 1]\n cls_preds = outputs[:, :, 5:] # [batch, n_anchors_all, n_cls]\n\n # calculate targets\n total_num_anchors = outputs.shape[1]\n x_shifts = torch.cat(x_shifts, 1) # [1, n_anchors_all]\n y_shifts = torch.cat(y_shifts, 1) # [1, n_anchors_all]\n expanded_strides = torch.cat(expanded_strides, 1)\n\n nlabel = (labels.sum(dim=2) > 0).sum(dim=1) # number of objects\n for batch_idx, (img, num_gt, label) in enumerate(zip(imgs, nlabel, labels)):\n img = imgs[batch_idx].permute(1, 2, 0).to(torch.uint8)\n num_gt = int(num_gt)\n if num_gt == 0:\n fg_mask = outputs.new_zeros(total_num_anchors).bool()\n else:\n gt_bboxes_per_image = label[:num_gt, 1:5]\n gt_classes = label[:num_gt, 0]\n bboxes_preds_per_image = bbox_preds[batch_idx]\n _, fg_mask, _, matched_gt_inds, _ = self.get_assignments( # noqa\n batch_idx, num_gt, gt_bboxes_per_image, gt_classes,\n bboxes_preds_per_image, expanded_strides, x_shifts,\n y_shifts, cls_preds, obj_preds,\n )\n\n img = img.cpu().numpy().copy() # copy is crucial here\n coords = torch.stack([\n ((x_shifts + 0.5) * expanded_strides).flatten()[fg_mask],\n ((y_shifts + 0.5) * expanded_strides).flatten()[fg_mask],\n ], 1)\n\n xyxy_boxes = cxcywh2xyxy(gt_bboxes_per_image)\n save_name = save_prefix + str(batch_idx) + \".png\"\n img = visualize_assign(img, xyxy_boxes, coords, matched_gt_inds, save_name)\n logger.info(f\"save img to {save_name}\")"
},
{
"identifier": "YOLOPAFPN",
"path": "yolox/models/yolo_pafpn.py",
"snippet": "class YOLOPAFPN(nn.Module):\n \"\"\"\n YOLOv3 model. Darknet 53 is the default backbone of this model.\n \"\"\"\n\n def __init__(\n self,\n depth=1.0,\n width=1.0,\n in_features=(\"dark3\", \"dark4\", \"dark5\"),\n in_channels=[256, 512, 1024],\n depthwise=False,\n act=\"silu\",\n ):\n super().__init__()\n self.backbone = CSPDarknet(depth, width, depthwise=depthwise, act=act)\n self.in_features = in_features\n self.in_channels = in_channels\n Conv = DWConv if depthwise else BaseConv\n\n self.upsample = nn.Upsample(scale_factor=2, mode=\"nearest\")\n self.lateral_conv0 = BaseConv(\n int(in_channels[2] * width), int(in_channels[1] * width), 1, 1, act=act\n )\n self.C3_p4 = CSPLayer(\n int(2 * in_channels[1] * width),\n int(in_channels[1] * width),\n round(3 * depth),\n False,\n depthwise=depthwise,\n act=act,\n ) # cat\n\n self.reduce_conv1 = BaseConv(\n int(in_channels[1] * width), int(in_channels[0] * width), 1, 1, act=act\n )\n self.C3_p3 = CSPLayer(\n int(2 * in_channels[0] * width),\n int(in_channels[0] * width),\n round(3 * depth),\n False,\n depthwise=depthwise,\n act=act,\n )\n\n # bottom-up conv\n self.bu_conv2 = Conv(\n int(in_channels[0] * width), int(in_channels[0] * width), 3, 2, act=act\n )\n self.C3_n3 = CSPLayer(\n int(2 * in_channels[0] * width),\n int(in_channels[1] * width),\n round(3 * depth),\n False,\n depthwise=depthwise,\n act=act,\n )\n\n # bottom-up conv\n self.bu_conv1 = Conv(\n int(in_channels[1] * width), int(in_channels[1] * width), 3, 2, act=act\n )\n self.C3_n4 = CSPLayer(\n int(2 * in_channels[1] * width),\n int(in_channels[2] * width),\n round(3 * depth),\n False,\n depthwise=depthwise,\n act=act,\n )\n\n def forward(self, input):\n \"\"\"\n Args:\n inputs: input images.\n\n Returns:\n Tuple[Tensor]: FPN feature.\n \"\"\"\n\n # backbone\n out_features, apollo_feature = self.backbone(input)\n features = [out_features[f] for f in self.in_features]\n [x2, x1, x0] = features\n\n fpn_out0 = self.lateral_conv0(x0) # 1024->512/32\n f_out0 = self.upsample(fpn_out0) # 512/16\n f_out0 = torch.cat([f_out0, x1], 1) # 512->1024/16\n f_out0 = self.C3_p4(f_out0) # 1024->512/16\n\n fpn_out1 = self.reduce_conv1(f_out0) # 512->256/16\n f_out1 = self.upsample(fpn_out1) # 256/8\n f_out1 = torch.cat([f_out1, x2], 1) # 256->512/8\n pan_out2 = self.C3_p3(f_out1) # 512->256/8\n\n p_out1 = self.bu_conv2(pan_out2) # 256->256/16\n p_out1 = torch.cat([p_out1, fpn_out1], 1) # 256->512/16\n pan_out1 = self.C3_n3(p_out1) # 512->512/16\n\n p_out0 = self.bu_conv1(pan_out1) # 512->512/32\n p_out0 = torch.cat([p_out0, fpn_out0], 1) # 512->1024/32\n pan_out0 = self.C3_n4(p_out0) # 1024->1024/32\n\n outputs = (pan_out2, pan_out1, pan_out0)\n # output fpn and feature map for apollo[dark2]\n return outputs, apollo_feature"
}
] | import torch.nn as nn
from .yolo_head import YOLOXHead
from .yolo_pafpn import YOLOPAFPN | 7,744 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright (c) Megvii Inc. All rights reserved.
class YOLOX(nn.Module):
"""
YOLOX model module. The module list is defined by create_yolov3_modules function.
The network returns loss values from three YOLO layers during training
and detection results during test.
"""
def __init__(self, backbone=None, head=None):
super().__init__()
if backbone is None:
backbone = YOLOPAFPN()
if head is None:
| #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright (c) Megvii Inc. All rights reserved.
class YOLOX(nn.Module):
"""
YOLOX model module. The module list is defined by create_yolov3_modules function.
The network returns loss values from three YOLO layers during training
and detection results during test.
"""
def __init__(self, backbone=None, head=None):
super().__init__()
if backbone is None:
backbone = YOLOPAFPN()
if head is None: | head = YOLOXHead(80) | 0 | 2023-11-08 07:07:24+00:00 | 12k |
ndiamant/spice | experiments/train_eval.py | [
{
"identifier": "ConditionalHist",
"path": "spice/conditional_histogram.py",
"snippet": "class ConditionalHist(BaseLightning):\n def __init__(\n self, input_dim: int, hidden_dim: int,\n max_iter: int, bins: torch.Tensor,\n y_min: float,\n lr: float = 1e-3, wd: float = 0,\n ):\n super().__init__()\n self.save_hyperparameters()\n self.module = nn.Sequential(\n MLP(input_dim, hidden=hidden_dim, n_hidden=1, output_dim=bins.shape[0]),\n )\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"log bin probabilities\"\"\"\n return torch.log_softmax(self.module(x), dim=-1)\n\n def log_likelihood(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:\n \"\"\"log likelihood of y | x\"\"\"\n bin_log_probs = self(x)\n return -F.nll_loss(bin_log_probs, y.squeeze(), reduction=\"none\")\n\n def likelihood(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:\n return self.log_likelihood(x, y).exp()\n\n def get_loss(self, batch: list[torch.Tensor], prefix: str) -> torch.Tensor:\n x, y = batch\n loss = -self.log_likelihood(x, y).mean()\n self.epoch_log(f\"{prefix}/loss\", loss)\n return loss\n\n @torch.no_grad()\n def find_prob_threshold(self, x_val: torch.Tensor, y_val: torch.Tensor, alpha: float) -> float:\n \"\"\"\n alpha: mis-classification rate\n anything above threshold in likelihood should be in the prediction set\n https://people.eecs.berkeley.edu/~angelopoulos/publications/downloads/gentle_intro_conformal_dfuq.pdf\n \"\"\"\n n = len(y_val)\n q_level = math.ceil((n + 1) * (1 - alpha)) / n\n cal_scores = 1 - self.likelihood(x_val.to(self.device), y_val.to(self.device))\n q_hat = torch.quantile(cal_scores, q_level, interpolation=\"higher\").item()\n return 1 - q_hat\n\n @torch.no_grad()\n def get_extended_bins(self):\n extended_bins = torch.empty(self.hparams.bins.shape[0] + 1)\n extended_bins[0] = self.hparams.y_min\n extended_bins[1:] = self.hparams.bins\n return extended_bins\n\n @torch.no_grad()\n def get_bin_widths(self) -> torch.Tensor:\n extended_bins = self.get_extended_bins()\n return extended_bins[1:] - extended_bins[:-1]\n\n @torch.no_grad()\n def get_metrics(\n self, x_test: torch.Tensor, y_test: torch.Tensor, threshold: float,\n ) -> dict[str, float]:\n test_prob = self(x_test.to(self.device)).exp().to(y_test.device)\n prediction_set = test_prob > threshold\n covered = (\n (\n F.one_hot(y_test.squeeze(), num_classes=self.hparams.bins.shape[0])\n & prediction_set\n ).any(dim=1)\n ).float()\n bin_sizes = self.get_bin_widths()\n sizes = (bin_sizes.unsqueeze(0) * prediction_set).sum(dim=1)\n return compute_conformal_metrics(x_test, y_test.float() / y_test.max().item(), sizes, covered)\n\n @torch.no_grad()\n def get_hpd_threshold(self, x_val: torch.Tensor, y_val: torch.Tensor, alpha: float) -> float:\n x_val = x_val.to(self.device)\n y_val = y_val.to(self.device)\n all_probs = self(x_val).exp()\n y_probs = all_probs.gather(index=y_val, dim=1)\n bin_sizes = self.get_bin_widths()\n score = integrate_categorical_below_threshold(all_probs.cpu(), y_probs.cpu(), bin_sizes.cpu())\n return -score_to_q_hat(-score, alpha)\n\n @torch.no_grad()\n def get_hpd_metrics(\n self, x_test: torch.Tensor, y_test: torch.Tensor, threshold: float,\n ) -> dict[str, float]:\n # HPD\n probs = self(x_test.to(self.device)).exp().cpu()\n bin_sizes = self.get_bin_widths()\n hpd_cutoffs = find_hpd_cutoffs(probs, bin_sizes.cpu(), threshold)\n bin_mask = probs >= hpd_cutoffs.unsqueeze(1)\n # size\n sizes = (bin_sizes.unsqueeze(0) * bin_mask).sum(dim=1)\n y_onehot = F.one_hot(y_test.squeeze(), num_classes=self.hparams.bins.shape[0])\n covered = (y_onehot & bin_mask).any(dim=1).float()\n # 
coverage\n metrics = compute_conformal_metrics(x_test, y_test.float() / y_test.max().item(), sizes, covered)\n metrics = {\n f\"hpd_{name}\": val for name, val in metrics.items()\n }\n return metrics"
},
{
"identifier": "CHR",
"path": "spice/chr.py",
"snippet": "class CHR(BaseLightning):\n def __init__(\n self, input_dim: int, hidden_dim: int,\n max_iter: int, n_bins: int,\n lr: float = 1e-3, wd: float = 0,\n y_min: float = 0, y_max: float = 1,\n hist_steps: int = 1000,\n ):\n super().__init__()\n self.save_hyperparameters()\n self.mlp = MLP(input_dim, hidden=hidden_dim, n_hidden=1, output_dim=n_bins)\n self.register_buffer(\"quantiles\", torch.linspace(0.01, 0.99, n_bins))\n self.loss_fn = AllQuantileLoss(quantiles=self.quantiles)\n\n def forward(self, x: torch.Tensor, sort: bool = True) -> torch.Tensor:\n y = self.mlp(x)\n if sort:\n return y.sort(dim=-1).values\n return y\n\n def get_loss(self, batch: list[torch.Tensor], prefix: str) -> torch.Tensor:\n x, y = batch\n pred = self(x)\n loss = self.loss_fn(pred, y)\n self.epoch_log(f\"{prefix}/loss\", loss)\n return loss\n\n @torch.no_grad()\n def calibrate(self, x_val: torch.Tensor, y_val: torch.Tensor, alpha: float):\n chr_ = CHRCalibrate(self.quantiles.squeeze().cpu().numpy(), randomize=False)\n q_calib = self(x_val.to(self.device)).cpu().numpy()\n chr_.calibrate(q_calib=q_calib, Y=y_val.squeeze().cpu().numpy(), alpha=alpha)\n return chr_\n\n @torch.no_grad()\n def get_metrics(\n self, x_test: torch.Tensor, y_test: torch.Tensor, chr: CHRCalibrate,\n ) -> dict[str, float]:\n q_new = self(x_test.to(self.device)).cpu().numpy()\n bands = chr.predict(q_new=q_new)\n y = y_test.squeeze().numpy()\n covered = ((y >= bands[:, 0]) & (y <= bands[:, 1]))\n sizes = torch.tensor(bands[:, 1] - bands[:, 0], dtype=y_test.dtype)\n return compute_conformal_metrics(x_test, y_test, sizes=sizes, covered=torch.tensor(covered))"
},
{
"identifier": "RegressionData",
"path": "spice/datasets.py",
"snippet": "class RegressionData(LightningDataModule):\n def __init__(\n self, name: str, y_scaling: str = \"min_max\",\n batch_size: int = 512, discretize_n_bins: int = None,\n train_seed: int = 57771, smart_discretize: bool = True,\n ):\n super().__init__()\n x, y = get_dataset(name)\n y = y.reshape(y.shape[0], 1)\n np.random.seed(112123)\n n = y.shape[0]\n # train, val, calibrate, val calibration, test\n dset_idx = np.random.choice(list(range(5)), p=[0.5, 0.1, 0.1, 0.1, 0.2], size=(n,))\n test_idx = dset_idx == 4\n # shuffle the train split based on the seed\n np.random.seed(train_seed)\n dset_idx[~test_idx] = np.random.permutation(dset_idx[~test_idx])\n train_idx = dset_idx == 0\n val_idx = dset_idx == 1\n cal_idx = dset_idx == 2\n cal_val_idx = dset_idx == 3\n # scaling\n y_scaler = {\n \"min_max\": MinMaxScaler(feature_range=(0, 1 - 1e-5)),\n \"std\": StandardScaler(),\n }[y_scaling]\n y_train = y[train_idx]\n y_scaler.fit(y_train)\n x_train = x[train_idx]\n x_scaler = StandardScaler()\n x_scaler.fit(x_train)\n x = torch.tensor(x_scaler.transform(x), dtype=torch.float32)\n y = torch.tensor(y_scaler.transform(y), dtype=torch.float32)\n # discretize for histogram case\n self.bins = None\n if discretize_n_bins is not None:\n transformed_train_y = torch.tensor(y_scaler.transform(y_train))\n if smart_discretize:\n self.bins = select_bins(transformed_train_y, discretize_n_bins)\n else:\n self.bins = torch.linspace(\n 1 / discretize_n_bins, 1, discretize_n_bins,\n )\n y = discretize(y, self.bins)\n train_dset = TensorDataset(x[train_idx], y[train_idx])\n self.train_dset = train_dset\n self.val_dset = TensorDataset(x[val_idx], y[val_idx])\n self.cal_dset = TensorDataset(x[cal_idx], y[cal_idx])\n self.cal_val_dset = TensorDataset(x[cal_val_idx], y[cal_val_idx])\n self.test_dset = TensorDataset(x[test_idx], y[test_idx])\n # save stuff\n self.batch_size = batch_size\n self.x_scaler = x_scaler\n self.y_scaler = y_scaler\n self.y_min_max_scaler = MinMaxScaler(feature_range=(0, 1 - 1e-5)).fit(\n train_dset.tensors[1], # used to keep size evaluations on the same scale\n )\n self.test_idx = test_idx\n\n def train_dataloader(self) -> DataLoader:\n return DataLoader(self.train_dset, shuffle=True, batch_size=self.batch_size)\n\n def val_dataloader(self) -> DataLoader:\n return DataLoader(self.val_dset, shuffle=True, batch_size=self.batch_size)\n\n def test_dataloader(self) -> DataLoader:\n return DataLoader(self.test_dset, shuffle=False, batch_size=self.batch_size)\n\n def train_batches(self, max_batches: int = 100) -> int:\n return min(max_batches, len(self.train_dataloader()))\n\n def val_batches(self, max_batches: int = 10) -> int:\n return min(max_batches, len(self.val_dataloader()))"
},
{
"identifier": "CQR",
"path": "spice/cqr.py",
"snippet": "class CQR(BaseLightning):\n \"\"\"conformalized quantile regression\"\"\"\n def __init__(\n self, input_dim: int, hidden_dim: int,\n low_quantile: float, high_quantile: float,\n max_iter: int, lr: float = 1e-3, wd: float = 0,\n ):\n super().__init__()\n self.save_hyperparameters()\n self.module = MLP(\n input_dim=input_dim, hidden=hidden_dim, n_hidden=1, output_dim=2,\n )\n self.quantiles = torch.tensor([low_quantile, high_quantile])\n self.loss_fn = AllQuantileLoss(quantiles=self.quantiles)\n\n def forward(self, x, sort: bool = False) -> torch.Tensor:\n qs = self.module(x)\n if sort:\n qs = qs.sort(dim=-1).values\n return qs\n\n def get_loss(self, batch: list[torch.Tensor], prefix: str) -> torch.Tensor:\n x, y = batch\n y_pred = self(x)\n loss = self.loss_fn(y_pred, y)\n self.epoch_log(f\"{prefix}/loss\", loss)\n return loss\n\n @torch.no_grad()\n def conformity_score(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:\n pred_quantiles = self(x, sort=True)\n lower = pred_quantiles[:, 0] - y.squeeze()\n upper = y.squeeze() - pred_quantiles[:, 1]\n return torch.maximum(lower, upper)\n\n @torch.no_grad()\n def get_q_hat(self, x_val: torch.Tensor, y_val: torch.Tensor, alpha: float) -> float:\n conf_score = self.conformity_score(x_val.to(self.device), y_val.to(self.device))\n q_hat = score_to_q_hat(conf_score, alpha)\n return q_hat\n\n @torch.no_grad()\n def get_metrics(\n self, x_test: torch.Tensor, y_test: torch.Tensor, q_hat: float,\n ) -> dict[str, float]:\n pred_quantiles = self(x_test.to(self.device), sort=True).cpu()\n left_interval = pred_quantiles[:, 0] - q_hat\n right_interval = pred_quantiles[:, 1] + q_hat\n covered = (\n (y_test.squeeze() > left_interval)\n & (y_test.squeeze() < right_interval)\n )\n sizes = (right_interval - left_interval)\n return compute_conformal_metrics(x_test, y_test, sizes=sizes, covered=covered)\n\n @torch.no_grad()\n def prediction_interval(\n self, x: torch.Tensor, conformity_score: torch.Tensor,\n alpha: float,\n ) -> torch.Tensor:\n \"\"\"alpha is mis-coverage rate\"\"\"\n pred_quantiles = self(x, sort=True)\n n_calibrate = conformity_score.shape[0]\n quantile = conformity_score.quantile(\n (1 - alpha) * (1 + 1 / n_calibrate)\n )\n pred_quantiles[:, 0] -= quantile\n pred_quantiles[:, 1] += quantile\n return pred_quantiles"
},
{
"identifier": "PCP",
"path": "spice/pcp.py",
"snippet": "class PCP(BaseLightning):\n def __init__(\n self, input_dim: int, hidden_dim: int,\n max_iter: int, lr: float = 1e-3, wd: float = 0,\n n_mixture: int = 10,\n ):\n super().__init__()\n self.save_hyperparameters()\n self.cond_gmm = ConditionalGMM(input_dim, hidden_dim, n_mixture)\n\n def forward(self, x: torch.Tensor) -> D.MixtureSameFamily:\n return self.cond_gmm(x)\n\n def get_loss(self, batch: list[torch.Tensor], prefix: str) -> torch.Tensor:\n x, y = batch\n gmm = self(x)\n log_p = gmm.log_prob(y.squeeze())\n loss = -log_p.nanmean()\n self.epoch_log(f\"{prefix}/loss\", loss)\n return loss\n\n @torch.no_grad()\n def get_filtered_samples(\n self, x: torch.Tensor, k: int = 50, beta: float = 0.2,\n ) -> torch.Tensor:\n gmm = self(x)\n samples = gmm.sample((k,)) # K = 50 x batch_size\n # filter\n densities = gmm.log_prob(samples) # K = 50 x batch_size\n densities_argsort = densities.argsort(dim=0)\n n_filter = int(k * beta)\n keep_idx = densities_argsort[n_filter:] # k = 40 x batch_size\n filtered_samples = samples[keep_idx, torch.arange(x.shape[0])]\n return filtered_samples.T\n\n @torch.no_grad()\n def get_q_hat(self, x_val: torch.Tensor, y_val: torch.Tensor, alpha: float) -> float:\n # https://github.com/Zhendong-Wang/Probabilistic-Conformal-Prediction/blob/54a31cbfe0c87182cbc4351f1d12a59a65452a40/pcp/pcp.py#L28\n n = y_val.shape[0]\n # sample\n filtered_samples = self.get_filtered_samples(x_val.to(self.device)).to(y_val.device)\n # conformal\n score = (filtered_samples - y_val.view(n, 1)).abs().min(dim=1).values\n return score_to_q_hat(score, alpha)\n\n @torch.no_grad()\n def get_prediction_intervals(\n self, x: torch.Tensor, q_hat: float, parallel_workers: int = 0,\n ) -> list[Union]:\n # sample\n filtered_samples = self.get_filtered_samples(x).cpu()\n desc = \"calculating intervals from samples\"\n fn = partial(union_from_samples, q_hat=q_hat)\n fn_in = filtered_samples\n if parallel_workers:\n bands = process_map(\n fn, fn_in, max_workers=parallel_workers, desc=desc,\n chunksize=max(1, min(100, fn_in.shape[0] // (2 * parallel_workers)))\n )\n else:\n bands = list(tqdm(map(fn, fn_in), desc=desc, total=len(fn_in)))\n return bands\n\n @torch.no_grad()\n def get_metrics(\n self, x_test: torch.Tensor, y_test: torch.Tensor, q_hat: float,\n interval_workers: int = 0,\n ) -> dict[str, float]:\n intervals = self.get_prediction_intervals(x_test.to(self.device), q_hat, interval_workers)\n n = y_test.shape[0]\n covered = torch.zeros(n)\n sizes = torch.empty(n)\n for i, (union, yi) in enumerate(tqdm(\n zip(intervals, y_test),\n desc=\"calculating coverage and size\", total=n,\n )):\n sizes[i] = float(union.measure)\n if union.contains(yi.item()):\n covered[i] = 1\n return compute_conformal_metrics(\n x_test, y_test, sizes=sizes, covered=covered,\n )"
},
{
"identifier": "timestamp",
"path": "spice/utils.py",
"snippet": "def timestamp() -> str:\n now = datetime.now()\n return now.strftime(\"%Y-%m-%d_%H-%M-%S-%f\")"
},
{
"identifier": "rename_metrics",
"path": "spice/utils.py",
"snippet": "def rename_metrics(metrics: dict[str, float], prefix: str, alpha: float) -> dict[str, float]:\n return {\n f\"{prefix}/{name}_at_{alpha}\": val\n for name, val in metrics.items()\n }"
},
{
"identifier": "WANDB_PROJECT",
"path": "spice/utils.py",
"snippet": "WANDB_PROJECT = \"spice\""
},
{
"identifier": "SPICEn2",
"path": "spice/spice_n2.py",
"snippet": "class SPICEn2(BaseLightning):\n def __init__(\n self, input_dim: int, hidden_dim: int, n_knots: int,\n learn_bin_widths: bool,\n max_iter: int, lr: float = 1e-3, wd: float = 0,\n smart_bin_init_w: torch.Tensor = None, smart_bin_init_h: torch.Tensor = None,\n min_f_bar_val: float = 1e-2,\n ):\n super().__init__()\n self.save_hyperparameters()\n self.encoder = nn.Sequential(\n MLP(input_dim, hidden=hidden_dim, n_hidden=0),\n )\n self.density = ConditionalQuadratic(\n hidden_dim, n_knots, learn_bin_widths=learn_bin_widths,\n min_f_bar_val=min_f_bar_val,\n bin_width_init=smart_bin_init_w, bin_height_init=smart_bin_init_h,\n )\n\n def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:\n z = self.encoder(x)\n return self.density(z, y.clip(0, 1 - 1e-3))\n\n def get_loss(self, batch: list[torch.Tensor], prefix: str) -> torch.Tensor:\n x, y = batch\n likelihood = self(x, y)\n self.epoch_log(f\"{prefix}/likelihood\", likelihood.mean())\n log_likelihood = never_nan_log(likelihood, eps=1e-5)\n self.epoch_log(f\"{prefix}/log_likelihood\", log_likelihood.mean())\n self.epoch_log(f\"{prefix}/log_likelihood_std\", log_likelihood.std(dim=0))\n self.epoch_log(f\"{prefix}/log_likelihood_min\", log_likelihood.min())\n self.epoch_log(f\"{prefix}/log_likelihood_max\", log_likelihood.max())\n loss = -log_likelihood.mean()\n self.epoch_log(f\"{prefix}/loss\", loss)\n return loss\n\n @torch.no_grad()\n def get_threshold(self, x_val: torch.Tensor, y_val: torch.Tensor, alpha: float) -> float:\n score = -self(x_val.to(self.device), y_val.to(self.device))\n q_hat = score_to_q_hat(score, alpha)\n return -q_hat\n\n @torch.no_grad()\n def get_intervals(self, x: torch.Tensor, cutoff: float) -> tuple[\n tuple[torch.Tensor, torch.Tensor], tuple[torch.Tensor, torch.Tensor, torch.Tensor],\n ]:\n z = self.encoder(x)\n (x0, x1), (a, b, c) = self.density.get_quadratic_coeffs(z)\n return (x0, x1), get_intervals(x0, x1, a, b, c, cutoff)\n\n @torch.no_grad()\n def get_metrics(\n self, x_test: torch.Tensor, y_test: torch.Tensor, threshold: float,\n ) -> dict[str, float]:\n test_likelihood = self(x_test.to(self.device), y_test.to(self.device))\n covered = (test_likelihood > threshold).float()\n (x0, x1), (left, right, inside) = self.get_intervals(x_test.to(self.device), threshold)\n sizes = get_interval_sizes(x0, x1, left, right, inside)\n metrics = compute_conformal_metrics(x_test, y_test, sizes, covered)\n metrics[\"approx_size\"] = self.approx_size(x_test, threshold)\n return metrics\n\n @torch.no_grad()\n def approx_size(self, x_test: torch.Tensor, threshold: float):\n y_approx_area = torch.linspace(0, 1, 1000, device=self.device).repeat((x_test.shape[0], 1))\n density_grid = self(\n x_test.to(self.device), y_approx_area,\n )\n return (density_grid > threshold).float().mean().item()\n\n @torch.no_grad()\n def get_hpd_threshold(self, x_val: torch.Tensor, y_val: torch.Tensor, alpha: float) -> float:\n x_val = x_val.to(self.device)\n y_val = y_val.to(self.device)\n z = self.encoder(x_val)\n (x0, x1), (a, b, c) = self.density.get_quadratic_coeffs(z)\n y_density = self(x_val, y_val)\n score = integrate_above_cutoff(x0, x1, a, b, c, y_density)\n q_hat = score_to_q_hat(score, alpha)\n return q_hat\n\n @torch.no_grad()\n def get_hpd_intervals(self, x: torch.Tensor, cutoff: float) -> tuple[\n tuple[torch.Tensor, torch.Tensor, torch.Tensor], tuple[torch.Tensor, torch.Tensor, torch.Tensor],\n ]:\n z = self.encoder(x.to(self.device))\n (x0, x1), (a, b, c) = self.density.get_quadratic_coeffs(z)\n hpd_cutoffs = 
find_hpd_cutoff(x0.to(self.device), x1.to(self.device), a, b, c, cutoff)\n return (x0, x1, hpd_cutoffs), get_intervals(x0, x1, a, b, c, hpd_cutoffs)\n\n @torch.no_grad()\n def get_hpd_metrics(\n self, x_test: torch.Tensor, y_test: torch.Tensor, threshold: float,\n ) -> dict[str, float]:\n (x0, x1, cutoffs), intervals = self.get_hpd_intervals(x_test, threshold)\n sizes = get_interval_sizes(x0, x1, *intervals)\n covered = y_in_interval(y_test.to(x0.device), x0, x1, *intervals)\n metrics = compute_conformal_metrics(x_test, y_test, sizes, covered)\n metrics[\"approx_size\"] = self.approx_size(x_test, cutoffs)\n metrics = {\n f\"hpd_{name}\": val for name, val in metrics.items()\n }\n return metrics"
},
{
"identifier": "smart_bin_init",
"path": "spice/spice_n2.py",
"snippet": "def smart_bin_init(y_train: torch.Tensor, n_knots: int) -> tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n return:\n x positions: n_knots - 1\n y_positions: n_knots - 1\n \"\"\"\n quantiles = unique_quantile(y_train.squeeze(), n_knots)\n heights = torch.histogram(y_train.squeeze(), quantiles, density=True).hist\n widths = quantiles[1:] - quantiles[:-1]\n return widths, heights"
},
{
"identifier": "SPICEn1",
"path": "spice/spice_n1.py",
"snippet": "class SPICEn1(BaseLightning):\n def __init__(\n self, input_dim: int, hidden_dim: int, n_knots: int,\n learn_bin_widths: bool,\n max_iter: int, lr: float = 1e-3, wd: float = 0,\n bin_width_init: torch.Tensor = None, bin_height_init: torch.Tensor = None,\n min_likelihood: float = 1e-2,\n ):\n super().__init__()\n self.save_hyperparameters()\n self.encoder = nn.Sequential(\n MLP(input_dim, hidden=hidden_dim, n_hidden=0),\n )\n self.density = ConditionalPiecewiseLinearDensity(\n hidden_dim, n_knots, learn_bin_widths=learn_bin_widths,\n min_likelihood=min_likelihood,\n bin_width_init=bin_width_init, bin_height_init=bin_height_init,\n )\n\n def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:\n z = self.encoder(x)\n return self.density(z, y)\n\n def get_loss(self, batch: list[torch.Tensor], prefix: str) -> torch.Tensor:\n x, y = batch\n likelihood = self(x, y)\n self.epoch_log(f\"{prefix}/likelihood\", likelihood.mean())\n log_likelihood = never_nan_log(likelihood, eps=1e-5)\n self.epoch_log(f\"{prefix}/log_likelihood\", log_likelihood.mean())\n self.epoch_log(f\"{prefix}/log_likelihood_std\", log_likelihood.std(dim=0).mean())\n self.epoch_log(f\"{prefix}/log_likelihood_min\", log_likelihood.min())\n self.epoch_log(f\"{prefix}/log_likelihood_max\", log_likelihood.max())\n loss = -log_likelihood.mean()\n self.epoch_log(f\"{prefix}/loss\", loss)\n return loss\n\n @torch.no_grad()\n def get_threshold(self, x_val: torch.Tensor, y_val: torch.Tensor, alpha: float) -> float:\n score = -self(x_val.to(self.device), y_val.to(self.device))\n q_hat = score_to_q_hat(score, alpha)\n return -q_hat\n\n @torch.no_grad()\n def get_intervals(self, x: torch.Tensor, cutoff: float) -> tuple[torch.Tensor, torch.Tensor]:\n z = self.encoder(x)\n knot_pos, knot_height = self.density.get_knot_pos_height(z)\n return get_intervals(knot_pos, knot_height, cutoff)\n\n @torch.no_grad()\n def get_metrics(\n self, x_test: torch.Tensor, y_test: torch.Tensor, threshold: float,\n ) -> dict[str, float]:\n test_likelihood = self(x_test.to(self.device), y_test.to(self.device))\n covered = (test_likelihood > threshold)\n left, right = self.get_intervals(x_test.to(self.device), threshold)\n sizes = get_interval_sizes(left, right)\n return compute_conformal_metrics(x_test, y_test, sizes, covered)\n\n @torch.no_grad()\n def get_hpd_threshold(self, x_val: torch.Tensor, y_val: torch.Tensor, alpha: float) -> float:\n x_val = x_val.to(self.device)\n y_val = y_val.to(self.device)\n z = self.encoder(x_val)\n knot_pos, knot_height = self.density.get_knot_pos_height(z)\n y_density = self(x_val, y_val)\n score = integrate_below_cutoff(knot_pos, knot_height, y_density)\n return -score_to_q_hat(-score, alpha)\n\n @torch.no_grad()\n def get_knots_and_hpd_cutoffs(self, x: torch.Tensor, cutoff: float) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n z = self.encoder(x.to(self.device))\n knot_pos, knot_height = self.density.get_knot_pos_height(z)\n hpd_cutoffs = find_hpd_cutoff(knot_pos.to(self.device), knot_height.to(self.device), cutoff)\n return knot_pos, knot_height, hpd_cutoffs\n\n @torch.no_grad()\n def get_hpd_intervals(self, x: torch.Tensor, cutoff: float) -> tuple[torch.Tensor, torch.Tensor]:\n knot_pos, knot_height, hpd_cutoffs = self.get_knots_and_hpd_cutoffs(x, cutoff)\n return get_intervals(knot_pos, knot_height, hpd_cutoffs)\n\n @torch.no_grad()\n def get_hpd_metrics(\n self, x_test: torch.Tensor, y_test: torch.Tensor, threshold: float,\n ) -> dict[str, float]:\n left, right = self.get_hpd_intervals(x_test, 
threshold)\n sizes = get_interval_sizes(left, right)\n covered = (\n (y_test >= left.cpu())\n & (y_test < right.cpu())\n ).any(dim=1)\n metrics = compute_conformal_metrics(x_test, y_test, sizes, covered)\n metrics = {\n f\"hpd_{name}\": val for name, val in metrics.items()\n }\n return metrics"
},
{
"identifier": "smart_bin_init",
"path": "spice/spice_n1.py",
"snippet": "@torch.no_grad()\ndef smart_bin_init(y_train: torch.Tensor, n_knots: int) -> tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n return:\n x positions: n_knots - 1\n y_positions: n_knots - 1\n \"\"\"\n quantiles = unique_quantile(y_train.squeeze(), n_knots + 1)\n heights = torch.histogram(y_train.squeeze(), bins=quantiles, density=True).hist\n final_heights = heights\n quantiles = unique_quantile(y_train.squeeze(), n_knots)\n widths = quantiles[1:] - quantiles[:-1]\n return widths, final_heights"
}
] | import argparse
import os
import wandb
from pytorch_lightning import seed_everything, Trainer
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping, LearningRateMonitor
from spice.conditional_histogram import ConditionalHist
from spice.chr import CHR
from spice.datasets import RegressionData
from spice.cqr import CQR
from spice.pcp import PCP
from spice.utils import timestamp, rename_metrics, WANDB_PROJECT
from spice.spice_n2 import SPICEn2, smart_bin_init
from spice.spice_n1 import SPICEn1
from spice.spice_n1 import smart_bin_init as spice_n1_smart_bin_init | 8,036 |
def setup_trainer_and_data(
name: str, wandb_log_dir: str,
epochs: int, version: str, checkpoint_folder: str,
dataset_name: str, seed: int,
y_scaling: str = "min_max", discretize_n_bins: int = None,
smart_discretize: bool = True,
) -> tuple[Trainer, WandbLogger, ModelCheckpoint, RegressionData]:
data = RegressionData(
dataset_name, train_seed=seed, y_scaling=y_scaling, discretize_n_bins=discretize_n_bins,
smart_discretize=smart_discretize,
)
logger = WandbLogger(
project=WANDB_PROJECT, save_dir=wandb_log_dir,
name=name, group=version,
version=f"{version}_{name}",
)
checkpoint = ModelCheckpoint(
dirpath=os.path.join(checkpoint_folder, name)
)
max_steps_per_epoch = 100
max_val_steps = 10
train_batches = data.train_batches(max_steps_per_epoch)
trainer = Trainer(
logger=logger,
callbacks=[
EarlyStopping(monitor="val/loss", patience=epochs // 4, mode="min"),
LearningRateMonitor(),
checkpoint,
],
accelerator="gpu", max_steps=epochs * max_steps_per_epoch,
check_val_every_n_epoch=1,
limit_train_batches=train_batches,
limit_val_batches=data.val_batches(max_val_steps),
enable_progress_bar=False,
gradient_clip_val=5,
log_every_n_steps=train_batches,
)
return trainer, logger, checkpoint, data
def run_conditional_histogram(
dataset_name: str,
lr: float, wd: float, epochs: int,
hidden: int,
n_bins: int,
seed: int,
alphas: list[float],
smart_bin_positions: bool,
# saving settings
checkpoint_folder: str, version: str, wandb_log_dir: str,
#
run_test: bool = False,
):
# set up data
ts = timestamp()
name = f"conditional_hist_version-{version}_{ts}"
trainer, logger, checkpoint, data = setup_trainer_and_data(
name=name, wandb_log_dir=wandb_log_dir, epochs=epochs, version=version,
dataset_name=dataset_name, seed=seed, checkpoint_folder=checkpoint_folder,
discretize_n_bins=n_bins, smart_discretize=smart_bin_positions,
)
seed_everything(seed)
wandb.config.update({
"dataset_name": dataset_name, "alphas": alphas, "model": "conditional_hist",
"n_bins": n_bins, "smart_bin_positions": smart_bin_positions,
"seed": seed,
})
# set up model
x_train, y_train = data.train_dset.tensors
|
def setup_trainer_and_data(
name: str, wandb_log_dir: str,
epochs: int, version: str, checkpoint_folder: str,
dataset_name: str, seed: int,
y_scaling: str = "min_max", discretize_n_bins: int = None,
smart_discretize: bool = True,
) -> tuple[Trainer, WandbLogger, ModelCheckpoint, RegressionData]:
data = RegressionData(
dataset_name, train_seed=seed, y_scaling=y_scaling, discretize_n_bins=discretize_n_bins,
smart_discretize=smart_discretize,
)
logger = WandbLogger(
project=WANDB_PROJECT, save_dir=wandb_log_dir,
name=name, group=version,
version=f"{version}_{name}",
)
checkpoint = ModelCheckpoint(
dirpath=os.path.join(checkpoint_folder, name)
)
max_steps_per_epoch = 100
max_val_steps = 10
train_batches = data.train_batches(max_steps_per_epoch)
trainer = Trainer(
logger=logger,
callbacks=[
EarlyStopping(monitor="val/loss", patience=epochs // 4, mode="min"),
LearningRateMonitor(),
checkpoint,
],
accelerator="gpu", max_steps=epochs * max_steps_per_epoch,
check_val_every_n_epoch=1,
limit_train_batches=train_batches,
limit_val_batches=data.val_batches(max_val_steps),
enable_progress_bar=False,
gradient_clip_val=5,
log_every_n_steps=train_batches,
)
return trainer, logger, checkpoint, data
def run_conditional_histogram(
dataset_name: str,
lr: float, wd: float, epochs: int,
hidden: int,
n_bins: int,
seed: int,
alphas: list[float],
smart_bin_positions: bool,
# saving settings
checkpoint_folder: str, version: str, wandb_log_dir: str,
#
run_test: bool = False,
):
# set up data
ts = timestamp()
name = f"conditional_hist_version-{version}_{ts}"
trainer, logger, checkpoint, data = setup_trainer_and_data(
name=name, wandb_log_dir=wandb_log_dir, epochs=epochs, version=version,
dataset_name=dataset_name, seed=seed, checkpoint_folder=checkpoint_folder,
discretize_n_bins=n_bins, smart_discretize=smart_bin_positions,
)
seed_everything(seed)
wandb.config.update({
"dataset_name": dataset_name, "alphas": alphas, "model": "conditional_hist",
"n_bins": n_bins, "smart_bin_positions": smart_bin_positions,
"seed": seed,
})
# set up model
x_train, y_train = data.train_dset.tensors | model = ConditionalHist( | 0 | 2023-11-01 18:04:29+00:00 | 12k |
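The `find_prob_threshold` snippet in this record computes a split-conformal threshold from calibration data: the conformity score is one minus the predicted probability of the true bin, the quantile level uses the finite-sample correction ceil((n + 1) * (1 - alpha)) / n, and the returned threshold is 1 - q_hat. Below is a minimal, self-contained sketch of that calculation. The function name `conformal_prob_threshold`, the toy log-softmax scores, and the random calibration labels are illustrative stand-ins (not part of the spice record), and the gather-based likelihood lookup replaces the snippet's `F.nll_loss` call with an equivalent indexing step.

import math

import torch


def conformal_prob_threshold(bin_log_probs: torch.Tensor, y_val: torch.Tensor, alpha: float) -> float:
    """Split-conformal threshold: bins predicted above it form the prediction set."""
    n = y_val.shape[0]
    # Finite-sample corrected quantile level, mirroring ConditionalHist.find_prob_threshold
    q_level = math.ceil((n + 1) * (1 - alpha)) / n
    # Conformity score: 1 - predicted probability of the observed bin
    true_bin_prob = bin_log_probs.exp().gather(1, y_val.unsqueeze(1)).squeeze(1)
    cal_scores = 1 - true_bin_prob
    q_hat = torch.quantile(cal_scores, q_level, interpolation="higher").item()
    # Any bin whose predicted probability exceeds this value enters the prediction set
    return 1 - q_hat


if __name__ == "__main__":
    torch.manual_seed(0)
    n, n_bins, alpha = 200, 10, 0.1
    bin_log_probs = torch.log_softmax(torch.randn(n, n_bins), dim=-1)  # toy calibration scores
    y_val = torch.randint(0, n_bins, (n,))                             # toy true bin indices
    threshold = conformal_prob_threshold(bin_log_probs, y_val, alpha)
    prediction_sets = bin_log_probs.exp() > threshold
    print(f"threshold={threshold:.4f}, mean set size={prediction_sets.sum(dim=1).float().mean():.2f}")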
nik-sm/com-hom-emg | scripts/collect_fresh_classifier_stats.py | [
{
"identifier": "DataModule",
"path": "com_hom_emg/data.py",
"snippet": "class DataModule(LightningDataModule):\n @staticmethod\n def add_argparse_args(parent_parser):\n parser = parent_parser.add_argument_group(\"DataModule\")\n parser.add_argument(\"--fold\", type=int, required=True)\n parser.add_argument(\"--n_train_subj\", type=int, default=8)\n parser.add_argument(\"--n_val_subj\", type=int, default=1)\n parser.add_argument(\"--n_test_subj\", type=int, default=1)\n parser.add_argument(\"--batch_size\", type=int, default=128)\n parser.add_argument(\"--num_workers\", type=int, default=8)\n parser.add_argument(\"--use_preprocessed_data\", type=str2bool, default=False)\n return parent_parser\n\n def __init__(\n self,\n *,\n # seed and per_subj_data come from cli\n seed: int,\n per_subj_data: dict,\n #\n fold: int,\n n_train_subj: int,\n n_val_subj: int,\n n_test_subj: int,\n batch_size: int,\n num_workers: int,\n use_preprocessed_data: bool,\n **kw,\n ):\n \"\"\"\n From N subjects, we select 1 for val, 1 for test, and N-2 for train.\n In each set, data are merged and shuffled.\n While loading, we distinguish single and double gestures for easier splitting during train steps.\n \"\"\"\n super().__init__()\n self.train_set, self.val_set, self.test_set = get_datasets(\n per_subj_data, fold, n_train_subj, n_val_subj, n_test_subj, use_preprocessed_data\n )\n self.batch_size = batch_size\n self.num_workers = num_workers\n self.seed = seed\n self.example_data_shape = self.train_set.tensors[0][0].shape\n\n def get_loader(self, dataset, shuffle: bool):\n return DataLoader(\n dataset,\n shuffle=shuffle,\n pin_memory=True,\n batch_size=self.batch_size,\n num_workers=self.num_workers,\n worker_init_fn=seed_worker,\n generator=torch.Generator().manual_seed(self.seed),\n persistent_workers=True,\n )\n\n def train_dataloader(self):\n return self.get_loader(self.train_set, shuffle=True)\n\n def val_dataloader(self):\n return self.get_loader(self.val_set, shuffle=False)\n\n def test_dataloader(self):\n return self.get_loader(self.test_set, shuffle=False)"
},
{
"identifier": "get_per_subj_data",
"path": "com_hom_emg/data.py",
"snippet": "def get_per_subj_data():\n path = PROJECT_PATH / \"data\" / \"combination-gesture-dataset\" / \"python\"\n per_subj_data = {}\n for subj_idx in range(10):\n per_subj_data[subj_idx] = {\n \"data\": np.load(path / f\"subj{subj_idx}/data.npy\"),\n \"labels\": np.load(path / f\"subj{subj_idx}/labels.npy\"),\n }\n return per_subj_data"
},
{
"identifier": "LearnedEmbedding",
"path": "com_hom_emg/model.py",
"snippet": "class LearnedEmbedding(pl.LightningModule):\n @staticmethod\n def add_argparse_args(parent_parser):\n parser = parent_parser.add_argument_group(\"LearnedEmbedding\")\n parser.add_argument(\"--encoder_arch\", choices=[\"basic\", \"conformer\", \"vit\", \"identity\"], default=\"basic\")\n parser.add_argument(\"--clf_arch\", choices=[\"small\", \"large\"], default=\"small\")\n parser.add_argument(\"--feature_dim\", type=int, default=64)\n # Note that with normalized features, we might need to re-normalized after making combinations\n parser.add_argument(\"--data_noise_SNR\", type=float, default=None, help=\"Desired SNR in dB. None for no noise.\")\n parser.add_argument(\n \"--feature_noise_SNR\", type=float, default=None, help=\"Desired SNR in dB. None for no noise.\"\n )\n parser.add_argument(\"--normalized_features\", type=str2bool, default=False)\n parser.add_argument(\"--feature_combine_type\", choices=[\"avg\", \"mlp\"], default=\"avg\")\n parser.add_argument(\"--lr\", type=float, default=3e-4)\n parser.add_argument(\"--lr_decay\", type=float, default=1.0)\n parser.add_argument(\"--linearity_loss_coeff\", type=float, default=1.0)\n parser.add_argument(\"--real_CE_loss_coeff\", type=float, default=1.0)\n parser.add_argument(\"--fake_CE_loss_coeff\", type=float, default=1.0)\n parser.add_argument(\"--loss_type\", choices=[\"triplet\", \"triplet-centroids\", \"triplet-hard\"], default=\"triplet\")\n parser.add_argument(\"--margin\", type=float, default=1.0)\n parser.add_argument(\"--centroids_momentum\", type=float, default=0.75, help=\"For `triplet-centroids` loss\")\n parser.add_argument(\"--triplets_per_item\", type=int, default=1, help=\"For `triplet` loss\")\n\n parser = parent_parser.add_argument_group(\"LearnedEmbedding - Fine-tuning\")\n parser.add_argument(\"--finetune_steps\", type=int, default=10_000)\n parser.add_argument(\"--finetune_lr\", type=float, default=3e-5)\n parser.add_argument(\"--finetune_lr_decay\", type=float, default=1.0)\n parser.add_argument(\"--finetune_batch_size\", type=float, default=32)\n parser.add_argument(\"--finetune_test_frac\", type=float, default=0.2)\n parser.add_argument(\"--finetune_n_aug_per_class\", type=int, default=-1, help=\"-1 for all, positive for N\")\n return parent_parser\n\n def __init__(self, **kwargs):\n super().__init__()\n self.save_hyperparameters() # Access arg from command line \"--arg1\" at \"self.hparams.arg1\", etc\n\n # NOTE - self.example_input_array - magic pytorch lightning variable for tboard log_graph\n self.example_input_array = torch.ones(1, self.hparams.input_channels, self.hparams.input_time_length)\n if self.hparams.encoder_arch == \"basic\":\n self.embedding = EmbeddingNetwork(\n input_channels=self.hparams.input_channels,\n input_time_length=self.hparams.input_time_length,\n feature_dim=self.hparams.feature_dim,\n normalized_features=self.hparams.normalized_features,\n use_preprocessed_data=self.hparams.use_preprocessed_data,\n )\n elif self.hparams.encoder_arch == \"conformer\":\n self.embedding = Conformer(\n feature_dim=self.hparams.feature_dim,\n normalized_features=self.hparams.normalized_features,\n )\n elif self.hparams.encoder_arch == \"vit\":\n vit = SimpleViT(\n seq_len=962,\n channels=8,\n patch_size=37,\n num_classes=self.hparams.feature_dim,\n dim=256,\n depth=6,\n heads=8,\n mlp_dim=256,\n )\n if self.hparams.normalized_features:\n vit = nn.Sequential(vit, UnitNormLayer())\n self.embedding = vit\n elif self.hparams.arch == \"identity\":\n self.embedding = DummyIdentity()\n else:\n raise 
NotImplementedError()\n if self.hparams.clf_arch == \"small\":\n self.direction_clf = nn.Linear(self.hparams.feature_dim, 5)\n self.modifier_clf = nn.Linear(self.hparams.feature_dim, 5)\n elif self.hparams.clf_arch == \"large\":\n self.direction_clf = MLPClf(self.hparams.feature_dim, 5)\n self.modifier_clf = MLPClf(self.hparams.feature_dim, 5)\n if self.hparams.loss_type == \"triplet\":\n self.linearity_loss_fn = TripletLoss(\n margin=self.hparams.margin,\n triplets_per_item=self.hparams.triplets_per_item,\n )\n elif self.hparams.loss_type == \"triplet-centroids\":\n self.linearity_loss_fn = TripletCentroids(\n margin=self.hparams.margin,\n feature_dim=self.hparams.feature_dim,\n device=\"cuda\" if self.hparams.accelerator == \"gpu\" else \"cpu\",\n momentum=self.hparams.centroids_momentum,\n )\n elif self.hparams.loss_type == \"triplet-hard\":\n self.linearity_loss_fn = TripletLossHardMining(\n margin=self.hparams.margin,\n )\n else:\n logger.error(f\"Unknown loss type: {self.hparams.loss_type}\")\n raise NotImplementedError()\n if self.hparams.feature_combine_type == \"avg\":\n # Store on self so it will be detected as additional params\n combine_fn = Avg()\n elif self.hparams.feature_combine_type == \"mlp\":\n combine_fn = MLPCombine(feature_dim=self.hparams.feature_dim)\n self.feature_combination = CombinePairs(\n combine_fn=combine_fn, normalized_features=self.hparams.normalized_features\n )\n\n def forward(self, preprocessed_emg_data):\n features = self.embedding(preprocessed_emg_data)\n return features\n\n def training_step(self, batch, batch_idx):\n (data, labels, is_single, subj_ids) = batch\n # Add noise to each class separately to reach the desired SNR\n if self.hparams.data_noise_SNR is not None:\n with torch.no_grad():\n for label in labels.unique(dim=0):\n subset_idx = (labels == label).all(-1)\n subset = data[subset_idx]\n data[subset_idx] = subset + get_noise(subset, self.hparams.data_noise_SNR)\n\n # Compute features for real data\n real_features = self.embedding(data)\n # Add noise to features\n if self.hparams.feature_noise_SNR is not None:\n for label in labels.unique(dim=0):\n subset_idx = (labels == label).all(-1)\n subset = real_features[subset_idx]\n real_features[subset_idx] = subset + get_noise(subset, self.hparams.feature_noise_SNR)\n\n # Create fake double features features from real singles\n single_features = real_features[is_single]\n single_labels = labels[is_single]\n try:\n fake_double_features, fake_double_labels = self.feature_combination(single_features, single_labels)\n except InsufficientDataError:\n logger.warning(\"Insufficient data for augmentation. Skipping batch.\")\n return None\n\n # Isolate real double features from batch\n real_double_features, real_double_labels = real_features[~is_single], labels[~is_single]\n if len(real_double_features) == 0:\n logger.warning(\"No real double features in batch. Skipping batch.\")\n return None\n if len(fake_double_features) == 0:\n logger.warning(\"No fake double features in batch. 
Skipping batch.\")\n return None\n\n # Compute linearity loss\n linearity_loss = self.linearity_loss_fn(\n real_double_features=real_double_features,\n real_double_labels=real_double_labels,\n fake_double_features=fake_double_features,\n fake_double_labels=fake_double_labels,\n )\n\n # Compute classification loss on real data\n real_dir_logits = self.direction_clf(real_features)\n CE_real_dir = F.cross_entropy(real_dir_logits, labels[:, 0])\n bal_acc_real_dir = accuracy(\n real_dir_logits.argmax(-1), labels[:, 0], task=\"multiclass\", num_classes=5, average=\"macro\"\n )\n\n real_mod_logits = self.modifier_clf(real_features)\n CE_real_mod = F.cross_entropy(real_mod_logits, labels[:, 1])\n bal_acc_real_mod = accuracy(\n real_mod_logits.argmax(-1), labels[:, 1], task=\"multiclass\", num_classes=5, average=\"macro\"\n )\n\n # Compute classification loss on fake combinations\n fake_dir_logits = self.direction_clf(fake_double_features)\n CE_fake_dir = F.cross_entropy(fake_dir_logits, fake_double_labels[:, 0])\n bal_acc_fake_dir = accuracy(\n fake_dir_logits.argmax(-1), fake_double_labels[:, 0], task=\"multiclass\", num_classes=5, average=\"macro\"\n )\n\n fake_mod_logits = self.modifier_clf(fake_double_features)\n CE_fake_mod = F.cross_entropy(fake_mod_logits, fake_double_labels[:, 1])\n bal_acc_fake_mod = accuracy(\n fake_mod_logits.argmax(-1), fake_double_labels[:, 1], task=\"multiclass\", num_classes=5, average=\"macro\"\n )\n\n # Decrease emphasis on fake CE so they have equal importance\n down_ratio = len(real_features) / len(fake_double_features)\n real_CE = self.hparams.real_CE_loss_coeff * (CE_real_dir + CE_real_mod) / 2\n fake_CE = down_ratio * self.hparams.fake_CE_loss_coeff * (CE_fake_dir + CE_fake_mod) / 2\n lin_loss = self.hparams.linearity_loss_coeff * linearity_loss\n total_loss = real_CE + fake_CE + lin_loss\n\n # Log individual loss terms (before applying coefficients)\n self.log(\"train/CE_real_dir\", CE_real_dir)\n self.log(\"train/CE_real_mod\", CE_real_mod)\n self.log(\"train/CE_fake_dir\", CE_fake_dir)\n self.log(\"train/CE_fake_mod\", CE_fake_mod)\n self.log(\"train/linearity_loss\", linearity_loss)\n tb = self.logger.experiment\n tb.add_histogram(\"train/real_double_feature_norm\", real_double_features.norm(dim=-1), self.global_step)\n tb.add_histogram(\"train/fake_double_feature_norm\", fake_double_features.norm(dim=-1), self.global_step)\n\n # Log total loss\n self.log(\"train/total_loss\", total_loss)\n\n # Log balanced accuracies\n self.log(\"train/bal_acc_real_dir\", bal_acc_real_dir)\n self.log(\"train/bal_acc_real_mod\", bal_acc_real_mod)\n self.log(\"train/bal_acc_fake_dir\", bal_acc_fake_dir)\n self.log(\"train/bal_acc_fake_mod\", bal_acc_fake_mod)\n return total_loss\n\n def training_epoch_end(self, outputs):\n metrics = {k: v.item() if isinstance(v, torch.Tensor) else v for k, v in self.trainer.callback_metrics.items()}\n metrics = {f\"{k}\": f\"{v:.4f}\" for k, v in metrics.items()}\n logger.info(f\"Epoch: {self.current_epoch}, Metrics: {metrics}\")\n\n def _val_or_test_step(self, batch, name=None):\n (data, labels, is_single, subj_ids) = batch\n\n # Compute metrics on real data\n real_features = self.embedding(data)\n real_dir_logits = self.direction_clf(real_features)\n real_mod_logits = self.modifier_clf(real_features)\n real_preds = torch.stack((real_dir_logits.argmax(-1), real_mod_logits.argmax(-1)), dim=-1)\n real_cm = get_combo_conf_mat(labels, real_preds)\n\n # To be clear that fake data is not part of the result, compute result before making fake 
data\n res = {\"features\": real_features, \"labels\": labels, \"is_single\": is_single, \"subj_ids\": subj_ids}\n\n # Compute metrics on fake data\n single_features = real_features[is_single]\n single_labels = labels[is_single]\n try:\n fake_double_features, fake_double_labels = self.feature_combination(single_features, single_labels)\n except InsufficientDataError:\n logger.warning(\"Insufficient data for augmentation. Skipping batch.\")\n return None\n fake_dir_logits = self.direction_clf(fake_double_features)\n fake_mod_logits = self.modifier_clf(fake_double_features)\n\n fake_preds = torch.stack((fake_dir_logits.argmax(-1), fake_mod_logits.argmax(-1)), dim=-1)\n fake_cm = get_combo_conf_mat(fake_double_labels, fake_preds)\n if name is not None:\n self.log(f\"{name}/single_bal_acc\", np.nanmean(np.diag(real_cm)[:8]))\n self.log(f\"{name}/double_bal_acc\", np.nanmean(np.diag(real_cm)[8:]))\n self.log(f\"{name}/overall_bal_acc\", np.nanmean(np.diag(real_cm)[:24]))\n self.log(f\"{name}/fake_double_bal_acc\", np.nanmean(np.diag(fake_cm)[8:]))\n return res\n\n def validation_step(self, batch, batch_idx):\n self._val_or_test_step(batch, \"val\")\n\n def test_step(self, batch, batch_idx):\n return self._val_or_test_step(batch, None)\n\n @torch.enable_grad()\n @torch.inference_mode(False)\n def test_epoch_end(self, outputs):\n features = torch.cat([x[\"features\"] for x in outputs])\n labels = torch.cat([x[\"labels\"] for x in outputs])\n is_single = torch.cat([x[\"is_single\"] for x in outputs])\n subj_ids = torch.cat([x[\"subj_ids\"] for x in outputs])\n\n combined_evaluation = self.run_finetune_evaluation(features, labels, is_single, subj_ids)\n scalars = [\"single_bal_acc\", \"double_bal_acc\", \"overall_bal_acc\"]\n for scenario in [\"zero_shot\", \"upper_bound\", \"lower_bound\", \"augmented\"]:\n for key in scalars:\n value = combined_evaluation[scenario][key]\n self.log(f\"test_{scenario}/{key}\", value, sync_dist=True)\n\n # Save confusion matrix\n path = Path(self.logger.log_dir)\n np.save(path / f\"test.{scenario}.confusion_matrix.npy\", combined_evaluation[scenario][\"confusion_matrix\"])\n # TODO - how else can we get output from pytorch lightning's trainer.test()?\n return None\n\n def run_finetune_evaluation(self, features, labels, is_single, subj_ids):\n logger.info(\"Try evaluation by fine-tuning pre-trained dir_clf and mod_clf\")\n # Freeze the feature combination fn, just to be safe\n for param in self.feature_combination.parameters():\n param.requires_grad = False\n self.feature_combination.eval()\n\n evaluations = []\n for subj_id in subj_ids.unique():\n logger.info(f\"Fine-tuning evaluation for subject {subj_id}\")\n # Get subset of features and labels for this subject\n idx = subj_ids == subj_id\n evaluations.append(\n self.run_finetune_one_subj(features=features[idx], labels=labels[idx], is_single=is_single[idx])\n )\n\n combined_evaluation = {}\n for key in [\"upper_bound\", \"lower_bound\", \"augmented\", \"zero_shot\"]:\n combined_evaluation[key] = {\n \"single_bal_acc\": np.mean([x[key][\"single_bal_acc\"] for x in evaluations]),\n \"double_bal_acc\": np.mean([x[key][\"double_bal_acc\"] for x in evaluations]),\n \"overall_bal_acc\": np.mean([x[key][\"overall_bal_acc\"] for x in evaluations]),\n \"confusion_matrix\": np.mean([x[key][\"confusion_matrix\"] for x in evaluations], axis=0),\n }\n return combined_evaluation\n\n def run_finetune_one_subj(self, features, labels, is_single):\n # Split into train/test\n N_single = is_single.sum().item()\n N_single_test = 
int(N_single * self.hparams.finetune_test_frac)\n\n N_double = (~is_single).sum().item()\n N_double_test = int(N_double * self.hparams.finetune_test_frac)\n\n np.random.seed(0)\n single_perm = np.random.permutation(N_single)\n test_single_feat = features[is_single][single_perm[:N_single_test]]\n test_single_labels = labels[is_single][single_perm[:N_single_test]]\n train_single_feat = features[is_single][single_perm[N_single_test:]]\n train_single_labels = labels[is_single][single_perm[N_single_test:]]\n\n double_perm = np.random.permutation(N_double)\n test_double_feat = features[~is_single][double_perm[:N_double_test]]\n test_double_labels = labels[~is_single][double_perm[:N_double_test]]\n train_double_feat = features[~is_single][double_perm[N_double_test:]]\n train_double_labels = labels[~is_single][double_perm[N_double_test:]]\n\n def try_once(which: str):\n logger.info(f\"Finetune for scenario: {which}\")\n aug = {\"upper\": None, \"lower\": None, \"aug\": self.feature_combination}[which]\n doubles_in_train = {\"upper\": True, \"lower\": False, \"aug\": False}[which]\n\n # Setup train data\n logger.debug(f\"real singles: {len(train_single_feat)}\")\n logger.debug(f\"real doubles: {len(train_double_feat)}\")\n if doubles_in_train:\n x_train = torch.cat((train_single_feat, train_double_feat))\n y_train = torch.cat((train_single_labels, train_double_labels))\n else:\n x_train = train_single_feat\n y_train = train_single_labels\n if aug is not None:\n x_aug, y_aug = aug(train_single_feat, train_single_labels)\n if self.hparams.finetune_n_aug_per_class > 0:\n # Subset each class\n res_x, res_y = [], []\n for c in y_aug.unique(dim=0):\n idx = (y_aug == c).all(dim=1)\n perm = np.random.permutation(idx.sum().item())\n res_x.append(x_aug[idx][perm[: self.hparams.finetune_n_aug_per_class]])\n res_y.append(y_aug[idx][perm[: self.hparams.finetune_n_aug_per_class]])\n x_aug, y_aug = torch.cat(res_x), torch.cat(res_y)\n logger.debug(f\"n_aug_per_class: {self.hparams.finetune_n_aug_per_class}\")\n logger.debug(f\"fake doubles: {x_aug.shape[0]}\")\n x_train = torch.cat([x_train, x_aug])\n y_train = torch.cat([y_train, y_aug])\n\n x_train, y_train = shuffle_together(x_train, y_train)\n\n # Setup test data\n x_test = torch.cat([test_single_feat, test_double_feat])\n y_test = torch.cat([test_single_labels, test_double_labels])\n x_test, y_test = shuffle_together(x_test, y_test)\n\n # Make a temporary copy of the models\n dir_clf = deepcopy(self.direction_clf)\n mod_clf = deepcopy(self.modifier_clf)\n optim = torch.optim.AdamW(chain(dir_clf.parameters(), mod_clf.parameters()), lr=self.hparams.finetune_lr)\n sched = torch.optim.lr_scheduler.ExponentialLR(optim, gamma=self.hparams.finetune_lr_decay)\n # Since the features are already on GPU, can't use multiprocess dataloader\n bs = self.hparams.finetune_batch_size\n train_loader = DataLoader(TensorDataset(x_train, y_train), batch_size=bs, shuffle=True, num_workers=0)\n test_loader = DataLoader(TensorDataset(x_test, y_test), batch_size=bs, shuffle=False, num_workers=0)\n\n def infinite_cycle(loader):\n while True:\n for x, y in loader:\n yield x, y\n\n inf_train_loader = infinite_cycle(train_loader)\n\n @torch.no_grad()\n def test():\n dir_clf.eval()\n mod_clf.eval()\n dir_logits, mod_logits, y_test = [], [], []\n for x, y in test_loader:\n dir_logits.append(dir_clf(x))\n mod_logits.append(mod_clf(x))\n y_test.append(y)\n dir_logits = torch.cat(dir_logits)\n mod_logits = torch.cat(mod_logits)\n y_test = torch.cat(y_test)\n preds = 
torch.stack((dir_logits.argmax(-1), mod_logits.argmax(-1)), dim=-1)\n cm = get_combo_conf_mat(y_test, preds)\n return {\n \"single_bal_acc\": np.nanmean(np.diag(cm)[:8]),\n \"double_bal_acc\": np.nanmean(np.diag(cm)[8:]),\n \"overall_bal_acc\": np.nanmean(np.diag(cm)[:24]),\n \"confusion_matrix\": cm,\n }\n\n zero_shot_res = test() # Test once with no fine-tuning\n logger.debug(f\"Zero-shot results: {zero_shot_res}\")\n tb = self.logger.experiment\n # Graphs will start with the zero-shot result\n scalars = [\"single_bal_acc\", \"double_bal_acc\", \"overall_bal_acc\"]\n for k in scalars:\n v = zero_shot_res[k]\n tb.add_scalar(f\"finetune/{which}/{k}\", v, 0) # Start at x-axis=0\n # Continue graphs from 1 onward\n dir_clf.train()\n mod_clf.train()\n for i in range(1, self.hparams.finetune_steps + 1):\n x, y = next(inf_train_loader)\n optim.zero_grad()\n dir_logits = dir_clf(x)\n mod_logits = mod_clf(x)\n dir_loss = F.cross_entropy(dir_logits, y[:, 0])\n mod_loss = F.cross_entropy(mod_logits, y[:, 1])\n loss = dir_loss + mod_loss\n loss.backward()\n optim.step()\n\n if i % 100 == 0:\n finetuned_res = test()\n logger.debug(f\"Step {i} results: {finetuned_res}\")\n for k in scalars:\n v = finetuned_res[k]\n tb.add_scalar(f\"finetune/{which}/{k}\", v, i)\n dir_clf.train()\n mod_clf.train()\n sched.step()\n\n finetuned_res = test()\n\n return finetuned_res, zero_shot_res\n\n upper, zero_shot_res = try_once(\"upper\")\n lower, _ = try_once(\"lower\")\n aug, _ = try_once(\"aug\")\n return {\"zero_shot\": zero_shot_res, \"upper_bound\": upper, \"lower_bound\": lower, \"augmented\": aug}\n\n def configure_optimizers(self):\n optim = torch.optim.AdamW(self.embedding.parameters(), lr=self.hparams.lr)\n sched = torch.optim.lr_scheduler.ExponentialLR(optim, gamma=self.hparams.lr_decay)\n return {\"optimizer\": optim, \"lr_scheduler\": {\"scheduler\": sched, \"name\": \"lr_sched\"}}"
},
{
"identifier": "ControlModel_RandomGuess",
"path": "com_hom_emg/parallel_models.py",
"snippet": "class ControlModel_RandomGuess(BaseParallelModel):\n def __init__(self, *args, **kw):\n pass\n\n def fit(self, x, y):\n return self\n\n def predict_proba(self, x):\n # Create random probs output with correct shape\n # Note that the probabilities should be normalized along the final axis\n # This is the same axis where we'll choose one prediction\n probs = np.random.rand(x.shape[0], 2, 5)\n probs /= np.sum(probs, axis=-1, keepdims=True)\n return probs\n\n def predict(self, x):\n return self.predict_proba(x).argmax(-1)\n\n def save(self, path):\n pass\n\n @classmethod\n def load(cls, path):\n pass\n\n def get_params(self, deep=True):\n return {}\n\n def __repr__(self):\n return f\"{type(self).__name__}()\""
},
{
"identifier": "ParallelA",
"path": "com_hom_emg/parallel_models.py",
"snippet": "class ParallelA(BaseParallelModel):\n DEFAULT_SAVE_NAME = \"ParallelA.pkl\"\n\n def __init__(self, dir_clf, mod_clf):\n self.dir_clf = dir_clf\n self.mod_clf = mod_clf\n\n def get_params(self, deep=True):\n return {\"dir_clf\": self.dir_clf, \"mod_clf\": self.mod_clf}\n\n def fit(self, x, y):\n self.dir_clf.fit(x, y[:, 0])\n self.mod_clf.fit(x, y[:, 1])\n return self\n\n def predict_proba(self, x):\n prob0 = self.dir_clf.predict_proba(x)\n prob1 = self.mod_clf.predict_proba(x)\n return np.stack([prob0, prob1], axis=1)\n\n def predict(self, x):\n return self.predict_proba(x).argmax(-1)\n\n def save(self, save_dir: Path) -> Path:\n assert save_dir.exists() and save_dir.is_dir()\n file_path = save_dir / self.DEFAULT_SAVE_NAME\n with open(file_path, \"wb\") as f:\n pickle.dump(self, f)\n return file_path\n\n @classmethod\n def load(cls, file_path: Path) -> \"ParallelA\":\n with open(file_path, \"rb\") as f:\n return pickle.load(f)\n\n def __repr__(self):\n return f\"{type(self).__name__}(dir_clf={self.dir_clf}, mod_clf={self.mod_clf})\""
},
{
"identifier": "get_combo_conf_mat",
"path": "com_hom_emg/scoring.py",
"snippet": "def get_combo_conf_mat(y_true_2d, y_pred_2d, normalize=True):\n \"\"\"We get a confusion matrix of shape (25, 25). Row is true class, col is predicted.\n Entries are arranged like this:\n (D1, None), ..., (D4, None), (None, M1), ..., (None, M4), (D1, M1), ...\n (D1, M4), (D2, M1), ... (D2, M4), ... (D4, M4), (None, None)\n where D1 ... D4 are directions in order of appearance from DIRECTION_GESTURES\n and M1 ... M4 are modifiers in order of appearance from MODIFIER_GESTURES.\n This means the first 4 rows are each \"direction-only\" label, next 4 are \"modifier-only\" labels.\"\"\"\n cm = np.zeros((len(CANONICAL_COORDS), len(CANONICAL_COORDS)))\n for yt, yp in zip(y_true_2d, y_pred_2d):\n cm[CANONICAL_COORDS.index(tuple(yt)), CANONICAL_COORDS.index(tuple(yp))] += 1\n if normalize:\n # NOTE - result may contain nans - use nanmean later\n with np.errstate(all=\"ignore\"): # Ignore division by zero for empty rows\n cm /= cm.sum(axis=-1, keepdims=True)\n return cm"
},
{
"identifier": "PROJECT_PATH",
"path": "com_hom_emg/utils.py",
"snippet": "PROJECT_PATH = Path(__file__).parent.parent"
}
] | import argparse
import re
import sys
import numpy as np
import pandas as pd
import torch
import yaml
from copy import deepcopy
from pathlib import Path
from typing import List, Optional
from ablation_settings import settings_names as ablation_settings_names
from loguru import logger
from pytorch_lightning import seed_everything
from regular_settings import settings_names as regular_settings_names
from rich.console import Console
from rich.table import Table
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.ensemble import RandomForestClassifier as RF
from sklearn.linear_model import LogisticRegression as LogR
from sklearn.neighbors import KNeighborsClassifier as KNN
from sklearn.tree import DecisionTreeClassifier as DT
from tqdm import tqdm
from utils import table_to_csv
from com_hom_emg.data import DataModule, get_per_subj_data
from com_hom_emg.model import LearnedEmbedding
from com_hom_emg.parallel_models import ControlModel_RandomGuess, ParallelA
from com_hom_emg.scoring import get_combo_conf_mat
from com_hom_emg.utils import PROJECT_PATH | 8,435 | """Train fresh classifiers using test checkpoints"""
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
logger.remove()
logger.add(lambda msg: tqdm.write(msg, end=""), colorize=True)
class FailedRunError(Exception):
pass
def load_one(folder: Path, which="best"):
"""Extract train, val, and test metrics from the specified checkpoint (best or last).
Also extract hyperparams from hparams.yaml file."""
# Given a checkpoint like this: best__epoch=38__step=14664__val_aug_overall_acc=0.569.ckpt
# We want to extract the step: 14664
ckpts = folder / "checkpoints"
matching_ckpts = list(ckpts.glob(f"{which}*.ckpt"))
if len(matching_ckpts) == 0:
raise FailedRunError(f"No checkpoint found for {which} in {folder}")
# When there are multiple runs, take the most recent.
# Since only 1 metrics.csv is kept, this matches the latest ckpt
chosen_ckpt = max(matching_ckpts, key=lambda x: x.stat().st_mtime)
step = int(re.match(rf"{which}__epoch=\d+__step=(\d+)", chosen_ckpt.name).group(1))
metrics = pd.read_csv(folder / "metrics.csv")
results = {}
# NOTE - for this experiment, we ignore the test results, which come from fine-tuning,
# since we will train a fresh classifier instead
for split in ["train", "val"]:
cols = [col for col in metrics.columns if col.startswith(split)]
if len(cols) == 0:
raise FailedRunError(f"No {split} metrics found in {folder}")
cols.append("step")
subset = metrics[cols].dropna().set_index("step")
subset = subset.iloc[subset.index.get_indexer([step], method="nearest")]
assert len(subset) == 1
results.update(**subset.to_dict(orient="records")[0])
hparams = yaml.safe_load((folder / "hparams.yaml").read_text())
return hparams, results, chosen_ckpt
def subset_one_class(X, Y, N):
idx = np.random.choice(len(X), size=N, replace=False)
return X[idx], Y[idx]
def subset_each_class(X, Y, N):
result_x, result_y = [], []
for y in Y.unique(dim=0):
idx = (Y == y).all(-1)
x = X[idx]
y = Y[idx]
x, y = subset_one_class(x, y, N)
result_x.append(x)
result_y.append(y)
return torch.cat(result_x), torch.cat(result_y)
def get_clf(name: str):
if name == "logr":
return LogR(class_weight="balanced", max_iter=4000, n_jobs=-1)
elif name == "lda":
return LDA()
elif name == "knn":
return KNN(n_jobs=-1)
elif name == "rf":
return RF(n_jobs=-1, class_weight="balanced")
elif name == "dt":
return DT(class_weight="balanced")
else:
raise ValueError(f"Unknown classifier name: {name}")
@torch.no_grad()
def try_fresh_classifier(embedding, test_loader, clf_name: str, test_frac=0.2, N_aug_each_class=500):
# Get features
embedding.to(device)
features, labels, is_single = [], [], []
for batch_data, batch_labels, batch_is_single, _subj_ids in test_loader:
features.append(embedding(batch_data.to(device)))
labels.append(batch_labels.to(device))
is_single.append(batch_is_single)
features = torch.cat(features)
labels = torch.cat(labels)
is_single = torch.cat(is_single)
# Create a single train/test split
N_single = is_single.sum().item()
N_single_test = int(N_single * test_frac)
N_double = (~is_single).sum().item()
N_double_test = int(N_double * test_frac)
np.random.seed(0)
single_perm = np.random.permutation(N_single)
test_single_feat = features[is_single][single_perm[:N_single_test]]
test_single_labels = labels[is_single][single_perm[:N_single_test]]
train_single_feat = features[is_single][single_perm[N_single_test:]]
train_single_labels = labels[is_single][single_perm[N_single_test:]]
double_perm = np.random.permutation(N_double)
test_double_feat = features[~is_single][double_perm[:N_double_test]]
test_double_labels = labels[~is_single][double_perm[:N_double_test]]
train_double_feat = features[~is_single][double_perm[N_double_test:]]
train_double_labels = labels[~is_single][double_perm[N_double_test:]]
# Define function to train a single sklearn clf
def try_once(which: str):
# logger.info(f"Train an example model for scenario: {which}")
| """Train fresh classifiers using test checkpoints"""
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
logger.remove()
logger.add(lambda msg: tqdm.write(msg, end=""), colorize=True)
class FailedRunError(Exception):
pass
def load_one(folder: Path, which="best"):
"""Extract train, val, and test metrics from the specified checkpoint (best or last).
Also extract hyperparams from hparams.yaml file."""
# Given a checkpoint like this: best__epoch=38__step=14664__val_aug_overall_acc=0.569.ckpt
# We want to extract the step: 14664
ckpts = folder / "checkpoints"
matching_ckpts = list(ckpts.glob(f"{which}*.ckpt"))
if len(matching_ckpts) == 0:
raise FailedRunError(f"No checkpoint found for {which} in {folder}")
# When there are multiple runs, take the most recent.
# Since only 1 metrics.csv is kept, this matches the latest ckpt
chosen_ckpt = max(matching_ckpts, key=lambda x: x.stat().st_mtime)
step = int(re.match(rf"{which}__epoch=\d+__step=(\d+)", chosen_ckpt.name).group(1))
metrics = pd.read_csv(folder / "metrics.csv")
results = {}
# NOTE - for this experiment, we ignore the test results, which come from fine-tuning,
# since we will train a fresh classifier instead
for split in ["train", "val"]:
cols = [col for col in metrics.columns if col.startswith(split)]
if len(cols) == 0:
raise FailedRunError(f"No {split} metrics found in {folder}")
cols.append("step")
subset = metrics[cols].dropna().set_index("step")
subset = subset.iloc[subset.index.get_indexer([step], method="nearest")]
assert len(subset) == 1
results.update(**subset.to_dict(orient="records")[0])
hparams = yaml.safe_load((folder / "hparams.yaml").read_text())
return hparams, results, chosen_ckpt
def subset_one_class(X, Y, N):
idx = np.random.choice(len(X), size=N, replace=False)
return X[idx], Y[idx]
def subset_each_class(X, Y, N):
result_x, result_y = [], []
for y in Y.unique(dim=0):
idx = (Y == y).all(-1)
x = X[idx]
y = Y[idx]
x, y = subset_one_class(x, y, N)
result_x.append(x)
result_y.append(y)
return torch.cat(result_x), torch.cat(result_y)
def get_clf(name: str):
if name == "logr":
return LogR(class_weight="balanced", max_iter=4000, n_jobs=-1)
elif name == "lda":
return LDA()
elif name == "knn":
return KNN(n_jobs=-1)
elif name == "rf":
return RF(n_jobs=-1, class_weight="balanced")
elif name == "dt":
return DT(class_weight="balanced")
else:
raise ValueError(f"Unknown classifier name: {name}")
@torch.no_grad()
def try_fresh_classifier(embedding, test_loader, clf_name: str, test_frac=0.2, N_aug_each_class=500):
# Get features
embedding.to(device)
features, labels, is_single = [], [], []
for batch_data, batch_labels, batch_is_single, _subj_ids in test_loader:
features.append(embedding(batch_data.to(device)))
labels.append(batch_labels.to(device))
is_single.append(batch_is_single)
features = torch.cat(features)
labels = torch.cat(labels)
is_single = torch.cat(is_single)
# Create a single train/test split
N_single = is_single.sum().item()
N_single_test = int(N_single * test_frac)
N_double = (~is_single).sum().item()
N_double_test = int(N_double * test_frac)
np.random.seed(0)
single_perm = np.random.permutation(N_single)
test_single_feat = features[is_single][single_perm[:N_single_test]]
test_single_labels = labels[is_single][single_perm[:N_single_test]]
train_single_feat = features[is_single][single_perm[N_single_test:]]
train_single_labels = labels[is_single][single_perm[N_single_test:]]
double_perm = np.random.permutation(N_double)
test_double_feat = features[~is_single][double_perm[:N_double_test]]
test_double_labels = labels[~is_single][double_perm[:N_double_test]]
train_double_feat = features[~is_single][double_perm[N_double_test:]]
train_double_labels = labels[~is_single][double_perm[N_double_test:]]
# Define function to train a single sklearn clf
def try_once(which: str):
# logger.info(f"Train an example model for scenario: {which}") | clf = ParallelA(dir_clf=get_clf(clf_name), mod_clf=get_clf(clf_name)) | 4 | 2023-11-01 21:12:05+00:00 | 12k |
SqueezeAILab/LLMCompiler | src/chains/llm_math_chain.py | [
{
"identifier": "Chain",
"path": "src/chains/chain.py",
"snippet": "class Chain(Serializable, Runnable[Dict[str, Any], Dict[str, Any]], ABC):\n \"\"\"Abstract base class for creating structured sequences of calls to components.\n\n Chains should be used to encode a sequence of calls to components like\n models, document retrievers, other chains, etc., and provide a simple interface\n to this sequence.\n\n Copied from langchain v0.0.283.\n\n The Chain interface makes it easy to create apps that are:\n - Stateful: add Memory to any Chain to give it state,\n - Observable: pass Callbacks to a Chain to execute additional functionality,\n like logging, outside the main sequence of component calls,\n - Composable: the Chain API is flexible enough that it is easy to combine\n Chains with other components, including other Chains.\n\n The main methods exposed by chains are:\n - `__call__`: Chains are callable. The `__call__` method is the primary way to\n execute a Chain. This takes inputs as a dictionary and returns a\n dictionary output.\n - `run`: A convenience method that takes inputs as args/kwargs and returns the\n output as a string or object. This method can only be used for a subset of\n chains and cannot return as rich of an output as `__call__`.\n \"\"\"\n\n def invoke(\n self,\n input: Dict[str, Any],\n config: Optional[RunnableConfig] = None,\n **kwargs: Any,\n ) -> Dict[str, Any]:\n config = config or {}\n return self(\n input,\n callbacks=config.get(\"callbacks\"),\n tags=config.get(\"tags\"),\n metadata=config.get(\"metadata\"),\n run_name=config.get(\"run_name\"),\n **kwargs,\n )\n\n async def ainvoke(\n self,\n input: Dict[str, Any],\n config: Optional[RunnableConfig] = None,\n **kwargs: Any,\n ) -> Dict[str, Any]:\n if type(self)._acall == Chain._acall:\n # If the chain does not implement async, fall back to default implementation\n return await asyncio.get_running_loop().run_in_executor(\n None, partial(self.invoke, input, config, **kwargs)\n )\n\n config = config or {}\n return await self.acall(\n input,\n callbacks=config.get(\"callbacks\"),\n tags=config.get(\"tags\"),\n metadata=config.get(\"metadata\"),\n run_name=config.get(\"run_name\"),\n **kwargs,\n )\n\n memory: Optional[BaseMemory] = None\n \"\"\"Optional memory object. Defaults to None.\n Memory is a class that gets called at the start\n and at the end of every chain. At the start, memory loads variables and passes\n them along in the chain. At the end, it saves any returned variables.\n There are many different types of memory - please see memory docs\n for the full catalog.\"\"\"\n callbacks: Callbacks = Field(default=None, exclude=True)\n \"\"\"Optional list of callback handlers (or callback manager). Defaults to None.\n Callback handlers are called throughout the lifecycle of a call to a chain,\n starting with on_chain_start, ending with on_chain_end or on_chain_error.\n Each custom chain can optionally call additional callback methods, see Callback docs\n for full details.\"\"\"\n callback_manager: Optional[BaseCallbackManager] = Field(default=None, exclude=True)\n \"\"\"Deprecated, use `callbacks` instead.\"\"\"\n verbose: bool = Field(default_factory=_get_verbosity)\n \"\"\"Whether or not run in verbose mode. In verbose mode, some intermediate logs\n will be printed to the console. Defaults to `langchain.verbose` value.\"\"\"\n tags: Optional[List[str]] = None\n \"\"\"Optional list of tags associated with the chain. 
Defaults to None.\n These tags will be associated with each call to this chain,\n and passed as arguments to the handlers defined in `callbacks`.\n You can use these to eg identify a specific instance of a chain with its use case.\n \"\"\"\n metadata: Optional[Dict[str, Any]] = None\n \"\"\"Optional metadata associated with the chain. Defaults to None.\n This metadata will be associated with each call to this chain,\n and passed as arguments to the handlers defined in `callbacks`.\n You can use these to eg identify a specific instance of a chain with its use case.\n \"\"\"\n\n class Config:\n \"\"\"Configuration for this pydantic object.\"\"\"\n\n arbitrary_types_allowed = True\n\n @property\n def _chain_type(self) -> str:\n raise NotImplementedError(\"Saving not supported for this chain type.\")\n\n @root_validator()\n def raise_callback_manager_deprecation(cls, values: Dict) -> Dict:\n \"\"\"Raise deprecation warning if callback_manager is used.\"\"\"\n if values.get(\"callback_manager\") is not None:\n if values.get(\"callbacks\") is not None:\n raise ValueError(\n \"Cannot specify both callback_manager and callbacks. \"\n \"callback_manager is deprecated, callbacks is the preferred \"\n \"parameter to pass in.\"\n )\n warnings.warn(\n \"callback_manager is deprecated. Please use callbacks instead.\",\n DeprecationWarning,\n )\n values[\"callbacks\"] = values.pop(\"callback_manager\", None)\n return values\n\n @validator(\"verbose\", pre=True, always=True)\n def set_verbose(cls, verbose: Optional[bool]) -> bool:\n \"\"\"Set the chain verbosity.\n\n Defaults to the global setting if not specified by the user.\n \"\"\"\n if verbose is None:\n return _get_verbosity()\n else:\n return verbose\n\n @property\n @abstractmethod\n def input_keys(self) -> List[str]:\n \"\"\"Keys expected to be in the chain input.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def output_keys(self) -> List[str]:\n \"\"\"Keys expected to be in the chain output.\"\"\"\n raise NotImplementedError\n\n def _validate_inputs(self, inputs: Dict[str, Any]) -> None:\n \"\"\"Check that all inputs are present.\"\"\"\n missing_keys = set(self.input_keys).difference(inputs)\n if missing_keys:\n raise ValueError(f\"Missing some input keys: {missing_keys}\")\n\n def _validate_outputs(self, outputs: Dict[str, Any]) -> None:\n missing_keys = set(self.output_keys).difference(outputs)\n if missing_keys:\n raise ValueError(f\"Missing some output keys: {missing_keys}\")\n\n @abstractmethod\n def _call(\n self,\n inputs: Dict[str, Any],\n run_manager: Optional[CallbackManagerForChainRun] = None,\n ) -> Dict[str, Any]:\n \"\"\"Execute the chain.\n\n This is a private method that is not user-facing. It is only called within\n `Chain.__call__`, which is the user-facing wrapper method that handles\n callbacks configuration and some input/output processing.\n\n Args:\n inputs: A dict of named inputs to the chain. Assumed to contain all inputs\n specified in `Chain.input_keys`, including any inputs added by memory.\n run_manager: The callbacks manager that contains the callback handlers for\n this run of the chain.\n\n Returns:\n A dict of named outputs. Should contain all outputs specified in\n `Chain.output_keys`.\n \"\"\"\n raise NotImplementedError\n\n async def _acall(\n self,\n inputs: Dict[str, Any],\n run_manager: Optional[AsyncCallbackManagerForChainRun] = None,\n ) -> Dict[str, Any]:\n \"\"\"Asynchronously execute the chain.\n\n This is a private method that is not user-facing. 
It is only called within\n `Chain.acall`, which is the user-facing wrapper method that handles\n callbacks configuration and some input/output processing.\n\n Args:\n inputs: A dict of named inputs to the chain. Assumed to contain all inputs\n specified in `Chain.input_keys`, including any inputs added by memory.\n run_manager: The callbacks manager that contains the callback handlers for\n this run of the chain.\n\n Returns:\n A dict of named outputs. Should contain all outputs specified in\n `Chain.output_keys`.\n \"\"\"\n raise NotImplementedError(\"Async call not supported for this chain type.\")\n\n def __call__(\n self,\n inputs: Union[Dict[str, Any], Any],\n return_only_outputs: bool = False,\n callbacks: Callbacks = None,\n *,\n tags: Optional[List[str]] = None,\n metadata: Optional[Dict[str, Any]] = None,\n run_name: Optional[str] = None,\n include_run_info: bool = False,\n ) -> Dict[str, Any]:\n \"\"\"Execute the chain.\n\n Args:\n inputs: Dictionary of inputs, or single input if chain expects\n only one param. Should contain all inputs specified in\n `Chain.input_keys` except for inputs that will be set by the chain's\n memory.\n return_only_outputs: Whether to return only outputs in the\n response. If True, only new keys generated by this chain will be\n returned. If False, both input keys and new keys generated by this\n chain will be returned. Defaults to False.\n callbacks: Callbacks to use for this chain run. These will be called in\n addition to callbacks passed to the chain during construction, but only\n these runtime callbacks will propagate to calls to other objects.\n tags: List of string tags to pass to all callbacks. These will be passed in\n addition to tags passed to the chain during construction, but only\n these runtime tags will propagate to calls to other objects.\n metadata: Optional metadata associated with the chain. Defaults to None\n include_run_info: Whether to include run info in the response. Defaults\n to False.\n\n Returns:\n A dict of named outputs. Should contain all outputs specified in\n `Chain.output_keys`.\n \"\"\"\n inputs = self.prep_inputs(inputs)\n callback_manager = CallbackManager.configure(\n callbacks,\n self.callbacks,\n self.verbose,\n tags,\n self.tags,\n metadata,\n self.metadata,\n )\n new_arg_supported = inspect.signature(self._call).parameters.get(\"run_manager\")\n run_manager = callback_manager.on_chain_start(\n dumpd(self),\n inputs,\n name=run_name,\n )\n try:\n outputs = (\n self._call(inputs, run_manager=run_manager)\n if new_arg_supported\n else self._call(inputs)\n )\n except (KeyboardInterrupt, Exception) as e:\n run_manager.on_chain_error(e)\n raise e\n run_manager.on_chain_end(outputs)\n final_outputs: Dict[str, Any] = self.prep_outputs(\n inputs, outputs, return_only_outputs\n )\n if include_run_info:\n final_outputs[RUN_KEY] = RunInfo(run_id=run_manager.run_id)\n return final_outputs\n\n async def acall(\n self,\n inputs: Union[Dict[str, Any], Any],\n return_only_outputs: bool = False,\n callbacks: Callbacks = None,\n *,\n tags: Optional[List[str]] = None,\n metadata: Optional[Dict[str, Any]] = None,\n run_name: Optional[str] = None,\n include_run_info: bool = False,\n ) -> Dict[str, Any]:\n \"\"\"Asynchronously execute the chain.\n\n Args:\n inputs: Dictionary of inputs, or single input if chain expects\n only one param. Should contain all inputs specified in\n `Chain.input_keys` except for inputs that will be set by the chain's\n memory.\n return_only_outputs: Whether to return only outputs in the\n response. 
If True, only new keys generated by this chain will be\n returned. If False, both input keys and new keys generated by this\n chain will be returned. Defaults to False.\n callbacks: Callbacks to use for this chain run. These will be called in\n addition to callbacks passed to the chain during construction, but only\n these runtime callbacks will propagate to calls to other objects.\n tags: List of string tags to pass to all callbacks. These will be passed in\n addition to tags passed to the chain during construction, but only\n these runtime tags will propagate to calls to other objects.\n metadata: Optional metadata associated with the chain. Defaults to None\n include_run_info: Whether to include run info in the response. Defaults\n to False.\n\n Returns:\n A dict of named outputs. Should contain all outputs specified in\n `Chain.output_keys`.\n \"\"\"\n inputs = self.prep_inputs(inputs)\n callback_manager = AsyncCallbackManager.configure(\n callbacks,\n self.callbacks,\n self.verbose,\n tags,\n self.tags,\n metadata,\n self.metadata,\n )\n new_arg_supported = inspect.signature(self._acall).parameters.get(\"run_manager\")\n run_manager = await callback_manager.on_chain_start(\n dumpd(self),\n inputs,\n name=run_name,\n )\n try:\n outputs = (\n await self._acall(inputs, run_manager=run_manager)\n if new_arg_supported\n else await self._acall(inputs)\n )\n except (KeyboardInterrupt, Exception) as e:\n await run_manager.on_chain_error(e)\n raise e\n await run_manager.on_chain_end(outputs)\n final_outputs: Dict[str, Any] = self.prep_outputs(\n inputs, outputs, return_only_outputs\n )\n if include_run_info:\n final_outputs[RUN_KEY] = RunInfo(run_id=run_manager.run_id)\n return final_outputs\n\n def prep_outputs(\n self,\n inputs: Dict[str, str],\n outputs: Dict[str, str],\n return_only_outputs: bool = False,\n ) -> Dict[str, str]:\n \"\"\"Validate and prepare chain outputs, and save info about this run to memory.\n\n Args:\n inputs: Dictionary of chain inputs, including any inputs added by chain\n memory.\n outputs: Dictionary of initial chain outputs.\n return_only_outputs: Whether to only return the chain outputs. If False,\n inputs are also added to the final outputs.\n\n Returns:\n A dict of the final chain outputs.\n \"\"\"\n self._validate_outputs(outputs)\n if self.memory is not None:\n self.memory.save_context(inputs, outputs)\n if return_only_outputs:\n return outputs\n else:\n return {**inputs, **outputs}\n\n def prep_inputs(self, inputs: Union[Dict[str, Any], Any]) -> Dict[str, str]:\n \"\"\"Validate and prepare chain inputs, including adding inputs from memory.\n\n Args:\n inputs: Dictionary of raw inputs, or single input if chain expects\n only one param. Should contain all inputs specified in\n `Chain.input_keys` except for inputs that will be set by the chain's\n memory.\n\n Returns:\n A dictionary of all inputs, including those added by the chain's memory.\n \"\"\"\n if not isinstance(inputs, dict):\n _input_keys = set(self.input_keys)\n if self.memory is not None:\n # If there are multiple input keys, but some get set by memory so that\n # only one is not set, we can still figure out which key it is.\n _input_keys = _input_keys.difference(self.memory.memory_variables)\n if len(_input_keys) != 1:\n raise ValueError(\n f\"A single string input was passed in, but this chain expects \"\n f\"multiple inputs ({_input_keys}). 
When a chain expects \"\n f\"multiple inputs, please call it by passing in a dictionary, \"\n \"eg `chain({'foo': 1, 'bar': 2})`\"\n )\n inputs = {list(_input_keys)[0]: inputs}\n if self.memory is not None:\n external_context = self.memory.load_memory_variables(inputs)\n inputs = dict(inputs, **external_context)\n self._validate_inputs(inputs)\n return inputs\n\n @property\n def _run_output_key(self) -> str:\n if len(self.output_keys) != 1:\n raise ValueError(\n f\"`run` not supported when there is not exactly \"\n f\"one output key. Got {self.output_keys}.\"\n )\n return self.output_keys[0]\n\n def run(\n self,\n *args: Any,\n callbacks: Callbacks = None,\n tags: Optional[List[str]] = None,\n metadata: Optional[Dict[str, Any]] = None,\n **kwargs: Any,\n ) -> Any:\n \"\"\"Convenience method for executing chain.\n\n The main difference between this method and `Chain.__call__` is that this\n method expects inputs to be passed directly in as positional arguments or\n keyword arguments, whereas `Chain.__call__` expects a single input dictionary\n with all the inputs\n\n Args:\n *args: If the chain expects a single input, it can be passed in as the\n sole positional argument.\n callbacks: Callbacks to use for this chain run. These will be called in\n addition to callbacks passed to the chain during construction, but only\n these runtime callbacks will propagate to calls to other objects.\n tags: List of string tags to pass to all callbacks. These will be passed in\n addition to tags passed to the chain during construction, but only\n these runtime tags will propagate to calls to other objects.\n **kwargs: If the chain expects multiple inputs, they can be passed in\n directly as keyword arguments.\n\n Returns:\n The chain output.\n\n Example:\n .. code-block:: python\n\n # Suppose we have a single-input chain that takes a 'question' string:\n chain.run(\"What's the temperature in Boise, Idaho?\")\n # -> \"The temperature in Boise is...\"\n\n # Suppose we have a multi-input chain that takes a 'question' string\n # and 'context' string:\n question = \"What's the temperature in Boise, Idaho?\"\n context = \"Weather report for Boise, Idaho on 07/03/23...\"\n chain.run(question=question, context=context)\n # -> \"The temperature in Boise is...\"\n \"\"\"\n # Run at start to make sure this is possible/defined\n _output_key = self._run_output_key\n\n if args and not kwargs:\n if len(args) != 1:\n raise ValueError(\"`run` supports only one positional argument.\")\n return self(args[0], callbacks=callbacks, tags=tags, metadata=metadata)[\n _output_key\n ]\n\n if kwargs and not args:\n return self(kwargs, callbacks=callbacks, tags=tags, metadata=metadata)[\n _output_key\n ]\n\n if not kwargs and not args:\n raise ValueError(\n \"`run` supported with either positional arguments or keyword arguments,\"\n \" but none were provided.\"\n )\n else:\n raise ValueError(\n f\"`run` supported with either positional arguments or keyword arguments\"\n f\" but not both. 
Got args: {args} and kwargs: {kwargs}.\"\n )\n\n async def arun(\n self,\n *args: Any,\n callbacks: Callbacks = None,\n tags: Optional[List[str]] = None,\n metadata: Optional[Dict[str, Any]] = None,\n **kwargs: Any,\n ) -> Any:\n \"\"\"Convenience method for executing chain.\n\n The main difference between this method and `Chain.__call__` is that this\n method expects inputs to be passed directly in as positional arguments or\n keyword arguments, whereas `Chain.__call__` expects a single input dictionary\n with all the inputs\n\n\n Args:\n *args: If the chain expects a single input, it can be passed in as the\n sole positional argument.\n callbacks: Callbacks to use for this chain run. These will be called in\n addition to callbacks passed to the chain during construction, but only\n these runtime callbacks will propagate to calls to other objects.\n tags: List of string tags to pass to all callbacks. These will be passed in\n addition to tags passed to the chain during construction, but only\n these runtime tags will propagate to calls to other objects.\n **kwargs: If the chain expects multiple inputs, they can be passed in\n directly as keyword arguments.\n\n Returns:\n The chain output.\n\n Example:\n .. code-block:: python\n\n # Suppose we have a single-input chain that takes a 'question' string:\n await chain.arun(\"What's the temperature in Boise, Idaho?\")\n # -> \"The temperature in Boise is...\"\n\n # Suppose we have a multi-input chain that takes a 'question' string\n # and 'context' string:\n question = \"What's the temperature in Boise, Idaho?\"\n context = \"Weather report for Boise, Idaho on 07/03/23...\"\n await chain.arun(question=question, context=context)\n # -> \"The temperature in Boise is...\"\n \"\"\"\n if len(self.output_keys) != 1:\n raise ValueError(\n f\"`run` not supported when there is not exactly \"\n f\"one output key. Got {self.output_keys}.\"\n )\n elif args and not kwargs:\n if len(args) != 1:\n raise ValueError(\"`run` supports only one positional argument.\")\n return (\n await self.acall(\n args[0], callbacks=callbacks, tags=tags, metadata=metadata\n )\n )[self.output_keys[0]]\n\n if kwargs and not args:\n return (\n await self.acall(\n kwargs, callbacks=callbacks, tags=tags, metadata=metadata\n )\n )[self.output_keys[0]]\n\n raise ValueError(\n f\"`run` supported with either positional arguments or keyword arguments\"\n f\" but not both. Got args: {args} and kwargs: {kwargs}.\"\n )\n\n def dict(self, **kwargs: Any) -> Dict:\n \"\"\"Dictionary representation of chain.\n\n Expects `Chain._chain_type` property to be implemented and for memory to be\n null.\n\n Args:\n **kwargs: Keyword arguments passed to default `pydantic.BaseModel.dict`\n method.\n\n Returns:\n A dictionary representation of the chain.\n\n Example:\n .. code-block:: python\n\n chain.dict(exclude_unset=True)\n # -> {\"_type\": \"foo\", \"verbose\": False, ...}\n \"\"\"\n if self.memory is not None:\n raise ValueError(\"Saving of memory is not yet supported.\")\n _dict = super().dict(**kwargs)\n _dict[\"_type\"] = self._chain_type\n return _dict\n\n def save(self, file_path: Union[Path, str]) -> None:\n \"\"\"Save the chain.\n\n Expects `Chain._chain_type` property to be implemented and for memory to be\n null.\n\n Args:\n file_path: Path to file to save the chain to.\n\n Example:\n .. 
code-block:: python\n\n chain.save(file_path=\"path/chain.yaml\")\n \"\"\"\n # Convert file to Path object.\n if isinstance(file_path, str):\n save_path = Path(file_path)\n else:\n save_path = file_path\n\n directory_path = save_path.parent\n directory_path.mkdir(parents=True, exist_ok=True)\n\n # Fetch dictionary to save\n chain_dict = self.dict()\n\n if save_path.suffix == \".json\":\n with open(file_path, \"w\") as f:\n json.dump(chain_dict, f, indent=4)\n elif save_path.suffix == \".yaml\":\n with open(file_path, \"w\") as f:\n yaml.dump(chain_dict, f, default_flow_style=False)\n else:\n raise ValueError(f\"{save_path} must be json or yaml\")\n\n def apply(\n self, input_list: List[Dict[str, Any]], callbacks: Callbacks = None\n ) -> List[Dict[str, str]]:\n \"\"\"Call the chain on all inputs in the list.\"\"\"\n return [self(inputs, callbacks=callbacks) for inputs in input_list]"
},
{
"identifier": "LLMChain",
"path": "src/chains/llm_chain.py",
"snippet": "class LLMChain(Chain):\n \"\"\"Chain to run queries against LLMs.\n\n Example:\n .. code-block:: python\n\n from langchain import LLMChain, OpenAI, PromptTemplate\n prompt_template = \"Tell me a {adjective} joke\"\n prompt = PromptTemplate(\n input_variables=[\"adjective\"], template=prompt_template\n )\n llm = LLMChain(llm=OpenAI(), prompt=prompt)\n \"\"\"\n\n @property\n def lc_serializable(self) -> bool:\n return True\n\n prompt: BasePromptTemplate\n \"\"\"Prompt object to use.\"\"\"\n llm: BaseLanguageModel\n \"\"\"Language model to call.\"\"\"\n output_key: str = \"text\" #: :meta private:\n output_parser: BaseLLMOutputParser = Field(default_factory=StrOutputParser)\n \"\"\"Output parser to use.\n Defaults to one that takes the most likely string but does not change it\n otherwise.\"\"\"\n return_final_only: bool = True\n \"\"\"Whether to return only the final parsed result. Defaults to True.\n If false, will return a bunch of extra information about the generation.\"\"\"\n llm_kwargs: dict = Field(default_factory=dict)\n\n class Config:\n \"\"\"Configuration for this pydantic object.\"\"\"\n\n extra = Extra.forbid\n arbitrary_types_allowed = True\n\n @property\n def input_keys(self) -> List[str]:\n \"\"\"Will be whatever keys the prompt expects.\n\n :meta private:\n \"\"\"\n return self.prompt.input_variables\n\n @property\n def output_keys(self) -> List[str]:\n \"\"\"Will always return text key.\n\n :meta private:\n \"\"\"\n if self.return_final_only:\n return [self.output_key]\n else:\n return [self.output_key, \"full_generation\"]\n\n def _call(\n self,\n inputs: Dict[str, Any],\n run_manager: Optional[CallbackManagerForChainRun] = None,\n ) -> Dict[str, str]:\n response = self.generate([inputs], run_manager=run_manager)\n return self.create_outputs(response)[0]\n\n def generate(\n self,\n input_list: List[Dict[str, Any]],\n run_manager: Optional[CallbackManagerForChainRun] = None,\n ) -> LLMResult:\n \"\"\"Generate LLM result from inputs.\"\"\"\n prompts, stop = self.prep_prompts(input_list, run_manager=run_manager)\n return self.llm.generate_prompt(\n prompts,\n stop,\n callbacks=run_manager.get_child() if run_manager else None,\n **self.llm_kwargs,\n )\n\n async def agenerate(\n self,\n input_list: List[Dict[str, Any]],\n run_manager: Optional[AsyncCallbackManagerForChainRun] = None,\n ) -> LLMResult:\n \"\"\"Generate LLM result from inputs.\"\"\"\n prompts, stop = await self.aprep_prompts(input_list, run_manager=run_manager)\n return await self.llm.agenerate_prompt(\n prompts,\n stop,\n callbacks=run_manager.get_child() if run_manager else None,\n **self.llm_kwargs,\n )\n\n def prep_prompts(\n self,\n input_list: List[Dict[str, Any]],\n run_manager: Optional[CallbackManagerForChainRun] = None,\n ) -> Tuple[List[PromptValue], Optional[List[str]]]:\n \"\"\"Prepare prompts from inputs.\"\"\"\n stop = None\n if len(input_list) == 0:\n return [], stop\n if \"stop\" in input_list[0]:\n stop = input_list[0][\"stop\"]\n prompts = []\n for inputs in input_list:\n selected_inputs = {k: inputs[k] for k in self.prompt.input_variables}\n prompt = self.prompt.format_prompt(**selected_inputs)\n _colored_text = get_colored_text(prompt.to_string(), \"green\")\n _text = \"Prompt after formatting:\\n\" + _colored_text\n if run_manager:\n run_manager.on_text(_text, end=\"\\n\", verbose=self.verbose)\n if \"stop\" in inputs and inputs[\"stop\"] != stop:\n raise ValueError(\n \"If `stop` is present in any inputs, should be present in all.\"\n )\n prompts.append(prompt)\n return prompts, 
stop\n\n async def aprep_prompts(\n self,\n input_list: List[Dict[str, Any]],\n run_manager: Optional[AsyncCallbackManagerForChainRun] = None,\n ) -> Tuple[List[PromptValue], Optional[List[str]]]:\n \"\"\"Prepare prompts from inputs.\"\"\"\n stop = None\n if len(input_list) == 0:\n return [], stop\n if \"stop\" in input_list[0]:\n stop = input_list[0][\"stop\"]\n prompts = []\n for inputs in input_list:\n selected_inputs = {k: inputs[k] for k in self.prompt.input_variables}\n prompt = self.prompt.format_prompt(**selected_inputs)\n _colored_text = get_colored_text(prompt.to_string(), \"green\")\n _text = \"Prompt after formatting:\\n\" + _colored_text\n if run_manager:\n await run_manager.on_text(_text, end=\"\\n\", verbose=self.verbose)\n if \"stop\" in inputs and inputs[\"stop\"] != stop:\n raise ValueError(\n \"If `stop` is present in any inputs, should be present in all.\"\n )\n prompts.append(prompt)\n return prompts, stop\n\n def apply(\n self, input_list: List[Dict[str, Any]], callbacks: Callbacks = None\n ) -> List[Dict[str, str]]:\n \"\"\"Utilize the LLM generate method for speed gains.\"\"\"\n callback_manager = CallbackManager.configure(\n callbacks, self.callbacks, self.verbose\n )\n run_manager = callback_manager.on_chain_start(\n dumpd(self),\n {\"input_list\": input_list},\n )\n try:\n response = self.generate(input_list, run_manager=run_manager)\n except (KeyboardInterrupt, Exception) as e:\n run_manager.on_chain_error(e)\n raise e\n outputs = self.create_outputs(response)\n run_manager.on_chain_end({\"outputs\": outputs})\n return outputs\n\n async def aapply(\n self, input_list: List[Dict[str, Any]], callbacks: Callbacks = None\n ) -> List[Dict[str, str]]:\n \"\"\"Utilize the LLM generate method for speed gains.\"\"\"\n callback_manager = AsyncCallbackManager.configure(\n callbacks, self.callbacks, self.verbose\n )\n run_manager = await callback_manager.on_chain_start(\n dumpd(self),\n {\"input_list\": input_list},\n )\n try:\n response = await self.agenerate(input_list, run_manager=run_manager)\n except (KeyboardInterrupt, Exception) as e:\n await run_manager.on_chain_error(e)\n raise e\n outputs = self.create_outputs(response)\n await run_manager.on_chain_end({\"outputs\": outputs})\n return outputs\n\n @property\n def _run_output_key(self) -> str:\n return self.output_key\n\n def create_outputs(self, llm_result: LLMResult) -> List[Dict[str, Any]]:\n \"\"\"Create outputs from response.\"\"\"\n result = [\n # Get the text of the top generated string.\n {\n self.output_key: self.output_parser.parse_result(generation),\n \"full_generation\": generation,\n }\n for generation in llm_result.generations\n ]\n if self.return_final_only:\n result = [{self.output_key: r[self.output_key]} for r in result]\n return result\n\n async def _acall(\n self,\n inputs: Dict[str, Any],\n run_manager: Optional[AsyncCallbackManagerForChainRun] = None,\n ) -> Dict[str, str]:\n response = await self.agenerate([inputs], run_manager=run_manager)\n return self.create_outputs(response)[0]\n\n def predict(self, callbacks: Callbacks = None, **kwargs: Any) -> str:\n \"\"\"Format prompt with kwargs and pass to LLM.\n\n Args:\n callbacks: Callbacks to pass to LLMChain\n **kwargs: Keys to pass to prompt template.\n\n Returns:\n Completion from LLM.\n\n Example:\n .. 
code-block:: python\n\n completion = llm.predict(adjective=\"funny\")\n \"\"\"\n return self(kwargs, callbacks=callbacks)[self.output_key]\n\n async def apredict(self, callbacks: Callbacks = None, **kwargs: Any) -> str:\n \"\"\"Format prompt with kwargs and pass to LLM.\n\n Args:\n callbacks: Callbacks to pass to LLMChain\n **kwargs: Keys to pass to prompt template.\n\n Returns:\n Completion from LLM.\n\n Example:\n .. code-block:: python\n\n completion = llm.predict(adjective=\"funny\")\n \"\"\"\n return (await self.acall(kwargs, callbacks=callbacks))[self.output_key]\n\n def predict_and_parse(\n self, callbacks: Callbacks = None, **kwargs: Any\n ) -> Union[str, List[str], Dict[str, Any]]:\n \"\"\"Call predict and then parse the results.\"\"\"\n warnings.warn(\n \"The predict_and_parse method is deprecated, \"\n \"instead pass an output parser directly to LLMChain.\"\n )\n result = self.predict(callbacks=callbacks, **kwargs)\n if self.prompt.output_parser is not None:\n return self.prompt.output_parser.parse(result)\n else:\n return result\n\n async def apredict_and_parse(\n self, callbacks: Callbacks = None, **kwargs: Any\n ) -> Union[str, List[str], Dict[str, str]]:\n \"\"\"Call apredict and then parse the results.\"\"\"\n warnings.warn(\n \"The apredict_and_parse method is deprecated, \"\n \"instead pass an output parser directly to LLMChain.\"\n )\n result = await self.apredict(callbacks=callbacks, **kwargs)\n if self.prompt.output_parser is not None:\n return self.prompt.output_parser.parse(result)\n else:\n return result\n\n def apply_and_parse(\n self, input_list: List[Dict[str, Any]], callbacks: Callbacks = None\n ) -> Sequence[Union[str, List[str], Dict[str, str]]]:\n \"\"\"Call apply and then parse the results.\"\"\"\n warnings.warn(\n \"The apply_and_parse method is deprecated, \"\n \"instead pass an output parser directly to LLMChain.\"\n )\n result = self.apply(input_list, callbacks=callbacks)\n return self._parse_generation(result)\n\n def _parse_generation(\n self, generation: List[Dict[str, str]]\n ) -> Sequence[Union[str, List[str], Dict[str, str]]]:\n if self.prompt.output_parser is not None:\n return [\n self.prompt.output_parser.parse(res[self.output_key])\n for res in generation\n ]\n else:\n return generation\n\n async def aapply_and_parse(\n self, input_list: List[Dict[str, Any]], callbacks: Callbacks = None\n ) -> Sequence[Union[str, List[str], Dict[str, str]]]:\n \"\"\"Call apply and then parse the results.\"\"\"\n warnings.warn(\n \"The aapply_and_parse method is deprecated, \"\n \"instead pass an output parser directly to LLMChain.\"\n )\n result = await self.aapply(input_list, callbacks=callbacks)\n return self._parse_generation(result)\n\n @property\n def _chain_type(self) -> str:\n return \"llm_chain\"\n\n @classmethod\n def from_string(cls, llm: BaseLanguageModel, template: str) -> LLMChain:\n \"\"\"Create LLMChain from LLM and template.\"\"\"\n prompt_template = PromptTemplate.from_template(template)\n return cls(llm=llm, prompt=prompt_template)"
}
] | import ast
import math
import re
import warnings
import numexpr
from typing import Any, Dict, List, Optional
from langchain.callbacks.manager import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain.prompts.prompt import PromptTemplate
from langchain.pydantic_v1 import Extra, root_validator
from langchain.schema import BasePromptTemplate
from langchain.schema.language_model import BaseLanguageModel
from src.chains.chain import Chain
from src.chains.llm_chain import LLMChain | 9,496 |
# flake8: noqa
_PROMPT_TEMPLATE = """Translate a math problem into a expression that can be executed using Python's numexpr library. Use the output of running this code to answer the question.
You MUST follow the following guidelines:
- Do not use "where(...)" expressions in your code since it is not supported.
- Do not use "fmax(...)" expression in your code since it is not supported. Use "max(...)" instead.
- Never introduce a variable. For instance "gazelle_max_speed * 1.4" is not allowed. Pick a correct number from the given context.
Question: ${{Question with math problem.}}
```text
${{single line mathematical expression that solves the problem}}
```
...numexpr.evaluate(text)...
```output
${{Output of running the code}}
```
Answer: ${{Answer}}
Begin.
Question: What is 37593 * 67?
```text
37593 * 67
```
...numexpr.evaluate("37593 * 67")...
```output
2518731
```
Answer: 2518731
Question: 37593^(1/5)
```text
37593**(1/5)
```
...numexpr.evaluate("37593**(1/5)")...
```output
8.222831614237718
```
Answer: 8.222831614237718
Question: {question}
"""
PROMPT = PromptTemplate(
input_variables=["question"],
template=_PROMPT_TEMPLATE,
)
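# Illustrative use of the template above (the question is made up): PROMPT is a
# plain langchain PromptTemplate, so formatting it substitutes {question} into the
# few-shot prompt, whose final line becomes the new question that the LLM is
# expected to answer with a single numexpr-compatible expression in a ```text block.
#
# >>> prompt_str = PROMPT.format(question="What is 12 * 12?")
# >>> prompt_str.splitlines()[-1]
# 'Question: What is 12 * 12?'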
# helper functions to handle min and max functions
def compute_function(match):
func, values = match.groups()
# Extract numbers and remove commas from between digits
numbers = [float(re.sub(r"(?<=\d),(?=\d)", "", v)) for v in values.split(",")]
# Compute the min or max based on the detected function
result = min(numbers) if func == "min" else max(numbers)
return str(result)
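# compute_function is written as a re.sub callback: it expects a match with two
# groups, the function name and its comma-separated arguments. The pattern it is
# paired with is not shown in this file; something like r"(min|max)\(([^()]*)\)"
# (an assumed pattern, not taken from the source) would fit:
#
# >>> re.sub(r"(min|max)\(([^()]*)\)", compute_function, "max(3, 7) - min(2, 5)")
# '7.0 - 2.0'
#
# Note that values.split(",") runs before the per-piece comma stripping, so no
# piece can still contain a comma when the (?<=\d),(?=\d) substitution is applied;
# a thousands separator such as "1,234" is therefore split into 1.0 and 234.0
# rather than cleaned into 1234.0.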
class MaxTransformer(ast.NodeTransformer):
def visit_Call(self, node):
self.generic_visit(node) # Apply the transformation to child nodes first
if isinstance(node.func, ast.Name) and node.func.id in ("max", "min"):
if all(isinstance(arg, (ast.Num, ast.Constant)) for arg in node.args):
# Calculate the max value
# print(node.args)
args_as_strings = (ast.unparse(arg) for arg in node.args)
args_str = ", ".join(args_as_strings)
print(args_str)
if node.func.id == "min":
value = min(
arg.n if isinstance(arg, ast.Num) else arg.value
for arg in node.args
)
else:
value = max(
arg.n if isinstance(arg, ast.Num) else arg.value
for arg in node.args
)
# Replace the max call with the max value directly
return ast.copy_location(ast.Constant(value=value), node)
return node
def replace_min_max_functions(expression):
# Parse the expression into an AST
parsed_expression = ast.parse(expression, mode="eval")
# Transform the AST
transformer = MaxTransformer()
transformed_ast = transformer.visit(parsed_expression)
# Fix the missing locations in the AST
transformed_ast = ast.fix_missing_locations(transformed_ast)
# Compile the transformed AST
compiled_code = compile(transformed_ast, "<string>", "eval")
# Evaluate the compiled code
result = eval(compiled_code)
return str(result)
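# Illustrative usage (inputs made up): MaxTransformer, despite its name, folds
# both min() and max() calls over literal numbers into constants;
# replace_min_max_functions then evaluates the transformed expression with eval()
# and returns the result as a string. On Python versions where ast.Num is still
# available (it has been deprecated since 3.8), a pure-numeric expression
# round-trips like this (the stray print(args_str) in visit_Call also echoes the
# literal arguments):
#
# >>> replace_min_max_functions("max(3, 7) + 2")
# '9'
# >>> replace_min_max_functions("min(10, 4) * 2")
# '8'
#
# Since this path goes through eval(), it should only ever be fed trusted,
# purely numeric expressions.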
class LLMMathChain(Chain):
"""Chain that interprets a prompt and executes python code to do math.
Example:
.. code-block:: python
from langchain import LLMMathChain, OpenAI
llm_math = LLMMathChain.from_llm(OpenAI())
"""
| """Chain that interprets a prompt and executes python code to do math."""
from __future__ import annotations
# flake8: noqa
_PROMPT_TEMPLATE = """Translate a math problem into a expression that can be executed using Python's numexpr library. Use the output of running this code to answer the question.
You MUST follow the following guidelines:
- Do not use "where(...)" expressions in your code since it is not supported.
- Do not use "fmax(...)" expression in your code since it is not supported. Use "max(...)" instead.
- Never introduce a variable. For instance "gazelle_max_speed * 1.4" is not allowed. Pick a correct number from the given context.
Question: ${{Question with math problem.}}
```text
${{single line mathematical expression that solves the problem}}
```
...numexpr.evaluate(text)...
```output
${{Output of running the code}}
```
Answer: ${{Answer}}
Begin.
Question: What is 37593 * 67?
```text
37593 * 67
```
...numexpr.evaluate("37593 * 67")...
```output
2518731
```
Answer: 2518731
Question: 37593^(1/5)
```text
37593**(1/5)
```
...numexpr.evaluate("37593**(1/5)")...
```output
8.222831614237718
```
Answer: 8.222831614237718
Question: {question}
"""
PROMPT = PromptTemplate(
input_variables=["question"],
template=_PROMPT_TEMPLATE,
)
# helper functions to handle min and max functions
def compute_function(match):
func, values = match.groups()
# Extract numbers and remove commas from between digits
numbers = [float(re.sub(r"(?<=\d),(?=\d)", "", v)) for v in values.split(",")]
# Compute the min or max based on the detected function
result = min(numbers) if func == "min" else max(numbers)
return str(result)
class MaxTransformer(ast.NodeTransformer):
def visit_Call(self, node):
self.generic_visit(node) # Apply the transformation to child nodes first
if isinstance(node.func, ast.Name) and node.func.id in ("max", "min"):
if all(isinstance(arg, (ast.Num, ast.Constant)) for arg in node.args):
# Calculate the max value
# print(node.args)
args_as_strings = (ast.unparse(arg) for arg in node.args)
args_str = ", ".join(args_as_strings)
print(args_str)
if node.func.id == "min":
value = min(
arg.n if isinstance(arg, ast.Num) else arg.value
for arg in node.args
)
else:
value = max(
arg.n if isinstance(arg, ast.Num) else arg.value
for arg in node.args
)
# Replace the max call with the max value directly
return ast.copy_location(ast.Constant(value=value), node)
return node
def replace_min_max_functions(expression):
# Parse the expression into an AST
parsed_expression = ast.parse(expression, mode="eval")
# Transform the AST
transformer = MaxTransformer()
transformed_ast = transformer.visit(parsed_expression)
# Fix the missing locations in the AST
transformed_ast = ast.fix_missing_locations(transformed_ast)
# Compile the transformed AST
compiled_code = compile(transformed_ast, "<string>", "eval")
# Evaluate the compiled code
result = eval(compiled_code)
return str(result)
class LLMMathChain(Chain):
"""Chain that interprets a prompt and executes python code to do math.
Example:
.. code-block:: python
from langchain import LLMMathChain, OpenAI
llm_math = LLMMathChain.from_llm(OpenAI())
"""
| llm_chain: LLMChain | 1 | 2023-12-06 21:12:54+00:00 | 12k |
bytedance/ImageDream | threestudio/systems/base.py | [
{
"identifier": "Exporter",
"path": "threestudio/models/exporters/base.py",
"snippet": "class Exporter(BaseObject):\n @dataclass\n class Config(BaseObject.Config):\n save_video: bool = False\n\n cfg: Config\n\n def configure(\n self,\n geometry: BaseImplicitGeometry,\n material: BaseMaterial,\n background: BaseBackground,\n ) -> None:\n @dataclass\n class SubModules:\n geometry: BaseImplicitGeometry\n material: BaseMaterial\n background: BaseBackground\n\n self.sub_modules = SubModules(geometry, material, background)\n\n @property\n def geometry(self) -> BaseImplicitGeometry:\n return self.sub_modules.geometry\n\n @property\n def material(self) -> BaseMaterial:\n return self.sub_modules.material\n\n @property\n def background(self) -> BaseBackground:\n return self.sub_modules.background\n\n def __call__(self, *args, **kwargs) -> List[ExporterOutput]:\n raise NotImplementedError"
},
{
"identifier": "ExporterOutput",
"path": "threestudio/models/exporters/base.py",
"snippet": "class ExporterOutput:\n save_name: str\n save_type: str\n params: Dict[str, Any]"
},
{
"identifier": "parse_optimizer",
"path": "threestudio/systems/utils.py",
"snippet": "def parse_optimizer(config, model):\n if hasattr(config, \"params\"):\n params = [\n {\"params\": get_parameters(model, name), \"name\": name, **args}\n for name, args in config.params.items()\n ]\n threestudio.debug(f\"Specify optimizer params: {config.params}\")\n else:\n params = model.parameters()\n if config.name in [\"FusedAdam\"]:\n import apex\n\n optim = getattr(apex.optimizers, config.name)(params, **config.args)\n elif config.name in [\"Adan\"]:\n from threestudio.systems import optimizers\n\n optim = getattr(optimizers, config.name)(params, **config.args)\n else:\n optim = getattr(torch.optim, config.name)(params, **config.args)\n return optim"
},
{
"identifier": "parse_scheduler",
"path": "threestudio/systems/utils.py",
"snippet": "def parse_scheduler(config, optimizer):\n interval = config.get(\"interval\", \"epoch\")\n assert interval in [\"epoch\", \"step\"]\n if config.name == \"SequentialLR\":\n scheduler = {\n \"scheduler\": lr_scheduler.SequentialLR(\n optimizer,\n [\n parse_scheduler(conf, optimizer)[\"scheduler\"]\n for conf in config.schedulers\n ],\n milestones=config.milestones,\n ),\n \"interval\": interval,\n }\n elif config.name == \"ChainedScheduler\":\n scheduler = {\n \"scheduler\": lr_scheduler.ChainedScheduler(\n [\n parse_scheduler(conf, optimizer)[\"scheduler\"]\n for conf in config.schedulers\n ]\n ),\n \"interval\": interval,\n }\n else:\n scheduler = {\n \"scheduler\": get_scheduler(config.name)(optimizer, **config.args),\n \"interval\": interval,\n }\n return scheduler"
},
{
"identifier": "Updateable",
"path": "threestudio/utils/base.py",
"snippet": "class Updateable:\n def do_update_step(\n self, epoch: int, global_step: int, on_load_weights: bool = False\n ):\n for attr in self.__dir__():\n if attr.startswith(\"_\"):\n continue\n try:\n module = getattr(self, attr)\n except:\n continue # ignore attributes like property, which can't be retrived using getattr?\n if isinstance(module, Updateable):\n module.do_update_step(\n epoch, global_step, on_load_weights=on_load_weights\n )\n self.update_step(epoch, global_step, on_load_weights=on_load_weights)\n\n def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False):\n # override this method to implement custom update logic\n # if on_load_weights is True, you should be careful doing things related to model evaluations,\n # as the models and tensors are not guarenteed to be on the same device\n pass"
},
{
"identifier": "update_if_possible",
"path": "threestudio/utils/base.py",
"snippet": "def update_if_possible(module: Any, epoch: int, global_step: int) -> None:\n if isinstance(module, Updateable):\n module.do_update_step(epoch, global_step)"
},
{
"identifier": "parse_structured",
"path": "threestudio/utils/config.py",
"snippet": "def parse_structured(fields: Any, cfg: Optional[Union[dict, DictConfig]] = None) -> Any:\n scfg = OmegaConf.structured(fields(**cfg))\n return scfg"
},
{
"identifier": "C",
"path": "threestudio/utils/misc.py",
"snippet": "def C(value: Any, epoch: int, global_step: int) -> float:\n if isinstance(value, int) or isinstance(value, float):\n pass\n else:\n value = config_to_primitive(value)\n if not isinstance(value, list):\n raise TypeError(\"Scalar specification only supports list, got\", type(value))\n if len(value) == 3:\n value = [0] + value\n assert len(value) == 4\n start_step, start_value, end_value, end_step = value\n if isinstance(end_step, int):\n current_step = global_step\n value = start_value + (end_value - start_value) * max(\n min(1.0, (current_step - start_step) / (end_step - start_step)), 0.0\n )\n elif isinstance(end_step, float):\n current_step = epoch\n value = start_value + (end_value - start_value) * max(\n min(1.0, (current_step - start_step) / (end_step - start_step)), 0.0\n )\n return value"
},
{
"identifier": "cleanup",
"path": "threestudio/utils/misc.py",
"snippet": "def cleanup():\n gc.collect()\n torch.cuda.empty_cache()\n tcnn.free_temporary_memory()"
},
{
"identifier": "get_device",
"path": "threestudio/utils/misc.py",
"snippet": "def get_device():\n return torch.device(f\"cuda:{get_rank()}\")"
},
{
"identifier": "load_module_weights",
"path": "threestudio/utils/misc.py",
"snippet": "def load_module_weights(\n path, module_name=None, ignore_modules=None, map_location=None\n) -> Tuple[dict, int, int]:\n if module_name is not None and ignore_modules is not None:\n raise ValueError(\"module_name and ignore_modules cannot be both set\")\n if map_location is None:\n map_location = get_device()\n\n ckpt = torch.load(path, map_location=map_location)\n state_dict = ckpt[\"state_dict\"]\n state_dict_to_load = state_dict\n\n if ignore_modules is not None:\n state_dict_to_load = {}\n for k, v in state_dict.items():\n ignore = any(\n [k.startswith(ignore_module + \".\") for ignore_module in ignore_modules]\n )\n if ignore:\n continue\n state_dict_to_load[k] = v\n\n if module_name is not None:\n state_dict_to_load = {}\n for k, v in state_dict.items():\n m = re.match(rf\"^{module_name}\\.(.*)$\", k)\n if m is None:\n continue\n state_dict_to_load[m.group(1)] = v\n\n return state_dict_to_load, ckpt[\"epoch\"], ckpt[\"global_step\"]"
},
{
"identifier": "SaverMixin",
"path": "threestudio/utils/saving.py",
"snippet": "class SaverMixin:\n _save_dir: Optional[str] = None\n _wandb_logger: Optional[WandbLogger] = None\n\n def set_save_dir(self, save_dir: str):\n self._save_dir = save_dir\n\n def get_save_dir(self):\n if self._save_dir is None:\n raise ValueError(\"Save dir is not set\")\n return self._save_dir\n\n def convert_data(self, data):\n if data is None:\n return None\n elif isinstance(data, np.ndarray):\n return data\n elif isinstance(data, torch.Tensor):\n return data.detach().cpu().numpy()\n elif isinstance(data, list):\n return [self.convert_data(d) for d in data]\n elif isinstance(data, dict):\n return {k: self.convert_data(v) for k, v in data.items()}\n else:\n raise TypeError(\n \"Data must be in type numpy.ndarray, torch.Tensor, list or dict, getting\",\n type(data),\n )\n\n def get_save_path(self, filename):\n save_path = os.path.join(self.get_save_dir(), filename)\n os.makedirs(os.path.dirname(save_path), exist_ok=True)\n return save_path\n\n def create_loggers(self, cfg_loggers: DictConfig) -> None:\n if \"wandb\" in cfg_loggers.keys() and cfg_loggers.wandb.enable:\n self._wandb_logger = WandbLogger(\n project=cfg_loggers.wandb.project, name=cfg_loggers.wandb.name\n )\n\n def get_loggers(self) -> List:\n if self._wandb_logger:\n return [self._wandb_logger]\n else:\n return []\n\n DEFAULT_RGB_KWARGS = {\"data_format\": \"HWC\", \"data_range\": (0, 1)}\n DEFAULT_UV_KWARGS = {\n \"data_format\": \"HWC\",\n \"data_range\": (0, 1),\n \"cmap\": \"checkerboard\",\n }\n DEFAULT_GRAYSCALE_KWARGS = {\"data_range\": None, \"cmap\": \"jet\"}\n DEFAULT_GRID_KWARGS = {\"align\": \"max\"}\n\n def get_rgb_image_(self, img, data_format, data_range, rgba=False):\n img = self.convert_data(img)\n assert data_format in [\"CHW\", \"HWC\"]\n if data_format == \"CHW\":\n img = img.transpose(1, 2, 0)\n if img.dtype != np.uint8:\n img = img.clip(min=data_range[0], max=data_range[1])\n img = (\n (img - data_range[0]) / (data_range[1] - data_range[0]) * 255.0\n ).astype(np.uint8)\n nc = 4 if rgba else 3\n imgs = [img[..., start : start + nc] for start in range(0, img.shape[-1], nc)]\n imgs = [\n img_\n if img_.shape[-1] == nc\n else np.concatenate(\n [\n img_,\n np.zeros(\n (img_.shape[0], img_.shape[1], nc - img_.shape[2]),\n dtype=img_.dtype,\n ),\n ],\n axis=-1,\n )\n for img_ in imgs\n ]\n img = np.concatenate(imgs, axis=1)\n if rgba:\n img = cv2.cvtColor(img, cv2.COLOR_RGBA2BGRA)\n else:\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n return img\n\n def _save_rgb_image(\n self,\n filename,\n img,\n data_format,\n data_range,\n name: Optional[str] = None,\n step: Optional[int] = None,\n ):\n img = self.get_rgb_image_(img, data_format, data_range)\n cv2.imwrite(filename, img)\n if name and self._wandb_logger:\n wandb.log(\n {\n name: wandb.Image(self.get_save_path(filename)),\n \"trainer/global_step\": step,\n }\n )\n\n def save_rgb_image(\n self,\n filename,\n img,\n data_format=DEFAULT_RGB_KWARGS[\"data_format\"],\n data_range=DEFAULT_RGB_KWARGS[\"data_range\"],\n name: Optional[str] = None,\n step: Optional[int] = None,\n ) -> str:\n save_path = self.get_save_path(filename)\n self._save_rgb_image(save_path, img, data_format, data_range, name, step)\n return save_path\n\n def get_uv_image_(self, img, data_format, data_range, cmap):\n img = self.convert_data(img)\n assert data_format in [\"CHW\", \"HWC\"]\n if data_format == \"CHW\":\n img = img.transpose(1, 2, 0)\n img = img.clip(min=data_range[0], max=data_range[1])\n img = (img - data_range[0]) / (data_range[1] - data_range[0])\n assert cmap in 
[\"checkerboard\", \"color\"]\n if cmap == \"checkerboard\":\n n_grid = 64\n mask = (img * n_grid).astype(int)\n mask = (mask[..., 0] + mask[..., 1]) % 2 == 0\n img = np.ones((img.shape[0], img.shape[1], 3), dtype=np.uint8) * 255\n img[mask] = np.array([255, 0, 255], dtype=np.uint8)\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n elif cmap == \"color\":\n img_ = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)\n img_[..., 0] = (img[..., 0] * 255).astype(np.uint8)\n img_[..., 1] = (img[..., 1] * 255).astype(np.uint8)\n img_ = cv2.cvtColor(img_, cv2.COLOR_RGB2BGR)\n img = img_\n return img\n\n def save_uv_image(\n self,\n filename,\n img,\n data_format=DEFAULT_UV_KWARGS[\"data_format\"],\n data_range=DEFAULT_UV_KWARGS[\"data_range\"],\n cmap=DEFAULT_UV_KWARGS[\"cmap\"],\n ) -> str:\n save_path = self.get_save_path(filename)\n img = self.get_uv_image_(img, data_format, data_range, cmap)\n cv2.imwrite(save_path, img)\n return save_path\n\n def get_grayscale_image_(self, img, data_range, cmap):\n img = self.convert_data(img)\n img = np.nan_to_num(img)\n if data_range is None:\n img = (img - img.min()) / (img.max() - img.min())\n else:\n img = img.clip(data_range[0], data_range[1])\n img = (img - data_range[0]) / (data_range[1] - data_range[0])\n assert cmap in [None, \"jet\", \"magma\", \"spectral\"]\n if cmap == None:\n img = (img * 255.0).astype(np.uint8)\n img = np.repeat(img[..., None], 3, axis=2)\n elif cmap == \"jet\":\n img = (img * 255.0).astype(np.uint8)\n img = cv2.applyColorMap(img, cv2.COLORMAP_JET)\n elif cmap == \"magma\":\n img = 1.0 - img\n base = cm.get_cmap(\"magma\")\n num_bins = 256\n colormap = LinearSegmentedColormap.from_list(\n f\"{base.name}{num_bins}\", base(np.linspace(0, 1, num_bins)), num_bins\n )(np.linspace(0, 1, num_bins))[:, :3]\n a = np.floor(img * 255.0)\n b = (a + 1).clip(max=255.0)\n f = img * 255.0 - a\n a = a.astype(np.uint16).clip(0, 255)\n b = b.astype(np.uint16).clip(0, 255)\n img = colormap[a] + (colormap[b] - colormap[a]) * f[..., None]\n img = (img * 255.0).astype(np.uint8)\n elif cmap == \"spectral\":\n colormap = plt.get_cmap(\"Spectral\")\n\n def blend_rgba(image):\n image = image[..., :3] * image[..., -1:] + (\n 1.0 - image[..., -1:]\n ) # blend A to RGB\n return image\n\n img = colormap(img)\n img = blend_rgba(img)\n img = (img * 255).astype(np.uint8)\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n return img\n\n def _save_grayscale_image(\n self,\n filename,\n img,\n data_range,\n cmap,\n name: Optional[str] = None,\n step: Optional[int] = None,\n ):\n img = self.get_grayscale_image_(img, data_range, cmap)\n cv2.imwrite(filename, img)\n if name and self._wandb_logger:\n wandb.log(\n {\n name: wandb.Image(self.get_save_path(filename)),\n \"trainer/global_step\": step,\n }\n )\n\n def save_grayscale_image(\n self,\n filename,\n img,\n data_range=DEFAULT_GRAYSCALE_KWARGS[\"data_range\"],\n cmap=DEFAULT_GRAYSCALE_KWARGS[\"cmap\"],\n name: Optional[str] = None,\n step: Optional[int] = None,\n ) -> str:\n save_path = self.get_save_path(filename)\n self._save_grayscale_image(save_path, img, data_range, cmap, name, step)\n return save_path\n\n def get_image_grid_(self, imgs, align):\n if isinstance(imgs[0], list):\n return np.concatenate(\n [self.get_image_grid_(row, align) for row in imgs], axis=0\n )\n cols = []\n for col in imgs:\n assert col[\"type\"] in [\"rgb\", \"uv\", \"grayscale\"]\n if col[\"type\"] == \"rgb\":\n rgb_kwargs = self.DEFAULT_RGB_KWARGS.copy()\n rgb_kwargs.update(col[\"kwargs\"])\n 
cols.append(self.get_rgb_image_(col[\"img\"], **rgb_kwargs))\n elif col[\"type\"] == \"uv\":\n uv_kwargs = self.DEFAULT_UV_KWARGS.copy()\n uv_kwargs.update(col[\"kwargs\"])\n cols.append(self.get_uv_image_(col[\"img\"], **uv_kwargs))\n elif col[\"type\"] == \"grayscale\":\n grayscale_kwargs = self.DEFAULT_GRAYSCALE_KWARGS.copy()\n grayscale_kwargs.update(col[\"kwargs\"])\n cols.append(self.get_grayscale_image_(col[\"img\"], **grayscale_kwargs))\n\n if align == \"max\":\n h = max([col.shape[0] for col in cols])\n w = max([col.shape[1] for col in cols])\n elif align == \"min\":\n h = min([col.shape[0] for col in cols])\n w = min([col.shape[1] for col in cols])\n elif isinstance(align, int):\n h = align\n w = align\n elif (\n isinstance(align, tuple)\n and isinstance(align[0], int)\n and isinstance(align[1], int)\n ):\n h, w = align\n else:\n raise ValueError(\n f\"Unsupported image grid align: {align}, should be min, max, int or (int, int)\"\n )\n\n for i in range(len(cols)):\n if cols[i].shape[0] != h or cols[i].shape[1] != w:\n cols[i] = cv2.resize(cols[i], (w, h), interpolation=cv2.INTER_LINEAR)\n return np.concatenate(cols, axis=1)\n\n def save_image_grid(\n self,\n filename,\n imgs,\n align=DEFAULT_GRID_KWARGS[\"align\"],\n name: Optional[str] = None,\n step: Optional[int] = None,\n texts: Optional[List[float]] = None,\n ):\n save_path = self.get_save_path(filename)\n img = self.get_image_grid_(imgs, align=align)\n\n if texts is not None:\n img = Image.fromarray(img)\n draw = ImageDraw.Draw(img)\n black, white = (0, 0, 0), (255, 255, 255)\n for i, text in enumerate(texts):\n draw.text((2, (img.size[1] // len(texts)) * i + 1), f\"{text}\", white)\n draw.text((0, (img.size[1] // len(texts)) * i + 1), f\"{text}\", white)\n draw.text((2, (img.size[1] // len(texts)) * i - 1), f\"{text}\", white)\n draw.text((0, (img.size[1] // len(texts)) * i - 1), f\"{text}\", white)\n draw.text((1, (img.size[1] // len(texts)) * i), f\"{text}\", black)\n img = np.asarray(img)\n\n cv2.imwrite(save_path, img)\n if name and self._wandb_logger:\n wandb.log({name: wandb.Image(save_path), \"trainer/global_step\": step})\n return save_path\n\n def save_image(self, filename, img) -> str:\n save_path = self.get_save_path(filename)\n img = self.convert_data(img)\n assert img.dtype == np.uint8 or img.dtype == np.uint16\n if img.ndim == 3 and img.shape[-1] == 3:\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n elif img.ndim == 3 and img.shape[-1] == 4:\n img = cv2.cvtColor(img, cv2.COLOR_RGBA2BGRA)\n cv2.imwrite(save_path, img)\n return save_path\n\n def save_cubemap(self, filename, img, data_range=(0, 1), rgba=False) -> str:\n save_path = self.get_save_path(filename)\n img = self.convert_data(img)\n assert img.ndim == 4 and img.shape[0] == 6 and img.shape[1] == img.shape[2]\n\n imgs_full = []\n for start in range(0, img.shape[-1], 3):\n img_ = img[..., start : start + 3]\n img_ = np.stack(\n [\n self.get_rgb_image_(img_[i], \"HWC\", data_range, rgba=rgba)\n for i in range(img_.shape[0])\n ],\n axis=0,\n )\n size = img_.shape[1]\n placeholder = np.zeros((size, size, 3), dtype=np.float32)\n img_full = np.concatenate(\n [\n np.concatenate(\n [placeholder, img_[2], placeholder, placeholder], axis=1\n ),\n np.concatenate([img_[1], img_[4], img_[0], img_[5]], axis=1),\n np.concatenate(\n [placeholder, img_[3], placeholder, placeholder], axis=1\n ),\n ],\n axis=0,\n )\n imgs_full.append(img_full)\n\n imgs_full = np.concatenate(imgs_full, axis=1)\n cv2.imwrite(save_path, imgs_full)\n return save_path\n\n def save_data(self, 
filename, data) -> str:\n data = self.convert_data(data)\n if isinstance(data, dict):\n if not filename.endswith(\".npz\"):\n filename += \".npz\"\n save_path = self.get_save_path(filename)\n np.savez(save_path, **data)\n else:\n if not filename.endswith(\".npy\"):\n filename += \".npy\"\n save_path = self.get_save_path(filename)\n np.save(save_path, data)\n return save_path\n\n def save_state_dict(self, filename, data) -> str:\n save_path = self.get_save_path(filename)\n torch.save(data, save_path)\n return save_path\n\n def save_img_sequence(\n self,\n filename,\n img_dir,\n matcher,\n save_format=\"mp4\",\n fps=30,\n name: Optional[str] = None,\n step: Optional[int] = None,\n ) -> str:\n assert save_format in [\"gif\", \"mp4\"]\n if not filename.endswith(save_format):\n filename += f\".{save_format}\"\n save_path = self.get_save_path(filename)\n matcher = re.compile(matcher)\n img_dir = os.path.join(self.get_save_dir(), img_dir)\n imgs = []\n for f in os.listdir(img_dir):\n if matcher.search(f):\n imgs.append(f)\n imgs = sorted(imgs, key=lambda f: int(matcher.search(f).groups()[0]))\n imgs = [cv2.imread(os.path.join(img_dir, f)) for f in imgs]\n\n if save_format == \"gif\":\n imgs = [cv2.cvtColor(i, cv2.COLOR_BGR2RGB) for i in imgs]\n imageio.mimsave(save_path, imgs, fps=fps, palettesize=256)\n elif save_format == \"mp4\":\n imgs = [cv2.cvtColor(i, cv2.COLOR_BGR2RGB) for i in imgs]\n imageio.mimsave(save_path, imgs, fps=fps)\n if name and self._wandb_logger:\n wandb.log(\n {\n name: wandb.Video(save_path, format=\"mp4\"),\n \"trainer/global_step\": step,\n }\n )\n return save_path\n\n def save_mesh(self, filename, v_pos, t_pos_idx, v_tex=None, t_tex_idx=None) -> str:\n save_path = self.get_save_path(filename)\n v_pos = self.convert_data(v_pos)\n t_pos_idx = self.convert_data(t_pos_idx)\n mesh = trimesh.Trimesh(vertices=v_pos, faces=t_pos_idx)\n mesh.export(save_path)\n return save_path\n\n def save_obj(\n self,\n filename: str,\n mesh: Mesh,\n save_mat: bool = False,\n save_normal: bool = False,\n save_uv: bool = False,\n save_vertex_color: bool = False,\n map_Kd: Optional[Float[Tensor, \"H W 3\"]] = None,\n map_Ks: Optional[Float[Tensor, \"H W 3\"]] = None,\n map_Bump: Optional[Float[Tensor, \"H W 3\"]] = None,\n map_Pm: Optional[Float[Tensor, \"H W 1\"]] = None,\n map_Pr: Optional[Float[Tensor, \"H W 1\"]] = None,\n map_format: str = \"jpg\",\n ) -> List[str]:\n save_paths: List[str] = []\n if not filename.endswith(\".obj\"):\n filename += \".obj\"\n v_pos, t_pos_idx = self.convert_data(mesh.v_pos), self.convert_data(\n mesh.t_pos_idx\n )\n v_nrm, v_tex, t_tex_idx, v_rgb = None, None, None, None\n if save_normal:\n v_nrm = self.convert_data(mesh.v_nrm)\n if save_uv:\n v_tex, t_tex_idx = self.convert_data(mesh.v_tex), self.convert_data(\n mesh.t_tex_idx\n )\n if save_vertex_color:\n v_rgb = self.convert_data(mesh.v_rgb)\n matname, mtllib = None, None\n if save_mat:\n matname = \"default\"\n mtl_filename = filename.replace(\".obj\", \".mtl\")\n mtllib = os.path.basename(mtl_filename)\n mtl_save_paths = self._save_mtl(\n mtl_filename,\n matname,\n map_Kd=self.convert_data(map_Kd),\n map_Ks=self.convert_data(map_Ks),\n map_Bump=self.convert_data(map_Bump),\n map_Pm=self.convert_data(map_Pm),\n map_Pr=self.convert_data(map_Pr),\n map_format=map_format,\n )\n save_paths += mtl_save_paths\n obj_save_path = self._save_obj(\n filename,\n v_pos,\n t_pos_idx,\n v_nrm=v_nrm,\n v_tex=v_tex,\n t_tex_idx=t_tex_idx,\n v_rgb=v_rgb,\n matname=matname,\n mtllib=mtllib,\n )\n 
save_paths.append(obj_save_path)\n return save_paths\n\n def _save_obj(\n self,\n filename,\n v_pos,\n t_pos_idx,\n v_nrm=None,\n v_tex=None,\n t_tex_idx=None,\n v_rgb=None,\n matname=None,\n mtllib=None,\n ) -> str:\n obj_str = \"\"\n if matname is not None:\n obj_str += f\"mtllib {mtllib}\\n\"\n obj_str += f\"g object\\n\"\n obj_str += f\"usemtl {matname}\\n\"\n for i in range(len(v_pos)):\n obj_str += f\"v {v_pos[i][0]} {v_pos[i][1]} {v_pos[i][2]}\"\n if v_rgb is not None:\n obj_str += f\" {v_rgb[i][0]} {v_rgb[i][1]} {v_rgb[i][2]}\"\n obj_str += \"\\n\"\n if v_nrm is not None:\n for v in v_nrm:\n obj_str += f\"vn {v[0]} {v[1]} {v[2]}\\n\"\n if v_tex is not None:\n for v in v_tex:\n obj_str += f\"vt {v[0]} {1.0 - v[1]}\\n\"\n\n for i in range(len(t_pos_idx)):\n obj_str += \"f\"\n for j in range(3):\n obj_str += f\" {t_pos_idx[i][j] + 1}/\"\n if v_tex is not None:\n obj_str += f\"{t_tex_idx[i][j] + 1}\"\n obj_str += \"/\"\n if v_nrm is not None:\n obj_str += f\"{t_pos_idx[i][j] + 1}\"\n obj_str += \"\\n\"\n\n save_path = self.get_save_path(filename)\n with open(save_path, \"w\") as f:\n f.write(obj_str)\n return save_path\n\n def _save_mtl(\n self,\n filename,\n matname,\n Ka=(0.0, 0.0, 0.0),\n Kd=(1.0, 1.0, 1.0),\n Ks=(0.0, 0.0, 0.0),\n map_Kd=None,\n map_Ks=None,\n map_Bump=None,\n map_Pm=None,\n map_Pr=None,\n map_format=\"jpg\",\n step: Optional[int] = None,\n ) -> List[str]:\n mtl_save_path = self.get_save_path(filename)\n save_paths = [mtl_save_path]\n mtl_str = f\"newmtl {matname}\\n\"\n mtl_str += f\"Ka {Ka[0]} {Ka[1]} {Ka[2]}\\n\"\n if map_Kd is not None:\n map_Kd_save_path = os.path.join(\n os.path.dirname(mtl_save_path), f\"texture_kd.{map_format}\"\n )\n mtl_str += f\"map_Kd texture_kd.{map_format}\\n\"\n self._save_rgb_image(\n map_Kd_save_path,\n map_Kd,\n data_format=\"HWC\",\n data_range=(0, 1),\n name=f\"{matname}_Kd\",\n step=step,\n )\n save_paths.append(map_Kd_save_path)\n else:\n mtl_str += f\"Kd {Kd[0]} {Kd[1]} {Kd[2]}\\n\"\n if map_Ks is not None:\n map_Ks_save_path = os.path.join(\n os.path.dirname(mtl_save_path), f\"texture_ks.{map_format}\"\n )\n mtl_str += f\"map_Ks texture_ks.{map_format}\\n\"\n self._save_rgb_image(\n map_Ks_save_path,\n map_Ks,\n data_format=\"HWC\",\n data_range=(0, 1),\n name=f\"{matname}_Ks\",\n step=step,\n )\n save_paths.append(map_Ks_save_path)\n else:\n mtl_str += f\"Ks {Ks[0]} {Ks[1]} {Ks[2]}\\n\"\n if map_Bump is not None:\n map_Bump_save_path = os.path.join(\n os.path.dirname(mtl_save_path), f\"texture_nrm.{map_format}\"\n )\n mtl_str += f\"map_Bump texture_nrm.{map_format}\\n\"\n self._save_rgb_image(\n map_Bump_save_path,\n map_Bump,\n data_format=\"HWC\",\n data_range=(0, 1),\n name=f\"{matname}_Bump\",\n step=step,\n )\n save_paths.append(map_Bump_save_path)\n if map_Pm is not None:\n map_Pm_save_path = os.path.join(\n os.path.dirname(mtl_save_path), f\"texture_metallic.{map_format}\"\n )\n mtl_str += f\"map_Pm texture_metallic.{map_format}\\n\"\n self._save_grayscale_image(\n map_Pm_save_path,\n map_Pm,\n data_range=(0, 1),\n cmap=None,\n name=f\"{matname}_refl\",\n step=step,\n )\n save_paths.append(map_Pm_save_path)\n if map_Pr is not None:\n map_Pr_save_path = os.path.join(\n os.path.dirname(mtl_save_path), f\"texture_roughness.{map_format}\"\n )\n mtl_str += f\"map_Pr texture_roughness.{map_format}\\n\"\n self._save_grayscale_image(\n map_Pr_save_path,\n map_Pr,\n data_range=(0, 1),\n cmap=None,\n name=f\"{matname}_Ns\",\n step=step,\n )\n save_paths.append(map_Pr_save_path)\n with open(self.get_save_path(filename), \"w\") 
as f:\n f.write(mtl_str)\n return save_paths\n\n def save_file(self, filename, src_path) -> str:\n save_path = self.get_save_path(filename)\n shutil.copyfile(src_path, save_path)\n return save_path\n\n def save_json(self, filename, payload) -> str:\n save_path = self.get_save_path(filename)\n with open(save_path, \"w\") as f:\n f.write(json.dumps(payload))\n return save_path"
}
] | import os
import pytorch_lightning as pl
import torch.nn.functional as F
import threestudio
from dataclasses import dataclass, field
from threestudio.models.exporters.base import Exporter, ExporterOutput
from threestudio.systems.utils import parse_optimizer, parse_scheduler
from threestudio.utils.base import Updateable, update_if_possible
from threestudio.utils.config import parse_structured
from threestudio.utils.misc import C, cleanup, get_device, load_module_weights
from threestudio.utils.saving import SaverMixin
from threestudio.utils.typing import *
from threestudio.utils.config import load_config, parse_structured | 10,307 | update_if_possible(self.dataset, self.true_current_epoch, self.true_global_step)
self.do_update_step(self.true_current_epoch, self.true_global_step)
def on_predict_batch_start(self, batch, batch_idx, dataloader_idx=0):
self.preprocess_data(batch, "predict")
self.dataset = self.trainer.predict_dataloaders.dataset
update_if_possible(self.dataset, self.true_current_epoch, self.true_global_step)
self.do_update_step(self.true_current_epoch, self.true_global_step)
def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False):
pass
def on_before_optimizer_step(self, optimizer):
"""
        # gradient-related debugging can go here, for example:
from lightning.pytorch.utilities import grad_norm
norms = grad_norm(self.geometry, norm_type=2)
print(norms)
"""
pass
class BaseLift3DSystem(BaseSystem):
@dataclass
class Config(BaseSystem.Config):
geometry_type: str = ""
geometry: dict = field(default_factory=dict)
geometry_convert_from: Optional[str] = None
geometry_convert_inherit_texture: bool = False
# used to override configurations of the previous geometry being converted from,
# for example isosurface_threshold
geometry_convert_override: dict = field(default_factory=dict)
material_type: str = ""
material: dict = field(default_factory=dict)
background_type: str = ""
background: dict = field(default_factory=dict)
renderer_type: str = ""
renderer: dict = field(default_factory=dict)
guidance_type: str = ""
guidance: dict = field(default_factory=dict)
prompt_processor_type: str = ""
prompt_processor: dict = field(default_factory=dict)
# geometry export configurations, no need to specify in training
exporter_type: str = "mesh-exporter"
exporter: dict = field(default_factory=dict)
cfg: Config
def configure(self) -> None:
if (
self.cfg.geometry_convert_from # from_coarse must be specified
and not self.cfg.weights # not initialized from coarse when weights are specified
and not self.resumed # not initialized from coarse when resumed from checkpoints
):
threestudio.info("Initializing geometry from a given checkpoint ...")
prev_cfg = load_config(
os.path.join(
os.path.dirname(self.cfg.geometry_convert_from),
"../configs/parsed.yaml",
)
) # TODO: hard-coded relative path
prev_system_cfg: BaseLift3DSystem.Config = parse_structured(
self.Config, prev_cfg.system
)
prev_geometry_cfg = prev_system_cfg.geometry
prev_geometry_cfg.update(self.cfg.geometry_convert_override)
prev_geometry = threestudio.find(prev_system_cfg.geometry_type)(
prev_geometry_cfg
)
state_dict, epoch, global_step = load_module_weights(
self.cfg.geometry_convert_from,
module_name="geometry",
map_location="cpu",
)
prev_geometry.load_state_dict(state_dict, strict=False)
# restore step-dependent states
prev_geometry.do_update_step(epoch, global_step, on_load_weights=True)
# convert from coarse stage geometry
prev_geometry = prev_geometry.to(get_device())
self.geometry = threestudio.find(self.cfg.geometry_type).create_from(
prev_geometry,
self.cfg.geometry,
copy_net=self.cfg.geometry_convert_inherit_texture,
)
del prev_geometry
cleanup()
else:
self.geometry = threestudio.find(self.cfg.geometry_type)(self.cfg.geometry)
self.material = threestudio.find(self.cfg.material_type)(self.cfg.material)
self.background = threestudio.find(self.cfg.background_type)(
self.cfg.background
)
self.renderer = threestudio.find(self.cfg.renderer_type)(
self.cfg.renderer,
geometry=self.geometry,
material=self.material,
background=self.background,
)
def on_fit_start(self) -> None:
if self._save_dir is not None:
threestudio.info(f"Validation results will be saved to {self._save_dir}")
else:
threestudio.warn(
f"Saving directory not set for the system, visualization results will not be saved"
)
def on_test_end(self) -> None:
if self._save_dir is not None:
threestudio.info(f"Test results saved to {self._save_dir}")
def on_predict_start(self) -> None:
|
class BaseSystem(pl.LightningModule, Updateable, SaverMixin):
@dataclass
class Config:
loggers: dict = field(default_factory=dict)
loss: dict = field(default_factory=dict)
optimizer: dict = field(default_factory=dict)
scheduler: Optional[dict] = None
weights: Optional[str] = None
weights_ignore_modules: Optional[List[str]] = None
cleanup_after_validation_step: bool = False
cleanup_after_test_step: bool = False
cfg: Config
def __init__(self, cfg, resumed=False) -> None:
super().__init__()
self.cfg = parse_structured(self.Config, cfg)
self._save_dir: Optional[str] = None
self._resumed: bool = resumed
self._resumed_eval: bool = False
self._resumed_eval_status: dict = {"global_step": 0, "current_epoch": 0}
if "loggers" in cfg:
self.create_loggers(cfg.loggers)
self.configure()
if self.cfg.weights is not None:
self.load_weights(self.cfg.weights, self.cfg.weights_ignore_modules)
self.post_configure()
def load_weights(self, weights: str, ignore_modules: Optional[List[str]] = None):
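            # load weights from a checkpoint, optionally skipping ignored modules (non-strict load)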
state_dict, epoch, global_step = load_module_weights(
weights, ignore_modules=ignore_modules, map_location="cpu"
)
self.load_state_dict(state_dict, strict=False)
# restore step-dependent states
self.do_update_step(epoch, global_step, on_load_weights=True)
def set_resume_status(self, current_epoch: int, global_step: int):
# restore correct epoch and global step in eval
self._resumed_eval = True
self._resumed_eval_status["current_epoch"] = current_epoch
self._resumed_eval_status["global_step"] = global_step
@property
def resumed(self):
# whether from resumed checkpoint
return self._resumed
@property
def true_global_step(self):
if self._resumed_eval:
return self._resumed_eval_status["global_step"]
else:
return self.global_step
@property
def true_current_epoch(self):
if self._resumed_eval:
return self._resumed_eval_status["current_epoch"]
else:
return self.current_epoch
def configure(self) -> None:
pass
def post_configure(self) -> None:
"""
executed after weights are loaded
"""
pass
def C(self, value: Any) -> float:
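            # resolve a config value that may be a constant or a step/epoch-dependent schedule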
return C(value, self.true_current_epoch, self.true_global_step)
def configure_optimizers(self):
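            # build the optimizer from the config; attach an LR scheduler only if one is configured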
optim = parse_optimizer(self.cfg.optimizer, self)
ret = {
"optimizer": optim,
}
if self.cfg.scheduler is not None:
ret.update(
{
"lr_scheduler": parse_scheduler(self.cfg.scheduler, optim),
}
)
return ret
def training_step(self, batch, batch_idx):
raise NotImplementedError
def validation_step(self, batch, batch_idx):
raise NotImplementedError
def on_validation_batch_end(self, outputs, batch, batch_idx):
if self.cfg.cleanup_after_validation_step:
# cleanup to save vram
cleanup()
def on_validation_epoch_end(self):
raise NotImplementedError
def test_step(self, batch, batch_idx):
raise NotImplementedError
def on_test_batch_end(self, outputs, batch, batch_idx):
if self.cfg.cleanup_after_test_step:
# cleanup to save vram
cleanup()
def on_test_epoch_end(self):
pass
def predict_step(self, batch, batch_idx):
raise NotImplementedError
def on_predict_batch_end(self, outputs, batch, batch_idx):
if self.cfg.cleanup_after_test_step:
# cleanup to save vram
cleanup()
def on_predict_epoch_end(self):
pass
def preprocess_data(self, batch, stage):
pass
"""
Implementing on_after_batch_transfer of DataModule does the same.
But on_after_batch_transfer does not support DP.
"""
def on_train_batch_start(self, batch, batch_idx, unused=0):
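            # keep the dataset and Updateable submodules in sync with the true epoch/step before each training batch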
self.preprocess_data(batch, "train")
self.dataset = self.trainer.train_dataloader.dataset
update_if_possible(self.dataset, self.true_current_epoch, self.true_global_step)
self.do_update_step(self.true_current_epoch, self.true_global_step)
def on_validation_batch_start(self, batch, batch_idx, dataloader_idx=0):
self.preprocess_data(batch, "validation")
self.dataset = self.trainer.val_dataloaders.dataset
update_if_possible(self.dataset, self.true_current_epoch, self.true_global_step)
self.do_update_step(self.true_current_epoch, self.true_global_step)
def on_test_batch_start(self, batch, batch_idx, dataloader_idx=0):
self.preprocess_data(batch, "test")
self.dataset = self.trainer.test_dataloaders.dataset
update_if_possible(self.dataset, self.true_current_epoch, self.true_global_step)
self.do_update_step(self.true_current_epoch, self.true_global_step)
def on_predict_batch_start(self, batch, batch_idx, dataloader_idx=0):
self.preprocess_data(batch, "predict")
self.dataset = self.trainer.predict_dataloaders.dataset
update_if_possible(self.dataset, self.true_current_epoch, self.true_global_step)
self.do_update_step(self.true_current_epoch, self.true_global_step)
def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False):
pass
def on_before_optimizer_step(self, optimizer):
"""
            # gradient-related debugging can go here, for example:
from lightning.pytorch.utilities import grad_norm
norms = grad_norm(self.geometry, norm_type=2)
print(norms)
"""
pass
class BaseLift3DSystem(BaseSystem):
@dataclass
class Config(BaseSystem.Config):
geometry_type: str = ""
geometry: dict = field(default_factory=dict)
geometry_convert_from: Optional[str] = None
geometry_convert_inherit_texture: bool = False
# used to override configurations of the previous geometry being converted from,
# for example isosurface_threshold
geometry_convert_override: dict = field(default_factory=dict)
material_type: str = ""
material: dict = field(default_factory=dict)
background_type: str = ""
background: dict = field(default_factory=dict)
renderer_type: str = ""
renderer: dict = field(default_factory=dict)
guidance_type: str = ""
guidance: dict = field(default_factory=dict)
prompt_processor_type: str = ""
prompt_processor: dict = field(default_factory=dict)
# geometry export configurations, no need to specify in training
exporter_type: str = "mesh-exporter"
exporter: dict = field(default_factory=dict)
cfg: Config
def configure(self) -> None:
if (
self.cfg.geometry_convert_from # from_coarse must be specified
and not self.cfg.weights # not initialized from coarse when weights are specified
and not self.resumed # not initialized from coarse when resumed from checkpoints
):
threestudio.info("Initializing geometry from a given checkpoint ...")
prev_cfg = load_config(
os.path.join(
os.path.dirname(self.cfg.geometry_convert_from),
"../configs/parsed.yaml",
)
) # TODO: hard-coded relative path
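            # parse the previous (coarse) stage's system config so its geometry can be rebuilt, loaded and converted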
prev_system_cfg: BaseLift3DSystem.Config = parse_structured(
self.Config, prev_cfg.system
)
prev_geometry_cfg = prev_system_cfg.geometry
prev_geometry_cfg.update(self.cfg.geometry_convert_override)
prev_geometry = threestudio.find(prev_system_cfg.geometry_type)(
prev_geometry_cfg
)
state_dict, epoch, global_step = load_module_weights(
self.cfg.geometry_convert_from,
module_name="geometry",
map_location="cpu",
)
prev_geometry.load_state_dict(state_dict, strict=False)
# restore step-dependent states
prev_geometry.do_update_step(epoch, global_step, on_load_weights=True)
# convert from coarse stage geometry
prev_geometry = prev_geometry.to(get_device())
self.geometry = threestudio.find(self.cfg.geometry_type).create_from(
prev_geometry,
self.cfg.geometry,
copy_net=self.cfg.geometry_convert_inherit_texture,
)
del prev_geometry
cleanup()
else:
self.geometry = threestudio.find(self.cfg.geometry_type)(self.cfg.geometry)
self.material = threestudio.find(self.cfg.material_type)(self.cfg.material)
self.background = threestudio.find(self.cfg.background_type)(
self.cfg.background
)
self.renderer = threestudio.find(self.cfg.renderer_type)(
self.cfg.renderer,
geometry=self.geometry,
material=self.material,
background=self.background,
)
def on_fit_start(self) -> None:
if self._save_dir is not None:
threestudio.info(f"Validation results will be saved to {self._save_dir}")
else:
threestudio.warn(
f"Saving directory not set for the system, visualization results will not be saved"
)
def on_test_end(self) -> None:
if self._save_dir is not None:
threestudio.info(f"Test results saved to {self._save_dir}")
def on_predict_start(self) -> None: | self.exporter: Exporter = threestudio.find(self.cfg.exporter_type)( | 0 | 2023-12-13 21:09:37+00:00 | 12k |
TencentARC/MotionCtrl | lvdm/models/ddpm3d.py | [
{
"identifier": "disabled_train",
"path": "lvdm/basics.py",
"snippet": "def disabled_train(self, mode=True):\n \"\"\"Overwrite model.train with this function to make sure train/eval mode\n does not change anymore.\"\"\"\n return self"
},
{
"identifier": "default",
"path": "lvdm/common.py",
"snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d"
},
{
"identifier": "exists",
"path": "lvdm/common.py",
"snippet": "def exists(val):\n return val is not None"
},
{
"identifier": "extract_into_tensor",
"path": "lvdm/common.py",
"snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))"
},
{
"identifier": "noise_like",
"path": "lvdm/common.py",
"snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()"
},
{
"identifier": "DiagonalGaussianDistribution",
"path": "lvdm/distributions.py",
"snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)\n\n def sample(self, noise=None):\n if noise is None:\n noise = torch.randn(self.mean.shape)\n \n x = self.mean + self.std * noise.to(device=self.parameters.device)\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.])\n else:\n if other is None:\n return 0.5 * torch.sum(torch.pow(self.mean, 2)\n + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3])\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var - 1.0 - self.logvar + other.logvar,\n dim=[1, 2, 3])\n\n def nll(self, sample, dims=[1,2,3]):\n if self.deterministic:\n return torch.Tensor([0.])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims)\n\n def mode(self):\n return self.mean"
},
{
"identifier": "normal_kl",
"path": "lvdm/distributions.py",
"snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )"
},
{
"identifier": "LitEma",
"path": "lvdm/ema.py",
"snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n\n self.m_name2s_name = {}\n self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))\n self.register_buffer('num_updates', torch.tensor(0,dtype=torch.int) if use_num_upates\n else torch.tensor(-1,dtype=torch.int))\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n #remove as '.'-character is not allowed in buffers\n s_name = name.replace('.','')\n self.m_name2s_name.update({name:s_name})\n self.register_buffer(s_name,p.clone().detach().data)\n\n self.collected_params = []\n\n def forward(self,model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay,(1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)"
},
{
"identifier": "DDIMSampler",
"path": "lvdm/models/samplers/ddim.py",
"snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n self.counter = 0\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n schedule_verbose=False,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n **kwargs\n ):\n \n # check condition bs\n if conditioning is not None:\n if isinstance(conditioning, dict):\n try:\n cbs = conditioning[list(conditioning.keys())[0]].shape[0]\n except:\n cbs = conditioning[list(conditioning.keys())[0]][0].shape[0]\n\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=schedule_verbose)\n \n # make shape\n if len(shape) == 3:\n C, H, W = shape\n size = (batch_size, C, H, W)\n elif len(shape) == 4:\n C, T, H, W = shape\n size = (batch_size, C, T, H, W)\n # print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n \n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n verbose=verbose,\n **kwargs)\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, verbose=True,\n **kwargs):\n device = self.model.betas.device \n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n \n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n \n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n if verbose:\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n else:\n iterator = time_range\n\n clean_cond = kwargs.pop(\"clean_cond\", False)\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n # use mask to blend noised original latent (img_orig) & new sampled latent (img)\n if mask is not None:\n assert x0 is not None\n if clean_cond:\n img_orig = x0\n else:\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass? <ddim inversion>\n img = img_orig * mask + (1. 
- mask) * img # keep original & modify use img\n \n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n **kwargs)\n \n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,\n uc_type=None, conditional_guidance_scale_temporal=None, **kwargs):\n b, *_, device = *x.shape, x.device\n if x.dim() == 5:\n is_video = True\n else:\n is_video = False\n # f=open('/apdcephfs_cq2/share_1290939/yingqinghe/code/LVDM-private/cfg_range_s5noclamp.txt','a')\n # print(f't={t}, model input, min={torch.min(x)}, max={torch.max(x)}',file=f)\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n e_t = self.model.apply_model(x, t, c, **kwargs) # unet denoiser\n else:\n # with unconditional condition\n if isinstance(c, torch.Tensor):\n un_kwargs = kwargs.copy()\n if isinstance(unconditional_conditioning, dict):\n for uk, uv in unconditional_conditioning.items():\n if uk in un_kwargs:\n un_kwargs[uk] = uv\n unconditional_conditioning = unconditional_conditioning['uc']\n if 'cond_T' in kwargs and t < kwargs['cond_T']:\n if 'features_adapter' in kwargs:\n kwargs.pop('features_adapter')\n un_kwargs.pop('features_adapter')\n # kwargs['features_adapter'] = None\n # un_kwargs['features_adapter'] = None\n # if 'pose_emb' in kwargs:\n # kwargs.pop('pose_emb')\n # un_kwargs.pop('pose_emb')\n # kwargs['pose_emb'] = None\n # un_kwargs['pose_emb'] = None\n e_t = self.model.apply_model(x, t, c, **kwargs)\n # e_t_uncond = self.model.apply_model(x, t, unconditional_conditioning, **kwargs)\n e_t_uncond = self.model.apply_model(x, t, unconditional_conditioning, **un_kwargs)\n elif isinstance(c, dict):\n e_t = self.model.apply_model(x, t, c, **kwargs)\n e_t_uncond = self.model.apply_model(x, t, unconditional_conditioning, **kwargs)\n else:\n raise NotImplementedError\n # text cfg\n if uc_type is None:\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)\n else:\n if uc_type == 'cfg_original':\n e_t = e_t + unconditional_guidance_scale * (e_t - e_t_uncond)\n elif uc_type == 'cfg_ours':\n e_t = e_t + unconditional_guidance_scale * (e_t_uncond - e_t)\n else:\n raise NotImplementedError\n # temporal guidance\n if conditional_guidance_scale_temporal is not None:\n e_t_temporal = self.model.apply_model(x, t, c, **kwargs)\n e_t_image = self.model.apply_model(x, t, c, no_temporal_attn=True, **kwargs)\n e_t = e_t + conditional_guidance_scale_temporal * (e_t_temporal - e_t_image)\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps 
else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n \n if is_video:\n size = (b, 1, 1, 1, 1)\n else:\n size = (b, 1, 1, 1)\n a_t = torch.full(size, alphas[index], device=device)\n a_prev = torch.full(size, alphas_prev[index], device=device)\n sigma_t = torch.full(size, sigmas[index], device=device)\n sqrt_one_minus_at = torch.full(size, sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n # print(f't={t}, pred_x0, min={torch.min(pred_x0)}, max={torch.max(pred_x0)}',file=f)\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t\n # # norm pred_x0\n # p=2\n # s=()\n # pred_x0 = pred_x0 - torch.max(torch.abs(pred_x0))\n\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n \n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n\n return x_prev, pred_x0"
},
{
"identifier": "make_beta_schedule",
"path": "lvdm/models/utils_diffusion.py",
"snippet": "def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):\n if schedule == \"linear\":\n betas = (\n torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n elif schedule == \"sqrt\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()"
},
{
"identifier": "instantiate_from_config",
"path": "utils/utils.py",
"snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))"
}
] | import logging
import os
import random
import numpy as np
import pytorch_lightning as pl
import torch
import torch.nn as nn
from contextlib import contextmanager
from functools import partial
from einops import rearrange, repeat
from tqdm import tqdm
from pytorch_lightning.utilities import rank_zero_only
from torch.optim.lr_scheduler import CosineAnnealingLR, LambdaLR
from torchvision.utils import make_grid
from lvdm.basics import disabled_train
from lvdm.common import default, exists, extract_into_tensor, noise_like
from lvdm.distributions import DiagonalGaussianDistribution, normal_kl
from lvdm.ema import LitEma
from lvdm.models.samplers.ddim import DDIMSampler
from lvdm.models.utils_diffusion import make_beta_schedule
from utils.utils import instantiate_from_config | 8,589 | betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod)))
self.register_buffer('posterior_mean_coef2', to_torch(
(1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod)))
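        # per-timestep weights for the variational lower-bound term; the form depends on the parameterization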
if self.parameterization == "eps":
lvlb_weights = self.betas ** 2 / (
2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))
elif self.parameterization == "x0":
lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. * 1 - torch.Tensor(alphas_cumprod))
else:
raise NotImplementedError("mu not supported")
# TODO how to choose this term
lvlb_weights[0] = lvlb_weights[1]
self.register_buffer('lvlb_weights', lvlb_weights, persistent=False)
assert not torch.isnan(self.lvlb_weights).all()
@contextmanager
def ema_scope(self, context=None):
if self.use_ema:
self.model_ema.store(self.model.parameters())
self.model_ema.copy_to(self.model)
if context is not None:
mainlogger.info(f"{context}: Switched to EMA weights")
try:
yield None
finally:
if self.use_ema:
self.model_ema.restore(self.model.parameters())
if context is not None:
mainlogger.info(f"{context}: Restored training weights")
def init_from_ckpt(self, path, ignore_keys=list(), only_model=False):
sd = torch.load(path, map_location="cpu")
if "state_dict" in list(sd.keys()):
sd = sd["state_dict"]
keys = list(sd.keys())
for k in keys:
for ik in ignore_keys:
if k.startswith(ik):
mainlogger.info("Deleting key {} from state_dict.".format(k))
del sd[k]
missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(
sd, strict=False)
mainlogger.info(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
if len(missing) > 0:
mainlogger.info(f"Missing Keys: {missing}")
if len(unexpected) > 0:
mainlogger.info(f"Unexpected Keys: {unexpected}")
def q_mean_variance(self, x_start, t):
"""
Get the distribution q(x_t | x_0).
:param x_start: the [N x C x ...] tensor of noiseless inputs.
:param t: the number of diffusion steps (minus 1). Here, 0 means one step.
:return: A tuple (mean, variance, log_variance), all of x_start's shape.
"""
mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start)
variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape)
log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape)
return mean, variance, log_variance
def predict_start_from_noise(self, x_t, t, noise):
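            # recover the x_0 estimate from x_t and the predicted noise (used for the eps parameterization)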
return (
extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t -
extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise
)
def q_posterior(self, x_start, x_t, t):
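            # mean, variance and clipped log-variance of the posterior q(x_{t-1} | x_t, x_0)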
posterior_mean = (
extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start +
extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t
)
posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape)
posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape)
return posterior_mean, posterior_variance, posterior_log_variance_clipped
def p_mean_variance(self, x, t, clip_denoised: bool):
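            # run the denoiser, reconstruct x_0 according to the parameterization, then form p(x_{t-1} | x_t)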
model_out = self.model(x, t)
if self.parameterization == "eps":
x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
elif self.parameterization == "x0":
x_recon = model_out
if clip_denoised:
x_recon.clamp_(-1., 1.)
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
return model_mean, posterior_variance, posterior_log_variance
@torch.no_grad()
def p_sample(self, x, t, clip_denoised=True, repeat_noise=False):
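            # one ancestral sampling step of the reverse process; noise is masked out at t == 0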
b, *_, device = *x.shape, x.device
model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised)
noise = noise_like(x.shape, device, repeat_noise)
# no noise when t == 0
nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
@torch.no_grad()
def p_sample_loop(self, shape, return_intermediates=False):
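            # full reverse process: start from Gaussian noise and iteratively denoise over all timesteps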
device = self.betas.device
b = shape[0]
img = torch.randn(shape, device=device)
intermediates = [img]
for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps):
img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long),
clip_denoised=self.clip_denoised)
if i % self.log_every_t == 0 or i == self.num_timesteps - 1:
intermediates.append(img)
if return_intermediates:
return img, intermediates
return img
@torch.no_grad()
def sample(self, batch_size=16, return_intermediates=False):
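            # convenience wrapper around p_sample_loop using the configured channels and image size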
image_size = self.image_size
channels = self.channels
return self.p_sample_loop((batch_size, channels, image_size, image_size),
return_intermediates=return_intermediates)
def q_sample(self, x_start, t, noise=None):
| """
wild mixture of
https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py
https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
https://github.com/CompVis/taming-transformers
-- merci
"""
mainlogger = logging.getLogger('mainlogger')
__conditioning_keys__ = {'concat': 'c_concat',
'crossattn': 'c_crossattn',
'adm': 'y'}
class DDPM(pl.LightningModule):
# classic DDPM with Gaussian diffusion, in image space
def __init__(self,
unet_config,
timesteps=1000,
beta_schedule="linear",
loss_type="l2",
ckpt_path=None,
ignore_keys=[],
load_only_unet=False,
monitor=None,
use_ema=True,
first_stage_key="image",
image_size=256,
channels=3,
log_every_t=100,
clip_denoised=True,
linear_start=1e-4,
linear_end=2e-2,
cosine_s=8e-3,
given_betas=None,
original_elbo_weight=0.,
v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta
l_simple_weight=1.,
conditioning_key=None,
parameterization="eps", # all assuming fixed variance schedules
scheduler_config=None,
use_positional_encodings=False,
learn_logvar=False,
logvar_init=0.,
):
super().__init__()
assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"'
self.parameterization = parameterization
mainlogger.info(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode")
self.cond_stage_model = None
self.clip_denoised = clip_denoised
self.log_every_t = log_every_t
self.first_stage_key = first_stage_key
self.channels = channels
self.temporal_length = unet_config.params.temporal_length
self.image_size = image_size # try conv?
if isinstance(self.image_size, int):
self.image_size = [self.image_size, self.image_size]
self.use_positional_encodings = use_positional_encodings
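            # the denoising UNet is wrapped so conditioning can be applied according to conditioning_key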
self.model = DiffusionWrapper(unet_config, conditioning_key)
#count_params(self.model, verbose=True)
self.use_ema = use_ema
if self.use_ema:
self.model_ema = LitEma(self.model)
mainlogger.info(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
self.use_scheduler = scheduler_config is not None
if self.use_scheduler:
self.scheduler_config = scheduler_config
self.v_posterior = v_posterior
self.original_elbo_weight = original_elbo_weight
self.l_simple_weight = l_simple_weight
if monitor is not None:
self.monitor = monitor
if ckpt_path is not None:
self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet)
self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps,
linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s)
self.loss_type = loss_type
self.learn_logvar = learn_logvar
self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,))
if self.learn_logvar:
self.logvar = nn.Parameter(self.logvar, requires_grad=True)
def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000,
linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
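            # build the beta schedule and cache all derived diffusion constants as (non-trainable) buffers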
if exists(given_betas):
betas = given_betas
else:
betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end,
cosine_s=cosine_s)
alphas = 1. - betas
alphas_cumprod = np.cumprod(alphas, axis=0)
alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1])
timesteps, = betas.shape
self.num_timesteps = int(timesteps)
self.linear_start = linear_start
self.linear_end = linear_end
assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep'
to_torch = partial(torch.tensor, dtype=torch.float32)
self.register_buffer('betas', to_torch(betas))
self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev))
# calculations for diffusion q(x_t | x_{t-1}) and others
self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod)))
self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod)))
self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod)))
self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod)))
self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1)))
# calculations for posterior q(x_{t-1} | x_t, x_0)
posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / (
1. - alphas_cumprod) + self.v_posterior * betas
# above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)
self.register_buffer('posterior_variance', to_torch(posterior_variance))
# below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain
self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20))))
self.register_buffer('posterior_mean_coef1', to_torch(
betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod)))
self.register_buffer('posterior_mean_coef2', to_torch(
(1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod)))
if self.parameterization == "eps":
lvlb_weights = self.betas ** 2 / (
2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))
elif self.parameterization == "x0":
lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. * 1 - torch.Tensor(alphas_cumprod))
else:
raise NotImplementedError("mu not supported")
# TODO how to choose this term
lvlb_weights[0] = lvlb_weights[1]
self.register_buffer('lvlb_weights', lvlb_weights, persistent=False)
assert not torch.isnan(self.lvlb_weights).all()
@contextmanager
def ema_scope(self, context=None):
if self.use_ema:
self.model_ema.store(self.model.parameters())
self.model_ema.copy_to(self.model)
if context is not None:
mainlogger.info(f"{context}: Switched to EMA weights")
try:
yield None
finally:
if self.use_ema:
self.model_ema.restore(self.model.parameters())
if context is not None:
mainlogger.info(f"{context}: Restored training weights")
def init_from_ckpt(self, path, ignore_keys=list(), only_model=False):
sd = torch.load(path, map_location="cpu")
if "state_dict" in list(sd.keys()):
sd = sd["state_dict"]
keys = list(sd.keys())
for k in keys:
for ik in ignore_keys:
if k.startswith(ik):
mainlogger.info("Deleting key {} from state_dict.".format(k))
del sd[k]
missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(
sd, strict=False)
mainlogger.info(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
if len(missing) > 0:
mainlogger.info(f"Missing Keys: {missing}")
if len(unexpected) > 0:
mainlogger.info(f"Unexpected Keys: {unexpected}")
def q_mean_variance(self, x_start, t):
"""
Get the distribution q(x_t | x_0).
:param x_start: the [N x C x ...] tensor of noiseless inputs.
:param t: the number of diffusion steps (minus 1). Here, 0 means one step.
:return: A tuple (mean, variance, log_variance), all of x_start's shape.
"""
mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start)
variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape)
log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape)
return mean, variance, log_variance
def predict_start_from_noise(self, x_t, t, noise):
return (
extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t -
extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise
)
def q_posterior(self, x_start, x_t, t):
posterior_mean = (
extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start +
extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t
)
posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape)
posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape)
return posterior_mean, posterior_variance, posterior_log_variance_clipped
def p_mean_variance(self, x, t, clip_denoised: bool):
model_out = self.model(x, t)
if self.parameterization == "eps":
x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
elif self.parameterization == "x0":
x_recon = model_out
if clip_denoised:
x_recon.clamp_(-1., 1.)
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
return model_mean, posterior_variance, posterior_log_variance
@torch.no_grad()
def p_sample(self, x, t, clip_denoised=True, repeat_noise=False):
b, *_, device = *x.shape, x.device
model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised)
noise = noise_like(x.shape, device, repeat_noise)
# no noise when t == 0
nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
@torch.no_grad()
def p_sample_loop(self, shape, return_intermediates=False):
device = self.betas.device
b = shape[0]
img = torch.randn(shape, device=device)
intermediates = [img]
for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps):
img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long),
clip_denoised=self.clip_denoised)
if i % self.log_every_t == 0 or i == self.num_timesteps - 1:
intermediates.append(img)
if return_intermediates:
return img, intermediates
return img
@torch.no_grad()
def sample(self, batch_size=16, return_intermediates=False):
image_size = self.image_size
channels = self.channels
return self.p_sample_loop((batch_size, channels, image_size, image_size),
return_intermediates=return_intermediates)
def q_sample(self, x_start, t, noise=None): | noise = default(noise, lambda: torch.randn_like(x_start)) | 1 | 2023-12-06 07:27:45+00:00 | 12k |
TianxingWu/FreeInit | examples/AnimateDiff/animatediff/models/unet.py | [
{
"identifier": "CrossAttnDownBlock3D",
"path": "examples/AnimateDiff/animatediff/models/unet_blocks.py",
"snippet": "class CrossAttnDownBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n attn_num_head_channels=1,\n cross_attention_dim=1280,\n output_scale_factor=1.0,\n downsample_padding=1,\n add_downsample=True,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n\n unet_use_cross_frame_attention=None,\n unet_use_temporal_attention=None,\n use_inflated_groupnorm=None,\n \n use_motion_module=None,\n\n motion_module_type=None,\n motion_module_kwargs=None,\n ):\n super().__init__()\n resnets = []\n attentions = []\n motion_modules = []\n\n self.has_cross_attention = True\n self.attn_num_head_channels = attn_num_head_channels\n\n for i in range(num_layers):\n in_channels = in_channels if i == 0 else out_channels\n resnets.append(\n ResnetBlock3D(\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n\n use_inflated_groupnorm=use_inflated_groupnorm,\n )\n )\n if dual_cross_attention:\n raise NotImplementedError\n attentions.append(\n Transformer3DModel(\n attn_num_head_channels,\n out_channels // attn_num_head_channels,\n in_channels=out_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n )\n )\n motion_modules.append(\n get_motion_module(\n in_channels=out_channels,\n motion_module_type=motion_module_type, \n motion_module_kwargs=motion_module_kwargs,\n ) if use_motion_module else None\n )\n \n self.attentions = nn.ModuleList(attentions)\n self.resnets = nn.ModuleList(resnets)\n self.motion_modules = nn.ModuleList(motion_modules)\n\n if add_downsample:\n self.downsamplers = nn.ModuleList(\n [\n Downsample3D(\n out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name=\"op\"\n )\n ]\n )\n else:\n self.downsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(self, hidden_states, temb=None, encoder_hidden_states=None, attention_mask=None):\n output_states = ()\n\n for resnet, attn, motion_module in zip(self.resnets, self.attentions, self.motion_modules):\n if self.training and self.gradient_checkpointing:\n\n def create_custom_forward(module, return_dict=None):\n def custom_forward(*inputs):\n if return_dict is not None:\n return module(*inputs, return_dict=return_dict)\n else:\n return module(*inputs)\n\n return custom_forward\n\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)\n hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(attn, return_dict=False),\n hidden_states,\n encoder_hidden_states,\n )[0]\n if motion_module is not None:\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(motion_module), hidden_states.requires_grad_(), temb, encoder_hidden_states)\n \n else:\n 
hidden_states = resnet(hidden_states, temb)\n hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states).sample\n \n # add motion module\n hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states\n\n output_states += (hidden_states,)\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states += (hidden_states,)\n\n return hidden_states, output_states"
},
{
"identifier": "CrossAttnUpBlock3D",
"path": "examples/AnimateDiff/animatediff/models/unet_blocks.py",
"snippet": "class CrossAttnUpBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n prev_output_channel: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n attn_num_head_channels=1,\n cross_attention_dim=1280,\n output_scale_factor=1.0,\n add_upsample=True,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n\n unet_use_cross_frame_attention=None,\n unet_use_temporal_attention=None,\n use_inflated_groupnorm=None,\n \n use_motion_module=None,\n\n motion_module_type=None,\n motion_module_kwargs=None,\n ):\n super().__init__()\n resnets = []\n attentions = []\n motion_modules = []\n\n self.has_cross_attention = True\n self.attn_num_head_channels = attn_num_head_channels\n\n for i in range(num_layers):\n res_skip_channels = in_channels if (i == num_layers - 1) else out_channels\n resnet_in_channels = prev_output_channel if i == 0 else out_channels\n\n resnets.append(\n ResnetBlock3D(\n in_channels=resnet_in_channels + res_skip_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n\n use_inflated_groupnorm=use_inflated_groupnorm,\n )\n )\n if dual_cross_attention:\n raise NotImplementedError\n attentions.append(\n Transformer3DModel(\n attn_num_head_channels,\n out_channels // attn_num_head_channels,\n in_channels=out_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n )\n )\n motion_modules.append(\n get_motion_module(\n in_channels=out_channels,\n motion_module_type=motion_module_type, \n motion_module_kwargs=motion_module_kwargs,\n ) if use_motion_module else None\n )\n \n self.attentions = nn.ModuleList(attentions)\n self.resnets = nn.ModuleList(resnets)\n self.motion_modules = nn.ModuleList(motion_modules)\n\n if add_upsample:\n self.upsamplers = nn.ModuleList([Upsample3D(out_channels, use_conv=True, out_channels=out_channels)])\n else:\n self.upsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(\n self,\n hidden_states,\n res_hidden_states_tuple,\n temb=None,\n encoder_hidden_states=None,\n upsample_size=None,\n attention_mask=None,\n ):\n for resnet, attn, motion_module in zip(self.resnets, self.attentions, self.motion_modules):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n\n if self.training and self.gradient_checkpointing:\n\n def create_custom_forward(module, return_dict=None):\n def custom_forward(*inputs):\n if return_dict is not None:\n return module(*inputs, return_dict=return_dict)\n else:\n return module(*inputs)\n\n return custom_forward\n\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)\n hidden_states = 
torch.utils.checkpoint.checkpoint(\n create_custom_forward(attn, return_dict=False),\n hidden_states,\n encoder_hidden_states,\n )[0]\n if motion_module is not None:\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(motion_module), hidden_states.requires_grad_(), temb, encoder_hidden_states)\n \n else:\n hidden_states = resnet(hidden_states, temb)\n hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states).sample\n \n # add motion module\n hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states"
},
{
"identifier": "DownBlock3D",
"path": "examples/AnimateDiff/animatediff/models/unet_blocks.py",
"snippet": "class DownBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n output_scale_factor=1.0,\n add_downsample=True,\n downsample_padding=1,\n\n use_inflated_groupnorm=None,\n \n use_motion_module=None,\n motion_module_type=None,\n motion_module_kwargs=None,\n ):\n super().__init__()\n resnets = []\n motion_modules = []\n\n for i in range(num_layers):\n in_channels = in_channels if i == 0 else out_channels\n resnets.append(\n ResnetBlock3D(\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n\n use_inflated_groupnorm=use_inflated_groupnorm,\n )\n )\n motion_modules.append(\n get_motion_module(\n in_channels=out_channels,\n motion_module_type=motion_module_type, \n motion_module_kwargs=motion_module_kwargs,\n ) if use_motion_module else None\n )\n \n self.resnets = nn.ModuleList(resnets)\n self.motion_modules = nn.ModuleList(motion_modules)\n\n if add_downsample:\n self.downsamplers = nn.ModuleList(\n [\n Downsample3D(\n out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name=\"op\"\n )\n ]\n )\n else:\n self.downsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(self, hidden_states, temb=None, encoder_hidden_states=None):\n output_states = ()\n\n for resnet, motion_module in zip(self.resnets, self.motion_modules):\n if self.training and self.gradient_checkpointing:\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs)\n\n return custom_forward\n\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)\n if motion_module is not None:\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(motion_module), hidden_states.requires_grad_(), temb, encoder_hidden_states)\n else:\n hidden_states = resnet(hidden_states, temb)\n\n # add motion module\n hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states\n\n output_states += (hidden_states,)\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states += (hidden_states,)\n\n return hidden_states, output_states"
},
{
"identifier": "UNetMidBlock3DCrossAttn",
"path": "examples/AnimateDiff/animatediff/models/unet_blocks.py",
"snippet": "class UNetMidBlock3DCrossAttn(nn.Module):\n def __init__(\n self,\n in_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n attn_num_head_channels=1,\n output_scale_factor=1.0,\n cross_attention_dim=1280,\n dual_cross_attention=False,\n use_linear_projection=False,\n upcast_attention=False,\n\n unet_use_cross_frame_attention=None,\n unet_use_temporal_attention=None,\n use_inflated_groupnorm=None,\n\n use_motion_module=None,\n \n motion_module_type=None,\n motion_module_kwargs=None,\n ):\n super().__init__()\n\n self.has_cross_attention = True\n self.attn_num_head_channels = attn_num_head_channels\n resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32)\n\n # there is always at least one resnet\n resnets = [\n ResnetBlock3D(\n in_channels=in_channels,\n out_channels=in_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n\n use_inflated_groupnorm=use_inflated_groupnorm,\n )\n ]\n attentions = []\n motion_modules = []\n\n for _ in range(num_layers):\n if dual_cross_attention:\n raise NotImplementedError\n attentions.append(\n Transformer3DModel(\n attn_num_head_channels,\n in_channels // attn_num_head_channels,\n in_channels=in_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n )\n )\n motion_modules.append(\n get_motion_module(\n in_channels=in_channels,\n motion_module_type=motion_module_type, \n motion_module_kwargs=motion_module_kwargs,\n ) if use_motion_module else None\n )\n resnets.append(\n ResnetBlock3D(\n in_channels=in_channels,\n out_channels=in_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n\n use_inflated_groupnorm=use_inflated_groupnorm,\n )\n )\n\n self.attentions = nn.ModuleList(attentions)\n self.resnets = nn.ModuleList(resnets)\n self.motion_modules = nn.ModuleList(motion_modules)\n\n def forward(self, hidden_states, temb=None, encoder_hidden_states=None, attention_mask=None):\n hidden_states = self.resnets[0](hidden_states, temb)\n for attn, resnet, motion_module in zip(self.attentions, self.resnets[1:], self.motion_modules):\n hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states).sample\n hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states\n hidden_states = resnet(hidden_states, temb)\n\n return hidden_states"
},
{
"identifier": "UpBlock3D",
"path": "examples/AnimateDiff/animatediff/models/unet_blocks.py",
"snippet": "class UpBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n prev_output_channel: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n output_scale_factor=1.0,\n add_upsample=True,\n\n use_inflated_groupnorm=None,\n\n use_motion_module=None,\n motion_module_type=None,\n motion_module_kwargs=None,\n ):\n super().__init__()\n resnets = []\n motion_modules = []\n\n for i in range(num_layers):\n res_skip_channels = in_channels if (i == num_layers - 1) else out_channels\n resnet_in_channels = prev_output_channel if i == 0 else out_channels\n\n resnets.append(\n ResnetBlock3D(\n in_channels=resnet_in_channels + res_skip_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n\n use_inflated_groupnorm=use_inflated_groupnorm,\n )\n )\n motion_modules.append(\n get_motion_module(\n in_channels=out_channels,\n motion_module_type=motion_module_type, \n motion_module_kwargs=motion_module_kwargs,\n ) if use_motion_module else None\n )\n\n self.resnets = nn.ModuleList(resnets)\n self.motion_modules = nn.ModuleList(motion_modules)\n\n if add_upsample:\n self.upsamplers = nn.ModuleList([Upsample3D(out_channels, use_conv=True, out_channels=out_channels)])\n else:\n self.upsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(self, hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None, encoder_hidden_states=None,):\n for resnet, motion_module in zip(self.resnets, self.motion_modules):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n\n if self.training and self.gradient_checkpointing:\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs)\n\n return custom_forward\n\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)\n if motion_module is not None:\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(motion_module), hidden_states.requires_grad_(), temb, encoder_hidden_states)\n else:\n hidden_states = resnet(hidden_states, temb)\n hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states"
},
{
"identifier": "get_down_block",
"path": "examples/AnimateDiff/animatediff/models/unet_blocks.py",
"snippet": "def get_down_block(\n down_block_type,\n num_layers,\n in_channels,\n out_channels,\n temb_channels,\n add_downsample,\n resnet_eps,\n resnet_act_fn,\n attn_num_head_channels,\n resnet_groups=None,\n cross_attention_dim=None,\n downsample_padding=None,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n resnet_time_scale_shift=\"default\",\n \n unet_use_cross_frame_attention=None,\n unet_use_temporal_attention=None,\n use_inflated_groupnorm=None,\n\n use_motion_module=None,\n \n motion_module_type=None,\n motion_module_kwargs=None,\n):\n down_block_type = down_block_type[7:] if down_block_type.startswith(\"UNetRes\") else down_block_type\n if down_block_type == \"DownBlock3D\":\n return DownBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n use_inflated_groupnorm=use_inflated_groupnorm,\n\n use_motion_module=use_motion_module,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n elif down_block_type == \"CrossAttnDownBlock3D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for CrossAttnDownBlock3D\")\n return CrossAttnDownBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attn_num_head_channels,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n use_inflated_groupnorm=use_inflated_groupnorm,\n \n use_motion_module=use_motion_module,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n raise ValueError(f\"{down_block_type} does not exist.\")"
},
{
"identifier": "get_up_block",
"path": "examples/AnimateDiff/animatediff/models/unet_blocks.py",
"snippet": "def get_up_block(\n up_block_type,\n num_layers,\n in_channels,\n out_channels,\n prev_output_channel,\n temb_channels,\n add_upsample,\n resnet_eps,\n resnet_act_fn,\n attn_num_head_channels,\n resnet_groups=None,\n cross_attention_dim=None,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n resnet_time_scale_shift=\"default\",\n\n unet_use_cross_frame_attention=None,\n unet_use_temporal_attention=None,\n use_inflated_groupnorm=None,\n \n use_motion_module=None,\n motion_module_type=None,\n motion_module_kwargs=None,\n):\n up_block_type = up_block_type[7:] if up_block_type.startswith(\"UNetRes\") else up_block_type\n if up_block_type == \"UpBlock3D\":\n return UpBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n use_inflated_groupnorm=use_inflated_groupnorm,\n\n use_motion_module=use_motion_module,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n elif up_block_type == \"CrossAttnUpBlock3D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for CrossAttnUpBlock3D\")\n return CrossAttnUpBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attn_num_head_channels,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n use_inflated_groupnorm=use_inflated_groupnorm,\n\n use_motion_module=use_motion_module,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n raise ValueError(f\"{up_block_type} does not exist.\")"
},
{
"identifier": "InflatedConv3d",
"path": "examples/AnimateDiff/animatediff/models/resnet.py",
"snippet": "class InflatedConv3d(nn.Conv2d):\n def forward(self, x):\n video_length = x.shape[2]\n\n x = rearrange(x, \"b c f h w -> (b f) c h w\")\n x = super().forward(x)\n x = rearrange(x, \"(b f) c h w -> b c f h w\", f=video_length)\n\n return x"
},
{
"identifier": "InflatedGroupNorm",
"path": "examples/AnimateDiff/animatediff/models/resnet.py",
"snippet": "class InflatedGroupNorm(nn.GroupNorm):\n def forward(self, x):\n video_length = x.shape[2]\n\n x = rearrange(x, \"b c f h w -> (b f) c h w\")\n x = super().forward(x)\n x = rearrange(x, \"(b f) c h w -> b c f h w\", f=video_length)\n\n return x"
}
] | from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.modeling_utils import ModelMixin
from diffusers.utils import BaseOutput, logging
from diffusers.models.embeddings import TimestepEmbedding, Timesteps
from .unet_blocks import (
CrossAttnDownBlock3D,
CrossAttnUpBlock3D,
DownBlock3D,
UNetMidBlock3DCrossAttn,
UpBlock3D,
get_down_block,
get_up_block,
)
from .resnet import InflatedConv3d, InflatedGroupNorm
from diffusers.utils import WEIGHTS_NAME
import os
import json
import pdb
import torch
import torch.nn as nn
import torch.utils.checkpoint | 8,036 | in_channels: int = 4,
out_channels: int = 4,
center_input_sample: bool = False,
flip_sin_to_cos: bool = True,
freq_shift: int = 0,
down_block_types: Tuple[str] = (
"CrossAttnDownBlock3D",
"CrossAttnDownBlock3D",
"CrossAttnDownBlock3D",
"DownBlock3D",
),
mid_block_type: str = "UNetMidBlock3DCrossAttn",
up_block_types: Tuple[str] = (
"UpBlock3D",
"CrossAttnUpBlock3D",
"CrossAttnUpBlock3D",
"CrossAttnUpBlock3D"
),
only_cross_attention: Union[bool, Tuple[bool]] = False,
block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
layers_per_block: int = 2,
downsample_padding: int = 1,
mid_block_scale_factor: float = 1,
act_fn: str = "silu",
norm_num_groups: int = 32,
norm_eps: float = 1e-5,
cross_attention_dim: int = 1280,
attention_head_dim: Union[int, Tuple[int]] = 8,
dual_cross_attention: bool = False,
use_linear_projection: bool = False,
class_embed_type: Optional[str] = None,
num_class_embeds: Optional[int] = None,
upcast_attention: bool = False,
resnet_time_scale_shift: str = "default",
use_inflated_groupnorm=False,
# Additional
use_motion_module = False,
motion_module_resolutions = ( 1,2,4,8 ),
motion_module_mid_block = False,
motion_module_decoder_only = False,
motion_module_type = None,
motion_module_kwargs = {},
unet_use_cross_frame_attention = None,
unet_use_temporal_attention = None,
):
super().__init__()
self.sample_size = sample_size
time_embed_dim = block_out_channels[0] * 4
# input
self.conv_in = InflatedConv3d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1))
# time
self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)
timestep_input_dim = block_out_channels[0]
self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
# class embedding
if class_embed_type is None and num_class_embeds is not None:
self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)
elif class_embed_type == "timestep":
self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
elif class_embed_type == "identity":
self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)
else:
self.class_embedding = None
self.down_blocks = nn.ModuleList([])
self.mid_block = None
self.up_blocks = nn.ModuleList([])
if isinstance(only_cross_attention, bool):
only_cross_attention = [only_cross_attention] * len(down_block_types)
if isinstance(attention_head_dim, int):
attention_head_dim = (attention_head_dim,) * len(down_block_types)
# down
output_channel = block_out_channels[0]
for i, down_block_type in enumerate(down_block_types):
res = 2 ** i
input_channel = output_channel
output_channel = block_out_channels[i]
is_final_block = i == len(block_out_channels) - 1
down_block = get_down_block(
down_block_type,
num_layers=layers_per_block,
in_channels=input_channel,
out_channels=output_channel,
temb_channels=time_embed_dim,
add_downsample=not is_final_block,
resnet_eps=norm_eps,
resnet_act_fn=act_fn,
resnet_groups=norm_num_groups,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=attention_head_dim[i],
downsample_padding=downsample_padding,
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention[i],
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
use_inflated_groupnorm=use_inflated_groupnorm,
use_motion_module=use_motion_module and (res in motion_module_resolutions) and (not motion_module_decoder_only),
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
self.down_blocks.append(down_block)
# mid
if mid_block_type == "UNetMidBlock3DCrossAttn":
| # Adapted from https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/unet_2d_condition.py
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
@dataclass
class UNet3DConditionOutput(BaseOutput):
sample: torch.FloatTensor
class UNet3DConditionModel(ModelMixin, ConfigMixin):
_supports_gradient_checkpointing = True
@register_to_config
def __init__(
self,
sample_size: Optional[int] = None,
in_channels: int = 4,
out_channels: int = 4,
center_input_sample: bool = False,
flip_sin_to_cos: bool = True,
freq_shift: int = 0,
down_block_types: Tuple[str] = (
"CrossAttnDownBlock3D",
"CrossAttnDownBlock3D",
"CrossAttnDownBlock3D",
"DownBlock3D",
),
mid_block_type: str = "UNetMidBlock3DCrossAttn",
up_block_types: Tuple[str] = (
"UpBlock3D",
"CrossAttnUpBlock3D",
"CrossAttnUpBlock3D",
"CrossAttnUpBlock3D"
),
only_cross_attention: Union[bool, Tuple[bool]] = False,
block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
layers_per_block: int = 2,
downsample_padding: int = 1,
mid_block_scale_factor: float = 1,
act_fn: str = "silu",
norm_num_groups: int = 32,
norm_eps: float = 1e-5,
cross_attention_dim: int = 1280,
attention_head_dim: Union[int, Tuple[int]] = 8,
dual_cross_attention: bool = False,
use_linear_projection: bool = False,
class_embed_type: Optional[str] = None,
num_class_embeds: Optional[int] = None,
upcast_attention: bool = False,
resnet_time_scale_shift: str = "default",
use_inflated_groupnorm=False,
# Additional
use_motion_module = False,
motion_module_resolutions = ( 1,2,4,8 ),
motion_module_mid_block = False,
motion_module_decoder_only = False,
motion_module_type = None,
motion_module_kwargs = {},
unet_use_cross_frame_attention = None,
unet_use_temporal_attention = None,
):
super().__init__()
self.sample_size = sample_size
time_embed_dim = block_out_channels[0] * 4
# input
self.conv_in = InflatedConv3d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1))
# time
self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)
timestep_input_dim = block_out_channels[0]
self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
# class embedding
if class_embed_type is None and num_class_embeds is not None:
self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)
elif class_embed_type == "timestep":
self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
elif class_embed_type == "identity":
self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)
else:
self.class_embedding = None
self.down_blocks = nn.ModuleList([])
self.mid_block = None
self.up_blocks = nn.ModuleList([])
if isinstance(only_cross_attention, bool):
only_cross_attention = [only_cross_attention] * len(down_block_types)
if isinstance(attention_head_dim, int):
attention_head_dim = (attention_head_dim,) * len(down_block_types)
# down
output_channel = block_out_channels[0]
for i, down_block_type in enumerate(down_block_types):
res = 2 ** i
input_channel = output_channel
output_channel = block_out_channels[i]
is_final_block = i == len(block_out_channels) - 1
down_block = get_down_block(
down_block_type,
num_layers=layers_per_block,
in_channels=input_channel,
out_channels=output_channel,
temb_channels=time_embed_dim,
add_downsample=not is_final_block,
resnet_eps=norm_eps,
resnet_act_fn=act_fn,
resnet_groups=norm_num_groups,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=attention_head_dim[i],
downsample_padding=downsample_padding,
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention[i],
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
use_inflated_groupnorm=use_inflated_groupnorm,
use_motion_module=use_motion_module and (res in motion_module_resolutions) and (not motion_module_decoder_only),
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
self.down_blocks.append(down_block)
# mid
if mid_block_type == "UNetMidBlock3DCrossAttn": | self.mid_block = UNetMidBlock3DCrossAttn( | 3 | 2023-12-12 13:11:24+00:00 | 12k |
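The completion target recorded for this example (`self.mid_block = UNetMidBlock3DCrossAttn(`) begins exactly where the cropped code stops. Below is a hedged sketch of how that constructor call plausibly continues inside `UNet3DConditionModel.__init__`; the keyword wiring is inferred from the `UNetMidBlock3DCrossAttn` signature in the context snippets and from the neighbouring `get_down_block` call, so the specific argument choices (for example `block_out_channels[-1]` and `use_motion_module and motion_module_mid_block`) are assumptions for illustration, not the repository's verbatim source.

        # Hypothetical continuation of the __init__ shown above (fragment, not standalone code).
        # The first line repeats the last line of the cropped code for context.
        if mid_block_type == "UNetMidBlock3DCrossAttn":
            self.mid_block = UNetMidBlock3DCrossAttn(
                in_channels=block_out_channels[-1],        # deepest feature width (assumed)
                temb_channels=time_embed_dim,
                resnet_eps=norm_eps,
                resnet_act_fn=act_fn,
                output_scale_factor=mid_block_scale_factor,
                resnet_time_scale_shift=resnet_time_scale_shift,
                cross_attention_dim=cross_attention_dim,
                attn_num_head_channels=attention_head_dim[-1],
                resnet_groups=norm_num_groups,
                dual_cross_attention=dual_cross_attention,
                use_linear_projection=use_linear_projection,
                upcast_attention=upcast_attention,
                unet_use_cross_frame_attention=unet_use_cross_frame_attention,
                unet_use_temporal_attention=unet_use_temporal_attention,
                use_inflated_groupnorm=use_inflated_groupnorm,
                # only enable the motion module in the mid block when both flags allow it (assumed)
                use_motion_module=use_motion_module and motion_module_mid_block,
                motion_module_type=motion_module_type,
                motion_module_kwargs=motion_module_kwargs,
            )
        else:
            raise ValueError(f"unknown mid_block_type : {mid_block_type}")

Every keyword used above appears in the `UNetMidBlock3DCrossAttn.__init__` signature quoted in this record's context list, which is consistent with the record's gold snippet pointing at that class.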
allenai/unified-io-2 | t5x/precompile.py | [
{
"identifier": "models",
"path": "t5x/models.py",
"snippet": "class TokensIdsToLogitsCallable(typing_extensions.Protocol):\nclass DecodeFnCallable(typing_extensions.Protocol):\nclass BaseModel(abc.ABC):\nclass BaseTransformerModel(BaseModel):\nclass EncoderDecoderModel(BaseTransformerModel):\nclass DecoderOnlyModel(BaseTransformerModel):\n def __call__(\n self, decoding_state: decoding.DecodingState\n ) -> Tuple[jnp.ndarray, Mapping[str, jnp.ndarray]]:\n def __call__(self, *, inputs: jnp.ndarray, cache: Mapping[str, jnp.ndarray],\n tokens_to_logits: TokensIdsToLogitsCallable, eos_id: int,\n num_decodes: int, decode_rng: Optional[jax.random.KeyArray],\n cache_offset: int, **kwargs) -> Tuple[jnp.ndarray, jnp.ndarray]:\n def __init__(self, optimizer_def: optimizers.OptimizerDefType):\n def loss_fn(\n self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n dropout_rng: Optional[jax.random.KeyArray],\n ) -> Tuple[jnp.ndarray, MetricsMap]:\n def eval_fn(\n self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n ) -> Tuple[jnp.ndarray, MetricsMap]:\n def predict_batch(self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n rng: Optional[jax.random.KeyArray] = None) -> jnp.ndarray:\n def predict_batch_with_aux(\n self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n rng: Optional[jax.random.KeyArray] = None,\n ) -> Tuple[jnp.ndarray, Mapping[str, jnp.ndarray]]:\n def score_batch(self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n return_intermediates: bool = False) -> jnp.ndarray:\n def get_initial_variables(\n self,\n rng: jax.random.KeyArray,\n input_shapes: Mapping[str, Array],\n input_types: Optional[Mapping[str, jnp.dtype]] = None\n ) -> flax_scope.FrozenVariableDict:\n def __init__(\n self,\n module: nn.Module,\n input_vocabulary: seqio.Vocabulary,\n output_vocabulary: seqio.Vocabulary,\n optimizer_def: optimizers.OptimizerDefType,\n decode_fn: Optional[DecodeFnCallable] = None,\n label_smoothing: float = 0.0,\n z_loss: float = 0.0,\n loss_normalizing_factor: Optional[Union[\n float, int, str, losses.SpecialLossNormalizingFactor]] = None,\n ):\n def input_vocabulary(self):\n def output_vocabulary(self):\n def decode_fn(self):\n def _compute_logits(\n self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n dropout_rng: Optional[jax.random.KeyArray] = None) -> jnp.ndarray:\n def loss_fn(\n self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n dropout_rng: Optional[jax.random.KeyArray],\n ) -> Tuple[jnp.ndarray, MetricsMap]:\n def _compute_metrics(\n self,\n logits: jnp.ndarray,\n targets: jnp.ndarray,\n mask: jnp.ndarray,\n loss: jnp.ndarray,\n z_loss: Optional[jnp.ndarray] = None,\n segment_ids: Optional[Mapping[str, jnp.ndarray]] = None,\n ) -> MetricsMap:\n def __init__(\n self,\n module: nn.Module,\n input_vocabulary: seqio.Vocabulary,\n output_vocabulary: seqio.Vocabulary,\n optimizer_def: optimizers.OptimizerDefType,\n decode_fn: DecodeFnCallable = decoding.beam_search,\n feature_converter_cls: Optional[Callable[...,\n seqio.FeatureConverter]] = None,\n label_smoothing: float = 0.0,\n z_loss: float = 0.0,\n loss_normalizing_factor: Optional[float] = None,\n ):\n def get_initial_variables(\n self,\n rng: jax.random.KeyArray,\n input_shapes: Mapping[str, Array],\n input_types: Optional[Mapping[str, jnp.dtype]] = None\n ) -> flax_scope.FrozenVariableDict:\n def _compute_logits(\n self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n dropout_rng: Optional[jax.random.KeyArray] = None,\n mutable: flax_scope.CollectionFilter = False,\n other_variables: 
Optional[PyTreeDef] = None,\n ) -> Union[jnp.ndarray, Tuple[jnp.ndarray, flax_scope.FrozenVariableDict]]:\n def _compute_logits_from_slice(\n self, decoding_state: decoding.DecodingState, params: PyTreeDef,\n encoded_inputs: jnp.ndarray, raw_inputs: jnp.ndarray,\n max_decode_length: int) -> Tuple[jnp.ndarray, Mapping[str, jnp.ndarray]]:\n def predict_batch_with_aux(\n self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n rng: Optional[jax.random.KeyArray] = None,\n decoder_params: Optional[MutableMapping[str, Any]] = None,\n return_all_decodes: bool = False,\n num_decodes: int = 1,\n prompt_with_targets: bool = False\n ) -> Tuple[jnp.ndarray, Mapping[str, jnp.ndarray]]:\n def score_batch(\n self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n return_intermediates: bool = False,\n ) -> Union[jnp.ndarray, Tuple[jnp.ndarray, Mapping[str, Any]]]:\n def __init__(\n self,\n module: nn.Module,\n vocabulary: seqio.Vocabulary,\n optimizer_def: optimizers.OptimizerDefType,\n decode_fn: DecodeFnCallable = decoding.temperature_sample,\n inputs_bidirectional_attention: bool = False,\n feature_converter_cls: Optional[Callable[...,\n seqio.FeatureConverter]] = None,\n label_smoothing: float = 0.0,\n z_loss: float = 0.0,\n loss_normalizing_factor: Optional[float] = None,\n ):\n def get_initial_variables(\n self,\n rng: jax.random.KeyArray,\n input_shapes: Mapping[str, Array],\n input_types: Optional[Mapping[str, jnp.dtype]] = None\n ) -> flax_scope.FrozenVariableDict:\n def _get_decoder_causal_attention(self, batch):\n def _compute_logits(\n self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n dropout_rng: Optional[jax.random.KeyArray] = None,\n mutable: flax_scope.CollectionFilter = False) -> jnp.ndarray:\n def _compute_logits_from_slice(\n self,\n decoding_state: decoding.DecodingState,\n params: PyTreeDef,\n max_decode_length: int,\n ) -> Tuple[jnp.ndarray, Mapping[str, jnp.ndarray]]:\n def score_batch(self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n return_intermediates: bool = False) -> jnp.ndarray:\n def _compute_kv_cache(\n self,\n params: PyTreeDef,\n inputs: jnp.ndarray,\n inputs_lengths: jnp.ndarray,\n decoder_causal_attention: jnp.ndarray,\n ) -> PyTreeDef:\n def predict_batch_with_aux(\n self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n rng: Optional[jax.random.KeyArray] = None,\n *,\n return_all_decodes: bool = False,\n num_decodes: int = 1,\n decoder_params: Optional[MutableMapping[str, Any]] = None,\n ) -> Tuple[jnp.ndarray, Mapping[str, jnp.ndarray]]:\ndef remove_prefix(sequence: jnp.ndarray,\n prefix_length: jnp.ndarray) -> jnp.ndarray:\ndef compute_weighted_accuracy(\n logits: jnp.ndarray,\n targets: jnp.ndarray,\n weights: Optional[jnp.ndarray] = None) -> Tuple[jnp.ndarray, jnp.ndarray]:\ndef compute_metrics(logits: jnp.ndarray, targets: jnp.ndarray,\n weights: jnp.ndarray, loss: jnp.ndarray,\n weight_sum: jnp.ndarray,\n additional_metrics: MetricsMap) -> MetricsMap:\ndef compute_base_metrics(\n logits: jnp.ndarray,\n targets: jnp.ndarray,\n mask: jnp.ndarray,\n loss: jnp.ndarray,\n z_loss: Optional[jnp.ndarray] = None,\n segment_ids: Optional[Mapping[str, jnp.ndarray]] = None,\n) -> MetricsMap:\ndef get_input_vocabulary(model: BaseTransformerModel) -> seqio.Vocabulary:\ndef get_output_vocabulary(model: BaseTransformerModel) -> seqio.Vocabulary:\n FEATURE_CONVERTER_CLS: Callable[..., seqio.FeatureConverter]\n FEATURE_CONVERTER_CLS = seqio.EncDecFeatureConverter\n FEATURE_CONVERTER_CLS = seqio.DecoderFeatureConverter"
},
{
"identifier": "partitioning",
"path": "t5x/partitioning.py",
"snippet": "class AxisNames(tuple):\nclass LocalChunkInfo:\nclass LocalChunker:\nclass DataLayout:\nclass BasePartitioner(metaclass=abc.ABCMeta):\nclass PjittedFnWithContext(PartitionedCallable):\nclass BasePjitPartitioner(BasePartitioner):\nclass PjitPartitioner(BasePjitPartitioner):\n def __new__(cls, *names):\n def __repr__(self):\ndef pjit(\n fun: Callable, # pylint: disable=g-bare-generic\n in_axis_resources,\n out_axis_resources,\n static_argnums: Union[int, Sequence[int]] = (),\n donate_argnums: Union[int, Sequence[int]] = (),\n backend: Optional[str] = None):\ndef pjit_with_cpu_fallback(\n fun: Callable, # pylint: disable=g-bare-generic\n in_axis_resources,\n out_axis_resources,\n static_argnums: Union[int, Sequence[int]] = (),\n donate_argnums: Union[int, Sequence[int]] = (),\n backend: Optional[str] = None):\ndef with_sharding_constraint(x, axis_resources):\ndef bounds_from_last_device(\n last_device: jax.lib.xla_client.Device) -> HardwareMesh:\ndef get_coords(device: jax.lib.xla_client.Device) -> HardwareMesh:\ndef global_mesh_defined():\ndef get_mesh(model_parallel_submesh: HardwareMesh,\n input_devices: Sequence[JaxDevice] = (),\n input_local_devices: Sequence[JaxDevice] = (),\n tile_by_host_if_needed: bool = True,\n backend: Optional[str] = None) -> Mesh:\n def dh_dd_mh_md(g: int, m: int, l: int) -> Tuple[int, int, int, int]:\ndef get_cpu_mesh() -> Mesh:\ndef get_gpu_mesh(num_partitions: int) -> Mesh:\ndef default_mesh(num_partitions: int,\n model_parallel_submesh: Optional[HardwareMesh] = None,\n backend: Optional[str] = None) -> Mesh:\n def __init__(self, global_mesh: Mesh):\n def get_local_chunk_info(\n self, global_shape: Tuple[int, ...],\n mesh_axes: Sequence[Optional[str]]) -> LocalChunkInfo:\ndef standard_logical_axis_rules(\n activation_partitioning_dims: int = 1,\n parameter_partitioning_dims: int = 1,\n additional_rules: Optional[LogicalAxisRules] = None) -> LogicalAxisRules:\ndef _id_fn(x, ix):\n def __init__(self,\n num_partitions: Optional[int] = None,\n model_parallel_submesh: Optional[HardwareMesh] = None,\n params_on_devices: bool = True,\n backend: Optional[str] = None):\n def mesh(self) -> Mesh:\n def data_partition_spec(self) -> PartitionSpec:\n def get_data_layout(self,\n batch_size: Optional[int] = None,\n host_index: Optional[int] = None) -> DataLayout:\n def get_local_chunk_info(\n self, global_shape: Tuple[int, ...],\n mesh_axes: Sequence[Optional[str]]) -> LocalChunkInfo:\n def params_on_devices(self):\n def move_params_to_devices(self, train_state: TrainState,\n train_state_axes: TrainState) -> TrainState:\n def _local_chunker(self):\n def get_logical_axes(self, train_state: TrainState) -> TrainState:\n def get_mesh_axes(self, train_state: TrainState) -> TrainState:\n def partition(\n self,\n fn: Callable, # pylint: disable=g-bare-generic\n in_axis_resources,\n out_axis_resources,\n static_argnums: Union[int, Sequence[int]] = (),\n donate_argnums: Union[int, Sequence[int]] = ()\n ) -> PartitionedCallable:\n def compile(self, partitioned_fn: PartitionedCallable,\n *args) -> CompiledPartitionedCallable:\n def __init__(self,\n pjitted_fn,\n partition_mesh: Mesh,\n logical_axis_rules: flax_partitioning.LogicalRules = ()):\n def __call__(self, *args):\n def lower(self, *args):\n def _local_chunker(self) -> LocalChunker:\n def mesh(self) -> Mesh:\n def partition(\n self,\n fn: Callable, # pylint: disable=g-bare-generic\n in_axis_resources,\n out_axis_resources,\n static_argnums: Union[int, Sequence[int]] = (),\n donate_argnums: Union[int, Sequence[int]] = 
()\n ) -> PjittedFnWithContext:\n def compile(self, partitioned_fn: PjittedFnWithContext,\n *args) -> CompiledPartitionedCallable:\n def __init__(self,\n num_partitions: Optional[int] = None,\n model_parallel_submesh: Optional[HardwareMesh] = None,\n params_on_devices: bool = True,\n backend: Optional[str] = None,\n logical_axis_rules: Optional[LogicalAxisRules] = None,\n use_cpu_pjit: Optional[bool] = False):\n def partition(\n self,\n fn: Callable, # pylint: disable=g-bare-generic\n in_axis_resources,\n out_axis_resources,\n static_argnums: Union[int, Sequence[int]] = (),\n donate_argnums: Union[int, Sequence[int]] = ()\n ) -> PjittedFnWithContext:\n def logical_axis_rules(self):\n def get_logical_axes(self, train_state: TrainState) -> TrainState:\n def get_mesh_axes(self, train_state: TrainState) -> TrainState:\n def _logical_to_mesh_axes(param_name, logical_axes):"
},
{
"identifier": "trainer",
"path": "t5x/trainer.py",
"snippet": "def _merge_metrics(a, b):\ndef merge_metrics(a, b):\n def result(self) -> Mapping[str, Array]:\n def result(self) -> Mapping[str, clu.values.Value]:\n def result(self) -> float:\n def __call__(\n self,\n step: jnp.ndarray,\n ) -> jnp.ndarray:\n def __call__(self, metrics: MetricMapType, duration: float,\n num_steps: int) -> Mapping[str, jnp.ndarray]:\n def __call__(\n self, train_state: train_state_lib.TrainState,\n batch: BatchType) -> Tuple[train_state_lib.TrainState, MetricMapType]:\n def __call__(self, train_state: train_state_lib.TrainState,\n batch: jnp.ndarray) -> MetricMapType:\n def compute_metrics(\n self, gradients: ModelWeights,\n old_train_state: train_state_lib.TrainState,\n new_train_state: train_state_lib.TrainState) -> MutableMetricMapType:\n def _make_rms_metrics(name, tree):\n def _make_max_metrics(name, tree):\n def compute_metrics(\n self, gradients: ModelWeights,\n old_train_state: train_state_lib.TrainState,\n new_train_state: train_state_lib.TrainState) -> MutableMetricMapType:\n def __init__(self):\n def close(self):\n def __del__(self):\n def _get_completion_future(self, block_on: PyTreeDef = ()) -> TimeFuture:\n def _get_completion_time():\n def start(self, block_on: PyTreeDef = ()):\n def stop(self, block_on: PyTreeDef = ()) -> TimeFuture:\n def __init__(self, name: str, summary_dir: Optional[str] = None, log_to_wandb=False):\n def __del__(self):\n def close(self):\n def summary_writer(self) -> metric_writers.MetricWriter:\n def write_scalar(self, key: str, val: metric_writers.interface.Scalar,\n step: int):\n def write_scalars(self, step: int,\n scalars: Mapping[str, metric_writers.interface.Scalar]):\n def start_duration_timer(self, block_on: PyTreeDef = ()):\n def write_metrics_summary(self, metrics: MetricMapType, step: int,\n num_steps: int) -> MetricValueMapFuture:\n def _summarize_and_write():\n def _ensure_not_on_device(x):\n def flush(self):\n def __init__(self, model: models.BaseModel,\n train_state: train_state_lib.TrainState,\n partitioner: partitioning.BasePartitioner,\n eval_names: Sequence[str], summary_dir: Optional[str],\n train_state_axes: Any, rng: Rng,\n use_wandb=False, packing_strategy=None, log_weights=None):\n def __enter__(self):\n def __exit__(self, exc_type, exc_value, traceback):\n def close(self):\n def _get_step_rng(self, step: int) -> Rng:\n def train_state(self):\n def train_state(self, train_state: PyTreeDef):\n def _weight_metric_fn(self):\n def _get_weight_metrics_fn(_params):\n def train(self,\n batch_iter: Union[Iterator[BatchType],\n clu.data.dataset_iterator.DatasetIterator],\n num_steps: int,\n start_step: Optional[int] = None) -> ArrayMapFuture:\n def compile_train(self, batch: ElementSpec) -> None:\n def eval(\n self, batch_iters: Mapping[str,\n Iterator[BatchType]], pbar_nsteps=None) -> Mapping[str, Array]:\n def compile_eval(self, batches: Mapping[str, BatchType]) -> None:\n def _partitioned_train_step(self) -> PartitionedTrainCallable:\n def _partitioned_eval_step(self) -> PartitionedEvalCallable:\ndef accumulate_grads_microbatched(\n model: models.BaseModel,\n train_state: train_state_lib.TrainState,\n batch: BatchType,\n dropout_rng: Rng,\n num_microbatches: Optional[int],\n data_partition_spec: PartitionSpec = PartitionSpec(\"data\"),\n loss_fn_args=None\n) -> Tuple[train_state_lib.TrainState, MutableMetricMapType,\n def get_microbatch(batch: BatchType, idx: int) -> Mapping[str, jnp.ndarray]:\n def metrics_and_grad(loop_cnt, dropout_rng, flax_mutables=None):\n def per_microbatch_train_step(\n loop_cnt: 
int, state: Tuple[jnp.ndarray, jnp.ndarray,\n Mapping[str, jnp.ndarray],\n Optional[FlaxMutables]]\n ) -> Tuple[jnp.ndarray, jnp.ndarray, Mapping[str, jnp.ndarray],\ndef apply_grads(\n train_state: train_state_lib.TrainState,\n grad_accum: ModelWeights,\n metrics: MutableMetricMapType,\n learning_rate: jnp.ndarray,\n weight_metrics_computer: Optional[WeightMetricsComputer],\n other_state_variables: Optional[Mapping[str, Any]] = None\n) -> Tuple[train_state_lib.TrainState, MetricMapType]:\ndef eval_step(model: models.BaseModel, train_state: train_state_lib.TrainState,\n batch: jnp.ndarray) -> MetricMapType:\ndef train_with_lr(\n train_state: train_state_lib.TrainState,\n batch: BatchType,\n learning_rate: jnp.ndarray,\n dropout_rng: Rng,\n model: models.BaseModel,\n num_microbatches: Optional[int],\n weight_metrics_computer: Optional[WeightMetricsComputer] = None,\n data_partition_spec: PartitionSpec = PartitionSpec(\"data\"),\n loss_fn_args=None\n):\n def __call__(self, model: models.BaseModel,\n train_state: train_state_lib.TrainState,\n partitioner: partitioning.BasePartitioner,\n eval_names: Sequence[str], summary_dir: Optional[str],\n train_state_axes: Any, rng: Rng) -> BaseTrainer:\n def __init__(self,\n model: models.BaseModel,\n train_state: train_state_lib.TrainState,\n partitioner: partitioning.BasePartitioner,\n eval_names: Sequence[str],\n summary_dir: Optional[str],\n train_state_axes: Any,\n rng: Rng,\n learning_rate_fn: LearningRateCallable,\n num_microbatches: Optional[int],\n weight_metrics_computer: Optional[WeightMetricsComputer] = None,\n use_wandb=True,\n packing_strategy=None,\n log_weights=False\n ):\n def _partitioned_train_step(self) -> PartitionedTrainCallable:\n def train_step(train_state: train_state_lib.TrainState, batch: BatchType, static_args=None):\n def _partitioned_eval_step(self) -> PartitionedEvalCallable:\ndef _warn_action_not_run(action, task, metric):\n def run(self, train_state: train_state_lib.TrainState,\n metrics_by_task: Mapping[str, MetricValueMapType]) -> bool:\n def __init__(self,\n metric: Tuple[str, str],\n mode: str,\n patience: int = 3,\n atol: float = 0.,\n rtol: float = 0.):\n def _compare_fn(self, current, previous):\n def run(self, train_state: train_state_lib.TrainState,\n metrics_by_task: Mapping[str, MetricValueMapType]) -> bool:\n def __init__(self, task: str, metric: str = \"loss\"):\n def run(self, train_state: train_state_lib.TrainState,\n metrics_by_task: Mapping[str, MetricValueMapType]) -> bool:\nclass ArrayMapFuture(typing_extensions.Protocol):\nclass MetricValueMapFuture(typing_extensions.Protocol):\nclass TimeFuture(typing_extensions.Protocol):\nclass LearningRateCallable(typing_extensions.Protocol):\nclass SummarizeMetricsCallable(typing_extensions.Protocol):\nclass PartitionedTrainCallable(typing_extensions.Protocol):\nclass PartitionedEvalCallable(typing_extensions.Protocol):\nclass GradNormComputer(object):\nclass WeightMetricsComputer(object):\nclass _AsyncTimer(object):\nclass MetricsManager(object):\nclass PreemptionError(Exception):\nclass BaseTrainer(abc.ABC):\nclass BaseTrainerConstructor(Protocol):\nclass Trainer(BaseTrainer):\nclass ActionMode(enum.Enum):\nclass BaseAction(abc.ABC):\nclass EarlyStoppingAction(BaseAction):\nclass TerminateOnNanAction(BaseAction):\n _WEIGHT_METRICS = [\n \"weight_rms\", \"weight_gradient_rms\", \"weight_update_rms\", \"weight_max\"\n ]\n TRAIN = 1\n TRAIN_EVAL = 2\n INFER_EVAL = 3"
},
{
"identifier": "utils",
"path": "t5x/utils.py",
"snippet": "class EvaluatorConstructor(typing_extensions.Protocol):\nclass SaveCheckpointConfig:\nclass RestoreCheckpointConfig:\nclass CheckpointConfig:\nclass LegacyCheckpointer(orbax.checkpoint.Checkpointer):\nclass LegacyCheckpointManager(orbax.checkpoint.CheckpointManager):\nclass DatasetConfig:\nclass GDADatasetIterator(clu.data.dataset_iterator.DatasetIterator):\nclass InitFnCallable(typing_extensions.Protocol):\nclass LearningRateCallable(typing_extensions.Protocol):\nclass TrainStateInitializer:\nclass InferStepWithRngCallable(typing_extensions.Protocol):\nclass InferStepWithoutRngCallable(typing_extensions.Protocol):\nclass InferFnCallable(typing_extensions.Protocol):\nclass GetDatasetCallable(typing_extensions.Protocol):\nclass GetEvalDatasetCallable(typing_extensions.Protocol):\nclass _RegexMap(collections.abc.Mapping):\n def __call__(\n self,\n mixture_or_task_name: str,\n feature_converter: seqio.FeatureConverter,\n eval_split: str,\n use_cached: bool,\n seed: Optional[int],\n sequence_length: Optional[Mapping[str, int]],\n log_dir: Optional[str],\n use_memory_cache: bool,\n ) -> seqio.Evaluator:\n def __post_init__(self):\n def __post_init__(self):\n def __init__(self,\n *,\n save_checkpointer: Optional[checkpoints.Checkpointer] = None,\n restore_checkpointer: checkpoints.Checkpointer,\n strict: Optional[bool] = False):\n async def async_save(self, path: str, item: Any):\n async def async_restore(self, path: str, item: Optional[Any] = None) -> Any:\n def save(self,\n path: str,\n item: train_state_lib.TrainState,\n state_transformation_fns: Sequence[\n checkpoints.SaveStateTransformationFn] = (),\n *,\n concurrent_gb: int = 128):\n def restore(self,\n path: str,\n item: Optional[train_state_lib.TrainState],\n state_transformation_fns: Sequence[\n checkpoints.RestoreStateTransformationFn] = (),\n fallback_state: Optional[Mapping[str, Any]] = None,\n lazy_parameters: bool = False) -> train_state_lib.TrainState:\n def __init__(self,\n *,\n save_cfg: Optional[SaveCheckpointConfig] = None,\n restore_cfg: RestoreCheckpointConfig,\n train_state_shape: train_state_lib.TrainState,\n partitioner: partitioning.BasePartitioner,\n ds_iter: Optional[\n Union[tf.data.Iterator,\n clu.data.dataset_iterator.DatasetIterator]] = None,\n model_dir: Optional[str] = None,\n use_gda: Optional[bool] = True):\n def save(self,\n train_state: train_state_lib.TrainState,\n state_transformation_fns: Sequence[\n checkpoints.SaveStateTransformationFn] = ()):\n def restore(\n self,\n paths: Sequence[str],\n restore_cfg: RestoreCheckpointConfig,\n fallback_state: Optional[Mapping[str, Any]] = None\n ) -> Union[train_state_lib.TrainState, Sequence[train_state_lib.TrainState]]:\ndef _get_index_mappings(device_to_idxs):\ndef _create_gda(partitioner: partitioning.BasePartitioner,\n global_shapes: PyTreeDef, host_arrays: PyTreeDef) -> PyTreeDef:\n def _put_to_devices(x, global_shape):\n def _gda(dbs, global_shape):\n def __init__(self, iterator: clu.data.dataset_iterator.DatasetIterator,\n partitioner: partitioning.BasePartitioner,\n global_shapes: PyTreeDef):\n def __next__(self):\n def reset(self):\n def element_spec(self):\n def save(self, filename):\n def restore(self, filename):\n def iterator(self):\ndef sync_global_devices(name: str) -> None:\ndef multihost_assert_equal(input_tree, fail_message: str = ''):\ndef _hardware_uniform(\n rng_key: Array,\n shape: Shape,\n dtype: jnp.dtype = np.float32,\n minval: Array = np.float32(0),\n maxval: Array = np.float32(1)\n) -> Array:\ndef _hardware_bernoulli(\n 
rng_key: Array, p: np.ndarray = np.float32(0.5),\n shape: Shape = ()) -> Array:\ndef set_hardware_rng_ops():\ndef get_zeros_batch_like_spec(\n batch_spec: Mapping[str,\n jax.ShapeDtypeStruct]) -> Mapping[str, jnp.ndarray]:\ndef get_zeros_batch_like_dataset(dataset: tf.data.Dataset,\n batch_size=None) -> Mapping[str, jnp.ndarray]:\n def __call__(\n self, rng: Array, input_shapes: Mapping[str, Array],\n input_types: Optional[Mapping[str,\n DType]]) -> flax_scope.FrozenVariableDict:\n def __call__(self, step: jnp.ndarray) -> jnp.ndarray:\ndef create_learning_rate_scheduler(\n factors: str = 'constant * linear_warmup * rsqrt_decay',\n base_learning_rate: float = 0.5,\n warmup_steps: int = 1000,\n decay_factor: float = 0.5,\n steps_per_decay: int = 20000,\n steps_per_cycle: int = 100000,\n step_offset: int = 0,\n min_learning_rate: float = 1e-8) -> LearningRateCallable:\n def step_fn(step: jnp.ndarray) -> jnp.ndarray:\ndef steps(prefix, config, data_size=None, batch_size=None, default=ValueError):\ndef create_vision_learning_rate_scheduler(\n total_steps, batch_size=None, data_size=None,\n base=1.0, decay_type=\"stair\",\n scale_with_batchsize=False, **kw):\n def step_fn(step):\ndef get_first_valid_restore_config_and_paths(\n restore_cfgs: Sequence[RestoreCheckpointConfig]\n) -> Tuple[Optional[RestoreCheckpointConfig], Sequence[str]]:\ndef get_fallback_state(restore_cfg: RestoreCheckpointConfig,\n init_fn: Callable[[jnp.ndarray], Mapping[str, Any]],\n init_rng: jnp.ndarray) -> Optional[Mapping[str, Any]]:\n def __init__(self,\n optimizer_def: Optional[optimizers.OptimizerDefType],\n init_fn: InitFnCallable,\n input_shapes: Mapping[str, Array],\n partitioner: partitioning.BasePartitioner,\n model=None,\n input_types: Optional[Mapping[str, DType]] = None):\n def initialize_train_state(rng: Array):\n def from_scratch(self, init_rng: Array) -> train_state_lib.TrainState:\n def from_checkpoints(\n self,\n restore_cfgs: Sequence[RestoreCheckpointConfig],\n ds_iter: Optional[tf.data.Iterator] = None,\n init_rng: Optional[jnp.ndarray] = None,\n ) -> Iterable[train_state_lib.TrainState]:\n def _restore_path(path, cfg):\n def from_checkpoint(\n self,\n ckpt_cfgs: Sequence[RestoreCheckpointConfig],\n *,\n ds_iter: Optional[tf.data.Iterator] = None,\n init_rng: Optional[jnp.ndarray] = None\n ) -> Optional[train_state_lib.TrainState]:\n def from_checkpoint_or_scratch(\n self,\n ckpt_cfgs: Sequence[RestoreCheckpointConfig],\n *,\n init_rng: Array,\n ds_iter: Optional[tf.data.Iterator] = None) -> train_state_lib.TrainState:\ndef log_model_info(log_file: Optional[str],\n full_train_state: train_state_lib.TrainState,\n partitioner: partitioning.BasePartitioner):\n def _log_info_and_write_to_file(writer, format_str, *args):\n def _log_variable(name: str, arr: Optional[np.ndarray],\n logical_axes: Optional[partitioning.AxisNames],\n mesh_axes: Optional[partitioning.PartitionSpec]):\n def __call__(self,\n params: Mapping[str, Any],\n batch: Mapping[str, jnp.ndarray],\n rng: jnp.ndarray = None) -> PyTreeDef:\n def __call__(self, params: Mapping[str, Any],\n batch: Mapping[str, jnp.ndarray]) -> PyTreeDef:\n def __call__(\n self,\n ds: tf.data.Dataset,\n train_state: train_state_lib.TrainState,\n rng: Optional[jnp.ndarray] = None\n ) -> Union[_InferFnResult, _InferFnWithAuxResult]:\ndef _remove_padding(all_inferences, all_indices):\ndef get_infer_fn(infer_step: InferStepCallable, batch_size: int,\n train_state_axes: train_state_lib.TrainState,\n partitioner: partitioning.BasePartitioner, \n pbar=False) -> 
InferFnCallable:\n def infer_step_with_indices(params, batch, rng, indices):\n def infer_fn(ds: tf.data.Dataset,\n train_state: train_state_lib.TrainState,\n rng: Optional[jnp.ndarray] = None):\n def _copy_to_host_async(x):\ndef import_module(module: str):\ndef get_vocabulary(\n cfg: DatasetConfig) -> Tuple[seqio.Vocabulary, seqio.Vocabulary]:\ndef verify_matching_vocabs(cfg: DatasetConfig, model: Any):\ndef get_dataset(cfg: DatasetConfig,\n shard_id: int,\n num_shards: int,\n feature_converter_cls: Callable[..., seqio.FeatureConverter],\n num_epochs: Optional[int] = None,\n continue_from_last_checkpoint: bool = False,\n batching_fn=None) -> tf.data.Dataset:\ndef get_dataset_inner(cfg: DatasetConfig,\n shard_info: seqio.ShardInfo,\n feature_converter_cls: Callable[...,\n seqio.FeatureConverter],\n seed: Optional[int] = None,\n num_epochs: Optional[int] = None,\n batching_fn=None\n ):\n def __call__(\n self,\n cfg: DatasetConfig,\n shard_id: int,\n num_shards: int,\n feature_converter_cls: Callable[..., seqio.FeatureConverter],\n num_epochs: Optional[int] = None,\n continue_from_last_checkpoint: bool = True\n ) -> Union[clu.data.dataset_iterator.DatasetIterator, tf.data.Dataset]:\n def __call__(\n self, cfg: DatasetConfig, shard_id: int, num_shards: int, eval_steps: int,\n feature_converter_cls: Callable[..., seqio.FeatureConverter]\n ) -> Mapping[str, tf.data.Dataset]:\ndef get_training_eval_datasets(\n cfg: DatasetConfig,\n shard_id: int,\n num_shards: int,\n eval_steps: int,\n feature_converter_cls: Callable[..., seqio.FeatureConverter],\n deterministic: bool = False,\n model_dir: Optional[str] = None,\n start_step: int = 0,\n) -> Mapping[str, tf.data.Dataset]:\n def _repeat_shard_batch_take_cache(ds: tf.data.Dataset):\ndef round_vocab_size_to_multiple(vocabulary: seqio.Vocabulary,\n divisor: int = 128):\ndef flatten_dict_string_keys(x):\ndef flatten_lists(lsts: Iterable[Iterable]) -> Sequence:\n def __init__(self, kvs: Sequence[Tuple[str, Any]]):\n def __getitem__(self, key: str) -> Any:\n def __len__(self) -> int:\n def __iter__(self) -> Iterable[Tuple[re.Pattern, Any]]:\ndef override_params_axes_names(\n model_variables: flax_scope.FrozenVariableDict,\n params_axes_names_override: Sequence[Tuple[str, Tuple[str, ...]]] = ()\n) -> flax_scope.FrozenVariableDict:\ndef get_local_data(x):"
}
] | import os
import clu.data
import jax
import numpy as np
import t5.data.mixtures # pylint:disable=unused-import
import tensorflow as tf
from typing import Callable, Optional
from jax import random
from t5x import models
from t5x import partitioning
from t5x import trainer as trainer_lib
from t5x import utils | 8,427 | # Copyright 2022 The T5X Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Precompile and generates HLO from TPU metadata backend.
TPU Metadata backend is a TPU backend without real TPU devices while supporting
any TPU topologies, to allow work that doesn't require real TPUs to run as if
it is, e.g., compiling/lowering a HLO graph with the backend.
Ideally, the precompile defaults to cpu backend for default device array
placement since metadata backend does not have memory allocation.
The pjit function is pinned to use available TPU Metadata backend, for getting
a proper lowering under TPU mesh.
"""
def precompile(
*,
| # Copyright 2022 The T5X Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Precompile and generates HLO from TPU metadata backend.
TPU Metadata backend is a TPU backend without real TPU devices while supporting
any TPU topologies, to allow work that doesn't require real TPUs to run as if
it is, e.g., compiling/lowering a HLO graph with the backend.
Ideally, the precompile defaults to cpu backend for default device array
placement since metadata backend does not have memory allocation.
The pjit function is pinned to use available TPU Metadata backend, for getting
a proper lowering under TPU mesh.
"""
def precompile(
*, | model: models.BaseTransformerModel, | 0 | 2023-12-12 20:23:33+00:00 | 12k |
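The docstring in this record describes compiling and lowering an HLO graph against a TPU metadata backend without real devices. As a point of reference, here is a minimal, generic JAX sketch of that idea, ahead-of-time lowering with abstract shapes. It is not t5x's `precompile` implementation; it only uses the public `jax.jit` / `.lower()` APIs, and the parameter and batch shapes are made up for illustration.

# Generic illustration of lowering-without-execution (not t5x code).
import jax
import jax.numpy as jnp

@jax.jit
def train_step(params, batch):
    # Stand-in computation; a real model's loss/gradient update would go here.
    return jax.tree_util.tree_map(lambda p: p - 1e-3 * jnp.mean(batch), params)

# Abstract shapes: no arrays are materialized, so no device memory is needed.
abstract_params = {"w": jax.ShapeDtypeStruct((1024, 1024), jnp.float32)}
abstract_batch = jax.ShapeDtypeStruct((8, 1024), jnp.float32)

lowered = train_step.lower(abstract_params, abstract_batch)  # traces and lowers, never runs
print(lowered.as_text()[:400])  # inspect the lowered HLO/StableHLO module as text

A function lowered this way can be inspected or cost-analyzed offline, which is the kind of workflow the record's docstring attributes to the metadata-only backend.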
SafeAILab/EAGLE | train/main.py | [
{
"identifier": "Model",
"path": "model/cnets.py",
"snippet": "class Model(nn.Module):\r\n def __init__(self,config,load_emb=False,path=None):\r\n super().__init__()\r\n\r\n\r\n\r\n\r\n self.gradient_checkpointing = True\r\n self.padding_idx = config.pad_token_id\r\n self.vocab_size = config.vocab_size\r\n\r\n self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)\r\n if load_emb:\r\n from safetensors import safe_open\r\n import json\r\n try:\r\n with open(os.path.join(path,\"model.safetensors.index.json\"),\"r\") as f:\r\n index_json=json.loads(f.read())\r\n emb_path=index_json[\"weight_map\"][\"model.embed_tokens.weight\"]\r\n with safe_open(os.path.join(path,emb_path),\r\n framework=\"pt\",\r\n device=\"cpu\") as f:\r\n tensor_slice = f.get_slice(\"model.embed_tokens.weight\")\r\n vocab_size, hidden_dim = tensor_slice.get_shape()\r\n tensor = tensor_slice[:, :hidden_dim].float()\r\n except:\r\n with open(os.path.join(path, \"pytorch_model.bin.index.json\"), \"r\") as f:\r\n index_json = json.loads(f.read())\r\n emb_path = index_json[\"weight_map\"][\"model.embed_tokens.weight\"]\r\n weights=torch.load(os.path.join(path,emb_path))\r\n tensor=weights[\"model.embed_tokens.weight\"].float()\r\n self.embed_tokens.weight.data = tensor\r\n\r\n\r\n #self.init_tree()\r\n\r\n self.layers = nn.ModuleList([LlamaDecoderLayer(config,index) for index in range(config.num_hidden_layers)])\r\n self.fc=nn.Linear(2*config.hidden_size,config.hidden_size)\r\n self.act=ACT2FN[config.hidden_act]\r\n for param in self.embed_tokens.parameters():\r\n param.requires_grad = False\r\n\r\n\r\n def init_tree(self):\r\n self.tree = mc_sim_7b_63\r\n self.tree_buffer=generate_tree_buffers(self.tree,self.embed_tokens.weight.device)\r\n\r\n\r\n def reset(self):\r\n self.tree_mask=None\r\n\r\n\r\n def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):\r\n # create causal mask\r\n # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]\r\n combined_attention_mask = None\r\n if input_shape[-1] > 1:\r\n combined_attention_mask = _make_causal_mask(\r\n input_shape,\r\n #inputs_embeds.dtype,\r\n torch.float32, # [MODIFIED] force to cast to float32\r\n device=inputs_embeds.device,\r\n past_key_values_length=past_key_values_length,\r\n )\r\n\r\n if attention_mask is not None:\r\n # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]\r\n expanded_attn_mask = _expand_mask(attention_mask, torch.float32, tgt_len=input_shape[-1]).to(\r\n inputs_embeds.device\r\n )\r\n combined_attention_mask = (\r\n expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask\r\n )\r\n\r\n # [MODIFIED] add tree mask\r\n if hasattr(self, \"tree_mask\") and self.tree_mask is not None:\r\n tree_mask = self.tree_mask\r\n tree_len = tree_mask.size(-1)\r\n combined_attention_mask[:, :, -tree_len:, -tree_len:][\r\n tree_mask == 0\r\n ] = torch.finfo(torch.float32).min\r\n\r\n\r\n return combined_attention_mask\r\n\r\n def forward(\r\n self,\r\n hidden_states,\r\n input_ids,\r\n attention_mask: Optional[torch.Tensor] = None,\r\n position_ids: Optional[torch.LongTensor] = None,\r\n past_key_values: Optional[List[torch.FloatTensor]] = None,\r\n inputs_embeds: Optional[torch.FloatTensor] = None,\r\n use_cache: Optional[bool] = None,\r\n output_attentions: Optional[bool] = None,\r\n output_hidden_states: Optional[bool] = None,\r\n return_dict: Optional[bool] = None,\r\n std=None\r\n ):\r\n batch_size, seq_length, _ = hidden_states.shape\r\n seq_length_with_past = seq_length\r\n 
past_key_values_length = 0\r\n\r\n with torch.no_grad():\r\n inputs_embeds = self.embed_tokens(input_ids)\r\n #inputs_embeds = inputs_embeds.detach()\r\n\r\n # if std is not None:\r\n # noise = torch.randn(inputs_embeds.size(),device=inputs_embeds.device) * std\r\n # inputs_embeds=inputs_embeds+noise\r\n\r\n if past_key_values is not None:\r\n past_key_values_length = past_key_values[0][0].shape[2]\r\n seq_length_with_past = seq_length_with_past + past_key_values_length\r\n if position_ids is None:\r\n device = hidden_states.device if hidden_states is not None else inputs_embeds.device\r\n position_ids = torch.arange(\r\n past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device\r\n )\r\n position_ids = position_ids.unsqueeze(0).view(-1, seq_length)\r\n else:\r\n position_ids = position_ids.view(-1, seq_length).long()\r\n\r\n if attention_mask is None:\r\n attention_mask = torch.ones(\r\n (batch_size, seq_length_with_past), dtype=torch.bool, device=hidden_states.device\r\n )\r\n attention_mask = self._prepare_decoder_attention_mask(\r\n attention_mask, (batch_size, seq_length), hidden_states, past_key_values_length\r\n )\r\n\r\n # if self.gradient_checkpointing and self.training:\r\n # if use_cache:\r\n # use_cache = False\r\n\r\n\r\n #hidden_states=self.act(self.fc(torch.cat((inputs_embeds,hidden_states),dim=-1)))\r\n inputs_embeds=inputs_embeds.to(hidden_states.dtype)\r\n hidden_states = self.fc(torch.cat((inputs_embeds, hidden_states), dim=-1))\r\n\r\n\r\n all_hidden_states = () if output_hidden_states else None\r\n next_decoder_cache = () if use_cache else None\r\n\r\n for idx, decoder_layer in enumerate(self.layers):\r\n if output_hidden_states:\r\n all_hidden_states += (hidden_states,)\r\n\r\n past_key_value = past_key_values[idx] if past_key_values is not None else None\r\n\r\n if self.gradient_checkpointing and self.training:\r\n\r\n def create_custom_forward(module):\r\n def custom_forward(*inputs):\r\n # None for past_key_value\r\n return module(*inputs, past_key_value, output_attentions)\r\n\r\n return custom_forward\r\n\r\n layer_outputs = torch.utils.checkpoint.checkpoint(\r\n create_custom_forward(decoder_layer),\r\n hidden_states,\r\n attention_mask,\r\n position_ids,\r\n )\r\n else:\r\n layer_outputs = decoder_layer(\r\n hidden_states,\r\n attention_mask=attention_mask,\r\n position_ids=position_ids,\r\n past_key_value=past_key_value,\r\n output_attentions=output_attentions,\r\n use_cache=use_cache,\r\n )\r\n\r\n hidden_states = layer_outputs[0]\r\n\r\n if use_cache:\r\n next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)\r\n\r\n if use_cache:\r\n return hidden_states,next_decoder_cache\r\n\r\n return hidden_states\r\n\r\n @torch.no_grad()\r\n def generate(self,hidden_states,input_ids,head,max_length=4,use_cache=False):\r\n return_input_ids=copy.deepcopy(input_ids[0].tolist())\r\n input_ids=input_ids[:,1:]\r\n\r\n #input_ids=input_ids.to(hidden_states.device)\r\n if use_cache:\r\n past_key_values=None\r\n for i in range(max_length):\r\n if past_key_values!=None:\r\n out_hidden,past_key_values = self(out_hidden[:, -1:], input_ids=torch.tensor([[token]]).to(input_ids.device),past_key_values=past_key_values,use_cache=True)\r\n else:\r\n out_hidden, past_key_values = self(hidden_states, input_ids=input_ids,use_cache=True)\r\n last_hidden = out_hidden[:, -1]\r\n last_headout = head(last_hidden)\r\n token = torch.argmax(last_headout)\r\n #input_ids = torch.cat((input_ids, torch.tensor([[token]]).to(input_ids.device)), 
dim=1)\r\n return_input_ids.append(token.item())\r\n if token == 2:\r\n break\r\n #hidden_states = torch.cat((hidden_states, out_hidden[:, -1:]), dim=1)\r\n else:\r\n for i in range(max_length):\r\n out_hidden=self(hidden_states,input_ids=input_ids)\r\n last_hidden = out_hidden[:, -1]\r\n last_headout = head(last_hidden)\r\n token = torch.argmax(last_headout)\r\n return_input_ids.append(token.item())\r\n input_ids = torch.cat((input_ids, torch.tensor([[token]]).to(input_ids.device)), dim=1)\r\n if token==2:\r\n break\r\n hidden_states = torch.cat((hidden_states, out_hidden[:, -1:]), dim=1)\r\n\r\n return return_input_ids\r\n\r\n @torch.no_grad()\r\n def repeat_kv(self,kv,numr):\r\n newkv=[]\r\n for i in kv:\r\n newkv.append((i[0].repeat(numr,1,1,1),i[1].repeat(numr,1,1,1)))\r\n return tuple(newkv)\r\n\r\n @torch.no_grad()\r\n def reduce_kv(self,kv,numr):\r\n newkv=[]\r\n for i in kv:\r\n newkv.append((i[0][:numr],i[1][:numr]))\r\n return tuple(newkv)\r\n\r\n\r\n def reset_kv(self):\r\n self.stable_kv=None\r\n\r\n @torch.no_grad()\r\n def repeat_hidden(self,hidden_state,repeat_num):\r\n new_hidden=[]\r\n for id,i in enumerate(repeat_num):\r\n new_hidden.append(hidden_state[:,id:id+1].repeat(1,i,1))\r\n return torch.cat(new_hidden,dim=1)\r\n\r\n # @torch.no_grad()\r\n # def sample(self,tensor,k=1,replacement=True):\r\n # probabilities = torch.nn.functional.softmax(tensor, dim=1)\r\n # sampled_indices = torch.multinomial(probabilities, k,replacement=replacement)\r\n # sampled_probs = torch.gather(probabilities, 1, sampled_indices)\r\n #\r\n # return sampled_indices,sampled_probs\r\n\r\n def sample(self,logits, logits_processor,k=1, replacement=False):\r\n logits = logits_processor(None, logits)\r\n probabilities = torch.nn.functional.softmax(logits, dim=1)\r\n sampled_indices = torch.multinomial(probabilities, k, replacement=False)\r\n sampled_probs = torch.gather(probabilities, 1, sampled_indices)\r\n\r\n cumulative_sum = torch.cumsum(sampled_probs, dim=1)\r\n cumulative_sum = torch.cat(\r\n (torch.zeros(cumulative_sum.shape[0], 1, device=cumulative_sum.device), cumulative_sum[:, :-1]), dim=-1)\r\n\r\n sampled_probs = sampled_probs / (1 - cumulative_sum)\r\n sampled_probs[torch.isinf(sampled_probs)] = -1\r\n sampled_probs[torch.isnan(sampled_probs)] = -1\r\n\r\n sampled_probs = torch.clamp(sampled_probs, min=0.0, max=1.0)\r\n\r\n return sampled_indices, sampled_probs,probabilities\r\n\r\n # if replacement:\r\n # sampled_indices = torch.multinomial(probabilities, k, replacement=True)\r\n # sampled_probs = torch.gather(probabilities, 1, sampled_indices)\r\n # return sampled_indices, sampled_probs\r\n # else:\r\n # sampled_indices = torch.multinomial(probabilities, k, replacement=False)\r\n # sampled_probs = torch.gather(probabilities, 1, sampled_indices)\r\n #\r\n # cumulative_sum = torch.cumsum(sampled_probs, dim=1)\r\n # cumulative_sum = torch.cat((torch.zeros(cumulative_sum.shape[0],1, device=cumulative_sum.device), cumulative_sum[:, :-1]),dim=-1)\r\n #\r\n # sampled_probs=sampled_probs/(1-cumulative_sum)\r\n # sampled_probs[torch.isinf(sampled_probs)] = -1\r\n # sampled_probs[torch.isnan(sampled_probs)] = -1\r\n #\r\n # sampled_probs = torch.clamp(sampled_probs, min=0.0, max=1.0)\r\n #\r\n # # has_nan = torch.isnan(sampled_probs).any()\r\n # # if has_nan:\r\n # # print(1)\r\n #\r\n # # sampled_probs_list=sampled_probs[0].tolist()\r\n # # sum_list=[1-sum(sampled_probs_list[:i]) for i in range(len(sampled_probs_list))]\r\n # # for i in range(len(sampled_probs_list)):\r\n # # 
a=sampled_probs_list[i]/(sum_list[i])\r\n # # if sum_list[i]==0:\r\n # # sampled_probs_list[i]=1.0\r\n # # else:\r\n # # sampled_probs_list[i]=sampled_probs_list[i]/(sum_list[i])\r\n # # sampled_probs=torch.tensor([sampled_probs_list],device=sampled_probs.device)\r\n #\r\n #\r\n #\r\n # return sampled_indices, sampled_probs\r\n\r\n @torch.no_grad()\r\n def topK_genrate(self, hidden_states, input_ids, head, logits_processor,max_length=4, use_cache=True):\r\n # test_=input_ids\r\n # input_ids = torch.tensor([state[1:]])\r\n input_ids = input_ids[:, 1:]\r\n input_ids = input_ids.to(hidden_states.device)\r\n ss_token,ss_prob,ss_op = [],[],[]\r\n len_posi=input_ids.shape[1]\r\n self.reset()\r\n if use_cache:\r\n\r\n\r\n if hasattr(self, \"stable_kv\") and self.stable_kv is not None:\r\n kv_len=self.stable_kv[0][0].shape[2]\r\n out_hidden, past_key_values = self(hidden_states, input_ids=input_ids[:,kv_len:], past_key_values=self.stable_kv,use_cache=True)\r\n else:\r\n out_hidden, past_key_values = self(hidden_states, input_ids=input_ids, use_cache=True)\r\n self.stable_kv=past_key_values\r\n last_hidden = out_hidden[:, -1]\r\n if not self.diff_device:\r\n last_headout = head(last_hidden)\r\n else:\r\n if hasattr(self, \"layer_device\"):\r\n last_headout = head(last_hidden)\r\n last_headout=last_headout.to(self.layer_device)\r\n else:\r\n last_headout=F.linear(last_hidden,self.headweight)\r\n\r\n\r\n\r\n for i in range(len(self.tree_buffer['tree_indices'])):\r\n if logits_processor is not None:\r\n topk_index,topk_prob,op=self.sample(last_headout,logits_processor,k=top_k,)\r\n else:\r\n top=torch.topk(last_headout, top_k, dim=-1)\r\n topk_index,topk_prob = top.indices,top.values\r\n op=None\r\n\r\n ss_token.append(topk_index)\r\n ss_prob.append(topk_prob)\r\n ss_op.append(op)\r\n #topk_index = torch.topk(last_headout, top_k, dim=-1).indices\r\n topk_index = topk_index.view(-1)\r\n select_index=topk_index[self.tree_buffer['tree_indices'][i]]\r\n #len_sq=select_index.shape[0]\r\n input_ids=select_index[None,:]\r\n if i==0:\r\n hidden_states = out_hidden[:, -1:]\r\n else:\r\n hidden_states=out_hidden\r\n hidden_states=self.repeat_hidden(hidden_states,self.tree_buffer[\"repeat_nums\"][i])\r\n #hidden_states = hidden_states.repeat(1,len_sq,1)\r\n self.tree_mask=self.tree_buffer['attn_mask'][i]\r\n position_ids=len_posi+self.tree_buffer[\"position_ids\"][i]\r\n out_hidden, past_key_values = self(hidden_states, input_ids=input_ids, past_key_values=past_key_values,\r\n position_ids=position_ids,use_cache=True)\r\n len_posi += 1\r\n\r\n if not self.diff_device:\r\n last_headout = head(out_hidden[0])\r\n else:\r\n if hasattr(self, \"layer_device\"):\r\n last_headout = head(out_hidden[0])\r\n last_headout = last_headout.to(self.layer_device)\r\n else:\r\n last_headout = F.linear(out_hidden[0], self.headweight)\r\n #last_headout = head(out_hidden[0])\r\n #sslogits.append(last_headout)\r\n #print(select_index)\r\n\r\n if logits_processor is not None:\r\n topk_index,topk_prob,op=self.sample(last_headout,logits_processor,k=top_k,)\r\n else:\r\n top = torch.topk(last_headout, top_k, dim=-1)\r\n topk_index, topk_prob = top.indices, top.values\r\n op=None\r\n ss_token.append(topk_index)\r\n ss_prob.append(topk_prob)\r\n ss_op.append(op)\r\n\r\n else:\r\n # TODO\r\n pass\r\n\r\n return (torch.cat(ss_token),torch.cat(ss_prob),ss_op)\r\n\r\n\r\n\r\n\r\n @torch.no_grad()\r\n def acc(self,data,head,max_length=5):\r\n hidden_states=data[\"hidden_states\"]\r\n input_ids=data[\"input_ids\"]\r\n 
#attention_mask=data[\"attention_mask\"]\r\n loss_mask=data[\"loss_mask\"]\r\n sample_mask=data[\"sample_mask\"]\r\n target=data[\"target\"]\r\n total=[0 for _ in range(max_length)]\r\n correct=[0 for _ in range(max_length)]\r\n bs,sl=hidden_states.shape[0],hidden_states.shape[1]\r\n target_headout = head(target)\r\n hidden_states_headout=head(hidden_states)\r\n\r\n for i in range(bs):\r\n for j in range(sl):\r\n if loss_mask[i,j]==0:\r\n continue\r\n single_hidden_states=hidden_states[i,:j]\r\n single_input_ids=input_ids[i,:j]\r\n\r\n\r\n single_hidden_states = single_hidden_states[None, :, :]\r\n single_input_ids = single_input_ids[None, :]\r\n for k in range(max_length):\r\n tmp_in_target_headout = hidden_states_headout[i,single_hidden_states.shape[1]-1]\r\n tmp_out_target_headout = target_headout[i, single_hidden_states.shape[1]-1]\r\n target_in_token = torch.argmax(tmp_in_target_headout)\r\n target_out_token = torch.argmax(tmp_out_target_headout)\r\n tmp_token=input_ids[i,single_hidden_states.shape[1]-1]\r\n tmp_sample_mask=sample_mask[i,single_hidden_states.shape[1]-1]\r\n if not (target_in_token==tmp_token):\r\n break\r\n out_hidden = self(single_hidden_states, input_ids=single_input_ids)\r\n last_hidden = out_hidden[:, -1]\r\n last_headout = head(last_hidden)\r\n token = torch.argmax(last_headout)\r\n total[k] += 1\r\n if token==target_out_token:\r\n correct[k]+=1\r\n else:\r\n for kk in range(k,max_length):\r\n total[kk]+=1\r\n break\r\n\r\n single_hidden_states=torch.cat((single_hidden_states,out_hidden[:,-1:]),dim=1)\r\n single_input_ids = torch.cat((single_input_ids, torch.tensor([[token]]).to(single_input_ids.device)), dim=1)\r\n\r\n\r\n acc=[correct[i]/total[i] for i in range(len(correct))]\r\n return acc\r"
},
{
"identifier": "EConfig",
"path": "model/configs.py",
"snippet": "class EConfig(PretrainedConfig):\r\n r\"\"\"\r\n This is the configuration class to store the configuration of a [`LlamaModel`]. It is used to instantiate an LLaMA\r\n model according to the specified arguments, defining the model architecture. Instantiating a configuration with the\r\n defaults will yield a similar configuration to that of the LLaMA-7B.\r\n\r\n Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\r\n documentation from [`PretrainedConfig`] for more information.\r\n\r\n\r\n Args:\r\n vocab_size (`int`, *optional*, defaults to 32000):\r\n Vocabulary size of the LLaMA model. Defines the number of different tokens that can be represented by the\r\n `inputs_ids` passed when calling [`LlamaModel`]\r\n hidden_size (`int`, *optional*, defaults to 4096):\r\n Dimension of the hidden representations.\r\n intermediate_size (`int`, *optional*, defaults to 11008):\r\n Dimension of the MLP representations.\r\n num_hidden_layers (`int`, *optional*, defaults to 32):\r\n Number of hidden layers in the Transformer encoder.\r\n num_attention_heads (`int`, *optional*, defaults to 32):\r\n Number of attention heads for each attention layer in the Transformer encoder.\r\n num_key_value_heads (`int`, *optional*):\r\n This is the number of key_value heads that should be used to implement Grouped Query Attention. If\r\n `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if\r\n `num_key_value_heads=1 the model will use Multi Query Attention (MQA) otherwise GQA is used. When\r\n converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed\r\n by meanpooling all the original heads within that group. For more details checkout [this\r\n paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to\r\n `num_attention_heads`.\r\n pretraining_tp (`int`, *optional*, defaults to `1`):\r\n Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this\r\n document](https://huggingface.co/docs/transformers/parallelism) to understand more about it. This value is\r\n necessary to ensure exact reproducibility of the pretraining results. Please refer to [this\r\n issue](https://github.com/pytorch/pytorch/issues/76232).\r\n hidden_act (`str` or `function`, *optional*, defaults to `\"silu\"`):\r\n The non-linear activation function (function or string) in the decoder.\r\n max_position_embeddings (`int`, *optional*, defaults to 2048):\r\n The maximum sequence length that this model might ever be used with. Typically set this to something large\r\n just in case (e.g., 512 or 1024 or 2048).\r\n initializer_range (`float`, *optional*, defaults to 0.02):\r\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\r\n rms_norm_eps (`float`, *optional*, defaults to 1e-12):\r\n The epsilon used by the rms normalization layers.\r\n use_cache (`bool`, *optional*, defaults to `True`):\r\n Whether or not the model should return the last key/values attentions (not used by all models). Only\r\n relevant if `config.is_decoder=True`.\r\n tie_word_embeddings(`bool`, *optional*, defaults to `False`):\r\n Whether to tie weight embeddings\r\n rope_scaling (`Dict`, *optional*):\r\n Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling\r\n strategies: linear and dynamic. Their scaling factor must be an float greater than 1. 
The expected format\r\n is `{\"type\": strategy name, \"factor\": scaling factor}`. When using this flag, don't update\r\n `max_position_embeddings` to the expected new maximum. See the following thread for more information on how\r\n these scaling strategies behave:\r\n https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is an\r\n experimental feature, subject to breaking API changes in future versions.\r\n\r\n Example:\r\n\r\n ```python\r\n >>> from transformers import LlamaModel, LlamaConfig\r\n\r\n >>> # Initializing a LLaMA llama-7b style configuration\r\n >>> configuration = LlamaConfig()\r\n\r\n >>> # Initializing a model from the llama-7b style configuration\r\n >>> model = LlamaModel(configuration)\r\n\r\n >>> # Accessing the model configuration\r\n >>> configuration = model.config\r\n ```\"\"\"\r\n model_type = \"llama\"\r\n keys_to_ignore_at_inference = [\"past_key_values\"]\r\n\r\n def __init__(\r\n self,\r\n vocab_size=32000,\r\n hidden_size=4096,\r\n intermediate_size=11008,\r\n num_hidden_layers=32,\r\n num_attention_heads=32,\r\n num_key_value_heads=None,\r\n hidden_act=\"silu\",\r\n max_position_embeddings=2048,\r\n initializer_range=0.02,\r\n rms_norm_eps=1e-6,\r\n use_cache=True,\r\n pad_token_id=None,\r\n bos_token_id=1,\r\n eos_token_id=2,\r\n pretraining_tp=1,\r\n tie_word_embeddings=False,\r\n rope_scaling=None,\r\n **kwargs,\r\n ):\r\n self.vocab_size = vocab_size\r\n self.max_position_embeddings = max_position_embeddings\r\n self.hidden_size = hidden_size\r\n self.intermediate_size = intermediate_size\r\n self.num_hidden_layers = num_hidden_layers\r\n self.num_attention_heads = num_attention_heads\r\n\r\n # for backward compatibility\r\n if num_key_value_heads is None:\r\n num_key_value_heads = num_attention_heads\r\n\r\n self.num_key_value_heads = num_key_value_heads\r\n self.hidden_act = hidden_act\r\n self.initializer_range = initializer_range\r\n self.rms_norm_eps = rms_norm_eps\r\n self.pretraining_tp = pretraining_tp\r\n self.use_cache = use_cache\r\n self.rope_scaling = rope_scaling\r\n self._rope_scaling_validation()\r\n\r\n super().__init__(\r\n pad_token_id=pad_token_id,\r\n bos_token_id=bos_token_id,\r\n eos_token_id=eos_token_id,\r\n tie_word_embeddings=tie_word_embeddings,\r\n **kwargs,\r\n )\r\n\r\n def _rope_scaling_validation(self):\r\n \"\"\"\r\n Validate the `rope_scaling` configuration.\r\n \"\"\"\r\n if self.rope_scaling is None:\r\n return\r\n\r\n if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:\r\n raise ValueError(\r\n \"`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, \"\r\n f\"got {self.rope_scaling}\"\r\n )\r\n rope_scaling_type = self.rope_scaling.get(\"type\", None)\r\n rope_scaling_factor = self.rope_scaling.get(\"factor\", None)\r\n if rope_scaling_type is None or rope_scaling_type not in [\"linear\", \"dynamic\"]:\r\n raise ValueError(\r\n f\"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}\"\r\n )\r\n if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:\r\n raise ValueError(f\"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}\")"
}
] | import argparse
import json
import os
import torch
import numpy as np
import wandb
from safetensors import safe_open
from accelerate import Accelerator
from accelerate.utils import set_seed
from model.cnets import Model
from model.configs import EConfig
from typing import Any, Dict, List
from torch import nn, optim
from torch.utils.data import Dataset, DataLoader
from tqdm import tqdm
from transformers import get_linear_schedule_with_warmup, AutoConfig
| 9,193 | max_length = max(item['hidden_state_big'].shape[1] for item in features)
batch_input_ids = torch.cat([self.paddingtensor2D(item['input_ids'], max_length) for item in features])
batch_hidden_states = torch.cat([self.paddingtensor(item['hidden_state_big'], max_length) for item in features])
batch_target = torch.cat([self.paddingtensor(item['target'], max_length) for item in features])
batch_loss_mask = torch.tensor(
[item['loss_mask'] + [0] * (max_length - len(item['loss_mask'])) for item in features])
batch_attention_mask = torch.tensor(
[item['attention_mask'] + [0] * (max_length - len(item['attention_mask'])) for item in features])
# batch_loss_mask = torch.ones_like(batch_loss_mask)
# batch_attention_mask=torch.ones_like(batch_attention_mask)
batch = {
"input_ids": batch_input_ids,
"hidden_states": batch_hidden_states,
"target": batch_target,
"attention_mask": batch_attention_mask,
"loss_mask": batch_loss_mask,
}
return batch
def top_accuracy(output, target, topk=(1,)):
# output.shape (bs, num_classes), target.shape (bs, )
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k)
return res
@torch.no_grad()
def getkacc(model, data, head, max_length=5):
hidden_states = data["hidden_states"]
input_ids = data["input_ids"]
# attention_mask=data["attention_mask"]
loss_mask = data["loss_mask"]
# sample_mask=data["sample_mask"]
target = data["target"]
total = [0 for _ in range(max_length)]
correct = [0 for _ in range(max_length)]
bs, sl = hidden_states.shape[0], hidden_states.shape[1]
target_headout = head(target)
hidden_states_headout = head(hidden_states)
for i in range(bs):
for j in range(sl):
single_hidden_states = hidden_states[i, :j]
single_input_ids = input_ids[i, :j]
single_hidden_states = single_hidden_states[None, :, :]
single_input_ids = single_input_ids[None, :]
for k in range(max_length):
if loss_mask[i, single_hidden_states.shape[1] - 1] == 0:
break
tmp_in_target_headout = hidden_states_headout[i, single_hidden_states.shape[1] - 1]
tmp_out_target_headout = target_headout[i, single_hidden_states.shape[1] - 1]
target_in_token = torch.argmax(tmp_in_target_headout)
target_out_token = torch.argmax(tmp_out_target_headout)
tmp_token = input_ids[i, single_hidden_states.shape[1] - 1]
# tmp_sample_mask=sample_mask[i,single_hidden_states.shape[1]-1]
if not (target_in_token == tmp_token):
break
out_hidden = model(single_hidden_states, input_ids=single_input_ids)
last_hidden = out_hidden[:, -1]
last_headout = head(last_hidden)
token = torch.argmax(last_headout)
total[k] += 1
if token == target_out_token:
correct[k] += 1
else:
for kk in range(k + 1, max_length):
total[kk] += 1
break
single_hidden_states = torch.cat((single_hidden_states, out_hidden[:, -1:]), dim=1)
single_input_ids = torch.cat((single_input_ids, torch.tensor([[token]]).to(single_input_ids.device)),
dim=1)
acc = [correct[i] / total[i] for i in range(len(correct))]
return acc
if train_config["data_noise"]:
if train_config["noise"] == "uniform":
aug = AddUniformNoise(std=train_config["std"])
else:
aug = AddGaussianNoise(mean=train_config["mean"], std=train_config["std"])
else:
aug = None
datapath = list_files(train_config["datapath"])
traindatapath = datapath[:int(len(datapath) * 0.95)]
testdatapath = datapath[int(len(datapath) * 0.95):]
# print('td',train_config["datapath"])
# print(datapath)
# exit()
traindataset = CustomDataset(traindatapath, transform=aug)
testdataset = CustomDataset(testdatapath)
train_loader = DataLoader(traindataset, batch_size=train_config["bs"], shuffle=True,
collate_fn=DataCollatorWithPadding(), num_workers=train_config["num_workers"],
pin_memory=True)
test_loader = DataLoader(testdataset, batch_size=train_config["bs"], shuffle=False,
collate_fn=DataCollatorWithPadding(), num_workers=train_config["num_workers"], pin_memory=True)
# for batch_data in train_loader:
# print(batch_data)
if accelerator.is_main_process:
if not os.path.exists(args.cpdir):
os.makedirs(args.cpdir)
|
parser = argparse.ArgumentParser(description='sp')
parser.add_argument('--basepath', type=str, default='/home/lyh/weights/hf/vicuna_v13/7B/')
parser.add_argument('--configpath', type=str, default="config.json")
parser.add_argument('--lr', type=float, default=3e-5)
parser.add_argument('--bs', type=int, default=4)
parser.add_argument('--gradient-accumulation-steps', type=int, default=8)
parser.add_argument('--tmpdir', type=str, default='0')
parser.add_argument('--outdir', type=str, default='0')
parser.add_argument('--cpdir', type=str, default='0')
args = parser.parse_args()
train_config = {
"lr": args.lr,
"bs": args.bs,
"gradient_accumulation_steps": args.gradient_accumulation_steps,
"datapath": f"{args.tmpdir}",
"is_warmup": True,
"num_epochs": 20,
    # Number of training epochs; larger models are more sample-efficient, so a value between 20 and 40 is recommended depending on your data and model size.
"num_warmup_steps": 2000,
"total_steps": 800000,
"p_w": 0.1,
"v_w": 1.0,
"head_w": 0.1,
"num_workers": 2,
"embeding": True,
"act": "No",
"data_noise": True,
"noise": "uniform",
"mean": 0.0,
"std": 0.2,
"residual": "true,norm",
"max_len": 2048,
    # Training sequences are truncated to this length; a larger value keeps more of each sequence (more training data and usually better results) but consumes more VRAM.
"config_path": args.configpath,
"b1": 0.9,
"b2": 0.95,
"grad_clip": 0.5,
"save_freq": 5
}
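# [Editor's note] A minimal, hypothetical helper (not part of the original script) illustrating how
# the effective batch size follows from the config above: the per-process batch size "bs" is scaled
# by the gradient accumulation steps and by the number of accelerator processes.
def effective_batch_size(per_device_bs: int, grad_accum_steps: int, num_processes: int = 1) -> int:
    # e.g. bs=4, gradient_accumulation_steps=8 on a single GPU -> 32 samples per optimizer step
    return per_device_bs * grad_accum_steps * num_processes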
# from transformers import AutoModelForCausalLM, AutoTokenizer,AutoModelForSequenceClassification
# os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
torch.backends.cuda.matmul.allow_tf32 = True
set_seed(0)
accelerator = Accelerator(mixed_precision='bf16',
gradient_accumulation_steps=train_config["gradient_accumulation_steps"])
# import accelerate
if accelerator.is_main_process:
wandb.init(project="ess", entity="yuhui-li", config=train_config)
baseconfig = AutoConfig.from_pretrained(args.basepath)
head = torch.nn.Linear(baseconfig.hidden_size, baseconfig.vocab_size, bias=False)
try:
with open(os.path.join(args.basepath, "model.safetensors.index.json"), "r") as f:
index_json = json.loads(f.read())
head_path = index_json["weight_map"]["lm_head.weight"]
with safe_open(os.path.join(args.basepath, head_path),
framework="pt",
device="cpu") as f:
tensor_slice = f.get_slice("lm_head.weight")
vocab_size, hidden_dim = tensor_slice.get_shape()
tensor = tensor_slice[:, :hidden_dim].float()
except:
with open(os.path.join(args.basepath, "pytorch_model.bin.index.json"), "r") as f:
index_json = json.loads(f.read())
head_path = index_json["weight_map"]["lm_head.weight"]
weights = torch.load(os.path.join(args.basepath, head_path))
tensor = weights["lm_head.weight"].float()
head.weight.data = tensor
head.eval()
for param in head.parameters():
param.requires_grad = False
def list_files(path):
datapath = []
for root, directories, files in os.walk(path):
for file in files:
file_path = os.path.join(root, file)
datapath.append(file_path)
return datapath
class AddGaussianNoise:
def __init__(self, mean=0.0, std=0.0):
self.mean = mean
self.std = std
def __call__(self, data):
tensor = data["hidden_state_big"]
noise = torch.randn(tensor.size()) * self.std + self.mean
noisy_tensor = tensor + noise
data["hidden_state_big"] = noisy_tensor
return data
class AddUniformNoise:
def __init__(self, std=0.0):
self.std = std
def __call__(self, data):
tensor = data["hidden_state_big"]
noise = (torch.rand_like(tensor) - 0.5) * self.std * 512 / tensor.shape[1]
noisy_tensor = tensor + noise
data["hidden_state_big"] = noisy_tensor
return data
class CustomDataset(Dataset):
def __init__(self, datapath, transform=None):
self.data = datapath
self.transform = transform
def __len__(self):
return len(self.data)
def __getitem__(self, index):
# try:
data = torch.load(self.data[index])
new_data = {}
hidden_state = data['hidden_state'][:train_config["max_len"]][None, :]
input_ids = data['input_ids'][:train_config["max_len"]][None, :]
loss_mask = data["loss_mask"][:train_config["max_len"]][None, :]
# except:
# with open("error_path.txt", "w") as file:
# file.write(self.data[index])
# print('error path',self.data[index])
length = hidden_state.shape[1]
# length_q = data['query_ids'].shape[1]
attention_mask = [1] * length
loss_mask = loss_mask[0].tolist()
loss_mask[-1] = 0
input_ids_target = input_ids[:, 1:]
zeropadding = torch.tensor([[0]])
input_ids_target = torch.cat((input_ids_target, zeropadding), dim=1)
target = hidden_state[:, 1:, :]
zeropadding = torch.zeros(1, 1, target.shape[2])
target = torch.cat((target, zeropadding), dim=1)
loss_mask[-1] = 0
new_data["attention_mask"] = attention_mask
new_data["loss_mask"] = loss_mask
new_data["target"] = target
new_data["hidden_state_big"] = hidden_state
new_data["input_ids"] = input_ids_target
# sample = torch.cat((data['xs'],data['xb']))
# sample=torch.cat((self.data[index]['x'],self.data[index]['logits']))
# label = data['y']
if self.transform:
new_data = self.transform(new_data)
return new_data
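# [Editor's note] Illustrative comment (not part of the original code): for a token sequence
# [t0, t1, ..., tN], __getitem__ above pairs the hidden state at position i with the *next* token id
# (input_ids shifted left by one) and the *next* hidden state as the regression target, so the
# draft model learns to predict the following step; the last position is zero-padded and excluded
# from the loss via loss_mask[-1] = 0.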
class DataCollatorWithPadding:
def paddingtensor(self, intensors, N):
B, n, S = intensors.shape
# padding_tensor = torch.zeros(B, N - n, S,dtype=intensors.dtype)
padding_tensor = torch.zeros(B, N - n, S)
outtensors = torch.cat((intensors, padding_tensor), dim=1)
return outtensors
def paddingtensor2D(self, intensors, N):
B, n = intensors.shape
padding_tensor = torch.zeros(B, N - n, dtype=intensors.dtype)
outtensors = torch.cat((intensors, padding_tensor), dim=1)
return outtensors
def __call__(self, features: List[Dict[str, Any]]) -> Dict[str, Any]:
max_length = max(item['hidden_state_big'].shape[1] for item in features)
batch_input_ids = torch.cat([self.paddingtensor2D(item['input_ids'], max_length) for item in features])
batch_hidden_states = torch.cat([self.paddingtensor(item['hidden_state_big'], max_length) for item in features])
batch_target = torch.cat([self.paddingtensor(item['target'], max_length) for item in features])
batch_loss_mask = torch.tensor(
[item['loss_mask'] + [0] * (max_length - len(item['loss_mask'])) for item in features])
batch_attention_mask = torch.tensor(
[item['attention_mask'] + [0] * (max_length - len(item['attention_mask'])) for item in features])
# batch_loss_mask = torch.ones_like(batch_loss_mask)
# batch_attention_mask=torch.ones_like(batch_attention_mask)
batch = {
"input_ids": batch_input_ids,
"hidden_states": batch_hidden_states,
"target": batch_target,
"attention_mask": batch_attention_mask,
"loss_mask": batch_loss_mask,
}
return batch
def top_accuracy(output, target, topk=(1,)):
# output.shape (bs, num_classes), target.shape (bs, )
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k)
return res
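# [Editor's note] A small, hypothetical convenience wrapper (not part of the original script),
# assuming the caller wants accuracies in [0, 1] rather than the raw correct counts that
# top_accuracy returns for each k.
def top_accuracy_fraction(output, target, topk=(1,)):
    counts = top_accuracy(output, target, topk=topk)  # list of 1-element tensors, one per k
    batch_size = target.size(0)
    return [c.item() / batch_size for c in counts]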
@torch.no_grad()
def getkacc(model, data, head, max_length=5):
hidden_states = data["hidden_states"]
input_ids = data["input_ids"]
# attention_mask=data["attention_mask"]
loss_mask = data["loss_mask"]
# sample_mask=data["sample_mask"]
target = data["target"]
total = [0 for _ in range(max_length)]
correct = [0 for _ in range(max_length)]
bs, sl = hidden_states.shape[0], hidden_states.shape[1]
target_headout = head(target)
hidden_states_headout = head(hidden_states)
for i in range(bs):
for j in range(sl):
single_hidden_states = hidden_states[i, :j]
single_input_ids = input_ids[i, :j]
single_hidden_states = single_hidden_states[None, :, :]
single_input_ids = single_input_ids[None, :]
for k in range(max_length):
if loss_mask[i, single_hidden_states.shape[1] - 1] == 0:
break
tmp_in_target_headout = hidden_states_headout[i, single_hidden_states.shape[1] - 1]
tmp_out_target_headout = target_headout[i, single_hidden_states.shape[1] - 1]
target_in_token = torch.argmax(tmp_in_target_headout)
target_out_token = torch.argmax(tmp_out_target_headout)
tmp_token = input_ids[i, single_hidden_states.shape[1] - 1]
# tmp_sample_mask=sample_mask[i,single_hidden_states.shape[1]-1]
if not (target_in_token == tmp_token):
break
out_hidden = model(single_hidden_states, input_ids=single_input_ids)
last_hidden = out_hidden[:, -1]
last_headout = head(last_hidden)
token = torch.argmax(last_headout)
total[k] += 1
if token == target_out_token:
correct[k] += 1
else:
for kk in range(k + 1, max_length):
total[kk] += 1
break
single_hidden_states = torch.cat((single_hidden_states, out_hidden[:, -1:]), dim=1)
single_input_ids = torch.cat((single_input_ids, torch.tensor([[token]]).to(single_input_ids.device)),
dim=1)
acc = [correct[i] / total[i] for i in range(len(correct))]
return acc
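# [Editor's note] Hypothetical sketch (not part of the original script) of how getkacc could be
# used for evaluation, assuming batches from a loader like the test_loader defined below are
# already on the model's device; it averages the per-depth accuracies over batches.
def evaluate_kacc(model, head, loader, max_length=5):
    accs = []
    for batch in loader:
        accs.append(getkacc(model, batch, head, max_length=max_length))  # list of length max_length
    return np.mean(np.array(accs), axis=0)  # mean accuracy at speculative depths 1..max_length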
if train_config["data_noise"]:
if train_config["noise"] == "uniform":
aug = AddUniformNoise(std=train_config["std"])
else:
aug = AddGaussianNoise(mean=train_config["mean"], std=train_config["std"])
else:
aug = None
datapath = list_files(train_config["datapath"])
traindatapath = datapath[:int(len(datapath) * 0.95)]
testdatapath = datapath[int(len(datapath) * 0.95):]
# print('td',train_config["datapath"])
# print(datapath)
# exit()
traindataset = CustomDataset(traindatapath, transform=aug)
testdataset = CustomDataset(testdatapath)
train_loader = DataLoader(traindataset, batch_size=train_config["bs"], shuffle=True,
collate_fn=DataCollatorWithPadding(), num_workers=train_config["num_workers"],
pin_memory=True)
test_loader = DataLoader(testdataset, batch_size=train_config["bs"], shuffle=False,
collate_fn=DataCollatorWithPadding(), num_workers=train_config["num_workers"], pin_memory=True)
# for batch_data in train_loader:
# print(batch_data)
if accelerator.is_main_process:
if not os.path.exists(args.cpdir):
os.makedirs(args.cpdir)
| config = EConfig.from_pretrained(train_config["config_path"])
| 1 | 2023-12-07 19:08:39+00:00 | 12k |
zju3dv/EasyVolcap | scripts/ray_tracing/ray_tracing.py | [
{
"identifier": "log",
"path": "easyvolcap/utils/console_utils.py",
"snippet": "def log(*stuff,\n back=1,\n file: Optional[IO[str]] = None,\n no_prefix=False,\n module_color=blue,\n func_color=green,\n console: Optional[Console] = console,\n **kwargs):\n \"\"\"\n Perform logging using the built in shared logger\n \"\"\"\n writer = console if file is None else Console(file=file, soft_wrap=True, tab_size=4, log_time_format=verbose_time_format) # shared\n writer._log_render.time_format = verbose_time_format if verbose_log else slim_time_format\n if no_prefix or not verbose_log: writer.log(*stuff, _stack_offset=2, **kwargs)\n else: writer.log(get_log_prefix(back + 1, module_color, func_color), *stuff, _stack_offset=2, **kwargs)"
},
{
"identifier": "dotdict",
"path": "easyvolcap/utils/base_utils.py",
"snippet": "class dotdict(dict, Dict[KT, VT]):\n \"\"\"\n This is the default data passing object used throughout the codebase\n Main function: dot access for dict values & dict like merging and updates\n\n a dictionary that supports dot notation \n as well as dictionary access notation \n usage: d = make_dotdict() or d = make_dotdict{'val1':'first'})\n set attributes: d.val2 = 'second' or d['val2'] = 'second'\n get attributes: d.val2 or d['val2']\n \"\"\"\n\n def update(self, dct: Dict = None, **kwargs):\n dct = copy(dct) # avoid modifying the original dict, use super's copy to avoid recursion\n\n # Handle different arguments\n if dct is None:\n dct = kwargs\n elif isinstance(dct, Mapping):\n dct.update(kwargs)\n else:\n super().update(dct, **kwargs)\n return\n\n # Recursive updates\n for k, v in dct.items():\n if k in self:\n\n # Handle type conversions\n target_type = type(self[k])\n if not isinstance(v, target_type):\n # NOTE: bool('False') will be True\n if target_type == bool and isinstance(v, str):\n dct[k] = v == 'True'\n else:\n dct[k] = target_type(v)\n\n if isinstance(v, dict):\n self[k].update(v) # recursion from here\n else:\n self[k] = v\n else:\n if isinstance(v, dict):\n self[k] = dotdict(v) # recursion?\n else:\n self[k] = v\n return self\n\n def __init__(self, *args, **kwargs):\n self.update(*args, **kwargs)\n\n copy = return_dotdict(dict.copy)\n fromkeys = return_dotdict(dict.fromkeys)\n\n # def __hash__(self):\n # # return hash(''.join([str(self.values().__hash__())]))\n # return super(dotdict, self).__hash__()\n\n # def __init__(self, *args, **kwargs):\n # super(dotdict, self).__init__(*args, **kwargs)\n\n \"\"\"\n Uncomment following lines and \n comment out __getattr__ = dict.__getitem__ to get feature:\n \n returns empty numpy array for undefined keys, so that you can easily copy things around\n TODO: potential caveat, harder to trace where this is set to np.array([], dtype=np.float32)\n \"\"\"\n\n def __getitem__(self, key):\n try:\n return dict.__getitem__(self, key)\n except KeyError as e:\n raise AttributeError(e)\n # MARK: Might encounter exception in newer version of pytorch\n # Traceback (most recent call last):\n # File \"/home/xuzhen/miniconda3/envs/torch/lib/python3.9/multiprocessing/queues.py\", line 245, in _feed\n # obj = _ForkingPickler.dumps(obj)\n # File \"/home/xuzhen/miniconda3/envs/torch/lib/python3.9/multiprocessing/reduction.py\", line 51, in dumps\n # cls(buf, protocol).dump(obj)\n # KeyError: '__getstate__'\n # MARK: Because you allow your __getattr__() implementation to raise the wrong kind of exception.\n # FIXME: not working typing hinting code\n __getattr__: Callable[..., 'torch.Tensor'] = __getitem__ # type: ignore # overidden dict.__getitem__\n __getattribute__: Callable[..., 'torch.Tensor'] # type: ignore\n # __getattr__ = dict.__getitem__\n __setattr__ = dict.__setitem__\n __delattr__ = dict.__delitem__\n\n # TODO: better ways to programmically define these special variables?\n\n @property\n def meta(self) -> dotdict:\n # Special variable used for storing cpu tensor in batch\n if 'meta' not in self:\n self.meta = dotdict()\n return self.__getitem__('meta')\n\n @meta.setter\n def meta(self, meta):\n self.__setitem__('meta', meta)\n\n @property\n def output(self) -> dotdict: # late annotation needed for this\n # Special entry for storing output tensor in batch\n if 'output' not in self:\n self.output = dotdict()\n return self.__getitem__('output')\n\n @output.setter\n def output(self, output):\n self.__setitem__('output', output)\n\n 
@property\n def persistent(self) -> dotdict: # late annotation needed for this\n # Special entry for storing persistent tensor in batch\n if 'persistent' not in self:\n self.persistent = dotdict()\n return self.__getitem__('persistent')\n\n @persistent.setter\n def persistent(self, persistent):\n self.__setitem__('persistent', persistent)\n\n @property\n def type(self) -> str: # late annotation needed for this\n # Special entry for type based construction system\n return self.__getitem__('type')\n\n @type.setter\n def type(self, type):\n self.__setitem__('type', type)\n\n def to_dict(self):\n out = dict()\n for k, v in self.items():\n if isinstance(v, dotdict):\n v = v.to_dict() # recursion point\n out[k] = v\n return out"
},
{
"identifier": "read_camera",
"path": "easyvolcap/utils/easy_utils.py",
"snippet": "def read_camera(intri_path: str, extri_path: str = None, cam_names=[]) -> dotdict:\n if extri_path is None:\n extri_path = os.path.join(intri_path, 'extri.yml')\n intri_path = os.path.join(intri_path, 'intri.yml')\n assert os.path.exists(intri_path), intri_path\n assert os.path.exists(extri_path), extri_path\n\n intri = FileStorage(intri_path)\n extri = FileStorage(extri_path)\n cams = dotdict()\n cam_names = intri.read('names', dt='list')\n for cam in cam_names:\n # Intrinsics\n cams[cam] = dotdict()\n cams[cam].K = intri.read('K_{}'.format(cam))\n cams[cam].H = intri.read('H_{}'.format(cam), dt='real') or -1\n cams[cam].W = intri.read('W_{}'.format(cam), dt='real') or -1\n cams[cam].invK = np.linalg.inv(cams[cam]['K'])\n\n # Extrinsics\n Tvec = extri.read('T_{}'.format(cam))\n Rvec = extri.read('R_{}'.format(cam))\n if Rvec is not None: R = cv2.Rodrigues(Rvec)[0]\n else:\n R = extri.read('Rot_{}'.format(cam))\n Rvec = cv2.Rodrigues(R)[0]\n RT = np.hstack((R, Tvec))\n\n cams[cam].R = R\n cams[cam].T = Tvec\n cams[cam].C = - Rvec.T @ Tvec\n cams[cam].RT = RT\n cams[cam].Rvec = Rvec\n cams[cam].P = cams[cam].K @ cams[cam].RT\n\n # Distortion\n D = intri.read('D_{}'.format(cam))\n if D is None: D = intri.read('dist_{}'.format(cam))\n cams[cam].D = D\n\n # Time input\n cams[cam].t = extri.read('t_{}'.format(cam), dt='real') or 0 # temporal index, might all be 0\n cams[cam].v = extri.read('v_{}'.format(cam), dt='real') or 0 # temporal index, might all be 0\n\n # Bounds, could be overwritten\n cams[cam].n = extri.read('n_{}'.format(cam), dt='real') or 0.0001 # temporal index, might all be 0\n cams[cam].f = extri.read('f_{}'.format(cam), dt='real') or 1e6 # temporal index, might all be 0\n cams[cam].bounds = extri.read('bounds_{}'.format(cam))\n cams[cam].bounds = np.array([[-1e6, -1e6, -1e6], [1e6, 1e6, 1e6]]) if cams[cam].bounds is None else cams[cam].bounds\n\n # CCM\n cams[cam].ccm = intri.read('ccm_{}'.format(cam))\n cams[cam].ccm = np.eye(3) if cams[cam].ccm is None else cams[cam].ccm\n\n # # Average\n # avg_c2w_R = extri.read('avg_c2w_R')\n # avg_c2w_T = extri.read('avg_c2w_T')\n # if avg_c2w_R is not None: cams.avg_c2w_R = avg_c2w_R\n # if avg_c2w_T is not None: cams.avg_c2w_T = avg_c2w_T\n\n return dotdict(cams)"
},
{
"identifier": "parallel_execution",
"path": "easyvolcap/utils/parallel_utils.py",
"snippet": "def parallel_execution(*args, action: Callable, num_workers=32, print_progress=False, sequential=False, async_return=False, desc=None, use_process=False, **kwargs):\n \"\"\"\n Executes a given function in parallel using threads or processes.\n When using threads, the parallelism is achieved during IO blocking (i.e. when loading images from disk or writing something to disk).\n If your task is compute intensive, consider using packages like numpy or torch since they release the GIL during heavy lifting.\n\n Args:\n *args: Variable length argument list.\n action (Callable): The function to execute in parallel.\n num_workers (int): The number of worker threads or processes to use.\n print_progress (bool): Whether to print a progress bar.\n sequential (bool): Whether to execute the function sequentially instead of in parallel.\n async_return (bool): Whether to return a pool object for asynchronous results.\n desc (str): The description to use for the progress bar.\n use_process (bool): Whether to use processes instead of threads.\n **kwargs: Arbitrary keyword arguments.\n\n Returns:\n If `async_return` is False, returns a list of the results of executing the function on each input argument.\n If `async_return` is True, returns a pool object for asynchronous results.\n \"\"\"\n\n # https://superfastpython.com/threadpool-python/\n # Python threads are well suited for use with IO-bound tasks\n # MARK: DO NOT USE THIS FOR CPU BOUND TASK. THIS IS A CHEAP \"THREAD\" POOL WHICH SUCCUMBS TO PYTHON GIL\n # MARK: USE POOL INTEAD OF THREAD POOL IF THAT IS THE CASE\n # NOTE: we expect first arg / or kwargs to be distributed\n # NOTE: print_progress arg is reserved\n\n def get_length(args: List, kwargs: Dict):\n for a in args:\n if isinstance(a, list):\n return len(a)\n for v in kwargs.values():\n if isinstance(v, list):\n return len(v)\n raise NotImplementedError\n\n def get_action_args(length: int, args: List, kwargs: Dict, i: int):\n action_args = [(arg[i] if isinstance(arg, list) and len(arg) == length else arg) for arg in args]\n # TODO: Support all types of iterable\n action_kwargs = {key: (kwargs[key][i] if isinstance(kwargs[key], list) and len(kwargs[key]) == length else kwargs[key]) for key in kwargs}\n return action_args, action_kwargs\n\n if not sequential:\n # Create ThreadPool\n if use_process:\n pool = Pool(processes=num_workers)\n else:\n pool = ThreadPool(processes=num_workers)\n\n # Spawn threads\n results = []\n asyncs = []\n length = get_length(args, kwargs)\n for i in range(length):\n action_args, action_kwargs = get_action_args(length, args, kwargs, i)\n async_result = pool.apply_async(action, action_args, action_kwargs)\n asyncs.append(async_result)\n\n # Join threads and get return values\n if not async_return:\n for async_result in tqdm(asyncs, back=3, desc=desc, disable=not print_progress): # log previous frame\n results.append(async_result.get()) # will sync the corresponding thread\n pool.close()\n pool.join()\n return results\n else:\n return pool\n else:\n results = []\n length = get_length(args, kwargs)\n for i in tqdm(range(length), back=3, desc=desc, disable=not print_progress): # log previous frame\n action_args, action_kwargs = get_action_args(length, args, kwargs, i)\n async_result = action(*action_args, **action_kwargs)\n results.append(async_result)\n return results"
},
{
"identifier": "load_mesh",
"path": "easyvolcap/utils/data_utils.py",
"snippet": "def load_mesh(filename: str, device='cuda', load_uv=False, load_aux=False, backend='pytorch3d'):\n from pytorch3d.io import load_ply, load_obj\n if backend == 'trimesh':\n import trimesh\n mesh: trimesh.Trimesh = trimesh.load(filename)\n return mesh.vertices, mesh.faces\n\n vm, fm = None, None\n if filename.endswith('.npz'):\n mesh = np.load(filename)\n v = torch.from_numpy(mesh['verts'])\n f = torch.from_numpy(mesh['faces'])\n\n if load_uv:\n vm = torch.from_numpy(mesh['uvs'])\n fm = torch.from_numpy(mesh['uvfaces'])\n else:\n if filename.endswith('.ply'):\n v, f = load_ply(filename)\n elif filename.endswith('.obj'):\n v, faces_attr, aux = load_obj(filename)\n f = faces_attr.verts_idx\n\n if load_uv:\n vm = aux.verts_uvs\n fm = faces_attr.textures_idx\n else:\n raise NotImplementedError(f'Unrecognized input format for: {filename}')\n\n v = v.to(device, non_blocking=True).contiguous()\n f = f.to(device, non_blocking=True).contiguous()\n\n if load_uv:\n vm = vm.to(device, non_blocking=True).contiguous()\n fm = fm.to(device, non_blocking=True).contiguous()\n\n if load_uv:\n if load_aux:\n return v, f, vm, fm, aux\n else:\n return v, f, vm, fm\n else:\n return v, f"
},
{
"identifier": "save_unchanged",
"path": "easyvolcap/utils/data_utils.py",
"snippet": "def save_unchanged(img_path: str, img: np.ndarray, quality=100, compression=6):\n if img.shape[-1] >= 3:\n img[..., :3] = img[..., [2, 1, 0]]\n if img_path.endswith('.hdr'):\n return cv2.imwrite(img_path, img) # nothing to say about hdr\n if os.path.dirname(img_path):\n os.makedirs(os.path.dirname(img_path), exist_ok=True)\n return cv2.imwrite(img_path, img, [cv2.IMWRITE_JPEG_QUALITY, quality, cv2.IMWRITE_PNG_COMPRESSION, compression])"
},
{
"identifier": "multi_gather_tris",
"path": "easyvolcap/utils/net_utils.py",
"snippet": "def multi_gather_tris(v: torch.Tensor, f: torch.Tensor, dim=-2) -> torch.Tensor:\n # compute faces normals w.r.t the vertices (considering batch dimension)\n if v.ndim == (f.ndim + 1): f = f[None].expand(v.shape[0], *f.shape)\n # assert verts.shape[0] == faces.shape[0]\n shape = torch.tensor(v.shape)\n remainder = shape.flip(0)[:(len(shape) - dim - 1) % len(shape)]\n return multi_gather(v, f.view(*f.shape[:-2], -1), dim=dim).view(*f.shape, *remainder) # B, F, 3, 3"
},
{
"identifier": "normalize",
"path": "easyvolcap/utils/net_utils.py",
"snippet": "@torch.jit.script\ndef normalize(x: torch.Tensor, eps: float = 1e-8) -> torch.Tensor:\n # channel last: normalization\n return x / (torch.norm(x, dim=-1, keepdim=True) + eps)"
},
{
"identifier": "read_hdr",
"path": "easyvolcap/utils/relight_utils.py",
"snippet": "def read_hdr(path):\n # TODO: will this support openexr? could not find valid openexr python binding\n # TODO: implement saving in hdr format\n \"\"\"Reads an HDR map from disk.\n\n Args:\n path (str): Path to the .hdr file.\n\n Returns:\n numpy.ndarray: Loaded (float) HDR map with RGB channels in order.\n \"\"\"\n with open(path, 'rb') as h:\n buffer_ = np.fromstring(h.read(), np.uint8)\n bgr = cv2.imdecode(buffer_, cv2.IMREAD_UNCHANGED)\n # bgr = cv2.imread(path, cv2.IMREAD_UNCHANGED)\n rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)\n return rgb.astype(np.float32)"
},
{
"identifier": "sample_envmap_image",
"path": "easyvolcap/utils/relight_utils.py",
"snippet": "def sample_envmap_image(image: torch.Tensor, ray_d: torch.Tensor):\n sh = ray_d.shape\n if image.ndim == 4:\n image = image[0]\n ray_d = ray_d.view(-1, 3)\n # envmap: H, W, C\n # viewdirs: N, 3\n\n # https://github.com/zju3dv/InvRender/blob/45e6cdc5e3c9f092b5d10e2904bbf3302152bb2f/code/model/sg_render.py\n image = image.permute(2, 0, 1).unsqueeze(0)\n\n theta = torch.arccos(ray_d[:, 2]).reshape(-1) - 1e-6\n phi = torch.atan2(ray_d[:, 1], ray_d[:, 0]).reshape(-1) # 0 - pi\n\n # normalize to [-1, 1]\n query_y = (theta / torch.pi) * 2 - 1\n query_x = - phi / torch.pi\n grid = torch.stack((query_x, query_y)).permute(1, 0).unsqueeze(0).unsqueeze(0)\n\n rgb = F.grid_sample(image, grid, align_corners=False, padding_mode='border')\n rgb = rgb.squeeze().permute(1, 0)\n return rgb.view(sh)"
},
{
"identifier": "spher2cart",
"path": "easyvolcap/utils/sh_utils.py",
"snippet": "def spher2cart(theta, phi):\n \"\"\"Convert spherical coordinates into Cartesian coordinates (radius 1).\"\"\"\n r = torch.sin(theta)\n x = r * torch.cos(phi)\n y = r * torch.sin(phi)\n z = torch.cos(theta)\n return torch.stack([x, y, z], dim=-1)"
},
{
"identifier": "spherical_uniform_sampling_upper",
"path": "easyvolcap/utils/sh_utils.py",
"snippet": "def spherical_uniform_sampling_upper(sample_count, device=\"cuda\"):\n # See: https://www.bogotobogo.com/Algorithms/uniform_distribution_sphere.php\n theta = torch.acos(1.0 * torch.rand([sample_count], device=device))\n phi = 2.0 * math.pi * torch.rand([sample_count], device=device)\n return theta, phi"
},
{
"identifier": "render_nvdiffrast",
"path": "easyvolcap/utils/raster_utils.py",
"snippet": "def render_nvdiffrast(verts: torch.Tensor,\n faces: torch.IntTensor,\n attrs: torch.Tensor = None,\n uv: torch.Tensor = None,\n img: torch.Tensor = None,\n # uvfaces: torch.Tensor = None,\n H: int = 512,\n W: int = 512,\n R: torch.Tensor = None,\n T: torch.Tensor = None,\n K: torch.Tensor = None,\n R_S: int = 2,\n pos_gradient_boost: float = 1.0,\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:\n \"\"\"\n Rasterize a mesh using nvdiffrast\n Possibly taking uvmap & texture as input, or just vertex color (rgb)\n\n TODO: implement ranged mode rasterization to get better batching\n\n verts: n_batch, n_verts, 3, or n_batch, n_verts, 4 (possible already in clip space or ndc space)\n faces: n_faces, 3, NOTE: SHARED\n uv: n_batch, n_uv_verts, 2\n img: n_batch, tex_h, tex_w, 3, texture\n H: target image height NOTE: SHARED\n W: target image width NOTE: SHARED\n R: n_batch, 3, 3,\n T: n_batch, 3, 1,\n K: n_batch, 4, 4,\n R_S: render scaling, for better gradients\n\n boost: apply gradient boost to verts\n \"\"\"\n\n if not hasattr(render_nvdiffrast, 'glctx'):\n import nvdiffrast.torch as dr\n try:\n run('python -c \\\"import nvdiffrast.torch as dr; glctx = dr.RasterizeGLContext(output_db=True)\\\"', quite=True) # if this executes without error, we're good to go\n glctx = dr.RasterizeGLContext(output_db=True) # this will not even raise, the program just crashes\n except RuntimeError as e:\n log('Failed to create OpenGL context, please check your OpenGL installation and Nvidia drivers. Will use CUDA context instead. Note that this might cause performance hits.', 'red')\n glctx = dr.RasterizeCudaContext()\n render_nvdiffrast.glctx = glctx\n glctx = render_nvdiffrast.glctx\n\n assert uv is not None and img is not None or attrs is not None # will have to render something out\n assert int(R_S) == R_S # only supports integer upscaling for now\n # assert not (uvfaces is not None and (uv is None or img is None))\n\n # support single mesh rendering on multiple views at the same time\n if verts.ndim == 2 and R is not None:\n verts = verts[None].expand(R.shape[0], *verts.shape)\n if attrs.ndim == 2 and R is not None:\n attrs = attrs[None].expand(R.shape[0], *attrs.shape)\n\n # support changing dtype of faces\n verts = verts.float().contiguous() # might cause excessive memory usage, after expansion\n attrs = attrs.float().contiguous() # might cause excessive memory usage, after expansion\n faces = faces.int().contiguous()\n\n render_attrs = attrs is not None\n render_texture = uv is not None and img is not None\n # unmerge_needed = uvfaces is not None\n\n B = verts.shape[0]\n if render_attrs:\n A = attrs.shape[-1]\n\n # if unmerge_needed:\n # faces, ind_v, ind_uv = unmerge_faces(faces, uvfaces) # different number of vertices and uv, need to merge them\n # verts = verts[ind_v] # 3d locations are also vertex attributes\n # uv = uv[ind_uv] # texture coordinate (and its attributes)\n\n # if render_attrs:\n # attrs = attrs[ind_v] # vertex attributes\n\n # prepare vertices, apply model view projection transformation maybe\n if R is not None and T is not None: # prepare intrinsics and extrinsics\n vverts = verts @ R.mT + T.mT\n padding = vverts.new_ones((*vverts.shape[:-1], 1)) # w -> 1\n homovverts = torch.cat([vverts, padding], dim=-1)\n elif verts.shape[-1] == 3: # manually adding padding of w to homogenccords\n padding = verts.new_ones((*verts.shape[:-1], 1)) # w -> 1\n homovverts = torch.cat([verts, padding], dim=-1)\n else:\n homovverts = verts\n verts = homovverts[..., :-1] / 
homovverts[..., -1:]\n\n # projection into NDC space (will be clipped later)\n if K is None:\n K = get_ndc_perspective_matrix(torch.tensor([\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 0, -2 * 0.001],\n [0, 0, 1, 0],\n ], device=verts.device, dtype=verts.dtype), H, W) # 1, 4, 4 -> 1, 4, 4\n ndcverts = homovverts @ K.mT\n else:\n ndcverts = homovverts @ K.mT\n\n # rasterization (possibly upsampled)\n R_S = int(R_S)\n R_H, R_W = int(H * R_S), int(W * R_S)\n rast, rast_db = dr.rasterize(glctx, ndcverts, faces, resolution=[R_H, R_W])\n\n # downsampled mask\n rend_msk = rast[:, :, :, 3] != 0\n rast_msk_ind = rend_msk.nonzero(as_tuple=True)\n msk = rend_msk[:, ::R_S, ::R_S].contiguous() # nearest interpolation\n msk = msk.view(B, H * W) # flatten\n\n # perform texture interpolation\n if render_texture and render_attrs:\n interp = torch.cat([attrs, uv], dim=-1)\n interp, puv_db = dr.interpolate(interp, rast, faces, rast_db=rast_db, diff_attrs=[A, A + 1])\n pattrs, puv = interp.split([A, 2], dim=-1)\n elif render_texture and not render_attrs:\n puv, puv_db = dr.interpolate(uv, rast, faces, rast_db=rast_db, diff_attrs=[0, 1])\n else:\n pattrs, pattrs_db = dr.interpolate(attrs, rast, faces, rast_db=rast_db)\n\n if render_texture:\n prgb = dr.texture(img.flip(1), puv.contiguous(), puv_db.contiguous(), max_mip_level=8)\n # filter unwanted rgb output\n full_prgb = torch.zeros_like(prgb)\n full_prgb[rast_msk_ind] = prgb[rast_msk_ind]\n prgb = full_prgb\n\n # prepare for anti aliasing\n rend_msk = rend_msk.view(B, R_H, R_W, 1)\n if render_texture and render_attrs:\n aa = torch.cat([pattrs, prgb, rend_msk], dim=-1)\n elif render_texture and not render_attrs:\n aa = torch.cat([prgb, rend_msk], dim=-1)\n else:\n aa = torch.cat([pattrs, rend_msk], dim=-1)\n\n # perform antialiasing\n aa = dr.antialias(aa, rast, ndcverts, faces, pos_gradient_boost=pos_gradient_boost) # nvdiffrast wants float tensor as input\n aa = bilinear_interpolation(aa, [H, W])\n\n # return results\n if render_texture and render_attrs:\n rast_attrs, rast_img, rast_msk = aa.split([A, 3, 1], dim=-1)\n elif render_texture and not render_attrs:\n rast_img, rast_msk = aa.split([3, 1], dim=-1)\n else:\n rast_attrs, rast_msk = aa.split([A, 1], dim=-1)\n\n rast_msk = rast_msk[..., 0]\n if render_texture and render_attrs:\n return rast_attrs, rast_img, rast_msk\n elif render_texture and not render_attrs:\n return rast_img, rast_msk\n else:\n return rast_attrs, rast_msk"
},
{
"identifier": "get_ndc_perspective_matrix",
"path": "easyvolcap/utils/raster_utils.py",
"snippet": "def get_ndc_perspective_matrix(K: torch.Tensor,\n H: int,\n W: int,\n n: torch.Tensor = 0.002, # near far bound\n f: torch.Tensor = 100, # near far bound\n ):\n \"\"\"\n Note: This is not a academically accurate projection matrix, see z\n Get the perspective matrix that projects camera space points to ndc [-1,1]^3 space\n K[0, 0] and K[1, 1] should be the focal length multiplied by pixel per meter\n x: [-1, -1] should * 2 / W and add W / 2 to be in the center\n y: [-1, -1] should * 2 / H and add H / 2 to be in the center\n z: we're assuming the reciprocal to be with in [-1, 1]\n\n OpenGL has y going up, x going right and z going inwards window in ndc space\n Our camera is x going right, y going down and z going away from in ndc space\n `nvdiffrast` says its z increases towards the viewer, just like in OpenGL\n And we need to set the actual z to -1/z to get the actual rendering results\n \"\"\"\n if isinstance(K, torch.Tensor):\n if K.ndim == 3:\n ixt = K.new_zeros(K.shape[0], 4, 4)\n else:\n ixt = K.new_zeros(4, 4)\n elif isinstance(K, np.ndarray):\n if K.ndim == 3:\n ixt = np.zeros((K.shape[0], 4, 4), dtype=K.dtype)\n else:\n ixt = np.zeros((4, 4), dtype=K.dtype)\n else:\n raise NotImplementedError('unsupport data type for K conversion')\n fx = K[..., 0, 0]\n fy = K[..., 1, 1]\n cx = K[..., 0, 2]\n cy = K[..., 1, 2]\n s = K[..., 0, 1]\n\n ixt[..., 0, 0] = 2 * fx / W\n ixt[..., 0, 1] = 2 * s / W\n ixt[..., 0, 2] = 1 - 2 * (cx / W)\n\n ixt[..., 1, 1] = 2 * fy / H\n # ixt[..., 1, 2] = 2 * (cy / H) - 1\n ixt[..., 1, 2] = 1 - 2 * (cy / H)\n\n ixt[..., 2, 2] = (f + n) / (n - f)\n ixt[..., 2, 3] = 2 * f * n / (n - f)\n\n ixt[..., 3, 2] = -1\n\n # ixt[..., 0, 0] = (K[..., 0, 0]) * 2.0 / W # F * Sx / W * 2\n # ixt[..., 1, 1] = (K[..., 1, 1]) * 2.0 / H # F * Sy / H * 2\n # ixt[..., 0, 2] = (K[..., 0, 2] - W / 2.0) * 2.0 / W # Cx / W * 2 - 1\n # ixt[..., 1, 2] = (K[..., 1, 2] - H / 2.0) * 2.0 / H # Cy / H * 2 - 1\n # ixt[..., 2, 2] = 0\n # ixt[..., 2, 3] = -2 * CLIP_NEAR\n # ixt[..., 3, 2] = 1\n # ixt[..., 3, 3] = 0\n\n # @property\n # def gl_ixt(self):\n # # Construct opengl camera matrix with projection & clipping\n # # https://fruty.io/2019/08/29/augmented-reality-with-opencv-and-opengl-the-tricky-projection-matrix/\n # # https://gist.github.com/davegreenwood/3a32d779f81f08dce32f3bb423672191\n # # fmt: off\n # gl_ixt = mat4(\n # 2 * self.fx / self.W, 0, 0, 0,\n # 2 * self.s / self.W, 2 * self.fy / self.H, 0, 0,\n # 1 - 2 * (self.cx / self.W), 2 * (self.cy / self.H) - 1, (self.f + self.n) / (self.n - self.f), -1,\n # 0, 0, 2 * self.f * self.n / (self.n - self.f), 0,\n # )\n # # fmt: on\n\n # return gl_ixt\n\n # fx, 0, 0, cx\n # 0, fy, 0, cy\n # 0, 0, 0, -2C\n # 0, 0, 1, 0\n return ixt"
}
] | import os
import torch
import argparse
import numpy as np
import sys
from tqdm import tqdm
from os.path import join
from termcolor import colored
from bvh_ray_tracing import BVH
from pytorch3d.structures import Meshes
from easyvolcap.utils.console_utils import log
from easyvolcap.utils.base_utils import dotdict
from easyvolcap.utils.easy_utils import read_camera
from easyvolcap.utils.parallel_utils import parallel_execution
from easyvolcap.utils.data_utils import load_mesh, save_unchanged
from easyvolcap.utils.net_utils import multi_gather_tris, normalize
from easyvolcap.utils.relight_utils import read_hdr, sample_envmap_image
from easyvolcap.utils.sh_utils import spher2cart, spherical_uniform_sampling_upper
from easyvolcap.utils.raster_utils import render_nvdiffrast, get_ndc_perspective_matrix | 10,425 | if surf.ndim == 4:
surf = surf.view(surf.shape[0], -1, 3)
if norm.ndim == 4:
norm = norm.view(surf.shape[0], -1, 3)
B, P, _ = surf.shape
T = B * N * P
# Generate T uniform, stratified samples over the upper hemisphere
# See http://www.bogotobogo.com/Algorithms/uniform_distribution_sphere.php
theta, phi = spherical_uniform_sampling_upper(T, device=surf.device) # T, T,
ray_d = spher2cart(theta, phi) # T, 3, z always bigger than zero
# Preparing shapes
norm = norm[:, None].expand(B, N, P, 3).reshape(T, 3) # T, 3
ray_o = surf[:, None].expand(B, N, P, 3).reshape(T, 3) # T, 3
# Transform ray_d to be pointing upward from normal direction
R = torch.zeros([T, 3, 3], device=norm.device)
R[..., 0, 0] = 1.0
R[..., :3, 2] = norm # c2w, z axis is normal direction
R[..., :3, 1] = normalize(torch.cross(R[..., :3, 2], R[..., :3, 0]))
R[..., :3, 0] = normalize(torch.cross(R[..., :3, 1], R[..., :3, 2]))
ray_d = (R @ ray_d[..., None])[..., 0]
# Compute shading
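# ldot is the dot product between each sampled light direction and the surface normal (the cosine foreshortening term when both are unit vectors)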
ldot = (ray_d * norm).sum(dim=-1).reshape(T) # T
def ray_tracing_intersection(ray_o: torch.Tensor, ray_d: torch.Tensor, tris: torch.Tensor) -> torch.Tensor:
# assume all tris in the batch are the same, so only the first is kept
sh = ray_o.shape # B, S, 3
tris = tris[:1] # 1, F, 3, 3
ray_o = ray_o.view(-1, 3)[None] # 1, B * S, 3
ray_d = ray_d.view(-1, 3)[None] # 1, B * S, 3
bvh = BVH() # is this too wasteful, reconstructing the BVH in every iteration?
# pts: 1, P, 3
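# the ray origin is offset by 0.01 along the ray direction before the BVH query, presumably to avoid self-intersection with the originating surface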
dists_sq, points, face_ids, barys = bvh(tris, ray_o + ray_d * 0.01, ray_d) # 1, P, 3
lvis: torch.Tensor = 1 - (dists_sq > 0).float() # all barycentric coordinates are valid -> intersection -> zero vis
lvis = lvis.view(*sh[:-1]) # TODO: messy shapes
lvis = lvis.nan_to_num(0.) # nan_to_num is not in-place, so assign the result back; sometimes the Moller-Trumbore test returns nan...?
return lvis
# Here lren are the indices of the ray-direction / pixel pairs to render
# Perform rendering on the lren ray-pixel pairs
if compute_lvis:
lvis = ray_tracing_intersection(ray_o, ray_d, tris)
else:
lvis = torch.ones_like(ldot)
lvis = lvis.view(B, N, P)
ldot = ldot.view(B, N, P)
ray_d = ray_d.view(B, N, P, 3)
return lvis, ldot, ray_d
def main():
"""
We have a few assumptions about the ground truth rendering process
We require a pivot mesh for textures, and other meshes can be loaded with only the vertices
Since animation should only be about changing the positions of the vertices (without topology warps)
We don't need to render the full model, only
1. Normal (geometry only)
2. Ray-tracing soft shadow (geometry only) (visibility)
3. Albedo (diffuse albedo map)
4. Roughness (roughness value map)
5. Full rendering pipeline? (no, since the material model and indirect illumination are not implemented)
What do we do?
"""
# All other related stuff should have been loaded implicitly from the object file's definition
parser = argparse.ArgumentParser()
parser.add_argument('--mode', type=str, default='normal', choices=['normal', 'depth', 'surf', 'shade', 'ibl', 'ao'])
parser.add_argument('--ext', type=str, default='.obj') # should not change this
parser.add_argument('--device', type=str, default='cuda') # should not change this
# Input output related
parser.add_argument('--data_root', type=str, default='data/synthetic_human/jody')
parser.add_argument('--mesh', type=str, default="object/000000.obj")
parser.add_argument('--width', type=int, default=1024)
parser.add_argument('--height', type=int, default=1024)
parser.add_argument('--ratio', type=float, default=0.5)
parser.add_argument('--extri', type=str, default='extri.yml')
parser.add_argument('--intri', type=str, default='intri.yml')
parser.add_argument('--output', type=str, default='ray_tracing')
# Environment map related
parser.add_argument('--envmap_root', type=str, default='data/lighting/16x32')
parser.add_argument('--envmap', type=str, default='gym_entrance.hdr')
# Visualization related
parser.add_argument('--depth_min', type=float, default=1.0)
parser.add_argument('--depth_max', type=float, default=5.0)
parser.add_argument('--surf_min', type=float, default=-3.0)
parser.add_argument('--surf_max', type=float, default=3.0)
parser.add_argument('--shading_albedo', type=float, default=0.8)
# Misc stuff
# parser.add_argument('--remesh', action='store_true', help='whether to perform remesh before the visibility computation') # slow
parser.add_argument('--transpose', action='store_true', help='transpose the y and z axis for the synthetic human dataset')
parser.add_argument('--ground', action='store_true', help='whether the visibility term should consider the ground?')
parser.add_argument('--sub', nargs='*')
# Visibility and shading related
parser.add_argument('--chunk_size', type=int, default=2500, help='chunk size of monte carlo samples (w.r.t. 1 x 512 x 512 image)')
parser.add_argument('--n_light_sample', type=int, default=50000, help='number of monte carlo samples for each pixel')
parser.add_argument('--ground_origin', type=float, default=[0, 0, 0], required=False, nargs='*', help='origin of the ground')
parser.add_argument('--ground_normal', type=float, default=[0, 0, 1], required=False, nargs='*', help='normal of the ground')
# Prepare arguments
args = parser.parse_args()
args.mesh = join(args.data_root, args.mesh)
args.extri = join(args.data_root, args.extri)
args.intri = join(args.data_root, args.intri)
args.output = join(args.data_root, args.output, args.mode) # {data_root}/ray_tracing/{mode}
args.envmap = join(args.envmap_root, args.envmap) # do not merge envmap with data_root
assert args.ext == '.obj', 'Only obj files are supported'
# Loading camera intrinsics and extrinsics
log(f'Loading cameras from {colored(args.intri, "blue")} and {colored(args.extri, "blue")} onto {colored(args.device, "magenta")}')
| # this file loads an obj with its textures and mtl files, then performs rasterization with ray-traced shadows
# it can also render components like normal, albedo, roughness, visibility etc.
# this should only be used for rendering ground truth values to compute metrics
# for visibility, we should compute metrics on visibility or the whole shading? only for soft-shadow?
# maybe both would be better...
# fmt: off
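# make the current working directory importable so the easyvolcap package resolves when this script is run directly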
sys.path.append(".")
# fmt: on
def light_visibility(surf: torch.Tensor, # B, P, 3
norm: torch.Tensor, # B, P, 3
tris: torch.Tensor,
N: int = 100, # number of samples per pixel (randomly distributed on the sphere)
compute_lvis: bool = True,
): # this function will compute both lvis and ldot
# Prepare shapes of verts and faces (could have same batch size as surf and norm)
if surf.ndim == 4:
surf = surf.view(surf.shape[0], -1, 3)
if norm.ndim == 4:
norm = norm.view(surf.shape[0], -1, 3)
B, P, _ = surf.shape
T = B * N * P
# Generate T uniform, stratified samples over the upper hemisphere
# See http://www.bogotobogo.com/Algorithms/uniform_distribution_sphere.php
theta, phi = spherical_uniform_sampling_upper(T, device=surf.device) # T, T,
ray_d = spher2cart(theta, phi) # T, 3, z always bigger than zero
# Preparing shapes
norm = norm[:, None].expand(B, N, P, 3).reshape(T, 3) # T, 3
ray_o = surf[:, None].expand(B, N, P, 3).reshape(T, 3) # T, 3
# Transform ray_d to be pointing upward from normal direction
R = torch.zeros([T, 3, 3], device=norm.device)
R[..., 0, 0] = 1.0
R[..., :3, 2] = norm # c2w, z axis is normal direction
R[..., :3, 1] = normalize(torch.cross(R[..., :3, 2], R[..., :3, 0]))
R[..., :3, 0] = normalize(torch.cross(R[..., :3, 1], R[..., :3, 2]))
ray_d = (R @ ray_d[..., None])[..., 0]
# Compute shading
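# ldot is the dot product between each sampled light direction and the surface normal (the cosine foreshortening term when both are unit vectors)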
ldot = (ray_d * norm).sum(dim=-1).reshape(T) # T
def ray_tracing_intersection(ray_o: torch.Tensor, ray_d: torch.Tensor, tris: torch.Tensor) -> torch.Tensor:
# assume all tris in the batch are the same, so only the first is kept
sh = ray_o.shape # B, S, 3
tris = tris[:1] # 1, F, 3, 3
ray_o = ray_o.view(-1, 3)[None] # 1, B * S, 3
ray_d = ray_d.view(-1, 3)[None] # 1, B * S, 3
bvh = BVH() # is this too wasteful, reconstructing the BVH in every iteration?
# pts: 1, P, 3
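# the ray origin is offset by 0.01 along the ray direction before the BVH query, presumably to avoid self-intersection with the originating surface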
dists_sq, points, face_ids, barys = bvh(tris, ray_o + ray_d * 0.01, ray_d) # 1, P, 3
lvis: torch.Tensor = 1 - (dists_sq > 0).float() # all barycentric coordinates are valid -> intersection -> zero vis
lvis = lvis.view(*sh[:-1]) # TODO: messy shapes
lvis = lvis.nan_to_num(0.) # nan_to_num is not in-place, so assign the result back; sometimes the Moller-Trumbore test returns nan...?
return lvis
# Here lren are the indices of the ray-direction / pixel pairs to render
# Perform rendering on the lren ray-pixel pairs
if compute_lvis:
lvis = ray_tracing_intersection(ray_o, ray_d, tris)
else:
lvis = torch.ones_like(ldot)
lvis = lvis.view(B, N, P)
ldot = ldot.view(B, N, P)
ray_d = ray_d.view(B, N, P, 3)
return lvis, ldot, ray_d
def main():
"""
We have a few assumptions about the ground truth rendering process
We require a pivot mesh for textures, and other meshes can be loaded with only the vertices
Since animation should only be about changing the positions of the vertices (without topology warps)
We don't need to render the full model, only
1. Normal (geometry only)
2. Ray-tracing soft shadow (geometry only) (visibility)
3. Albedo (diffuse albedo map)
4. Roughness (roughness value map)
5. Full rendering pipeline? (no, since the material model and indirect illumination are not implemented)
What do we do?
"""
# All other related stuff should have been loaded implicitly from the object file's definition
parser = argparse.ArgumentParser()
parser.add_argument('--mode', type=str, default='normal', choices=['normal', 'depth', 'surf', 'shade', 'ibl', 'ao'])
parser.add_argument('--ext', type=str, default='.obj') # should not change this
parser.add_argument('--device', type=str, default='cuda') # should not change this
# Input output related
parser.add_argument('--data_root', type=str, default='data/synthetic_human/jody')
parser.add_argument('--mesh', type=str, default="object/000000.obj")
parser.add_argument('--width', type=int, default=1024)
parser.add_argument('--height', type=int, default=1024)
parser.add_argument('--ratio', type=float, default=0.5)
parser.add_argument('--extri', type=str, default='extri.yml')
parser.add_argument('--intri', type=str, default='intri.yml')
parser.add_argument('--output', type=str, default='ray_tracing')
# Environment map related
parser.add_argument('--envmap_root', type=str, default='data/lighting/16x32')
parser.add_argument('--envmap', type=str, default='gym_entrance.hdr')
# Visualization related
parser.add_argument('--depth_min', type=float, default=1.0)
parser.add_argument('--depth_max', type=float, default=5.0)
parser.add_argument('--surf_min', type=float, default=-3.0)
parser.add_argument('--surf_max', type=float, default=3.0)
parser.add_argument('--shading_albedo', type=float, default=0.8)
# Misc stuff
# parser.add_argument('--remesh', action='store_true', help='whether to perform remesh before the visibility computation') # slow
parser.add_argument('--transpose', action='store_true', help='transpose the y and z axis for the synthetic human dataset')
parser.add_argument('--ground', action='store_true', help='whether the visibility term should consider the ground?')
parser.add_argument('--sub', nargs='*')
# Visibility and shading related
parser.add_argument('--chunk_size', type=int, default=2500, help='chunk size of monte carlo samples (w.r.t. 1 x 512 x 512 image)')
parser.add_argument('--n_light_sample', type=int, default=50000, help='number of monte carlo samples for each pixel')
parser.add_argument('--ground_origin', type=float, default=[0, 0, 0], required=False, nargs='*', help='origin of the ground')
parser.add_argument('--ground_normal', type=float, default=[0, 0, 1], required=False, nargs='*', help='normal of the ground')
# Prepare arguments
args = parser.parse_args()
args.mesh = join(args.data_root, args.mesh)
args.extri = join(args.data_root, args.extri)
args.intri = join(args.data_root, args.intri)
args.output = join(args.data_root, args.output, args.mode) # {data_root}/ray_tracing/{mode}
args.envmap = join(args.envmap_root, args.envmap) # do not merge envmap with data_root
assert args.ext == '.obj', 'Only obj files are supported'
# Loading camera intrinsics and extrinsics
log(f'Loading cameras from {colored(args.intri, "blue")} and {colored(args.extri, "blue")} onto {colored(args.device, "magenta")}') | camera = read_camera(args.intri, args.extri) # camera dictionary | 2 | 2023-12-07 08:53:42+00:00 | 12k |
alibaba/animate-anything | models/unet_3d_condition_mask.py | [
{
"identifier": "CrossAttnDownBlock3D",
"path": "models/unet_3d_blocks.py",
"snippet": "class CrossAttnDownBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n attn_num_head_channels=1,\n cross_attention_dim=1280,\n output_scale_factor=1.0,\n downsample_padding=1,\n add_downsample=True,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n ):\n super().__init__()\n resnets = []\n attentions = []\n temp_attentions = []\n temp_convs = []\n\n self.gradient_checkpointing = False\n self.has_cross_attention = True\n self.attn_num_head_channels = attn_num_head_channels\n\n for i in range(num_layers):\n in_channels = in_channels if i == 0 else out_channels\n resnets.append(\n ResnetBlock2D(\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n temp_convs.append(\n TemporalConvLayer(\n out_channels,\n out_channels,\n dropout=0.1\n )\n )\n attentions.append(\n Transformer2DModel(\n out_channels // attn_num_head_channels,\n attn_num_head_channels,\n in_channels=out_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n )\n )\n temp_attentions.append(\n TransformerTemporalModel(\n out_channels // attn_num_head_channels,\n attn_num_head_channels,\n in_channels=out_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n )\n )\n self.resnets = nn.ModuleList(resnets)\n self.temp_convs = nn.ModuleList(temp_convs)\n self.attentions = nn.ModuleList(attentions)\n self.temp_attentions = nn.ModuleList(temp_attentions)\n\n if add_downsample:\n self.downsamplers = nn.ModuleList(\n [\n Downsample2D(\n out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name=\"op\"\n )\n ]\n )\n else:\n self.downsamplers = None\n\n def forward(\n self,\n hidden_states,\n temb=None,\n encoder_hidden_states=None,\n attention_mask=None,\n num_frames=1,\n cross_attention_kwargs=None,\n ):\n # TODO(Patrick, William) - attention mask is not used\n output_states = ()\n\n for resnet, temp_conv, attn, temp_attn in zip(\n self.resnets, self.temp_convs, self.attentions, self.temp_attentions\n ):\n \n if self.gradient_checkpointing:\n hidden_states = cross_attn_g_c(\n attn, \n temp_attn, \n resnet, \n temp_conv, \n hidden_states, \n encoder_hidden_states, \n cross_attention_kwargs, \n temb, \n num_frames,\n inverse_temp=True\n )\n else:\n hidden_states = resnet(hidden_states, temb)\n\n if num_frames > 1:\n hidden_states = temp_conv(hidden_states, num_frames=num_frames)\n\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n ).sample\n\n if num_frames > 1:\n hidden_states = temp_attn(hidden_states, num_frames=num_frames).sample\n\n output_states += (hidden_states,)\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states += 
(hidden_states,)\n\n return hidden_states, output_states"
},
{
"identifier": "CrossAttnUpBlock3D",
"path": "models/unet_3d_blocks.py",
"snippet": "class CrossAttnUpBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n prev_output_channel: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n attn_num_head_channels=1,\n cross_attention_dim=1280,\n output_scale_factor=1.0,\n add_upsample=True,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n ):\n super().__init__()\n resnets = []\n temp_convs = []\n attentions = []\n temp_attentions = []\n\n self.gradient_checkpointing = False\n self.has_cross_attention = True\n self.attn_num_head_channels = attn_num_head_channels\n\n for i in range(num_layers):\n res_skip_channels = in_channels if (i == num_layers - 1) else out_channels\n resnet_in_channels = prev_output_channel if i == 0 else out_channels\n\n resnets.append(\n ResnetBlock2D(\n in_channels=resnet_in_channels + res_skip_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n temp_convs.append(\n TemporalConvLayer(\n out_channels,\n out_channels,\n dropout=0.1\n )\n )\n attentions.append(\n Transformer2DModel(\n out_channels // attn_num_head_channels,\n attn_num_head_channels,\n in_channels=out_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n )\n )\n temp_attentions.append(\n TransformerTemporalModel(\n out_channels // attn_num_head_channels,\n attn_num_head_channels,\n in_channels=out_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n )\n )\n self.resnets = nn.ModuleList(resnets)\n self.temp_convs = nn.ModuleList(temp_convs)\n self.attentions = nn.ModuleList(attentions)\n self.temp_attentions = nn.ModuleList(temp_attentions)\n\n if add_upsample:\n self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)])\n else:\n self.upsamplers = None\n\n def forward(\n self,\n hidden_states,\n res_hidden_states_tuple,\n temb=None,\n encoder_hidden_states=None,\n upsample_size=None,\n attention_mask=None,\n num_frames=1,\n cross_attention_kwargs=None,\n ):\n # TODO(Patrick, William) - attention mask is not used\n for resnet, temp_conv, attn, temp_attn in zip(\n self.resnets, self.temp_convs, self.attentions, self.temp_attentions\n ):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n\n if self.gradient_checkpointing:\n hidden_states = cross_attn_g_c(\n attn, \n temp_attn, \n resnet, \n temp_conv, \n hidden_states, \n encoder_hidden_states, \n cross_attention_kwargs, \n temb, \n num_frames,\n inverse_temp=True\n )\n else:\n hidden_states = resnet(hidden_states, temb)\n\n if num_frames > 1:\n hidden_states = temp_conv(hidden_states, num_frames=num_frames)\n\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n 
).sample\n\n if num_frames > 1:\n hidden_states = temp_attn(hidden_states, num_frames=num_frames).sample\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states"
},
{
"identifier": "DownBlock3D",
"path": "models/unet_3d_blocks.py",
"snippet": "class DownBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n output_scale_factor=1.0,\n add_downsample=True,\n downsample_padding=1,\n ):\n super().__init__()\n resnets = []\n temp_convs = []\n\n self.gradient_checkpointing = False\n for i in range(num_layers):\n in_channels = in_channels if i == 0 else out_channels\n resnets.append(\n ResnetBlock2D(\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n temp_convs.append(\n TemporalConvLayer(\n out_channels,\n out_channels,\n dropout=0.1\n )\n )\n\n self.resnets = nn.ModuleList(resnets)\n self.temp_convs = nn.ModuleList(temp_convs)\n\n if add_downsample:\n self.downsamplers = nn.ModuleList(\n [\n Downsample2D(\n out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name=\"op\"\n )\n ]\n )\n else:\n self.downsamplers = None\n\n def forward(self, hidden_states, temb=None, num_frames=1):\n output_states = ()\n\n for resnet, temp_conv in zip(self.resnets, self.temp_convs):\n if self.gradient_checkpointing:\n hidden_states = up_down_g_c(resnet, temp_conv, hidden_states, temb, num_frames)\n else:\n hidden_states = resnet(hidden_states, temb)\n\n if num_frames > 1:\n hidden_states = temp_conv(hidden_states, num_frames=num_frames)\n\n output_states += (hidden_states,)\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states += (hidden_states,)\n\n return hidden_states, output_states"
},
{
"identifier": "UNetMidBlock3DCrossAttn",
"path": "models/unet_3d_blocks.py",
"snippet": "class UNetMidBlock3DCrossAttn(nn.Module):\n def __init__(\n self,\n in_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n attn_num_head_channels=1,\n output_scale_factor=1.0,\n cross_attention_dim=1280,\n dual_cross_attention=False,\n use_linear_projection=True,\n upcast_attention=False,\n ):\n super().__init__()\n\n self.gradient_checkpointing = False\n self.has_cross_attention = True\n self.attn_num_head_channels = attn_num_head_channels\n resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32)\n\n # there is always at least one resnet\n resnets = [\n ResnetBlock2D(\n in_channels=in_channels,\n out_channels=in_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n ]\n temp_convs = [\n TemporalConvLayer(\n in_channels,\n in_channels,\n dropout=0.1\n )\n ]\n attentions = []\n temp_attentions = []\n\n for _ in range(num_layers):\n attentions.append(\n Transformer2DModel(\n in_channels // attn_num_head_channels,\n attn_num_head_channels,\n in_channels=in_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n )\n )\n temp_attentions.append(\n TransformerTemporalModel(\n in_channels // attn_num_head_channels,\n attn_num_head_channels,\n in_channels=in_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n )\n )\n resnets.append(\n ResnetBlock2D(\n in_channels=in_channels,\n out_channels=in_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n temp_convs.append(\n TemporalConvLayer(\n in_channels,\n in_channels,\n dropout=0.1\n )\n )\n\n self.resnets = nn.ModuleList(resnets)\n self.temp_convs = nn.ModuleList(temp_convs)\n self.attentions = nn.ModuleList(attentions)\n self.temp_attentions = nn.ModuleList(temp_attentions)\n\n def forward(\n self,\n hidden_states,\n temb=None,\n encoder_hidden_states=None,\n attention_mask=None,\n num_frames=1,\n cross_attention_kwargs=None,\n ):\n if self.gradient_checkpointing:\n hidden_states = up_down_g_c(\n self.resnets[0], \n self.temp_convs[0], \n hidden_states, \n temb, \n num_frames\n )\n else:\n hidden_states = self.resnets[0](hidden_states, temb)\n hidden_states = self.temp_convs[0](hidden_states, num_frames=num_frames)\n \n for attn, temp_attn, resnet, temp_conv in zip(\n self.attentions, self.temp_attentions, self.resnets[1:], self.temp_convs[1:]\n ):\n if self.gradient_checkpointing:\n hidden_states = cross_attn_g_c(\n attn, \n temp_attn, \n resnet, \n temp_conv, \n hidden_states, \n encoder_hidden_states, \n cross_attention_kwargs, \n temb, \n num_frames\n )\n else:\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n ).sample\n \n if num_frames > 1:\n hidden_states = temp_attn(hidden_states, num_frames=num_frames).sample\n\n hidden_states = 
resnet(hidden_states, temb)\n\n if num_frames > 1:\n hidden_states = temp_conv(hidden_states, num_frames=num_frames)\n\n return hidden_states"
},
{
"identifier": "UpBlock3D",
"path": "models/unet_3d_blocks.py",
"snippet": "class UpBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n prev_output_channel: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n output_scale_factor=1.0,\n add_upsample=True,\n ):\n super().__init__()\n resnets = []\n temp_convs = []\n self.gradient_checkpointing = False\n for i in range(num_layers):\n res_skip_channels = in_channels if (i == num_layers - 1) else out_channels\n resnet_in_channels = prev_output_channel if i == 0 else out_channels\n\n resnets.append(\n ResnetBlock2D(\n in_channels=resnet_in_channels + res_skip_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n temp_convs.append(\n TemporalConvLayer(\n out_channels,\n out_channels,\n dropout=0.1\n )\n )\n\n self.resnets = nn.ModuleList(resnets)\n self.temp_convs = nn.ModuleList(temp_convs)\n\n if add_upsample:\n self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)])\n else:\n self.upsamplers = None\n\n def forward(self, hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None, num_frames=1):\n for resnet, temp_conv in zip(self.resnets, self.temp_convs):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n\n if self.gradient_checkpointing:\n hidden_states = up_down_g_c(resnet, temp_conv, hidden_states, temb, num_frames)\n else:\n hidden_states = resnet(hidden_states, temb)\n\n if num_frames > 1:\n hidden_states = temp_conv(hidden_states, num_frames=num_frames)\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states"
},
{
"identifier": "get_down_block",
"path": "models/unet_3d_blocks.py",
"snippet": "def get_down_block(\n down_block_type,\n num_layers,\n in_channels,\n out_channels,\n temb_channels,\n add_downsample,\n resnet_eps,\n resnet_act_fn,\n attn_num_head_channels,\n resnet_groups=None,\n cross_attention_dim=None,\n downsample_padding=None,\n dual_cross_attention=False,\n use_linear_projection=True,\n only_cross_attention=False,\n upcast_attention=False,\n resnet_time_scale_shift=\"default\",\n):\n if down_block_type == \"DownBlock3D\":\n return DownBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n elif down_block_type == \"CrossAttnDownBlock3D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for CrossAttnDownBlock3D\")\n return CrossAttnDownBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attn_num_head_channels,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n raise ValueError(f\"{down_block_type} does not exist.\")"
},
{
"identifier": "get_up_block",
"path": "models/unet_3d_blocks.py",
"snippet": "def get_up_block(\n up_block_type,\n num_layers,\n in_channels,\n out_channels,\n prev_output_channel,\n temb_channels,\n add_upsample,\n resnet_eps,\n resnet_act_fn,\n attn_num_head_channels,\n resnet_groups=None,\n cross_attention_dim=None,\n dual_cross_attention=False,\n use_linear_projection=True,\n only_cross_attention=False,\n upcast_attention=False,\n resnet_time_scale_shift=\"default\",\n):\n if up_block_type == \"UpBlock3D\":\n return UpBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n elif up_block_type == \"CrossAttnUpBlock3D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for CrossAttnUpBlock3D\")\n return CrossAttnUpBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attn_num_head_channels,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n raise ValueError(f\"{up_block_type} does not exist.\")"
},
{
"identifier": "transformer_g_c",
"path": "models/unet_3d_blocks.py",
"snippet": "def transformer_g_c(transformer, sample, num_frames):\n sample = g_c(custom_checkpoint(transformer, mode='temp'), \n sample, num_frames, use_reentrant=False\n )['sample']\n\n return sample"
}
] | from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple, Union
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.utils import BaseOutput, logging
from diffusers.models.embeddings import TimestepEmbedding, Timesteps
from diffusers.models.modeling_utils import ModelMixin
from diffusers.models.transformer_temporal import TransformerTemporalModel
from einops import rearrange, repeat
from .unet_3d_blocks import (
CrossAttnDownBlock3D,
CrossAttnUpBlock3D,
DownBlock3D,
UNetMidBlock3DCrossAttn,
UpBlock3D,
get_down_block,
get_up_block,
transformer_g_c
)
import torch
import torch.nn as nn
import torch.utils.checkpoint | 8,921 | reversed_block_out_channels = list(reversed(block_out_channels))
reversed_attention_head_dim = list(reversed(attention_head_dim))
output_channel = reversed_block_out_channels[0]
for i, up_block_type in enumerate(up_block_types):
is_final_block = i == len(block_out_channels) - 1
prev_output_channel = output_channel
output_channel = reversed_block_out_channels[i]
input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]
# add upsample block for all BUT final layer
if not is_final_block:
add_upsample = True
self.num_upsamplers += 1
else:
add_upsample = False
up_block = get_up_block(
up_block_type,
num_layers=layers_per_block + 1,
in_channels=input_channel,
out_channels=output_channel,
prev_output_channel=prev_output_channel,
temb_channels=time_embed_dim,
add_upsample=add_upsample,
resnet_eps=norm_eps,
resnet_act_fn=act_fn,
resnet_groups=norm_num_groups,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=reversed_attention_head_dim[i],
dual_cross_attention=False,
)
self.up_blocks.append(up_block)
prev_output_channel = output_channel
# out
if norm_num_groups is not None:
self.conv_norm_out = nn.GroupNorm(
num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps
)
self.conv_act = nn.SiLU()
else:
self.conv_norm_out = None
self.conv_act = None
conv_out_padding = (conv_out_kernel - 1) // 2
self.conv_out = nn.Conv2d(
block_out_channels[0], out_channels, kernel_size=conv_out_kernel, padding=conv_out_padding
)
def set_attention_slice(self, slice_size):
r"""
Enable sliced attention computation.
When this option is enabled, the attention module will split the input tensor in slices, to compute attention
in several steps. This is useful to save some memory in exchange for a small speed decrease.
Args:
slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`):
When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
`"max"`, maximum amount of memory will be saved by running only one slice at a time. If a number is
provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`
must be a multiple of `slice_size`.
"""
sliceable_head_dims = []
def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):
if hasattr(module, "set_attention_slice"):
sliceable_head_dims.append(module.sliceable_head_dim)
for child in module.children():
fn_recursive_retrieve_slicable_dims(child)
# retrieve number of attention layers
for module in self.children():
fn_recursive_retrieve_slicable_dims(module)
num_slicable_layers = len(sliceable_head_dims)
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
slice_size = [dim // 2 for dim in sliceable_head_dims]
elif slice_size == "max":
# make smallest slice possible
slice_size = num_slicable_layers * [1]
slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size
if len(slice_size) != len(sliceable_head_dims):
raise ValueError(
f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different"
f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}."
)
for i in range(len(slice_size)):
size = slice_size[i]
dim = sliceable_head_dims[i]
if size is not None and size > dim:
raise ValueError(f"size {size} has to be smaller or equal to {dim}.")
# Recursively walk through all the children.
# Any children which exposes the set_attention_slice method
# gets the message
def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):
if hasattr(module, "set_attention_slice"):
module.set_attention_slice(slice_size.pop())
for child in module.children():
fn_recursive_set_attention_slice(child, slice_size)
reversed_slice_size = list(reversed(slice_size))
for module in self.children():
fn_recursive_set_attention_slice(module, reversed_slice_size)
def _set_gradient_checkpointing(self, value=False):
self.gradient_checkpointing = value
self.mid_block.gradient_checkpointing = value
for module in self.down_blocks + self.up_blocks:
| # Copyright 2023 Alibaba DAMO-VILAB and The HuggingFace Team. All rights reserved.
# Copyright 2023 The ModelScope Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
@dataclass
class UNet3DConditionOutput(BaseOutput):
"""
Args:
sample (`torch.FloatTensor` of shape `(batch_size, num_frames, num_channels, height, width)`):
Hidden states conditioned on `encoder_hidden_states` input. Output of last layer of model.
"""
sample: torch.FloatTensor
class UNet3DConditionModel(ModelMixin, ConfigMixin):
r"""
UNet3DConditionModel is a conditional 3D UNet model that takes in a noisy sample, conditional state, and a timestep
and returns sample shaped output.
This model inherits from [`ModelMixin`]. Check the superclass documentation for the generic methods the library
implements for all the models (such as downloading or saving, etc.)
Parameters:
sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`):
Height and width of input/output sample.
in_channels (`int`, *optional*, defaults to 4): The number of channels in the input sample.
out_channels (`int`, *optional*, defaults to 4): The number of channels in the output.
down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`):
The tuple of downsample blocks to use.
up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D",)`):
The tuple of upsample blocks to use.
block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`):
The tuple of output channels for each block.
layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block.
downsample_padding (`int`, *optional*, defaults to 1): The padding to use for the downsampling convolution.
mid_block_scale_factor (`float`, *optional*, defaults to 1.0): The scale factor to use for the mid block.
act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use.
norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization.
If `None`, it will skip the normalization and activation layers in post-processing
norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization.
cross_attention_dim (`int`, *optional*, defaults to 1024): The dimension of the cross attention features.
attention_head_dim (`int` or `Tuple[int]`, *optional*, defaults to 64): The dimension of the attention heads.
"""
_supports_gradient_checkpointing = True
@register_to_config
def __init__(
self,
sample_size: Optional[int] = None,
in_channels: int = 4,
out_channels: int = 4,
down_block_types: Tuple[str] = (
"CrossAttnDownBlock3D",
"CrossAttnDownBlock3D",
"CrossAttnDownBlock3D",
"DownBlock3D",
),
up_block_types: Tuple[str] = ("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
layers_per_block: int = 2,
downsample_padding: int = 1,
mid_block_scale_factor: float = 1,
act_fn: str = "silu",
norm_num_groups: Optional[int] = 32,
norm_eps: float = 1e-5,
cross_attention_dim: int = 1024,
attention_head_dim: Union[int, Tuple[int]] = 64,
motion_mask = False,
motion_strength = False,
):
super().__init__()
self.motion_mask = motion_mask
self.motion_strength = motion_strength
print(f"motion mask {self.motion_mask}, motion_strength {self.motion_strength}")
self.sample_size = sample_size
self.gradient_checkpointing = False
# Check inputs
if len(down_block_types) != len(up_block_types):
raise ValueError(
f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}."
)
if len(block_out_channels) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}."
)
if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}."
)
# input
conv_in_kernel = 3
conv_out_kernel = 3
conv_in_padding = (conv_in_kernel - 1) // 2
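# 'same'-style padding for an odd kernel, so the input conv keeps the spatial resolution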
self.conv_in = nn.Conv2d(
in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding
)
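# conv_in2 takes 5 channels, presumably the 4 latent channels plus a single-channel motion mask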
self.conv_in2 = nn.Conv2d(
5, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding
)
# time
time_embed_dim = block_out_channels[0] * 4
self.time_proj = Timesteps(block_out_channels[0], True, 0)
timestep_input_dim = block_out_channels[0]
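# cond_proj_dim gives TimestepEmbedding an extra linear projection so that an additional conditioning signal (here, presumably the motion-strength embedding) can be added to the timestep features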
self.time_embedding = TimestepEmbedding(
timestep_input_dim,
time_embed_dim,
act_fn=act_fn,
cond_proj_dim=block_out_channels[0],
)
self.motion_proj = Timesteps(block_out_channels[0], True, 0)
self.motion_embedding = nn.Sequential(
nn.Linear(timestep_input_dim, time_embed_dim), nn.SiLU(),
nn.Linear(time_embed_dim, time_embed_dim))
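# zero-init the final linear layer so the motion embedding outputs zeros at initialization (a common trick to keep pretrained behaviour unchanged at the start of fine-tuning)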
nn.init.zeros_(self.motion_embedding[-1].weight)
nn.init.zeros_(self.motion_embedding[-1].bias)
self.transformer_in = TransformerTemporalModel(
num_attention_heads=8,
attention_head_dim=attention_head_dim,
in_channels=block_out_channels[0],
num_layers=1,
)
# class embedding
self.down_blocks = nn.ModuleList([])
self.up_blocks = nn.ModuleList([])
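# if a single attention head dim was given, broadcast it to one value per down block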
if isinstance(attention_head_dim, int):
attention_head_dim = (attention_head_dim,) * len(down_block_types)
# down
output_channel = block_out_channels[0]
for i, down_block_type in enumerate(down_block_types):
input_channel = output_channel
output_channel = block_out_channels[i]
is_final_block = i == len(block_out_channels) - 1
down_block = get_down_block(
down_block_type,
num_layers=layers_per_block,
in_channels=input_channel,
out_channels=output_channel,
temb_channels=time_embed_dim,
add_downsample=not is_final_block,
resnet_eps=norm_eps,
resnet_act_fn=act_fn,
resnet_groups=norm_num_groups,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=attention_head_dim[i],
downsample_padding=downsample_padding,
dual_cross_attention=False,
)
self.down_blocks.append(down_block)
# mid
self.mid_block = UNetMidBlock3DCrossAttn(
in_channels=block_out_channels[-1],
temb_channels=time_embed_dim,
resnet_eps=norm_eps,
resnet_act_fn=act_fn,
output_scale_factor=mid_block_scale_factor,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=attention_head_dim[-1],
resnet_groups=norm_num_groups,
dual_cross_attention=False,
)
# count how many layers upsample the images
self.num_upsamplers = 0
# up
reversed_block_out_channels = list(reversed(block_out_channels))
reversed_attention_head_dim = list(reversed(attention_head_dim))
output_channel = reversed_block_out_channels[0]
for i, up_block_type in enumerate(up_block_types):
is_final_block = i == len(block_out_channels) - 1
prev_output_channel = output_channel
output_channel = reversed_block_out_channels[i]
input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]
# add upsample block for all BUT final layer
if not is_final_block:
add_upsample = True
self.num_upsamplers += 1
else:
add_upsample = False
up_block = get_up_block(
up_block_type,
num_layers=layers_per_block + 1,
in_channels=input_channel,
out_channels=output_channel,
prev_output_channel=prev_output_channel,
temb_channels=time_embed_dim,
add_upsample=add_upsample,
resnet_eps=norm_eps,
resnet_act_fn=act_fn,
resnet_groups=norm_num_groups,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=reversed_attention_head_dim[i],
dual_cross_attention=False,
)
self.up_blocks.append(up_block)
prev_output_channel = output_channel
# out
if norm_num_groups is not None:
self.conv_norm_out = nn.GroupNorm(
num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps
)
self.conv_act = nn.SiLU()
else:
self.conv_norm_out = None
self.conv_act = None
conv_out_padding = (conv_out_kernel - 1) // 2
self.conv_out = nn.Conv2d(
block_out_channels[0], out_channels, kernel_size=conv_out_kernel, padding=conv_out_padding
)
def set_attention_slice(self, slice_size):
r"""
Enable sliced attention computation.
When this option is enabled, the attention module will split the input tensor in slices, to compute attention
in several steps. This is useful to save some memory in exchange for a small speed decrease.
Args:
slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`):
When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
`"max"`, maximum amount of memory will be saved by running only one slice at a time. If a number is
provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`
must be a multiple of `slice_size`.
"""
sliceable_head_dims = []
def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):
if hasattr(module, "set_attention_slice"):
sliceable_head_dims.append(module.sliceable_head_dim)
for child in module.children():
fn_recursive_retrieve_slicable_dims(child)
# retrieve number of attention layers
for module in self.children():
fn_recursive_retrieve_slicable_dims(module)
num_slicable_layers = len(sliceable_head_dims)
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
slice_size = [dim // 2 for dim in sliceable_head_dims]
elif slice_size == "max":
# make smallest slice possible
slice_size = num_slicable_layers * [1]
slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size
if len(slice_size) != len(sliceable_head_dims):
raise ValueError(
f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different"
f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}."
)
for i in range(len(slice_size)):
size = slice_size[i]
dim = sliceable_head_dims[i]
if size is not None and size > dim:
raise ValueError(f"size {size} has to be smaller or equal to {dim}.")
# Recursively walk through all the children.
# Any children which exposes the set_attention_slice method
# gets the message
def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):
if hasattr(module, "set_attention_slice"):
module.set_attention_slice(slice_size.pop())
for child in module.children():
fn_recursive_set_attention_slice(child, slice_size)
reversed_slice_size = list(reversed(slice_size))
for module in self.children():
fn_recursive_set_attention_slice(module, reversed_slice_size)
def _set_gradient_checkpointing(self, value=False):
self.gradient_checkpointing = value
self.mid_block.gradient_checkpointing = value
for module in self.down_blocks + self.up_blocks: | if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D, CrossAttnUpBlock3D, UpBlock3D)): | 0 | 2023-12-07 08:26:29+00:00 | 12k |
octo-models/octo | scripts/finetune.py | [
{
"identifier": "make_single_dataset",
"path": "octo/data/dataset.py",
"snippet": "def make_single_dataset(\n dataset_kwargs: dict,\n *,\n train: bool,\n traj_transform_kwargs: dict = {},\n frame_transform_kwargs: dict = {},\n) -> dl.DLataset:\n \"\"\"Creates a single dataset from kwargs. Returns a dataset of trajectories.\n\n Args:\n dataset_kwargs: kwargs passed to `make_dataset_from_rlds` that are dataset-specific.\n train: whether this is a training or validation dataset.\n traj_transform_kwargs: kwargs passed to 'apply_trajectory_transforms'.\n frame_transform_kwargs: kwargs passed to 'get_frame_transforms'.\n \"\"\"\n dataset, dataset_statistics = make_dataset_from_rlds(\n **dataset_kwargs,\n train=train,\n )\n dataset = apply_trajectory_transforms(dataset, **traj_transform_kwargs, train=train)\n dataset = apply_frame_transforms(dataset, **frame_transform_kwargs, train=train)\n\n # this seems to reduce memory usage without affecting speed\n dataset = dataset.with_ram_budget(1)\n\n # save for later\n dataset.dataset_statistics = dataset_statistics\n return dataset"
},
{
"identifier": "OctoModel",
"path": "octo/model/octo_model.py",
"snippet": "class OctoModel:\n \"\"\"Recommended way of interacting with Octo models.\n\n Usage for inference:\n\n >>> model = OctoModel.load_pretrained(checkpoint_dir)\n >>> tasks = model.create_tasks(texts=[\"go to the red room\"])\n >>> # or tasks = model.create_tasks(goals={\"image_primary\": goal_images})\n >>> actions = model.sample_actions(observations, tasks, rng=jax.random.PRNGKey(0))\n >>> # Note: these are normalized actions (processed to mean 0 and std 1). To get the raw actions,\n # un-normalize them using model.dataset_statistics\n\n Usage for finetuning:\n\n >>> model = OctoModel.load_pretrained(checkpoint_dir)\n >>> train_state = octo.utils.train_utils.TrainState.create(\n rng=jax.random.PRNGKey(0),\n model=model,\n tx=optax.adamw(...)\n )\n >>> # access params through train_state.model.params\n >>> train_state, metrics = your_update_function(train_state, batch)\n >>> # when it's time to save (note that this only saves the model parameters,\n >>> # not the full optimizer state)\n >>> train_state.model.save_pretrained(step, save_dir)\n\n Usage for pretraining:\n\n >>> model = OctoModel.from_config(\n config,\n example_batch,\n text_processor\n ) # initializes params\n >>> # Continue as in finetuning example\n\n See full usage examples in train.py and finetune.py.\n\n \"\"\"\n\n module: OctoModule = struct.field(pytree_node=False)\n text_processor: TextProcessor = struct.field(pytree_node=False)\n config: Config = struct.field(pytree_node=False)\n params: Params\n example_batch: Data\n dataset_statistics: Optional[Data]\n\n def create_tasks(\n self, goals: Optional[Data] = None, texts: Optional[Sequence[str]] = None\n ):\n \"\"\"Creates tasks dict from goals and texts.\n\n Args:\n goals: if not None, dict of arrays with shape (batch_size, *)\n texts: if not None, list of texts of length batch_size\n\n Omit images to run the language-conditioned model, and omit texts to run the\n goal-conditioned model.\n \"\"\"\n assert goals is not None or texts is not None\n tasks = {\"pad_mask_dict\": {}}\n if goals is not None:\n tasks.update(goals)\n tasks[\"pad_mask_dict\"].update(\n {k: np.ones(v.shape[:1], dtype=bool) for k, v in goals.items()}\n )\n else:\n batch_size = len(texts)\n tasks.update(\n {\n k: np.zeros((batch_size, *v.shape[1:]), dtype=v.dtype)\n for k, v in self.example_batch[\"task\"].items()\n if k not in (\"pad_mask_dict\", \"language_instruction\")\n }\n )\n tasks[\"pad_mask_dict\"].update(\n {\n k: np.zeros(batch_size, dtype=bool)\n for k in tasks.keys()\n if k != \"pad_mask_dict\"\n }\n )\n\n if texts is not None:\n assert self.text_processor is not None\n tasks[\"language_instruction\"] = texts\n tasks[\"pad_mask_dict\"][\"language_instruction\"] = np.ones(\n len(texts), dtype=bool\n )\n else:\n batch_size = jax.tree_leaves(goals)[0].shape[0]\n tasks[\"language_instruction\"] = [\"\"] * batch_size\n tasks[\"pad_mask_dict\"][\"language_instruction\"] = np.zeros(\n batch_size, dtype=bool\n )\n\n if self.text_processor is not None:\n tasks[\"language_instruction\"] = self.text_processor.encode(\n tasks[\"language_instruction\"]\n )\n else:\n del tasks[\"language_instruction\"]\n\n _verify_shapes(tasks, \"tasks\", self.example_batch[\"task\"], starting_dim=1)\n return tasks\n\n @partial(jax.jit, static_argnames=(\"train\",))\n def run_transformer(\n self, observations: Data, tasks: Data, pad_mask: ArrayLike, train: bool = False\n ):\n \"\"\"Runs the transformer, but does shape checking on the inputs.\n\n Args:\n observations: dictionary of arrays of shape (batch_size, 
window_size, *shape).\n Shape must be consistent with self.example_batch[\"observation\"]\n tasks: dict of tasks of shape (batch_size, *shape)\n Shape must be consistent with self.example_batch[\"task\"]\n pad_mask: (batch_size, window_size) Boolean mask that is False when the timestep corresponds to padding\n train: whether to run in train mode\n \"\"\"\n _verify_shapes(\n observations,\n \"observations\",\n self.example_batch[\"observation\"],\n starting_dim=2,\n )\n _verify_shapes(tasks, \"tasks\", self.example_batch[\"task\"], starting_dim=1)\n\n return self.module.apply(\n {\"params\": self.params},\n observations,\n tasks,\n pad_mask,\n train=train,\n method=\"octo_transformer\",\n )\n\n @partial(jax.jit, static_argnames=(\"train\", \"sample_shape\", \"argmax\"))\n def sample_actions(\n self,\n observations: Data,\n tasks: Data,\n pad_mask: Optional[ArrayLike] = None,\n train: bool = False,\n argmax: bool = False,\n sample_shape: Tuple[int, ...] = (),\n rng: Optional[PRNGKey] = None,\n temperature: float = 1.0,\n ):\n \"\"\"Samples actions from the model. See `action_heads.py` for more info.\n\n Args:\n observations: dictionary of arrays of shape (batch_size, window_size, *)\n tasks: dict of tasks of shape (batch_size, *)\n pad_mask: (batch_size, window_size) Boolean mask that is False when the timestep corresponds to padding\n train: whether to run in train mode\n ...see `action_heads.py` for the rest of the kwargs.\n Returns:\n actions: (*sample_shape, batch_size, pred_horizon, action_dim)\n \"\"\"\n if pad_mask is None:\n pad_mask = observations[\"pad_mask\"]\n\n transformer_outputs = self.run_transformer(\n observations, tasks, pad_mask, train=train\n )\n action_head: ActionHead = self.module.bind({\"params\": self.params}).heads[\n \"action\"\n ]\n return action_head.predict_action(\n transformer_outputs,\n train=train,\n argmax=argmax,\n sample_shape=sample_shape,\n rng=rng,\n temperature=temperature,\n )\n\n @classmethod\n def load_pretrained(\n cls,\n checkpoint_path: str,\n step: Optional[int] = None,\n ) -> \"OctoModel\":\n \"\"\"Loads a model from a checkpoint that was saved via `save_pretrained`.\n\n Args:\n checkpoint_path (str): A path to either a directory of checkpoints or a single checkpoint.\n step (int, optional): If multiple checkpoints are present, which one to load. 
Defaults to the latest.\n \"\"\"\n if checkpoint_path.startswith(\"hf://\"):\n if step:\n raise ValueError(\n \"You can't set config['pretrained_step'] when loading from HuggingFace.\"\n )\n checkpoint_path = _download_from_huggingface(\n checkpoint_path.removeprefix(\"hf://\")\n )\n\n # load config\n with tf.io.gfile.GFile(\n tf.io.gfile.join(checkpoint_path, \"config.json\"), \"r\"\n ) as f:\n config = json.load(f)\n\n # load example batch\n with tf.io.gfile.GFile(\n tf.io.gfile.join(checkpoint_path, \"example_batch.msgpack\"), \"rb\"\n ) as f:\n example_batch = flax.serialization.msgpack_restore(f.read())\n # shim for migrating from \"tasks\" to \"task\"\n if \"tasks\" in example_batch:\n example_batch[\"task\"] = example_batch.pop(\"tasks\")\n\n logging.debug(\n \"Model was trained with observations: %s\",\n flax.core.pretty_repr(\n jax.tree_map(jnp.shape, example_batch[\"observation\"])\n ),\n )\n logging.debug(\n \"Model was trained with tasks: %s\",\n flax.core.pretty_repr(jax.tree_map(jnp.shape, example_batch[\"task\"])),\n )\n\n # load dataset statistics\n with tf.io.gfile.GFile(\n tf.io.gfile.join(checkpoint_path, \"dataset_statistics.json\"), \"r\"\n ) as f:\n dataset_statistics = json.load(f)\n dataset_statistics = jax.tree_map(\n np.array, dataset_statistics, is_leaf=lambda x: not isinstance(x, dict)\n )\n\n # create model def (an OctoModule)\n module = OctoModule.create(**config[\"model\"])\n # infer params shape without actually doing any computation\n params_shape = jax.eval_shape(\n partial(module.init, train=False),\n jax.random.PRNGKey(0),\n example_batch[\"observation\"],\n example_batch[\"task\"],\n example_batch[\"observation\"][\"pad_mask\"],\n )[\"params\"]\n # restore params, checking to make sure the shape matches\n checkpointer = orbax.checkpoint.CheckpointManager(\n checkpoint_path, orbax.checkpoint.PyTreeCheckpointer()\n )\n step = step if step is not None else checkpointer.latest_step()\n params = checkpointer.restore(step, params_shape)\n\n if config[\"text_processor\"] is not None:\n text_processor = ModuleSpec.instantiate(config[\"text_processor\"])()\n else:\n text_processor = None\n\n return cls(\n module=module,\n params=params,\n text_processor=text_processor,\n example_batch=example_batch,\n config=config,\n dataset_statistics=dataset_statistics,\n )\n\n def save_pretrained(\n self,\n step: int,\n checkpoint_path: Optional[str] = None,\n checkpoint_manager: Optional[orbax.checkpoint.CheckpointManager] = None,\n ):\n \"\"\"Saves a model, as well as corresponding metadata needed for `load_pretrained`. Takes either a\n pre-existing checkpoint manager (which already knows where to save the checkpoint) or a path to a\n directory to save the checkpoint to.\n\n Args:\n step (int): Step number.\n checkpoint_path (str, optional): Path to save the checkpoint.\n checkpoint_manager (optional): Checkpoint manager to save the checkpoint.\n params (optional): Params to save. 
If None, uses self.params.\n \"\"\"\n if (checkpoint_path is None) == (checkpoint_manager is None):\n raise ValueError(\n \"Must provide exactly one of checkpoint_path or checkpoint_manager.\"\n )\n if checkpoint_manager is None:\n checkpoint_manager = orbax.checkpoint.CheckpointManager(\n checkpoint_path, orbax.checkpoint.PyTreeCheckpointer()\n )\n if checkpoint_path is None:\n checkpoint_path = str(checkpoint_manager._directory)\n\n # save params\n checkpoint_manager.save(\n step,\n self.params,\n {\"save_args\": orbax_utils.save_args_from_target(self.params)},\n )\n\n if jax.process_index() == 0:\n # save config\n config_path = tf.io.gfile.join(checkpoint_path, \"config.json\")\n if not tf.io.gfile.exists(config_path):\n with tf.io.gfile.GFile(config_path, \"w\") as f:\n json.dump(self.config, f)\n\n # save example batch\n example_batch_path = tf.io.gfile.join(\n checkpoint_path, \"example_batch.msgpack\"\n )\n if not tf.io.gfile.exists(example_batch_path):\n with tf.io.gfile.GFile(example_batch_path, \"wb\") as f:\n f.write(flax.serialization.msgpack_serialize(self.example_batch))\n\n # save dataset statistics\n dataset_statistics_path = tf.io.gfile.join(\n checkpoint_path, \"dataset_statistics.json\"\n )\n if not tf.io.gfile.exists(dataset_statistics_path):\n with tf.io.gfile.GFile(dataset_statistics_path, \"w\") as f:\n json.dump(\n jax.tree_map(lambda x: x.tolist(), self.dataset_statistics),\n f,\n )\n\n @classmethod\n def from_config(\n cls,\n config: Config,\n example_batch: Data,\n text_processor: Optional[Any] = None,\n verbose: bool = False,\n rng: Optional[PRNGKey] = None,\n dataset_statistics: Optional[Data] = None,\n ):\n \"\"\"Initializes a model with a fresh set of weights from a given config + example_batch.\n\n Args:\n config (Dict[str, Any]): Config dict. 
The only required key is \"model\", but other configuration\n may be saved for posterity.\n example_batch (Dict[str, Any]): Example batch.\n text_processor (Any, optional): Preprocessor for text inputs.\n verbose (bool, optional): Whether to print out a summary of the model.\n rng (Optional[PRNGKey], optional): RNG key for initializing the model.\n dataset_statistics (Optional[Dict[str, Any]], optional): Dataset statistics.\n \"\"\"\n module = OctoModule.create(**config[\"model\"])\n rng = rng if rng is not None else jax.random.PRNGKey(0)\n example_batch = multihost_utils.process_allgather(example_batch)\n example_batch = jax.tree_map(lambda x: x[:1], example_batch)\n\n init_args = (\n example_batch[\"observation\"],\n example_batch[\"task\"],\n example_batch[\"observation\"][\"pad_mask\"],\n )\n\n if verbose:\n print(\n module.tabulate(rng, *init_args, train=False, verbose=True, depth=2)\n ) # Prints out the parameter count of our model, and tokenizer details\n\n @jax.jit\n def _init(rng):\n return module.init(rng, *init_args, train=False)\n\n params = _init(rng)[\"params\"]\n\n return cls(\n module=module,\n params=params,\n text_processor=text_processor,\n example_batch=example_batch,\n config=config,\n dataset_statistics=dataset_statistics,\n )\n\n def get_pretty_spec(self):\n \"\"\"Brief summary of the model's expected inputs and outputs.\"\"\"\n # TODO: generalize this to print out proprio when it is being tokenized\n window_size = self.example_batch[\"observation\"][\"pad_mask\"].shape[1]\n\n observation_space = {\n k: (\"batch\", \"history_window\", *v.shape[2:])\n for k, v in self.example_batch[\"observation\"].items()\n if k.startswith(\"image\")\n }\n task_space = {\n k: (\"batch\", *v.shape[1:])\n for k, v in self.example_batch[\"task\"].items()\n if k.startswith(\"image\")\n }\n if self.text_processor is not None:\n task_space[\"language_instruction\"] = jax.tree_map(\n lambda arr: (\"batch\", *arr.shape[1:]),\n self.example_batch[\"task\"][\"language_instruction\"],\n )\n\n try:\n action_head = self.module.heads[\"action\"]\n action_head_repr = str(action_head.__class__)\n action_dim, pred_horizon = action_head.action_dim, action_head.pred_horizon\n except:\n action_head_repr, action_dim, pred_horizon = \"\", None, None\n\n return SPEC_TEMPLATE.format(\n window_size=window_size,\n observation_space=flax.core.pretty_repr(observation_space),\n task_space=flax.core.pretty_repr(task_space),\n action_head_repr=action_head_repr,\n action_dim=action_dim,\n pred_horizon=pred_horizon,\n )"
},
{
"identifier": "initialize_compilation_cache",
"path": "octo/utils/jax_utils.py",
"snippet": "def initialize_compilation_cache(\n cache_dir=os.path.expanduser(\"~/.jax_compilation_cache\"),\n):\n \"\"\"Initializes the Jax persistent compilation cache.\"\"\"\n compilation_cache.initialize_cache(cache_dir)\n for logger in [logging.getLogger(name) for name in logging.root.manager.loggerDict]:\n logger.addFilter(\n lambda record: \"Not writing persistent cache entry for\"\n not in record.getMessage()\n )"
},
{
"identifier": "ModuleSpec",
"path": "octo/utils/spec.py",
"snippet": "class ModuleSpec(TypedDict):\n \"\"\"A JSON-serializable representation of a function or class with some default args and kwargs to pass to\n it. Useful for specifying a particular class or function in a config file, while keeping it serializable\n and overridable from the command line using ml_collections.\n\n Usage:\n\n # Preferred way to create a spec:\n >>> from octo.model.components.transformer import Transformer\n >>> spec = ModuleSpec.create(Transformer, num_layers=3)\n # Same as above using the fully qualified import string:\n >>> spec = ModuleSpec.create(\"octo.model.components.transformer:Transformer\", num_layers=3)\n\n # Usage:\n >>> ModuleSpec.instantiate(spec) == partial(Transformer, num_layers=3)\n # can pass additional kwargs at instantiation time\n >>> transformer = ModuleSpec.instantiate(spec, num_heads=8)\n\n Note: ModuleSpec is just an alias for a dictionary (that is strongly typed), not a real class. So from\n your code's perspective, it is just a dictionary.\n\n module (str): The module the callable is located in\n name (str): The name of the callable in the module\n args (tuple): The args to pass to the callable\n kwargs (dict): The kwargs to pass to the callable\n \"\"\"\n\n module: str\n name: str\n args: Tuple[Any, ...]\n kwargs: Dict[str, Any]\n\n @staticmethod\n def create(callable_or_full_name: Union[str, callable], *args, **kwargs) -> \"ModuleSpec\": # type: ignore\n \"\"\"Create a module spec from a callable or import string.\n\n Args:\n callable_or_full_name (str or object): Either the object itself or a fully qualified import string\n (e.g. \"octo.model.components.transformer:Transformer\")\n args (tuple, optional): Passed into callable upon instantiation.\n kwargs (dict, optional): Passed into callable upon instantiation.\n \"\"\"\n if isinstance(callable_or_full_name, str):\n assert callable_or_full_name.count(\":\") == 1, (\n \"If passing in a string, it must be a fully qualified import string \"\n \"(e.g. 'octo.model.components.transformer:Transformer')\"\n )\n module, name = callable_or_full_name.split(\":\")\n else:\n module, name = _infer_full_name(callable_or_full_name)\n\n return ModuleSpec(module=module, name=name, args=args, kwargs=kwargs)\n\n @staticmethod\n def instantiate(spec: \"ModuleSpec\"): # type: ignore\n if set(spec.keys()) != {\"module\", \"name\", \"args\", \"kwargs\"}:\n raise ValueError(\n f\"Expected ModuleSpec, but got {spec}. \"\n \"ModuleSpec must have keys 'module', 'name', 'args', and 'kwargs'.\"\n )\n cls = _import_from_string(spec[\"module\"], spec[\"name\"])\n return partial(cls, *spec[\"args\"], **spec[\"kwargs\"])"
},
{
"identifier": "RolloutVisualizationCallback",
"path": "octo/utils/train_callbacks.py",
"snippet": "class RolloutVisualizationCallback(Callback):\n visualizer_kwargs_list: Sequence[Mapping[str, Any]]\n text_processor: TextProcessor\n trajs_for_rollouts: int\n model_pred_horizon: int\n history_length: int\n modes_to_evaluate: str = (\"text_conditioned\", \"image_conditioned\")\n\n def __post_init__(self):\n self.zero_text = jax.tree_map(lambda x: x[0], self.text_processor.encode(\"\"))\n\n self.rollout_visualizers = [\n RolloutVisualizer(\n text_processor=self.text_processor,\n history_length=self.history_length,\n action_chunk=self.model_pred_horizon\n if \"pred_horizon\" not in kwargs\n else kwargs[\"pred_horizon\"],\n **kwargs,\n )\n for kwargs in self.visualizer_kwargs_list\n ]\n\n def __call__(self, train_state: TrainState, step: int):\n wandb_metrics = {}\n modal_policy_fns = {\n mode: partial(\n get_policy_sampled_actions,\n train_state,\n zero_text=self.zero_text,\n samples_per_state=1,\n policy_mode=mode,\n )\n for mode in self.modes_to_evaluate\n }\n for rollout_visualizer in self.rollout_visualizers:\n for mode, policy_fn in modal_policy_fns.items():\n logging.info(f\"Running rollouts for {rollout_visualizer.env_name}\")\n rollout_infos = rollout_visualizer.run_rollouts(\n policy_fn, n_rollouts=self.trajs_for_rollouts\n )\n wandb_metrics[\n f\"rollouts_{rollout_visualizer.env_name}_chunk{rollout_visualizer.action_chunk}/{mode}\"\n ] = rollout_infos\n\n return wandb_metrics"
},
{
"identifier": "SaveCallback",
"path": "octo/utils/train_callbacks.py",
"snippet": "class SaveCallback(Callback):\n \"\"\"Callback that saves checkpoints to `save_dir`. If `save_dir` is None, does nothing.\"\"\"\n\n save_dir: Optional[str]\n\n def __post_init__(self):\n if self.save_dir is not None:\n if not self.save_dir.startswith(\"gs://\"):\n self.save_dir = os.path.abspath(self.save_dir)\n if jax.process_index() == 0:\n tf.io.gfile.makedirs(self.save_dir)\n logging.info(f\"Created {self.save_dir}\")\n # make checkpointers\n # only keep latest full TrainState\n self.state_checkpointer = orbax.checkpoint.CheckpointManager(\n tf.io.gfile.join(self.save_dir, \"state\"),\n orbax.checkpoint.PyTreeCheckpointer(),\n options=orbax.checkpoint.CheckpointManagerOptions(\n max_to_keep=1,\n ),\n )\n # keep every params checkpoint\n self.params_checkpointer = orbax.checkpoint.CheckpointManager(\n self.save_dir,\n orbax.checkpoint.PyTreeCheckpointer(),\n )\n\n def __call__(self, train_state: TrainState, step: int):\n if self.save_dir is not None:\n train_state.model.save_pretrained(\n step, checkpoint_manager=self.params_checkpointer\n )\n self.state_checkpointer.save(\n step,\n train_state,\n {\"save_args\": orbax_utils.save_args_from_target(train_state)},\n )"
},
{
"identifier": "ValidationCallback",
"path": "octo/utils/train_callbacks.py",
"snippet": "class ValidationCallback(Callback):\n loss_fn: Callable\n process_batch_fn: Callable[[Data], Data]\n text_processor: Optional[TextProcessor]\n val_dataset_kwargs_list: Sequence[Mapping[str, Any]]\n dataset_kwargs: Mapping[str, Any]\n val_shuffle_buffer_size: int\n num_val_batches: int\n modes_to_evaluate: Sequence[str] = (\"text_conditioned\", \"image_conditioned\")\n train: bool = False\n\n def __post_init__(self):\n if self.text_processor is not None:\n self.zero_text = jax.tree_map(\n lambda x: x[0], self.text_processor.encode(\"\")\n )\n self.val_iterators = {}\n for single_dataset_kwargs in self.val_dataset_kwargs_list:\n val_dataset = create_validation_dataset(\n single_dataset_kwargs,\n self.dataset_kwargs[\"traj_transform_kwargs\"],\n self.dataset_kwargs[\"frame_transform_kwargs\"],\n train=self.train,\n )\n val_iterator = (\n val_dataset.unbatch()\n .shuffle(self.val_shuffle_buffer_size)\n .repeat()\n .batch(self.dataset_kwargs[\"batch_size\"])\n .iterator(prefetch=0)\n )\n val_iterator = map(self.process_batch_fn, val_iterator)\n self.val_iterators[single_dataset_kwargs[\"name\"]] = val_iterator\n\n @partial(\n jax.jit,\n out_shardings=jax.sharding.PositionalSharding(jax.devices()).replicate(),\n )\n def eval_step(state: TrainState, batch: Data):\n loss_fn_partial = partial(\n self.loss_fn,\n params=state.model.params,\n rng=state.rng,\n train=False,\n )\n all_tasks = {}\n\n if \"base\" in self.modes_to_evaluate:\n all_tasks[\"base\"] = batch[\"task\"]\n if \"image_conditioned\" in self.modes_to_evaluate:\n all_tasks[\"image_conditioned\"] = remove_text(\n batch[\"task\"], self.zero_text\n )\n if \"text_conditioned\" in self.modes_to_evaluate:\n all_tasks[\"text_conditioned\"] = remove_images(batch[\"task\"])\n\n if \"unconditioned\" in self.modes_to_evaluate:\n all_tasks[\"unconditioned\"] = remove_text(\n remove_images(batch[\"task\"]), self.zero_text\n )\n return {\n k: loss_fn_partial(batch=flax.core.copy(batch, {\"task\": tasks}))[1]\n for k, tasks in all_tasks.items()\n }\n\n self.eval_step = eval_step\n\n def __call__(self, train_state: TrainState, step: int):\n wandb_metrics = {}\n for name, val_data_iter in self.val_iterators.items():\n metrics = []\n for _, batch in tqdm.tqdm(\n zip(range(self.num_val_batches), val_data_iter),\n total=self.num_val_batches,\n desc=name,\n ):\n metrics.append(self.eval_step(train_state, batch))\n metrics = jax.tree_map(lambda *xs: np.mean(xs), *metrics)\n wandb_metrics[f\"validation_{name}\"] = metrics\n return wandb_metrics"
},
{
"identifier": "VisualizationCallback",
"path": "octo/utils/train_callbacks.py",
"snippet": "class VisualizationCallback(Callback):\n text_processor: TextProcessor\n val_dataset_kwargs_list: Sequence[Mapping[str, Any]]\n dataset_kwargs: Mapping[str, Any]\n eval_batch_size: int\n trajs_for_metrics: int\n trajs_for_viz: int\n samples_per_state: int\n modes_to_evaluate: str = (\"text_conditioned\", \"image_conditioned\")\n train: bool = False\n\n def __post_init__(self):\n self.zero_text = jax.tree_map(lambda x: x[0], self.text_processor.encode(\"\"))\n\n self.visualizers = {}\n for single_dataset_kwargs in self.val_dataset_kwargs_list:\n val_dataset = create_validation_dataset(\n single_dataset_kwargs,\n self.dataset_kwargs[\"traj_transform_kwargs\"],\n self.dataset_kwargs[\"frame_transform_kwargs\"],\n train=self.train,\n )\n self.visualizers[single_dataset_kwargs[\"name\"]] = Visualizer(\n val_dataset, text_processor=self.text_processor, freeze_trajs=False\n )\n\n def __call__(self, train_state: TrainState, step: int):\n wandb_metrics = {}\n modal_policy_fns = {\n mode: batched_apply(\n partial(\n get_policy_sampled_actions,\n train_state,\n zero_text=self.zero_text,\n samples_per_state=self.samples_per_state,\n policy_mode=mode,\n ),\n self.eval_batch_size,\n )\n for mode in self.modes_to_evaluate\n }\n\n for name, visualizer in self.visualizers.items():\n for mode, policy_fn in modal_policy_fns.items():\n if self.trajs_for_metrics > 0:\n raw_infos = visualizer.raw_evaluations(\n policy_fn, max_trajs=self.trajs_for_metrics\n )\n metrics = visualizer.metrics_for_wandb(raw_infos)\n wandb_metrics[f\"offline_metrics_{name}/{mode}\"] = metrics\n if self.trajs_for_viz > 0:\n images = visualizer.visualize_for_wandb(\n policy_fn, max_trajs=self.trajs_for_viz\n )\n wandb_metrics[f\"visualizations_{name}/{mode}\"] = images\n return wandb_metrics"
},
{
"identifier": "check_config_diff",
"path": "octo/utils/train_utils.py",
"snippet": "def check_config_diff(new_conf: Config, old_conf: Config, silent: bool = False):\n \"\"\"Checks for differences between new config and old config dicts.\"\"\"\n new_conf_flat = flax.traverse_util.flatten_dict(\n new_conf.to_dict() if isinstance(new_conf, ConfigDict) else new_conf\n )\n old_conf_flat = flax.traverse_util.flatten_dict(\n old_conf.to_dict() if isinstance(old_conf, ConfigDict) else old_conf\n )\n\n # check for missing / new keys\n if set(new_conf_flat.keys()) != set(old_conf_flat.keys()) and not silent:\n logging.info(\n \"New config contains extra items: %s\",\n set(new_conf_flat.keys()) - set(old_conf_flat.keys()),\n )\n logging.info(\n \"New config doesn't contain items: %s\",\n set(old_conf_flat.keys()) - set(new_conf_flat.keys()),\n )\n\n # print differing key values\n mismatched_keys = {\n k: (new_conf_flat[k], old_conf_flat[k])\n for k in new_conf_flat\n if k in old_conf_flat and new_conf_flat[k] != old_conf_flat[k]\n }\n if mismatched_keys and not silent:\n logging.info(\n \"New config contains keys with new values: %s\",\n flax.core.pretty_repr(mismatched_keys),\n )\n return mismatched_keys or (set(new_conf_flat.keys()) != set(old_conf_flat.keys()))"
},
{
"identifier": "create_optimizer",
"path": "octo/utils/train_utils.py",
"snippet": "def create_optimizer(\n params_or_params_shape: Params, **kwargs: dict\n) -> optax.GradientTransformation:\n \"\"\"Creates optimizer for Octo.\n\n kwargs are the kwargs for optax.adamw; if the \"learning_rate\" key is a dict, it is interpreted\n as the kwargs for create_lr_schedule (see above), otherwise it is interpreted as a constant\n learning rate.\n\n If clip_gradient is specified, then gradient clipping is applied. If frozen_keys is specified,\n then those parameters are frozen (i.e. not updated) during training.\n\n Returns:\n tx: an Optax optimizer\n lr_callable: Function that takes the current step and returns the learning rate\n \"\"\"\n if isinstance(kwargs[\"learning_rate\"], dict):\n lr_callable = create_lr_schedule(**kwargs[\"learning_rate\"])\n else:\n lr_callable = lambda _: kwargs[\"learning_rate\"]\n kwargs[\"learning_rate\"] = lr_callable\n\n # Following ViT, timm, MAE: this mask skips weight decay on biases and LayerNorm parameters\n wd_mask = jax.tree_util.tree_map_with_path(\n lambda path, x: \"kernel\" in jax.tree_util.keystr(path), params_or_params_shape\n )\n\n clip_gradient = kwargs.pop(\"clip_gradient\", None)\n frozen_keys = kwargs.pop(\"frozen_keys\", None)\n grad_accumulation_steps = kwargs.pop(\"grad_accumulation_steps\", None)\n\n tx = optax.adamw(mu_dtype=jnp.bfloat16, **kwargs, mask=wd_mask)\n if grad_accumulation_steps:\n tx = optax.MultiSteps(tx, grad_accumulation_steps)\n if clip_gradient is not None:\n tx = optax.chain(\n optax.clip_by_global_norm(clip_gradient),\n tx,\n )\n\n if frozen_keys:\n tx, param_partitions = freeze_weights(\n tx, params_or_params_shape, frozen_keys, return_partitions=True\n )\n zero_frozen_params = lambda params: jax.tree_map(\n lambda x, y: x if y == \"trainable\" else jnp.zeros(()),\n params,\n param_partitions,\n )\n param_norm_callable = lambda params: optax.global_norm(\n zero_frozen_params(params)\n )\n else:\n param_norm_callable = optax.global_norm\n\n return tx, lr_callable, param_norm_callable"
},
{
"identifier": "format_name_with_config",
"path": "octo/utils/train_utils.py",
"snippet": "def format_name_with_config(name, config):\n \"\"\"Formats a name string with a config dict.\n\n Formatting keys may be specified as {key} or {full_path_to_key_with_underscores}.\n\n Example:\n name = \"model_{model_type}_{model_size}\"\n config = {\"model_type\": \"transformer\", \"model_size\": \"small\"}\n format_name_with_config(name, config) -> \"model_transformer_small\"\n \"\"\"\n config_flat = flax.traverse_util.flatten_dict(config, sep=\"_\")\n config_final = {k.split(\"_\")[-1]: v for k, v in config_flat.items()}\n format_dict = {**config_final, **config_flat}\n return name.format(**format_dict)"
},
{
"identifier": "merge_params",
"path": "octo/utils/train_utils.py",
"snippet": "def merge_params(target_params: Params, pretrained_params: Params) -> Params:\n \"\"\"Copies pre-trained params into target_params for every param that has corresponding key + shape.\"\"\"\n flat_target_params = flax.traverse_util.flatten_dict(target_params)\n flat_pretrained_params = flax.traverse_util.flatten_dict(pretrained_params)\n keys_to_update = [\n k\n for k in flat_target_params\n if k in flat_pretrained_params\n and flat_target_params[k].shape == flat_pretrained_params[k].shape\n ]\n missing_keys = [k for k in flat_target_params if k not in flat_pretrained_params]\n shape_mismatch_keys = [\n k\n for k in flat_target_params\n if k in flat_pretrained_params\n and flat_target_params[k].shape != flat_pretrained_params[k].shape\n ]\n\n for key in keys_to_update:\n logging.debug(f\"Param copied from pre-trained: {'.'.join(key)}\")\n if missing_keys or shape_mismatch_keys:\n logging.info(\"########## Parameters skipped during model loading: ##########\")\n for key in missing_keys:\n logging.info(\n f\"Param missing in pre-trained model, skipping: {'.'.join(key)}\"\n )\n for key in shape_mismatch_keys:\n logging.info(\n f\"Param with differing shape in pre-trained model, skipping: {'.'.join(key)}\"\n )\n\n flat_target_params = flax.core.copy(\n flat_target_params, {k: flat_pretrained_params[k] for k in keys_to_update}\n )\n target_params = flax.traverse_util.unflatten_dict(flat_target_params)\n return target_params"
},
{
"identifier": "process_text",
"path": "octo/utils/train_utils.py",
"snippet": "def process_text(batch: Data, text_processor: Optional[TextProcessor]) -> Data:\n \"\"\"Encodes the language instruction inside the tasks for a batch.\n\n If the text processor is None, removes language entirely from the tasks.\n Expects batch to be a nested dictionary, where\n batch[\"task\"][\"language_instruction\"] is a sequence of byte strings\n \"\"\"\n if text_processor is None:\n batch[\"task\"].pop(\"language_instruction\")\n else:\n batch[\"task\"][\"language_instruction\"] = text_processor.encode(\n [s.decode(\"utf-8\") for s in batch[\"task\"][\"language_instruction\"]]\n )\n return batch"
},
{
"identifier": "Timer",
"path": "octo/utils/train_utils.py",
"snippet": "class Timer:\n \"\"\"\n Timer utility. Usage:\n\n timer = Timer()\n with timer(\"foo\"):\n do_something()\n\n timer.tick(\"bar\")\n do_something_else()\n timer.tock(\"bar\")\n\n timer.get_average_times() -> {\"foo\": 0.1, \"bar\": 0.2}\n \"\"\"\n\n def __init__(self):\n self.reset()\n\n @contextmanager\n def __call__(self, key):\n self.tick(key)\n try:\n yield None\n finally:\n self.tock(key)\n\n def reset(self):\n self.counts = defaultdict(int)\n self.times = defaultdict(float)\n self.start_times = {}\n\n def tick(self, key):\n if key in self.start_times:\n raise ValueError(f\"Timer is already ticking for key: {key}\")\n self.start_times[key] = time.time()\n\n def tock(self, key):\n if key not in self.start_times:\n raise ValueError(f\"Timer is not ticking for key: {key}\")\n self.counts[key] += 1\n self.times[key] += time.time() - self.start_times[key]\n del self.start_times[key]\n\n def get_average_times(self, reset=True):\n ret = {key: self.times[key] / self.counts[key] for key in self.counts}\n if reset:\n self.reset()\n return ret"
},
{
"identifier": "TrainState",
"path": "octo/utils/train_utils.py",
"snippet": "class TrainState:\n rng: PRNGKey\n model: OctoModel\n step: int\n opt_state: optax.OptState\n tx: optax.GradientTransformation = struct.field(pytree_node=False)\n\n @classmethod\n def create(\n cls,\n rng: PRNGKey,\n model: OctoModel,\n tx: optax.GradientTransformation,\n ):\n opt_state = tx.init(model.params)\n return cls(\n rng=rng,\n model=model,\n step=0,\n opt_state=opt_state,\n tx=tx,\n )\n\n def apply_gradients(self, *, grads, rng):\n updates, new_opt_state = self.tx.update(\n grads, self.opt_state, self.model.params\n )\n new_params = optax.apply_updates(self.model.params, updates)\n\n return self.replace(\n step=self.step + 1,\n model=self.model.replace(params=new_params),\n opt_state=new_opt_state,\n rng=rng,\n )"
}
] | import datetime
import imp
import os
import flax
import jax
import optax
import tensorflow as tf
import tqdm
import wandb
from functools import partial
from absl import app, flags, logging
from flax.traverse_util import flatten_dict
from jax.sharding import Mesh, NamedSharding, PartitionSpec
from ml_collections import config_flags, ConfigDict
from octo.data.dataset import make_single_dataset
from octo.model.octo_model import OctoModel
from octo.utils.jax_utils import initialize_compilation_cache
from octo.utils.spec import ModuleSpec
from octo.utils.train_callbacks import (
RolloutVisualizationCallback,
SaveCallback,
ValidationCallback,
VisualizationCallback,
)
from octo.utils.train_utils import (
check_config_diff,
create_optimizer,
format_name_with_config,
merge_params,
process_text,
Timer,
TrainState,
)
from jax_smi import initialise_tracking # type: ignore | 10,139 |
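# memory-usage tracking via jax_smi is optional; the try/except below skips it silently if the package is unavailable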
try:
initialise_tracking()
except ImportError:
    pass

FLAGS = flags.FLAGS
flags.DEFINE_string("name", "experiment", "Experiment name.")
flags.DEFINE_bool("debug", False, "Debug config (no wandb logging)")
default_config_file = os.path.join(
os.path.dirname(__file__), "configs/finetune_config.py"
)
config_flags.DEFINE_config_file(
"config",
default_config_file,
"File path to the training hyperparameter configuration.",
lock_config=False,
)


def main(_):
initialize_compilation_cache()
devices = jax.devices()
logging.info(
f"""
Octo Finetuning Script
======================
Pretrained model: {FLAGS.config.pretrained_path}
Finetuning Dataset: {FLAGS.config.dataset_kwargs.name}
Data dir: {FLAGS.config.dataset_kwargs.data_dir}
Task Modality: {FLAGS.config.modality}
Finetuning Mode: {FLAGS.config.finetuning_mode}
# Devices: {jax.device_count()}
Batch size: {FLAGS.config.batch_size} ({FLAGS.config.batch_size // len(devices) } per device)
# Steps: {FLAGS.config.num_steps}
"""
)
#########
#
# Setup Jax Data Parallelism
#
#########
assert (
FLAGS.config.batch_size % len(devices) == 0
), f"Batch size ({FLAGS.config.batch_size}) must be divisible by the number of devices ({len(devices)})"
assert (
FLAGS.config.viz_kwargs.eval_batch_size % len(devices) == 0
), f"Eval batch size ({FLAGS.config.viz_kwargs.eval_batch_size}) must be divisible by the number of devices ({len(devices)})"
# create a 1D mesh with a single axis named "batch"
mesh = Mesh(jax.devices(), axis_names="batch")
# Our batches will be data-parallel sharded -- each device will get a slice of the batch
dp_sharding = NamedSharding(mesh, PartitionSpec("batch"))
# Our model will be replicated across devices (we are only doing data parallelism, not model parallelism)
replicated_sharding = NamedSharding(mesh, PartitionSpec())
# prevent tensorflow from using GPU memory since it's only used for data loading
tf.config.set_visible_devices([], "GPU")
#########
#
# Setup WandB
#
#########
name = format_name_with_config(
FLAGS.name,
FLAGS.config.to_dict(),
)
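    # tag the run id with a timestamp so repeated launches log to distinct wandb runs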
wandb_id = "{name}_{time}".format(
name=name,
time=datetime.datetime.now().strftime("%Y%m%d_%H%M%S"),
)
wandb.init(
config=FLAGS.config.to_dict(),
id=wandb_id,
name=name,
mode="disabled" if FLAGS.debug else None,
**FLAGS.config.wandb,
)
#########
#
# Load Pretrained model + optionally modify config
#
#########
pretrained_model = OctoModel.load_pretrained(
FLAGS.config.pretrained_path,
step=FLAGS.config.pretrained_step,
)
flat_config = flax.traverse_util.flatten_dict(
pretrained_model.config, keep_empty_nodes=True
)
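    # drop any keys listed under config_delete_keys from the pretrained config (prefix match on flattened key paths)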
for d_key in flax.traverse_util.flatten_dict(
FLAGS.config.get("config_delete_keys", ConfigDict()).to_dict()
):
for c_key in list(flat_config.keys()):
if ".".join(c_key).startswith(".".join(d_key)):
del flat_config[c_key]
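    # rebuild the nested config dict and layer any user-supplied update_config values on top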
config = ConfigDict(flax.traverse_util.unflatten_dict(flat_config))
config.update(FLAGS.config.get("update_config", ConfigDict()))
config = config.to_dict()
|
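# memory-usage tracking via jax_smi is optional; the try/except below skips it silently if the package is unavailable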
try:
initialise_tracking()
except ImportError:
    pass

FLAGS = flags.FLAGS
flags.DEFINE_string("name", "experiment", "Experiment name.")
flags.DEFINE_bool("debug", False, "Debug config (no wandb logging)")
default_config_file = os.path.join(
os.path.dirname(__file__), "configs/finetune_config.py"
)
config_flags.DEFINE_config_file(
"config",
default_config_file,
"File path to the training hyperparameter configuration.",
lock_config=False,
)


def main(_):
initialize_compilation_cache()
devices = jax.devices()
logging.info(
f"""
Octo Finetuning Script
======================
Pretrained model: {FLAGS.config.pretrained_path}
Finetuning Dataset: {FLAGS.config.dataset_kwargs.name}
Data dir: {FLAGS.config.dataset_kwargs.data_dir}
Task Modality: {FLAGS.config.modality}
Finetuning Mode: {FLAGS.config.finetuning_mode}
# Devices: {jax.device_count()}
Batch size: {FLAGS.config.batch_size} ({FLAGS.config.batch_size // len(devices) } per device)
# Steps: {FLAGS.config.num_steps}
"""
)
#########
#
# Setup Jax Data Parallelism
#
#########
assert (
FLAGS.config.batch_size % len(devices) == 0
), f"Batch size ({FLAGS.config.batch_size}) must be divisible by the number of devices ({len(devices)})"
assert (
FLAGS.config.viz_kwargs.eval_batch_size % len(devices) == 0
), f"Eval batch size ({FLAGS.config.viz_kwargs.eval_batch_size}) must be divisible by the number of devices ({len(devices)})"
# create a 1D mesh with a single axis named "batch"
mesh = Mesh(jax.devices(), axis_names="batch")
# Our batches will be data-parallel sharded -- each device will get a slice of the batch
dp_sharding = NamedSharding(mesh, PartitionSpec("batch"))
# Our model will be replicated across devices (we are only doing data parallelism, not model parallelism)
replicated_sharding = NamedSharding(mesh, PartitionSpec())
# prevent tensorflow from using GPU memory since it's only used for data loading
tf.config.set_visible_devices([], "GPU")
#########
#
# Setup WandB
#
#########
name = format_name_with_config(
FLAGS.name,
FLAGS.config.to_dict(),
)
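    # tag the run id with a timestamp so repeated launches log to distinct wandb runs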
wandb_id = "{name}_{time}".format(
name=name,
time=datetime.datetime.now().strftime("%Y%m%d_%H%M%S"),
)
wandb.init(
config=FLAGS.config.to_dict(),
id=wandb_id,
name=name,
mode="disabled" if FLAGS.debug else None,
**FLAGS.config.wandb,
)
#########
#
# Load Pretrained model + optionally modify config
#
#########
pretrained_model = OctoModel.load_pretrained(
FLAGS.config.pretrained_path,
step=FLAGS.config.pretrained_step,
)
flat_config = flax.traverse_util.flatten_dict(
pretrained_model.config, keep_empty_nodes=True
)
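    # drop any keys listed under config_delete_keys from the pretrained config (prefix match on flattened key paths)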
for d_key in flax.traverse_util.flatten_dict(
FLAGS.config.get("config_delete_keys", ConfigDict()).to_dict()
):
for c_key in list(flat_config.keys()):
if ".".join(c_key).startswith(".".join(d_key)):
del flat_config[c_key]
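    # rebuild the nested config dict and layer any user-supplied update_config values on top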
config = ConfigDict(flax.traverse_util.unflatten_dict(flat_config))
config.update(FLAGS.config.get("update_config", ConfigDict()))
config = config.to_dict() | check_config_diff(config, pretrained_model.config) | 8 | 2023-12-13 09:58:56+00:00 | 12k |
modelscope/richdreamer | threestudio/systems/base.py | [
{
"identifier": "Exporter",
"path": "threestudio/models/exporters/base.py",
"snippet": "class Exporter(BaseObject):\n @dataclass\n class Config(BaseObject.Config):\n save_video: bool = False\n\n cfg: Config\n\n def configure(\n self,\n geometry: BaseImplicitGeometry,\n material: BaseMaterial,\n background: BaseBackground,\n ) -> None:\n @dataclass\n class SubModules:\n geometry: BaseImplicitGeometry\n material: BaseMaterial\n background: BaseBackground\n\n self.sub_modules = SubModules(geometry, material, background)\n\n @property\n def geometry(self) -> BaseImplicitGeometry:\n return self.sub_modules.geometry\n\n @property\n def material(self) -> BaseMaterial:\n return self.sub_modules.material\n\n @property\n def background(self) -> BaseBackground:\n return self.sub_modules.background\n\n def __call__(self, *args, **kwargs) -> List[ExporterOutput]:\n raise NotImplementedError"
},
{
"identifier": "ExporterOutput",
"path": "threestudio/models/exporters/base.py",
"snippet": "class ExporterOutput:\n save_name: str\n save_type: str\n params: Dict[str, Any]"
},
{
"identifier": "parse_optimizer",
"path": "threestudio/systems/utils.py",
"snippet": "def parse_optimizer(config, model):\n if hasattr(config, \"params\"):\n params = [\n {\"params\": get_parameters(model, name), \"name\": name, **args}\n for name, args in config.params.items()\n ]\n threestudio.debug(f\"Specify optimizer params: {config.params}\")\n else:\n params = model.parameters()\n if config.name in [\"FusedAdam\"]:\n import apex\n\n optim = getattr(apex.optimizers, config.name)(params, **config.args)\n elif config.name in [\"Adan\"]:\n from threestudio.systems import optimizers\n\n optim = getattr(optimizers, config.name)(params, **config.args)\n else:\n optim = getattr(torch.optim, config.name)(params, **config.args)\n return optim"
},
{
"identifier": "parse_scheduler",
"path": "threestudio/systems/utils.py",
"snippet": "def parse_scheduler(config, optimizer):\n interval = config.get(\"interval\", \"epoch\")\n assert interval in [\"epoch\", \"step\"]\n if config.name == \"SequentialLR\":\n scheduler = {\n \"scheduler\": lr_scheduler.SequentialLR(\n optimizer,\n [\n parse_scheduler(conf, optimizer)[\"scheduler\"]\n for conf in config.schedulers\n ],\n milestones=config.milestones,\n ),\n \"interval\": interval,\n }\n elif config.name == \"ChainedScheduler\":\n scheduler = {\n \"scheduler\": lr_scheduler.ChainedScheduler(\n [\n parse_scheduler(conf, optimizer)[\"scheduler\"]\n for conf in config.schedulers\n ]\n ),\n \"interval\": interval,\n }\n else:\n scheduler = {\n \"scheduler\": get_scheduler(config.name)(optimizer, **config.args),\n \"interval\": interval,\n }\n return scheduler"
},
{
"identifier": "Updateable",
"path": "threestudio/utils/base.py",
"snippet": "class Updateable:\n def do_update_step(\n self, epoch: int, global_step: int, on_load_weights: bool = False\n ):\n for attr in self.__dir__():\n if attr.startswith(\"_\"):\n continue\n try:\n module = getattr(self, attr)\n except:\n continue # ignore attributes like property, which can't be retrived using getattr?\n if isinstance(module, Updateable):\n module.do_update_step(\n epoch, global_step, on_load_weights=on_load_weights\n )\n self.update_step(epoch, global_step, on_load_weights=on_load_weights)\n\n def do_update_step_end(self, epoch: int, global_step: int):\n for attr in self.__dir__():\n if attr.startswith(\"_\"):\n continue\n try:\n module = getattr(self, attr)\n except:\n continue # ignore attributes like property, which can't be retrived using getattr?\n if isinstance(module, Updateable):\n module.do_update_step_end(epoch, global_step)\n self.update_step_end(epoch, global_step)\n\n def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False):\n # override this method to implement custom update logic\n # if on_load_weights is True, you should be careful doing things related to model evaluations,\n # as the models and tensors are not guarenteed to be on the same device\n pass\n\n def update_step_end(self, epoch: int, global_step: int):\n pass"
},
{
"identifier": "update_end_if_possible",
"path": "threestudio/utils/base.py",
"snippet": "def update_end_if_possible(module: Any, epoch: int, global_step: int) -> None:\n if isinstance(module, Updateable):\n module.do_update_step_end(epoch, global_step)"
},
{
"identifier": "update_if_possible",
"path": "threestudio/utils/base.py",
"snippet": "def update_if_possible(module: Any, epoch: int, global_step: int) -> None:\n if isinstance(module, Updateable):\n module.do_update_step(epoch, global_step)"
},
{
"identifier": "parse_structured",
"path": "threestudio/utils/config.py",
"snippet": "def parse_structured(fields: Any, cfg: Optional[Union[dict, DictConfig]] = None) -> Any:\n scfg = OmegaConf.structured(fields(**cfg))\n return scfg"
},
{
"identifier": "C",
"path": "threestudio/utils/misc.py",
"snippet": "def C(value: Any, epoch: int, global_step: int) -> float:\n if isinstance(value, int) or isinstance(value, float):\n pass\n else:\n value = config_to_primitive(value)\n if not isinstance(value, list):\n raise TypeError(\"Scalar specification only supports list, got\", type(value))\n if len(value) == 3:\n value = [0] + value\n assert len(value) == 4\n start_step, start_value, end_value, end_step = value\n if isinstance(end_step, int):\n current_step = global_step\n value = start_value + (end_value - start_value) * max(\n min(1.0, (current_step - start_step) / (end_step - start_step)), 0.0\n )\n elif isinstance(end_step, float):\n current_step = epoch\n value = start_value + (end_value - start_value) * max(\n min(1.0, (current_step - start_step) / (end_step - start_step)), 0.0\n )\n return value"
},
{
"identifier": "cleanup",
"path": "threestudio/utils/misc.py",
"snippet": "def cleanup():\n gc.collect()\n torch.cuda.empty_cache()\n tcnn.free_temporary_memory()"
},
{
"identifier": "get_device",
"path": "threestudio/utils/misc.py",
"snippet": "def get_device():\n return torch.device(f\"cuda:{get_rank()}\")"
},
{
"identifier": "load_module_weights",
"path": "threestudio/utils/misc.py",
"snippet": "def load_module_weights(\n path, module_name=None, ignore_modules=None, map_location=None\n) -> Tuple[dict, int, int]:\n if module_name is not None and ignore_modules is not None:\n raise ValueError(\"module_name and ignore_modules cannot be both set\")\n if map_location is None:\n map_location = get_device()\n\n ckpt = torch.load(path, map_location=map_location)\n state_dict = ckpt[\"state_dict\"]\n state_dict_to_load = state_dict\n\n if ignore_modules is not None:\n state_dict_to_load = {}\n for k, v in state_dict.items():\n ignore = any(\n [k.startswith(ignore_module + \".\") for ignore_module in ignore_modules]\n )\n if ignore:\n continue\n state_dict_to_load[k] = v\n\n if module_name is not None:\n state_dict_to_load = {}\n for k, v in state_dict.items():\n m = re.match(rf\"^{module_name}\\.(.*)$\", k)\n if m is None:\n continue\n state_dict_to_load[m.group(1)] = v\n\n return state_dict_to_load, ckpt[\"epoch\"], ckpt[\"global_step\"]"
},
{
"identifier": "SaverMixin",
"path": "threestudio/utils/saving.py",
"snippet": "class SaverMixin:\n _save_dir: Optional[str] = None\n _exp_root_save_dir: Optional[str] = None\n _wandb_logger: Optional[WandbLogger] = None\n\n def set_save_dir(self, save_dir: str):\n self._save_dir = save_dir\n\n def set_exp_root_dir(self, exp_root_dir: str):\n self._exp_root_save_dir = exp_root_dir\n\n def get_save_dir(self):\n if self._save_dir is None:\n raise ValueError(\"Save dir is not set\")\n return self._save_dir\n\n def get_exp_root_dir(self):\n if self._exp_root_save_dir is None:\n raise ValueError(\"exp root save dir dir is not set\")\n return self._exp_root_save_dir\n\n\n def convert_data(self, data):\n if data is None:\n return None\n elif isinstance(data, np.ndarray):\n return data\n elif isinstance(data, torch.Tensor):\n return data.detach().cpu().numpy()\n elif isinstance(data, list):\n return [self.convert_data(d) for d in data]\n elif isinstance(data, dict):\n return {k: self.convert_data(v) for k, v in data.items()}\n else:\n raise TypeError(\n \"Data must be in type numpy.ndarray, torch.Tensor, list or dict, getting\",\n type(data),\n )\n\n def get_save_path(self, filename):\n save_path = os.path.join(self.get_save_dir(), filename)\n os.makedirs(os.path.dirname(save_path), exist_ok=True)\n return save_path\n\n def create_loggers(self, cfg_loggers: DictConfig) -> None:\n if \"wandb\" in cfg_loggers.keys() and cfg_loggers.wandb.enable:\n self._wandb_logger = WandbLogger(\n project=cfg_loggers.wandb.project, name=cfg_loggers.wandb.name\n )\n\n def get_loggers(self) -> List:\n if self._wandb_logger:\n return [self._wandb_logger]\n else:\n return []\n\n DEFAULT_RGB_KWARGS = {\"data_format\": \"HWC\", \"data_range\": (0, 1)}\n DEFAULT_UV_KWARGS = {\n \"data_format\": \"HWC\",\n \"data_range\": (0, 1),\n \"cmap\": \"checkerboard\",\n }\n DEFAULT_GRAYSCALE_KWARGS = {\"data_range\": None, \"cmap\": \"jet\"}\n DEFAULT_GRID_KWARGS = {\"align\": \"max\"}\n\n def get_rgb_image_(self, img, data_format, data_range, rgba=False):\n img = self.convert_data(img)\n assert data_format in [\"CHW\", \"HWC\"]\n if data_format == \"CHW\":\n img = img.transpose(1, 2, 0)\n if img.dtype != np.uint8:\n img = img.clip(min=data_range[0], max=data_range[1])\n img = (\n (img - data_range[0]) / (data_range[1] - data_range[0]) * 255.0\n ).astype(np.uint8)\n nc = 4 if rgba else 3\n imgs = [img[..., start : start + nc] for start in range(0, img.shape[-1], nc)]\n imgs = [\n img_\n if img_.shape[-1] == nc\n else np.concatenate(\n [\n img_,\n np.zeros(\n (img_.shape[0], img_.shape[1], nc - img_.shape[2]),\n dtype=img_.dtype,\n ),\n ],\n axis=-1,\n )\n for img_ in imgs\n ]\n img = np.concatenate(imgs, axis=1)\n if rgba:\n img = cv2.cvtColor(img, cv2.COLOR_RGBA2BGRA)\n else:\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n return img\n\n def _save_rgb_image(\n self,\n filename,\n img,\n data_format,\n data_range,\n name: Optional[str] = None,\n step: Optional[int] = None,\n ):\n img = self.get_rgb_image_(img, data_format, data_range)\n cv2.imwrite(filename, img)\n if name and self._wandb_logger:\n wandb.log(\n {\n name: wandb.Image(self.get_save_path(filename)),\n \"trainer/global_step\": step,\n }\n )\n\n def save_rgb_image(\n self,\n filename,\n img,\n data_format=DEFAULT_RGB_KWARGS[\"data_format\"],\n data_range=DEFAULT_RGB_KWARGS[\"data_range\"],\n name: Optional[str] = None,\n step: Optional[int] = None,\n ) -> str:\n save_path = self.get_save_path(filename)\n self._save_rgb_image(save_path, img, data_format, data_range, name, step)\n return save_path\n\n def get_uv_image_(self, img, 
data_format, data_range, cmap):\n img = self.convert_data(img)\n assert data_format in [\"CHW\", \"HWC\"]\n if data_format == \"CHW\":\n img = img.transpose(1, 2, 0)\n img = img.clip(min=data_range[0], max=data_range[1])\n img = (img - data_range[0]) / (data_range[1] - data_range[0])\n assert cmap in [\"checkerboard\", \"color\"]\n if cmap == \"checkerboard\":\n n_grid = 64\n mask = (img * n_grid).astype(int)\n mask = (mask[..., 0] + mask[..., 1]) % 2 == 0\n img = np.ones((img.shape[0], img.shape[1], 3), dtype=np.uint8) * 255\n img[mask] = np.array([255, 0, 255], dtype=np.uint8)\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n elif cmap == \"color\":\n img_ = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)\n img_[..., 0] = (img[..., 0] * 255).astype(np.uint8)\n img_[..., 1] = (img[..., 1] * 255).astype(np.uint8)\n img_ = cv2.cvtColor(img_, cv2.COLOR_RGB2BGR)\n img = img_\n return img\n\n def save_uv_image(\n self,\n filename,\n img,\n data_format=DEFAULT_UV_KWARGS[\"data_format\"],\n data_range=DEFAULT_UV_KWARGS[\"data_range\"],\n cmap=DEFAULT_UV_KWARGS[\"cmap\"],\n ) -> str:\n save_path = self.get_save_path(filename)\n img = self.get_uv_image_(img, data_format, data_range, cmap)\n cv2.imwrite(save_path, img)\n return save_path\n\n def get_grayscale_image_(self, img, data_range, cmap):\n img = self.convert_data(img)\n img = np.nan_to_num(img)\n if data_range is None:\n img = (img - img.min()) / (img.max() - img.min())\n else:\n img = img.clip(data_range[0], data_range[1])\n img = (img - data_range[0]) / (data_range[1] - data_range[0])\n assert cmap in [None, \"jet\", \"magma\", \"spectral\"]\n if cmap == None:\n img = (img * 255.0).astype(np.uint8)\n img = np.repeat(img[..., None], 3, axis=2)\n elif cmap == \"jet\":\n img = (img * 255.0).astype(np.uint8)\n img = cv2.applyColorMap(img, cv2.COLORMAP_JET)\n elif cmap == \"magma\":\n img = 1.0 - img\n base = cm.get_cmap(\"magma\")\n num_bins = 256\n colormap = LinearSegmentedColormap.from_list(\n f\"{base.name}{num_bins}\", base(np.linspace(0, 1, num_bins)), num_bins\n )(np.linspace(0, 1, num_bins))[:, :3]\n a = np.floor(img * 255.0)\n b = (a + 1).clip(max=255.0)\n f = img * 255.0 - a\n a = a.astype(np.uint16).clip(0, 255)\n b = b.astype(np.uint16).clip(0, 255)\n img = colormap[a] + (colormap[b] - colormap[a]) * f[..., None]\n img = (img * 255.0).astype(np.uint8)\n elif cmap == \"spectral\":\n colormap = plt.get_cmap(\"Spectral\")\n\n def blend_rgba(image):\n image = image[..., :3] * image[..., -1:] + (\n 1.0 - image[..., -1:]\n ) # blend A to RGB\n return image\n\n img = colormap(img)\n img = blend_rgba(img)\n img = (img * 255).astype(np.uint8)\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n return img\n\n def _save_grayscale_image(\n self,\n filename,\n img,\n data_range,\n cmap,\n name: Optional[str] = None,\n step: Optional[int] = None,\n ):\n img = self.get_grayscale_image_(img, data_range, cmap)\n cv2.imwrite(filename, img)\n if name and self._wandb_logger:\n wandb.log(\n {\n name: wandb.Image(self.get_save_path(filename)),\n \"trainer/global_step\": step,\n }\n )\n\n def save_grayscale_image(\n self,\n filename,\n img,\n data_range=DEFAULT_GRAYSCALE_KWARGS[\"data_range\"],\n cmap=DEFAULT_GRAYSCALE_KWARGS[\"cmap\"],\n name: Optional[str] = None,\n step: Optional[int] = None,\n ) -> str:\n save_path = self.get_save_path(filename)\n self._save_grayscale_image(save_path, img, data_range, cmap, name, step)\n return save_path\n\n def get_image_grid_(self, imgs, align):\n if isinstance(imgs[0], list):\n return np.concatenate(\n 
[self.get_image_grid_(row, align) for row in imgs], axis=0\n )\n cols = []\n for col in imgs:\n assert col[\"type\"] in [\"rgb\", \"uv\", \"grayscale\"]\n if col[\"type\"] == \"rgb\":\n rgb_kwargs = self.DEFAULT_RGB_KWARGS.copy()\n rgb_kwargs.update(col[\"kwargs\"])\n cols.append(self.get_rgb_image_(col[\"img\"], **rgb_kwargs))\n elif col[\"type\"] == \"uv\":\n uv_kwargs = self.DEFAULT_UV_KWARGS.copy()\n uv_kwargs.update(col[\"kwargs\"])\n cols.append(self.get_uv_image_(col[\"img\"], **uv_kwargs))\n elif col[\"type\"] == \"grayscale\":\n grayscale_kwargs = self.DEFAULT_GRAYSCALE_KWARGS.copy()\n grayscale_kwargs.update(col[\"kwargs\"])\n cols.append(self.get_grayscale_image_(col[\"img\"], **grayscale_kwargs))\n\n if align == \"max\":\n h = max([col.shape[0] for col in cols])\n w = max([col.shape[1] for col in cols])\n elif align == \"min\":\n h = min([col.shape[0] for col in cols])\n w = min([col.shape[1] for col in cols])\n elif isinstance(align, int):\n h = align\n w = align\n elif (\n isinstance(align, tuple)\n and isinstance(align[0], int)\n and isinstance(align[1], int)\n ):\n h, w = align\n else:\n raise ValueError(\n f\"Unsupported image grid align: {align}, should be min, max, int or (int, int)\"\n )\n\n for i in range(len(cols)):\n if cols[i].shape[0] != h or cols[i].shape[1] != w:\n cols[i] = cv2.resize(cols[i], (w, h), interpolation=cv2.INTER_LINEAR)\n return np.concatenate(cols, axis=1)\n\n def save_image_grid(\n self,\n filename,\n imgs,\n align=DEFAULT_GRID_KWARGS[\"align\"],\n name: Optional[str] = None,\n step: Optional[int] = None,\n texts: Optional[List[float]] = None,\n ):\n save_path = self.get_save_path(filename)\n img = self.get_image_grid_(imgs, align=align)\n\n if texts is not None:\n img = Image.fromarray(img)\n draw = ImageDraw.Draw(img)\n black, white = (0, 0, 0), (255, 255, 255)\n for i, text in enumerate(texts):\n draw.text((2, (img.size[1] // len(texts)) * i + 1), f\"{text}\", white)\n draw.text((0, (img.size[1] // len(texts)) * i + 1), f\"{text}\", white)\n draw.text((2, (img.size[1] // len(texts)) * i - 1), f\"{text}\", white)\n draw.text((0, (img.size[1] // len(texts)) * i - 1), f\"{text}\", white)\n draw.text((1, (img.size[1] // len(texts)) * i), f\"{text}\", black)\n img = np.asarray(img)\n\n cv2.imwrite(save_path, img)\n if name and self._wandb_logger:\n wandb.log({name: wandb.Image(save_path), \"trainer/global_step\": step})\n\n save_vis_path = os.path.join(self.get_exp_root_dir(), \"vis.jpg\")\n os.makedirs(os.path.dirname(save_vis_path), exist_ok=True)\n cv2.imwrite(save_vis_path, img)\n return save_path\n\n def save_image(self, filename, img) -> str:\n save_path = self.get_save_path(filename)\n img = self.convert_data(img)\n assert img.dtype == np.uint8 or img.dtype == np.uint16\n if img.ndim == 3 and img.shape[-1] == 3:\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n elif img.ndim == 3 and img.shape[-1] == 4:\n img = cv2.cvtColor(img, cv2.COLOR_RGBA2BGRA)\n cv2.imwrite(save_path, img)\n return save_path\n\n def save_img_ffmpeg(self, save_path, fps=30, *img_handler_list):\n # fix the bug the string has \"'s\"\n command = 'ffmpeg -r {} -pattern_type glob -i \"{}\" -vcodec libx264 -crf 18 -vf \"pad=ceil(iw/2)*2:ceil(ih/2)*2\" -pix_fmt yuv420p {}'\n ffmpeg_list = []\n\n for handler_idx, img_handler in enumerate(img_handler_list):\n img_dir = os.path.join(self.get_save_dir(), img_handler, \"*.png\")\n\n tmp_file_name = os.path.join(\n self.get_save_dir(), \"tmp_{:04d}.mp4\".format(handler_idx)\n ).replace(\"'\", \"\\\\'\")\n img_dir = 
img_dir.replace(\"'\", \"\\\\'\")\n cmd = command.format(fps, img_dir, tmp_file_name)\n\n os.system(\"rm -rf {}\".format(tmp_file_name))\n\n os.system(cmd)\n ffmpeg_list.append(tmp_file_name)\n\n save_path = os.path.join(self.get_save_dir(), save_path).replace(\"'\", \"\\\\'\")\n\n os.system(\"rm -rf {}\".format(save_path))\n cmd = 'ffmpeg -i {} -i {} -filter_complex \"[0:v][1:v]concat=n=2:v=1:a=0\" -c:v libx264 -crf 23 -preset veryfast -c:a aac -b:a 128k {}'.format(\n ffmpeg_list[0], ffmpeg_list[1], save_path\n )\n\n os.system(cmd)\n\n def save_cubemap(self, filename, img, data_range=(0, 1), rgba=False) -> str:\n save_path = self.get_save_path(filename)\n img = self.convert_data(img)\n assert img.ndim == 4 and img.shape[0] == 6 and img.shape[1] == img.shape[2]\n\n imgs_full = []\n for start in range(0, img.shape[-1], 3):\n img_ = img[..., start : start + 3]\n img_ = np.stack(\n [\n self.get_rgb_image_(img_[i], \"HWC\", data_range, rgba=rgba)\n for i in range(img_.shape[0])\n ],\n axis=0,\n )\n size = img_.shape[1]\n placeholder = np.zeros((size, size, 3), dtype=np.float32)\n img_full = np.concatenate(\n [\n np.concatenate(\n [placeholder, img_[2], placeholder, placeholder], axis=1\n ),\n np.concatenate([img_[1], img_[4], img_[0], img_[5]], axis=1),\n np.concatenate(\n [placeholder, img_[3], placeholder, placeholder], axis=1\n ),\n ],\n axis=0,\n )\n imgs_full.append(img_full)\n\n imgs_full = np.concatenate(imgs_full, axis=1)\n cv2.imwrite(save_path, imgs_full)\n return save_path\n\n def save_data(self, filename, data) -> str:\n data = self.convert_data(data)\n if isinstance(data, dict):\n if not filename.endswith(\".npz\"):\n filename += \".npz\"\n save_path = self.get_save_path(filename)\n np.savez(save_path, **data)\n else:\n if not filename.endswith(\".npy\"):\n filename += \".npy\"\n save_path = self.get_save_path(filename)\n np.save(save_path, data)\n return save_path\n\n def save_state_dict(self, filename, data) -> str:\n save_path = self.get_save_path(filename)\n torch.save(data, save_path)\n return save_path\n\n def save_img_sequence(\n self,\n filename,\n img_dir,\n matcher,\n save_format=\"mp4\",\n fps=30,\n name: Optional[str] = None,\n step: Optional[int] = None,\n ) -> str:\n assert save_format in [\"gif\", \"mp4\"]\n if not filename.endswith(save_format):\n filename += f\".{save_format}\"\n save_path = self.get_save_path(filename)\n matcher = re.compile(matcher)\n img_dir = os.path.join(self.get_save_dir(), img_dir)\n imgs = []\n for f in os.listdir(img_dir):\n if matcher.search(f):\n imgs.append(f)\n imgs = sorted(imgs, key=lambda f: int(matcher.search(f).groups()[0]))\n imgs = [cv2.imread(os.path.join(img_dir, f)) for f in imgs]\n\n if save_format == \"gif\":\n imgs = [cv2.cvtColor(i, cv2.COLOR_BGR2RGB) for i in imgs]\n imageio.mimsave(save_path, imgs, fps=fps, palettesize=256)\n elif save_format == \"mp4\":\n imgs = [cv2.cvtColor(i, cv2.COLOR_BGR2RGB) for i in imgs]\n imageio.mimsave(save_path, imgs, fps=fps)\n if name and self._wandb_logger:\n wandb.log(\n {\n name: wandb.Video(save_path, format=\"mp4\"),\n \"trainer/global_step\": step,\n }\n )\n return save_path\n\n def save_mesh(self, filename, v_pos, t_pos_idx, v_tex=None, t_tex_idx=None) -> str:\n save_path = self.get_save_path(filename)\n v_pos = self.convert_data(v_pos)\n t_pos_idx = self.convert_data(t_pos_idx)\n mesh = trimesh.Trimesh(vertices=v_pos, faces=t_pos_idx)\n mesh.export(save_path)\n return save_path\n\n def save_obj(\n self,\n filename: str,\n mesh: Mesh,\n save_mat: bool = False,\n save_normal: bool 
= False,\n save_uv: bool = False,\n save_vertex_color: bool = False,\n map_Kd: Optional[Float[Tensor, \"H W 3\"]] = None,\n map_Ks: Optional[Float[Tensor, \"H W 3\"]] = None,\n map_Bump: Optional[Float[Tensor, \"H W 3\"]] = None,\n map_Pm: Optional[Float[Tensor, \"H W 1\"]] = None,\n map_Pr: Optional[Float[Tensor, \"H W 1\"]] = None,\n map_format: str = \"jpg\",\n ) -> List[str]:\n save_paths: List[str] = []\n if not filename.endswith(\".obj\"):\n filename += \".obj\"\n v_pos, t_pos_idx = self.convert_data(mesh.v_pos), self.convert_data(\n mesh.t_pos_idx\n )\n v_nrm, v_tex, t_tex_idx, v_rgb = None, None, None, None\n if save_normal:\n v_nrm = self.convert_data(mesh.v_nrm)\n if save_uv:\n v_tex, t_tex_idx = self.convert_data(mesh.v_tex), self.convert_data(\n mesh.t_tex_idx\n )\n if save_vertex_color:\n v_rgb = self.convert_data(mesh.v_rgb)\n matname, mtllib = None, None\n if save_mat:\n matname = \"default\"\n mtl_filename = filename.replace(\".obj\", \".mtl\")\n mtllib = os.path.basename(mtl_filename)\n mtl_save_paths = self._save_mtl(\n mtl_filename,\n matname,\n map_Kd=self.convert_data(map_Kd),\n map_Ks=self.convert_data(map_Ks),\n map_Bump=self.convert_data(map_Bump),\n map_Pm=self.convert_data(map_Pm),\n map_Pr=self.convert_data(map_Pr),\n map_format=map_format,\n )\n save_paths += mtl_save_paths\n obj_save_path = self._save_obj(\n filename,\n v_pos,\n t_pos_idx,\n v_nrm=v_nrm,\n v_tex=v_tex,\n t_tex_idx=t_tex_idx,\n v_rgb=v_rgb,\n matname=matname,\n mtllib=mtllib,\n )\n save_fix_dir = os.path.dirname(obj_save_path) + \"_fix\"\n save_fix_obj_path = os.path.join(save_fix_dir, os.path.basename(obj_save_path))\n os.makedirs(save_fix_dir, exist_ok=True)\n fix_mesh(obj_save_path, save_fix_obj_path)\n save_paths.append(obj_save_path)\n return save_paths\n\n def _save_obj(\n self,\n filename,\n v_pos,\n t_pos_idx,\n v_nrm=None,\n v_tex=None,\n t_tex_idx=None,\n v_rgb=None,\n matname=None,\n mtllib=None,\n ) -> str:\n obj_str = \"\"\n if matname is not None:\n obj_str += f\"mtllib {mtllib}\\n\"\n obj_str += f\"g object\\n\"\n obj_str += f\"usemtl {matname}\\n\"\n for i in range(len(v_pos)):\n obj_str += f\"v {v_pos[i][0]} {v_pos[i][1]} {v_pos[i][2]}\"\n if v_rgb is not None:\n obj_str += f\" {v_rgb[i][0]} {v_rgb[i][1]} {v_rgb[i][2]}\"\n obj_str += \"\\n\"\n if v_nrm is not None:\n for v in v_nrm:\n obj_str += f\"vn {v[0]} {v[1]} {v[2]}\\n\"\n if v_tex is not None:\n for v in v_tex:\n obj_str += f\"vt {v[0]} {1.0 - v[1]}\\n\"\n\n for i in range(len(t_pos_idx)):\n obj_str += \"f\"\n for j in range(3):\n obj_str += f\" {t_pos_idx[i][j] + 1}/\"\n if v_tex is not None:\n obj_str += f\"{t_tex_idx[i][j] + 1}\"\n obj_str += \"/\"\n if v_nrm is not None:\n obj_str += f\"{t_pos_idx[i][j] + 1}\"\n obj_str += \"\\n\"\n\n save_path = self.get_save_path(filename)\n with open(save_path, \"w\") as f:\n f.write(obj_str)\n return save_path\n\n def _save_mtl(\n self,\n filename,\n matname,\n Ka=(0.0, 0.0, 0.0),\n Kd=(1.0, 1.0, 1.0),\n Ks=(0.0, 0.0, 0.0),\n map_Kd=None,\n map_Ks=None,\n map_Bump=None,\n map_Pm=None,\n map_Pr=None,\n map_format=\"jpg\",\n step: Optional[int] = None,\n ) -> List[str]:\n mtl_save_path = self.get_save_path(filename)\n save_paths = [mtl_save_path]\n mtl_str = f\"newmtl {matname}\\n\"\n mtl_str += f\"Ka {Ka[0]} {Ka[1]} {Ka[2]}\\n\"\n if map_Kd is not None:\n map_Kd_save_path = os.path.join(\n os.path.dirname(mtl_save_path), f\"texture_kd.{map_format}\"\n )\n mtl_str += f\"map_Kd texture_kd.{map_format}\\n\"\n self._save_rgb_image(\n map_Kd_save_path,\n map_Kd,\n data_format=\"HWC\",\n 
data_range=(0, 1),\n name=f\"{matname}_Kd\",\n step=step,\n )\n save_paths.append(map_Kd_save_path)\n else:\n mtl_str += f\"Kd {Kd[0]} {Kd[1]} {Kd[2]}\\n\"\n if map_Ks is not None:\n map_Ks_save_path = os.path.join(\n os.path.dirname(mtl_save_path), f\"texture_ks.{map_format}\"\n )\n mtl_str += f\"map_Ks texture_ks.{map_format}\\n\"\n self._save_rgb_image(\n map_Ks_save_path,\n map_Ks,\n data_format=\"HWC\",\n data_range=(0, 1),\n name=f\"{matname}_Ks\",\n step=step,\n )\n save_paths.append(map_Ks_save_path)\n else:\n mtl_str += f\"Ks {Ks[0]} {Ks[1]} {Ks[2]}\\n\"\n if map_Bump is not None:\n map_Bump_save_path = os.path.join(\n os.path.dirname(mtl_save_path), f\"texture_nrm.{map_format}\"\n )\n mtl_str += f\"map_Bump texture_nrm.{map_format}\\n\"\n self._save_rgb_image(\n map_Bump_save_path,\n map_Bump,\n data_format=\"HWC\",\n data_range=(0, 1),\n name=f\"{matname}_Bump\",\n step=step,\n )\n save_paths.append(map_Bump_save_path)\n if map_Pm is not None:\n map_Pm_save_path = os.path.join(\n os.path.dirname(mtl_save_path), f\"texture_metallic.{map_format}\"\n )\n mtl_str += f\"map_Pm texture_metallic.{map_format}\\n\"\n self._save_grayscale_image(\n map_Pm_save_path,\n map_Pm,\n data_range=(0, 1),\n cmap=None,\n name=f\"{matname}_refl\",\n step=step,\n )\n save_paths.append(map_Pm_save_path)\n if map_Pr is not None:\n map_Pr_save_path = os.path.join(\n os.path.dirname(mtl_save_path), f\"texture_roughness.{map_format}\"\n )\n mtl_str += f\"map_Pr texture_roughness.{map_format}\\n\"\n self._save_grayscale_image(\n map_Pr_save_path,\n map_Pr,\n data_range=(0, 1),\n cmap=None,\n name=f\"{matname}_Ns\",\n step=step,\n )\n save_paths.append(map_Pr_save_path)\n with open(self.get_save_path(filename), \"w\") as f:\n f.write(mtl_str)\n return save_paths\n\n def save_file(self, filename, src_path) -> str:\n save_path = self.get_save_path(filename)\n shutil.copyfile(src_path, save_path)\n return save_path\n\n def save_json(self, filename, payload) -> str:\n save_path = self.get_save_path(filename)\n with open(save_path, \"w\") as f:\n f.write(json.dumps(payload))\n return save_path"
}
] | import os
import pytorch_lightning as pl
import torch.nn.functional as F
import threestudio
from dataclasses import dataclass, field
from threestudio.models.exporters.base import Exporter, ExporterOutput
from threestudio.systems.utils import parse_optimizer, parse_scheduler
from threestudio.utils.base import (Updateable, update_end_if_possible,
update_if_possible,)
from threestudio.utils.config import parse_structured
from threestudio.utils.misc import C, cleanup, get_device, load_module_weights
from threestudio.utils.saving import SaverMixin
from threestudio.utils.typing import *
from threestudio.utils.config import load_config, parse_structured | 10,184 |
def set_resume_status(self, current_epoch: int, global_step: int):
# restore correct epoch and global step in eval
self._resumed_eval = True
self._resumed_eval_status["current_epoch"] = current_epoch
self._resumed_eval_status["global_step"] = global_step
@property
def resumed(self):
# whether from resumed checkpoint
return self._resumed
@property
def true_global_step(self):
if self._resumed_eval:
return self._resumed_eval_status["global_step"]
else:
return self.global_step
@property
def true_current_epoch(self):
if self._resumed_eval:
return self._resumed_eval_status["current_epoch"]
else:
return self.current_epoch
def configure(self) -> None:
pass
def post_configure(self) -> None:
"""
executed after weights are loaded
"""
pass
def C(self, value: Any) -> float:
return C(value, self.true_current_epoch, self.true_global_step)
def configure_optimizers(self):
optim = parse_optimizer(self.cfg.optimizer, self)
ret = {
"optimizer": optim,
}
if self.cfg.scheduler is not None:
ret.update(
{
"lr_scheduler": parse_scheduler(self.cfg.scheduler, optim),
}
)
return ret
def training_step(self, batch, batch_idx):
raise NotImplementedError
def validation_step(self, batch, batch_idx):
raise NotImplementedError
def on_train_batch_end(self, outputs, batch, batch_idx):
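        # Notify the train dataset that a step has ended, using resumed-aware epoch/step values.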
self.dataset = self.trainer.train_dataloader.dataset
update_end_if_possible(
self.dataset, self.true_current_epoch, self.true_global_step
)
self.do_update_step_end(self.true_current_epoch, self.true_global_step)
def on_validation_batch_end(self, outputs, batch, batch_idx):
self.dataset = self.trainer.val_dataloaders.dataset
update_end_if_possible(
self.dataset, self.true_current_epoch, self.true_global_step
)
self.do_update_step_end(self.true_current_epoch, self.true_global_step)
if self.cfg.cleanup_after_validation_step:
# cleanup to save vram
cleanup()
def on_validation_epoch_end(self):
raise NotImplementedError
def test_step(self, batch, batch_idx):
raise NotImplementedError
def on_test_batch_end(self, outputs, batch, batch_idx):
self.dataset = self.trainer.test_dataloaders.dataset
update_end_if_possible(
self.dataset, self.true_current_epoch, self.true_global_step
)
self.do_update_step_end(self.true_current_epoch, self.true_global_step)
if self.cfg.cleanup_after_test_step:
# cleanup to save vram
cleanup()
def on_test_epoch_end(self):
pass
def predict_step(self, batch, batch_idx):
raise NotImplementedError
def on_predict_batch_end(self, outputs, batch, batch_idx):
self.dataset = self.trainer.predict_dataloaders.dataset
update_end_if_possible(
self.dataset, self.true_current_epoch, self.true_global_step
)
self.do_update_step_end(self.true_current_epoch, self.true_global_step)
if self.cfg.cleanup_after_test_step:
# cleanup to save vram
cleanup()
def on_predict_epoch_end(self):
pass
def preprocess_data(self, batch, stage):
pass
"""
    Implementing on_after_batch_transfer of the DataModule would achieve the same effect,
    but on_after_batch_transfer does not support DP (DataParallel).
"""
def on_train_batch_start(self, batch, batch_idx, unused=0):
self.preprocess_data(batch, "train")
self.dataset = self.trainer.train_dataloader.dataset
|
class BaseSystem(pl.LightningModule, Updateable, SaverMixin):
@dataclass
class Config:
loggers: dict = field(default_factory=dict)
loss: dict = field(default_factory=dict)
optimizer: dict = field(default_factory=dict)
scheduler: Optional[dict] = None
weights: Optional[str] = None
weights_ignore_modules: Optional[List[str]] = None
cleanup_after_validation_step: bool = False
cleanup_after_test_step: bool = False
cfg: Config
def __init__(self, cfg, resumed=False) -> None:
super().__init__()
self.cfg = parse_structured(self.Config, cfg)
self._save_dir: Optional[str] = None
self._resumed: bool = resumed
self._resumed_eval: bool = False
self._resumed_eval_status: dict = {"global_step": 0, "current_epoch": 0}
if "loggers" in cfg:
self.create_loggers(cfg.loggers)
self.configure()
if self.cfg.weights is not None:
self.load_weights(self.cfg.weights, self.cfg.weights_ignore_modules)
self.post_configure()
def load_weights(self, weights: str, ignore_modules: Optional[List[str]] = None):
state_dict, epoch, global_step = load_module_weights(
weights, ignore_modules=ignore_modules, map_location="cpu"
)
self.load_state_dict(state_dict, strict=False)
# restore step-dependent states
self.do_update_step(epoch, global_step, on_load_weights=True)
def set_resume_status(self, current_epoch: int, global_step: int):
# restore correct epoch and global step in eval
self._resumed_eval = True
self._resumed_eval_status["current_epoch"] = current_epoch
self._resumed_eval_status["global_step"] = global_step
@property
def resumed(self):
# whether from resumed checkpoint
return self._resumed
@property
def true_global_step(self):
if self._resumed_eval:
return self._resumed_eval_status["global_step"]
else:
return self.global_step
@property
def true_current_epoch(self):
if self._resumed_eval:
return self._resumed_eval_status["current_epoch"]
else:
return self.current_epoch
def configure(self) -> None:
pass
def post_configure(self) -> None:
"""
executed after weights are loaded
"""
pass
def C(self, value: Any) -> float:
return C(value, self.true_current_epoch, self.true_global_step)
def configure_optimizers(self):
optim = parse_optimizer(self.cfg.optimizer, self)
ret = {
"optimizer": optim,
}
if self.cfg.scheduler is not None:
ret.update(
{
"lr_scheduler": parse_scheduler(self.cfg.scheduler, optim),
}
)
return ret
def training_step(self, batch, batch_idx):
raise NotImplementedError
def validation_step(self, batch, batch_idx):
raise NotImplementedError
def on_train_batch_end(self, outputs, batch, batch_idx):
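        # Notify the train dataset that a step has ended, using resumed-aware epoch/step values.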
self.dataset = self.trainer.train_dataloader.dataset
update_end_if_possible(
self.dataset, self.true_current_epoch, self.true_global_step
)
self.do_update_step_end(self.true_current_epoch, self.true_global_step)
def on_validation_batch_end(self, outputs, batch, batch_idx):
self.dataset = self.trainer.val_dataloaders.dataset
update_end_if_possible(
self.dataset, self.true_current_epoch, self.true_global_step
)
self.do_update_step_end(self.true_current_epoch, self.true_global_step)
if self.cfg.cleanup_after_validation_step:
# cleanup to save vram
cleanup()
def on_validation_epoch_end(self):
raise NotImplementedError
def test_step(self, batch, batch_idx):
raise NotImplementedError
def on_test_batch_end(self, outputs, batch, batch_idx):
self.dataset = self.trainer.test_dataloaders.dataset
update_end_if_possible(
self.dataset, self.true_current_epoch, self.true_global_step
)
self.do_update_step_end(self.true_current_epoch, self.true_global_step)
if self.cfg.cleanup_after_test_step:
# cleanup to save vram
cleanup()
def on_test_epoch_end(self):
pass
def predict_step(self, batch, batch_idx):
raise NotImplementedError
def on_predict_batch_end(self, outputs, batch, batch_idx):
self.dataset = self.trainer.predict_dataloaders.dataset
update_end_if_possible(
self.dataset, self.true_current_epoch, self.true_global_step
)
self.do_update_step_end(self.true_current_epoch, self.true_global_step)
if self.cfg.cleanup_after_test_step:
# cleanup to save vram
cleanup()
def on_predict_epoch_end(self):
pass
def preprocess_data(self, batch, stage):
pass
"""
    Implementing on_after_batch_transfer of the DataModule would achieve the same effect,
    but on_after_batch_transfer does not support DP (DataParallel).
"""
def on_train_batch_start(self, batch, batch_idx, unused=0):
self.preprocess_data(batch, "train")
self.dataset = self.trainer.train_dataloader.dataset | update_if_possible(self.dataset, self.true_current_epoch, self.true_global_step) | 6 | 2023-12-06 07:53:11+00:00 | 12k |
rehg-lab/RAVE | annotator/oneformer/detectron2/modeling/mmdet_wrapper.py | [
{
"identifier": "ShapeSpec",
"path": "annotator/oneformer/detectron2/layers/shape_spec.py",
"snippet": "class ShapeSpec:\r\n \"\"\"\r\n A simple structure that contains basic shape specification about a tensor.\r\n It is often used as the auxiliary inputs/outputs of models,\r\n to complement the lack of shape inference ability among pytorch modules.\r\n \"\"\"\r\n\r\n channels: Optional[int] = None\r\n height: Optional[int] = None\r\n width: Optional[int] = None\r\n stride: Optional[int] = None\r"
},
{
"identifier": "Boxes",
"path": "annotator/oneformer/detectron2/structures/boxes.py",
"snippet": "class Boxes:\r\n \"\"\"\r\n This structure stores a list of boxes as a Nx4 torch.Tensor.\r\n It supports some common methods about boxes\r\n (`area`, `clip`, `nonempty`, etc),\r\n and also behaves like a Tensor\r\n (support indexing, `to(device)`, `.device`, and iteration over all boxes)\r\n\r\n Attributes:\r\n tensor (torch.Tensor): float matrix of Nx4. Each row is (x1, y1, x2, y2).\r\n \"\"\"\r\n\r\n def __init__(self, tensor: torch.Tensor):\r\n \"\"\"\r\n Args:\r\n tensor (Tensor[float]): a Nx4 matrix. Each row is (x1, y1, x2, y2).\r\n \"\"\"\r\n if not isinstance(tensor, torch.Tensor):\r\n tensor = torch.as_tensor(tensor, dtype=torch.float32, device=torch.device(\"cpu\"))\r\n else:\r\n tensor = tensor.to(torch.float32)\r\n if tensor.numel() == 0:\r\n # Use reshape, so we don't end up creating a new tensor that does not depend on\r\n # the inputs (and consequently confuses jit)\r\n tensor = tensor.reshape((-1, 4)).to(dtype=torch.float32)\r\n assert tensor.dim() == 2 and tensor.size(-1) == 4, tensor.size()\r\n\r\n self.tensor = tensor\r\n\r\n def clone(self) -> \"Boxes\":\r\n \"\"\"\r\n Clone the Boxes.\r\n\r\n Returns:\r\n Boxes\r\n \"\"\"\r\n return Boxes(self.tensor.clone())\r\n\r\n def to(self, device: torch.device):\r\n # Boxes are assumed float32 and does not support to(dtype)\r\n return Boxes(self.tensor.to(device=device))\r\n\r\n def area(self) -> torch.Tensor:\r\n \"\"\"\r\n Computes the area of all the boxes.\r\n\r\n Returns:\r\n torch.Tensor: a vector with areas of each box.\r\n \"\"\"\r\n box = self.tensor\r\n area = (box[:, 2] - box[:, 0]) * (box[:, 3] - box[:, 1])\r\n return area\r\n\r\n def clip(self, box_size: Tuple[int, int]) -> None:\r\n \"\"\"\r\n Clip (in place) the boxes by limiting x coordinates to the range [0, width]\r\n and y coordinates to the range [0, height].\r\n\r\n Args:\r\n box_size (height, width): The clipping box's size.\r\n \"\"\"\r\n assert torch.isfinite(self.tensor).all(), \"Box tensor contains infinite or NaN!\"\r\n h, w = box_size\r\n x1 = self.tensor[:, 0].clamp(min=0, max=w)\r\n y1 = self.tensor[:, 1].clamp(min=0, max=h)\r\n x2 = self.tensor[:, 2].clamp(min=0, max=w)\r\n y2 = self.tensor[:, 3].clamp(min=0, max=h)\r\n self.tensor = torch.stack((x1, y1, x2, y2), dim=-1)\r\n\r\n def nonempty(self, threshold: float = 0.0) -> torch.Tensor:\r\n \"\"\"\r\n Find boxes that are non-empty.\r\n A box is considered empty, if either of its side is no larger than threshold.\r\n\r\n Returns:\r\n Tensor:\r\n a binary vector which represents whether each box is empty\r\n (False) or non-empty (True).\r\n \"\"\"\r\n box = self.tensor\r\n widths = box[:, 2] - box[:, 0]\r\n heights = box[:, 3] - box[:, 1]\r\n keep = (widths > threshold) & (heights > threshold)\r\n return keep\r\n\r\n def __getitem__(self, item) -> \"Boxes\":\r\n \"\"\"\r\n Args:\r\n item: int, slice, or a BoolTensor\r\n\r\n Returns:\r\n Boxes: Create a new :class:`Boxes` by indexing.\r\n\r\n The following usage are allowed:\r\n\r\n 1. `new_boxes = boxes[3]`: return a `Boxes` which contains only one box.\r\n 2. `new_boxes = boxes[2:10]`: return a slice of boxes.\r\n 3. `new_boxes = boxes[vector]`, where vector is a torch.BoolTensor\r\n with `length = len(boxes)`. 
Nonzero elements in the vector will be selected.\r\n\r\n Note that the returned Boxes might share storage with this Boxes,\r\n subject to Pytorch's indexing semantics.\r\n \"\"\"\r\n if isinstance(item, int):\r\n return Boxes(self.tensor[item].view(1, -1))\r\n b = self.tensor[item]\r\n assert b.dim() == 2, \"Indexing on Boxes with {} failed to return a matrix!\".format(item)\r\n return Boxes(b)\r\n\r\n def __len__(self) -> int:\r\n return self.tensor.shape[0]\r\n\r\n def __repr__(self) -> str:\r\n return \"Boxes(\" + str(self.tensor) + \")\"\r\n\r\n def inside_box(self, box_size: Tuple[int, int], boundary_threshold: int = 0) -> torch.Tensor:\r\n \"\"\"\r\n Args:\r\n box_size (height, width): Size of the reference box.\r\n boundary_threshold (int): Boxes that extend beyond the reference box\r\n boundary by more than boundary_threshold are considered \"outside\".\r\n\r\n Returns:\r\n a binary vector, indicating whether each box is inside the reference box.\r\n \"\"\"\r\n height, width = box_size\r\n inds_inside = (\r\n (self.tensor[..., 0] >= -boundary_threshold)\r\n & (self.tensor[..., 1] >= -boundary_threshold)\r\n & (self.tensor[..., 2] < width + boundary_threshold)\r\n & (self.tensor[..., 3] < height + boundary_threshold)\r\n )\r\n return inds_inside\r\n\r\n def get_centers(self) -> torch.Tensor:\r\n \"\"\"\r\n Returns:\r\n The box centers in a Nx2 array of (x, y).\r\n \"\"\"\r\n return (self.tensor[:, :2] + self.tensor[:, 2:]) / 2\r\n\r\n def scale(self, scale_x: float, scale_y: float) -> None:\r\n \"\"\"\r\n Scale the box with horizontal and vertical scaling factors\r\n \"\"\"\r\n self.tensor[:, 0::2] *= scale_x\r\n self.tensor[:, 1::2] *= scale_y\r\n\r\n @classmethod\r\n def cat(cls, boxes_list: List[\"Boxes\"]) -> \"Boxes\":\r\n \"\"\"\r\n Concatenates a list of Boxes into a single Boxes\r\n\r\n Arguments:\r\n boxes_list (list[Boxes])\r\n\r\n Returns:\r\n Boxes: the concatenated Boxes\r\n \"\"\"\r\n assert isinstance(boxes_list, (list, tuple))\r\n if len(boxes_list) == 0:\r\n return cls(torch.empty(0))\r\n assert all([isinstance(box, Boxes) for box in boxes_list])\r\n\r\n # use torch.cat (v.s. layers.cat) so the returned boxes never share storage with input\r\n cat_boxes = cls(torch.cat([b.tensor for b in boxes_list], dim=0))\r\n return cat_boxes\r\n\r\n @property\r\n def device(self) -> device:\r\n return self.tensor.device\r\n\r\n # type \"Iterator[torch.Tensor]\", yield, and iter() not supported by torchscript\r\n # https://github.com/pytorch/pytorch/issues/18627\r\n @torch.jit.unused\r\n def __iter__(self):\r\n \"\"\"\r\n Yield a box as a Tensor of shape (4,) at a time.\r\n \"\"\"\r\n yield from self.tensor\r"
},
{
"identifier": "ImageList",
"path": "annotator/oneformer/detectron2/structures/image_list.py",
"snippet": "class ImageList(object):\r\n \"\"\"\r\n Structure that holds a list of images (of possibly\r\n varying sizes) as a single tensor.\r\n This works by padding the images to the same size.\r\n The original sizes of each image is stored in `image_sizes`.\r\n\r\n Attributes:\r\n image_sizes (list[tuple[int, int]]): each tuple is (h, w).\r\n During tracing, it becomes list[Tensor] instead.\r\n \"\"\"\r\n\r\n def __init__(self, tensor: torch.Tensor, image_sizes: List[Tuple[int, int]]):\r\n \"\"\"\r\n Arguments:\r\n tensor (Tensor): of shape (N, H, W) or (N, C_1, ..., C_K, H, W) where K >= 1\r\n image_sizes (list[tuple[int, int]]): Each tuple is (h, w). It can\r\n be smaller than (H, W) due to padding.\r\n \"\"\"\r\n self.tensor = tensor\r\n self.image_sizes = image_sizes\r\n\r\n def __len__(self) -> int:\r\n return len(self.image_sizes)\r\n\r\n def __getitem__(self, idx) -> torch.Tensor:\r\n \"\"\"\r\n Access the individual image in its original size.\r\n\r\n Args:\r\n idx: int or slice\r\n\r\n Returns:\r\n Tensor: an image of shape (H, W) or (C_1, ..., C_K, H, W) where K >= 1\r\n \"\"\"\r\n size = self.image_sizes[idx]\r\n return self.tensor[idx, ..., : size[0], : size[1]]\r\n\r\n @torch.jit.unused\r\n def to(self, *args: Any, **kwargs: Any) -> \"ImageList\":\r\n cast_tensor = self.tensor.to(*args, **kwargs)\r\n return ImageList(cast_tensor, self.image_sizes)\r\n\r\n @property\r\n def device(self) -> device:\r\n return self.tensor.device\r\n\r\n @staticmethod\r\n def from_tensors(\r\n tensors: List[torch.Tensor],\r\n size_divisibility: int = 0,\r\n pad_value: float = 0.0,\r\n padding_constraints: Optional[Dict[str, int]] = None,\r\n ) -> \"ImageList\":\r\n \"\"\"\r\n Args:\r\n tensors: a tuple or list of `torch.Tensor`, each of shape (Hi, Wi) or\r\n (C_1, ..., C_K, Hi, Wi) where K >= 1. 
The Tensors will be padded\r\n to the same shape with `pad_value`.\r\n size_divisibility (int): If `size_divisibility > 0`, add padding to ensure\r\n the common height and width is divisible by `size_divisibility`.\r\n This depends on the model and many models need a divisibility of 32.\r\n pad_value (float): value to pad.\r\n padding_constraints (optional[Dict]): If given, it would follow the format as\r\n {\"size_divisibility\": int, \"square_size\": int}, where `size_divisibility` will\r\n overwrite the above one if presented and `square_size` indicates the\r\n square padding size if `square_size` > 0.\r\n Returns:\r\n an `ImageList`.\r\n \"\"\"\r\n assert len(tensors) > 0\r\n assert isinstance(tensors, (tuple, list))\r\n for t in tensors:\r\n assert isinstance(t, torch.Tensor), type(t)\r\n assert t.shape[:-2] == tensors[0].shape[:-2], t.shape\r\n\r\n image_sizes = [(im.shape[-2], im.shape[-1]) for im in tensors]\r\n image_sizes_tensor = [shapes_to_tensor(x) for x in image_sizes]\r\n max_size = torch.stack(image_sizes_tensor).max(0).values\r\n\r\n if padding_constraints is not None:\r\n square_size = padding_constraints.get(\"square_size\", 0)\r\n if square_size > 0:\r\n # pad to square.\r\n max_size[0] = max_size[1] = square_size\r\n if \"size_divisibility\" in padding_constraints:\r\n size_divisibility = padding_constraints[\"size_divisibility\"]\r\n if size_divisibility > 1:\r\n stride = size_divisibility\r\n # the last two dims are H,W, both subject to divisibility requirement\r\n max_size = (max_size + (stride - 1)).div(stride, rounding_mode=\"floor\") * stride\r\n\r\n # handle weirdness of scripting and tracing ...\r\n if torch.jit.is_scripting():\r\n max_size: List[int] = max_size.to(dtype=torch.long).tolist()\r\n else:\r\n if torch.jit.is_tracing():\r\n image_sizes = image_sizes_tensor\r\n\r\n if len(tensors) == 1:\r\n # This seems slightly (2%) faster.\r\n # TODO: check whether it's faster for multiple images as well\r\n image_size = image_sizes[0]\r\n padding_size = [0, max_size[-1] - image_size[1], 0, max_size[-2] - image_size[0]]\r\n batched_imgs = F.pad(tensors[0], padding_size, value=pad_value).unsqueeze_(0)\r\n else:\r\n # max_size can be a tensor in tracing mode, therefore convert to list\r\n batch_shape = [len(tensors)] + list(tensors[0].shape[:-2]) + list(max_size)\r\n device = (\r\n None if torch.jit.is_scripting() else (\"cpu\" if torch.jit.is_tracing() else None)\r\n )\r\n batched_imgs = tensors[0].new_full(batch_shape, pad_value, device=device)\r\n batched_imgs = move_device_like(batched_imgs, tensors[0])\r\n for i, img in enumerate(tensors):\r\n # Use `batched_imgs` directly instead of `img, pad_img = zip(tensors, batched_imgs)`\r\n # Tracing mode cannot capture `copy_()` of temporary locals\r\n batched_imgs[i, ..., : img.shape[-2], : img.shape[-1]].copy_(img)\r\n\r\n return ImageList(batched_imgs.contiguous(), image_sizes)\r"
},
{
"identifier": "Instances",
"path": "annotator/oneformer/detectron2/structures/instances.py",
"snippet": "class Instances:\r\n \"\"\"\r\n This class represents a list of instances in an image.\r\n It stores the attributes of instances (e.g., boxes, masks, labels, scores) as \"fields\".\r\n All fields must have the same ``__len__`` which is the number of instances.\r\n\r\n All other (non-field) attributes of this class are considered private:\r\n they must start with '_' and are not modifiable by a user.\r\n\r\n Some basic usage:\r\n\r\n 1. Set/get/check a field:\r\n\r\n .. code-block:: python\r\n\r\n instances.gt_boxes = Boxes(...)\r\n print(instances.pred_masks) # a tensor of shape (N, H, W)\r\n print('gt_masks' in instances)\r\n\r\n 2. ``len(instances)`` returns the number of instances\r\n 3. Indexing: ``instances[indices]`` will apply the indexing on all the fields\r\n and returns a new :class:`Instances`.\r\n Typically, ``indices`` is a integer vector of indices,\r\n or a binary mask of length ``num_instances``\r\n\r\n .. code-block:: python\r\n\r\n category_3_detections = instances[instances.pred_classes == 3]\r\n confident_detections = instances[instances.scores > 0.9]\r\n \"\"\"\r\n\r\n def __init__(self, image_size: Tuple[int, int], **kwargs: Any):\r\n \"\"\"\r\n Args:\r\n image_size (height, width): the spatial size of the image.\r\n kwargs: fields to add to this `Instances`.\r\n \"\"\"\r\n self._image_size = image_size\r\n self._fields: Dict[str, Any] = {}\r\n for k, v in kwargs.items():\r\n self.set(k, v)\r\n\r\n @property\r\n def image_size(self) -> Tuple[int, int]:\r\n \"\"\"\r\n Returns:\r\n tuple: height, width\r\n \"\"\"\r\n return self._image_size\r\n\r\n def __setattr__(self, name: str, val: Any) -> None:\r\n if name.startswith(\"_\"):\r\n super().__setattr__(name, val)\r\n else:\r\n self.set(name, val)\r\n\r\n def __getattr__(self, name: str) -> Any:\r\n if name == \"_fields\" or name not in self._fields:\r\n raise AttributeError(\"Cannot find field '{}' in the given Instances!\".format(name))\r\n return self._fields[name]\r\n\r\n def set(self, name: str, value: Any) -> None:\r\n \"\"\"\r\n Set the field named `name` to `value`.\r\n The length of `value` must be the number of instances,\r\n and must agree with other existing fields in this object.\r\n \"\"\"\r\n with warnings.catch_warnings(record=True):\r\n data_len = len(value)\r\n if len(self._fields):\r\n assert (\r\n len(self) == data_len\r\n ), \"Adding a field of length {} to a Instances of length {}\".format(data_len, len(self))\r\n self._fields[name] = value\r\n\r\n def has(self, name: str) -> bool:\r\n \"\"\"\r\n Returns:\r\n bool: whether the field called `name` exists.\r\n \"\"\"\r\n return name in self._fields\r\n\r\n def remove(self, name: str) -> None:\r\n \"\"\"\r\n Remove the field called `name`.\r\n \"\"\"\r\n del self._fields[name]\r\n\r\n def get(self, name: str) -> Any:\r\n \"\"\"\r\n Returns the field called `name`.\r\n \"\"\"\r\n return self._fields[name]\r\n\r\n def get_fields(self) -> Dict[str, Any]:\r\n \"\"\"\r\n Returns:\r\n dict: a dict which maps names (str) to data of the fields\r\n\r\n Modifying the returned dict will modify this instance.\r\n \"\"\"\r\n return self._fields\r\n\r\n # Tensor-like methods\r\n def to(self, *args: Any, **kwargs: Any) -> \"Instances\":\r\n \"\"\"\r\n Returns:\r\n Instances: all fields are called with a `to(device)`, if the field has this method.\r\n \"\"\"\r\n ret = Instances(self._image_size)\r\n for k, v in self._fields.items():\r\n if hasattr(v, \"to\"):\r\n v = v.to(*args, **kwargs)\r\n ret.set(k, v)\r\n return ret\r\n\r\n def __getitem__(self, 
item: Union[int, slice, torch.BoolTensor]) -> \"Instances\":\r\n \"\"\"\r\n Args:\r\n item: an index-like object and will be used to index all the fields.\r\n\r\n Returns:\r\n If `item` is a string, return the data in the corresponding field.\r\n Otherwise, returns an `Instances` where all fields are indexed by `item`.\r\n \"\"\"\r\n if type(item) == int:\r\n if item >= len(self) or item < -len(self):\r\n raise IndexError(\"Instances index out of range!\")\r\n else:\r\n item = slice(item, None, len(self))\r\n\r\n ret = Instances(self._image_size)\r\n for k, v in self._fields.items():\r\n ret.set(k, v[item])\r\n return ret\r\n\r\n def __len__(self) -> int:\r\n for v in self._fields.values():\r\n # use __len__ because len() has to be int and is not friendly to tracing\r\n return v.__len__()\r\n raise NotImplementedError(\"Empty Instances does not support __len__!\")\r\n\r\n def __iter__(self):\r\n raise NotImplementedError(\"`Instances` object is not iterable!\")\r\n\r\n @staticmethod\r\n def cat(instance_lists: List[\"Instances\"]) -> \"Instances\":\r\n \"\"\"\r\n Args:\r\n instance_lists (list[Instances])\r\n\r\n Returns:\r\n Instances\r\n \"\"\"\r\n assert all(isinstance(i, Instances) for i in instance_lists)\r\n assert len(instance_lists) > 0\r\n if len(instance_lists) == 1:\r\n return instance_lists[0]\r\n\r\n image_size = instance_lists[0].image_size\r\n if not isinstance(image_size, torch.Tensor): # could be a tensor in tracing\r\n for i in instance_lists[1:]:\r\n assert i.image_size == image_size\r\n ret = Instances(image_size)\r\n for k in instance_lists[0]._fields.keys():\r\n values = [i.get(k) for i in instance_lists]\r\n v0 = values[0]\r\n if isinstance(v0, torch.Tensor):\r\n values = torch.cat(values, dim=0)\r\n elif isinstance(v0, list):\r\n values = list(itertools.chain(*values))\r\n elif hasattr(type(v0), \"cat\"):\r\n values = type(v0).cat(values)\r\n else:\r\n raise ValueError(\"Unsupported type {} for concatenation\".format(type(v0)))\r\n ret.set(k, values)\r\n return ret\r\n\r\n def __str__(self) -> str:\r\n s = self.__class__.__name__ + \"(\"\r\n s += \"num_instances={}, \".format(len(self))\r\n s += \"image_height={}, \".format(self._image_size[0])\r\n s += \"image_width={}, \".format(self._image_size[1])\r\n s += \"fields=[{}])\".format(\", \".join((f\"{k}: {v}\" for k, v in self._fields.items())))\r\n return s\r\n\r\n __repr__ = __str__\r"
},
{
"identifier": "BitMasks",
"path": "annotator/oneformer/detectron2/structures/masks.py",
"snippet": "class BitMasks:\r\n \"\"\"\r\n This class stores the segmentation masks for all objects in one image, in\r\n the form of bitmaps.\r\n\r\n Attributes:\r\n tensor: bool Tensor of N,H,W, representing N instances in the image.\r\n \"\"\"\r\n\r\n def __init__(self, tensor: Union[torch.Tensor, np.ndarray]):\r\n \"\"\"\r\n Args:\r\n tensor: bool Tensor of N,H,W, representing N instances in the image.\r\n \"\"\"\r\n if isinstance(tensor, torch.Tensor):\r\n tensor = tensor.to(torch.bool)\r\n else:\r\n tensor = torch.as_tensor(tensor, dtype=torch.bool, device=torch.device(\"cpu\"))\r\n assert tensor.dim() == 3, tensor.size()\r\n self.image_size = tensor.shape[1:]\r\n self.tensor = tensor\r\n\r\n @torch.jit.unused\r\n def to(self, *args: Any, **kwargs: Any) -> \"BitMasks\":\r\n return BitMasks(self.tensor.to(*args, **kwargs))\r\n\r\n @property\r\n def device(self) -> torch.device:\r\n return self.tensor.device\r\n\r\n @torch.jit.unused\r\n def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> \"BitMasks\":\r\n \"\"\"\r\n Returns:\r\n BitMasks: Create a new :class:`BitMasks` by indexing.\r\n\r\n The following usage are allowed:\r\n\r\n 1. `new_masks = masks[3]`: return a `BitMasks` which contains only one mask.\r\n 2. `new_masks = masks[2:10]`: return a slice of masks.\r\n 3. `new_masks = masks[vector]`, where vector is a torch.BoolTensor\r\n with `length = len(masks)`. Nonzero elements in the vector will be selected.\r\n\r\n Note that the returned object might share storage with this object,\r\n subject to Pytorch's indexing semantics.\r\n \"\"\"\r\n if isinstance(item, int):\r\n return BitMasks(self.tensor[item].unsqueeze(0))\r\n m = self.tensor[item]\r\n assert m.dim() == 3, \"Indexing on BitMasks with {} returns a tensor with shape {}!\".format(\r\n item, m.shape\r\n )\r\n return BitMasks(m)\r\n\r\n @torch.jit.unused\r\n def __iter__(self) -> torch.Tensor:\r\n yield from self.tensor\r\n\r\n @torch.jit.unused\r\n def __repr__(self) -> str:\r\n s = self.__class__.__name__ + \"(\"\r\n s += \"num_instances={})\".format(len(self.tensor))\r\n return s\r\n\r\n def __len__(self) -> int:\r\n return self.tensor.shape[0]\r\n\r\n def nonempty(self) -> torch.Tensor:\r\n \"\"\"\r\n Find masks that are non-empty.\r\n\r\n Returns:\r\n Tensor: a BoolTensor which represents\r\n whether each mask is empty (False) or non-empty (True).\r\n \"\"\"\r\n return self.tensor.flatten(1).any(dim=1)\r\n\r\n @staticmethod\r\n def from_polygon_masks(\r\n polygon_masks: Union[\"PolygonMasks\", List[List[np.ndarray]]], height: int, width: int\r\n ) -> \"BitMasks\":\r\n \"\"\"\r\n Args:\r\n polygon_masks (list[list[ndarray]] or PolygonMasks)\r\n height, width (int)\r\n \"\"\"\r\n if isinstance(polygon_masks, PolygonMasks):\r\n polygon_masks = polygon_masks.polygons\r\n masks = [polygons_to_bitmask(p, height, width) for p in polygon_masks]\r\n if len(masks):\r\n return BitMasks(torch.stack([torch.from_numpy(x) for x in masks]))\r\n else:\r\n return BitMasks(torch.empty(0, height, width, dtype=torch.bool))\r\n\r\n @staticmethod\r\n def from_roi_masks(roi_masks: \"ROIMasks\", height: int, width: int) -> \"BitMasks\":\r\n \"\"\"\r\n Args:\r\n roi_masks:\r\n height, width (int):\r\n \"\"\"\r\n return roi_masks.to_bitmasks(height, width)\r\n\r\n def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor:\r\n \"\"\"\r\n Crop each bitmask by the given box, and resize results to (mask_size, mask_size).\r\n This can be used to prepare training targets for Mask R-CNN.\r\n It has less 
reconstruction error compared to rasterization with polygons.\r\n However we observe no difference in accuracy,\r\n but BitMasks requires more memory to store all the masks.\r\n\r\n Args:\r\n boxes (Tensor): Nx4 tensor storing the boxes for each mask\r\n mask_size (int): the size of the rasterized mask.\r\n\r\n Returns:\r\n Tensor:\r\n A bool tensor of shape (N, mask_size, mask_size), where\r\n N is the number of predicted boxes for this image.\r\n \"\"\"\r\n assert len(boxes) == len(self), \"{} != {}\".format(len(boxes), len(self))\r\n device = self.tensor.device\r\n\r\n batch_inds = torch.arange(len(boxes), device=device).to(dtype=boxes.dtype)[:, None]\r\n rois = torch.cat([batch_inds, boxes], dim=1) # Nx5\r\n\r\n bit_masks = self.tensor.to(dtype=torch.float32)\r\n rois = rois.to(device=device)\r\n output = (\r\n ROIAlign((mask_size, mask_size), 1.0, 0, aligned=True)\r\n .forward(bit_masks[:, None, :, :], rois)\r\n .squeeze(1)\r\n )\r\n output = output >= 0.5\r\n return output\r\n\r\n def get_bounding_boxes(self) -> Boxes:\r\n \"\"\"\r\n Returns:\r\n Boxes: tight bounding boxes around bitmasks.\r\n If a mask is empty, it's bounding box will be all zero.\r\n \"\"\"\r\n boxes = torch.zeros(self.tensor.shape[0], 4, dtype=torch.float32)\r\n x_any = torch.any(self.tensor, dim=1)\r\n y_any = torch.any(self.tensor, dim=2)\r\n for idx in range(self.tensor.shape[0]):\r\n x = torch.where(x_any[idx, :])[0]\r\n y = torch.where(y_any[idx, :])[0]\r\n if len(x) > 0 and len(y) > 0:\r\n boxes[idx, :] = torch.as_tensor(\r\n [x[0], y[0], x[-1] + 1, y[-1] + 1], dtype=torch.float32\r\n )\r\n return Boxes(boxes)\r\n\r\n @staticmethod\r\n def cat(bitmasks_list: List[\"BitMasks\"]) -> \"BitMasks\":\r\n \"\"\"\r\n Concatenates a list of BitMasks into a single BitMasks\r\n\r\n Arguments:\r\n bitmasks_list (list[BitMasks])\r\n\r\n Returns:\r\n BitMasks: the concatenated BitMasks\r\n \"\"\"\r\n assert isinstance(bitmasks_list, (list, tuple))\r\n assert len(bitmasks_list) > 0\r\n assert all(isinstance(bitmask, BitMasks) for bitmask in bitmasks_list)\r\n\r\n cat_bitmasks = type(bitmasks_list[0])(torch.cat([bm.tensor for bm in bitmasks_list], dim=0))\r\n return cat_bitmasks\r"
},
{
"identifier": "get_event_storage",
"path": "annotator/oneformer/detectron2/utils/events.py",
"snippet": "def get_event_storage():\r\n \"\"\"\r\n Returns:\r\n The :class:`EventStorage` object that's currently being used.\r\n Throws an error if no :class:`EventStorage` is currently enabled.\r\n \"\"\"\r\n assert len(\r\n _CURRENT_STORAGE_STACK\r\n ), \"get_event_storage() has to be called inside a 'with EventStorage(...)' context!\"\r\n return _CURRENT_STORAGE_STACK[-1]\r"
},
{
"identifier": "Backbone",
"path": "annotator/oneformer/detectron2/modeling/backbone/backbone.py",
"snippet": "class Backbone(nn.Module, metaclass=ABCMeta):\r\n \"\"\"\r\n Abstract base class for network backbones.\r\n \"\"\"\r\n\r\n def __init__(self):\r\n \"\"\"\r\n The `__init__` method of any subclass can specify its own set of arguments.\r\n \"\"\"\r\n super().__init__()\r\n\r\n @abstractmethod\r\n def forward(self):\r\n \"\"\"\r\n Subclasses must override this method, but adhere to the same return type.\r\n\r\n Returns:\r\n dict[str->Tensor]: mapping from feature name (e.g., \"res2\") to tensor\r\n \"\"\"\r\n pass\r\n\r\n @property\r\n def size_divisibility(self) -> int:\r\n \"\"\"\r\n Some backbones require the input height and width to be divisible by a\r\n specific integer. This is typically true for encoder / decoder type networks\r\n with lateral connection (e.g., FPN) for which feature maps need to match\r\n dimension in the \"bottom up\" and \"top down\" paths. Set to 0 if no specific\r\n input size divisibility is required.\r\n \"\"\"\r\n return 0\r\n\r\n @property\r\n def padding_constraints(self) -> Dict[str, int]:\r\n \"\"\"\r\n This property is a generalization of size_divisibility. Some backbones and training\r\n recipes require specific padding constraints, such as enforcing divisibility by a specific\r\n integer (e.g., FPN) or padding to a square (e.g., ViTDet with large-scale jitter\r\n in :paper:vitdet). `padding_constraints` contains these optional items like:\r\n {\r\n \"size_divisibility\": int,\r\n \"square_size\": int,\r\n # Future options are possible\r\n }\r\n `size_divisibility` will read from here if presented and `square_size` indicates the\r\n square padding size if `square_size` > 0.\r\n\r\n TODO: use type of Dict[str, int] to avoid torchscipt issues. The type of padding_constraints\r\n could be generalized as TypedDict (Python 3.8+) to support more types in the future.\r\n \"\"\"\r\n return {}\r\n\r\n def output_shape(self):\r\n \"\"\"\r\n Returns:\r\n dict[str->ShapeSpec]\r\n \"\"\"\r\n # this is a backward-compatible default\r\n return {\r\n name: ShapeSpec(\r\n channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]\r\n )\r\n for name in self._out_features\r\n }\r"
}
] | import itertools
import logging
import numpy as np
import torch
from collections import OrderedDict
from collections.abc import Mapping
from typing import Dict, List, Optional, Tuple, Union
from omegaconf import DictConfig, OmegaConf
from torch import Tensor, nn
from annotator.oneformer.detectron2.layers import ShapeSpec
from annotator.oneformer.detectron2.structures import BitMasks, Boxes, ImageList, Instances
from annotator.oneformer.detectron2.utils.events import get_event_storage
from .backbone import Backbone
from mmcv.utils import ConfigDict
from mmdet.models import build_backbone
from mmdet.models import build_neck
from mmdet.models import build_detector
from mmdet.core import PolygonMasks as mm_PolygonMasks, BitmapMasks as mm_BitMasks
| 9,484 | raise ValueError(
"Length of output_shapes does not match outputs from the mmdet backbone: "
f"{len(outs)} != {len(self._output_shapes)}"
)
return {k: v for k, v in zip(self._output_names, outs)}
def output_shape(self) -> Dict[str, ShapeSpec]:
return {k: v for k, v in zip(self._output_names, self._output_shapes)}
class MMDetDetector(nn.Module):
"""
Wrapper of a mmdetection detector model, for detection and instance segmentation.
Input/output formats of this class follow detectron2's convention, so a
mmdetection model can be trained and evaluated in detectron2.
"""
def __init__(
self,
detector: Union[nn.Module, Mapping],
*,
# Default is 32 regardless of model:
# https://github.com/open-mmlab/mmdetection/tree/master/configs/_base_/datasets
size_divisibility=32,
pixel_mean: Tuple[float],
pixel_std: Tuple[float],
):
"""
Args:
detector: a mmdet detector, or a mmdet config dict that defines a detector.
size_divisibility: pad input images to multiple of this number
pixel_mean: per-channel mean to normalize input image
pixel_std: per-channel stddev to normalize input image
"""
super().__init__()
if isinstance(detector, Mapping):
detector = build_detector(_to_container(detector))
self.detector = detector
self.detector.init_weights()
self.size_divisibility = size_divisibility
self.register_buffer("pixel_mean", torch.tensor(pixel_mean).view(-1, 1, 1), False)
self.register_buffer("pixel_std", torch.tensor(pixel_std).view(-1, 1, 1), False)
assert (
self.pixel_mean.shape == self.pixel_std.shape
), f"{self.pixel_mean} and {self.pixel_std} have different shapes!"
def forward(self, batched_inputs: List[Dict[str, torch.Tensor]]):
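        # Detectron2-style preprocessing: normalize by pixel mean/std, then pad images to a common size.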
images = [x["image"].to(self.device) for x in batched_inputs]
images = [(x - self.pixel_mean) / self.pixel_std for x in images]
images = ImageList.from_tensors(images, size_divisibility=self.size_divisibility).tensor
metas = []
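        # Rescale outputs to the original resolution only if every input provides "height"/"width"; mixed inputs are an error.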
rescale = {"height" in x for x in batched_inputs}
if len(rescale) != 1:
raise ValueError("Some inputs have original height/width, but some don't!")
rescale = list(rescale)[0]
output_shapes = []
for input in batched_inputs:
meta = {}
c, h, w = input["image"].shape
meta["img_shape"] = meta["ori_shape"] = (h, w, c)
if rescale:
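                # Ratio of the network-input size to the original size, laid out as (w, h, w, h) for box rescaling.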
scale_factor = np.array(
[w / input["width"], h / input["height"]] * 2, dtype="float32"
)
ori_shape = (input["height"], input["width"])
output_shapes.append(ori_shape)
meta["ori_shape"] = ori_shape + (c,)
else:
scale_factor = 1.0
output_shapes.append((h, w))
meta["scale_factor"] = scale_factor
meta["flip"] = False
padh, padw = images.shape[-2:]
meta["pad_shape"] = (padh, padw, c)
metas.append(meta)
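        # Training: forward_train returns mmdet losses/metrics; inference: simple_test results are converted to detectron2 Instances.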
if self.training:
gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
if gt_instances[0].has("gt_masks"):
def convert_mask(m, shape):
                    # convert detectron2 mask structures to mmdet's mask classes
if isinstance(m, BitMasks):
return mm_BitMasks(m.tensor.cpu().numpy(), shape[0], shape[1])
else:
return mm_PolygonMasks(m.polygons, shape[0], shape[1])
gt_masks = [convert_mask(x.gt_masks, x.image_size) for x in gt_instances]
losses_and_metrics = self.detector.forward_train(
images,
metas,
[x.gt_boxes.tensor for x in gt_instances],
[x.gt_classes for x in gt_instances],
gt_masks=gt_masks,
)
else:
losses_and_metrics = self.detector.forward_train(
images,
metas,
[x.gt_boxes.tensor for x in gt_instances],
[x.gt_classes for x in gt_instances],
)
return _parse_losses(losses_and_metrics)
else:
results = self.detector.simple_test(images, metas, rescale=rescale)
results = [
{"instances": _convert_mmdet_result(r, shape)}
for r, shape in zip(results, output_shapes)
]
return results
@property
def device(self):
return self.pixel_mean.device
# Reference: show_result() in
# https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/detectors/base.py
| # Copyright (c) Facebook, Inc. and its affiliates.
logger = logging.getLogger(__name__)
def _to_container(cfg):
"""
mmdet will assert the type of dict/list.
So convert omegaconf objects to dict/list.
"""
if isinstance(cfg, DictConfig):
cfg = OmegaConf.to_container(cfg, resolve=True)
return ConfigDict(cfg)
class MMDetBackbone(Backbone):
"""
Wrapper of mmdetection backbones to use in detectron2.
mmdet backbones produce list/tuple of tensors, while detectron2 backbones
produce a dict of tensors. This class wraps the given backbone to produce
output in detectron2's convention, so it can be used in place of detectron2
backbones.
"""
def __init__(
self,
backbone: Union[nn.Module, Mapping],
neck: Union[nn.Module, Mapping, None] = None,
*,
output_shapes: List[ShapeSpec],
output_names: Optional[List[str]] = None,
):
"""
Args:
backbone: either a backbone module or a mmdet config dict that defines a
backbone. The backbone takes a 4D image tensor and returns a
sequence of tensors.
neck: either a backbone module or a mmdet config dict that defines a
neck. The neck takes outputs of backbone and returns a
sequence of tensors. If None, no neck is used.
output_shapes: shape for every output of the backbone (or neck, if given).
stride and channels are often needed.
output_names: names for every output of the backbone (or neck, if given).
By default, will use "out0", "out1", ...
"""
super().__init__()
if isinstance(backbone, Mapping):
backbone = build_backbone(_to_container(backbone))
self.backbone = backbone
if isinstance(neck, Mapping):
neck = build_neck(_to_container(neck))
self.neck = neck
# "Neck" weights, if any, are part of neck itself. This is the interface
# of mmdet so we follow it. Reference:
# https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/detectors/two_stage.py
logger.info("Initializing mmdet backbone weights...")
self.backbone.init_weights()
# train() in mmdet modules is non-trivial, and has to be explicitly
# called. Reference:
# https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/backbones/resnet.py
self.backbone.train()
if self.neck is not None:
logger.info("Initializing mmdet neck weights ...")
if isinstance(self.neck, nn.Sequential):
for m in self.neck:
m.init_weights()
else:
self.neck.init_weights()
self.neck.train()
self._output_shapes = output_shapes
if not output_names:
output_names = [f"out{i}" for i in range(len(output_shapes))]
self._output_names = output_names
def forward(self, x) -> Dict[str, Tensor]:
outs = self.backbone(x)
if self.neck is not None:
outs = self.neck(outs)
assert isinstance(
outs, (list, tuple)
), "mmdet backbone should return a list/tuple of tensors!"
if len(outs) != len(self._output_shapes):
raise ValueError(
"Length of output_shapes does not match outputs from the mmdet backbone: "
f"{len(outs)} != {len(self._output_shapes)}"
)
return {k: v for k, v in zip(self._output_names, outs)}
def output_shape(self) -> Dict[str, ShapeSpec]:
return {k: v for k, v in zip(self._output_names, self._output_shapes)}
class MMDetDetector(nn.Module):
"""
Wrapper of a mmdetection detector model, for detection and instance segmentation.
Input/output formats of this class follow detectron2's convention, so a
mmdetection model can be trained and evaluated in detectron2.
"""
def __init__(
self,
detector: Union[nn.Module, Mapping],
*,
# Default is 32 regardless of model:
# https://github.com/open-mmlab/mmdetection/tree/master/configs/_base_/datasets
size_divisibility=32,
pixel_mean: Tuple[float],
pixel_std: Tuple[float],
):
"""
Args:
detector: a mmdet detector, or a mmdet config dict that defines a detector.
size_divisibility: pad input images to multiple of this number
pixel_mean: per-channel mean to normalize input image
pixel_std: per-channel stddev to normalize input image
"""
super().__init__()
if isinstance(detector, Mapping):
detector = build_detector(_to_container(detector))
self.detector = detector
self.detector.init_weights()
self.size_divisibility = size_divisibility
self.register_buffer("pixel_mean", torch.tensor(pixel_mean).view(-1, 1, 1), False)
self.register_buffer("pixel_std", torch.tensor(pixel_std).view(-1, 1, 1), False)
assert (
self.pixel_mean.shape == self.pixel_std.shape
), f"{self.pixel_mean} and {self.pixel_std} have different shapes!"
def forward(self, batched_inputs: List[Dict[str, torch.Tensor]]):
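        # Detectron2-style preprocessing: normalize by pixel mean/std, then pad images to a common size.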
images = [x["image"].to(self.device) for x in batched_inputs]
images = [(x - self.pixel_mean) / self.pixel_std for x in images]
images = ImageList.from_tensors(images, size_divisibility=self.size_divisibility).tensor
metas = []
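        # Rescale outputs to the original resolution only if every input provides "height"/"width"; mixed inputs are an error.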
rescale = {"height" in x for x in batched_inputs}
if len(rescale) != 1:
raise ValueError("Some inputs have original height/width, but some don't!")
rescale = list(rescale)[0]
output_shapes = []
for input in batched_inputs:
meta = {}
c, h, w = input["image"].shape
meta["img_shape"] = meta["ori_shape"] = (h, w, c)
if rescale:
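                # Ratio of the network-input size to the original size, laid out as (w, h, w, h) for box rescaling.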
scale_factor = np.array(
[w / input["width"], h / input["height"]] * 2, dtype="float32"
)
ori_shape = (input["height"], input["width"])
output_shapes.append(ori_shape)
meta["ori_shape"] = ori_shape + (c,)
else:
scale_factor = 1.0
output_shapes.append((h, w))
meta["scale_factor"] = scale_factor
meta["flip"] = False
padh, padw = images.shape[-2:]
meta["pad_shape"] = (padh, padw, c)
metas.append(meta)
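        # Training: forward_train returns mmdet losses/metrics; inference: simple_test results are converted to detectron2 Instances.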
if self.training:
gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
if gt_instances[0].has("gt_masks"):
def convert_mask(m, shape):
                    # convert detectron2 mask structures to mmdet's mask classes
if isinstance(m, BitMasks):
return mm_BitMasks(m.tensor.cpu().numpy(), shape[0], shape[1])
else:
return mm_PolygonMasks(m.polygons, shape[0], shape[1])
gt_masks = [convert_mask(x.gt_masks, x.image_size) for x in gt_instances]
losses_and_metrics = self.detector.forward_train(
images,
metas,
[x.gt_boxes.tensor for x in gt_instances],
[x.gt_classes for x in gt_instances],
gt_masks=gt_masks,
)
else:
losses_and_metrics = self.detector.forward_train(
images,
metas,
[x.gt_boxes.tensor for x in gt_instances],
[x.gt_classes for x in gt_instances],
)
return _parse_losses(losses_and_metrics)
else:
results = self.detector.simple_test(images, metas, rescale=rescale)
results = [
{"instances": _convert_mmdet_result(r, shape)}
for r, shape in zip(results, output_shapes)
]
return results
@property
def device(self):
return self.pixel_mean.device
# Reference: show_result() in
# https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/detectors/base.py
| def _convert_mmdet_result(result, shape: Tuple[int, int]) -> Instances:
| 3 | 2023-12-05 02:51:53+00:00 | 12k |
DiffusionLight/DiffusionLight | inpaint.py | [
{
"identifier": "BallInpainter",
"path": "relighting/inpainter.py",
"snippet": "class BallInpainter():\n def __init__(self, pipeline, sd_arch, control_generator, disable_water_mask=True):\n self.pipeline = pipeline\n self.sd_arch = sd_arch\n self.control_generator = control_generator\n self.median = {}\n if disable_water_mask:\n self._disable_water_mask()\n\n def _disable_water_mask(self):\n if hasattr(self.pipeline, \"watermark\"):\n self.pipeline.watermark = NoWaterMark()\n print(\"Disabled watermasking\")\n\n @classmethod\n def from_sd(cls, \n model, \n controlnet=None, \n device=0, \n sampler=\"unipc\", \n torch_dtype=torch.float16,\n disable_water_mask=True,\n offload=False\n ):\n if controlnet is not None:\n control_signal_type = get_control_signal_type(controlnet)\n controlnet = ControlNetModel.from_pretrained(controlnet, torch_dtype=torch.float16)\n pipe = CustomStableDiffusionControlNetInpaintPipeline.from_pretrained(\n model,\n controlnet=controlnet,\n torch_dtype=torch_dtype,\n ).to(device)\n control_generator = ControlSignalGenerator(\"sd\", control_signal_type, device=device)\n else:\n pipe = CustomStableDiffusionInpaintPipeline.from_pretrained(\n model,\n torch_dtype=torch_dtype,\n ).to(device)\n control_generator = None\n \n try:\n if torch_dtype==torch.float16 and device != torch.device(\"cpu\"):\n pipe.enable_xformers_memory_efficient_attention()\n except:\n pass\n pipe.set_progress_bar_config(disable=True)\n \n pipe.scheduler = SAMPLERS[sampler].from_config(pipe.scheduler.config)\n \n return BallInpainter(pipe, \"sd\", control_generator, disable_water_mask)\n\n @classmethod\n def from_sdxl(cls, \n model, \n controlnet=None, \n device=0, \n sampler=\"unipc\", \n torch_dtype=torch.float16,\n disable_water_mask=True,\n use_fixed_vae=True,\n offload=False\n ):\n vae = VAE_MODELS[\"sdxl\"]\n vae = AutoencoderKL.from_pretrained(vae, torch_dtype=torch_dtype).to(device) if use_fixed_vae else None\n extra_kwargs = {\"vae\": vae} if vae is not None else {}\n \n if controlnet is not None:\n control_signal_type = get_control_signal_type(controlnet)\n controlnet = ControlNetModel.from_pretrained(\n controlnet,\n variant=\"fp16\" if torch_dtype == torch.float16 else None,\n use_safetensors=True,\n torch_dtype=torch_dtype,\n ).to(device)\n pipe = CustomStableDiffusionXLControlNetInpaintPipeline.from_pretrained(\n model,\n controlnet=controlnet,\n variant=\"fp16\" if torch_dtype == torch.float16 else None,\n use_safetensors=True,\n torch_dtype=torch_dtype,\n **extra_kwargs,\n ).to(device)\n control_generator = ControlSignalGenerator(\"sdxl\", control_signal_type, device=device)\n else:\n pipe = CustomStableDiffusionXLInpaintPipeline.from_pretrained(\n model,\n variant=\"fp16\" if torch_dtype == torch.float16 else None,\n use_safetensors=True,\n torch_dtype=torch_dtype,\n **extra_kwargs,\n ).to(device)\n control_generator = None\n \n try:\n if torch_dtype==torch.float16 and device != torch.device(\"cpu\"):\n pipe.enable_xformers_memory_efficient_attention()\n except:\n pass\n \n if offload and device != torch.device(\"cpu\"):\n pipe.enable_model_cpu_offload()\n pipe.set_progress_bar_config(disable=True)\n pipe.scheduler = SAMPLERS[sampler].from_config(pipe.scheduler.config)\n \n return BallInpainter(pipe, \"sdxl\", control_generator, disable_water_mask)\n\n # TODO: this method should be replaced by inpaint(), but we'll leave it here for now\n # otherwise, the existing experiment code will break down\n def __call__(self, *args, **kwargs):\n return self.pipeline(*args, **kwargs)\n\n def _default_height_width(self, height=None, width=None):\n if (height is 
not None) and (width is not None):\n return height, width\n if self.sd_arch == \"sd\":\n return (512, 512)\n elif self.sd_arch == \"sdxl\":\n return (1024, 1024)\n else:\n raise NotImplementedError\n\n # this method is for sanity check only\n def get_cache_control_image(self):\n control_image = getattr(self, \"cache_control_image\", None)\n return control_image\n\n def prepare_control_signal(self, image, controlnet_conditioning_scale, extra_kwargs):\n if self.control_generator is not None:\n control_image = self.control_generator(image, **extra_kwargs)\n controlnet_kwargs = {\n \"control_image\": control_image,\n \"controlnet_conditioning_scale\": controlnet_conditioning_scale\n }\n self.cache_control_image = control_image\n else:\n controlnet_kwargs = {}\n\n return controlnet_kwargs\n\n def get_cache_median(self, it):\n if it in self.median: return self.median[it]\n else: return None\n\n def reset_median(self):\n self.median = {}\n print(\"Reset median\")\n\n def load_median(self, path):\n if os.path.exists(path):\n with open(path, \"rb\") as f:\n self.median = pickle.load(f)\n print(f\"Loaded median from {path}\")\n else:\n print(f\"Median not found at {path}!\")\n\n def inpaint_iterative(\n self,\n prompt=None,\n negative_prompt=\"\",\n num_inference_steps=30,\n generator=None, # TODO: remove this\n image=None,\n mask_image=None,\n height=None,\n width=None,\n controlnet_conditioning_scale=0.5,\n num_images_per_prompt=1,\n current_seed=0,\n cross_attention_kwargs={},\n strength=0.8,\n num_iteration=2,\n ball_per_iteration=30,\n agg_mode=\"median\",\n save_intermediate=True,\n cache_dir=\"./temp_inpaint_iterative\",\n disable_progress=False,\n prompt_embeds=None,\n pooled_prompt_embeds=None,\n use_cache_median=False,\n **extra_kwargs,\n ):\n\n def computeMedian(ball_images):\n all = np.stack(ball_images, axis=0)\n median = np.median(all, axis=0)\n idx_median = np.argsort(all, axis=0)[all.shape[0]//2]\n # print(all.shape)\n # print(idx_median.shape)\n return median, idx_median\n\n def generate_balls(avg_image, current_strength, ball_per_iteration, current_iteration):\n print(f\"Inpainting balls for {current_iteration} iteration...\")\n controlnet_kwargs = self.prepare_control_signal(\n image=avg_image,\n controlnet_conditioning_scale=controlnet_conditioning_scale,\n extra_kwargs=extra_kwargs,\n )\n\n ball_images = []\n for i in tqdm(range(ball_per_iteration), disable=disable_progress):\n seed = current_seed + i\n new_generator = torch.Generator().manual_seed(seed)\n\n output_image = self.pipeline(\n prompt=prompt,\n negative_prompt=negative_prompt,\n num_inference_steps=num_inference_steps,\n generator=new_generator,\n image=avg_image,\n mask_image=mask_image,\n height=height,\n width=width,\n num_images_per_prompt=num_images_per_prompt,\n strength=current_strength,\n newx=x,\n newy=y,\n newr=r,\n current_seed=seed,\n cross_attention_kwargs=cross_attention_kwargs,\n prompt_embeds=prompt_embeds,\n pooled_prompt_embeds=pooled_prompt_embeds,\n **controlnet_kwargs\n ).images[0]\n \n ball_image = crop_ball(output_image, mask_ball_for_crop, x, y, r)\n ball_images.append(ball_image)\n\n if save_intermediate:\n os.makedirs(os.path.join(cache_dir, str(current_iteration)), mode=0o777, exist_ok=True)\n output_image.save(os.path.join(cache_dir, str(current_iteration), f\"raw_{i}.png\"))\n Image.fromarray(ball_image).save(os.path.join(cache_dir, str(current_iteration), f\"ball_{i}.png\"))\n # chmod 777\n os.chmod(os.path.join(cache_dir, str(current_iteration), f\"raw_{i}.png\"), 0o0777)\n 
os.chmod(os.path.join(cache_dir, str(current_iteration), f\"ball_{i}.png\"), 0o0777)\n\n \n return ball_images\n\n if save_intermediate:\n os.makedirs(cache_dir, exist_ok=True)\n\n height, width = self._default_height_width(height, width)\n\n x = extra_kwargs[\"x\"]\n y = extra_kwargs[\"y\"]\n r = 256 if \"r\" not in extra_kwargs else extra_kwargs[\"r\"]\n _, mask_ball_for_crop = get_ideal_normal_ball(size=r)\n \n # generate initial average ball\n avg_image = image\n ball_images = generate_balls(\n avg_image,\n current_strength=1.0,\n ball_per_iteration=ball_per_iteration,\n current_iteration=0,\n )\n\n # ball refinement loop\n image = np.array(image)\n for it in range(1, num_iteration+1):\n if use_cache_median and (self.get_cache_median(it) is not None):\n print(\"Use existing median\")\n all = np.stack(ball_images, axis=0)\n idx_median = self.get_cache_median(it)\n avg_ball = all[idx_median, \n np.arange(idx_median.shape[0])[:, np.newaxis, np.newaxis],\n np.arange(idx_median.shape[1])[np.newaxis, :, np.newaxis],\n np.arange(idx_median.shape[2])[np.newaxis, np.newaxis, :]\n ]\n else:\n avg_ball, idx_median = computeMedian(ball_images)\n print(\"Add new median\")\n self.median[it] = idx_median\n \n avg_image = merge_normal_map(image, avg_ball, mask_ball_for_crop, x, y)\n avg_image = Image.fromarray(avg_image.astype(np.uint8))\n if save_intermediate:\n avg_image.save(os.path.join(cache_dir, f\"average_{it}.png\"))\n # chmod777\n os.chmod(os.path.join(cache_dir, f\"average_{it}.png\"), 0o0777)\n \n ball_images = generate_balls(\n avg_image,\n current_strength=strength,\n ball_per_iteration=ball_per_iteration if it < num_iteration else 1,\n current_iteration=it,\n )\n\n # TODO: add algorithm for select the best ball\n best_ball = ball_images[0]\n output_image = merge_normal_map(image, best_ball, mask_ball_for_crop, x, y)\n return Image.fromarray(output_image.astype(np.uint8))\n\n def inpaint(\n self,\n prompt=None,\n negative_prompt=None,\n num_inference_steps=30,\n generator=None,\n image=None,\n mask_image=None,\n height=None,\n width=None,\n controlnet_conditioning_scale=0.5,\n num_images_per_prompt=1,\n strength=1.0,\n current_seed=0,\n cross_attention_kwargs={},\n prompt_embeds=None,\n pooled_prompt_embeds=None,\n **extra_kwargs,\n ):\n height, width = self._default_height_width(height, width)\n\n controlnet_kwargs = self.prepare_control_signal(\n image=image,\n controlnet_conditioning_scale=controlnet_conditioning_scale,\n extra_kwargs=extra_kwargs,\n )\n \n if generator is None:\n generator = torch.Generator().manual_seed(0)\n\n output_image = self.pipeline(\n prompt=prompt,\n negative_prompt=negative_prompt,\n num_inference_steps=num_inference_steps,\n generator=generator,\n image=image,\n mask_image=mask_image,\n height=height,\n width=width,\n num_images_per_prompt=num_images_per_prompt,\n strength=strength,\n newx = extra_kwargs[\"x\"],\n newy = extra_kwargs[\"y\"],\n newr = getattr(extra_kwargs, \"r\", 256), # default to ball_size = 256\n current_seed=current_seed,\n cross_attention_kwargs=cross_attention_kwargs,\n prompt_embeds=prompt_embeds,\n pooled_prompt_embeds=pooled_prompt_embeds,\n **controlnet_kwargs\n )\n\n return output_image"
},
{
"identifier": "MaskGenerator",
"path": "relighting/mask_utils.py",
"snippet": "class MaskGenerator():\n def __init__(self, cache_mask=True):\n self.cache_mask = cache_mask\n self.all_masks = []\n\n def clear_cache(self):\n self.all_masks = []\n\n def retrieve_masks(self):\n return self.all_masks\n\n def generate_grid(self, image, mask_ball, n_ball=16, size=128):\n ball_positions = create_grid(image.size, n_ball, size)\n # _, mask_ball = get_normal_ball(size)\n \n masks = []\n mask_template = np.zeros(image.size)\n for x, y in ball_positions:\n mask = mask_template.copy()\n mask[y:y+size, x:x+size] = 255 * mask_ball\n mask = Image.fromarray(mask.astype(np.uint8), \"L\")\n masks.append(mask)\n\n # if self.cache_mask:\n # self.all_masks.append((x, y, size))\n \n return masks, ball_positions\n\n def generate_single(self, image, mask_ball, x, y, size):\n w,h = image.size # numpy as (h,w) but PIL is (w,h)\n mask = np.zeros((h,w))\n mask[y:y+size, x:x+size] = 255 * mask_ball\n mask = Image.fromarray(mask.astype(np.uint8), \"L\")\n\n return mask\n\n def generate_best(self, image, mask_ball, size):\n w,h = image.size # numpy as (h,w) but PIL is (w,h)\n mask = np.zeros((h,w))\n\n (y, x), _ = find_best_location(np.array(image), ball_size=size)\n mask[y:y+size, x:x+size] = 255 * mask_ball\n mask = Image.fromarray(mask.astype(np.uint8), \"L\")\n\n return mask, (x, y)"
},
{
"identifier": "get_ideal_normal_ball",
"path": "relighting/ball_processor.py",
"snippet": "def get_ideal_normal_ball(size, flip_x=True):\n \"\"\"\n Generate normal ball for specific size \n Normal map is x \"left\", y up, z into the screen \n (we flip X to match sobel operator)\n @params\n - size (int) - single value of height and width\n @return:\n - normal_map (np.array) - normal map [size, size, 3]\n - mask (np.array) - mask that make a valid normal map [size,size]\n \"\"\"\n # we flip x to match sobel operator\n x = torch.linspace(1, -1, size)\n y = torch.linspace(1, -1, size)\n x = x.flip(dims=(-1,)) if not flip_x else x\n\n y, x = torch.meshgrid(y, x)\n z = (1 - x**2 - y**2)\n mask = z >= 0\n\n # clean up invalid value outsize the mask\n x = x * mask\n y = y * mask\n z = z * mask\n \n # get real z value\n z = torch.sqrt(z)\n \n # clean up normal map value outside mask \n normal_map = torch.cat([x[..., None], y[..., None], z[..., None]], dim=-1)\n normal_map = normal_map.numpy()\n mask = mask.numpy()\n return normal_map, mask"
},
{
"identifier": "crop_ball",
"path": "relighting/ball_processor.py",
"snippet": "def crop_ball(image, mask_ball, x, y, size, apply_mask=True, bg_color = (0, 0, 0)):\n if isinstance(image, Image.Image):\n result = np.array(image)\n else:\n result = image.copy()\n \n result = result[y:y+size, x:x+size]\n if apply_mask:\n result[~mask_ball] = bg_color\n return result"
},
{
"identifier": "GeneralLoader",
"path": "relighting/dataset.py",
"snippet": "class GeneralLoader(Dataset):\n def __init__(self,\n root=None,\n num_samples=None,\n res_threshold=((1024, 1024)),\n apply_threshold=False,\n random_shuffle=False,\n process_id = 0,\n process_total = 1,\n limit_input = 0,\n **kwargs,\n ):\n super().__init__(**kwargs)\n self.root = root\n self.res_threshold = res_threshold\n self.apply_threshold = apply_threshold\n self.has_meta = False\n \n if self.root is not None:\n if not os.path.exists(self.root):\n raise Exception(f\"Dataset {self.root} does not exist.\") \n \n paths = natsorted(\n list(glob.glob(os.path.join(self.root, \"*.png\"))) + \\\n list(glob.glob(os.path.join(self.root, \"*.jpg\")))\n )\n self.scene_data = self._load_data_path(paths, num_samples=num_samples)\n \n if random_shuffle:\n SEED = 0\n random.Random(SEED).shuffle(self.scene_data)\n random.Random(SEED).shuffle(self.boundary_info)\n \n if limit_input > 0:\n self.scene_data = self.scene_data[:limit_input]\n self.boundary_info = self.boundary_info[:limit_input]\n \n # please keep this one the last, so, we will filter out scene_data and boundary info\n if process_total > 1:\n self.scene_data = self.scene_data[process_id::process_total]\n self.boundary_info = self.boundary_info[process_id::process_total]\n print(f\"Process {process_id} has {len(self.scene_data)} samples\")\n\n def _load_data_path(self, paths, num_samples=None):\n if os.path.exists(os.path.splitext(paths[0])[0] + \".json\") or os.path.exists(os.path.splitext(paths[-1])[0] + \".json\"):\n self.has_meta = True\n \n if self.has_meta:\n # read metadata\n TARGET_KEY = \"chrome_mask256\"\n for path in paths:\n with open(os.path.splitext(path)[0] + \".json\") as f:\n meta = json.load(f)\n self.meta_data.append(meta)\n boundary = {\n \"x\": meta[TARGET_KEY][\"x\"],\n \"y\": meta[TARGET_KEY][\"y\"],\n \"size\": meta[TARGET_KEY][\"w\"],\n }\n self.boundary_info.append(boundary)\n \n \n scene_data = paths\n if self.apply_threshold:\n scene_data = []\n for path in tqdm(paths):\n img = Image.open(path)\n if (img.size[0] >= self.res_threshold[0]) and (img.size[1] >= self.res_threshold[1]):\n scene_data.append(path)\n \n if num_samples is not None:\n max_idx = min(num_samples, len(scene_data))\n scene_data = scene_data[:max_idx]\n \n return scene_data\n \n @classmethod\n def from_image_paths(cls, paths, *args, **kwargs):\n dataset = cls(*args, **kwargs)\n dataset.scene_data = dataset._load_data_path(paths)\n return dataset"
},
{
"identifier": "name2hash",
"path": "relighting/utils.py",
"snippet": "def name2hash(name: str):\n \"\"\"\n @see https://stackoverflow.com/questions/16008670/how-to-hash-a-string-into-8-digits\n \"\"\"\n hash_number = int(hashlib.sha1(name.encode(\"utf-8\")).hexdigest(), 16) % (10 ** 8)\n return hash_number"
},
{
"identifier": "SD_MODELS",
"path": "relighting/argument.py",
"snippet": "SD_MODELS = {\n \"sd15_old\": \"runwayml/stable-diffusion-inpainting\",\n \"sd15_new\": \"runwayml/stable-diffusion-inpainting\",\n \"sd21\": \"stabilityai/stable-diffusion-2-inpainting\",\n \"sdxl\": \"stabilityai/stable-diffusion-xl-base-1.0\",\n \"sdxl_fast\": \"stabilityai/stable-diffusion-xl-base-1.0\",\n \"sd15_depth\": \"runwayml/stable-diffusion-inpainting\",\n}"
},
{
"identifier": "CONTROLNET_MODELS",
"path": "relighting/argument.py",
"snippet": "CONTROLNET_MODELS = {\n \"sd15_old\": \"fusing/stable-diffusion-v1-5-controlnet-normal\",\n \"sd15_new\": \"lllyasviel/control_v11p_sd15_normalbae\",\n \"sd21\": \"thibaud/controlnet-sd21-normalbae-diffusers\",\n \"sdxl\": \"diffusers/controlnet-depth-sdxl-1.0\",\n \"sdxl_fast\": \"diffusers/controlnet-depth-sdxl-1.0-small\",\n \"sd15_depth\": \"lllyasviel/control_v11f1p_sd15_depth\",\n}"
},
{
"identifier": "VAE_MODELS",
"path": "relighting/argument.py",
"snippet": "VAE_MODELS = {\n \"sdxl\": \"madebyollin/sdxl-vae-fp16-fix\",\n \"sdxl_fast\": \"madebyollin/sdxl-vae-fp16-fix\",\n}"
}
] | import torch
import argparse
import numpy as np
import torch.distributed as dist
import os
import json
import relighting.dist_utils as dist_util
import time
from PIL import Image
from tqdm.auto import tqdm
from relighting.inpainter import BallInpainter
from relighting.mask_utils import MaskGenerator
from relighting.ball_processor import (
get_ideal_normal_ball,
crop_ball
)
from relighting.dataset import GeneralLoader
from relighting.utils import name2hash
from relighting.argument import (
SD_MODELS,
CONTROLNET_MODELS,
VAE_MODELS
) | 8,224 | torch_dtype = torch_dtype,
offload = args.offload
)
elif args.model_option in ["sdxl", "sdxl_fast"] and not args.use_controlnet:
model = SD_MODELS[args.model_option]
pipe = BallInpainter.from_sdxl(
model=model,
controlnet=None,
device=device,
torch_dtype = torch_dtype,
offload = args.offload
)
elif args.use_controlnet:
model, controlnet = SD_MODELS[args.model_option], CONTROLNET_MODELS[args.model_option]
pipe = BallInpainter.from_sd(
model=model,
controlnet=controlnet,
device=device,
torch_dtype = torch_dtype,
offload = args.offload
)
else:
model = SD_MODELS[args.model_option]
pipe = BallInpainter.from_sd(
model=model,
controlnet=None,
device=device,
torch_dtype = torch_dtype,
offload = args.offload
)
if args.lora_scale > 0 and args.lora_path is None:
raise ValueError("lora scale is not 0 but lora path is not set")
if (args.lora_path is not None) and (args.use_lora):
print(f"using lora path {args.lora_path}")
print(f"using lora scale {args.lora_scale}")
pipe.pipeline.load_lora_weights(args.lora_path)
pipe.pipeline.fuse_lora(lora_scale=args.lora_scale) # fuse lora weight w' = w + \alpha \Delta w
enabled_lora = True
else:
enabled_lora = False
if args.use_torch_compile:
try:
print("compiling unet model")
start_time = time.time()
pipe.pipeline.unet = torch.compile(pipe.pipeline.unet, mode="reduce-overhead", fullgraph=True)
print("Model compilation time: ", time.time() - start_time)
except:
pass
# the default resolution for SDXL is 1024; if height/width are not set, use that default.
if args.model_option == "sdxl" and args.img_height == 0 and args.img_width == 0:
args.img_height = 1024
args.img_width = 1024
# load dataset
dataset = GeneralLoader(
root=args.dataset,
resolution=(args.img_width, args.img_height),
force_square=args.force_square,
return_dict=True,
random_shuffle=args.random_loader,
process_id=args.idx,
process_total=args.total,
limit_input=args.limit_input,
)
# interpolate embedding
embedding_dict = interpolate_embedding(pipe, args)
# prepare mask and normal ball
mask_generator = MaskGenerator()
normal_ball, mask_ball = get_ideal_normal_ball(size=args.ball_size+args.ball_dilate)
_, mask_ball_for_crop = get_ideal_normal_ball(size=args.ball_size)
# make output directory if not exist
raw_output_dir = os.path.join(args.output_dir, "raw")
control_output_dir = os.path.join(args.output_dir, "control")
square_output_dir = os.path.join(args.output_dir, "square")
os.makedirs(args.output_dir, exist_ok=True)
os.makedirs(raw_output_dir, exist_ok=True)
os.makedirs(control_output_dir, exist_ok=True)
os.makedirs(square_output_dir, exist_ok=True)
# create split seed
# please DO NOT manually replace this line, use the --seed option instead
seeds = args.seed.split(",")
for image_data in tqdm(dataset):
input_image = image_data["image"]
image_path = image_data["path"]
for ev, (prompt_embeds, pooled_prompt_embeds) in embedding_dict.items():
# create output file name (we always use png to prevent quality loss)
ev_str = str(ev).replace(".", "") if ev != 0 else "-00"
outname = os.path.basename(image_path).split(".")[0] + f"_ev{ev_str}"
# we use top-left corner notation (which is different from aj.aek's center point notation)
x, y, r = get_ball_location(image_data, args)
# create inpaint mask
mask = mask_generator.generate_single(
input_image, mask_ball,
x - (args.ball_dilate // 2),
y - (args.ball_dilate // 2),
r + args.ball_dilate
)
seeds = tqdm(seeds, desc="seeds") if len(seeds) > 10 else seeds
# repeatedly create images with different seeds
for seed in seeds:
start_time = time.time()
# set seed, if seed auto we use file name as seed
if seed == "auto":
filename = os.path.basename(image_path).split(".")[0]
| # inpaint the ball on an image
# this one is designed for general images that do not require a specific placement location
# cross import from inpaint_multi-illum.py
def create_argparser():
parser = argparse.ArgumentParser()
parser.add_argument("--dataset", type=str, required=True ,help='directory that contain the image') #dataset name or directory
parser.add_argument("--ball_size", type=int, default=256, help="size of the ball in pixel")
parser.add_argument("--ball_dilate", type=int, default=20, help="How much pixel to dilate the ball to make a sharper edge")
parser.add_argument("--prompt", type=str, default="a perfect mirrored reflective chrome ball sphere")
parser.add_argument("--prompt_dark", type=str, default="a perfect black dark mirrored reflective chrome ball sphere")
parser.add_argument("--negative_prompt", type=str, default="matte, diffuse, flat, dull")
parser.add_argument("--model_option", default="sdxl", help='selecting fancy model option (sd15_old, sd15_new, sd21, sdxl)') # [sd15_old, sd15_new, or sd21]
parser.add_argument("--output_dir", required=True, type=str, help="output directory")
parser.add_argument("--img_height", type=int, default=1024, help="Dataset Image Height")
parser.add_argument("--img_width", type=int, default=1024, help="Dataset Image Width")
# some good seed 0, 37, 71, 125, 140, 196, 307, 434, 485, 575 | 9021, 9166, 9560, 9814, but default auto is for fairness
parser.add_argument("--seed", default="auto", type=str, help="Seed: right now we use single seed instead to reduce the time, (Auto will use hash file name to generate seed)")
parser.add_argument("--denoising_step", default=30, type=int, help="number of denoising step of diffusion model")
parser.add_argument("--control_scale", default=0.5, type=float, help="controlnet conditioning scale")
parser.add_argument('--no_controlnet', dest='use_controlnet', action='store_false', help='ControlNet is used by default; this option disables it to compare the difference')
parser.set_defaults(use_controlnet=True)
parser.add_argument('--no_force_square', dest='force_square', action='store_false', help='SDXL is trained on square images, so square input is preferred; use this option to disable the reshape')
parser.set_defaults(force_square=True)
parser.add_argument('--no_random_loader', dest='random_loader', action='store_false', help="by default, the dataset is loaded in random order. This lets us peek at the trend of results without waiting for the entire dataset; disable it if preferred")
parser.set_defaults(random_loader=True)
parser.add_argument('--cpu', dest='is_cpu', action='store_true', help="using CPU inference instead of GPU inference")
parser.set_defaults(is_cpu=False)
parser.add_argument('--offload', dest='offload', action='store_false', help="to enable diffusers cpu offload")
parser.set_defaults(offload=False)
parser.add_argument("--limit_input", default=0, type=int, help="limit number of image to process to n image (0 = no limit), useful for run smallset")
# LoRA stuff
parser.add_argument('--no_lora', dest='use_lora', action='store_false', help='LoRA is used by default; this option disables it to compare the difference')
parser.set_defaults(use_lora=True)
parser.add_argument("--lora_path", default="models/ThisIsTheFinal-lora-hdr-continuous-largeT@900/0_-5/checkpoint-2500", type=str, help="LoRA Checkpoint path")
parser.add_argument("--lora_scale", default=0.75, type=float, help="LoRA scale factor")
# speed optimization stuff
parser.add_argument('--no_torch_compile', dest='use_torch_compile', action='store_false', help='torch.compile is used by default for faster processing; disable it if your environment runs PyTorch older than 2.0')
parser.set_defaults(use_torch_compile=True)
# algorithm + iterative stuff
parser.add_argument("--algorithm", type=str, default="iterative", choices=["iterative", "normal"], help="Selecting between iterative or normal (single pass inpaint) algorithm")
parser.add_argument("--agg_mode", default="median", type=str)
parser.add_argument("--strength", default=0.8, type=float)
parser.add_argument("--num_iteration", default=2, type=int)
parser.add_argument("--ball_per_iteration", default=30, type=int)
parser.add_argument('--no_save_intermediate', dest='save_intermediate', action='store_false')
parser.set_defaults(save_intermediate=True)
parser.add_argument("--cache_dir", default="./temp_inpaint_iterative", type=str, help="cache directory for iterative inpaint")
# parallel processing
parser.add_argument("--idx", default=0, type=int, help="index of the current process, useful for running on multiple nodes")
parser.add_argument("--total", default=1, type=int, help="total number of processes")
# for HDR stuff
parser.add_argument("--max_negative_ev", default=-5, type=int, help="maximum negative EV for lora")
parser.add_argument("--ev", default="0,-2.5,-5", type=str, help="EV: list of EV to generate")
return parser
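# Illustrative usage (hypothetical script name and paths, not taken from the source):
#   python inpaint.py --dataset /path/to/images --output_dir /path/to/output --ev "0,-2.5,-5"
# Only --dataset and --output_dir are required; the remaining options fall back to the
# defaults defined above.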
def get_ball_location(image_data, args):
if 'boundary' in image_data:
# support predefined boundary if need
x = image_data["boundary"]["x"]
y = image_data["boundary"]["y"]
r = image_data["boundary"]["size"]
# support ball dilation
half_dilate = args.ball_dilate // 2
# check if not left out-of-bound
if x - half_dilate < 0: x += half_dilate
if y - half_dilate < 0: y += half_dilate
# check if not right out-of-bound
if x + r + half_dilate > args.img_width: x -= half_dilate
if y + r + half_dilate > args.img_height: y -= half_dilate
else:
# we use top-left corner notation
x, y, r = ((args.img_width // 2) - (args.ball_size // 2), (args.img_height // 2) - (args.ball_size // 2), args.ball_size)
return x, y, r
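# Illustrative helper (hypothetical, not part of the original script): converting a
# center-point ball annotation to the top-left notation returned by get_ball_location().
def center_to_top_left(cx, cy, ball_size):
    # the top-left corner is the center shifted back by half the ball size
    return cx - ball_size // 2, cy - ball_size // 2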
def interpolate_embedding(pipe, args):
print("interpolate embedding...")
# get list of all EVs
ev_list = [float(x) for x in args.ev.split(",")]
interpolants = [ev / args.max_negative_ev for ev in ev_list]
print("EV : ", ev_list)
print("EV : ", interpolants)
# calculate prompt embeddings
prompt_normal = args.prompt
prompt_dark = args.prompt_dark
prompt_embeds_normal, _, pooled_prompt_embeds_normal, _ = pipe.pipeline.encode_prompt(prompt_normal)
prompt_embeds_dark, _, pooled_prompt_embeds_dark, _ = pipe.pipeline.encode_prompt(prompt_dark)
# interpolate embeddings
interpolate_embeds = []
for t in interpolants:
int_prompt_embeds = prompt_embeds_normal + t * (prompt_embeds_dark - prompt_embeds_normal)
int_pooled_prompt_embeds = pooled_prompt_embeds_normal + t * (pooled_prompt_embeds_dark - pooled_prompt_embeds_normal)
interpolate_embeds.append((int_prompt_embeds, int_pooled_prompt_embeds))
return dict(zip(ev_list, interpolate_embeds))
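# Worked example (illustrative only): with --ev "0,-2.5,-5" and --max_negative_ev -5,
# the interpolants are t = [0.0, 0.5, 1.0], i.e.
#   embeds(ev) = embeds_normal + (ev / max_negative_ev) * (embeds_dark - embeds_normal)
# so ev=0 keeps the normal prompt embedding and ev=-5 reaches the dark prompt embedding.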
def main():
# load arguments
args = create_argparser().parse_args()
# get local rank
if args.is_cpu:
device = torch.device("cpu")
torch_dtype = torch.float32
else:
device = dist_util.dev()
torch_dtype = torch.float16
# so, we need ball_dilate >= 16 (2*vae_scale_factor) to make our mask shape = (272, 272)
assert args.ball_dilate % 2 == 0 # ball dilation should be symmetric
# create controlnet pipeline
if args.model_option in ["sdxl", "sdxl_fast"] and args.use_controlnet:
model, controlnet = SD_MODELS[args.model_option], CONTROLNET_MODELS[args.model_option]
pipe = BallInpainter.from_sdxl(
model=model,
controlnet=controlnet,
device=device,
torch_dtype = torch_dtype,
offload = args.offload
)
elif args.model_option in ["sdxl", "sdxl_fast"] and not args.use_controlnet:
model = SD_MODELS[args.model_option]
pipe = BallInpainter.from_sdxl(
model=model,
controlnet=None,
device=device,
torch_dtype = torch_dtype,
offload = args.offload
)
elif args.use_controlnet:
model, controlnet = SD_MODELS[args.model_option], CONTROLNET_MODELS[args.model_option]
pipe = BallInpainter.from_sd(
model=model,
controlnet=controlnet,
device=device,
torch_dtype = torch_dtype,
offload = args.offload
)
else:
model = SD_MODELS[args.model_option]
pipe = BallInpainter.from_sd(
model=model,
controlnet=None,
device=device,
torch_dtype = torch_dtype,
offload = args.offload
)
if args.lora_scale > 0 and args.lora_path is None:
raise ValueError("lora scale is not 0 but lora path is not set")
if (args.lora_path is not None) and (args.use_lora):
print(f"using lora path {args.lora_path}")
print(f"using lora scale {args.lora_scale}")
pipe.pipeline.load_lora_weights(args.lora_path)
pipe.pipeline.fuse_lora(lora_scale=args.lora_scale) # fuse lora weight w' = w + \alpha \Delta w
enabled_lora = True
else:
enabled_lora = False
if args.use_torch_compile:
try:
print("compiling unet model")
start_time = time.time()
pipe.pipeline.unet = torch.compile(pipe.pipeline.unet, mode="reduce-overhead", fullgraph=True)
print("Model compilation time: ", time.time() - start_time)
except:
pass
# the default resolution for SDXL is 1024; if height/width are not set, use that default.
if args.model_option == "sdxl" and args.img_height == 0 and args.img_width == 0:
args.img_height = 1024
args.img_width = 1024
# load dataset
dataset = GeneralLoader(
root=args.dataset,
resolution=(args.img_width, args.img_height),
force_square=args.force_square,
return_dict=True,
random_shuffle=args.random_loader,
process_id=args.idx,
process_total=args.total,
limit_input=args.limit_input,
)
# interpolate embedding
embedding_dict = interpolate_embedding(pipe, args)
# prepare mask and normal ball
mask_generator = MaskGenerator()
normal_ball, mask_ball = get_ideal_normal_ball(size=args.ball_size+args.ball_dilate)
_, mask_ball_for_crop = get_ideal_normal_ball(size=args.ball_size)
# make output directory if not exist
raw_output_dir = os.path.join(args.output_dir, "raw")
control_output_dir = os.path.join(args.output_dir, "control")
square_output_dir = os.path.join(args.output_dir, "square")
os.makedirs(args.output_dir, exist_ok=True)
os.makedirs(raw_output_dir, exist_ok=True)
os.makedirs(control_output_dir, exist_ok=True)
os.makedirs(square_output_dir, exist_ok=True)
# create split seed
# please DO NOT manually replace this line, use the --seed option instead
seeds = args.seed.split(",")
for image_data in tqdm(dataset):
input_image = image_data["image"]
image_path = image_data["path"]
for ev, (prompt_embeds, pooled_prompt_embeds) in embedding_dict.items():
# create output file name (we always use png to prevent quality loss)
ev_str = str(ev).replace(".", "") if ev != 0 else "-00"
outname = os.path.basename(image_path).split(".")[0] + f"_ev{ev_str}"
# we use top-left corner notation (which is different from aj.aek's center point notation)
x, y, r = get_ball_location(image_data, args)
# create inpaint mask
mask = mask_generator.generate_single(
input_image, mask_ball,
x - (args.ball_dilate // 2),
y - (args.ball_dilate // 2),
r + args.ball_dilate
)
seeds = tqdm(seeds, desc="seeds") if len(seeds) > 10 else seeds
# repeatedly create images with different seeds
for seed in seeds:
start_time = time.time()
# set seed, if seed auto we use file name as seed
if seed == "auto":
filename = os.path.basename(image_path).split(".")[0] | seed = name2hash(filename) | 5 | 2023-12-07 14:03:31+00:00 | 12k |
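# Illustrative sketch for the row above (not part of the dataset row): the gold completion
# `seed = name2hash(filename)` derives a deterministic per-image seed from the file name,
# mirroring the relighting/utils.name2hash snippet (sha1 digest -> int modulo 1e8).
# The function name `name2hash_demo` below is hypothetical.
import hashlib

def name2hash_demo(name: str) -> int:
    return int(hashlib.sha1(name.encode("utf-8")).hexdigest(), 16) % (10 ** 8)

# e.g. name2hash_demo("img_0001") yields a stable 8-digit seed for torch.Generator().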
eliphatfs/zerorf | zerorf.py | [
{
"identifier": "MultiSceneNeRF",
"path": "lib/models/autoencoders/multiscene_nerf.py",
"snippet": "class MultiSceneNeRF(BaseNeRF):\n\n def __init__(self,\n *args,\n cache_size=0, # cache in RAM, top priority\n cache_16bit=False,\n num_file_writers=0, # cache in file system (for large dataset)\n **kwargs):\n super().__init__(*args, **kwargs)\n\n self.cache_size = cache_size\n self.cache_16bit = cache_16bit\n if cache_size > 0:\n rank, ws = get_dist_info()\n split_points = np.round(np.linspace(0, cache_size, num=ws + 1)).astype(np.int64)\n inds = np.arange(start=split_points[rank], stop=split_points[rank + 1])\n self.cache = {ind: None for ind in inds}\n else:\n self.cache = None\n self.cache_loaded = False\n\n self.num_file_writers = num_file_writers\n self.is_file_writers_initialized = False\n\n def init_file_writers(self, save_dir):\n if self.num_file_writers > 0:\n def file_writer(queue):\n while True:\n obj = queue.get()\n torch.save(obj, os.path.join(save_dir, obj['scene_name'] + '.pth'))\n\n self.file_queues = [mp.Queue(maxsize=1) for _ in range(self.num_file_writers)]\n for queue in self.file_queues:\n p = mp.Process(target=file_writer, args=(queue,))\n p.start()\n else:\n self.file_queues = None\n self.is_file_writers_initialized = True\n\n def load_cache(self, data, freeze_code=False):\n device = get_module_device(self)\n num_scenes = len(data['scene_id'])\n rank, ws = get_dist_info()\n\n if self.cache is not None:\n if not self.cache_loaded:\n cache_load_from = self.train_cfg.get('cache_load_from', None)\n loaded = False\n if cache_load_from is not None:\n cache_files = os.listdir(cache_load_from)\n cache_files.sort()\n if len(cache_files) > 0:\n assert len(cache_files) == self.cache_size\n cacheiter = list(self.cache.keys())\n if sys.stdout.isatty() and rank == 0:\n cacheiter = tqdm.tqdm(cacheiter)\n for ind in cacheiter:\n self.cache[ind] = torch.load(\n os.path.join(cache_load_from, cache_files[ind]), map_location='cpu')\n loaded = True\n if rank == 0:\n mmcv.print_log('Loaded cache files from ' + cache_load_from + '.', 'mmgen')\n if not loaded:\n if rank == 0:\n mmcv.print_log('Initialize codes from scratch.', 'mmgen')\n self.cache_loaded = True\n cache_list = [self.cache[scene_id_single] for scene_id_single in data['scene_id']]\n elif 'code' in data:\n cache_list = data['code']\n else:\n cache_list = [None for _ in range(num_scenes)]\n code_list_ = []\n density_grid = []\n density_bitfield = []\n for scene_state_single in cache_list:\n if scene_state_single is None:\n code_list_.append(self.get_init_code_(None, device))\n density_grid.append(self.get_init_density_grid(None, device))\n density_bitfield.append(self.get_init_density_bitfield(None, device))\n else:\n if 'code_' in scene_state_single['param']:\n code_ = scene_state_single['param']['code_'].to(dtype=torch.float32, device=device)\n else:\n assert 'code' in scene_state_single['param']\n if rank == 0:\n warnings.warn(\n 'Pre-activation codes not found. 
Using on-the-fly inversion instead '\n '(which could be inconsistent).')\n code_ = self.code_activation.inverse(\n scene_state_single['param']['code'].to(dtype=torch.float32, device=device))\n code_list_.append(code_.requires_grad_(not freeze_code))\n density_grid.append(\n scene_state_single['param']['density_grid'].to(device)\n if 'density_grid' in scene_state_single['param']\n else self.get_init_density_grid(None, device))\n density_bitfield.append(\n scene_state_single['param']['density_bitfield'].to(device)\n if 'density_bitfield' in scene_state_single['param']\n else self.get_init_density_bitfield(None, device))\n density_grid = torch.stack(density_grid, dim=0)\n density_bitfield = torch.stack(density_bitfield, dim=0)\n\n code_optimizers = self.build_optimizer(code_list_, self.train_cfg)\n for ind, scene_state_single in enumerate(cache_list):\n if scene_state_single is not None and 'optimizer' in scene_state_single:\n optimizer_set_state(code_optimizers[ind], scene_state_single['optimizer'])\n return code_list_, code_optimizers, density_grid, density_bitfield\n\n def save_cache(self, code_list_, code_optimizers,\n density_grid, density_bitfield, scene_id, scene_name):\n if self.cache_16bit:\n code_dtype = torch.float16 if code_list_[0].dtype == torch.float32 else code_list_[0].dtype\n optimizer_dtype = torch.bfloat16\n else:\n code_dtype = code_list_[0].dtype\n optimizer_dtype = torch.float32\n if 'save_dir' in self.train_cfg:\n save_dir = self.train_cfg['save_dir']\n os.makedirs(save_dir, exist_ok=True)\n if not self.is_file_writers_initialized:\n self.init_file_writers(save_dir)\n else:\n save_dir = None\n for ind, code_single_ in enumerate(code_list_):\n scene_id_single = scene_id[ind]\n out = dict(\n scene_id=scene_id_single,\n scene_name=scene_name[ind],\n param=dict(\n code_=code_single_.data,\n density_grid=density_grid[ind],\n density_bitfield=density_bitfield[ind]),\n optimizer=code_optimizers[ind].state_dict())\n if self.cache is not None:\n if self.cache[scene_id_single] is None:\n self.cache[scene_id_single] = out_dict_to(\n out, device='cpu', code_dtype=code_dtype, optimizer_dtype=optimizer_dtype)\n else:\n if 'scene_id' not in self.cache[scene_id_single]:\n self.cache[scene_id_single]['scene_id'] = out['scene_id']\n if 'scene_name' not in self.cache[scene_id_single]:\n self.cache[scene_id_single]['scene_name'] = out['scene_name']\n if 'code' in self.cache[scene_id_single]['param']:\n del self.cache[scene_id_single]['param']['code']\n for key, val in out['param'].items():\n load_tensor_to_dict(self.cache[scene_id_single]['param'], key, val,\n device='cpu', dtype=code_dtype)\n if 'optimizer' in self.cache[scene_id_single]:\n optimizer_state_copy(out['optimizer'], self.cache[scene_id_single]['optimizer'],\n device='cpu', dtype=optimizer_dtype)\n else:\n self.cache[scene_id_single]['optimizer'] = optimizer_state_to(\n out['optimizer'], device='cpu', dtype=optimizer_dtype)\n if save_dir is not None:\n if self.file_queues is not None:\n self.file_queues[ind // self.num_file_writers].put(\n out_dict_to(out, device='cpu', code_dtype=code_dtype, optimizer_dtype=optimizer_dtype))\n else:\n torch.save(\n out_dict_to(out, device='cpu', code_dtype=code_dtype, optimizer_dtype=optimizer_dtype),\n os.path.join(save_dir, scene_name + '.pth'))\n\n def train_step(self, data, optimizer, running_status=None):\n code_list_, code_optimizers, density_grid, density_bitfield = self.load_cache(data)\n\n # ==== optimize code ====\n cond_imgs = data['cond_imgs'] # (num_scenes, num_imgs, h, w, 3)\n 
cond_intrinsics = data['cond_intrinsics'] # (num_scenes, num_imgs, 4), in [fx, fy, cx, cy]\n cond_poses = data['cond_poses']\n cond_times = data.get('cond_times')\n\n num_scenes, num_imgs, h, w, _ = cond_imgs.size()\n # (num_scenes, num_imgs, h, w, 3)\n cond_rays_o, cond_rays_d = get_cam_rays(cond_poses, cond_intrinsics, h, w)\n dt_gamma_scale = self.train_cfg.get('dt_gamma_scale', 0.0)\n # (num_scenes,)\n dt_gamma = dt_gamma_scale / cond_intrinsics[..., :2].mean(dim=(-2, -1))\n\n extra_scene_step = self.train_cfg.get('extra_scene_step', 0)\n if extra_scene_step > 0:\n cfg = self.train_cfg.copy()\n cfg['n_inverse_steps'] = extra_scene_step\n self.inverse_code(\n self.decoder, cond_imgs, cond_rays_o, cond_rays_d, dt_gamma=dt_gamma, cfg=cfg,\n code_=code_list_,\n density_grid=density_grid,\n density_bitfield=density_bitfield,\n code_optimizer=code_optimizers)\n\n # ==== joint optimization ====\n for code_optimizer in code_optimizers:\n code_optimizer.zero_grad()\n optimizer['decoder'].zero_grad()\n\n code = self.code_activation(torch.stack(code_list_, dim=0), update_stats=True)\n\n loss, log_vars, out_rgbs, target_rgbs = self.loss_decoder(\n self.decoder, code, density_bitfield, cond_rays_o, cond_rays_d,\n cond_imgs, dt_gamma=dt_gamma, cond_times=cond_times, cfg=self.train_cfg,\n update_extra_state=self.update_extra_iters,\n extra_args=(density_grid, density_bitfield, 0),\n extra_kwargs=dict(\n density_thresh=self.train_cfg['density_thresh']\n ) if 'density_thresh' in self.train_cfg else dict())\n loss.backward()\n log_vars.update(loss=float(loss))\n\n if self.train_cfg.get('decoder_grad_clip', 0.0) > 0.0:\n decoder_grad_norm = torch.nn.utils.clip_grad_norm_(\n self.decoder.parameters(), self.train_cfg['decoder_grad_clip'])\n log_vars.update(decoder_grad_norm=float(decoder_grad_norm))\n optimizer['decoder'].step()\n for code_optimizer in code_optimizers:\n code_optimizer.step()\n\n # ==== save cache ====\n self.save_cache(\n code_list_, code_optimizers,\n density_grid, density_bitfield, data['scene_id'], data['scene_name'])\n\n # ==== evaluate reconstruction ====\n with torch.no_grad():\n self.mean_ema_update(code)\n train_psnr = eval_psnr(out_rgbs, target_rgbs)\n code_rms = code.square().flatten(1).mean().sqrt()\n log_vars.update(train_psnr=float(train_psnr.mean()),\n code_rms=float(code_rms.mean()))\n if 'test_imgs' in data and data['test_imgs'] is not None:\n log_vars.update(self.eval_and_viz(\n data, self.decoder, code, density_bitfield, cfg=self.train_cfg))\n\n # ==== outputs ====\n outputs_dict = dict(\n log_vars=log_vars, num_samples=num_scenes)\n\n return outputs_dict"
},
{
"identifier": "build_optimizers",
"path": "lib/core/optimizer/builder.py",
"snippet": "def build_optimizers(model, cfgs):\n \"\"\"Modified from MMGeneration\n \"\"\"\n optimizers = {}\n if hasattr(model, 'module'):\n model = model.module\n # determine whether 'cfgs' has several dicts for optimizers\n is_dict_of_dict = True\n for key, cfg in cfgs.items():\n if not isinstance(cfg, dict):\n is_dict_of_dict = False\n if is_dict_of_dict:\n for key, cfg in cfgs.items():\n cfg_ = cfg.copy()\n module = rgetattr(model, key)\n optimizers[key] = build_optimizer(module, cfg_)\n return optimizers\n\n return build_optimizer(model, cfgs)"
},
{
"identifier": "OrbitCamera",
"path": "lib/core/ssdnerf_gui.py",
"snippet": "class OrbitCamera:\n def __init__(self, name, W, H, r=2., fovy=60., euler=[0, 0, 0]):\n self.name = name\n self.W = W\n self.H = H\n self.radius = r # camera distance from center\n self.fovy = fovy # in degree\n self.center = np.array([0, 0, 0], dtype=np.float32) # look at this point\n self.default_rot = R.from_quat([0.5, -0.5, 0.5, -0.5])\n self.rot = copy.deepcopy(self.default_rot)\n self.up = np.array([0, 0, 1], dtype=np.float32) # need to be normalized!\n\n self.set_euler(euler)\n\n # pose\n @property\n def pose(self):\n # first move camera to radius\n res = np.eye(4, dtype=np.float32)\n res[2, 3] -= self.radius\n # rotate\n rot = np.eye(4, dtype=np.float32)\n rot[:3, :3] = self.rot.as_matrix()\n res = rot @ res\n # translate\n res[:3, 3] -= self.center\n return res\n\n def set_pose(self, pose):\n self.rot = R.from_matrix(pose[:3, :3])\n self.center = -pose[:3, 3] - self.rot.as_matrix()[:3, 2] * self.radius\n\n @property\n def intrinsics(self):\n focal = self.H / (2 * np.tan(np.radians(self.fovy) / 2))\n return np.array([focal, focal, self.W / 2, self.H / 2])\n\n @property\n def euler(self):\n return (self.rot * self.default_rot.inv()).as_euler('xyz', degrees=True)\n\n def set_euler(self, euler):\n self.rot = R.from_euler('xyz', euler, degrees=True) * self.default_rot\n\n def orbit(self, dx, dy):\n # rotate along camera up/side axis!\n side = self.rot.as_matrix()[:3, 0] # why this is side --> ? # already normalized.\n rotvec_x = self.up * np.radians(-0.1 * dx)\n rotvec_y = side * np.radians(-0.1 * dy)\n self.rot = R.from_rotvec(rotvec_x) * R.from_rotvec(rotvec_y) * self.rot\n\n def scale(self, delta):\n self.radius *= 1.1 ** (-delta)\n\n def pan(self, dx, dy, dz=0):\n # pan in camera coordinate system (careful on the sensitivity!)\n self.center += 0.0005 * self.rot.as_matrix()[:3, :3] @ np.array([dx, dy, dz])\n\n def pose2str(self):\n with np.printoptions(precision=3, suppress=True):\n return str(self.pose)"
},
{
"identifier": "NerfSynthetic",
"path": "lib/datasets/nerf_synthetic.py",
"snippet": "class NerfSynthetic(Dataset):\n\n def __init__(\n self, meta_files: list, world_scale: float = 1.0, rgba: bool = False\n ) -> None:\n super().__init__()\n self.meta_files = meta_files\n self.world_scale = world_scale\n self.rgba = rgba\n\n def __len__(self):\n return len(self.meta_files)\n\n def load_sub(self, sub):\n with open(sub) as mf:\n meta = json.load(mf)\n frames_i = []\n frames_p = []\n frames_c = []\n frames_t = []\n for frame in range(len(meta['frames'])):\n img = plotlib.imread(os.path.join(os.path.dirname(sub), meta['frames'][frame]['file_path'] + '.png'))\n h, w, c = img.shape\n x, y = w / 2, h / 2\n focal_length = y / numpy.tan(meta['camera_angle_x'] / 2)\n # scaling = 320.0 / img.shape[0]\n scaling = 1.0\n if not self.rgba:\n img = img[..., :3] * img[..., 3:] + (1 - img[..., 3:])\n # img = cv2.resize(img, [320, 320], interpolation=cv2.INTER_AREA)\n pose = meta['frames'][frame]['transform_matrix']\n frames_i.append(img)\n frames_p.append((numpy.array(pose) @ BLENDER_TO_OPENCV_MATRIX) * self.world_scale)\n frames_c.append(numpy.array([focal_length, focal_length, x, y]) * scaling)\n if 'time' in meta['frames'][frame]:\n frames_t.append(meta['frames'][frame]['time'])\n f32 = numpy.float32\n return dict(\n cond_imgs=numpy.array(frames_i, f32),\n cond_poses=numpy.array(frames_p, f32),\n cond_intrinsics=numpy.array(frames_c, f32),\n cond_times=numpy.array(frames_t, f32) * 2 - 1 if len(frames_t) else None\n )\n\n def __getitem__(self, index):\n sub = self.meta_files[index]\n return dict(\n scene_id=DC(index, cpu_only=True),\n scene_name=DC(sub, cpu_only=True),\n **self.load_sub(sub)\n )"
},
{
"identifier": "OppoDataset",
"path": "lib/datasets/oppo.py",
"snippet": "class OppoDataset(Dataset):\n\n def __init__(\n self, root_dir: str, split: str, world_scale: float = 1.0, rgba: bool = False\n ) -> None:\n super().__init__()\n self.root_dir = root_dir\n self.world_scale = world_scale\n self.rgba = rgba\n self.split = split\n\n self.downsample = 4.0\n self.img_wh = (int(2656 / self.downsample), int(3984 / self.downsample))\n self.define_transforms()\n\n # self.scene_bbox = torch.tensor([[-0.5, -0.5, -0.5], [0.5, 0.5, 0.5]])\n # self.near_far = [0.5, 1.5]\n\n camera_file = os.path.join(self.root_dir, f\"../../transforms_alignz_{split}.json\")\n with open(camera_file, 'r') as f:\n self.meta = json.load(f)['frames']\n\n self.poses = []\n self.imgs = []\n self.intrinsic = []\n w, h = self.img_wh\n\n for k, v in self.meta.items():\n imgid = v['file_path'].split('/')[-1]\n\n focal = 0.5 * v['calib_imgw'] / np.tan(0.5 * v['camera_angle_x']) # original focal length\n if self.downsample != 1.0:\n focal = focal / self.downsample\n\n image_path = os.path.join(self.root_dir, f\"../Lights/013/raw_undistorted/{imgid}.JPG\")\n c2w = np.array(v['transform_matrix'])\n c2w = torch.FloatTensor(c2w)\n self.poses.append(c2w)\n\n self.intrinsic.append(torch.tensor([focal, focal, w / 2, h / 2])) # focal, focal, cx, cy\n\n img = Image.open(image_path)\n\n if self.downsample != 1.0:\n img = img.resize(self.img_wh, Image.LANCZOS)\n img = self.transform(img) # (4, h, w)\n if self.split == 'train':\n mask_path = os.path.join(self.root_dir, f\"com_masks/{imgid}.png\")\n else:\n # mask_path = os.path.join(self.root_dir, f\"obj_masks/{imgid}.png\")\n mask_path = os.path.join(self.root_dir, f\"com_masks/{imgid}.png\")\n mask = cv2.imread(mask_path, 2) > 0\n if self.downsample != 1.0:\n mask = cv2.resize(mask.astype(np.uint8), self.img_wh) > 0\n mask = torch.from_numpy(mask).bool()\n img = img.permute(1,2,0)\n img = img * mask[...,None].float() + (1 - mask[...,None].float()) # blend A to RGB\n if rgba:\n img = torch.cat([img, mask[..., None]], dim=-1)\n self.imgs += [img]\n\n self.poses = torch.stack(self.poses, dim=0) * self.world_scale\n # self.poses = transform_poses_pca(np.array(self.poses))\n self.imgs = torch.stack(self.imgs, dim=0)\n self.intrinsic = torch.stack(self.intrinsic, dim=0)\n\n def define_transforms(self):\n self.transform = T.ToTensor()\n\n def __len__(self):\n return 1\n\n def __getitem__(self, index):\n return dict(\n scene_id=DC(index, cpu_only=True),\n scene_name=DC(self.root_dir, cpu_only=True),\n cond_imgs=np.array(self.imgs, np.float32),\n cond_poses=np.array(self.poses, np.float32),\n cond_intrinsics=np.array(self.intrinsic, np.float32)\n )"
},
{
"identifier": "config_parser",
"path": "opt.py",
"snippet": "def config_parser(cmd=None):\n parser = configargparse.ArgumentParser()\n # experiment\n parser.add_argument('--load-image', type=str, default=None,\n help='zero123pp image path')\n parser.add_argument(\"--proj-name\", type=str, default=\"test\",\n help='experiment name')\n parser.add_argument(\"--wandb-project\", type=str, \n default=\"zerorf\", help='wandb project name')\n \n # data\n parser.add_argument(\"--dataset\", type=str, \n default=\"nerf_syn\", help='type of dataset')\n parser.add_argument(\"--data-dir\", type=str, \n default=\"/root/nerf_synthetic\", help='directory of the dataset')\n parser.add_argument(\"--obj\", type=str, \n default=\"chair\", help='object name')\n parser.add_argument(\"--n-views\", type=int, \n default=6, help='number of input views')\n \n # model\n parser.add_argument(\"--model-res\", type=int, \n default=20, help='noise resolution (should be about 1/40 the provided image resolution), ignored when load-image is set')\n parser.add_argument(\"--model-ch\", type=int, \n default=8, help='noise channel')\n parser.add_argument(\"--n-rays-init\", type=int, \n default=2**12, help='number of rays per batch initially')\n parser.add_argument(\"--n-rays-up\", type=int, \n default=2**16, help='number of rays per batch after 100 iterations')\n parser.add_argument(\"--learn-bg\", action='store_true', help='if learn background')\n parser.add_argument(\"--bg-color\", type=float, \n default=1.0, help='background color')\n parser.add_argument(\"--rep\", type=str, choices=['dif', 'tensorf'],\n default=\"dif\", help=\"representation to use\")\n \n # training\n parser.add_argument(\"--net-lr\", type=float, \n default=0.002, help='learning rate')\n parser.add_argument(\"--seed\", type=int, \n default=1337, help='random seed')\n parser.add_argument(\"--n-val\", type=int, \n default=1, help='number of validate views')\n parser.add_argument(\"--net-lr-decay-to\", type=float, \n default=0.002, help='lr decay rate')\n parser.add_argument(\"--n-iters\", type=int, \n default=10000, help='number of iterations')\n parser.add_argument(\"--val-iter\", type=int, \n default=1000, help='valid every k iterations')\n parser.add_argument(\"--device\", type=str, \n default=\"cuda:0\", help='device name')\n \n if cmd is not None:\n return parser.parse_args(cmd)\n else:\n return parser.parse_args()"
}
] | import sys
import shutil
import os
import cv2
import tqdm
import json
import numpy
import wandb
import torch
import torch_redstone as rst
import einops
from sklearn.cluster import KMeans
from lib.models.autoencoders import MultiSceneNeRF
from mmgen.models import build_model, build_module
from lib.core.optimizer import build_optimizers
from lib.core.ssdnerf_gui import OrbitCamera
from lib.datasets.nerf_synthetic import NerfSynthetic
from lib.datasets.oppo import OppoDataset
from PIL import Image
from opt import config_parser
from pprint import pprint | 7,955 | )
entry = test[0]
test_entry = dict(
test_imgs=torch.tensor(entry['cond_imgs'][:][None]).float().to(device),
test_poses=torch.tensor(entry['cond_poses'][:])[None].float().to(device),
test_intrinsics=torch.tensor(entry['cond_intrinsics'][:])[None].float().to(device),
scene_id=[0],
scene_name=[args.proj_name]
)
else:
data_entry = dict(
cond_imgs=images,
cond_poses=torch.tensor(poses)[None].float().to(device) * 0.9,
cond_intrinsics=torch.tensor(intrinsics)[None].float().to(device),
scene_id=[0],
scene_name=[args.proj_name]
)
selected_idxs = list(range(args.n_views))
pic_h = data_entry['cond_imgs'].shape[-3]
pic_w = data_entry['cond_imgs'].shape[-2]
if args.load_image:
args.model_res = 4
pic_h = pic_w = 320
cam = OrbitCamera('render', pic_w, pic_h, 3.2, 48)
decoder_1 = dict(
type='TensorialDecoder',
preprocessor=dict(
type='TensorialGenerator',
in_ch=args.model_ch, out_ch=16, noise_res=args.model_res,
tensor_config=(
['xy', 'z', 'yz', 'x', 'zx', 'y']
)
),
subreduce=1 if args.load_image else 2,
reduce='cat',
separate_density_and_color=False,
sh_coef_only=False,
sdf_mode=False,
max_steps=1024 if not args.load_image else 320,
n_images=args.n_views,
image_h=pic_h,
image_w=pic_w,
has_time_dynamics=False,
visualize_mesh=True
)
decoder_2 = dict(
type='FreqFactorizedDecoder',
preprocessor=dict(
type='TensorialGenerator',
in_ch=args.model_ch, out_ch=16, noise_res=args.model_res,
tensor_config=['xyz', 'xyz']
),
subreduce=1,
reduce='cat',
separate_density_and_color=False,
sh_coef_only=False,
sdf_mode=False,
max_steps=1024 if not args.load_image else 640,
n_images=args.n_views,
image_h=pic_h,
image_w=pic_w,
has_time_dynamics=False,
freq_bands=[None, 0.4],
visualize_mesh=True
)
patch_reg_loss = build_module(dict(
type='MaskedTVLoss',
power=1.5,
loss_weight=0.00
))
nerf: MultiSceneNeRF = build_model(dict(
type='MultiSceneNeRF',
code_size=code_size,
code_activation=dict(type='IdentityCode'),
grid_size=64,
patch_size=32,
decoder=decoder_2 if args.rep == 'dif' else decoder_1,
decoder_use_ema=False,
bg_color=1.0,
pixel_loss=dict(
type='MSELoss',
loss_weight=3.2
),
use_lpips_metric=torch.cuda.mem_get_info()[1] // 1000 ** 3 >= 32,
cache_size=1,
cache_16bit=False,
init_from_mean=True
), train_cfg = dict(
dt_gamma_scale=0.5,
density_thresh=0.05,
extra_scene_step=0,
n_inverse_rays=args.n_rays_init,
n_decoder_rays=args.n_rays_init,
loss_coef=0.1 / (pic_h * pic_w),
optimizer=dict(type='Adam', lr=0, weight_decay=0.),
lr_scheduler=dict(type='ExponentialLR', gamma=0.99),
cache_load_from=None,
viz_dir=None,
loss_denom=1.0,
decoder_grad_clip=1.0
),
test_cfg = dict(
img_size=(pic_h, pic_w),
density_thresh=0.01,
max_render_rays=pic_h * pic_w,
dt_gamma_scale=0.5,
n_inverse_rays=args.n_rays_init,
loss_coef=0.1 / (pic_h * pic_w),
n_inverse_steps=400,
optimizer=dict(type='Adam', lr=0.0, weight_decay=0.),
lr_scheduler=dict(type='ExponentialLR', gamma=0.998),
return_depth=False
))
nerf.bg_color = nerf.decoder.bg_color = torch.nn.Parameter(torch.ones(3) * args.bg_color, requires_grad=args.learn_bg)
nerf.to(device)
nerf.train()
|
sys.path.append('.')
torch.backends.cuda.matmul.allow_tf32 = True
def kmeans_downsample(points, n_points_to_sample):
kmeans = KMeans(n_points_to_sample).fit(points)
return ((points - kmeans.cluster_centers_[..., None, :]) ** 2).sum(-1).argmin(-1).tolist()
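# Illustrative note (not in the original file): kmeans_downsample clusters the camera
# centers and keeps, for each cluster, the index of the point nearest its centroid, e.g.
#   selected_idxs = kmeans_downsample(entry['cond_poses'][..., :3, 3], args.n_views)
# which is how the input views are selected further below.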
args = config_parser()
pprint(args)
model_scaling_factor = 16
device = args.device
BLENDER_TO_OPENCV_MATRIX = numpy.array([
[1, 0, 0, 0],
[0, -1, 0, 0],
[0, 0, -1, 0],
[0, 0, 0, 1]
], dtype=numpy.float32)
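# Illustrative note (not in the original file): camera-to-world poses in the Blender/OpenGL
# convention are converted to the OpenCV convention by right-multiplication, e.g.
#   pose_opencv = pose_blender @ BLENDER_TO_OPENCV_MATRIX
# which flips the camera y and z axes, as done for the zero123++ frames below and inside
# the NerfSynthetic loader.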
code_size = (3, args.model_ch, args.model_res, args.model_res)
rst.seed(args.seed)
poses = []
intrinsics = []
if args.load_image:
image = numpy.array(Image.open(args.load_image)).astype(numpy.float32) / 255.0
image = torch.tensor(image).cuda()
images = einops.rearrange(image, '(ph h) (pw w) c -> (ph pw) h w c', ph=3, pw=2)[None]
meta = json.load(open(os.path.join(os.path.dirname(__file__), "meta.json")))
poses = numpy.array([
(numpy.array(frame['transform_matrix']) @ BLENDER_TO_OPENCV_MATRIX) * 2
for frame in meta['sample_0']['view_frames']
])
_, b, h, w, c = images.shape
x, y = w / 2, h / 2
focal_length = y / numpy.tan(meta['fovy'] / 2)
intrinsics = numpy.array([[focal_length, focal_length, x, y]] * args.n_views)
work_dir = "results/%s" % args.proj_name
os.makedirs(work_dir, exist_ok=True)
os.chdir(work_dir)
if not args.load_image:
if args.dataset == "nerf_syn":
model_scale = dict(chair=2.1, drums=2.3, ficus=2.3, hotdog=3.0, lego=2.4, materials=2.4, mic=2.5, ship=2.75)
world_scale = 2 / model_scale[args.obj]
dataset = NerfSynthetic([f"{args.data_dir}/{args.obj}/transforms_train.json"], rgba=True, world_scale=world_scale)
val = NerfSynthetic([f"{args.data_dir}/{args.obj}/transforms_val.json"], world_scale=world_scale)
test = NerfSynthetic([f"{args.data_dir}/{args.obj}/transforms_test.json"], world_scale=world_scale)
entry = dataset[0]
selected_idxs = kmeans_downsample(entry['cond_poses'][..., :3, 3], args.n_views)
elif args.dataset == "oi":
world_scale = 5.0
dataset = OppoDataset(f"{args.data_dir}/{args.obj}/output", split='train', world_scale=world_scale, rgba=True)
val = OppoDataset(f"{args.data_dir}/{args.obj}/output", split='test', world_scale=world_scale)
test = OppoDataset(f"{args.data_dir}/{args.obj}/output", split='test', world_scale=world_scale)
entry = dataset[0]
if args.n_views == 6:
selected_idxs = [10, 3, 19, 22, 17, 35]
elif args.n_views == 4:
selected_idxs = [10, 33, 35, 6]
else:
selected_idxs = kmeans_downsample(entry['cond_poses'][..., :3, 3], args.n_views)
data_entry = dict(
cond_imgs=torch.tensor(entry['cond_imgs'][selected_idxs][None]).float().to(device),
cond_poses=torch.tensor(entry['cond_poses'])[selected_idxs][None].float().to(device),
cond_intrinsics=torch.tensor(entry['cond_intrinsics'])[selected_idxs][None].float().to(device),
scene_id=[0],
scene_name=[args.proj_name]
)
entry = val[0]
val_entry = dict(
test_imgs=torch.tensor(entry['cond_imgs'][:args.n_val][None]).float().to(device),
test_poses=torch.tensor(entry['cond_poses'][:args.n_val])[None].float().to(device),
test_intrinsics=torch.tensor(entry['cond_intrinsics'][:args.n_val])[None].float().to(device),
scene_id=[0],
scene_name=[args.proj_name]
)
entry = test[0]
test_entry = dict(
test_imgs=torch.tensor(entry['cond_imgs'][:][None]).float().to(device),
test_poses=torch.tensor(entry['cond_poses'][:])[None].float().to(device),
test_intrinsics=torch.tensor(entry['cond_intrinsics'][:])[None].float().to(device),
scene_id=[0],
scene_name=[args.proj_name]
)
else:
data_entry = dict(
cond_imgs=images,
cond_poses=torch.tensor(poses)[None].float().to(device) * 0.9,
cond_intrinsics=torch.tensor(intrinsics)[None].float().to(device),
scene_id=[0],
scene_name=[args.proj_name]
)
selected_idxs = list(range(args.n_views))
pic_h = data_entry['cond_imgs'].shape[-3]
pic_w = data_entry['cond_imgs'].shape[-2]
if args.load_image:
args.model_res = 4
pic_h = pic_w = 320
cam = OrbitCamera('render', pic_w, pic_h, 3.2, 48)
decoder_1 = dict(
type='TensorialDecoder',
preprocessor=dict(
type='TensorialGenerator',
in_ch=args.model_ch, out_ch=16, noise_res=args.model_res,
tensor_config=(
['xy', 'z', 'yz', 'x', 'zx', 'y']
)
),
subreduce=1 if args.load_image else 2,
reduce='cat',
separate_density_and_color=False,
sh_coef_only=False,
sdf_mode=False,
max_steps=1024 if not args.load_image else 320,
n_images=args.n_views,
image_h=pic_h,
image_w=pic_w,
has_time_dynamics=False,
visualize_mesh=True
)
decoder_2 = dict(
type='FreqFactorizedDecoder',
preprocessor=dict(
type='TensorialGenerator',
in_ch=args.model_ch, out_ch=16, noise_res=args.model_res,
tensor_config=['xyz', 'xyz']
),
subreduce=1,
reduce='cat',
separate_density_and_color=False,
sh_coef_only=False,
sdf_mode=False,
max_steps=1024 if not args.load_image else 640,
n_images=args.n_views,
image_h=pic_h,
image_w=pic_w,
has_time_dynamics=False,
freq_bands=[None, 0.4],
visualize_mesh=True
)
patch_reg_loss = build_module(dict(
type='MaskedTVLoss',
power=1.5,
loss_weight=0.00
))
nerf: MultiSceneNeRF = build_model(dict(
type='MultiSceneNeRF',
code_size=code_size,
code_activation=dict(type='IdentityCode'),
grid_size=64,
patch_size=32,
decoder=decoder_2 if args.rep == 'dif' else decoder_1,
decoder_use_ema=False,
bg_color=1.0,
pixel_loss=dict(
type='MSELoss',
loss_weight=3.2
),
use_lpips_metric=torch.cuda.mem_get_info()[1] // 1000 ** 3 >= 32,
cache_size=1,
cache_16bit=False,
init_from_mean=True
), train_cfg = dict(
dt_gamma_scale=0.5,
density_thresh=0.05,
extra_scene_step=0,
n_inverse_rays=args.n_rays_init,
n_decoder_rays=args.n_rays_init,
loss_coef=0.1 / (pic_h * pic_w),
optimizer=dict(type='Adam', lr=0, weight_decay=0.),
lr_scheduler=dict(type='ExponentialLR', gamma=0.99),
cache_load_from=None,
viz_dir=None,
loss_denom=1.0,
decoder_grad_clip=1.0
),
test_cfg = dict(
img_size=(pic_h, pic_w),
density_thresh=0.01,
max_render_rays=pic_h * pic_w,
dt_gamma_scale=0.5,
n_inverse_rays=args.n_rays_init,
loss_coef=0.1 / (pic_h * pic_w),
n_inverse_steps=400,
optimizer=dict(type='Adam', lr=0.0, weight_decay=0.),
lr_scheduler=dict(type='ExponentialLR', gamma=0.998),
return_depth=False
))
nerf.bg_color = nerf.decoder.bg_color = torch.nn.Parameter(torch.ones(3) * args.bg_color, requires_grad=args.learn_bg)
nerf.to(device)
nerf.train() | optim = build_optimizers(nerf, dict(decoder=dict(type='AdamW', lr=args.net_lr, foreach=True, weight_decay=0.2, betas=(0.9, 0.98)))) | 1 | 2023-12-14 03:29:28+00:00 | 12k |
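# Illustrative note for the row above (not part of the dataset row): per the build_optimizers
# snippet in this row's context, passing a dict of dicts keyed by module attribute name,
# e.g. dict(decoder=dict(type='AdamW', lr=args.net_lr, ...)), builds one optimizer over
# nerf.decoder's parameters and returns it under the key 'decoder'.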
u2seg/U2Seg | detectron2/data/datasets/builtin.py | [
{
"identifier": "DatasetCatalog",
"path": "detectron2/data/catalog.py",
"snippet": "class _DatasetCatalog(UserDict):\nclass Metadata(types.SimpleNamespace):\nclass _MetadataCatalog(UserDict):\n def register(self, name, func):\n def get(self, name):\n def list(self) -> List[str]:\n def remove(self, name):\n def __str__(self):\n def __getattr__(self, key):\n def __setattr__(self, key, val):\n def as_dict(self):\n def set(self, **kwargs):\n def get(self, key, default=None):\n def get(self, name):\n def list(self):\n def remove(self, name):\n def __str__(self):\n _RENAMED = {\n \"class_names\": \"thing_classes\",\n \"dataset_id_to_contiguous_id\": \"thing_dataset_id_to_contiguous_id\",\n \"stuff_class_names\": \"stuff_classes\",\n }"
},
{
"identifier": "ADE20K_SEM_SEG_CATEGORIES",
"path": "detectron2/data/datasets/builtin_meta.py",
"snippet": "ADE20K_SEM_SEG_CATEGORIES = [\n \"wall\", \"building\", \"sky\", \"floor\", \"tree\", \"ceiling\", \"road, route\", \"bed\", \"window \", \"grass\", \"cabinet\", \"sidewalk, pavement\", \"person\", \"earth, ground\", \"door\", \"table\", \"mountain, mount\", \"plant\", \"curtain\", \"chair\", \"car\", \"water\", \"painting, picture\", \"sofa\", \"shelf\", \"house\", \"sea\", \"mirror\", \"rug\", \"field\", \"armchair\", \"seat\", \"fence\", \"desk\", \"rock, stone\", \"wardrobe, closet, press\", \"lamp\", \"tub\", \"rail\", \"cushion\", \"base, pedestal, stand\", \"box\", \"column, pillar\", \"signboard, sign\", \"chest of drawers, chest, bureau, dresser\", \"counter\", \"sand\", \"sink\", \"skyscraper\", \"fireplace\", \"refrigerator, icebox\", \"grandstand, covered stand\", \"path\", \"stairs\", \"runway\", \"case, display case, showcase, vitrine\", \"pool table, billiard table, snooker table\", \"pillow\", \"screen door, screen\", \"stairway, staircase\", \"river\", \"bridge, span\", \"bookcase\", \"blind, screen\", \"coffee table\", \"toilet, can, commode, crapper, pot, potty, stool, throne\", \"flower\", \"book\", \"hill\", \"bench\", \"countertop\", \"stove\", \"palm, palm tree\", \"kitchen island\", \"computer\", \"swivel chair\", \"boat\", \"bar\", \"arcade machine\", \"hovel, hut, hutch, shack, shanty\", \"bus\", \"towel\", \"light\", \"truck\", \"tower\", \"chandelier\", \"awning, sunshade, sunblind\", \"street lamp\", \"booth\", \"tv\", \"plane\", \"dirt track\", \"clothes\", \"pole\", \"land, ground, soil\", \"bannister, banister, balustrade, balusters, handrail\", \"escalator, moving staircase, moving stairway\", \"ottoman, pouf, pouffe, puff, hassock\", \"bottle\", \"buffet, counter, sideboard\", \"poster, posting, placard, notice, bill, card\", \"stage\", \"van\", \"ship\", \"fountain\", \"conveyer belt, conveyor belt, conveyer, conveyor, transporter\", \"canopy\", \"washer, automatic washer, washing machine\", \"plaything, toy\", \"pool\", \"stool\", \"barrel, cask\", \"basket, handbasket\", \"falls\", \"tent\", \"bag\", \"minibike, motorbike\", \"cradle\", \"oven\", \"ball\", \"food, solid food\", \"step, stair\", \"tank, storage tank\", \"trade name\", \"microwave\", \"pot\", \"animal\", \"bicycle\", \"lake\", \"dishwasher\", \"screen\", \"blanket, cover\", \"sculpture\", \"hood, exhaust hood\", \"sconce\", \"vase\", \"traffic light\", \"tray\", \"trash can\", \"fan\", \"pier\", \"crt screen\", \"plate\", \"monitor\", \"bulletin board\", \"shower\", \"radiator\", \"glass, drinking glass\", \"clock\", \"flag\", # noqa\n]"
},
{
"identifier": "_get_builtin_metadata",
"path": "detectron2/data/datasets/builtin_meta.py",
"snippet": "def _get_builtin_metadata(dataset_name):\n if dataset_name in [\"coco\", \"coco_semi\"]:\n return _get_coco_instances_meta()\n if dataset_name == \"coco_panoptic_separated\":\n return _get_coco_panoptic_separated_meta()\n elif dataset_name == \"coco_panoptic_standard\":\n meta = {}\n # The following metadata maps contiguous id from [0, #thing categories +\n # #stuff categories) to their names and colors. We have to replica of the\n # same name and color under \"thing_*\" and \"stuff_*\" because the current\n # visualization function in D2 handles thing and class classes differently\n # due to some heuristic used in Panoptic FPN. We keep the same naming to\n # enable reusing existing visualization functions.\n thing_classes = [k[\"name\"] for k in COCO_CATEGORIES]\n thing_colors = [k[\"color\"] for k in COCO_CATEGORIES]\n stuff_classes = [k[\"name\"] for k in COCO_CATEGORIES]\n stuff_colors = [k[\"color\"] for k in COCO_CATEGORIES]\n\n meta[\"thing_classes\"] = thing_classes\n meta[\"thing_colors\"] = thing_colors\n meta[\"stuff_classes\"] = stuff_classes\n meta[\"stuff_colors\"] = stuff_colors\n\n # Convert category id for training:\n # category id: like semantic segmentation, it is the class id for each\n # pixel. Since there are some classes not used in evaluation, the category\n # id is not always contiguous and thus we have two set of category ids:\n # - original category id: category id in the original dataset, mainly\n # used for evaluation.\n # - contiguous category id: [0, #classes), in order to train the linear\n # softmax classifier.\n thing_dataset_id_to_contiguous_id = {}\n stuff_dataset_id_to_contiguous_id = {}\n\n for i, cat in enumerate(COCO_CATEGORIES):\n if cat[\"isthing\"]:\n thing_dataset_id_to_contiguous_id[cat[\"id\"]] = i\n else:\n stuff_dataset_id_to_contiguous_id[cat[\"id\"]] = i\n\n meta[\"thing_dataset_id_to_contiguous_id\"] = thing_dataset_id_to_contiguous_id\n meta[\"stuff_dataset_id_to_contiguous_id\"] = stuff_dataset_id_to_contiguous_id\n\n return meta\n elif dataset_name == \"coco_person\":\n return {\n \"thing_classes\": [\"person\"],\n \"keypoint_names\": COCO_PERSON_KEYPOINT_NAMES,\n \"keypoint_flip_map\": COCO_PERSON_KEYPOINT_FLIP_MAP,\n \"keypoint_connection_rules\": KEYPOINT_CONNECTION_RULES,\n }\n elif dataset_name == \"cityscapes\":\n # fmt: off\n CITYSCAPES_THING_CLASSES = [\n \"person\", \"rider\", \"car\", \"truck\",\n \"bus\", \"train\", \"motorcycle\", \"bicycle\",\n ]\n CITYSCAPES_STUFF_CLASSES = [\n \"road\", \"sidewalk\", \"building\", \"wall\", \"fence\", \"pole\", \"traffic light\",\n \"traffic sign\", \"vegetation\", \"terrain\", \"sky\", \"person\", \"rider\", \"car\",\n \"truck\", \"bus\", \"train\", \"motorcycle\", \"bicycle\",\n ]\n # fmt: on\n return {\n \"thing_classes\": CITYSCAPES_THING_CLASSES,\n \"stuff_classes\": CITYSCAPES_STUFF_CLASSES,\n }\n raise KeyError(\"No built-in metadata for dataset {}\".format(dataset_name))"
},
{
"identifier": "load_cityscapes_instances",
"path": "detectron2/data/datasets/cityscapes.py",
"snippet": "def load_cityscapes_instances(image_dir, gt_dir, from_json=True, to_polygons=True):\n \"\"\"\n Args:\n image_dir (str): path to the raw dataset. e.g., \"~/cityscapes/leftImg8bit/train\".\n gt_dir (str): path to the raw annotations. e.g., \"~/cityscapes/gtFine/train\".\n from_json (bool): whether to read annotations from the raw json file or the png files.\n to_polygons (bool): whether to represent the segmentation as polygons\n (COCO's format) instead of masks (cityscapes's format).\n\n Returns:\n list[dict]: a list of dicts in Detectron2 standard format. (See\n `Using Custom Datasets </tutorials/datasets.html>`_ )\n \"\"\"\n if from_json:\n assert to_polygons, (\n \"Cityscapes's json annotations are in polygon format. \"\n \"Converting to mask format is not supported now.\"\n )\n files = _get_cityscapes_files(image_dir, gt_dir)\n\n logger.info(\"Preprocessing cityscapes annotations ...\")\n # This is still not fast: all workers will execute duplicate works and will\n # take up to 10m on a 8GPU server.\n pool = mp.Pool(processes=max(mp.cpu_count() // get_world_size() // 2, 4))\n\n ret = pool.map(\n functools.partial(_cityscapes_files_to_dict, from_json=from_json, to_polygons=to_polygons),\n files,\n )\n logger.info(\"Loaded {} images from {}\".format(len(ret), image_dir))\n\n # Map cityscape ids to contiguous ids\n from cityscapesscripts.helpers.labels import labels\n\n labels = [l for l in labels if l.hasInstances and not l.ignoreInEval]\n dataset_id_to_contiguous_id = {l.id: idx for idx, l in enumerate(labels)}\n for dict_per_image in ret:\n for anno in dict_per_image[\"annotations\"]:\n anno[\"category_id\"] = dataset_id_to_contiguous_id[anno[\"category_id\"]]\n return ret"
},
{
"identifier": "load_cityscapes_semantic",
"path": "detectron2/data/datasets/cityscapes.py",
"snippet": "def load_cityscapes_semantic(image_dir, gt_dir):\n \"\"\"\n Args:\n image_dir (str): path to the raw dataset. e.g., \"~/cityscapes/leftImg8bit/train\".\n gt_dir (str): path to the raw annotations. e.g., \"~/cityscapes/gtFine/train\".\n\n Returns:\n list[dict]: a list of dict, each has \"file_name\" and\n \"sem_seg_file_name\".\n \"\"\"\n ret = []\n # gt_dir is small and contain many small files. make sense to fetch to local first\n gt_dir = PathManager.get_local_path(gt_dir)\n for image_file, _, label_file, json_file in _get_cityscapes_files(image_dir, gt_dir):\n label_file = label_file.replace(\"labelIds\", \"labelTrainIds\")\n\n with PathManager.open(json_file, \"r\") as f:\n jsonobj = json.load(f)\n ret.append(\n {\n \"file_name\": image_file,\n \"sem_seg_file_name\": label_file,\n \"height\": jsonobj[\"imgHeight\"],\n \"width\": jsonobj[\"imgWidth\"],\n }\n )\n assert len(ret), f\"No images found in {image_dir}!\"\n assert PathManager.isfile(\n ret[0][\"sem_seg_file_name\"]\n ), \"Please generate labelTrainIds.png with cityscapesscripts/preparation/createTrainIdLabelImgs.py\" # noqa\n return ret"
},
{
"identifier": "register_all_cityscapes_panoptic",
"path": "detectron2/data/datasets/cityscapes_panoptic.py",
"snippet": "def register_all_cityscapes_panoptic(root):\n meta = {}\n # The following metadata maps contiguous id from [0, #thing categories +\n # #stuff categories) to their names and colors. We have to replica of the\n # same name and color under \"thing_*\" and \"stuff_*\" because the current\n # visualization function in D2 handles thing and class classes differently\n # due to some heuristic used in Panoptic FPN. We keep the same naming to\n # enable reusing existing visualization functions.\n thing_classes = [k[\"name\"] for k in CITYSCAPES_CATEGORIES]\n thing_colors = [k[\"color\"] for k in CITYSCAPES_CATEGORIES]\n stuff_classes = [k[\"name\"] for k in CITYSCAPES_CATEGORIES]\n stuff_colors = [k[\"color\"] for k in CITYSCAPES_CATEGORIES]\n\n meta[\"thing_classes\"] = thing_classes\n meta[\"thing_colors\"] = thing_colors\n meta[\"stuff_classes\"] = stuff_classes\n meta[\"stuff_colors\"] = stuff_colors\n\n # There are three types of ids in cityscapes panoptic segmentation:\n # (1) category id: like semantic segmentation, it is the class id for each\n # pixel. Since there are some classes not used in evaluation, the category\n # id is not always contiguous and thus we have two set of category ids:\n # - original category id: category id in the original dataset, mainly\n # used for evaluation.\n # - contiguous category id: [0, #classes), in order to train the classifier\n # (2) instance id: this id is used to differentiate different instances from\n # the same category. For \"stuff\" classes, the instance id is always 0; for\n # \"thing\" classes, the instance id starts from 1 and 0 is reserved for\n # ignored instances (e.g. crowd annotation).\n # (3) panoptic id: this is the compact id that encode both category and\n # instance id by: category_id * 1000 + instance_id.\n thing_dataset_id_to_contiguous_id = {}\n stuff_dataset_id_to_contiguous_id = {}\n\n for k in CITYSCAPES_CATEGORIES:\n if k[\"isthing\"] == 1:\n thing_dataset_id_to_contiguous_id[k[\"id\"]] = k[\"trainId\"]\n else:\n stuff_dataset_id_to_contiguous_id[k[\"id\"]] = k[\"trainId\"]\n\n meta[\"thing_dataset_id_to_contiguous_id\"] = thing_dataset_id_to_contiguous_id\n meta[\"stuff_dataset_id_to_contiguous_id\"] = stuff_dataset_id_to_contiguous_id\n\n for key, (image_dir, gt_dir, gt_json) in _RAW_CITYSCAPES_PANOPTIC_SPLITS.items():\n image_dir = os.path.join(root, image_dir)\n gt_dir = os.path.join(root, gt_dir)\n gt_json = os.path.join(root, gt_json)\n\n DatasetCatalog.register(\n key, lambda x=image_dir, y=gt_dir, z=gt_json: load_cityscapes_panoptic(x, y, z, meta)\n )\n MetadataCatalog.get(key).set(\n panoptic_root=gt_dir,\n image_root=image_dir,\n panoptic_json=gt_json,\n gt_dir=gt_dir.replace(\"cityscapes_panoptic_\", \"\"),\n evaluator_type=\"cityscapes_panoptic_seg\",\n ignore_label=255,\n label_divisor=1000,\n **meta,\n )"
},
{
"identifier": "load_sem_seg",
"path": "detectron2/data/datasets/coco.py",
"snippet": "def load_sem_seg(gt_root, image_root, gt_ext=\"png\", image_ext=\"jpg\"):\n \"\"\"\n Load semantic segmentation datasets. All files under \"gt_root\" with \"gt_ext\" extension are\n treated as ground truth annotations and all files under \"image_root\" with \"image_ext\" extension\n as input images. Ground truth and input images are matched using file paths relative to\n \"gt_root\" and \"image_root\" respectively without taking into account file extensions.\n This works for COCO as well as some other datasets.\n\n Args:\n gt_root (str): full path to ground truth semantic segmentation files. Semantic segmentation\n annotations are stored as images with integer values in pixels that represent\n corresponding semantic labels.\n image_root (str): the directory where the input images are.\n gt_ext (str): file extension for ground truth annotations.\n image_ext (str): file extension for input images.\n\n Returns:\n list[dict]:\n a list of dicts in detectron2 standard format without instance-level\n annotation.\n\n Notes:\n 1. This function does not read the image and ground truth files.\n The results do not have the \"image\" and \"sem_seg\" fields.\n \"\"\"\n\n # We match input images with ground truth based on their relative filepaths (without file\n # extensions) starting from 'image_root' and 'gt_root' respectively.\n def file2id(folder_path, file_path):\n # extract relative path starting from `folder_path`\n image_id = os.path.normpath(os.path.relpath(file_path, start=folder_path))\n # remove file extension\n image_id = os.path.splitext(image_id)[0]\n return image_id\n\n input_files = sorted(\n (os.path.join(image_root, f) for f in PathManager.ls(image_root) if f.endswith(image_ext)),\n key=lambda file_path: file2id(image_root, file_path),\n )\n gt_files = sorted(\n (os.path.join(gt_root, f) for f in PathManager.ls(gt_root) if f.endswith(gt_ext)),\n key=lambda file_path: file2id(gt_root, file_path),\n )\n\n assert len(gt_files) > 0, \"No annotations found in {}.\".format(gt_root)\n\n # Use the intersection, so that val2017_100 annotations can run smoothly with val2017 images\n if len(input_files) != len(gt_files):\n logger.warn(\n \"Directory {} and {} has {} and {} files, respectively.\".format(\n image_root, gt_root, len(input_files), len(gt_files)\n )\n )\n input_basenames = [os.path.basename(f)[: -len(image_ext)] for f in input_files]\n gt_basenames = [os.path.basename(f)[: -len(gt_ext)] for f in gt_files]\n intersect = list(set(input_basenames) & set(gt_basenames))\n # sort, otherwise each worker may obtain a list[dict] in different order\n intersect = sorted(intersect)\n logger.warn(\"Will use their intersection of {} files.\".format(len(intersect)))\n input_files = [os.path.join(image_root, f + image_ext) for f in intersect]\n gt_files = [os.path.join(gt_root, f + gt_ext) for f in intersect]\n\n logger.info(\n \"Loaded {} images with semantic segmentation from {}\".format(len(input_files), image_root)\n )\n\n dataset_dicts = []\n for (img_path, gt_path) in zip(input_files, gt_files):\n record = {}\n record[\"file_name\"] = img_path\n record[\"sem_seg_file_name\"] = gt_path\n dataset_dicts.append(record)\n\n return dataset_dicts"
},
{
"identifier": "register_coco_instances",
"path": "detectron2/data/datasets/coco.py",
"snippet": "def register_coco_instances(name, metadata, json_file, image_root):\n \"\"\"\n Register a dataset in COCO's json annotation format for\n instance detection, instance segmentation and keypoint detection.\n (i.e., Type 1 and 2 in http://cocodataset.org/#format-data.\n `instances*.json` and `person_keypoints*.json` in the dataset).\n\n This is an example of how to register a new dataset.\n You can do something similar to this function, to register new datasets.\n\n Args:\n name (str): the name that identifies a dataset, e.g. \"coco_2014_train\".\n metadata (dict): extra metadata associated with this dataset. You can\n leave it as an empty dict.\n json_file (str): path to the json instance annotation file.\n image_root (str or path-like): directory which contains all the images.\n \"\"\"\n assert isinstance(name, str), name\n assert isinstance(json_file, (str, os.PathLike)), json_file\n assert isinstance(image_root, (str, os.PathLike)), image_root\n # 1. register a function which returns dicts\n DatasetCatalog.register(name, lambda: load_coco_json(json_file, image_root, name))\n\n # 2. Optionally, add metadata about this dataset,\n # since they might be useful in evaluation, visualization or logging\n MetadataCatalog.get(name).set(\n json_file=json_file, image_root=image_root, evaluator_type=\"coco\", **metadata\n )"
},
{
"identifier": "register_coco_panoptic",
"path": "detectron2/data/datasets/coco_panoptic.py",
"snippet": "def register_coco_panoptic(\n name, metadata, image_root, panoptic_root, panoptic_json, instances_json=None\n):\n \"\"\"\n Register a \"standard\" version of COCO panoptic segmentation dataset named `name`.\n The dictionaries in this registered dataset follows detectron2's standard format.\n Hence it's called \"standard\".\n\n Args:\n name (str): the name that identifies a dataset,\n e.g. \"coco_2017_train_panoptic\"\n metadata (dict): extra metadata associated with this dataset.\n image_root (str): directory which contains all the images\n panoptic_root (str): directory which contains panoptic annotation images in COCO format\n panoptic_json (str): path to the json panoptic annotation file in COCO format\n sem_seg_root (none): not used, to be consistent with\n `register_coco_panoptic_separated`.\n instances_json (str): path to the json instance annotation file\n \"\"\"\n panoptic_name = name\n DatasetCatalog.register(\n panoptic_name,\n lambda: load_coco_panoptic_json(panoptic_json, image_root, panoptic_root, metadata),\n )\n MetadataCatalog.get(panoptic_name).set(\n panoptic_root=panoptic_root,\n image_root=image_root,\n panoptic_json=panoptic_json,\n json_file=instances_json,\n evaluator_type=\"coco_panoptic_seg\",\n ignore_label=255,\n label_divisor=1000,\n **metadata,\n )"
},
{
"identifier": "register_coco_panoptic_separated",
"path": "detectron2/data/datasets/coco_panoptic.py",
"snippet": "def register_coco_panoptic_separated(\n name, metadata, image_root, panoptic_root, panoptic_json, sem_seg_root, instances_json\n):\n \"\"\"\n Register a \"separated\" version of COCO panoptic segmentation dataset named `name`.\n The annotations in this registered dataset will contain both instance annotations and\n semantic annotations, each with its own contiguous ids. Hence it's called \"separated\".\n\n It follows the setting used by the PanopticFPN paper:\n\n 1. The instance annotations directly come from polygons in the COCO\n instances annotation task, rather than from the masks in the COCO panoptic annotations.\n\n The two format have small differences:\n Polygons in the instance annotations may have overlaps.\n The mask annotations are produced by labeling the overlapped polygons\n with depth ordering.\n\n 2. The semantic annotations are converted from panoptic annotations, where\n all \"things\" are assigned a semantic id of 0.\n All semantic categories will therefore have ids in contiguous\n range [1, #stuff_categories].\n\n This function will also register a pure semantic segmentation dataset\n named ``name + '_stuffonly'``.\n\n Args:\n name (str): the name that identifies a dataset,\n e.g. \"coco_2017_train_panoptic\"\n metadata (dict): extra metadata associated with this dataset.\n image_root (str): directory which contains all the images\n panoptic_root (str): directory which contains panoptic annotation images\n panoptic_json (str): path to the json panoptic annotation file\n sem_seg_root (str): directory which contains all the ground truth segmentation annotations.\n instances_json (str): path to the json instance annotation file\n \"\"\"\n panoptic_name = name + \"_separated\"\n DatasetCatalog.register(\n panoptic_name,\n lambda: merge_to_panoptic(\n load_coco_json(instances_json, image_root, panoptic_name),\n load_sem_seg(sem_seg_root, image_root),\n ),\n )\n MetadataCatalog.get(panoptic_name).set(\n panoptic_root=panoptic_root,\n image_root=image_root,\n panoptic_json=panoptic_json,\n sem_seg_root=sem_seg_root,\n json_file=instances_json, # TODO rename\n evaluator_type=\"coco_panoptic_seg\",\n ignore_label=255,\n **metadata,\n )\n\n semantic_name = name + \"_stuffonly\"\n DatasetCatalog.register(semantic_name, lambda: load_sem_seg(sem_seg_root, image_root))\n MetadataCatalog.get(semantic_name).set(\n sem_seg_root=sem_seg_root,\n image_root=image_root,\n evaluator_type=\"sem_seg\",\n ignore_label=255,\n **metadata,\n )"
},
{
"identifier": "get_lvis_instances_meta",
"path": "detectron2/data/datasets/lvis.py",
"snippet": "def get_lvis_instances_meta(dataset_name):\n \"\"\"\n Load LVIS metadata.\n\n Args:\n dataset_name (str): LVIS dataset name without the split name (e.g., \"lvis_v0.5\").\n\n Returns:\n dict: LVIS metadata with keys: thing_classes\n \"\"\"\n if \"cocofied\" in dataset_name:\n return _get_coco_instances_meta()\n if \"v0.5\" in dataset_name:\n return _get_lvis_instances_meta_v0_5()\n elif \"v1\" in dataset_name:\n return _get_lvis_instances_meta_v1()\n raise ValueError(\"No built-in metadata for dataset {}\".format(dataset_name))"
},
{
"identifier": "register_lvis_instances",
"path": "detectron2/data/datasets/lvis.py",
"snippet": "def register_lvis_instances(name, metadata, json_file, image_root):\n \"\"\"\n Register a dataset in LVIS's json annotation format for instance detection and segmentation.\n\n Args:\n name (str): a name that identifies the dataset, e.g. \"lvis_v0.5_train\".\n metadata (dict): extra metadata associated with this dataset. It can be an empty dict.\n json_file (str): path to the json instance annotation file.\n image_root (str or path-like): directory which contains all the images.\n \"\"\"\n DatasetCatalog.register(name, lambda: load_lvis_json(json_file, image_root, name))\n MetadataCatalog.get(name).set(\n json_file=json_file, image_root=image_root, evaluator_type=\"lvis\", **metadata\n )"
},
{
"identifier": "register_pascal_voc",
"path": "detectron2/data/datasets/pascal_voc.py",
"snippet": "def register_pascal_voc(name, dirname, split, year, class_names=CLASS_NAMES):\n DatasetCatalog.register(name, lambda: load_voc_instances(dirname, split, class_names))\n MetadataCatalog.get(name).set(\n thing_classes=list(class_names), dirname=dirname, year=year, split=split\n )"
}
] | import os
from detectron2.data import DatasetCatalog, MetadataCatalog
from .builtin_meta import ADE20K_SEM_SEG_CATEGORIES, _get_builtin_metadata
from .cityscapes import load_cityscapes_instances, load_cityscapes_semantic
from .cityscapes_panoptic import register_all_cityscapes_panoptic
from .coco import load_sem_seg, register_coco_instances
from .coco_panoptic import register_coco_panoptic, register_coco_panoptic_separated
from .lvis import get_lvis_instances_meta, register_lvis_instances
from .pascal_voc import register_pascal_voc | 8,230 | """
# ==== Predefined datasets and splits for COCO ==========
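# CLUSTER_NUM selects which u2seg annotation files are loaded below (the value is
# interpolated into the annotation file names); it defaults to '800'.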
cluster_num = os.getenv('CLUSTER_NUM', '800')
_PREDEFINED_SPLITS_COCO_SEMI = {}
_PREDEFINED_SPLITS_COCO_SEMI["coco_semi"] = {
# we use seed 42 to be consistent with previous works on SSL detection and segmentation
"coco_semi_1perc": ("/shared/group/coco/train2017", "/shared/niudt/DATASET/coco/annotations/coco-semi/1perc_instances_train2017.json"),
"coco_semi_2perc": ("/shared/group/coco/train2017", "/shared/niudt/DATASET/coco/annotations/coco-semi/2perc_instances_train2017.json"),
"coco_semi_5perc": ("/shared/group/coco/train2017", "/shared/niudt/DATASET/coco/annotations/coco-semi/5perc_instances_train2017.json"),
"coco_semi_10perc": ("/shared/group/coco/train2017", "/shared/niudt/DATASET/coco/annotations/coco-semi/10perc_instances_train2017.json"),
"coco_semi_20perc": ("/shared/group/coco/train2017", "/shared/niudt/DATASET/coco/annotations/coco-semi/20perc_instances_train2017.json"),
"coco_semi_30perc": ("/shared/group/coco/train2017", "/shared/niudt/DATASET/coco/annotations/coco-semi/30perc_instances_train2017.json"),
"coco_semi_40perc": ("/shared/group/coco/train2017", "/shared/niudt/DATASET/coco/annotations/coco-semi/40perc_instances_train2017.json"),
"coco_semi_50perc": ("/shared/group/coco/train2017", "/shared/niudt/DATASET/coco/annotations/coco-semi/50perc_instances_train2017.json"),
}
def register_all_coco_semi(root):
for dataset_name, splits_per_dataset in _PREDEFINED_SPLITS_COCO_SEMI.items():
for key, (image_root, json_file) in splits_per_dataset.items():
# Assume pre-defined datasets live in `./datasets`.
register_coco_instances(
key,
_get_builtin_metadata(dataset_name),
os.path.join(root, json_file) if "://" not in json_file else json_file,
os.path.join(root, image_root),
)
_PREDEFINED_SPLITS_COCO = {}
_PREDEFINED_SPLITS_COCO["coco"] = {
"coco_2014_train": ("coco/train2014", "coco/annotations/instances_train2014.json"),
"coco_2014_val": ("coco/val2014", "coco/annotations/instances_val2014.json"),
"coco_2014_minival": ("coco/val2014", "coco/annotations/instances_minival2014.json"),
"coco_2014_valminusminival": (
"coco/val2014",
"coco/annotations/instances_valminusminival2014.json",
),
"coco_2017_train": ("./coco/train2017", f"./prepare_ours/u2seg_annotations/ins_annotations/cocotrain_{cluster_num}.json"),
"coco_2017_val": ("./coco/val2017", "./coco/annotations/instances_val2017.json"),
"coco_2017_test": ("coco/test2017", "coco/annotations/image_info_test2017.json"),
"coco_2017_test-dev": ("coco/test2017", "coco/annotations/image_info_test-dev2017.json"),
"coco_2017_val_100": ("coco/val2017", "coco/annotations/instances_val2017_100.json"),
}
_PREDEFINED_SPLITS_COCO["coco_person"] = {
"keypoints_coco_2014_train": (
"coco/train2014",
"coco/annotations/person_keypoints_train2014.json",
),
"keypoints_coco_2014_val": ("coco/val2014", "coco/annotations/person_keypoints_val2014.json"),
"keypoints_coco_2014_minival": (
"coco/val2014",
"coco/annotations/person_keypoints_minival2014.json",
),
"keypoints_coco_2014_valminusminival": (
"coco/val2014",
"coco/annotations/person_keypoints_valminusminival2014.json",
),
"keypoints_coco_2017_train": (
"coco/train2017",
"coco/annotations/person_keypoints_train2017.json",
),
"keypoints_coco_2017_val": ("coco/val2017", "coco/annotations/person_keypoints_val2017.json"),
"keypoints_coco_2017_val_100": (
"coco/val2017",
"coco/annotations/person_keypoints_val2017_100.json",
),
}
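# Each panoptic split below maps to a (panoptic_root, panoptic_json, semantic_root)
# triple, which is unpacked in register_all_coco().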
_PREDEFINED_SPLITS_COCO_PANOPTIC = {
"coco_2017_train_panoptic": (
# This is the original panoptic annotation directory
f"./prepare_ours/u2seg_annotations/panoptic_annotations/cocotrain_{cluster_num}", # this should be .png format annotations
f"./prepare_ours/u2seg_annotations/panoptic_annotations/cocotrain_{cluster_num}.json", #this should be .json file
# This directory contains semantic annotations that are
# converted from panoptic annotations.
# It is used by PanopticFPN.
# You can use the script at detectron2/datasets/prepare_panoptic_fpn.py
# to create these directories.
f"./prepare_ours/u2seg_annotations/panoptic_annotations/panoptic_stuff_cocotrain_{cluster_num}",
),
"coco_2017_val_panoptic": (
"/home/niudt/u2seg_test/detectron2/datasets/datasets/coco/val2017",
"/home/niudt/u2seg_test/detectron2/datasets/datasets/panoptic_anns/panoptic_val2017.json",
"/home/niudt/u2seg_test/detectron2/datasets/datasets/panoptic_anns/panoptic_stuff_val2017",
),
"coco_2017_val_100_panoptic": (
"coco/panoptic_val2017_100",
"coco/annotations/panoptic_val2017_100.json",
"coco/panoptic_stuff_val2017_100",
),
}
def register_all_coco(root):
for dataset_name, splits_per_dataset in _PREDEFINED_SPLITS_COCO.items():
for key, (image_root, json_file) in splits_per_dataset.items():
# Assume pre-defined datasets live in `./datasets`.
register_coco_instances(
key,
_get_builtin_metadata(dataset_name),
os.path.join(root, json_file) if "://" not in json_file else json_file,
os.path.join(root, image_root),
)
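    # Register the panoptic splits, reusing the image root and instance json from
    # the instances datasets registered above.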
for (
prefix,
(panoptic_root, panoptic_json, semantic_root),
) in _PREDEFINED_SPLITS_COCO_PANOPTIC.items():
prefix_instances = prefix[: -len("_panoptic")]
instances_meta = MetadataCatalog.get(prefix_instances)
image_root, instances_json = instances_meta.image_root, instances_meta.json_file
# The "separated" version of COCO panoptic segmentation dataset,
# e.g. used by Panoptic FPN
# import pdb
# pdb.set_trace()
| # -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
"""
This file registers pre-defined datasets at hard-coded paths, and their metadata.
We hard-code metadata for common datasets. This will enable:
1. Consistency checks when loading the datasets
2. Using models on these standard datasets directly and running demos,
   without having to download the dataset annotations
We hard-code some paths to the dataset that's assumed to
exist in "./datasets/".
Users SHOULD NOT use this file to create new datasets / metadata for new datasets.
To add a new dataset, refer to the tutorial "docs/DATASETS.md".
"""
# ==== Predefined datasets and splits for COCO ==========
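# CLUSTER_NUM selects which u2seg annotation files are loaded below (the value is
# interpolated into the annotation file names); it defaults to '800'.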
cluster_num = os.getenv('CLUSTER_NUM', '800')
_PREDEFINED_SPLITS_COCO_SEMI = {}
_PREDEFINED_SPLITS_COCO_SEMI["coco_semi"] = {
# we use seed 42 to be consistent with previous works on SSL detection and segmentation
"coco_semi_1perc": ("/shared/group/coco/train2017", "/shared/niudt/DATASET/coco/annotations/coco-semi/1perc_instances_train2017.json"),
"coco_semi_2perc": ("/shared/group/coco/train2017", "/shared/niudt/DATASET/coco/annotations/coco-semi/2perc_instances_train2017.json"),
"coco_semi_5perc": ("/shared/group/coco/train2017", "/shared/niudt/DATASET/coco/annotations/coco-semi/5perc_instances_train2017.json"),
"coco_semi_10perc": ("/shared/group/coco/train2017", "/shared/niudt/DATASET/coco/annotations/coco-semi/10perc_instances_train2017.json"),
"coco_semi_20perc": ("/shared/group/coco/train2017", "/shared/niudt/DATASET/coco/annotations/coco-semi/20perc_instances_train2017.json"),
"coco_semi_30perc": ("/shared/group/coco/train2017", "/shared/niudt/DATASET/coco/annotations/coco-semi/30perc_instances_train2017.json"),
"coco_semi_40perc": ("/shared/group/coco/train2017", "/shared/niudt/DATASET/coco/annotations/coco-semi/40perc_instances_train2017.json"),
"coco_semi_50perc": ("/shared/group/coco/train2017", "/shared/niudt/DATASET/coco/annotations/coco-semi/50perc_instances_train2017.json"),
}
def register_all_coco_semi(root):
for dataset_name, splits_per_dataset in _PREDEFINED_SPLITS_COCO_SEMI.items():
for key, (image_root, json_file) in splits_per_dataset.items():
# Assume pre-defined datasets live in `./datasets`.
register_coco_instances(
key,
_get_builtin_metadata(dataset_name),
os.path.join(root, json_file) if "://" not in json_file else json_file,
os.path.join(root, image_root),
)
_PREDEFINED_SPLITS_COCO = {}
_PREDEFINED_SPLITS_COCO["coco"] = {
"coco_2014_train": ("coco/train2014", "coco/annotations/instances_train2014.json"),
"coco_2014_val": ("coco/val2014", "coco/annotations/instances_val2014.json"),
"coco_2014_minival": ("coco/val2014", "coco/annotations/instances_minival2014.json"),
"coco_2014_valminusminival": (
"coco/val2014",
"coco/annotations/instances_valminusminival2014.json",
),
"coco_2017_train": ("./coco/train2017", f"./prepare_ours/u2seg_annotations/ins_annotations/cocotrain_{cluster_num}.json"),
"coco_2017_val": ("./coco/val2017", "./coco/annotations/instances_val2017.json"),
"coco_2017_test": ("coco/test2017", "coco/annotations/image_info_test2017.json"),
"coco_2017_test-dev": ("coco/test2017", "coco/annotations/image_info_test-dev2017.json"),
"coco_2017_val_100": ("coco/val2017", "coco/annotations/instances_val2017_100.json"),
}
_PREDEFINED_SPLITS_COCO["coco_person"] = {
"keypoints_coco_2014_train": (
"coco/train2014",
"coco/annotations/person_keypoints_train2014.json",
),
"keypoints_coco_2014_val": ("coco/val2014", "coco/annotations/person_keypoints_val2014.json"),
"keypoints_coco_2014_minival": (
"coco/val2014",
"coco/annotations/person_keypoints_minival2014.json",
),
"keypoints_coco_2014_valminusminival": (
"coco/val2014",
"coco/annotations/person_keypoints_valminusminival2014.json",
),
"keypoints_coco_2017_train": (
"coco/train2017",
"coco/annotations/person_keypoints_train2017.json",
),
"keypoints_coco_2017_val": ("coco/val2017", "coco/annotations/person_keypoints_val2017.json"),
"keypoints_coco_2017_val_100": (
"coco/val2017",
"coco/annotations/person_keypoints_val2017_100.json",
),
}
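# Each panoptic split below maps to a (panoptic_root, panoptic_json, semantic_root)
# triple, which is unpacked in register_all_coco().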
_PREDEFINED_SPLITS_COCO_PANOPTIC = {
"coco_2017_train_panoptic": (
# This is the original panoptic annotation directory
f"./prepare_ours/u2seg_annotations/panoptic_annotations/cocotrain_{cluster_num}", # this should be .png format annotations
f"./prepare_ours/u2seg_annotations/panoptic_annotations/cocotrain_{cluster_num}.json", #this should be .json file
# This directory contains semantic annotations that are
# converted from panoptic annotations.
# It is used by PanopticFPN.
# You can use the script at detectron2/datasets/prepare_panoptic_fpn.py
# to create these directories.
f"./prepare_ours/u2seg_annotations/panoptic_annotations/panoptic_stuff_cocotrain_{cluster_num}",
),
"coco_2017_val_panoptic": (
"/home/niudt/u2seg_test/detectron2/datasets/datasets/coco/val2017",
"/home/niudt/u2seg_test/detectron2/datasets/datasets/panoptic_anns/panoptic_val2017.json",
"/home/niudt/u2seg_test/detectron2/datasets/datasets/panoptic_anns/panoptic_stuff_val2017",
),
"coco_2017_val_100_panoptic": (
"coco/panoptic_val2017_100",
"coco/annotations/panoptic_val2017_100.json",
"coco/panoptic_stuff_val2017_100",
),
}
def register_all_coco(root):
for dataset_name, splits_per_dataset in _PREDEFINED_SPLITS_COCO.items():
for key, (image_root, json_file) in splits_per_dataset.items():
# Assume pre-defined datasets live in `./datasets`.
register_coco_instances(
key,
_get_builtin_metadata(dataset_name),
os.path.join(root, json_file) if "://" not in json_file else json_file,
os.path.join(root, image_root),
)
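    # Register the panoptic splits, reusing the image root and instance json from
    # the instances datasets registered above.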
for (
prefix,
(panoptic_root, panoptic_json, semantic_root),
) in _PREDEFINED_SPLITS_COCO_PANOPTIC.items():
prefix_instances = prefix[: -len("_panoptic")]
instances_meta = MetadataCatalog.get(prefix_instances)
image_root, instances_json = instances_meta.image_root, instances_meta.json_file
# The "separated" version of COCO panoptic segmentation dataset,
# e.g. used by Panoptic FPN
# import pdb
# pdb.set_trace() | register_coco_panoptic_separated( | 9 | 2023-12-05 01:13:31+00:00 | 12k |
upfusion3d/upfusion | external/nerf/network_df.py | [
{
"identifier": "trunc_exp",
"path": "external/ngp_activation.py",
"snippet": "class _trunc_exp(Function):\n def forward(ctx, x):\n def backward(ctx, g):"
},
{
"identifier": "NeRFRenderer",
"path": "external/nerf/renderer_df.py",
"snippet": "class NeRFRenderer(nn.Module):\n def __init__(self, opt):\n super().__init__()\n\n self.opt = opt\n self.bound = opt.bound\n self.cascade = 1 + math.ceil(math.log2(opt.bound))\n self.grid_size = 128\n self.cuda_ray = opt.cuda_ray\n self.min_near = opt.min_near\n self.density_thresh = opt.density_thresh\n self.bg_radius = opt.bg_radius\n\n # prepare aabb with a 6D tensor (xmin, ymin, zmin, xmax, ymax, zmax)\n # NOTE: aabb (can be rectangular) is only used to generate points, we still rely on bound (always cubic) to calculate density grid and hashing.\n aabb_train = torch.FloatTensor([-opt.bound, -opt.bound, -opt.bound, opt.bound, opt.bound, opt.bound])\n aabb_infer = aabb_train.clone()\n self.register_buffer('aabb_train', aabb_train)\n self.register_buffer('aabb_infer', aabb_infer)\n\n # extra state for cuda raymarching\n if self.cuda_ray:\n # density grid\n density_grid = torch.zeros([self.cascade, self.grid_size ** 3]) # [CAS, H * H * H]\n density_bitfield = torch.zeros(self.cascade * self.grid_size ** 3 // 8, dtype=torch.uint8) # [CAS * H * H * H // 8]\n self.register_buffer('density_grid', density_grid)\n self.register_buffer('density_bitfield', density_bitfield)\n self.mean_density = 0\n self.iter_density = 0\n # step counter\n step_counter = torch.zeros(16, 2, dtype=torch.int32) # 16 is hardcoded for averaging...\n self.register_buffer('step_counter', step_counter)\n self.mean_count = 0\n self.local_step = 0\n\n \n def forward(self, x, d):\n raise NotImplementedError()\n\n def density(self, x):\n raise NotImplementedError()\n\n def color(self, x, d, mask=None, **kwargs):\n raise NotImplementedError()\n\n def reset_extra_state(self):\n if not self.cuda_ray:\n return \n # density grid\n self.density_grid.zero_()\n self.mean_density = 0\n self.iter_density = 0\n # step counter\n self.step_counter.zero_()\n self.mean_count = 0\n self.local_step = 0\n\n @torch.no_grad()\n def export_mesh(self, path, resolution=None, S=128):\n\n if resolution is None:\n resolution = self.grid_size\n\n #@ OVERRIDE SAMPLING RESOLTUION\n sampling_resolution = 128\n resolution = 128\n\n density_thresh = min(self.mean_density, self.density_thresh)\n\n sigmas = np.zeros([sampling_resolution, sampling_resolution, sampling_resolution], dtype=np.float32)\n\n # query\n X = torch.linspace(-self.bound, self.bound, sampling_resolution).split(S)\n Y = torch.linspace(-self.bound, self.bound, sampling_resolution).split(S)\n Z = torch.linspace(-self.bound, self.bound, sampling_resolution).split(S)\n\n for xi, xs in enumerate(X):\n for yi, ys in enumerate(Y):\n for zi, zs in enumerate(Z):\n xx, yy, zz = custom_meshgrid(xs, ys, zs)\n pts = torch.cat([xx.reshape(-1, 1), yy.reshape(-1, 1), zz.reshape(-1, 1)], dim=-1) # [S, 3]\n val = self.density(pts.to(self.density_bitfield.device))\n sigmas[xi * S: xi * S + len(xs), yi * S: yi * S + len(ys), zi * S: zi * S + len(zs)] = val['sigma'].reshape(len(xs), len(ys), len(zs)).detach().cpu().numpy() # [S, 1] --> [x, y, z]\n\n print('sigmas shape', sigmas.shape, sigmas.mean().item(), sigmas.max().item())\n sigmas = mcubes.smooth_gaussian(sigmas, sigma=1.5)\n print('smooth sigmas shape', sigmas.shape, sigmas.mean().item(), sigmas.max().item(), sigmas.max().item())\n\n vertices, triangles = mcubes.marching_cubes(sigmas, sigmas.mean().item() + (sigmas.std() * 0.25))\n mcubes.export_obj(vertices, triangles, os.path.join(path, f'mcubes_mesh.obj'))\n\n vertices = vertices / (resolution - 1.0) * 2 - 1\n vertices = vertices.astype(np.float32)\n triangles = 
triangles.astype(np.int32)\n\n v = torch.from_numpy(vertices).to(self.density_bitfield.device)\n f = torch.from_numpy(triangles).int().to(self.density_bitfield.device)\n\n # mesh = trimesh.Trimesh(vertices, triangles, process=False) # important, process=True leads to seg fault...\n # mesh.export(os.path.join(path, f'mesh.ply'))\n\n # texture?\n def _export(v, f, h0=2048, w0=2048, ssaa=1, name=''):\n # v, f: torch Tensor\n device = v.device\n v_np = v.cpu().numpy() # [N, 3]\n f_np = f.cpu().numpy() # [M, 3]\n\n print(f'[INFO] running xatlas to unwrap UVs for mesh: v={v_np.shape} f={f_np.shape}')\n\n # unwrap uvs\n import xatlas\n import nvdiffrast.torch as dr\n from sklearn.neighbors import NearestNeighbors\n from scipy.ndimage import binary_dilation, binary_erosion\n\n glctx = dr.RasterizeCudaContext()\n\n atlas = xatlas.Atlas()\n atlas.add_mesh(v_np, f_np)\n chart_options = xatlas.ChartOptions()\n chart_options.max_iterations = 0 # disable merge_chart for faster unwrap...\n atlas.generate(chart_options=chart_options)\n vmapping, ft_np, vt_np = atlas[0] # [N], [M, 3], [N, 2]\n\n # vmapping, ft_np, vt_np = xatlas.parametrize(v_np, f_np) # [N], [M, 3], [N, 2]\n\n vt = torch.from_numpy(vt_np.astype(np.float32)).float().to(device)\n ft = torch.from_numpy(ft_np.astype(np.int64)).int().to(device)\n\n # render uv maps\n uv = vt * 2.0 - 1.0 # uvs to range [-1, 1]\n uv = torch.cat((uv, torch.zeros_like(uv[..., :1]), torch.ones_like(uv[..., :1])), dim=-1) # [N, 4]\n\n if ssaa > 1:\n h = int(h0 * ssaa)\n w = int(w0 * ssaa)\n else:\n h, w = h0, w0\n\n rast, _ = dr.rasterize(glctx, uv.unsqueeze(0), ft, (h, w)) # [1, h, w, 4]\n xyzs, _ = dr.interpolate(v.unsqueeze(0), rast, f) # [1, h, w, 3]\n mask, _ = dr.interpolate(torch.ones_like(v[:, :1]).unsqueeze(0), rast, f) # [1, h, w, 1]\n\n # masked query \n xyzs = xyzs.view(-1, 3)\n mask = (mask > 0).view(-1)\n \n sigmas = torch.zeros(h * w, device=device, dtype=torch.float32)\n feats = torch.zeros(h * w, 3, device=device, dtype=torch.float32)\n\n if mask.any():\n xyzs = xyzs[mask] # [M, 3]\n\n # batched inference to avoid OOM\n all_sigmas = []\n all_feats = []\n head = 0\n while head < xyzs.shape[0]:\n tail = min(head + 640000, xyzs.shape[0])\n results_ = self.density(xyzs[head:tail])\n all_sigmas.append(results_['sigma'].float())\n all_feats.append(results_['albedo'].float())\n head += 640000\n\n sigmas[mask] = torch.cat(all_sigmas, dim=0)\n feats[mask] = torch.cat(all_feats, dim=0)\n \n sigmas = sigmas.view(h, w, 1)\n feats = feats.view(h, w, -1)\n mask = mask.view(h, w)\n\n ### alpha mask\n # deltas = 2 * np.sqrt(3) / 1024\n # alphas = 1 - torch.exp(-sigmas * deltas)\n # alphas_mask = alphas > 0.5\n # feats = feats * alphas_mask\n\n # quantize [0.0, 1.0] to [0, 255]\n feats = feats.cpu().numpy()\n feats = (feats * 255).astype(np.uint8)\n\n # alphas = alphas.cpu().numpy()\n # alphas = (alphas * 255).astype(np.uint8)\n\n ### NN search as an antialiasing ...\n mask = mask.cpu().numpy()\n\n inpaint_region = binary_dilation(mask, iterations=3)\n inpaint_region[mask] = 0\n\n search_region = mask.copy()\n not_search_region = binary_erosion(search_region, iterations=2)\n search_region[not_search_region] = 0\n\n search_coords = np.stack(np.nonzero(search_region), axis=-1)\n inpaint_coords = np.stack(np.nonzero(inpaint_region), axis=-1)\n\n knn = NearestNeighbors(n_neighbors=1, algorithm='kd_tree').fit(search_coords)\n _, indices = knn.kneighbors(inpaint_coords)\n\n feats[tuple(inpaint_coords.T)] = feats[tuple(search_coords[indices[:, 0]].T)]\n\n # do ssaa after 
the NN search, in numpy\n feats = cv2.cvtColor(feats, cv2.COLOR_RGB2BGR)\n\n if ssaa > 1:\n # alphas = cv2.resize(alphas, (w0, h0), interpolation=cv2.INTER_NEAREST)\n feats = cv2.resize(feats, (w0, h0), interpolation=cv2.INTER_LINEAR)\n\n # cv2.imwrite(os.path.join(path, f'alpha.png'), alphas)\n cv2.imwrite(os.path.join(path, f'{name}albedo.png'), feats)\n\n # save obj (v, vt, f /)\n obj_file = os.path.join(path, f'{name}mesh.obj')\n mtl_file = os.path.join(path, f'{name}mesh.mtl')\n\n print(f'[INFO] writing obj mesh to {obj_file}')\n with open(obj_file, \"w\") as fp:\n fp.write(f'mtllib {name}mesh.mtl \\n')\n \n print(f'[INFO] writing vertices {v_np.shape}')\n for v in v_np:\n fp.write(f'v {v[0]} {v[1]} {v[2]} \\n')\n \n print(f'[INFO] writing vertices texture coords {vt_np.shape}')\n for v in vt_np:\n fp.write(f'vt {v[0]} {1 - v[1]} \\n') \n\n print(f'[INFO] writing faces {f_np.shape}')\n fp.write(f'usemtl mat0 \\n')\n for i in range(len(f_np)):\n fp.write(f\"f {f_np[i, 0] + 1}/{ft_np[i, 0] + 1} {f_np[i, 1] + 1}/{ft_np[i, 1] + 1} {f_np[i, 2] + 1}/{ft_np[i, 2] + 1} \\n\")\n\n with open(mtl_file, \"w\") as fp:\n fp.write(f'newmtl mat0 \\n')\n fp.write(f'Ka 1.000000 1.000000 1.000000 \\n')\n fp.write(f'Kd 1.000000 1.000000 1.000000 \\n')\n fp.write(f'Ks 0.000000 0.000000 0.000000 \\n')\n fp.write(f'Tr 1.000000 \\n')\n fp.write(f'illum 1 \\n')\n fp.write(f'Ns 0.000000 \\n')\n fp.write(f'map_Kd {name}albedo.png \\n')\n\n # _export(v, f)\n\n def run(self, rays_o, rays_d, num_steps=128, upsample_steps=128, light_d=None, ambient_ratio=1.0, shading='albedo', bg_color=None, perturb=False, fixed_light=False, **kwargs):\n # rays_o, rays_d: [B, N, 3], assumes B == 1\n # bg_color: [BN, 3] in range [0, 1]\n # return: image: [B, N, 3], depth: [B, N]\n\n prefix = rays_o.shape[:-1]\n rays_o = rays_o.contiguous().view(-1, 3)\n rays_d = rays_d.contiguous().view(-1, 3)\n\n N = rays_o.shape[0] # N = B * N, in fact\n device = rays_o.device\n\n results = {}\n\n # choose aabb\n aabb = self.aabb_train if self.training else self.aabb_infer\n\n # sample steps\n nears, fars = raymarching.near_far_from_aabb(rays_o, rays_d, aabb, self.min_near)\n nears.unsqueeze_(-1)\n fars.unsqueeze_(-1)\n\n # random sample light_d if not provided\n if light_d is None:\n # gaussian noise around the ray origin, so the light always face the view dir (avoid dark face)\n if fixed_light:\n # 30 30 30\n # rot_m = torch.tensor([[0.88, -.22, 0.43],\n # [.43, .75, -0.5],\n # [-.22, .63, .75]], device=device, dtype=torch.float)\n # -30 -30 -30\n # rot_m = torch.tensor([[0.63, .65, -0.43],\n # [-.43, .75, 0.5],\n # [.65, -.13, .75]], device=device, dtype=torch.float)\n # 30 30 -30\n rot_m = torch.tensor([[0.63, .65, -0.43],\n [-.43, .75, -0.5],\n [-.65, .13, .75]], device=device, dtype=torch.float)\n light_d = rays_o[0] @ rot_m\n light_d = safe_normalize(light_d)\n else:\n light_d = (rays_o[0] + torch.randn(3, device=device, dtype=torch.float))\n light_d = safe_normalize(light_d)\n\n #print(f'nears = {nears.min().item()} ~ {nears.max().item()}, fars = {fars.min().item()} ~ {fars.max().item()}')\n\n z_vals = torch.linspace(0.0, 1.0, num_steps, device=device).unsqueeze(0) # [1, T]\n z_vals = z_vals.expand((N, num_steps)) # [N, T]\n z_vals = nears + (fars - nears) * z_vals # [N, T], in [nears, fars]\n\n # perturb z_vals\n sample_dist = (fars - nears) / num_steps\n if perturb:\n z_vals = z_vals + (torch.rand(z_vals.shape, device=device) - 0.5) * sample_dist\n #z_vals = z_vals.clamp(nears, fars) # avoid out of bounds xyzs.\n\n # generate xyzs\n xyzs 
= rays_o.unsqueeze(-2) + rays_d.unsqueeze(-2) * z_vals.unsqueeze(-1) # [N, 1, 3] * [N, T, 1] -> [N, T, 3]\n xyzs = torch.min(torch.max(xyzs, aabb[:3]), aabb[3:]) # a manual clip.\n\n #plot_pointcloud(xyzs.reshape(-1, 3).detach().cpu().numpy())\n\n # query SDF and RGB\n density_outputs = self.density(xyzs.reshape(-1, 3))\n\n #sigmas = density_outputs['sigma'].view(N, num_steps) # [N, T]\n for k, v in density_outputs.items():\n density_outputs[k] = v.view(N, num_steps, -1)\n\n # upsample z_vals (nerf-like)\n if upsample_steps > 0:\n with torch.no_grad():\n\n deltas = z_vals[..., 1:] - z_vals[..., :-1] # [N, T-1]\n deltas = torch.cat([deltas, sample_dist * torch.ones_like(deltas[..., :1])], dim=-1)\n\n alphas = 1 - torch.exp(-deltas * density_outputs['sigma'].squeeze(-1)) # [N, T]\n alphas_shifted = torch.cat([torch.ones_like(alphas[..., :1]), 1 - alphas + 1e-15], dim=-1) # [N, T+1]\n weights = alphas * torch.cumprod(alphas_shifted, dim=-1)[..., :-1] # [N, T]\n\n # sample new z_vals\n z_vals_mid = (z_vals[..., :-1] + 0.5 * deltas[..., :-1]) # [N, T-1]\n new_z_vals = sample_pdf(z_vals_mid, weights[:, 1:-1], upsample_steps, det=not self.training).detach() # [N, t]\n\n new_xyzs = rays_o.unsqueeze(-2) + rays_d.unsqueeze(-2) * new_z_vals.unsqueeze(-1) # [N, 1, 3] * [N, t, 1] -> [N, t, 3]\n new_xyzs = torch.min(torch.max(new_xyzs, aabb[:3]), aabb[3:]) # a manual clip.\n\n # only forward new points to save computation\n new_density_outputs = self.density(new_xyzs.reshape(-1, 3))\n #new_sigmas = new_density_outputs['sigma'].view(N, upsample_steps) # [N, t]\n for k, v in new_density_outputs.items():\n new_density_outputs[k] = v.view(N, upsample_steps, -1)\n\n # re-order\n z_vals = torch.cat([z_vals, new_z_vals], dim=1) # [N, T+t]\n z_vals, z_index = torch.sort(z_vals, dim=1)\n\n xyzs = torch.cat([xyzs, new_xyzs], dim=1) # [N, T+t, 3]\n xyzs = torch.gather(xyzs, dim=1, index=z_index.unsqueeze(-1).expand_as(xyzs))\n\n for k in density_outputs:\n tmp_output = torch.cat([density_outputs[k], new_density_outputs[k]], dim=1)\n density_outputs[k] = torch.gather(tmp_output, dim=1, index=z_index.unsqueeze(-1).expand_as(tmp_output))\n\n deltas = z_vals[..., 1:] - z_vals[..., :-1] # [N, T+t-1]\n deltas = torch.cat([deltas, sample_dist * torch.ones_like(deltas[..., :1])], dim=-1)\n alphas = 1 - torch.exp(-deltas * density_outputs['sigma'].squeeze(-1)) # [N, T+t]\n alphas_shifted = torch.cat([torch.ones_like(alphas[..., :1]), 1 - alphas + 1e-15], dim=-1) # [N, T+t+1]\n weights = alphas * torch.cumprod(alphas_shifted, dim=-1)[..., :-1] # [N, T+t]\n\n dirs = rays_d.view(-1, 1, 3).expand_as(xyzs)\n for k, v in density_outputs.items():\n density_outputs[k] = v.view(-1, v.shape[-1])\n\n sigmas, rgbs, normals = self(xyzs.reshape(-1, 3), dirs.reshape(-1, 3), light_d, ratio=ambient_ratio, shading=shading)\n rgbs = rgbs.view(N, -1, 3) # [N, T+t, 3]\n\n if normals is not None:\n # orientation loss\n normals = normals.view(N, -1, 3)\n loss_orient = weights.detach() * (normals * dirs).sum(-1).clamp(min=0) ** 2\n results['loss_orient'] = loss_orient.mean()\n\n # surface normal smoothness\n normals_perturb = self.normal(xyzs + torch.randn_like(xyzs) * 1e-2).view(N, -1, 3)\n loss_smooth = (normals - normals_perturb).abs()\n results['loss_smooth'] = loss_smooth.mean()\n\n # calculate weight_sum (mask)\n weights_sum = weights.sum(dim=-1) # [N]\n \n # calculate depth \n ori_z_vals = ((z_vals - nears) / (fars - nears)).clamp(0, 1)\n depth = torch.sum(weights * ori_z_vals, dim=-1)\n\n # calculate color\n image = 
torch.sum(weights.unsqueeze(-1) * rgbs, dim=-2) # [N, 3], in [0, 1]\n\n # mix background color\n if self.bg_radius > 0:\n # use the bg model to calculate bg_color\n # sph = raymarching.sph_from_ray(rays_o, rays_d, self.bg_radius) # [N, 2] in [-1, 1]\n bg_color = self.background(rays_d.reshape(-1, 3)) # [N, 3]\n elif bg_color is None:\n bg_color = 1\n \n image = image + (1 - weights_sum).unsqueeze(-1) * bg_color\n\n image = image.view(*prefix, 3)\n depth = depth.view(*prefix)\n\n mask = (nears < fars).reshape(*prefix)\n\n results['image'] = image\n results['depth'] = depth\n results['weights_sum'] = weights_sum\n results['mask'] = mask\n\n return results\n\n\n def run_cuda(self, rays_o, rays_d, dt_gamma=0, light_d=None, ambient_ratio=1.0, shading='albedo', bg_color=None, perturb=False, force_all_rays=False, max_steps=1024, T_thresh=1e-4, **kwargs):\n # rays_o, rays_d: [B, N, 3], assumes B == 1\n # return: image: [B, N, 3], depth: [B, N]\n\n prefix = rays_o.shape[:-1]\n rays_o = rays_o.contiguous().view(-1, 3)\n rays_d = rays_d.contiguous().view(-1, 3)\n\n N = rays_o.shape[0] # N = B * N, in fact\n device = rays_o.device\n\n # pre-calculate near far\n nears, fars = raymarching.near_far_from_aabb(rays_o, rays_d, self.aabb_train if self.training else self.aabb_infer)\n\n # random sample light_d if not provided\n if light_d is None:\n # gaussian noise around the ray origin, so the light always face the view dir (avoid dark face)\n light_d = (rays_o[0] + torch.randn(3, device=device, dtype=torch.float))\n light_d = safe_normalize(light_d)\n\n results = {}\n\n if self.training:\n # setup counter\n counter = self.step_counter[self.local_step % 16]\n counter.zero_() # set to 0\n self.local_step += 1\n\n xyzs, dirs, deltas, rays = raymarching.march_rays_train(rays_o, rays_d, self.bound, self.density_bitfield, self.cascade, self.grid_size, nears, fars, counter, self.mean_count, perturb, 128, force_all_rays, dt_gamma, max_steps)\n\n #plot_pointcloud(xyzs.reshape(-1, 3).detach().cpu().numpy())\n \n sigmas, rgbs, normals = self(xyzs, dirs, light_d, ratio=ambient_ratio, shading=shading)\n\n #print(f'valid RGB query ratio: {mask.sum().item() / mask.shape[0]} (total = {mask.sum().item()})')\n\n weights_sum, depth, image = raymarching.composite_rays_train(sigmas, rgbs, deltas, rays, T_thresh)\n\n # normals related regularizations\n if normals is not None:\n # orientation loss\n weights = 1 - torch.exp(-sigmas)\n loss_orient = weights.detach() * (normals * dirs).sum(-1).clamp(min=0) ** 2\n results['loss_orient'] = loss_orient.mean()\n\n # surface normal smoothness\n normals_perturb = self.normal(xyzs + torch.randn_like(xyzs) * 1e-2)\n loss_smooth = (normals - normals_perturb).abs()\n results['loss_smooth'] = loss_smooth.mean()\n\n else:\n \n # allocate outputs \n dtype = torch.float32\n \n weights_sum = torch.zeros(N, dtype=dtype, device=device)\n depth = torch.zeros(N, dtype=dtype, device=device)\n image = torch.zeros(N, 3, dtype=dtype, device=device)\n \n n_alive = N\n rays_alive = torch.arange(n_alive, dtype=torch.int32, device=device) # [N]\n rays_t = nears.clone() # [N]\n\n step = 0\n \n while step < max_steps: # hard coded max step\n\n # count alive rays \n n_alive = rays_alive.shape[0]\n\n # exit loop\n if n_alive <= 0:\n break\n\n # decide compact_steps\n n_step = max(min(N // n_alive, 8), 1)\n\n xyzs, dirs, deltas = raymarching.march_rays(n_alive, n_step, rays_alive, rays_t, rays_o, rays_d, self.bound, self.density_bitfield, self.cascade, self.grid_size, nears, fars, 128, perturb if step == 0 else 
False, dt_gamma, max_steps)\n\n sigmas, rgbs, normals = self(xyzs, dirs, light_d, ratio=ambient_ratio, shading=shading)\n\n raymarching.composite_rays(n_alive, n_step, rays_alive, rays_t, sigmas, rgbs, deltas, weights_sum, depth, image, T_thresh)\n\n rays_alive = rays_alive[rays_alive >= 0]\n #print(f'step = {step}, n_step = {n_step}, n_alive = {n_alive}, xyzs: {xyzs.shape}')\n\n step += n_step\n\n # mix background color\n if self.bg_radius > 0:\n \n # use the bg model to calculate bg_color\n # sph = raymarching.sph_from_ray(rays_o, rays_d, self.bg_radius) # [N, 2] in [-1, 1]\n bg_color = self.background(rays_d) # [N, 3]\n\n elif bg_color is None:\n bg_color = 1\n\n image = image + (1 - weights_sum).unsqueeze(-1) * bg_color\n image = image.view(*prefix, 3)\n\n depth = torch.clamp(depth - nears, min=0) / (fars - nears)\n depth = depth.view(*prefix)\n\n weights_sum = weights_sum.reshape(*prefix)\n\n mask = (nears < fars).reshape(*prefix)\n\n results['image'] = image\n results['depth'] = depth\n results['weights_sum'] = weights_sum\n results['mask'] = mask\n\n return results\n\n\n @torch.no_grad()\n def update_extra_state(self, decay=0.95, S=128):\n # call before each epoch to update extra states.\n\n if not self.cuda_ray:\n return \n \n ### update density grid\n tmp_grid = - torch.ones_like(self.density_grid)\n \n X = torch.arange(self.grid_size, dtype=torch.int32, device=self.density_bitfield.device).split(S)\n Y = torch.arange(self.grid_size, dtype=torch.int32, device=self.density_bitfield.device).split(S)\n Z = torch.arange(self.grid_size, dtype=torch.int32, device=self.density_bitfield.device).split(S)\n\n for xs in X:\n for ys in Y:\n for zs in Z:\n \n # construct points\n xx, yy, zz = custom_meshgrid(xs, ys, zs)\n coords = torch.cat([xx.reshape(-1, 1), yy.reshape(-1, 1), zz.reshape(-1, 1)], dim=-1) # [N, 3], in [0, 128)\n indices = raymarching.morton3D(coords).long() # [N]\n xyzs = 2 * coords.float() / (self.grid_size - 1) - 1 # [N, 3] in [-1, 1]\n\n # cascading\n for cas in range(self.cascade):\n bound = min(2 ** cas, self.bound)\n half_grid_size = bound / self.grid_size\n # scale to current cascade's resolution\n cas_xyzs = xyzs * (bound - half_grid_size)\n # add noise in [-hgs, hgs]\n cas_xyzs += (torch.rand_like(cas_xyzs) * 2 - 1) * half_grid_size\n # query density\n sigmas = self.density(cas_xyzs)['sigma'].reshape(-1).detach()\n # assign \n tmp_grid[cas, indices] = sigmas\n \n # ema update\n valid_mask = self.density_grid >= 0\n self.density_grid[valid_mask] = torch.maximum(self.density_grid[valid_mask] * decay, tmp_grid[valid_mask])\n self.mean_density = torch.mean(self.density_grid[valid_mask]).item()\n self.iter_density += 1\n\n # convert to bitfield\n density_thresh = min(self.mean_density, self.density_thresh)\n self.density_bitfield = raymarching.packbits(self.density_grid, density_thresh, self.density_bitfield)\n\n ### update step counter\n total_step = min(16, self.local_step)\n if total_step > 0:\n self.mean_count = int(self.step_counter[:total_step, 0].sum().item() / total_step)\n self.local_step = 0\n\n # print(f'[density grid] min={self.density_grid.min().item():.4f}, max={self.density_grid.max().item():.4f}, mean={self.mean_density:.4f}, occ_rate={(self.density_grid > density_thresh).sum() / (128**3 * self.cascade):.3f} | [step counter] mean={self.mean_count}')\n\n\n def render(self, rays_o, rays_d, staged=False, max_ray_batch=4096, **kwargs):\n # rays_o, rays_d: [B, N, 3], assumes B == 1\n # return: pred_rgb: [B, N, 3]\n\n if self.cuda_ray:\n _run = self.run_cuda\n 
else:\n _run = self.run\n\n B, N = rays_o.shape[:2]\n device = rays_o.device\n\n # never stage when cuda_ray\n if staged and not self.cuda_ray:\n depth = torch.empty((B, N), device=device)\n image = torch.empty((B, N, 3), device=device)\n weights_sum = torch.empty((B, N), device=device)\n\n for b in range(B):\n head = 0\n while head < N:\n tail = min(head + max_ray_batch, N)\n results_ = _run(rays_o[b:b+1, head:tail], rays_d[b:b+1, head:tail], **kwargs)\n depth[b:b+1, head:tail] = results_['depth']\n weights_sum[b:b+1, head:tail] = results_['weights_sum']\n image[b:b+1, head:tail] = results_['image']\n head += max_ray_batch\n \n results = {}\n results['depth'] = depth\n results['image'] = image\n results['weights_sum'] = weights_sum\n\n else:\n results = _run(rays_o, rays_d, **kwargs)\n\n return results\n\n def render_batched(self, rays_o, rays_d, batched=False, max_ray_batch=128*128, **kwargs):\n # rays_o, rays_d: [B, N, 3], assumes B == 1\n # return: pred_rgb: [B, N, 3]\n\n with torch.no_grad():\n if self.cuda_ray:\n _run = self.run_cuda\n else:\n _run = self.run\n\n B, N = rays_o.shape[:2]\n device = rays_o.device\n\n # never stage when cuda_ray\n if batched:\n depth = torch.empty((B, N), device=device)\n image = torch.empty((B, N, 3), device=device)\n weights_sum = torch.empty((B, N), device=device)\n\n for b in range(B):\n head = 0\n while head < N:\n tail = min(head + max_ray_batch, N)\n results_ = _run(rays_o[b:b+1, head:tail], rays_d[b:b+1, head:tail], **kwargs)\n depth[b:b+1, head:tail] = results_['depth']\n weights_sum[b:b+1, head:tail] = results_['weights_sum']\n image[b:b+1, head:tail] = results_['image']\n head += max_ray_batch\n \n results = {}\n results['depth'] = depth\n results['image'] = image\n results['weights_sum'] = weights_sum\n\n else:\n results = _run(rays_o, rays_d, **kwargs)\n\n return results"
},
{
"identifier": "get_encoder",
"path": "external/ngp_encoder.py",
"snippet": "def get_encoder(encoding, input_dim=3, \n multires=6, \n degree=4,\n num_levels=16, level_dim=2, base_resolution=16, log2_hashmap_size=19, desired_resolution=2048, align_corners=False,\n **kwargs):\n\n if encoding == 'None':\n return lambda x, **kwargs: x, input_dim\n \n elif encoding == 'frequency':\n raise NotImplementedError\n\n elif encoding == 'sphere_harmonics':\n raise NotImplementedError\n\n elif encoding == 'hashgrid':\n from external.gridencoder import GridEncoder\n encoder = GridEncoder(input_dim=input_dim, num_levels=num_levels, level_dim=level_dim, base_resolution=base_resolution, log2_hashmap_size=log2_hashmap_size, desired_resolution=desired_resolution, gridtype='hash', align_corners=align_corners)\n \n elif encoding == 'tiledgrid':\n from external.gridencoder import GridEncoder\n encoder = GridEncoder(input_dim=input_dim, num_levels=num_levels, level_dim=level_dim, base_resolution=base_resolution, log2_hashmap_size=log2_hashmap_size, desired_resolution=desired_resolution, gridtype='tiled', align_corners=align_corners)\n \n elif encoding == 'ash':\n raise NotImplementedError\n\n else:\n raise NotImplementedError('Unknown encoding mode, choose from [None, frequency, sphere_harmonics, hashgrid, tiledgrid]')\n\n return encoder, encoder.output_dim"
},
{
"identifier": "safe_normalize",
"path": "external/nerf/utils.py",
"snippet": "def safe_normalize(x, eps=1e-20):\n return x / torch.sqrt(torch.clamp(torch.sum(x * x, -1, keepdim=True), min=eps))"
}
] | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from external.ngp_activation import trunc_exp
from external.nerf.renderer_df import NeRFRenderer
from external.ngp_encoder import get_encoder
from .utils import safe_normalize | 9,828 |
class MLP(nn.Module):
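    # A plain fully-connected network: Linear layers with ReLU between hidden
    # layers and no activation on the output.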
def __init__(self, dim_in, dim_out, dim_hidden, num_layers, bias=True):
super().__init__()
self.dim_in = dim_in
self.dim_out = dim_out
self.dim_hidden = dim_hidden
self.num_layers = num_layers
net = []
for l in range(num_layers):
net.append(nn.Linear(self.dim_in if l == 0 else self.dim_hidden, self.dim_out if l == num_layers - 1 else self.dim_hidden, bias=bias))
self.net = nn.ModuleList(net)
def forward(self, x):
for l in range(self.num_layers):
x = self.net[l](x)
if l != self.num_layers - 1:
x = F.relu(x, inplace=True)
return x
class NeRFNetwork(NeRFRenderer):
def __init__(self,
opt,
num_layers=5,
hidden_dim=128,
num_layers_bg=2,
hidden_dim_bg=64,
):
super().__init__(opt)
self.num_layers = num_layers
self.hidden_dim = hidden_dim
self.encoder, self.in_dim = get_encoder('frequency', input_dim=3)
self.sigma_net = MLP(self.in_dim, 4, hidden_dim, num_layers, bias=True)
# background network
if self.bg_radius > 0:
self.num_layers_bg = num_layers_bg
self.hidden_dim_bg = hidden_dim_bg
self.encoder_bg, self.in_dim_bg = get_encoder('frequency', input_dim=3)
self.bg_net = MLP(self.in_dim_bg, 3, hidden_dim_bg, num_layers_bg, bias=True)
else:
self.bg_net = None
def gaussian(self, x):
# x: [B, N, 3]
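        # Density bias: a Gaussian blob centered at the origin (std 0.2, peak 5).
        # common_forward() adds it to the raw density so that geometry tends to
        # form near the center of the scene.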
d = (x ** 2).sum(-1)
g = 5 * torch.exp(-d / (2 * 0.2 ** 2))
return g
def common_forward(self, x):
# x: [N, 3], in [-bound, bound]
# sigma
h = self.encoder(x, bound=self.bound)
h = self.sigma_net(h)
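        # channel 0 -> density (Gaussian bias added, then a truncated exponential),
        # remaining channels -> albedo squashed to [0, 1]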
sigma = trunc_exp(h[..., 0] + self.gaussian(x))
albedo = torch.sigmoid(h[..., 1:])
return sigma, albedo
# ref: https://github.com/zhaofuq/Instant-NSR/blob/main/nerf/network_sdf.py#L192
def finite_difference_normal(self, x, epsilon=1e-2):
# x: [N, 3]
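        # Central differences of the density along each axis approximate its gradient,
        # which serves as the (unnormalized) surface normal.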
dx_pos, _ = self.common_forward((x + torch.tensor([[epsilon, 0.00, 0.00]], device=x.device)).clamp(-self.bound, self.bound))
dx_neg, _ = self.common_forward((x + torch.tensor([[-epsilon, 0.00, 0.00]], device=x.device)).clamp(-self.bound, self.bound))
dy_pos, _ = self.common_forward((x + torch.tensor([[0.00, epsilon, 0.00]], device=x.device)).clamp(-self.bound, self.bound))
dy_neg, _ = self.common_forward((x + torch.tensor([[0.00, -epsilon, 0.00]], device=x.device)).clamp(-self.bound, self.bound))
dz_pos, _ = self.common_forward((x + torch.tensor([[0.00, 0.00, epsilon]], device=x.device)).clamp(-self.bound, self.bound))
dz_neg, _ = self.common_forward((x + torch.tensor([[0.00, 0.00, -epsilon]], device=x.device)).clamp(-self.bound, self.bound))
normal = torch.stack([
0.5 * (dx_pos - dx_neg) / epsilon,
0.5 * (dy_pos - dy_neg) / epsilon,
0.5 * (dz_pos - dz_neg) / epsilon
], dim=-1)
return normal
def normal(self, x):
with torch.enable_grad():
x.requires_grad_(True)
sigma, albedo = self.common_forward(x)
# query gradient
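            # the negative density gradient points outward from the surface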
normal = - torch.autograd.grad(torch.sum(sigma), x, create_graph=True)[0] # [N, 3]
# normalize...
|
class MLP(nn.Module):
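    # A plain fully-connected network: Linear layers with ReLU between hidden
    # layers and no activation on the output.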
def __init__(self, dim_in, dim_out, dim_hidden, num_layers, bias=True):
super().__init__()
self.dim_in = dim_in
self.dim_out = dim_out
self.dim_hidden = dim_hidden
self.num_layers = num_layers
net = []
for l in range(num_layers):
net.append(nn.Linear(self.dim_in if l == 0 else self.dim_hidden, self.dim_out if l == num_layers - 1 else self.dim_hidden, bias=bias))
self.net = nn.ModuleList(net)
def forward(self, x):
for l in range(self.num_layers):
x = self.net[l](x)
if l != self.num_layers - 1:
x = F.relu(x, inplace=True)
return x
class NeRFNetwork(NeRFRenderer):
def __init__(self,
opt,
num_layers=5,
hidden_dim=128,
num_layers_bg=2,
hidden_dim_bg=64,
):
super().__init__(opt)
self.num_layers = num_layers
self.hidden_dim = hidden_dim
self.encoder, self.in_dim = get_encoder('frequency', input_dim=3)
self.sigma_net = MLP(self.in_dim, 4, hidden_dim, num_layers, bias=True)
# background network
if self.bg_radius > 0:
self.num_layers_bg = num_layers_bg
self.hidden_dim_bg = hidden_dim_bg
self.encoder_bg, self.in_dim_bg = get_encoder('frequency', input_dim=3)
self.bg_net = MLP(self.in_dim_bg, 3, hidden_dim_bg, num_layers_bg, bias=True)
else:
self.bg_net = None
def gaussian(self, x):
# x: [B, N, 3]
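        # Density bias: a Gaussian blob centered at the origin (std 0.2, peak 5).
        # common_forward() adds it to the raw density so that geometry tends to
        # form near the center of the scene.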
d = (x ** 2).sum(-1)
g = 5 * torch.exp(-d / (2 * 0.2 ** 2))
return g
def common_forward(self, x):
# x: [N, 3], in [-bound, bound]
# sigma
h = self.encoder(x, bound=self.bound)
h = self.sigma_net(h)
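        # channel 0 -> density (Gaussian bias added, then a truncated exponential),
        # remaining channels -> albedo squashed to [0, 1]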
sigma = trunc_exp(h[..., 0] + self.gaussian(x))
albedo = torch.sigmoid(h[..., 1:])
return sigma, albedo
# ref: https://github.com/zhaofuq/Instant-NSR/blob/main/nerf/network_sdf.py#L192
def finite_difference_normal(self, x, epsilon=1e-2):
# x: [N, 3]
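        # Central differences of the density along each axis approximate its gradient,
        # which serves as the (unnormalized) surface normal.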
dx_pos, _ = self.common_forward((x + torch.tensor([[epsilon, 0.00, 0.00]], device=x.device)).clamp(-self.bound, self.bound))
dx_neg, _ = self.common_forward((x + torch.tensor([[-epsilon, 0.00, 0.00]], device=x.device)).clamp(-self.bound, self.bound))
dy_pos, _ = self.common_forward((x + torch.tensor([[0.00, epsilon, 0.00]], device=x.device)).clamp(-self.bound, self.bound))
dy_neg, _ = self.common_forward((x + torch.tensor([[0.00, -epsilon, 0.00]], device=x.device)).clamp(-self.bound, self.bound))
dz_pos, _ = self.common_forward((x + torch.tensor([[0.00, 0.00, epsilon]], device=x.device)).clamp(-self.bound, self.bound))
dz_neg, _ = self.common_forward((x + torch.tensor([[0.00, 0.00, -epsilon]], device=x.device)).clamp(-self.bound, self.bound))
normal = torch.stack([
0.5 * (dx_pos - dx_neg) / epsilon,
0.5 * (dy_pos - dy_neg) / epsilon,
0.5 * (dz_pos - dz_neg) / epsilon
], dim=-1)
return normal
def normal(self, x):
with torch.enable_grad():
x.requires_grad_(True)
sigma, albedo = self.common_forward(x)
# query gradient
normal = - torch.autograd.grad(torch.sum(sigma), x, create_graph=True)[0] # [N, 3]
# normalize... | normal = safe_normalize(normal) | 3 | 2023-12-12 00:49:11+00:00 | 12k |
modelscope/normal-depth-diffusion | scripts/t2i.py | [
{
"identifier": "DDIMSampler",
"path": "ldm/models/diffusion/ddim.py",
"snippet": "class DDIMSampler(object):\n\n def __init__(self, model, schedule='linear', **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device('cuda'):\n attr = attr.to(torch.device('cuda'))\n setattr(self, name, attr)\n\n def make_schedule(self,\n ddim_num_steps,\n ddim_discretize='uniform',\n ddim_eta=0.,\n verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(\n ddim_discr_method=ddim_discretize,\n num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,\n verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[\n 0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model\n .device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev',\n to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod',\n to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod',\n to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod',\n to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod',\n to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod',\n to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(\n alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,\n verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas',\n np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) *\n (1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps',\n sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(\n self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n **kwargs):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n cbs = conditioning[list(conditioning.keys())[0]].shape[0]\n if cbs != batch_size:\n print(\n f'Warning: Got {cbs} conditionings but batch-size is {batch_size}'\n )\n else:\n if conditioning.shape[0] != batch_size:\n print(\n f'Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}'\n )\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n\n samples, intermediates = self.ddim_sampling(\n conditioning,\n size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask,\n x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n **kwargs)\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self,\n cond,\n shape,\n x_T=None,\n ddim_use_original_steps=False,\n callback=None,\n timesteps=None,\n quantize_denoised=False,\n mask=None,\n x0=None,\n img_callback=None,\n log_every_t=100,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n **kwargs):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(\n min(timesteps / self.ddim_timesteps.shape[0], 1)\n * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(\n 0, timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[\n 0]\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b, ), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(\n x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. 
- mask) * img\n\n outs = self.p_sample_ddim(\n img,\n cond,\n ts,\n index=index,\n use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised,\n temperature=temperature,\n noise_dropout=noise_dropout,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n **kwargs)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self,\n x,\n c,\n t,\n index,\n repeat_noise=False,\n use_original_steps=False,\n quantize_denoised=False,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n dynamic_threshold=None,\n **kwargs):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n model_output = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n if isinstance(c, dict):\n assert isinstance(unconditional_conditioning, dict)\n c_in = dict()\n for k in c:\n if isinstance(c[k], list):\n c_in[k] = [\n torch.cat(\n [unconditional_conditioning[k][i], c[k][i]])\n for i in range(len(c[k]))\n ]\n elif isinstance(c[k], torch.Tensor):\n c_in[k] = torch.cat(\n [unconditional_conditioning[k], c[k]])\n else:\n assert c[k] == unconditional_conditioning[k]\n c_in[k] = c[k]\n elif isinstance(c, list):\n c_in = list()\n assert isinstance(unconditional_conditioning, list)\n for i in range(len(c)):\n c_in.append(\n torch.cat([unconditional_conditioning[i], c[i]]))\n else:\n c_in = torch.cat([unconditional_conditioning, c])\n model_uncond, model_t = self.model.apply_model(x_in, t_in,\n c_in).chunk(2)\n # model_t = self.model.apply_model(x, t, c, **kwargs)\n # model_uncond = self.model.apply_model(x, t, unconditional_conditioning, **kwargs)\n model_output = model_uncond + unconditional_guidance_scale * (\n model_t - model_uncond)\n\n if self.model.parameterization == 'v':\n print('using v!')\n e_t = self.model.predict_eps_from_z_and_v(x, t, model_output)\n else:\n e_t = model_output\n\n if score_corrector is not None:\n assert self.model.parameterization == 'eps', 'not implemented'\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c,\n **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1),\n sqrt_one_minus_alphas[index],\n device=device)\n\n # current prediction for x_0\n if self.model.parameterization != 'v':\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n else:\n pred_x0 = 
self.model.predict_start_from_z_and_v(x, t, model_output)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n raise NotImplementedError()\n\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device,\n repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (\n extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0\n + extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape)\n * noise)\n\n @torch.no_grad()\n def decode(self,\n x_latent,\n cond,\n t_start,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n use_original_steps=False,\n **kwargs):\n\n timesteps = np.arange(self.ddpm_num_timesteps\n ) if use_original_steps else self.ddim_timesteps\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n\n iterator = tqdm(time_range, desc='Decoding image', total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((x_latent.shape[0], ),\n step,\n device=x_latent.device,\n dtype=torch.long)\n x_dec, _ = self.p_sample_ddim(\n x_dec,\n cond,\n ts,\n index=index,\n use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n **kwargs)\n return x_dec"
},
{
"identifier": "DPMSolverSampler",
"path": "ldm/models/diffusion/dpm_solver/sampler.py",
"snippet": "class DPMSolverSampler(object):\n\n def __init__(self, model, **kwargs):\n super().__init__()\n self.model = model\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(model.\n device)\n self.register_buffer('alphas_cumprod', to_torch(model.alphas_cumprod))\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device('cuda'):\n attr = attr.to(torch.device('cuda'))\n setattr(self, name, attr)\n\n @torch.no_grad()\n def sample(\n self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...\n **kwargs):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n cbs = conditioning[list(conditioning.keys())[0]].shape[0]\n if cbs != batch_size:\n print(\n f'Warning: Got {cbs} conditionings but batch-size is {batch_size}'\n )\n else:\n if conditioning.shape[0] != batch_size:\n print(\n f'Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}'\n )\n\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n\n # print(f'Data shape for DPM-Solver sampling is {size}, sampling steps {S}')\n\n device = self.model.betas.device\n if x_T is None:\n img = torch.randn(size, device=device)\n else:\n img = x_T\n\n ns = NoiseScheduleVP('discrete', alphas_cumprod=self.alphas_cumprod)\n\n model_fn = model_wrapper(\n lambda x, t, c: self.model.apply_model(x, t, c),\n ns,\n model_type='noise',\n guidance_type='classifier-free',\n condition=conditioning,\n unconditional_condition=unconditional_conditioning,\n guidance_scale=unconditional_guidance_scale,\n )\n\n dpm_solver = DPM_Solver(\n model_fn, ns, predict_x0=True, thresholding=False)\n x = dpm_solver.sample(\n img,\n steps=S,\n skip_type='time_uniform',\n method='multistep',\n order=2,\n lower_order_final=True)\n\n return x.to(device), None"
},
{
"identifier": "PLMSSampler",
"path": "ldm/models/diffusion/plms.py",
"snippet": "class PLMSSampler(object):\n\n def __init__(self, model, schedule='linear', **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device('cuda'):\n attr = attr.to(torch.device('cuda'))\n setattr(self, name, attr)\n\n def make_schedule(self,\n ddim_num_steps,\n ddim_discretize='uniform',\n ddim_eta=0.,\n verbose=True):\n if ddim_eta != 0:\n raise ValueError('ddim_eta must be 0 for PLMS')\n self.ddim_timesteps = make_ddim_timesteps(\n ddim_discr_method=ddim_discretize,\n num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,\n verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[\n 0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model\n .device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev',\n to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod',\n to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod',\n to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod',\n to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod',\n to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod',\n to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(\n alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,\n verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas',\n np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) *\n (1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps',\n sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(\n self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n **kwargs):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n cbs = conditioning[list(conditioning.keys())[0]].shape[0]\n if cbs != batch_size:\n print(\n f'Warning: Got {cbs} conditionings but batch-size is {batch_size}'\n )\n else:\n if conditioning.shape[0] != batch_size:\n print(\n f'Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}'\n )\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for PLMS sampling is {size}')\n\n samples, intermediates = self.plms_sampling(\n conditioning,\n size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask,\n x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n )\n return samples, intermediates\n\n @torch.no_grad()\n def plms_sampling(\n self,\n cond,\n shape,\n x_T=None,\n ddim_use_original_steps=False,\n callback=None,\n timesteps=None,\n quantize_denoised=False,\n mask=None,\n x0=None,\n img_callback=None,\n log_every_t=100,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n ):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(\n min(timesteps / self.ddim_timesteps.shape[0], 1)\n * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = list(reversed(range(\n 0, timesteps))) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[\n 0]\n print(f'Running PLMS Sampling with {total_steps} timesteps')\n\n iterator = tqdm(time_range, desc='PLMS Sampler', total=total_steps)\n old_eps = []\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b, ), step, device=device, dtype=torch.long)\n ts_next = torch.full((b, ),\n time_range[min(i + 1,\n len(time_range) - 1)],\n device=device,\n dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(\n x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. 
- mask) * img\n\n outs = self.p_sample_plms(\n img,\n cond,\n ts,\n index=index,\n use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised,\n temperature=temperature,\n noise_dropout=noise_dropout,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n old_eps=old_eps,\n t_next=ts_next)\n img, pred_x0, e_t = outs\n old_eps.append(e_t)\n if len(old_eps) >= 4:\n old_eps.pop(0)\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_plms(self,\n x,\n c,\n t,\n index,\n repeat_noise=False,\n use_original_steps=False,\n quantize_denoised=False,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n old_eps=None,\n t_next=None):\n b, *_, device = *x.shape, x.device\n\n def get_model_output(x, t):\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n e_t = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n c_in = torch.cat([unconditional_conditioning, c])\n e_t_uncond, e_t = self.model.apply_model(x_in, t_in,\n c_in).chunk(2)\n e_t = e_t_uncond + unconditional_guidance_scale * (\n e_t - e_t_uncond)\n\n if score_corrector is not None:\n assert self.model.parameterization == 'eps'\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c,\n **corrector_kwargs)\n\n return e_t\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n\n def get_x_prev_and_pred_x0(e_t, index):\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1),\n alphas_prev[index],\n device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1),\n sqrt_one_minus_alphas[index],\n device=device)\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n # direction pointing to x_t\n dir_xt = (1. 
- a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device,\n repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n e_t = get_model_output(x, t)\n if len(old_eps) == 0:\n # Pseudo Improved Euler (2nd order)\n x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t, index)\n e_t_next = get_model_output(x_prev, t_next)\n e_t_prime = (e_t + e_t_next) / 2\n elif len(old_eps) == 1:\n # 2nd order Pseudo Linear Multistep (Adams-Bashforth)\n e_t_prime = (3 * e_t - old_eps[-1]) / 2\n elif len(old_eps) == 2:\n # 3nd order Pseudo Linear Multistep (Adams-Bashforth)\n e_t_prime = (23 * e_t - 16 * old_eps[-1] + 5 * old_eps[-2]) / 12\n elif len(old_eps) >= 3:\n # 4nd order Pseudo Linear Multistep (Adams-Bashforth)\n e_t_prime = (55 * e_t - 59 * old_eps[-1] + 37 * old_eps[-2]\n - 9 * old_eps[-3]) / 24\n\n x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t_prime, index)\n\n return x_prev, pred_x0, e_t"
},
{
"identifier": "instantiate_from_config",
"path": "ldm/util.py",
"snippet": "def instantiate_from_config(config):\n if not 'target' in config:\n\n print(config)\n if config == '__is_first_stage__':\n return None\n elif config == '__is_unconditional__':\n return None\n raise KeyError('Expected key `target` to instantiate.')\n return get_obj_from_str(config['target'])(**config.get('params', dict()))"
},
{
"identifier": "build_model",
"path": "model_zoo.py",
"snippet": "def build_model(model_name,\n ckpt_path=None,\n cache_dir=None,\n return_cfg=False,\n strict=True):\n if not model_name in PRETRAINED_MODELS:\n raise RuntimeError(\n f'Model name {model_name} is not a pre-trained model. Available models are:\\n- ' + \\\n '\\n- '.join(PRETRAINED_MODELS.keys())\n )\n model_info = PRETRAINED_MODELS[model_name]\n\n # Instiantiate the model\n print(f\"Loading model from config: {model_info['config']}\")\n config_file = os.path.join(REPO_DIR, model_info['config'])\n assert os.path.exists(config_file)\n\n config = OmegaConf.load(config_file)\n\n # loading from ema_model\n model = instantiate_from_config(config.model)\n if ckpt_path.endswith('_ema.ckpt'):\n ema_ckpt_path = ckpt_path\n else:\n ema_ckpt_path = os.path.splitext(ckpt_path)[0] + '_ema.ckpt'\n\n # model_ckpt = torch.load(ckpt_path, map_location='cpu')['state_dict']\n # model_ckpt = extract_ema(model, model_ckpt)\n print(ema_ckpt_path)\n if os.path.exists(ema_ckpt_path):\n print(f'load from ema_ckpt:{ema_ckpt_path}')\n ckpt_path = ema_ckpt_path\n model_ckpt = torch.load(ckpt_path, map_location='cpu')['state_dict']\n else:\n model_ckpt = torch.load(ckpt_path, map_location='cpu')\n model_ckpt = extract_ema(model, model_ckpt['state_dict'])\n torch.save({'state_dict': model_ckpt}, ema_ckpt_path)\n\n model.load_state_dict(model_ckpt, strict=strict)\n\n if not return_cfg:\n return model\n else:\n return model, config"
},
{
"identifier": "map_2_16bit",
"path": "utils/color_transfer.py",
"snippet": "def map_2_16bit(x):\n x = (np.clip(x, 0, 1.) * 65535).astype(np.uint16)\n\n low_x = np.zeros_like(x)\n low_x[x < 256] = x[x < 256]\n high_x = x >> 8\n\n return np.concatenate(\n [np.zeros_like(low_x[..., None]), high_x[..., None], low_x[..., None]],\n axis=-1).astype(np.uint8)"
},
{
"identifier": "map_16bit_2_8",
"path": "utils/color_transfer.py",
"snippet": "def map_16bit_2_8(x):\n\n x = x.astype(np.uint16)\n ret_v = x[..., 1] << 8 + x[..., 0]\n\n return ret_v / 65535."
},
{
"identifier": "split_rgbd",
"path": "utils/color_transfer.py",
"snippet": "def split_rgbd(x, is_bgr=False):\n '''\n x: np.uint8\n '''\n\n rgb, depth = x[..., :3], x[..., 3:]\n if is_bgr:\n rgb = rgb[..., ::-1]\n\n depth = (map_16bit_2_8(depth) * 255).astype(np.uint8)\n depth = np.repeat(depth[..., None], 3, axis=-1)\n rgbd = np.concatenate([rgb, depth], axis=1)\n\n return rgbd"
},
{
"identifier": "split_rgbd_only_tensor",
"path": "utils/color_transfer.py",
"snippet": "def split_rgbd_only_tensor(x_tensor):\n\n # depth is from [0 1]\n rgb, depth = x_tensor[:, :3], x_tensor[:, 3]\n depth_v = repeat(depth[:, None], 'b 1 h w -> b 3 h w')\n\n return torch.cat([rgb, depth_v], dim=1)"
},
{
"identifier": "split_rgbd_tensor",
"path": "utils/color_transfer.py",
"snippet": "def split_rgbd_tensor(x_tensor):\n\n # depth is from [0 1]\n rgb, depth = torch.split(x_tensor, 3, dim=1)\n depth = depth * 255\n depth_v = depth[:, 1] * 255 + depth[:, 0]\n depth_v = depth_v / 65535\n depth_v = repeat(depth_v[:, None], 'b 1 h w -> b 3 h w')\n\n return torch.cat([rgb, depth_v], dim=1)"
}
] | import argparse
import glob
import os
import pdb
import sys
import time
import cv2
import numpy as np
import torch
from contextlib import contextmanager, nullcontext
from itertools import islice
from diffusers.pipelines.stable_diffusion.safety_checker import \
StableDiffusionSafetyChecker
from einops import rearrange, repeat
from imwatermark import WatermarkEncoder
from ldm.models.diffusion.ddim import DDIMSampler
from ldm.models.diffusion.dpm_solver import DPMSolverSampler
from ldm.models.diffusion.plms import PLMSSampler
from ldm.util import instantiate_from_config
from model_zoo import build_model
from omegaconf import OmegaConf
from PIL import Image
from pytorch_lightning import seed_everything
from torch import autocast
from torchvision.utils import make_grid
from tqdm import tqdm, trange
from transformers import AutoFeatureExtractor
from utils.color_transfer import (map_2_16bit, map_16bit_2_8, split_rgbd,
split_rgbd_only_tensor, split_rgbd_tensor) | 10,267 | )
parser.add_argument(
'--laion400m',
action='store_true',
help='uses the LAION400M model',
)
parser.add_argument(
'--fixed_code',
action='store_true',
help='if enabled, uses the same starting code across samples ',
)
parser.add_argument(
'--ddim_eta',
type=float,
default=0.0,
help='ddim eta (eta=0.0 corresponds to deterministic sampling)',
)
parser.add_argument(
'--n_iter',
type=int,
default=1,
help='sample this often',
)
parser.add_argument(
'--H',
type=int,
default=512,
help='image height, in pixel space',
)
parser.add_argument(
'--W',
type=int,
default=512,
help='image width, in pixel space',
)
parser.add_argument(
'--C',
type=int,
default=4,
help='latent channels',
)
parser.add_argument(
'--f',
type=int,
default=8,
help='downsampling factor',
)
parser.add_argument(
'--n_samples',
type=int,
default=1,
help=
'how many samples to produce for each given prompt. A.k.a. batch size',
)
parser.add_argument(
'--n_rows',
type=int,
default=0,
help='rows in the grid (default: n_samples)',
)
parser.add_argument(
'--scale',
type=float,
default=7.5,
help=
'unconditional guidance scale: eps = eps(x, empty) + scale * (eps(x, cond) - eps(x, empty))',
)
parser.add_argument(
'--from-file',
type=str,
help='if specified, load prompts from this file',
)
parser.add_argument(
'--config',
type=str,
default='./configs/inference/nd/nd-1.5-inference.yaml',
help='path to config which constructs model',
)
parser.add_argument(
'--ckpt',
type=str,
default='models/ldm/txt2depth/last.ckpt',
help='path to checkpoint of model',
)
parser.add_argument(
'--seed',
type=int,
default=42,
help='the seed (for reproducible sampling)',
)
parser.add_argument(
'--precision',
type=str,
help='evaluate at this precision',
choices=['full', 'autocast'],
default='autocast')
opt = parser.parse_args()
seed_everything(opt.seed)
ckpt_name = os.path.splitext(os.path.basename(opt.ckpt))[0]
outdir = os.path.join(opt.save_dir, ckpt_name)
os.makedirs(outdir, exist_ok=True)
outpath = outdir
# config = OmegaConf.load(f"{opt.config}")
# model = load_model_from_config(config, f"{opt.ckpt}")
model = build_model('nd', opt.ckpt, strict=False)
device = torch.device(
'cuda') if torch.cuda.is_available() else torch.device('cpu')
model = model.to(device)
if opt.dpm_solver:
sampler = DPMSolverSampler(model)
elif opt.plms:
sampler = PLMSSampler(model)
else:
| sys.path.append('./')
# load safety model
'''
safety_model_id = "CompVis/stable-diffusion-safety-checker"
safety_feature_extractor = AutoFeatureExtractor.from_pretrained(safety_model_id)
safety_checker = StableDiffusionSafetyChecker.from_pretrained(safety_model_id)
'''
NEGATIVE_PROMPTS = 'ugly, tiling, poorly drawn hands, poorly drawn feet, poorly drawn face, out of frame, extra limbs, disfigured, deformed, body out of frame, blurry, bad anatomy, blurred, watermark, grainy, signature, cut off, draft.'
def chunk(it, size):
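# split an iterable into consecutive tuples of length `size` (the final tuple may be shorter)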
it = iter(it)
return iter(lambda: tuple(islice(it, size)), ())
def numpy_to_pil(images):
"""
Convert a numpy image or a batch of images to a PIL image.
"""
if images.ndim == 3:
images = images[None, ...]
images = (images * 255).round().astype('uint8')
pil_images = [Image.fromarray(image) for image in images]
return pil_images
def load_model_from_config(config, ckpt, verbose=False):
print(f'Loading model from {ckpt}')
pl_sd = torch.load(ckpt, map_location='cpu')
if 'global_step' in pl_sd:
print(f"Global Step: {pl_sd['global_step']}")
sd = pl_sd['state_dict']
model = instantiate_from_config(config.model)
m, u = model.load_state_dict(sd, strict=False)
if len(m) > 0 and verbose:
print('missing keys:')
print(m)
if len(u) > 0 and verbose:
print('unexpected keys:')
print(u)
model.cuda()
model.eval()
return model
def put_watermark(img, wm_encoder=None):
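# embed an invisible 'dwtDct' watermark; the PIL image is converted to BGR for the encoder and back to RGB afterwards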
if wm_encoder is not None:
img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
img = wm_encoder.encode(img, 'dwtDct')
img = Image.fromarray(img[:, :, ::-1])
return img
def load_replacement(x):
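# replace a flagged image with assets/rick.jpeg resized to the same shape; return the input unchanged on any failure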
try:
hwc = x.shape
y = Image.open('assets/rick.jpeg').convert('RGB').resize(
(hwc[1], hwc[0]))
y = (np.array(y) / 255.0).astype(x.dtype)
assert y.shape == x.shape
return y
except Exception:
return x
def check_safety(x_image):
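# safety checking is disabled here: images are returned unchanged and never flagged as NSFW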
return x_image, False
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--prompt',
type=str,
nargs='?',
default=None,
help='the prompt to render')
parser.add_argument(
'--save_dir',
type=str,
nargs='?',
help='dir to write results to',
default='outputs/txt2img-samples')
parser.add_argument(
'--skip_grid',
action='store_true',
help=
'do not save a grid, only individual samples. Helpful when evaluating lots of samples',
)
parser.add_argument(
'--skip_save',
action='store_true',
help='do not save individual samples. For speed measurements.',
)
parser.add_argument(
'--ddim_steps',
type=int,
default=50,
help='number of ddim sampling steps',
)
parser.add_argument(
'--plms',
action='store_true',
help='use plms sampling',
)
parser.add_argument(
'--dpm_solver',
action='store_true',
help='use dpm_solver sampling',
)
parser.add_argument(
'--laion400m',
action='store_true',
help='uses the LAION400M model',
)
parser.add_argument(
'--fixed_code',
action='store_true',
help='if enabled, uses the same starting code across samples ',
)
parser.add_argument(
'--ddim_eta',
type=float,
default=0.0,
help='ddim eta (eta=0.0 corresponds to deterministic sampling)',
)
parser.add_argument(
'--n_iter',
type=int,
default=1,
help='sample this often',
)
parser.add_argument(
'--H',
type=int,
default=512,
help='image height, in pixel space',
)
parser.add_argument(
'--W',
type=int,
default=512,
help='image width, in pixel space',
)
parser.add_argument(
'--C',
type=int,
default=4,
help='latent channels',
)
parser.add_argument(
'--f',
type=int,
default=8,
help='downsampling factor',
)
parser.add_argument(
'--n_samples',
type=int,
default=1,
help=
'how many samples to produce for each given prompt. A.k.a. batch size',
)
parser.add_argument(
'--n_rows',
type=int,
default=0,
help='rows in the grid (default: n_samples)',
)
parser.add_argument(
'--scale',
type=float,
default=7.5,
help=
'unconditional guidance scale: eps = eps(x, empty) + scale * (eps(x, cond) - eps(x, empty))',
)
parser.add_argument(
'--from-file',
type=str,
help='if specified, load prompts from this file',
)
parser.add_argument(
'--config',
type=str,
default='./configs/inference/nd/nd-1.5-inference.yaml',
help='path to config which constructs model',
)
parser.add_argument(
'--ckpt',
type=str,
default='models/ldm/txt2depth/last.ckpt',
help='path to checkpoint of model',
)
parser.add_argument(
'--seed',
type=int,
default=42,
help='the seed (for reproducible sampling)',
)
parser.add_argument(
'--precision',
type=str,
help='evaluate at this precision',
choices=['full', 'autocast'],
default='autocast')
opt = parser.parse_args()
seed_everything(opt.seed)
ckpt_name = os.path.splitext(os.path.basename(opt.ckpt))[0]
outdir = os.path.join(opt.save_dir, ckpt_name)
os.makedirs(outdir, exist_ok=True)
outpath = outdir
# config = OmegaConf.load(f"{opt.config}")
# model = load_model_from_config(config, f"{opt.ckpt}")
model = build_model('nd', opt.ckpt, strict=False)
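# build_model prefers the EMA weights (*_ema.ckpt) next to the given checkpoint when they exist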
device = torch.device(
'cuda') if torch.cuda.is_available() else torch.device('cpu')
model = model.to(device)
if opt.dpm_solver:
sampler = DPMSolverSampler(model)
elif opt.plms:
sampler = PLMSSampler(model)
else: | sampler = DDIMSampler(model) | 0 | 2023-12-06 07:29:34+00:00 | 12k |
FrozenBurning/PrimDiffusion | visualize.py | [
{
"identifier": "RayMarcher",
"path": "dva/ray_marcher.py",
"snippet": "class RayMarcher(nn.Module):\n def __init__(\n self,\n image_height,\n image_width,\n volradius,\n fadescale=8.0,\n fadeexp=8.0,\n dt=1.0,\n ray_subsample_factor=1,\n accum=2,\n termthresh=0.99,\n blocksize=None,\n with_t_img=True,\n chlast=False,\n assets=None,\n ):\n super().__init__()\n\n # TODO: add config?\n self.image_height = image_height\n self.image_width = image_width\n self.volradius = volradius\n self.dt = dt\n\n self.fadescale = fadescale\n self.fadeexp = fadeexp\n\n # NOTE: this seems to not work for other configs?\n if blocksize is None:\n blocksize = (8, 16)\n\n self.blocksize = blocksize\n self.with_t_img = with_t_img\n self.chlast = chlast\n\n self.accum = accum\n self.termthresh = termthresh\n\n base_pixel_coords = th.stack(\n th.meshgrid(\n th.arange(self.image_height, dtype=th.float32),\n th.arange(self.image_width, dtype=th.float32),\n )[::-1],\n dim=-1,\n )\n self.register_buffer(\"base_pixel_coords\", base_pixel_coords, persistent=False)\n self.fixed_bvh_cache = {-1: (th.empty(0), th.empty(0), th.empty(0))}\n self.ray_subsample_factor = ray_subsample_factor\n\n def _set_pix_coords(self):\n dev = self.base_pixel_coords.device\n self.base_pixel_coords = th.stack(\n th.meshgrid(\n th.arange(self.image_height, dtype=th.float32, device=dev),\n th.arange(self.image_width, dtype=th.float32, device=dev),\n )[::-1],\n dim=-1,\n )\n\n def resize(self, h: int, w: int):\n self.image_height = h\n self.image_width = w\n\n self._set_pix_coords()\n\n def forward(\n self,\n prim_rgba: th.Tensor,\n prim_pos: th.Tensor,\n prim_rot: th.Tensor,\n prim_scale: th.Tensor,\n K: th.Tensor,\n RT: th.Tensor,\n ray_subsample_factor: Optional[int] = None,\n ):\n \"\"\"\n Args:\n prim_rgba: primitive payload [B, K, 4, S, S, S],\n K - # of primitives, S - primitive size\n prim_pos: locations [B, K, 3]\n prim_rot: rotations [B, K, 3, 3]\n prim_scale: scales [B, K, 3]\n K: intrinsics [B, 3, 3]\n RT: extrinsics [B, 3, 4]\n Returns:\n a dict of tensors\n \"\"\"\n # TODO: maybe we can re-use mvpraymarcher?\n B = prim_rgba.shape[0]\n device = prim_rgba.device\n\n # TODO: this should return focal 2x2?\n camera = convert_camera_parameters(RT, K)\n camera = {k: v.contiguous() for k, v in camera.items()}\n\n dt = self.dt / self.volradius\n\n if ray_subsample_factor is None:\n ray_subsample_factor = self.ray_subsample_factor\n\n if ray_subsample_factor > 1 and self.training:\n pixel_coords = subsample_pixel_coords(\n self.base_pixel_coords, int(B), ray_subsample_factor\n )\n elif ray_subsample_factor > 1:\n pixel_coords = resize_pixel_coords(\n self.base_pixel_coords,\n int(B),\n ray_subsample_factor,\n )\n else:\n pixel_coords = (\n self.base_pixel_coords[np.newaxis].expand(B, -1, -1, -1).contiguous()\n )\n\n prim_pos = prim_pos / self.volradius\n\n focal = th.diagonal(camera[\"focal\"], dim1=1, dim2=2).contiguous()\n\n # TODO: port this?\n raypos, raydir, tminmax = compute_raydirs(\n viewpos=camera[\"campos\"],\n viewrot=camera[\"camrot\"],\n focal=focal,\n princpt=camera[\"princpt\"],\n pixelcoords=pixel_coords,\n volradius=self.volradius,\n )\n\n rgba = mvpraymarch(\n raypos,\n raydir,\n stepsize=dt,\n tminmax=tminmax,\n algo=0,\n template=prim_rgba.permute(0, 1, 3, 4, 5, 2).contiguous(),\n warp=None,\n termthresh=self.termthresh,\n primtransf=(prim_pos, prim_rot, prim_scale),\n fadescale=self.fadescale,\n fadeexp=self.fadeexp,\n usebvh=\"fixedorder\",\n chlast=True,\n )\n\n rgba = rgba.permute(0, 3, 1, 2)\n\n preds = {\n \"rgba_image\": rgba,\n \"pixel_coords\": pixel_coords,\n }\n\n return 
preds"
},
{
"identifier": "generate_colored_boxes",
"path": "dva/ray_marcher.py",
"snippet": "def generate_colored_boxes(template, prim_rot, alpha=10000.0, seed=123456):\n B = template.shape[0]\n output = template.clone()\n device = template.device\n\n lightdir = -3 * th.ones([B, 3], dtype=th.float32, device=device)\n lightdir = lightdir / th.norm(lightdir, p=2, dim=1, keepdim=True)\n\n zz, yy, xx = th.meshgrid(\n th.linspace(-1.0, 1.0, template.size(-1), device=device),\n th.linspace(-1.0, 1.0, template.size(-1), device=device),\n th.linspace(-1.0, 1.0, template.size(-1), device=device),\n )\n primnormalx = th.where(\n (th.abs(xx) >= th.abs(yy)) & (th.abs(xx) >= th.abs(zz)),\n th.sign(xx) * th.ones_like(xx),\n th.zeros_like(xx),\n )\n primnormaly = th.where(\n (th.abs(yy) >= th.abs(xx)) & (th.abs(yy) >= th.abs(zz)),\n th.sign(yy) * th.ones_like(xx),\n th.zeros_like(xx),\n )\n primnormalz = th.where(\n (th.abs(zz) >= th.abs(xx)) & (th.abs(zz) >= th.abs(yy)),\n th.sign(zz) * th.ones_like(xx),\n th.zeros_like(xx),\n )\n primnormal = th.stack([primnormalx, -primnormaly, -primnormalz], dim=-1)\n primnormal = primnormal / th.sqrt(th.sum(primnormal**2, dim=-1, keepdim=True))\n\n output[:, :, 3, :, :, :] = alpha\n\n np.random.seed(seed)\n\n for i in range(template.size(1)):\n # generating a random color\n output[:, i, 0, :, :, :] = np.random.rand() * 255.0\n output[:, i, 1, :, :, :] = np.random.rand() * 255.0\n output[:, i, 2, :, :, :] = np.random.rand() * 255.0\n\n # get light direction in local coordinate system?\n lightdir0 = lightdir\n mult = th.sum(\n lightdir0[:, None, None, None, :] * primnormal[np.newaxis], dim=-1\n )[:, np.newaxis, :, :, :].clamp(min=0.2)\n output[:, i, :3, :, :, :] *= 1.4 * mult\n return output"
},
{
"identifier": "RenderPeopleSViewDataset",
"path": "primdiffusion/dataset/renderpeople_crossid_dataset.py",
"snippet": "class RenderPeopleSViewDataset(Dataset):\n def __init__(\n self,\n root_dir,\n subject_ids,\n smpl_poses,\n image,\n image_mask,\n image_part_mask,\n cam_path,\n frame_list=None,\n cameras=None,\n cond_cameras=None,\n sample_cameras=True,\n camera_id=None,\n image_height=1024,\n image_width=1024,\n is_train=True,\n **kwargs,\n ):\n super().__init__()\n # subject ids is a text file contains list of subject ids\n self.image_height = image_height\n self.image_width = image_width\n self.ref_frame = 0\n\n with open(subject_ids, 'r') as f:\n human_list = f.read().splitlines()\n self.subject_ids = human_list\n self.root_dir = root_dir\n\n if frame_list is None:\n n_frames = len(os.listdir(os.path.join(self.root_dir, self.subject_ids[0], 'img', 'camera0000')))\n self.frame_list = [str(fid) for fid in range(n_frames)]\n\n self.image_path = image\n self.image_mask_path = image_mask\n self.image_part_mask_path = image_part_mask\n\n self.is_train = is_train\n all_cameras = self.load_all_cameras(cam_path)\n\n # TODO: inference logics\n if not self.is_train:\n assert not sample_cameras\n assert camera_id is not None\n\n self.cameras = all_cameras\n\n self.cond_cameras = cond_cameras\n\n self.sample_cameras = sample_cameras\n self.camera_id = camera_id\n\n self.all_smpl = self.load_all_smpl(smpl_poses)\n\n def load_all_smpl(self, smpl_poses):\n all_smpl = {}\n for people_id in self.subject_ids:\n current_smpl_path = smpl_poses.format(people_id=people_id)\n smpl_param = dict(np.load(current_smpl_path, allow_pickle=True))['smpl'].item()\n poses = np.zeros((smpl_param['body_pose'].shape[0], 72)).astype(np.float32)\n poses[:, :3] = np.array(smpl_param['global_orient']).astype(np.float32)\n poses[:, 3:] = np.array(smpl_param['body_pose']).astype(np.float32)\n\n shapes = np.array(smpl_param['betas']).astype(np.float32)\n shapes = np.repeat(shapes[:], poses.shape[0], axis=0)\n Rh = smpl_param['global_orient'].astype(np.float32)\n Th = smpl_param['transl'].astype(np.float32)\n current_smpl = {\n 'shapes': shapes,\n 'Rh': Rh * 0, #FIXME: hack\n 'Th': Th,\n 'poses': poses,\n }\n all_smpl[people_id] = current_smpl\n\n return all_smpl\n\n def load_all_cameras(self, camera_path):\n # input path to camera.json under synbody sequence\n # all_cameras is dict of dict\n all_cameras = {}\n for people_id in self.subject_ids:\n current_camera_path = camera_path.format(people_id=people_id)\n current_camera = {}\n with open(current_camera_path) as f:\n camera = json.load(f)\n for view_index in range(len(camera.keys())):\n K, R, T, _ = get_KRTD(camera, view_index)\n current_camera['camera{:04d}'.format(view_index)] = {\n \"Rt\": np.concatenate([R, T[..., None]], axis=1).astype(np.float32),\n \"K\": K.astype(np.float32),\n }\n for c in current_camera.values():\n c[\"cam_pos\"] = -np.dot(c[\"Rt\"][:3, :3].T, c[\"Rt\"][:3, 3])\n c[\"Rt\"][:, -1] *= 1000.0\n all_cameras[people_id] = current_camera\n return all_cameras\n\n def __len__(self):\n return len(self.subject_ids) * 200\n\n def __getitem__(self, idx):\n # idx is subject_id wise index\n people_id = self.subject_ids[idx % len(self.subject_ids)]\n\n # random sample frames\n frame = (\n random.choice(self.frame_list)\n )\n\n # random sample cameras\n camera_id = (\n random.choice(list(self.cameras[people_id].keys()))\n if self.sample_cameras\n else self.camera_id\n )\n fmts = dict(people_id=people_id, frame=int(frame), camera=camera_id)\n\n sample = {\"index\": idx, **fmts}\n\n sample.update(load_smpl_params(self.all_smpl[people_id], int(frame)))\n\n ref_frame_smpl = 
{'ref_' + k: v for k, v in load_smpl_params(self.all_smpl[people_id], int(self.ref_frame)).items()}\n sample.update(ref_frame_smpl)\n\n sample[\"image\"] = np.transpose(\n cv2.imread(self.image_path.format(**fmts))[..., ::-1].astype(np.float32),\n axes=(2, 0, 1),\n )\n\n # reading all the cond images\n if self.cond_cameras:\n sample[\"cond_image\"] = []\n sample[\"cond_Rt\"] = []\n sample[\"cond_K\"] = []\n # for cond_camera_id in self.cond_cameras:\n # FIXME: hack for random condition views\n cond_camera_id = random.choice(list(self.cameras[people_id].keys()))\n if True:\n cond_image = np.transpose(\n cv2.imread(\n self.image_path.format(\n people_id=people_id, frame=int(self.ref_frame), camera=cond_camera_id\n )\n )[..., ::-1].astype(np.float32),\n axes=(2, 0, 1),\n )\n sample[\"cond_image\"].append(cond_image)\n sample[\"cond_Rt\"].append(self.cameras[people_id][cond_camera_id][\"Rt\"])\n sample[\"cond_K\"].append(self.cameras[people_id][cond_camera_id][\"K\"])\n\n for key in [\"image\", \"K\", \"Rt\"]:\n sample[f\"cond_{key}\"] = np.stack(sample[f\"cond_{key}\"], axis=0)\n\n sample[\"cond_cameras\"] = self.cond_cameras[:]\n\n sample[\"image\"] = np.transpose(\n cv2.imread(self.image_path.format(**fmts))[..., ::-1].astype(np.float32),\n axes=(2, 0, 1),\n )\n\n image_mask = cv2.imread(self.image_mask_path.format(**fmts))\n border = 3\n kernel = np.ones((border, border), np.uint8)\n msk_erode = cv2.erode(image_mask.copy(), kernel)[np.newaxis, ..., 0]\n sample[\"image_mask\"] = (msk_erode != 0).astype(np.float32)\n\n image_part_mask = cv2.imread(self.image_part_mask_path.format(**fmts))\n part_msk_erode = cv2.erode(image_part_mask.copy(), kernel)[np.newaxis, ..., 0]\n sample[\"image_part_mask\"] = part_msk_erode\n\n sample[\"image_bg\"] = sample[\"image\"] * ~(sample[\"image_part_mask\"] != 0)\n\n sample.update(self.cameras[people_id][camera_id])\n\n return sample\n \n def gen_inf_cameras(self, num_views = 5):\n training_views = self.cameras[self.subject_ids[0]]\n self.training_views = training_views\n num_training_views = len(training_views.keys())\n interpolation_anchors = []\n for view_index in range(num_training_views):\n Rt = training_views['camera{:04d}'.format(view_index)]['Rt']\n K = training_views['camera{:04d}'.format(view_index)]['K']\n rot = Rt[:, :3]\n trans = Rt[:, 3]\n interpolation_anchors.append((rot, trans))\n interpolated_poses = interpolate_poses(interpolation_anchors, num_views)\n\n inf_camera = {}\n for people_id in self.subject_ids:\n current_camera = {}\n for view_index in range(len(interpolated_poses)):\n R, T = interpolated_poses[view_index]\n current_camera['camera{:04d}'.format(view_index)] = {\n \"Rt\": np.concatenate([R, T[..., None]], axis=1).astype(np.float32),\n \"K\": K.astype(np.float32),\n }\n for c in current_camera.values():\n c[\"cam_pos\"] = -np.dot(c[\"Rt\"][:3, :3].T, c[\"Rt\"][:3, 3])\n # c[\"Rt\"][:, -1] *= 1000.0\n inf_camera[people_id] = current_camera\n self.inf_cameras = inf_camera\n\n\n def inf_sample(self, people_id, camera_id, frame_id, cond_sample):\n fmts = dict(people_id=people_id, frame=int(frame_id), camera=camera_id)\n sample = {}\n sample.update({**fmts})\n\n sample.update(load_smpl_params(self.all_smpl[people_id], int(frame_id)))\n\n sample.update(self.inf_cameras[people_id][camera_id])\n\n for k, v in sample.items():\n if isinstance(v, np.ndarray):\n sample[k] = v[None, ...]\n\n sample.update(cond_sample)\n return sample\n\n def cond_sample(self, people_id):\n sample = {}\n # reading all the cond images\n if self.cond_cameras:\n 
sample[\"cond_image\"] = []\n sample[\"cond_Rt\"] = []\n sample[\"cond_K\"] = []\n cond_camera_id = random.choice(list(self.cameras[people_id].keys()))\n if True:\n cond_image = np.transpose(\n cv2.imread(\n self.image_path.format(\n people_id=people_id, frame=int(self.ref_frame), camera=cond_camera_id\n )\n )[..., ::-1].astype(np.float32),\n axes=(2, 0, 1),\n )\n sample[\"cond_image\"].append(cond_image)\n sample[\"cond_Rt\"].append(self.cameras[people_id][cond_camera_id][\"Rt\"])\n sample[\"cond_K\"].append(self.cameras[people_id][cond_camera_id][\"K\"])\n\n for key in [\"image\", \"K\", \"Rt\"]:\n sample[f\"cond_{key}\"] = np.stack(sample[f\"cond_{key}\"], axis=0)\n\n sample[\"cond_cameras\"] = self.cond_cameras[:]\n for k, v in sample.items():\n if isinstance(v, np.ndarray):\n sample[k] = v[None, ...]\n return sample\n \n\n def inf_sample_wsmpl(self, people_id, camera_id, frame_id, cond_sample, smpl_param):\n fmts = dict(people_id=people_id, frame=int(frame_id), camera=camera_id)\n sample = {}\n sample.update({**fmts})\n\n sample.update(load_smpl_params(smpl_param, int(frame_id)))\n\n sample.update(self.inf_cameras[people_id][camera_id])\n\n for k, v in sample.items():\n if isinstance(v, np.ndarray):\n sample[k] = v[None, ...]\n\n sample.update(cond_sample)\n return sample\n\n def sample_cam_smpl(self):\n people_id = random.choice(self.subject_ids)\n frame_id = random.choice(self.frame_list)\n camera_id = random.choice(list(self.cameras[people_id].keys()))\n fmts = dict(people_id=people_id, frame=int(frame_id), camera=camera_id)\n sample = {}\n sample.update({**fmts})\n sample.update(load_smpl_params(self.all_smpl[people_id], int(frame_id)))\n sample.update(self.cameras[people_id][camera_id])\n for k, v in sample.items():\n if isinstance(v, np.ndarray):\n sample[k] = v[None, ...]\n return sample"
},
{
"identifier": "load_static_assets_crossid_smpl",
"path": "dva/io.py",
"snippet": "def load_static_assets_crossid_smpl(config):\n # with chumpy dependency!!!\n data_struct = read_pickle(config.data.smpl_topology)\n vt = np.load(os.path.join(os.path.dirname(config.data.smpl_topology), 'basicModel_vt.npy'))\n ft = np.load(os.path.join(os.path.dirname(config.data.smpl_topology), 'basicModel_ft.npy'))\n\n n_verts = data_struct[\"v_template\"].shape[0]\n\n topology = AttrDict(\n dict(\n vi=data_struct[\"f\"].astype(np.int64),\n vt=vt.astype(np.float32),\n vti=ft.astype(np.int64),\n n_verts=n_verts,\n )\n )\n\n topology.v2uv = compute_v2uv(topology.n_verts, topology.vi, topology.vti)\n\n nbs_idxs, nbs_weights = compute_neighbours(topology.n_verts, topology[\"vi\"])\n topology.nbs_idxs = nbs_idxs\n topology.nbs_weights = nbs_weights\n\n static_assets = AttrDict(\n dict(\n topology=topology,\n lbs_template_verts=data_struct[\"v_template\"],\n smpl_path=config.smpl_dir,\n )\n )\n\n if \"ref_frame\" in config: \n current_smpl_path = config.data.smpl_poses.format(people_id='seq_000016-rp_alison_rigged_002')\n smpl_param = dict(np.load(current_smpl_path, allow_pickle=True))['smpl'].item()\n poses = np.zeros((smpl_param['body_pose'].shape[0], 72)).astype(np.float32)\n poses[:, :3] = np.array(smpl_param['global_orient']).astype(np.float32)\n poses[:, 3:] = np.array(smpl_param['body_pose']).astype(np.float32)\n shapes = np.array(smpl_param['betas']).astype(np.float32)\n shapes = np.repeat(shapes[:], poses.shape[0], axis=0)\n Rh = smpl_param['global_orient'].astype(np.float32)\n Th = smpl_param['transl'].astype(np.float32)\n current_smpl = {\n 'shapes': shapes,\n 'Rh': Rh * 0, #FIXME: hack\n 'Th': Th,\n 'poses': poses,\n }\n\n static_assets[\"ref_frame\"] = {k: v[config.ref_frame][None, ...] for k, v in current_smpl.items()}\n\n return static_assets"
},
{
"identifier": "load_from_config",
"path": "dva/io.py",
"snippet": "def load_from_config(config, **kwargs):\n \"\"\"Instantiate an object given a config and arguments.\"\"\"\n assert \"class_name\" in config and \"module_name\" not in config\n config = copy.deepcopy(config)\n class_name = config.pop(\"class_name\")\n object_class = load_class(class_name)\n return object_class(**config, **kwargs)"
},
{
"identifier": "to_device",
"path": "dva/utils.py",
"snippet": "def to_device(values, device=None, non_blocking=True):\n \"\"\"Transfer a set of values to the device.\n Args:\n values: a nested dict/list/tuple of tensors\n device: argument to `to()` for the underlying vector\n NOTE:\n if the device is not specified, using `th.cuda()`\n \"\"\"\n if device is None:\n device = th.device(\"cuda\")\n\n if isinstance(values, dict):\n return {k: to_device(v, device=device) for k, v in values.items()}\n elif isinstance(values, tuple):\n return tuple(to_device(v, device=device) for v in values)\n elif isinstance(values, list):\n return [to_device(v, device=device) for v in values]\n elif isinstance(values, th.Tensor):\n return values.to(device, non_blocking=non_blocking)\n elif isinstance(values, nn.Module):\n return values.to(device)\n elif isinstance(values, np.ndarray):\n return th.from_numpy(values).to(device)\n else:\n return values"
},
{
"identifier": "make_postex",
"path": "dva/geom.py",
"snippet": "def make_postex(v, idxim, barim):\n return (\n barim[None, :, :, 0, None] * v[:, idxim[:, :, 0]]\n + barim[None, :, :, 1, None] * v[:, idxim[:, :, 1]]\n + barim[None, :, :, 2, None] * v[:, idxim[:, :, 2]]\n ).permute(0, 3, 1, 2)"
},
{
"identifier": "compute_tbn",
"path": "dva/geom.py",
"snippet": "def compute_tbn(geom, vt, vi, vti):\n \"\"\"Computes tangent, bitangent, and normal vectors given a mesh.\n Args:\n geom: [N, n_verts, 3] th.Tensor\n Vertex positions.\n vt: [n_uv_coords, 2] th.Tensor\n UV coordinates.\n vi: [..., 3] th.Tensor\n Face vertex indices.\n vti: [..., 3] th.Tensor\n Face UV indices.\n Returns:\n [..., 3] th.Tensors for T, B, N.\n \"\"\"\n\n v0 = geom[:, vi[..., 0]]\n v1 = geom[:, vi[..., 1]]\n v2 = geom[:, vi[..., 2]]\n vt0 = vt[vti[..., 0]]\n vt1 = vt[vti[..., 1]]\n vt2 = vt[vti[..., 2]]\n\n v01 = v1 - v0\n v02 = v2 - v0\n vt01 = vt1 - vt0\n vt02 = vt2 - vt0\n f = 1.0 / (\n vt01[None, ..., 0] * vt02[None, ..., 1]\n - vt01[None, ..., 1] * vt02[None, ..., 0]\n )\n tangent = f[..., None] * th.stack(\n [\n v01[..., 0] * vt02[None, ..., 1] - v02[..., 0] * vt01[None, ..., 1],\n v01[..., 1] * vt02[None, ..., 1] - v02[..., 1] * vt01[None, ..., 1],\n v01[..., 2] * vt02[None, ..., 1] - v02[..., 2] * vt01[None, ..., 1],\n ],\n dim=-1,\n )\n tangent = F.normalize(tangent, dim=-1)\n normal = F.normalize(th.cross(v01, v02, dim=3), dim=-1)\n bitangent = F.normalize(th.cross(tangent, normal, dim=3), dim=-1)\n\n return tangent, bitangent, normal"
}
] | import os
import sys
import imageio
import torch as th
import numpy as np
import random
import logging
from omegaconf import OmegaConf
from dva.ray_marcher import RayMarcher, generate_colored_boxes
from primdiffusion.dataset.renderpeople_crossid_dataset import RenderPeopleSViewDataset
from dva.io import load_static_assets_crossid_smpl, load_from_config
from dva.utils import to_device
from dva.geom import make_postex, compute_tbn | 7,701 | )
preds_boxes = rm(
prim_rgba=boxes_rgba,
prim_pos=preds["prim_pos"],
prim_scale=preds["prim_scale"],
prim_rot=preds["prim_rot"],
RT=batch["Rt"],
K=batch["K"],
)
return preds_boxes["rgba_image"][:, :3].permute(0, 2, 3, 1)
def set_random_seed(seed):
r"""Set random seeds for everything.
Args:
seed (int): Random seed.
"""
print(f"Using random seed {seed}")
random.seed(seed)
np.random.seed(seed)
th.manual_seed(seed)
th.cuda.manual_seed(seed)
th.cuda.manual_seed_all(seed)
def to_video_out(input):
ndarr = input[0].mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to("cpu", th.uint8).numpy()
return ndarr
def main(config):
use_ddim = config.ddim
device = th.device("cuda:0")
th.cuda.set_device(device)
static_assets = load_static_assets_crossid_smpl(config)
inference_output_dir = f"{config.output_dir}/primdiffusion_interm_visualization"
checkpoint_path = config.checkpoint_path
os.makedirs(inference_output_dir, exist_ok=True)
video_path = os.path.join(inference_output_dir, 'videos')
os.makedirs(video_path, exist_ok=True)
OmegaConf.save(config, os.path.join(inference_output_dir, "config.yml"))
logger.info(f"saving results to {inference_output_dir}")
logger.info(f"starting inference with the config: {OmegaConf.to_yaml(config)}")
model = load_from_config(
config.model,
assets=static_assets,
)
print('loading checkpoint {}'.format(checkpoint_path))
state_dict = th.load(checkpoint_path, map_location='cpu')
model.load_state_dict(state_dict['model_state_dict'])
model = model.to(device)
model.device = device
model.eval()
# computing values for the given viewpoints
rm = RayMarcher(
config.image_height,
config.image_width,
**config.rm,
).to(device)
dataset = RenderPeopleSViewDataset(
**config.data,
cameras=config.cameras_train,
cond_cameras=config.cameras_cond,
sample_cameras=False,
is_train=False,
camera_id='00',
)
sample_num = 1
seed_list = [1007,]
dataset.gen_inf_cameras(num_views=5)
for iter in range(1000):
logger.info('Rendering iteration-{:04d}......'.format(iter))
set_random_seed(iter)
batch = dataset.sample_cam_smpl()
batch = to_device(batch, device)
if use_ddim:
log_every_t = 1
samples, z_denoise_row = model.sample_log(cond=None, batch_size = sample_num, ddim=True, ddim_steps=100, eta=0.0, log_every_t=log_every_t)
z_denoise_row = z_denoise_row['x_inter']
else:
log_every_t = 10
samples, z_denoise_row = model.sample_log(cond=None, batch_size = sample_num, ddim=False, ddim_steps=None, eta=0.0, log_every_t=log_every_t)
samples = (samples / model.scaling_factor + 1) / 2. * 255.
denoise_row = (th.stack(z_denoise_row) / model.scaling_factor + 1) / 2. * 255
prim_size = config.model.bodydecoder_config.prim_size
n_prims_x = n_prims_y = int(config.model.bodydecoder_config.n_prims ** 0.5)
# plot denoising row
denoise_row = denoise_row.reshape(-1, sample_num, prim_size, 7, n_prims_y, prim_size, n_prims_x, prim_size).permute(0, 1, 4, 6, 3, 2, 5, 7).reshape(-1, sample_num, n_prims_y * n_prims_x, 7, prim_size, prim_size, prim_size)
denoise_sample_deltascale = th.mean(denoise_row[:, :, :, 4:], dim=(-1, -2, -3)) / 255. * 20.
denoise_sample_rgba = denoise_row[:, :, :, :4, :, :, :]
num_steps = denoise_row.shape[0]
for i in range(sample_num):
batch = dataset.sample_cam_smpl()
sam_cam = {}
sam_cam.update(dataset.inf_cameras[dataset.subject_ids[0]]['camera0000'])
for k, v in sam_cam.items():
if isinstance(v, np.ndarray):
sam_cam[k] = v[None, ...]
batch.update(sam_cam)
batch = to_device(batch, device)
B = 1
geom = model.bodydecoder.lbs_fn(
poses = batch["poses"],
shapes = batch["shapes"],
Rh = batch["Rh"],
Th = batch["Th"],
v_template = model.bodydecoder.lbs_fn.v_template[np.newaxis],
) * 1000.0
prim_pos_mesh = (
|
device = th.device("cuda")
logger = logging.getLogger("visualize.py")
def render_mvp_boxes(rm, batch, preds):
with th.no_grad():
boxes_rgba = generate_colored_boxes(
preds["prim_rgba"],
preds["prim_rot"],
)
preds_boxes = rm(
prim_rgba=boxes_rgba,
prim_pos=preds["prim_pos"],
prim_scale=preds["prim_scale"],
prim_rot=preds["prim_rot"],
RT=batch["Rt"],
K=batch["K"],
)
return preds_boxes["rgba_image"][:, :3].permute(0, 2, 3, 1)
def set_random_seed(seed):
r"""Set random seeds for everything.
Args:
seed (int): Random seed.
"""
print(f"Using random seed {seed}")
random.seed(seed)
np.random.seed(seed)
th.manual_seed(seed)
th.cuda.manual_seed(seed)
th.cuda.manual_seed_all(seed)
def to_video_out(input):
ndarr = input[0].mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to("cpu", th.uint8).numpy()
return ndarr
def main(config):
use_ddim = config.ddim
device = th.device("cuda:0")
th.cuda.set_device(device)
static_assets = load_static_assets_crossid_smpl(config)
inference_output_dir = f"{config.output_dir}/primdiffusion_interm_visualization"
checkpoint_path = config.checkpoint_path
os.makedirs(inference_output_dir, exist_ok=True)
video_path = os.path.join(inference_output_dir, 'videos')
os.makedirs(video_path, exist_ok=True)
OmegaConf.save(config, os.path.join(inference_output_dir, "config.yml"))
logger.info(f"saving results to {inference_output_dir}")
logger.info(f"starting inference with the config: {OmegaConf.to_yaml(config)}")
model = load_from_config(
config.model,
assets=static_assets,
)
print('loading checkpoint {}'.format(checkpoint_path))
state_dict = th.load(checkpoint_path, map_location='cpu')
model.load_state_dict(state_dict['model_state_dict'])
model = model.to(device)
model.device = device
model.eval()
# computing values for the given viewpoints
rm = RayMarcher(
config.image_height,
config.image_width,
**config.rm,
).to(device)
dataset = RenderPeopleSViewDataset(
**config.data,
cameras=config.cameras_train,
cond_cameras=config.cameras_cond,
sample_cameras=False,
is_train=False,
camera_id='00',
)
sample_num = 1
seed_list = [1007,]
dataset.gen_inf_cameras(num_views=5)
for iter in range(1000):
logger.info('Rendering iteration-{:04d}......'.format(iter))
set_random_seed(iter)
batch = dataset.sample_cam_smpl()
batch = to_device(batch, device)
if use_ddim:
log_every_t = 1
samples, z_denoise_row = model.sample_log(cond=None, batch_size = sample_num, ddim=True, ddim_steps=100, eta=0.0, log_every_t=log_every_t)
z_denoise_row = z_denoise_row['x_inter']
else:
log_every_t = 10
samples, z_denoise_row = model.sample_log(cond=None, batch_size = sample_num, ddim=False, ddim_steps=None, eta=0.0, log_every_t=log_every_t)
samples = (samples / model.scaling_factor + 1) / 2. * 255.
denoise_row = (th.stack(z_denoise_row) / model.scaling_factor + 1) / 2. * 255
prim_size = config.model.bodydecoder_config.prim_size
n_prims_x = n_prims_y = int(config.model.bodydecoder_config.n_prims ** 0.5)
# plot denoising row
denoise_row = denoise_row.reshape(-1, sample_num, prim_size, 7, n_prims_y, prim_size, n_prims_x, prim_size).permute(0, 1, 4, 6, 3, 2, 5, 7).reshape(-1, sample_num, n_prims_y * n_prims_x, 7, prim_size, prim_size, prim_size)
denoise_sample_deltascale = th.mean(denoise_row[:, :, :, 4:], dim=(-1, -2, -3)) / 255. * 20.
denoise_sample_rgba = denoise_row[:, :, :, :4, :, :, :]
num_steps = denoise_row.shape[0]
for i in range(sample_num):
batch = dataset.sample_cam_smpl()
sam_cam = {}
sam_cam.update(dataset.inf_cameras[dataset.subject_ids[0]]['camera0000'])
for k, v in sam_cam.items():
if isinstance(v, np.ndarray):
sam_cam[k] = v[None, ...]
batch.update(sam_cam)
batch = to_device(batch, device)
B = 1
geom = model.bodydecoder.lbs_fn(
poses = batch["poses"],
shapes = batch["shapes"],
Rh = batch["Rh"],
Th = batch["Th"],
v_template = model.bodydecoder.lbs_fn.v_template[np.newaxis],
) * 1000.0
prim_pos_mesh = ( | make_postex(geom, model.bodydecoder.prim_vidx_img, model.bodydecoder.prim_bary_img) | 6 | 2023-12-06 05:12:55+00:00 | 12k |
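The next_line target of this record calls make_postex from the context snippets, which blends the three vertices of each rasterized face with barycentric weights. Below is a minimal sketch of that gather-and-blend pattern; the function name, tensor shapes, and toy inputs are assumptions chosen only for illustration.
import torch as th

def barycentric_sample(v, idxim, barim):
    # v: [B, n_verts, 3] vertex positions; idxim: [H, W, 3] face-vertex indices;
    # barim: [H, W, 3] barycentric weights. Returns a [B, 3, H, W] position map,
    # mirroring the interpolation done by make_postex in dva/geom.py.
    blended = (
        barim[None, :, :, 0, None] * v[:, idxim[:, :, 0]]
        + barim[None, :, :, 1, None] * v[:, idxim[:, :, 1]]
        + barim[None, :, :, 2, None] * v[:, idxim[:, :, 2]]
    )
    return blended.permute(0, 3, 1, 2)

# Toy usage with made-up sizes.
v = th.rand(1, 100, 3)
idxim = th.randint(0, 100, (16, 16, 3))
barim = th.rand(16, 16, 3)
barim = barim / barim.sum(-1, keepdim=True)
print(barycentric_sample(v, idxim, barim).shape)  # torch.Size([1, 3, 16, 16])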
ml-stat-Sustech/TorchCP | tests/test_conformal_training.py | [
{
"identifier": "ConfTr",
"path": "torchcp/classification/loss/conftr.py",
"snippet": "class ConfTr(nn.Module):\n \"\"\"\n Conformal Training (Stutz et al., 2021).\n Paper: https://arxiv.org/abs/2110.09192\n\n :param weight: the weight of each loss function\n :param predictor: the CP predictors\n :param alpha: the significance level for each training batch\n :param fraction: the fraction of the calibration set in each training batch\n :param loss_type: the selected (multi-selected) loss functions, which can be \"valid\", \"classification\", \"probs\", \"coverage\".\n :param target_size: Optional: 0 | 1.\n :param loss_transform: a transform for loss\n :param base_loss_fn: a base loss function. For example, cross entropy in classification.\n \"\"\"\n def __init__(self, weight, predictor, alpha, fraction, loss_type=\"valid\", target_size=1,\n loss_transform=\"square\", base_loss_fn=None):\n \n super(ConfTr, self).__init__()\n assert weight>0, \"weight must be greater than 0.\"\n assert (0 < fraction < 1), \"fraction should be a value in (0,1).\"\n assert loss_type in [\"valid\", \"classification\", \"probs\", \"coverage\"], ('loss_type should be a value in ['\n '\"valid\", \"classification\", \"probs\", '\n '\"coverage\"].')\n assert target_size==0 or target_size ==1, \"target_size should be 0 or 1.\"\n assert loss_transform in [\"square\", \"abs\", \"log\"], ('loss_transform should be a value in [\"square\", \"abs\", '\n '\"log\"].')\n self.weight = weight\n self.predictor = predictor\n self.alpha = alpha\n self.fraction = fraction\n self.loss_type = loss_type\n self.target_size = target_size\n self.base_loss_fn = base_loss_fn\n \n if loss_transform == \"square\":\n self.transform = torch.square\n elif loss_transform == \"abs\":\n self.transform = torch.abs\n elif loss_transform == \"log\":\n self.transform = torch.log\n self.loss_functions_dict = {\"valid\": self.__compute_hinge_size_loss,\n \"probs\": self.__compute_probabilistic_size_loss,\n \"coverage\": self.__compute_coverage_loss,\n \"classification\": self.__compute_classification_loss\n }\n\n def forward(self, logits, labels):\n # Compute Size Loss\n val_split = int(self.fraction * logits.shape[0])\n cal_logits = logits[:val_split]\n cal_labels = labels[:val_split]\n test_logits = logits[val_split:]\n test_labels = labels[val_split:]\n\n self.predictor.calculate_threshold(cal_logits.detach(), cal_labels.detach(), self.alpha)\n tau = self.predictor.q_hat\n test_scores = self.predictor.score_function(test_logits)\n # Computing the probability of each label contained in the prediction set.\n pred_sets = torch.sigmoid(tau - test_scores)\n loss = self.weight * self.loss_functions_dict[self.loss_type](pred_sets, test_labels)\n\n if self.base_loss_fn is not None:\n loss += self.base_loss_fn(logits, labels).float()\n\n return loss\n\n def __compute_hinge_size_loss(self, pred_sets, labels):\n return torch.mean(\n self.transform(\n torch.maximum(torch.sum(pred_sets, dim=1) - self.target_size, torch.tensor(0).to(pred_sets.device))))\n\n def __compute_probabilistic_size_loss(self, pred_sets, labels):\n classes = pred_sets.shape[0]\n one_hot_labels = torch.unsqueeze(torch.eye(classes).to(pred_sets.device), dim=0)\n repeated_confidence_sets = torch.repeat_interleave(\n torch.unsqueeze(pred_sets, 2), classes, dim=2)\n loss = one_hot_labels * repeated_confidence_sets + \\\n (1 - one_hot_labels) * (1 - repeated_confidence_sets)\n loss = torch.prod(loss, dim=1)\n return torch.sum(loss, dim=1)\n\n def __compute_coverage_loss(self, pred_sets, labels):\n one_hot_labels = F.one_hot(labels, num_classes=pred_sets.shape[1])\n\n # 
Compute the mean of the sum of confidence_sets multiplied by one_hot_labels\n loss = torch.mean(torch.sum(pred_sets * one_hot_labels, dim=1)) - (1 - self.alpha)\n\n # Apply the transform function (you need to define this)\n transformed_loss = self.transform(loss)\n\n return transformed_loss\n\n def __compute_classification_loss(self, pred_sets, labels):\n # Convert labels to one-hot encoding\n one_hot_labels = F.one_hot(labels, num_classes=pred_sets.shape[1]).float()\n loss_matrix = torch.eye(pred_sets.shape[1], device=pred_sets.device)\n # Calculate l1 and l2 losses\n l1 = (1 - pred_sets) * one_hot_labels * loss_matrix[labels]\n l2 = pred_sets * (1 - one_hot_labels) * loss_matrix[labels]\n\n # Calculate the total loss\n loss = torch.sum(torch.maximum(l1 + l2, torch.zeros_like(l1, device=pred_sets.device)), dim=1)\n\n # Return the mean loss\n return torch.mean(loss)"
},
{
"identifier": "ClassWisePredictor",
"path": "torchcp/classification/predictors/classwise.py",
"snippet": "class ClassWisePredictor(SplitPredictor):\n \"\"\"\n\n Applications of Class-Conditional Conformal Predictor in Multi-Class Classification (Shi et al., 2013)\n paper: https://ieeexplore.ieee.org/document/6784618\n \n \n :param score_function: non-conformity score function.\n :param model: a pytorch model.\n \"\"\"\n\n def __init__(self, score_function, model=None):\n super(ClassWisePredictor, self).__init__(score_function, model)\n self.q_hat = None\n\n def calculate_threshold(self, logits, labels, alpha):\n if alpha >= 1 or alpha <= 0:\n raise ValueError(\"Significance level 'alpha' must be in (0,1).\")\n logits = logits.to(self._device)\n labels = labels.to(self._device)\n # Count the number of classes\n num_classes = logits.shape[1]\n self.q_hat = torch.zeros(num_classes, device=self._device)\n for label in range(num_classes):\n x_cal_tmp = logits[labels == label]\n y_cal_tmp = labels[labels == label]\n scores = self.score_function(x_cal_tmp, y_cal_tmp)\n self.q_hat[label] = self._calculate_conformal_value(scores, alpha)"
},
{
"identifier": "ClusterPredictor",
"path": "torchcp/classification/predictors/cluster.py",
"snippet": "class ClusterPredictor(SplitPredictor):\n \"\"\"\n Class-Conditional Conformal Prediction with Many Classes (Ding et al., 2023).\n paper: https://arxiv.org/abs/2306.09335.\n \n :param score_function: a non-conformity score function.\n :param model: a pytorch model.\n :param ratio_clustering: the ratio of examples in the calibration dataset used to cluster classes.\n :param num_clusters: the number of clusters. If ratio_clustering is \"auto\", the number of clusters is automatically computed.\n :param split: the method to split the dataset into clustering dataset and calibration set. Options are 'proportional' (sample proportional to distribution such that rarest class has n_clustering example), 'doubledip' (don't split and use all data for both steps, or 'random' (each example is assigned to clustering step with some fixed probability).\n \"\"\"\n\n def __init__(self, score_function, model=None, ratio_clustering=\"auto\", num_clusters=\"auto\", split='random',\n temperature=1):\n\n super(ClusterPredictor, self).__init__(score_function, model, temperature)\n self.__ratio_clustering = ratio_clustering\n self.__num_clusters = num_clusters\n self.__split = split\n\n def calculate_threshold(self, logits, labels, alpha):\n if alpha >= 1 or alpha <= 0:\n raise ValueError(\"Significance level 'alpha' must be in (0,1).\")\n logits = logits.to(self._device)\n labels = labels.to(self._device)\n num_classes = logits.shape[1]\n scores = self.score_function(logits, labels)\n\n alpha = torch.tensor(alpha, device=self._device)\n classes_statistics = torch.tensor([torch.sum(labels == k).item() for k in range(num_classes)],\n device=self._device)\n\n # 1) Choose necessary parameters for Cluster algorithm\n if self.__ratio_clustering == 'auto' and self.__num_clusters == 'auto':\n n_min = torch.min(classes_statistics)\n n_thresh = self.__get_quantile_minimum(alpha)\n # Classes with fewer than n_thresh examples will be excluded from clustering\n n_min = torch.maximum(n_min, n_thresh)\n num_remaining_classes = torch.sum((classes_statistics >= n_min).float())\n\n # Compute the number of clusters and the minium number of examples for each class\n n_clustering = (n_min * num_remaining_classes / (75 + num_remaining_classes)).clone().to(\n torch.int32).to(self._device)\n self.__num_clusters = torch.floor(n_clustering / 2).to(torch.int32)\n self.__ratio_clustering = n_clustering / n_min\n\n # 2) Split data\n clustering_scores, clustering_labels, cal_scores, cal_labels = self.__split_data(scores,\n labels,\n classes_statistics)\n\n # 3) Filter \"rare\" classes\n rare_classes = self.__get_rare_classes(clustering_labels, alpha, num_classes)\n\n # 4) Run clustering\n if (num_classes - len(rare_classes) > self.__num_clusters) and (self.__num_clusters > 1):\n # Filter out rare classes and re-index\n remaining_idx, filtered_labels, class_remapping = self.__remap_classes(clustering_labels, rare_classes)\n filtered_scores = clustering_scores[remaining_idx]\n\n # Compute embedding for each class and get class counts\n embeddings, class_cts = self.__embed_all_classes(filtered_scores, filtered_labels)\n kmeans = KMeans(n_clusters=int(self.__num_clusters), n_init=10).fit(X=embeddings.detach().cpu().numpy(),\n sample_weight=np.sqrt(\n class_cts.detach().cpu().numpy()))\n nonrare_class_cluster_assignments = torch.tensor(kmeans.labels_, device=self._device)\n\n cluster_assignments = - torch.ones((num_classes,), dtype=torch.int32, device=self._device)\n\n for cls, remapped_cls in class_remapping.items():\n 
cluster_assignments[cls] = nonrare_class_cluster_assignments[remapped_cls]\n else:\n cluster_assignments = - torch.ones((num_classes,), dtype=torch.int32, device=self._device)\n\n # 5) Compute qhats for each cluster\n\n self.q_hat = self.__compute_cluster_specific_qhats(cluster_assignments,\n cal_scores,\n cal_labels,\n alpha)\n\n def __split_data(self, scores, labels, classes_statistics):\n if self.__split == 'proportional':\n # Split dataset along with fraction \"frac_clustering\"\n num_classes = classes_statistics.shape[0]\n n_k = torch.tensor([self.__ratio_clustering * classes_statistics[k] for k in range(num_classes)],\n device=self._device, dtype=torch.int32)\n idx1 = torch.zeros(labels.shape, dtype=torch.bool, device=self._device)\n for k in range(num_classes):\n # Randomly select n instances of class k\n idx = torch.argwhere(labels == k).flatten()\n random_indices = torch.randint(0, classes_statistics[k], (n_k[k],), device=self._device)\n selected_idx = idx[random_indices]\n idx1[selected_idx] = 1\n clustering_scores = scores[idx1]\n clustering_labels = labels[idx1]\n cal_scores = scores[~idx1]\n cal_labels = labels[~idx1]\n\n elif self.__split == 'doubledip':\n clustering_scores, clustering_labels = scores, labels\n cal_scores, cal_labels = scores, labels\n\n elif self.__split == 'random':\n # Each point is assigned to clustering set w.p. frac_clustering \n idx1 = torch.rand(size=(len(labels),), device=self._device) < self.__ratio_clustering\n\n clustering_scores = scores[idx1]\n clustering_labels = labels[idx1]\n cal_scores = scores[~idx1]\n cal_labels = labels[~idx1]\n else:\n raise Exception(\"Invalid split method. Options are 'proportional', 'doubledip', and 'random'\")\n return clustering_scores, clustering_labels, cal_scores, cal_labels\n\n def __get_quantile_minimum(self, alpha):\n \"\"\"\n Compute smallest n such that ceil((n+1)*(1-alpha)/n) <= 1\n \"\"\"\n n = torch.tensor(0, device=alpha.device)\n while torch.ceil((n + 1) * (1 - alpha) / n) > 1:\n n += 1\n return n\n\n def __get_rare_classes(self, labels, alpha, num_classes):\n \"\"\"\n Choose classes whose number is less than or equal to .\n \"\"\"\n thresh = self.__get_quantile_minimum(alpha)\n classes, cts = torch.unique(labels, return_counts=True)\n rare_classes = classes[cts < thresh].to(self._device)\n\n # Also included any classes that are so rare that we have 0 labels for it\n\n all_classes = torch.arange(num_classes, device=self._device)\n zero_ct_classes = all_classes[(all_classes.view(1, -1) != classes.view(-1, 1)).all(dim=0)]\n rare_classes = torch.concatenate((rare_classes, zero_ct_classes))\n\n return rare_classes\n\n def __remap_classes(self, labels, rare_classes):\n \"\"\"\n Exclude classes in rare_classes and remap remaining classes to be 0-indexed\n\n :returns:\n - remaining_idx: Boolean array the same length as labels. 
Entry i is True\n if labels[i] is not in rare_classes\n - remapped_labels : Array that only contains the entries of labels that are\n not in rare_classes (in order)\n - remapping : Dict mapping old class index to new class index\n\n \"\"\"\n labels = labels.detach().cpu().numpy()\n rare_classes = rare_classes.detach().cpu().numpy()\n remaining_idx = ~np.isin(labels, rare_classes)\n\n remaining_labels = labels[remaining_idx]\n remapped_labels = np.zeros(remaining_labels.shape, dtype=int)\n new_idx = 0\n remapping = {}\n for i in range(len(remaining_labels)):\n if remaining_labels[i] in remapping:\n remapped_labels[i] = remapping[remaining_labels[i]]\n else:\n remapped_labels[i] = new_idx\n remapping[remaining_labels[i]] = new_idx\n new_idx += 1\n\n return torch.from_numpy(remaining_idx).to(self._device), torch.tensor(remapped_labels,\n device=self._device), remapping\n\n def __embed_all_classes(self, scores_all, labels, q=[0.5, 0.6, 0.7, 0.8, 0.9]):\n \"\"\"\n :param scores_all: num_instances-length array where scores_all[i] = score of true class for instance i.\n :param labels: num_instances-length array of true class labels.\n :param q: quantiles to include in embedding.\n\n :returns:\n - embeddings: num_classes x len(q) array where ith row is the embeddings of class i.\n - cts: num_classes-length array where cts[i] = # of times class i appears in labels .\n \"\"\"\n num_classes = len(torch.unique(labels))\n embeddings = torch.zeros((num_classes, len(q)), device=self._device)\n cts = torch.zeros((num_classes,), device=self._device)\n\n for i in range(num_classes):\n if len(scores_all.shape) > 1:\n raise DimensionError(f\"Expected 1-dimension, but got {len(scores_all.shape)}-dimension.\")\n\n class_i_scores = scores_all[labels == i]\n\n cts[i] = class_i_scores.shape[0]\n # Computes the q-quantiles of samples and returns the vector of quantiles\n embeddings[i, :] = torch.quantile(class_i_scores, torch.tensor(q, device=self._device))\n\n return embeddings, cts\n\n def __compute_cluster_specific_qhats(self, cluster_assignments, cal_class_scores, cal_true_labels, alpha):\n '''\n Computes cluster-specific quantiles (one for each class) that will result in marginal coverage of (1-alpha)\n \n :param cluster_assignments: num_classes length array where entry i is the index of the cluster that class i belongs to. Rare classes can be assigned to cluster -1 and they will automatically be given as default_qhat. 
\n :param cal_class_scores: cal_class_scores[i] is the score for instance i.\n :param cal_true_labels: true class labels for instances\n :param alpha: Desired coverage level\n\n\n :return : num_classes length array where entry i is the quantile correspond to the cluster that class i belongs to.\n '''\n\n # Map true class labels to clusters\n cal_true_clusters = torch.tensor([cluster_assignments[label] for label in cal_true_labels], device=self._device)\n num_clusters = torch.max(cluster_assignments) + 1\n \n cluster_qhats = self.__compute_class_specific_qhats(cal_class_scores, cal_true_clusters, num_clusters, alpha)\n # Map cluster qhats back to classes\n num_classes = len(cluster_assignments)\n qhats_class = torch.tensor([cluster_qhats[cluster_assignments[k]] for k in range(num_classes)],\n device=self._device)\n\n return qhats_class\n\n def __compute_class_specific_qhats(self, cal_class_scores, cal_true_clusters, num_clusters, alpha):\n '''\n Computes class-specific quantiles (one for each class) that will result in marginal coverage of (1-alpha)\n \n :param cal_class_scores: num_instances-length array where cal_class_scores[i] is the score for instance i\n :param cal_true_clusters: num_instances-length array of true class labels. If class -1 appears, it will be assigned the null_qhat value. It is appended as an extra entry of the returned q_hats so that q_hats[-1] = null_qhat.\n :param num_clusters: the number of clusters.\n :param alpha: Desired coverage level.\n\n :return: the threshold of each class\n '''\n\n # Compute quantile q_hat that will result in marginal coverage of (1-alpha)\n null_qhat = self._calculate_conformal_value(cal_class_scores, alpha)\n\n q_hats = torch.zeros((num_clusters,), device=self._device) # q_hats[i] = quantile for class i\n for k in range(num_clusters):\n # Only select data for which k is true class\n idx = (cal_true_clusters == k)\n scores = cal_class_scores[idx]\n q_hats[k] = self._calculate_conformal_value(scores, alpha)\n if -1 in cal_true_clusters:\n q_hats = torch.concatenate((q_hats, torch.tensor([null_qhat], device=self._device)))\n\n return q_hats"
},
{
"identifier": "SplitPredictor",
"path": "torchcp/classification/predictors/split.py",
"snippet": "class SplitPredictor(BasePredictor):\n \"\"\"\n Split Conformal Prediction (Vovk et a., 2005).\n Book: https://link.springer.com/book/10.1007/978-3-031-06649-8.\n \n :param score_function: non-conformity score function.\n :param model: a pytorch model.\n :param temperature: the temperature of Temperature Scaling.\n \"\"\"\n def __init__(self, score_function, model=None, temperature=1):\n super().__init__(score_function, model, temperature)\n\n #############################\n # The calibration process\n ############################\n def calibrate(self, cal_dataloader, alpha):\n self._model.eval()\n logits_list = []\n labels_list = []\n with torch.no_grad():\n for examples in cal_dataloader:\n tmp_x, tmp_labels = examples[0].to(self._device), examples[1].to(self._device)\n tmp_logits = self._logits_transformation(self._model(tmp_x)).detach()\n logits_list.append(tmp_logits)\n labels_list.append(tmp_labels)\n logits = torch.cat(logits_list).float()\n labels = torch.cat(labels_list)\n self.calculate_threshold(logits, labels, alpha)\n\n def calculate_threshold(self, logits, labels, alpha):\n if alpha >= 1 or alpha <= 0:\n raise ValueError(\"Significance level 'alpha' must be in (0,1).\")\n logits = logits.to(self._device)\n labels = labels.to(self._device)\n scores = self.score_function(logits, labels)\n self.q_hat = self._calculate_conformal_value(scores, alpha)\n\n def _calculate_conformal_value(self, scores, alpha):\n \"\"\"\n Calculate the 1-alpha quantile of scores.\n \n :param scores: non-conformity scores.\n :param alpha: a significance level.\n \n :return: the threshold which is use to construct prediction sets.\n \"\"\"\n if len(scores) == 0:\n warnings.warn(\n \"The number of scores is 0, which is a invalid scores. To avoid program crash, the threshold is set as torch.inf.\")\n return torch.inf\n qunatile_value = math.ceil(scores.shape[0] + 1) * (1 - alpha) / scores.shape[0]\n\n if qunatile_value > 1:\n warnings.warn(\n \"The value of quantile exceeds 1. It should be a value in (0,1). 
To avoid program crash, the threshold is set as torch.inf.\")\n return torch.inf\n\n return torch.quantile(scores, qunatile_value).to(self._device)\n\n #############################\n # The prediction process\n ############################\n def predict(self, x_batch):\n \"\"\"\n The input of score function is softmax probability.\n\n :param x_batch: a batch of instances.\n \"\"\"\n self._model.eval()\n if self._model != None:\n x_batch = self._model(x_batch.to(self._device)).float()\n x_batch = self._logits_transformation(x_batch).detach()\n sets = self.predict_with_logits(x_batch)\n return sets\n\n def predict_with_logits(self, logits, q_hat=None):\n \"\"\"\n The input of score function is softmax probability.\n if q_hat is not given by the function 'self.calibrate', the construction progress of prediction set is a naive method.\n\n :param logits: model output before softmax.\n :param q_hat: the conformal threshold.\n\n :return: prediction sets\n \"\"\"\n scores = self.score_function(logits).to(self._device)\n if q_hat is None:\n S = self._generate_prediction_set(scores, self.q_hat)\n else:\n S = self._generate_prediction_set(scores, q_hat)\n return S\n\n #############################\n # The evaluation process\n ############################\n\n def evaluate(self, val_dataloader):\n prediction_sets = []\n labels_list = []\n with torch.no_grad():\n for examples in val_dataloader:\n tmp_x, tmp_label = examples[0].to(self._device), examples[1].to(self._device)\n prediction_sets_batch = self.predict(tmp_x)\n prediction_sets.extend(prediction_sets_batch)\n labels_list.append(tmp_label)\n val_labels = torch.cat(labels_list)\n\n res_dict = {\"Coverage_rate\": self._metric('coverage_rate')(prediction_sets, val_labels),\n \"Average_size\": self._metric('average_size')(prediction_sets, val_labels)}\n return res_dict"
},
{
"identifier": "APS",
"path": "torchcp/classification/scores/aps.py",
"snippet": "class APS(BaseScore):\n \"\"\"\n Adaptive Prediction Sets (Romano et al., 2020)\n paper :https://proceedings.neurips.cc/paper/2020/file/244edd7e85dc81602b7615cd705545f5-Paper.pdf\n \"\"\"\n\n def __call__(self, logits, label=None):\n assert len(logits.shape) <= 2, \"The dimension of logits must be less than 2.\"\n if len(logits.shape) == 1:\n logits = logits.unsqueeze(0)\n probs = torch.softmax(logits, dim=-1)\n if label is None:\n return self._calculate_all_label(probs)\n else:\n return self._calculate_single_label(probs, label)\n\n def _calculate_all_label(self, probs):\n indices, ordered, cumsum = self._sort_sum(probs)\n U = torch.rand(probs.shape, device=probs.device)\n ordered_scores = cumsum - ordered * U\n _, sorted_indices = torch.sort(indices, descending=False, dim=-1)\n scores = ordered_scores.gather(dim=-1, index=sorted_indices)\n return scores\n\n def _sort_sum(self, probs):\n # ordered: the ordered probabilities in descending order\n # indices: the rank of ordered probabilities in descending order\n # cumsum: the accumulation of sorted probabilities\n ordered, indices = torch.sort(probs, dim=-1, descending=True)\n cumsum = torch.cumsum(ordered, dim=-1)\n return indices, ordered, cumsum\n\n def _calculate_single_label(self, probs, label):\n indices, ordered, cumsum = self._sort_sum(probs)\n U = torch.rand(indices.shape[0], device=probs.device)\n idx = torch.where(indices == label.view(-1, 1))\n scores_first_rank = U * cumsum[idx]\n idx_minus_one = (idx[0], idx[1] - 1)\n scores_usual = U * ordered[idx] + cumsum[idx_minus_one]\n return torch.where(idx[1] == 0, scores_first_rank, scores_usual)"
},
{
"identifier": "RAPS",
"path": "torchcp/classification/scores/raps.py",
"snippet": "class RAPS(APS):\n \"\"\"\n Regularized Adaptive Prediction Sets (Angelopoulos et al., 2020)\n paper : https://arxiv.org/abs/2009.14193\n \n :param penalty: the weight of regularization. When penalty = 0, RAPS=APS.\n :param kreg: the rank of regularization which is an integer in [0,labels_num].\n \"\"\"\n\n def __init__(self, penalty, kreg=0):\n \n if penalty <= 0:\n raise ValueError(\"The parameter 'penalty' must be a positive value.\")\n if kreg < 0:\n raise ValueError(\"The parameter 'kreg' must be a nonnegative value.\")\n if type(kreg) != int:\n raise TypeError(\"The parameter 'kreg' must be a integer.\")\n super(RAPS, self).__init__()\n self.__penalty = penalty\n self.__kreg = kreg\n\n def _calculate_all_label(self, probs):\n indices, ordered, cumsum = self._sort_sum(probs)\n U = torch.rand(probs.shape, device=probs.device)\n reg = torch.maximum(self.__penalty * (torch.arange(1, probs.shape[-1] + 1, device=probs.device) - self.__kreg),\n torch.tensor(0, device=probs.device))\n ordered_scores = cumsum - ordered * U + reg\n _, sorted_indices = torch.sort(indices, descending=False, dim=-1)\n scores = ordered_scores.gather(dim=-1, index=sorted_indices)\n return scores\n \n def _calculate_single_label(self, probs, label):\n indices, ordered, cumsum = self._sort_sum(probs)\n U = torch.rand(indices.shape[0], device=probs.device)\n idx = torch.where(indices == label.view(-1, 1))\n reg = torch.maximum(self.__penalty * (idx[1] + 1 - self.__kreg), torch.tensor(0).to(probs.device))\n scores_first_rank = U * ordered[idx] + reg\n idx_minus_one = (idx[0], idx[1] - 1)\n scores_usual = U * ordered[idx] + cumsum[idx_minus_one] + reg\n return torch.where(idx[1] == 0, scores_first_rank, scores_usual)"
},
{
"identifier": "SAPS",
"path": "torchcp/classification/scores/saps.py",
"snippet": "class SAPS(APS):\n \"\"\"\n Sorted Adaptive Prediction Sets (Huang et al., 2023)\n paper: https://arxiv.org/abs/2310.06430\n \n :param weight: the weight of label ranking.\n \"\"\"\n\n def __init__(self, weight):\n\n super(SAPS, self).__init__()\n if weight <= 0:\n raise ValueError(\"The parameter 'weight' must be a positive value.\")\n self.__weight = weight\n\n def _calculate_all_label(self, probs):\n indices, ordered, cumsum = self._sort_sum(probs)\n ordered[:, 1:] = self.__weight\n cumsum = torch.cumsum(ordered, dim=-1)\n U = torch.rand(probs.shape, device=probs.device)\n ordered_scores = cumsum - ordered * U\n _, sorted_indices = torch.sort(indices, descending=False, dim=-1)\n scores = ordered_scores.gather(dim=-1, index=sorted_indices)\n return scores\n\n def _calculate_single_label(self, probs, label):\n indices, ordered, cumsum = self._sort_sum(probs)\n U = torch.rand(indices.shape[0], device=probs.device)\n idx = torch.where(indices == label.view(-1, 1))\n scores_first_rank = U * cumsum[idx]\n scores_usual = self.__weight * (idx[1] - U) + ordered[:, 0]\n return torch.where(idx[1] == 0, scores_first_rank, scores_usual)"
},
{
"identifier": "THR",
"path": "torchcp/classification/scores/thr.py",
"snippet": "class THR(BaseScore):\n \"\"\"\n Threshold conformal predictors (Sadinle et al., 2016).\n paper : https://arxiv.org/abs/1609.00451.\n \n :param score_type: a transformation on logits. Default: \"softmax\". Optional: \"softmax\", \"Identity\", \"log_softmax\" or \"log\".\n \"\"\"\n\n def __init__(self, score_type=\"softmax\") -> None:\n \n super().__init__()\n self.score_type = score_type\n if score_type == \"Identity\":\n self.transform = lambda x: x\n elif score_type == \"softmax\":\n self.transform = lambda x: torch.softmax(x, dim=- 1)\n elif score_type == \"log_softmax\":\n self.transform = lambda x: torch.log_softmax(x, dim=-1)\n elif score_type == \"log\":\n self.transform = lambda x: torch.log(x)\n else:\n raise NotImplementedError\n\n def __call__(self, logits, label=None):\n assert len(logits.shape) <= 2, \"The dimension of logits must be less than 2.\"\n if len(logits.shape) == 1:\n logits = logits.unsqueeze(0)\n temp_values = self.transform(logits)\n if label is None:\n return self.__calculate_all_label(temp_values)\n else:\n return self.__calculate_single_label(temp_values, label)\n\n def __calculate_single_label(self, temp_values, label):\n return 1 - temp_values[torch.arange(label.shape[0], device=temp_values.device), label]\n\n def __calculate_all_label(self, temp_values):\n return 1 - temp_values"
},
{
"identifier": "fix_randomness",
"path": "torchcp/utils/common.py",
"snippet": "def fix_randomness(seed=0):\n \"\"\"\n Fix the random seed for python, torch, numpy.\n\n :param seed: the random seed\n \"\"\"\n np.random.seed(seed=seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n random.seed(seed)"
}
] | import argparse
import itertools
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from dataset import build_dataset
from torchcp.classification.loss import ConfTr
from torchcp.classification.predictors import SplitPredictor, ClusterPredictor, ClassWisePredictor
from torchcp.classification.scores import THR, APS, SAPS, RAPS
from torchcp.utils import fix_randomness | 8,108 | # Copyright (c) 2023-present, SUSTech-ML.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# @Time : 13/12/2023 21:13
# Copyright (c) 2023-present, SUSTech-ML.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.fc1 = nn.Linear(28 * 28, 500)
self.fc2 = nn.Linear(500, 10)
def forward(self, x):
x = x.view(-1, 28 * 28)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return x
def train(model, device, train_loader, criterion, optimizer, epoch):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = criterion(output, target)
loss.backward()
optimizer.step()
def test_training():
alpha = 0.01
num_trials = 5
result = {}
for loss in ["CE", "ConfTr"]:
print(f"############################## {loss} #########################")
result[loss] = {}
if loss == "CE":
criterion = nn.CrossEntropyLoss()
elif loss == "ConfTr":
predictor = SplitPredictor(score_function=THR(score_type="log_softmax"))
| # Copyright (c) 2023-present, SUSTech-ML.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# @Time : 13/12/2023 21:13
# Copyright (c) 2023-present, SUSTech-ML.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.fc1 = nn.Linear(28 * 28, 500)
self.fc2 = nn.Linear(500, 10)
def forward(self, x):
x = x.view(-1, 28 * 28)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return x
def train(model, device, train_loader, criterion, optimizer, epoch):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = criterion(output, target)
loss.backward()
optimizer.step()
def test_training():
alpha = 0.01
num_trials = 5
result = {}
for loss in ["CE", "ConfTr"]:
print(f"############################## {loss} #########################")
result[loss] = {}
if loss == "CE":
criterion = nn.CrossEntropyLoss()
elif loss == "ConfTr":
predictor = SplitPredictor(score_function=THR(score_type="log_softmax")) | criterion = ConfTr(weight=0.01, | 0 | 2023-12-06 09:08:41+00:00 | 12k |
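The target line of this record wraps a SplitPredictor (with a THR score) in a ConfTr criterion. As a minimal sketch, the split-conformal threshold computed by SplitPredictor.calculate_threshold in the context snippet can be reproduced on toy scores as below; the helper name and the random calibration data are assumptions for illustration only.
import math
import torch

def conformal_threshold(scores, alpha):
    # Mirrors SplitPredictor._calculate_conformal_value: quantile of calibration
    # scores at the finite-sample-corrected level (n + 1) * (1 - alpha) / n.
    n = scores.shape[0]
    if n == 0:
        return torch.tensor(float("inf"))  # no calibration scores available
    level = math.ceil(n + 1) * (1 - alpha) / n
    if level > 1:
        return torch.tensor(float("inf"))  # too few calibration points for this alpha
    return torch.quantile(scores, level)

cal_scores = torch.rand(200)                  # pretend THR scores (1 - prob of true label)
q_hat = conformal_threshold(cal_scores, 0.1)
# At test time, a label enters the prediction set when its non-conformity score <= q_hat.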
OpenDriveLab/LaneSegNet | projects/lanesegnet/models/modules/bevformer_constructer.py | [
{
"identifier": "BEV_CONSTRUCTOR",
"path": "projects/lanesegnet/utils/builder.py",
"snippet": "BEV_CONSTRUCTOR = Registry('BEV Constructor')"
},
{
"identifier": "TemporalSelfAttention",
"path": "projects/bevformer/modules/temporal_self_attention.py",
"snippet": "class TemporalSelfAttention(BaseModule):\r\n \"\"\"An attention module used in BEVFormer based on Deformable-Detr.\r\n\r\n `Deformable DETR: Deformable Transformers for End-to-End Object Detection.\r\n <https://arxiv.org/pdf/2010.04159.pdf>`_.\r\n\r\n Args:\r\n embed_dims (int): The embedding dimension of Attention.\r\n Default: 256.\r\n num_heads (int): Parallel attention heads. Default: 64.\r\n num_levels (int): The number of feature map used in\r\n Attention. Default: 4.\r\n num_points (int): The number of sampling points for\r\n each query in each head. Default: 4.\r\n im2col_step (int): The step used in image_to_column.\r\n Default: 64.\r\n dropout (float): A Dropout layer on `inp_identity`.\r\n Default: 0.1.\r\n batch_first (bool): Key, Query and Value are shape of\r\n (batch, n, embed_dim)\r\n or (n, batch, embed_dim). Default to True.\r\n norm_cfg (dict): Config dict for normalization layer.\r\n Default: None.\r\n init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.\r\n Default: None.\r\n num_bev_queue (int): In this version, we only use one history BEV and one currenct BEV.\r\n the length of BEV queue is 2.\r\n \"\"\"\r\n\r\n def __init__(self,\r\n embed_dims=256,\r\n num_heads=8,\r\n num_levels=4,\r\n num_points=4,\r\n num_bev_queue=2,\r\n im2col_step=64,\r\n dropout=0.1,\r\n batch_first=True,\r\n norm_cfg=None,\r\n init_cfg=None):\r\n\r\n super().__init__(init_cfg)\r\n if embed_dims % num_heads != 0:\r\n raise ValueError(f'embed_dims must be divisible by num_heads, '\r\n f'but got {embed_dims} and {num_heads}')\r\n dim_per_head = embed_dims // num_heads\r\n self.norm_cfg = norm_cfg\r\n self.dropout = nn.Dropout(dropout)\r\n self.batch_first = batch_first\r\n self.fp16_enabled = False\r\n\r\n # you'd better set dim_per_head to a power of 2\r\n # which is more efficient in the CUDA implementation\r\n def _is_power_of_2(n):\r\n if (not isinstance(n, int)) or (n < 0):\r\n raise ValueError(\r\n 'invalid input for _is_power_of_2: {} (type: {})'.format(\r\n n, type(n)))\r\n return (n & (n - 1) == 0) and n != 0\r\n\r\n if not _is_power_of_2(dim_per_head):\r\n warnings.warn(\r\n \"You'd better set embed_dims in \"\r\n 'MultiScaleDeformAttention to make '\r\n 'the dimension of each attention head a power of 2 '\r\n 'which is more efficient in our CUDA implementation.')\r\n\r\n self.im2col_step = im2col_step\r\n self.embed_dims = embed_dims\r\n self.num_levels = num_levels\r\n self.num_heads = num_heads\r\n self.num_points = num_points\r\n self.num_bev_queue = num_bev_queue\r\n self.sampling_offsets = nn.Linear(\r\n embed_dims*self.num_bev_queue, num_bev_queue*num_heads * num_levels * num_points * 2)\r\n self.attention_weights = nn.Linear(embed_dims*self.num_bev_queue,\r\n num_bev_queue*num_heads * num_levels * num_points)\r\n self.value_proj = nn.Linear(embed_dims, embed_dims)\r\n self.output_proj = nn.Linear(embed_dims, embed_dims)\r\n self.init_weights()\r\n\r\n def init_weights(self):\r\n \"\"\"Default initialization for Parameters of Module.\"\"\"\r\n constant_init(self.sampling_offsets, 0.)\r\n thetas = torch.arange(\r\n self.num_heads,\r\n dtype=torch.float32) * (2.0 * math.pi / self.num_heads)\r\n grid_init = torch.stack([thetas.cos(), thetas.sin()], -1)\r\n grid_init = (grid_init /\r\n grid_init.abs().max(-1, keepdim=True)[0]).view(\r\n self.num_heads, 1, 1,\r\n 2).repeat(1, self.num_levels*self.num_bev_queue, self.num_points, 1)\r\n\r\n for i in range(self.num_points):\r\n grid_init[:, :, i, :] *= i + 1\r\n\r\n self.sampling_offsets.bias.data = 
grid_init.view(-1)\r\n constant_init(self.attention_weights, val=0., bias=0.)\r\n xavier_init(self.value_proj, distribution='uniform', bias=0.)\r\n xavier_init(self.output_proj, distribution='uniform', bias=0.)\r\n self._is_init = True\r\n\r\n def forward(self,\r\n query,\r\n key=None,\r\n value=None,\r\n identity=None,\r\n query_pos=None,\r\n key_padding_mask=None,\r\n reference_points=None,\r\n spatial_shapes=None,\r\n level_start_index=None,\r\n flag='decoder',\r\n\r\n **kwargs):\r\n \"\"\"Forward Function of MultiScaleDeformAttention.\r\n\r\n Args:\r\n query (Tensor): Query of Transformer with shape\r\n (num_query, bs, embed_dims).\r\n key (Tensor): The key tensor with shape\r\n `(num_key, bs, embed_dims)`.\r\n value (Tensor): The value tensor with shape\r\n `(num_key, bs, embed_dims)`.\r\n identity (Tensor): The tensor used for addition, with the\r\n same shape as `query`. Default None. If None,\r\n `query` will be used.\r\n query_pos (Tensor): The positional encoding for `query`.\r\n Default: None.\r\n key_pos (Tensor): The positional encoding for `key`. Default\r\n None.\r\n reference_points (Tensor): The normalized reference\r\n points with shape (bs, num_query, num_levels, 2),\r\n all elements is range in [0, 1], top-left (0,0),\r\n bottom-right (1, 1), including padding area.\r\n or (N, Length_{query}, num_levels, 4), add\r\n additional two dimensions is (w, h) to\r\n form reference boxes.\r\n key_padding_mask (Tensor): ByteTensor for `query`, with\r\n shape [bs, num_key].\r\n spatial_shapes (Tensor): Spatial shape of features in\r\n different levels. With shape (num_levels, 2),\r\n last dimension represents (h, w).\r\n level_start_index (Tensor): The start index of each level.\r\n A tensor has shape ``(num_levels, )`` and can be represented\r\n as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...].\r\n\r\n Returns:\r\n Tensor: forwarded results with shape [num_query, bs, embed_dims].\r\n \"\"\"\r\n\r\n if value is None:\r\n assert self.batch_first\r\n bs, len_bev, c = query.shape\r\n value = torch.stack([query, query], 1).reshape(bs*2, len_bev, c)\r\n\r\n # value = torch.cat([query, query], 0)\r\n\r\n if identity is None:\r\n identity = query\r\n if query_pos is not None:\r\n query = query + query_pos\r\n if not self.batch_first:\r\n # change to (bs, num_query ,embed_dims)\r\n query = query.permute(1, 0, 2)\r\n value = value.permute(1, 0, 2)\r\n bs, num_query, embed_dims = query.shape\r\n _, num_value, _ = value.shape\r\n assert (spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum() == num_value\r\n assert self.num_bev_queue == 2\r\n\r\n query = torch.cat([value[:bs], query], -1)\r\n value = self.value_proj(value)\r\n\r\n if key_padding_mask is not None:\r\n value = value.masked_fill(key_padding_mask[..., None], 0.0)\r\n\r\n value = value.reshape(bs*self.num_bev_queue,\r\n num_value, self.num_heads, -1)\r\n\r\n sampling_offsets = self.sampling_offsets(query)\r\n sampling_offsets = sampling_offsets.view(\r\n bs, num_query, self.num_heads, self.num_bev_queue, self.num_levels, self.num_points, 2)\r\n attention_weights = self.attention_weights(query).view(\r\n bs, num_query, self.num_heads, self.num_bev_queue, self.num_levels * self.num_points)\r\n attention_weights = attention_weights.softmax(-1)\r\n\r\n attention_weights = attention_weights.view(bs, num_query,\r\n self.num_heads,\r\n self.num_bev_queue,\r\n self.num_levels,\r\n self.num_points)\r\n\r\n attention_weights = attention_weights.permute(0, 3, 1, 2, 4, 5)\\\r\n .reshape(bs*self.num_bev_queue, num_query, self.num_heads, self.num_levels, 
self.num_points).contiguous()\r\n sampling_offsets = sampling_offsets.permute(0, 3, 1, 2, 4, 5, 6)\\\r\n .reshape(bs*self.num_bev_queue, num_query, self.num_heads, self.num_levels, self.num_points, 2)\r\n\r\n if reference_points.shape[-1] == 2:\r\n offset_normalizer = torch.stack(\r\n [spatial_shapes[..., 1], spatial_shapes[..., 0]], -1)\r\n sampling_locations = reference_points[:, :, None, :, None, :] \\\r\n + sampling_offsets \\\r\n / offset_normalizer[None, None, None, :, None, :]\r\n\r\n elif reference_points.shape[-1] == 4:\r\n sampling_locations = reference_points[:, :, None, :, None, :2] \\\r\n + sampling_offsets / self.num_points \\\r\n * reference_points[:, :, None, :, None, 2:] \\\r\n * 0.5\r\n else:\r\n raise ValueError(\r\n f'Last dim of reference_points must be'\r\n f' 2 or 4, but get {reference_points.shape[-1]} instead.')\r\n if torch.cuda.is_available() and value.is_cuda:\r\n\r\n # using fp16 deformable attention is unstable because it performs many sum operations\r\n if value.dtype == torch.float16:\r\n MultiScaleDeformableAttnFunction = MultiScaleDeformableAttnFunction_fp32\r\n else:\r\n MultiScaleDeformableAttnFunction = MultiScaleDeformableAttnFunction_fp32\r\n output = MultiScaleDeformableAttnFunction.apply(\r\n value, spatial_shapes, level_start_index, sampling_locations,\r\n attention_weights, self.im2col_step)\r\n else:\r\n\r\n output = multi_scale_deformable_attn_pytorch(\r\n value, spatial_shapes, sampling_locations, attention_weights)\r\n\r\n # output shape (bs*num_bev_queue, num_query, embed_dims)\r\n # (bs*num_bev_queue, num_query, embed_dims)-> (num_query, embed_dims, bs*num_bev_queue)\r\n output = output.permute(1, 2, 0)\r\n\r\n # fuse history value and current value\r\n # (num_query, embed_dims, bs*num_bev_queue)-> (num_query, embed_dims, bs, num_bev_queue)\r\n output = output.view(num_query, embed_dims, bs, self.num_bev_queue)\r\n output = output.mean(-1)\r\n\r\n # (num_query, embed_dims, bs)-> (bs, num_query, embed_dims)\r\n output = output.permute(2, 0, 1)\r\n\r\n output = self.output_proj(output)\r\n\r\n if not self.batch_first:\r\n output = output.permute(1, 0, 2)\r\n\r\n return self.dropout(output) + identity\r"
},
{
"identifier": "MSDeformableAttention3D",
"path": "projects/bevformer/modules/spatial_cross_attention.py",
"snippet": "class MSDeformableAttention3D(BaseModule):\r\n \"\"\"An attention module used in BEVFormer based on Deformable-Detr.\r\n `Deformable DETR: Deformable Transformers for End-to-End Object Detection.\r\n <https://arxiv.org/pdf/2010.04159.pdf>`_.\r\n Args:\r\n embed_dims (int): The embedding dimension of Attention.\r\n Default: 256.\r\n num_heads (int): Parallel attention heads. Default: 64.\r\n num_levels (int): The number of feature map used in\r\n Attention. Default: 4.\r\n num_points (int): The number of sampling points for\r\n each query in each head. Default: 4.\r\n im2col_step (int): The step used in image_to_column.\r\n Default: 64.\r\n dropout (float): A Dropout layer on `inp_identity`.\r\n Default: 0.1.\r\n batch_first (bool): Key, Query and Value are shape of\r\n (batch, n, embed_dim)\r\n or (n, batch, embed_dim). Default to False.\r\n norm_cfg (dict): Config dict for normalization layer.\r\n Default: None.\r\n init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.\r\n Default: None.\r\n \"\"\"\r\n\r\n def __init__(self,\r\n embed_dims=256,\r\n num_heads=8,\r\n num_levels=4,\r\n num_points=8,\r\n im2col_step=64,\r\n dropout=0.1,\r\n batch_first=True,\r\n norm_cfg=None,\r\n init_cfg=None):\r\n super().__init__(init_cfg)\r\n if embed_dims % num_heads != 0:\r\n raise ValueError(f'embed_dims must be divisible by num_heads, '\r\n f'but got {embed_dims} and {num_heads}')\r\n dim_per_head = embed_dims // num_heads\r\n self.norm_cfg = norm_cfg\r\n self.batch_first = batch_first\r\n self.output_proj = None\r\n self.fp16_enabled = False\r\n\r\n # you'd better set dim_per_head to a power of 2\r\n # which is more efficient in the CUDA implementation\r\n def _is_power_of_2(n):\r\n if (not isinstance(n, int)) or (n < 0):\r\n raise ValueError(\r\n 'invalid input for _is_power_of_2: {} (type: {})'.format(\r\n n, type(n)))\r\n return (n & (n - 1) == 0) and n != 0\r\n\r\n if not _is_power_of_2(dim_per_head):\r\n warnings.warn(\r\n \"You'd better set embed_dims in \"\r\n 'MultiScaleDeformAttention to make '\r\n 'the dimension of each attention head a power of 2 '\r\n 'which is more efficient in our CUDA implementation.')\r\n\r\n self.im2col_step = im2col_step\r\n self.embed_dims = embed_dims\r\n self.num_levels = num_levels\r\n self.num_heads = num_heads\r\n self.num_points = num_points\r\n self.sampling_offsets = nn.Linear(\r\n embed_dims, num_heads * num_levels * num_points * 2)\r\n self.attention_weights = nn.Linear(embed_dims,\r\n num_heads * num_levels * num_points)\r\n self.value_proj = nn.Linear(embed_dims, embed_dims)\r\n\r\n self.init_weights()\r\n\r\n def init_weights(self):\r\n \"\"\"Default initialization for Parameters of Module.\"\"\"\r\n constant_init(self.sampling_offsets, 0.)\r\n thetas = torch.arange(\r\n self.num_heads,\r\n dtype=torch.float32) * (2.0 * math.pi / self.num_heads)\r\n grid_init = torch.stack([thetas.cos(), thetas.sin()], -1)\r\n grid_init = (grid_init /\r\n grid_init.abs().max(-1, keepdim=True)[0]).view(\r\n self.num_heads, 1, 1,\r\n 2).repeat(1, self.num_levels, self.num_points, 1)\r\n for i in range(self.num_points):\r\n grid_init[:, :, i, :] *= i + 1\r\n\r\n self.sampling_offsets.bias.data = grid_init.view(-1)\r\n constant_init(self.attention_weights, val=0., bias=0.)\r\n xavier_init(self.value_proj, distribution='uniform', bias=0.)\r\n xavier_init(self.output_proj, distribution='uniform', bias=0.)\r\n self._is_init = True\r\n\r\n def forward(self,\r\n query,\r\n key=None,\r\n value=None,\r\n identity=None,\r\n query_pos=None,\r\n 
key_padding_mask=None,\r\n reference_points=None,\r\n spatial_shapes=None,\r\n level_start_index=None,\r\n **kwargs):\r\n \"\"\"Forward Function of MultiScaleDeformAttention.\r\n Args:\r\n query (Tensor): Query of Transformer with shape\r\n ( bs, num_query, embed_dims).\r\n key (Tensor): The key tensor with shape\r\n `(bs, num_key, embed_dims)`.\r\n value (Tensor): The value tensor with shape\r\n `(bs, num_key, embed_dims)`.\r\n identity (Tensor): The tensor used for addition, with the\r\n same shape as `query`. Default None. If None,\r\n `query` will be used.\r\n query_pos (Tensor): The positional encoding for `query`.\r\n Default: None.\r\n key_pos (Tensor): The positional encoding for `key`. Default\r\n None.\r\n reference_points (Tensor): The normalized reference\r\n points with shape (bs, num_query, num_levels, 2),\r\n all elements is range in [0, 1], top-left (0,0),\r\n bottom-right (1, 1), including padding area.\r\n or (N, Length_{query}, num_levels, 4), add\r\n additional two dimensions is (w, h) to\r\n form reference boxes.\r\n key_padding_mask (Tensor): ByteTensor for `query`, with\r\n shape [bs, num_key].\r\n spatial_shapes (Tensor): Spatial shape of features in\r\n different levels. With shape (num_levels, 2),\r\n last dimension represents (h, w).\r\n level_start_index (Tensor): The start index of each level.\r\n A tensor has shape ``(num_levels, )`` and can be represented\r\n as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...].\r\n Returns:\r\n Tensor: forwarded results with shape [num_query, bs, embed_dims].\r\n \"\"\"\r\n\r\n if value is None:\r\n value = query\r\n if identity is None:\r\n identity = query\r\n if query_pos is not None:\r\n query = query + query_pos\r\n\r\n if not self.batch_first:\r\n # change to (bs, num_query ,embed_dims)\r\n query = query.permute(1, 0, 2)\r\n value = value.permute(1, 0, 2)\r\n\r\n bs, num_query, _ = query.shape\r\n bs, num_value, _ = value.shape\r\n assert (spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum() == num_value\r\n\r\n value = self.value_proj(value)\r\n if key_padding_mask is not None:\r\n value = value.masked_fill(key_padding_mask[..., None], 0.0)\r\n value = value.view(bs, num_value, self.num_heads, -1)\r\n sampling_offsets = self.sampling_offsets(query).view(\r\n bs, num_query, self.num_heads, self.num_levels, self.num_points, 2)\r\n attention_weights = self.attention_weights(query).view(\r\n bs, num_query, self.num_heads, self.num_levels * self.num_points)\r\n\r\n attention_weights = attention_weights.softmax(-1)\r\n\r\n attention_weights = attention_weights.view(bs, num_query,\r\n self.num_heads,\r\n self.num_levels,\r\n self.num_points)\r\n\r\n if reference_points.shape[-1] == 2:\r\n \"\"\"\r\n For each BEV query, it owns `num_Z_anchors` in 3D space that having different heights.\r\n After proejcting, each BEV query has `num_Z_anchors` reference points in each 2D image.\r\n For each referent point, we sample `num_points` sampling points.\r\n For `num_Z_anchors` reference points, it has overall `num_points * num_Z_anchors` sampling points.\r\n \"\"\"\r\n offset_normalizer = torch.stack(\r\n [spatial_shapes[..., 1], spatial_shapes[..., 0]], -1)\r\n\r\n bs, num_query, num_Z_anchors, xy = reference_points.shape\r\n reference_points = reference_points[:, :, None, None, None, :, :]\r\n sampling_offsets = sampling_offsets / \\\r\n offset_normalizer[None, None, None, :, None, :]\r\n bs, num_query, num_heads, num_levels, num_all_points, xy = sampling_offsets.shape\r\n sampling_offsets = sampling_offsets.view(\r\n bs, num_query, num_heads, 
num_levels, num_all_points // num_Z_anchors, num_Z_anchors, xy)\r\n sampling_locations = reference_points + sampling_offsets\r\n bs, num_query, num_heads, num_levels, num_points, num_Z_anchors, xy = sampling_locations.shape\r\n assert num_all_points == num_points * num_Z_anchors\r\n\r\n sampling_locations = sampling_locations.view(\r\n bs, num_query, num_heads, num_levels, num_all_points, xy)\r\n\r\n elif reference_points.shape[-1] == 4:\r\n assert False\r\n else:\r\n raise ValueError(\r\n f'Last dim of reference_points must be'\r\n f' 2 or 4, but get {reference_points.shape[-1]} instead.')\r\n\r\n # sampling_locations.shape: bs, num_query, num_heads, num_levels, num_all_points, 2\r\n # attention_weights.shape: bs, num_query, num_heads, num_levels, num_all_points\r\n #\r\n\r\n if torch.cuda.is_available() and value.is_cuda:\r\n if value.dtype == torch.float16:\r\n MultiScaleDeformableAttnFunction = MultiScaleDeformableAttnFunction_fp32\r\n else:\r\n MultiScaleDeformableAttnFunction = MultiScaleDeformableAttnFunction_fp32\r\n output = MultiScaleDeformableAttnFunction.apply(\r\n value, spatial_shapes, level_start_index, sampling_locations,\r\n attention_weights, self.im2col_step)\r\n else:\r\n output = multi_scale_deformable_attn_pytorch(\r\n value, spatial_shapes, sampling_locations, attention_weights)\r\n if not self.batch_first:\r\n output = output.permute(1, 0, 2)\r\n\r\n return output\r"
},
{
"identifier": "CustomMSDeformableAttention",
"path": "projects/bevformer/modules/decoder.py",
"snippet": "class CustomMSDeformableAttention(BaseModule):\r\n \"\"\"An attention module used in Deformable-Detr.\r\n\r\n `Deformable DETR: Deformable Transformers for End-to-End Object Detection.\r\n <https://arxiv.org/pdf/2010.04159.pdf>`_.\r\n\r\n Args:\r\n embed_dims (int): The embedding dimension of Attention.\r\n Default: 256.\r\n num_heads (int): Parallel attention heads. Default: 64.\r\n num_levels (int): The number of feature map used in\r\n Attention. Default: 4.\r\n num_points (int): The number of sampling points for\r\n each query in each head. Default: 4.\r\n im2col_step (int): The step used in image_to_column.\r\n Default: 64.\r\n dropout (float): A Dropout layer on `inp_identity`.\r\n Default: 0.1.\r\n batch_first (bool): Key, Query and Value are shape of\r\n (batch, n, embed_dim)\r\n or (n, batch, embed_dim). Default to False.\r\n norm_cfg (dict): Config dict for normalization layer.\r\n Default: None.\r\n init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.\r\n Default: None.\r\n \"\"\"\r\n\r\n def __init__(self,\r\n embed_dims=256,\r\n num_heads=8,\r\n num_levels=4,\r\n num_points=4,\r\n im2col_step=64,\r\n dropout=0.1,\r\n batch_first=False,\r\n norm_cfg=None,\r\n init_cfg=None):\r\n super().__init__(init_cfg)\r\n if embed_dims % num_heads != 0:\r\n raise ValueError(f'embed_dims must be divisible by num_heads, '\r\n f'but got {embed_dims} and {num_heads}')\r\n dim_per_head = embed_dims // num_heads\r\n self.norm_cfg = norm_cfg\r\n self.dropout = nn.Dropout(dropout)\r\n self.batch_first = batch_first\r\n self.fp16_enabled = False\r\n\r\n # you'd better set dim_per_head to a power of 2\r\n # which is more efficient in the CUDA implementation\r\n def _is_power_of_2(n):\r\n if (not isinstance(n, int)) or (n < 0):\r\n raise ValueError(\r\n 'invalid input for _is_power_of_2: {} (type: {})'.format(\r\n n, type(n)))\r\n return (n & (n - 1) == 0) and n != 0\r\n\r\n if not _is_power_of_2(dim_per_head):\r\n warnings.warn(\r\n \"You'd better set embed_dims in \"\r\n 'MultiScaleDeformAttention to make '\r\n 'the dimension of each attention head a power of 2 '\r\n 'which is more efficient in our CUDA implementation.')\r\n\r\n self.im2col_step = im2col_step\r\n self.embed_dims = embed_dims\r\n self.num_levels = num_levels\r\n self.num_heads = num_heads\r\n self.num_points = num_points\r\n self.sampling_offsets = nn.Linear(\r\n embed_dims, num_heads * num_levels * num_points * 2)\r\n self.attention_weights = nn.Linear(embed_dims,\r\n num_heads * num_levels * num_points)\r\n self.value_proj = nn.Linear(embed_dims, embed_dims)\r\n self.output_proj = nn.Linear(embed_dims, embed_dims)\r\n self.init_weights()\r\n\r\n def init_weights(self):\r\n \"\"\"Default initialization for Parameters of Module.\"\"\"\r\n constant_init(self.sampling_offsets, 0.)\r\n thetas = torch.arange(\r\n self.num_heads,\r\n dtype=torch.float32) * (2.0 * math.pi / self.num_heads)\r\n grid_init = torch.stack([thetas.cos(), thetas.sin()], -1)\r\n grid_init = (grid_init /\r\n grid_init.abs().max(-1, keepdim=True)[0]).view(\r\n self.num_heads, 1, 1,\r\n 2).repeat(1, self.num_levels, self.num_points, 1)\r\n for i in range(self.num_points):\r\n grid_init[:, :, i, :] *= i + 1\r\n\r\n self.sampling_offsets.bias.data = grid_init.view(-1)\r\n constant_init(self.attention_weights, val=0., bias=0.)\r\n xavier_init(self.value_proj, distribution='uniform', bias=0.)\r\n xavier_init(self.output_proj, distribution='uniform', bias=0.)\r\n self._is_init = True\r\n\r\n @deprecated_api_warning({'residual': 'identity'},\r\n 
cls_name='MultiScaleDeformableAttention')\r\n def forward(self,\r\n query,\r\n key=None,\r\n value=None,\r\n identity=None,\r\n query_pos=None,\r\n key_padding_mask=None,\r\n reference_points=None,\r\n spatial_shapes=None,\r\n level_start_index=None,\r\n flag='decoder',\r\n **kwargs):\r\n \"\"\"Forward Function of MultiScaleDeformAttention.\r\n\r\n Args:\r\n query (Tensor): Query of Transformer with shape\r\n (num_query, bs, embed_dims).\r\n key (Tensor): The key tensor with shape\r\n `(num_key, bs, embed_dims)`.\r\n value (Tensor): The value tensor with shape\r\n `(num_key, bs, embed_dims)`.\r\n identity (Tensor): The tensor used for addition, with the\r\n same shape as `query`. Default None. If None,\r\n `query` will be used.\r\n query_pos (Tensor): The positional encoding for `query`.\r\n Default: None.\r\n key_pos (Tensor): The positional encoding for `key`. Default\r\n None.\r\n reference_points (Tensor): The normalized reference\r\n points with shape (bs, num_query, num_levels, 2),\r\n all elements is range in [0, 1], top-left (0,0),\r\n bottom-right (1, 1), including padding area.\r\n or (N, Length_{query}, num_levels, 4), add\r\n additional two dimensions is (w, h) to\r\n form reference boxes.\r\n key_padding_mask (Tensor): ByteTensor for `query`, with\r\n shape [bs, num_key].\r\n spatial_shapes (Tensor): Spatial shape of features in\r\n different levels. With shape (num_levels, 2),\r\n last dimension represents (h, w).\r\n level_start_index (Tensor): The start index of each level.\r\n A tensor has shape ``(num_levels, )`` and can be represented\r\n as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...].\r\n\r\n Returns:\r\n Tensor: forwarded results with shape [num_query, bs, embed_dims].\r\n \"\"\"\r\n\r\n if value is None:\r\n value = query\r\n\r\n if identity is None:\r\n identity = query\r\n if query_pos is not None:\r\n query = query + query_pos\r\n if not self.batch_first:\r\n # change to (bs, num_query ,embed_dims)\r\n query = query.permute(1, 0, 2)\r\n value = value.permute(1, 0, 2)\r\n\r\n bs, num_query, _ = query.shape\r\n bs, num_value, _ = value.shape\r\n assert (spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum() == num_value\r\n\r\n value = self.value_proj(value)\r\n if key_padding_mask is not None:\r\n value = value.masked_fill(key_padding_mask[..., None], 0.0)\r\n value = value.view(bs, num_value, self.num_heads, -1)\r\n\r\n sampling_offsets = self.sampling_offsets(query).view(\r\n bs, num_query, self.num_heads, self.num_levels, self.num_points, 2)\r\n attention_weights = self.attention_weights(query).view(\r\n bs, num_query, self.num_heads, self.num_levels * self.num_points)\r\n attention_weights = attention_weights.softmax(-1)\r\n\r\n attention_weights = attention_weights.view(bs, num_query,\r\n self.num_heads,\r\n self.num_levels,\r\n self.num_points)\r\n if reference_points.shape[-1] == 2:\r\n offset_normalizer = torch.stack(\r\n [spatial_shapes[..., 1], spatial_shapes[..., 0]], -1)\r\n sampling_locations = reference_points[:, :, None, :, None, :] \\\r\n + sampling_offsets \\\r\n / offset_normalizer[None, None, None, :, None, :]\r\n elif reference_points.shape[-1] == 4:\r\n sampling_locations = reference_points[:, :, None, :, None, :2] \\\r\n + sampling_offsets / self.num_points \\\r\n * reference_points[:, :, None, :, None, 2:] \\\r\n * 0.5\r\n else:\r\n raise ValueError(\r\n f'Last dim of reference_points must be'\r\n f' 2 or 4, but get {reference_points.shape[-1]} instead.')\r\n if torch.cuda.is_available() and value.is_cuda:\r\n\r\n # using fp16 deformable attention is 
unstable because it performs many sum operations\r\n if value.dtype == torch.float16:\r\n MultiScaleDeformableAttnFunction = MultiScaleDeformableAttnFunction_fp32\r\n else:\r\n MultiScaleDeformableAttnFunction = MultiScaleDeformableAttnFunction_fp32\r\n output = MultiScaleDeformableAttnFunction.apply(\r\n value, spatial_shapes, level_start_index, sampling_locations,\r\n attention_weights, self.im2col_step)\r\n else:\r\n output = multi_scale_deformable_attn_pytorch(\r\n value, spatial_shapes, sampling_locations, attention_weights)\r\n\r\n output = self.output_proj(output)\r\n\r\n if not self.batch_first:\r\n # (num_query, bs ,embed_dims)\r\n output = output.permute(1, 0, 2)\r\n\r\n return self.dropout(output) + identity\r"
}
] | import numpy as np
import torch
import torch.nn as nn
from torch.nn.init import normal_
from torchvision.transforms.functional import rotate
from mmcv.cnn import xavier_init
from mmcv.cnn.bricks.transformer import build_transformer_layer_sequence, build_positional_encoding
from mmcv.runner.base_module import BaseModule
from ...utils.builder import BEV_CONSTRUCTOR
from projects.bevformer.modules.temporal_self_attention import TemporalSelfAttention
from projects.bevformer.modules.spatial_cross_attention import MSDeformableAttention3D
from projects.bevformer.modules.decoder import CustomMSDeformableAttention | 8,438 | #---------------------------------------------------------------------------------------#
# LaneSegNet: Map Learning with Lane Segment Perception for Autonomous Driving #
# Source code: https://github.com/OpenDriveLab/LaneSegNet #
# Copyright (c) OpenDriveLab. All rights reserved. #
#---------------------------------------------------------------------------------------#
@BEV_CONSTRUCTOR.register_module()
class BEVFormerConstructer(BaseModule):
"""Implements the BEVFormer BEV Constructer.
Args:
as_two_stage (bool): Generate query from encoder features.
Default: False.
num_feature_levels (int): Number of feature maps from FPN:
Default: 4.
two_stage_num_proposals (int): Number of proposals when set
`as_two_stage` as True. Default: 300.
"""
def __init__(self,
num_feature_levels=4,
num_cams=6,
embed_dims=256,
rotate_prev_bev=True,
use_shift=True,
use_can_bus=True,
can_bus_norm=True,
use_cams_embeds=True,
pc_range=[-51.2, -51.2, -5.0, 51.2, 51.2, 3.0],
bev_h=200,
bev_w=200,
rotate_center=[100, 100],
encoder=None,
positional_encoding=None,
**kwargs):
super(BEVFormerConstructer, self).__init__(**kwargs)
self.embed_dims = embed_dims
self.num_feature_levels = num_feature_levels
self.num_cams = num_cams
self.fp16_enabled = False
self.rotate_prev_bev = rotate_prev_bev
self.use_shift = use_shift
self.use_can_bus = use_can_bus
self.can_bus_norm = can_bus_norm
self.use_cams_embeds = use_cams_embeds
self.encoder = build_transformer_layer_sequence(encoder)
self.positional_encoding = build_positional_encoding(positional_encoding)
self.pc_range = pc_range
self.real_w = self.pc_range[3] - self.pc_range[0]
self.real_h = self.pc_range[4] - self.pc_range[1]
self.bev_h = bev_h
self.bev_w = bev_w
self.rotate_center = rotate_center
self.init_layers()
def init_layers(self):
self.bev_embedding = nn.Embedding(
self.bev_h * self.bev_w, self.embed_dims)
self.level_embeds = nn.Parameter(torch.Tensor(
self.num_feature_levels, self.embed_dims))
self.cams_embeds = nn.Parameter(
torch.Tensor(self.num_cams, self.embed_dims))
self.can_bus_mlp = nn.Sequential(
nn.Linear(18, self.embed_dims // 2),
nn.ReLU(inplace=True),
nn.Linear(self.embed_dims // 2, self.embed_dims),
nn.ReLU(inplace=True),
)
if self.can_bus_norm:
self.can_bus_mlp.add_module('norm', nn.LayerNorm(self.embed_dims))
def init_weights(self):
"""Initialize the transformer weights."""
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
for m in self.modules():
| #---------------------------------------------------------------------------------------#
# LaneSegNet: Map Learning with Lane Segment Perception for Autonomous Driving #
# Source code: https://github.com/OpenDriveLab/LaneSegNet #
# Copyright (c) OpenDriveLab. All rights reserved. #
#---------------------------------------------------------------------------------------#
@BEV_CONSTRUCTOR.register_module()
class BEVFormerConstructer(BaseModule):
"""Implements the BEVFormer BEV Constructer.
Args:
as_two_stage (bool): Generate query from encoder features.
Default: False.
num_feature_levels (int): Number of feature maps from FPN:
Default: 4.
two_stage_num_proposals (int): Number of proposals when set
`as_two_stage` as True. Default: 300.
"""
def __init__(self,
num_feature_levels=4,
num_cams=6,
embed_dims=256,
rotate_prev_bev=True,
use_shift=True,
use_can_bus=True,
can_bus_norm=True,
use_cams_embeds=True,
pc_range=[-51.2, -51.2, -5.0, 51.2, 51.2, 3.0],
bev_h=200,
bev_w=200,
rotate_center=[100, 100],
encoder=None,
positional_encoding=None,
**kwargs):
super(BEVFormerConstructer, self).__init__(**kwargs)
self.embed_dims = embed_dims
self.num_feature_levels = num_feature_levels
self.num_cams = num_cams
self.fp16_enabled = False
self.rotate_prev_bev = rotate_prev_bev
self.use_shift = use_shift
self.use_can_bus = use_can_bus
self.can_bus_norm = can_bus_norm
self.use_cams_embeds = use_cams_embeds
self.encoder = build_transformer_layer_sequence(encoder)
self.positional_encoding = build_positional_encoding(positional_encoding)
self.pc_range = pc_range
self.real_w = self.pc_range[3] - self.pc_range[0]
self.real_h = self.pc_range[4] - self.pc_range[1]
self.bev_h = bev_h
self.bev_w = bev_w
self.rotate_center = rotate_center
self.init_layers()
def init_layers(self):
self.bev_embedding = nn.Embedding(
self.bev_h * self.bev_w, self.embed_dims)
self.level_embeds = nn.Parameter(torch.Tensor(
self.num_feature_levels, self.embed_dims))
self.cams_embeds = nn.Parameter(
torch.Tensor(self.num_cams, self.embed_dims))
self.can_bus_mlp = nn.Sequential(
nn.Linear(18, self.embed_dims // 2),
nn.ReLU(inplace=True),
nn.Linear(self.embed_dims // 2, self.embed_dims),
nn.ReLU(inplace=True),
)
if self.can_bus_norm:
self.can_bus_mlp.add_module('norm', nn.LayerNorm(self.embed_dims))
def init_weights(self):
"""Initialize the transformer weights."""
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
for m in self.modules(): | if isinstance(m, MSDeformableAttention3D) or isinstance(m, TemporalSelfAttention) \ | 2 | 2023-12-06 07:13:48+00:00 | 12k |
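The sample above ends midway through `init_weights`, so the row never shows how the modules registered in `init_layers` get used. As a rough, illustrative sketch only (this is not LaneSegNet's actual `forward`; the feature-map sizes, batch size, and variable names below are assumptions invented for the example), the typical BEVFormer-style pattern is: expand `bev_embedding` into one query per BEV cell, flatten the per-level multi-camera features, and tag them with `cams_embeds` and `level_embeds` before passing everything to the encoder.

```python
import torch
import torch.nn as nn

# Hypothetical sizes for the sketch; only embed_dims/bev_h/bev_w mirror the defaults above.
num_cams, bs, embed_dims, num_levels = 6, 1, 256, 4
bev_h = bev_w = 200

bev_embedding = nn.Embedding(bev_h * bev_w, embed_dims)
level_embeds = nn.Parameter(torch.randn(num_levels, embed_dims))
cams_embeds = nn.Parameter(torch.randn(num_cams, embed_dims))

# Per-level multi-camera features: [bs, num_cams, C, H_l, W_l] (shapes are made up).
feat_shapes = [(32, 56), (16, 28), (8, 14), (4, 7)]
mlvl_feats = [torch.randn(bs, num_cams, embed_dims, h, w) for h, w in feat_shapes]

# One learnable BEV query per grid cell, repeated over the batch: [bev_h*bev_w, bs, C].
bev_queries = bev_embedding.weight.unsqueeze(1).repeat(1, bs, 1)

feat_flatten, spatial_shapes = [], []
for lvl, feat in enumerate(mlvl_feats):
    _, _, _, h, w = feat.shape
    spatial_shapes.append((h, w))
    feat = feat.flatten(3).permute(1, 0, 3, 2)              # [num_cams, bs, H_l*W_l, C]
    feat = feat + cams_embeds[:, None, None, :]             # mark which camera a token came from
    feat = feat + level_embeds[None, None, lvl:lvl + 1, :]  # mark which FPN level it came from
    feat_flatten.append(feat)

feat_flatten = torch.cat(feat_flatten, 2)                   # [num_cams, bs, sum(H_l*W_l), C]
print(bev_queries.shape, feat_flatten.shape, spatial_shapes)
```

The flattened `[num_cams, bs, sum(H_l*W_l), C]` tensor together with `spatial_shapes` is what a deformable-attention encoder needs in order to address each level and camera separately.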
RobertCsordas/moe_attention | layers/transformer/moe_attention_relative_transformer.py | [
{
"identifier": "AttentionMask",
"path": "layers/transformer/multi_head_relative_pos_attention.py",
"snippet": "def shift(posmat: torch.Tensor) -> torch.Tensor:\n def __init__(self, state_size: int, n_heads: int, dropout: float, projection_size: Optional[int] = None):\n def get_attention_scores(self, mask: Optional[torch.Tensor],\n q_content: torch.Tensor, k_content: torch.Tensor,\n q_pos: torch.Tensor, k_pos: torch.Tensor,\n pos_offset: int, ar_gate: Optional[torch.Tensor] = None) -> torch.Tensor:\n def _attention(self, mask: Optional[torch.Tensor],\n q_content: torch.Tensor, k_content: torch.Tensor,\n q_pos: torch.Tensor, k_pos: torch.Tensor,\n v: torch.Tensor, pos_offset: int,\n ar_gate: Optional[torch.Tensor] = None) -> [torch.Tensor, torch.Tensor]:\n def _get_pos_subset(self, pos_encoding: torch.Tensor, length: int, offset: int) -> torch.Tensor:\n def plot(self, options: Dict[str, Any]) -> Dict[str, Any]:\n def __init__(self, state_size: int, n_heads: int, dropout: float = 0.0, input_size: Optional[int] = None,\n projection_size: Optional[int] = None, pos_clamp: Optional[int] = None,\n test_pos_clamp: Optional[int] = None):\n def _create_buffer(self, max_len: int, clamp: Optional[int] = None):\n def get_pos(self, l: int, offset: int) -> torch.Tensor:\n def __init__(self, state_size: int, n_heads: int, dropout: float = 0.0, global_pos_bias: bool = True,\n global_content_bias: bool = True, input_size: Optional[int] = None, absolute_gate: bool = False,\n projection_size: Optional[int] = None, output_size: Optional[int] = None, pos_clamp: Optional[int] = None,\n test_pos_clamp: Optional[int] = None):\n def add_head_specific_bias(self, data: torch.Tensor, bias: Optional[torch.Tensor]) -> torch.Tensor:\n def forward(self, curr_state: torch.Tensor, attend_to: torch.Tensor, mask: Optional[AttentionMask],\n pos_offset: Optional[int] = None, need_weights: bool = False):\n def reset_parameters(self):\nclass RelativeAttentionBase(MultiHeadAttentionBase):\nclass FixedRelativeMultiheadAttentionBase(RelativeAttentionBase):\nclass FixedRelativeMultiheadAttention(AttentionMergeMixin, FixedRelativeMultiheadAttentionBase):"
},
{
"identifier": "ActivationFunction",
"path": "layers/transformer/transformer.py",
"snippet": "class TransformerEncoderLayer(torch.nn.Module):\nclass TransformerDecoderLayer(torch.nn.Module):\nclass TransformerDecoderBase(torch.nn.Module):\n class State:\nclass TransformerEncoder(torch.nn.Module):\nclass TransformerDecoder(TransformerDecoderBase):\nclass TransformerBase(torch.nn.Module):\nclass Transformer(TransformerBase):\n def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation: ActivationFunction = F.relu,\n attention_dropout=0):\n def forward(self, src: torch.Tensor, mask: Optional[AttentionMask] = None) -> torch.Tensor:\n def reset_parameters(self):\n def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation: ActivationFunction = F.relu,\n attention_dropout=0):\n def forward(self, tgt: torch.Tensor, memory: torch.Tensor, tgt_mask: Optional[AttentionMask] = None,\n memory_key_padding_mask: Optional[torch.Tensor] = None,\n full_target: Optional[torch.Tensor] = None, pos_offset: int = 0) -> torch.Tensor:\n def reset_parameters(self):\n def __init__(self, d_model: int):\n def create_state(self, batch_size: int, max_length: int, device: torch.device) -> State:\n def one_step_forward(self, state: State, data: torch.Tensor, *args, **kwargs):\n def __init__(self, layer, n_layers: int, d_model, *args, **kwargs):\n def forward(self, data: torch.Tensor, *args, **kwargs):\n def __init__(self, layer, n_layers: int, d_model: int, *args, **kwargs):\n def forward(self, data: torch.Tensor, *args, **kwargs):\ndef TransformerEncoderWithLayer(layer: Type[torch.nn.Module] = TransformerEncoderLayer):\ndef TransformerDecoderWithLayer(layer: Type[torch.nn.Module] = TransformerDecoderLayer):\n def __init__(self, encoder: torch.nn.Module, decoder: torch.nn.Module):\n def forward(self, src: torch.Tensor, tgt: torch.Tensor, tgt_mask: Optional[torch.Tensor] = None,\n src_mask: Optional[AttentionMask] = None):\n def generate_square_subsequent_mask(sz: int, device: torch.device) -> torch.Tensor:\n def __init__(self, d_model: int = 512, nhead: int = 8, num_encoder_layers: int = 6, num_decoder_layers: int = 6,\n dim_feedforward: int = 2048, dropout: float = 0.1, activation: ActivationFunction = F.relu,\n encoder_layer=TransformerEncoderWithLayer(), decoder_layer=TransformerDecoderWithLayer(),\n attention_dropout: float = 0):"
},
{
"identifier": "reset_prenorm_params",
"path": "layers/transformer/transformer_preln.py",
"snippet": "def reset_prenorm_params(m: torch.nn.Module, n_layers: int):\n for layer in m.modules():\n if isinstance(layer, torch.nn.Linear):\n torch.nn.init.trunc_normal_(layer.weight)\n with torch.no_grad():\n layer.weight.mul_(math.sqrt(2 / (n_layers * layer.weight.shape[1])) / layer.weight.std())\n if layer.bias is not None:\n torch.nn.init.zeros_(layer.bias)\n elif isinstance(layer, torch.nn.LayerNorm):\n torch.nn.init.ones_(layer.weight)\n torch.nn.init.zeros_(layer.bias)"
},
{
"identifier": "FullMoeRelativeAttention",
"path": "layers/transformer/full_moe_relative_attention.py",
"snippet": "class FullMoeRelativeAttention(FullMoeRelativeAttentionCore):\n def __init__(self, state_size: int, n_heads: int, n_experts: int, dropout: float = 0.0, input_size: Optional[int] = None,\n projection_size: Optional[int] = None, output_size: Optional[int] = None, init_std_scale: float = 1.0,\n perplexity_reg: float = 0, share_pk: bool = True, expert_dropout: float = 0.0,\n selection_mode: str = \"sigmoid\", moe_k: int = 2, q_expert: bool = True,\n k_expert: bool = True, v_expert: bool = True, o_expert: bool = True, norm_qk_score: bool = False,\n v_projection_size: Optional[int] = None, same_sel: bool = False,\n qside_n_experts: Optional[int] = None, shared_experts: bool = False,\n kq_n_experts: Optional[int] = None, separate_kq_sel: bool = False,\n normalize_init: bool = False, normalize_retrieval: bool = False):\n\n super().__init__(\n state_size, n_heads, n_experts, dropout, input_size, projection_size, output_size, init_std_scale,\n perplexity_reg, share_pk, expert_dropout, selection_mode, moe_k, q_expert, k_expert, v_expert,\n o_expert, norm_qk_score, v_projection_size, same_sel, qside_n_experts, shared_experts,\n kq_n_experts, separate_kq_sel, normalize_init, normalize_retrieval=normalize_retrieval)\n\n self.pe_size = state_size\n std_pe = init_std_scale * math.sqrt(1 / self.pe_size)\n self.pos_to_pk = torch.nn.Parameter(torch.randn(self.get_n_copies(\"k\") * self.projection_size, self.pe_size) * std_pe)\n\n self.register_buffer(\"pos_encoding\", self.create_pos_buffer(1000), persistent=False)\n\n def shift(self, posmat: torch.Tensor) -> torch.Tensor:\n # shape: [..., n_out, n_in * 2 - 1]\n # return: [..., n_out, n_in]\n\n n_in = (posmat.shape[-1] + 1) // 2\n n_neg = n_in - 1\n n_out = posmat.shape[-2]\n\n assert posmat.shape[-1] == n_in+n_neg\n\n # example:\n #p0[-3], p0[-2], p0[-1], | p0[0], p0[1], p0[2], p0[3] |\n #p1[-3], p1[-2], | p1[-1], p1[0], p1[1], p1[2],| p1[3]\n #p2[-3], |p2[-2], p2[-1], p2[0], p2[1],| p2[2], p2[3]\n #|p3[-3], p3[-2], p3[-1], p3[0],| p3[1], p3[2], p3[3]\n\n posmat = posmat.flatten(-2)\n posmat = posmat.narrow(-1, 1, n_out * (n_in + n_neg - 1))\n\n # example:\n #p0[-2], p0[-1], | p0[0], p0[1], p0[2], p0[3] |,\n #p1[-3], p1[-2] | p1[-1], p1[0], p1[1], p1[2] |,\n #p1[3], p2[-3], | p2[-2], p2[-1], p2[0], p2[1]|,\n #p2[2], p2[3] , |p3[-3], p3[-2], p3[-1], p3[0],|\n\n posmat = posmat.view(*posmat.shape[:-1], n_out, n_in + n_neg - 1)\n return posmat[..., n_neg-1 : ]\n\n def create_pos_buffer(self, max_len: int):\n res = framework.layers.sinusoidal_pos_embedding(self.pe_size, 2 * max_len - 1, -max_len + 1,\n device=self.pos_to_pk.device)\n\n assert res.shape[0] == 2 * max_len - 1\n return res\n\n def get_pos_subset(self, length: int, offset: int) -> torch.Tensor:\n total_len = length + offset\n if (2 * total_len - 1) > self.pos_encoding.shape[0]:\n self.pos_encoding = self.create_pos_buffer(total_len).to(self.pos_encoding.device).type_as(self.pos_encoding)\n\n return self.pos_encoding.narrow(0, self.pos_encoding.shape[0] // 2 - length + 1 - offset, 2 * length - 1)\n\n def attend(self, curr_state: torch.Tensor, attend_to: torch.Tensor, pos_offset: int, v: torch.Tensor,\n k: torch.Tensor, q: torch.Tensor, mask: Optional[torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:\n\n scale = self.scale.sqrt()\n\n k_pos = self.get_pos_subset(attend_to.shape[-2], pos_offset) * scale\n k_pos = F.linear(k_pos, self.pos_to_pk)\n k_pos = self.project_to_torch_order(k_pos)\n\n k_pos = self.dropout(k_pos)\n\n qc = qp = q\n\n att = self.shift(qp @ k_pos.transpose(-2, -1)) + 
qc @ k.transpose(-2, -1)\n return self.attention_proj(att, v, mask)"
},
{
"identifier": "FullMoeRopeAttention",
"path": "layers/transformer/full_moe_relative_attention.py",
"snippet": "class FullMoeRopeAttention(FullMoeRelativeAttentionCore):\n def __init__(self, state_size: int, n_heads: int, n_experts: int, dropout: float = 0.0, input_size: Optional[int] = None,\n projection_size: Optional[int] = None, output_size: Optional[int] = None, init_std_scale: float = 1.0,\n perplexity_reg: float = 0, share_pk: bool = True, expert_dropout: float = 0.0,\n selection_mode: str = \"sigmoid\", moe_k: int = 2, q_expert: bool = True,\n k_expert: bool = True, v_expert: bool = True, o_expert: bool = True, norm_qk_score: bool = False,\n v_projection_size: Optional[int] = None, same_sel: bool = False,\n qside_n_experts: Optional[int] = None, shared_experts: bool = False,\n kq_n_experts: Optional[int] = None, separate_kq_sel: bool = False,\n rotate_fraction: float = 0.5, rope_base: float = 10000, normalize_init: bool = False, normalize_retrieval: bool = False):\n\n super().__init__(\n state_size, n_heads, n_experts, dropout, input_size, projection_size, output_size, init_std_scale,\n perplexity_reg, share_pk, expert_dropout, selection_mode, moe_k, q_expert, k_expert, v_expert,\n o_expert, norm_qk_score, v_projection_size, same_sel, qside_n_experts, shared_experts,\n kq_n_experts, separate_kq_sel, normalize_init, normalize_retrieval=normalize_retrieval)\n\n self.n_rotate = int(rotate_fraction * self.projection_size)\n\n if self.n_rotate > 0:\n self.pe = RotaryPosEncoding(self.n_rotate, seq_dim=-2, base=rope_base)\n\n def rotate(self, q: torch.Tensor, k: torch.Tensor, offset: int) -> Tuple[torch.Tensor, torch.Tensor]:\n if self.n_rotate < self.projection_size:\n r_k = k[..., :self.n_rotate]\n nr_k = k[..., self.n_rotate:]\n r_q = q[..., :self.n_rotate]\n nr_q = q[..., self.n_rotate:]\n\n r_q, r_k = self.pe(r_q, r_k, offset)\n return torch.cat([r_q, nr_q], dim=-1), torch.cat([r_k, nr_k], dim=-1)\n else:\n return self.pe(q, k, offset)\n\n def attend(self, curr_state: torch.Tensor, attend_to: torch.Tensor, pos_offset: int, v: torch.Tensor,\n k: torch.Tensor, q: torch.Tensor, mask: Optional[torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:\n\n if self.n_rotate > 0:\n q, k = self.rotate(q, k, pos_offset or 0)\n\n att = q @ k.transpose(-2, -1)\n return self.attention_proj(att, v, mask)"
},
{
"identifier": "MoA",
"path": "layers/transformer/moa.py",
"snippet": "class MoA(LoggingLayer, RegularizedLayer, OncePerIterLayer, torch.nn.Module):\n def __init__(self, state_size: int, n_heads: int, n_experts: int, dropout: float = 0.0, input_size: Optional[int] = None,\n projection_size: Optional[int] = None, output_size: Optional[int] = None, init_std_scale: float = 1.0,\n perplexity_reg: float = 0, share_pk: bool = True, expert_dropout: float = 0.0,\n selection_mode: str = \"sigmoid\", mode: str = \"my\",\n cvloss: float = 0.0, switchloss: float = 0.0, zloss: float = 0.0):\n\n super().__init__()\n\n self.input_size = input_size or state_size\n self.output_size = output_size or state_size\n self.pe_size = self.input_size\n self.n_experts = n_experts\n self.perplexity_reg = perplexity_reg\n self.sel_hist_dst = []\n self.share_pk = share_pk\n self.expert_dropout = expert_dropout\n self.selection_mode = selection_mode\n self.iter = 0\n self.sel_counts_dst_100 = 0\n self.mode = mode\n\n self.cvloss = cvloss\n self.switchloss = switchloss\n self.zloss = zloss\n\n if self.mode not in {\"my\", \"moa\"}:\n raise ValueError(\"Unknown mode: \" + self.mode)\n\n self.n_heads = n_heads\n self.dropout = torch.nn.Dropout(dropout) if dropout > 0 else lambda x: x\n self.projection_size = projection_size or (state_size // n_heads)\n\n\n std_in = init_std_scale * math.sqrt(1 / self.input_size)\n std_out = init_std_scale * math.sqrt(1 / (n_heads * self.projection_size))\n std_pos = init_std_scale * math.sqrt(1 / self.pe_size)\n self.data_to_q = torch.nn.Parameter(torch.randn(n_experts, self.input_size, self.projection_size) * std_in)\n self.data_to_kv = torch.nn.Parameter(torch.randn(self.input_size, self.projection_size*2) * std_in)\n self.out_proj = torch.nn.Parameter(torch.randn(n_experts, self.projection_size, self.output_size) * std_out)\n self.pos_to_pk = torch.nn.Parameter(torch.randn(self.projection_size, self.pe_size) * std_pos)\n\n self.sel_dst = torch.nn.Parameter(torch.randn(n_experts, self.input_size) * std_in)\n\n self.renorm_rows(self.sel_dst)\n\n self.scale = torch.nn.Parameter(torch.full([1], 1.0 / math.sqrt(self.projection_size)))\n\n self.register_buffer(\"pos_encoding\", self.create_pos_buffer(1000), persistent=False)\n\n def cv_squared(self, x):\n eps = 1e-10\n\n if x.shape[0] == 1:\n return 0\n return x.float().var() / (x.float().mean()**2 + eps)\n\n def compute_cvloss(self, probs):\n return self.cv_squared(F.normalize(probs.sum(0), p=1, dim=0))\n\n def compute_switchloss(self, probs, freqs):\n loss = F.normalize(probs.sum(0), p=1, dim=0) * \\\n F.normalize(freqs.float(), p=1, dim=0)\n return loss.sum() * self.n_experts\n\n def compute_zloss(self, logits):\n zloss = torch.mean(torch.log(torch.exp(logits).sum(dim=1)) ** 2)\n return zloss\n\n def renorm_rows(self, x: torch.Tensor):\n with torch.no_grad():\n std_t = x.std(dim=-1, keepdim=True)\n x.div_(x.norm(dim=-1, keepdim=True))\n x.mul_(std_t / x.std())\n\n def create_pos_buffer(self, max_len: int):\n res = framework.layers.sinusoidal_pos_embedding(self.pe_size, 2 * max_len - 1, -max_len + 1,\n device=self.pos_to_pk.device)\n\n assert res.shape[0] == 2 * max_len - 1\n return res\n\n def get_pos_subset(self, length: int, offset: int) -> torch.Tensor:\n total_len = length + offset\n if (2 * total_len - 1) > self.pos_encoding.shape[0]:\n self.pos_encoding = self.create_pos_buffer(total_len).to(self.pos_encoding.device).type_as(self.pos_encoding)\n\n return self.pos_encoding.narrow(0, self.pos_encoding.shape[0] // 2 - length + 1 - offset, 2 * length - 1)\n\n def project_to_torch_order(self, x: 
torch.Tensor, bias: Optional[torch.Tensor] = None):\n return x.view(*x.shape[:-1], -1, self.projection_size).transpose(-2, -3)\n\n def get_mask_tensor(self, src_len: int, mask: Optional[AttentionMask]) -> Optional[torch.Tensor]:\n if mask is None or (mask.position_mask is None and mask.src_length_mask is None):\n return None\n\n # mask.position_mask: [..., N_out, N_in]\n # mask.src_length_mask: [B, ...., N_in]\n # True where it has to be masked\n\n if mask.position_mask is not None:\n n_pad = src_len - mask.position_mask.shape[-1]\n if n_pad > 0:\n pm = F.pad(mask.position_mask, (n_pad, 0), 'constant', value=False)\n else:\n pm = mask.position_mask\n\n if mask.position_mask is None:\n m = mask.src_length_mask.unsqueeze(-2)\n elif mask.src_length_mask is None:\n m = pm\n else:\n m = mask.src_length_mask.unsqueeze(-2) | pm\n\n return m\n\n def shift(self, posmat: torch.Tensor) -> torch.Tensor:\n # shape: [..., n_out, n_in * 2 - 1]\n # return: [..., n_out, n_in]\n\n n_in = (posmat.shape[-1] + 1) // 2\n n_neg = n_in - 1\n n_out = posmat.shape[-2]\n\n assert posmat.shape[-1] == n_in+n_neg\n\n # example:\n #p0[-3], p0[-2], p0[-1], | p0[0], p0[1], p0[2], p0[3] |\n #p1[-3], p1[-2], | p1[-1], p1[0], p1[1], p1[2],| p1[3]\n #p2[-3], |p2[-2], p2[-1], p2[0], p2[1],| p2[2], p2[3]\n #|p3[-3], p3[-2], p3[-1], p3[0],| p3[1], p3[2], p3[3]\n\n posmat = posmat.flatten(-2)\n posmat = posmat.narrow(-1, 1, n_out * (n_in + n_neg - 1))\n\n # example:\n #p0[-2], p0[-1], | p0[0], p0[1], p0[2], p0[3] |,\n #p1[-3], p1[-2] | p1[-1], p1[0], p1[1], p1[2] |,\n #p1[3], p2[-3], | p2[-2], p2[-1], p2[0], p2[1]|,\n #p2[2], p2[3] , |p3[-3], p3[-2], p3[-1], p3[0],|\n\n posmat = posmat.view(*posmat.shape[:-1], n_out, n_in + n_neg - 1)\n return posmat[..., n_neg-1 : ]\n\n def train(self, mode: bool = True):\n self.sel_hist_dst = []\n return super().train(mode)\n\n def get_loss_on_hist(self, l: List[torch.Tensor]) -> torch.Tensor:\n assert l[0].ndim == 3\n l = [t.flatten(end_dim=-2) for t in l]\n sel = torch.cat(l, -2)\n sel_d = F.log_softmax(sel, dim=-1)\n sel_d = framework.utils.distributed_ops.log_mean(sel_d, -2)\n return self.perplexity_reg * ( - utils.entropy_l(sel_d).mean())\n\n def get_reg_loss(self) -> Dict[str, torch.Tensor]:\n l = super().get_reg_loss()\n if self.sel_hist_dst:\n l[\"moe_att_entropy_dst\"] = self.get_loss_on_hist(self.sel_hist_dst) if self.mode == \"my\" else sum(self.sel_hist_dst)\n self.sel_hist_dst = []\n\n return l\n\n def gate_topk(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:\n with torch.no_grad():\n if self.expert_dropout > 0 and self.training:\n mask = torch.rand_like(x) < self.expert_dropout\n x2 = x.masked_fill(mask, float('-inf'))\n else:\n x2 = x\n _, sel_index = x2.topk(self.n_heads, dim=-1, sorted=False)\n\n y = torch.gather(x, -1, sel_index)\n return y, sel_index\n\n def get_sel_my(self, t: torch.Tensor, w: torch.Tensor):\n sel = F.linear(t, w)\n sel_val, sel_index = self.gate_topk(sel)\n\n if self.selection_mode == \"softmax\":\n sel_val = sel_val.softmax(-1)\n elif self.selection_mode == \"sigmoid\":\n sel_val = sel_val.sigmoid()\n else:\n raise ValueError(\"Unknown selection mode: \" + self.selection_mode)\n\n if self.training and self.perplexity_reg > 0:\n self.sel_hist_dst.append(sel)\n\n sel_index_pp = [cvmm_prepare_sel(sel_index[..., h].int(), self.n_experts) for h in range(self.n_heads)]\n return sel_val, sel_index, sel_index_pp\n\n def get_sel_moa(self, t: torch.Tensor, w: torch.Tensor):\n logits = F.linear(t, w)\n probs = logits.softmax(-1)\n top_k_gates, sel_index 
= self.gate_topk(probs)\n\n if self.training:\n if self.cvloss > 0 or self.switchloss > 0:\n zeros = torch.zeros_like(logits, requires_grad=True, dtype=top_k_gates.dtype)\n gates = zeros.scatter(1, sel_index, top_k_gates)\n gates = gates.flatten(end_dim=-2)\n counts = (gates > 0).float().sum(0)\n\n loss = 0\n if self.cvloss > 0:\n loss += self.cvloss * self.compute_cvloss(gates)\n if self.switchloss > 0:\n loss += self.switchloss * self.compute_switchloss(probs.flatten(end_dim=-2), counts)\n if self.zloss > 0:\n loss += self.zloss * self.compute_zloss(logits.flatten(end_dim=-2))\n\n self.sel_hist_dst.append(loss)\n\n sel_index_pp = [cvmm_prepare_sel(sel_index[..., h].int(), self.n_experts) for h in range(self.n_heads)]\n return top_k_gates, sel_index, sel_index_pp\n\n\n def get_sel(self, t: torch.Tensor, w: torch.Tensor):\n if self.mode == \"my\":\n return self.get_sel_my(t, w)\n elif self.mode == \"moa\":\n return self.get_sel_moa(t, w)\n else:\n raise ValueError(\"Unknown mode: \" + self.mode)\n\n def before_loss(self):\n self.iter += 1\n if self.iter % 100 == 0:\n sorted_counts = self.sel_counts_dst_100.sort(descending=True).values\n self.log(\"sel_counts/dst\", framework.visualize.plot.Barplot(sorted_counts, xlabel=\"expert\", ylabel=\"usage count\"), drop_old=True)\n\n self.sel_counts_dst_100 = 0\n\n def forward(self, curr_state: torch.Tensor, attend_to: torch.Tensor, mask: Optional[AttentionMask],\n pos_offset: Optional[int] = None, need_weights: bool = False):\n # curr_state: [batch_size, out_len, c]\n # attend_to: [batch_size, in_len, c]\n\n if pos_offset is None:\n assert curr_state.shape[1] == attend_to.shape[1], \"If attend_to has different shape than curr_state, pos_offset should be provided\"\n pos_offset = 0\n\n dst_sel_val, raw_dst_sel_index, dst_sel_index = self.get_sel(curr_state, self.sel_dst)\n\n if self.training and self.iter % 10 == 0:\n self.sel_counts_dst_100 += F.one_hot(raw_dst_sel_index.flatten(), self.n_experts).sum(0)\n\n scale = self.scale.sqrt()\n\n pemb = self.get_pos_subset(attend_to.shape[-2], pos_offset)\n k_pos = F.linear(pemb, self.pos_to_pk) * scale\n\n k, v = (attend_to @ self.data_to_kv).split(self.projection_size, dim=-1)\n k = k * scale\n\n total_res = []\n for ah in range(self.n_heads):\n q = cvmm(curr_state, dst_sel_index[ah], self.data_to_q) * scale\n\n qc = qp = q\n\n kd = self.dropout(k)\n\n att = self.shift(qp @ self.dropout(k_pos).transpose(-2,-1)) + qc @ kd.transpose(-2, -1)\n att.masked_fill_(self.get_mask_tensor(attend_to.shape[-2], mask), float('-inf'))\n att = F.softmax(att, dim=-1)\n res = att @ v\n\n total_res.append(cvmm(res, dst_sel_index[ah], self.out_proj) * dst_sel_val[..., ah : ah + 1])\n\n return sum(total_res)"
}
] | from typing import Optional
from .multi_head_relative_pos_attention import AttentionMask
from .transformer import ActivationFunction
from .transformer_preln import reset_prenorm_params
from .full_moe_relative_attention import FullMoeRelativeAttention, FullMoeRopeAttention
from .moa import MoA
import torch
import torch.nn
import torch.nn.functional as F
import math | 7,348 |
class MoeAttentionRelativeTransformerEncoderLayer(torch.nn.Module):
def __init__(self, d_model, nhead, moe_att_n_experts, dim_feedforward=2048, dropout=0.1, activation: ActivationFunction = F.relu,
attention_dropout=0, drop_expand: bool = True,
head_projection_size: Optional[int] = None, preln: bool = False, n_layers: Optional[int] = None,
att_perplexity_reg: float = 0.0, expert_dropout: float = 0.0, att_selection_mode="sigmoid",
attention_variant="moa", q_expert: bool = True, k_expert: bool = True, v_expert: bool = True,
o_expert: bool = True, moe_k: int = 2,
norm_qk_score: bool = False, v_projection_size: Optional[int] = None, same_sel: bool = False,
qside_n_experts: Optional[int] = None, shared_experts: bool = False,
kq_n_experts: Optional[int] = None, separate_kq_sel: bool = False,
cvloss: float = 0.0, switchloss: float = 0.0, zloss: float = 0.0,
moa_mode: str = "my", rotate_fraction: float = 0.5, rope_base: float = 10000,
moeatt_norm_init: bool = False):
super().__init__()
self.is_preln = preln
if attention_variant not in {"full", "full_rope"} and (not q_expert):
raise ValueError("q_expert can be disabled only when using qside attention")
if attention_variant == "moa":
|
class MoeAttentionRelativeTransformerEncoderLayer(torch.nn.Module):
def __init__(self, d_model, nhead, moe_att_n_experts, dim_feedforward=2048, dropout=0.1, activation: ActivationFunction = F.relu,
attention_dropout=0, drop_expand: bool = True,
head_projection_size: Optional[int] = None, preln: bool = False, n_layers: Optional[int] = None,
att_perplexity_reg: float = 0.0, expert_dropout: float = 0.0, att_selection_mode="sigmoid",
attention_variant="moa", q_expert: bool = True, k_expert: bool = True, v_expert: bool = True,
o_expert: bool = True, moe_k: int = 2,
norm_qk_score: bool = False, v_projection_size: Optional[int] = None, same_sel: bool = False,
qside_n_experts: Optional[int] = None, shared_experts: bool = False,
kq_n_experts: Optional[int] = None, separate_kq_sel: bool = False,
cvloss: float = 0.0, switchloss: float = 0.0, zloss: float = 0.0,
moa_mode: str = "my", rotate_fraction: float = 0.5, rope_base: float = 10000,
moeatt_norm_init: bool = False):
super().__init__()
self.is_preln = preln
if attention_variant not in {"full", "full_rope"} and (not q_expert):
raise ValueError("q_expert can be disabled only when using qside attention")
if attention_variant == "moa": | self.self_attn = MoA( | 5 | 2023-12-13 08:45:02+00:00 | 12k |
Q-Future/Q-Align | q_align/model/convert_mplug_owl2_weight_to_hf.py | [
{
"identifier": "MPLUGOwl2Config",
"path": "q_align/model/configuration_mplug_owl2.py",
"snippet": "class MPLUGOwl2Config(LlamaConfig):\n model_type = \"mplug_owl2\"\n def __init__(self, visual_config=None, **kwargs):\n if visual_config is None:\n self.visual_config = DEFAULT_VISUAL_CONFIG\n else:\n self.visual_config = visual_config\n \n super().__init__(\n **kwargs,\n )"
},
{
"identifier": "MplugOwlVisionConfig",
"path": "q_align/model/configuration_mplug_owl2.py",
"snippet": "class MplugOwlVisionConfig(PretrainedConfig):\n r\"\"\"\n This is the configuration class to store the configuration of a [`MplugOwlVisionModel`]. It is used to instantiate\n a\n mPLUG-Owl vision encoder according to the specified arguments, defining the model architecture. Instantiating a\n configuration defaults will yield a similar configuration to that of the mPLUG-Owl\n [x-plug/x_plug-llama-7b](https://huggingface.co/x-plug/x_plug-llama-7b) architecture.\n\n Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\n documentation from [`PretrainedConfig`] for more information.\n\n Args:\n hidden_size (`int`, *optional*, defaults to 768):\n Dimensionality of the encoder layers and the pooler layer.\n intermediate_size (`int`, *optional*, defaults to 3072):\n Dimensionality of the \"intermediate\" (i.e., feed-forward) layer in the Transformer encoder.\n num_hidden_layers (`int`, *optional*, defaults to 12):\n Number of hidden layers in the Transformer encoder.\n num_attention_heads (`int`, *optional*, defaults to 12):\n Number of attention heads for each attention layer in the Transformer encoder.\n image_size (`int`, *optional*, defaults to 224):\n The size (resolution) of each image.\n patch_size (`int`, *optional*, defaults to 32):\n The size (resolution) of each patch.\n hidden_act (`str` or `function`, *optional*, defaults to `\"quick_gelu\"`):\n The non-linear activation function (function or string) in the encoder and pooler. If string, `\"gelu\"`,\n `\"relu\"`, `\"selu\"` and `\"gelu_new\"` ``\"quick_gelu\"` are supported.\n layer_norm_eps (`float`, *optional*, defaults to 1e-5):\n The epsilon used by the layer normalization layers.\n attention_dropout (`float`, *optional*, defaults to 0.0):\n The dropout ratio for the attention probabilities.\n initializer_range (`float`, *optional*, defaults to 0.02):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n initializer_factor (`float`, *optional*, defaults to 1):\n A factor for initializing all weight matrices (should be kept to 1, used internally for initialization\n testing).\n\n\n ```\"\"\"\n\n model_type = \"mplug_owl_vision_model\"\n\n def __init__(\n self,\n hidden_size=1024,\n intermediate_size=4096,\n projection_dim=768,\n num_hidden_layers=24,\n num_attention_heads=16,\n num_channels=3,\n image_size=448,\n patch_size=14,\n hidden_act=\"quick_gelu\",\n layer_norm_eps=1e-6,\n attention_dropout=0.0,\n initializer_range=0.02,\n initializer_factor=1.0,\n use_flash_attn=False,\n **kwargs,\n ):\n super().__init__(**kwargs)\n self.hidden_size = hidden_size\n self.intermediate_size = intermediate_size\n self.projection_dim = projection_dim\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.num_channels = num_channels\n self.patch_size = patch_size\n self.image_size = image_size\n self.initializer_range = initializer_range\n self.initializer_factor = initializer_factor\n self.attention_dropout = attention_dropout\n self.layer_norm_eps = layer_norm_eps\n self.hidden_act = hidden_act\n self.use_flash_attn = use_flash_attn\n\n @classmethod\n def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> \"PretrainedConfig\":\n config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)\n\n # get the vision config dict if we are loading from MplugOwlConfig\n if config_dict.get(\"model_type\") == \"mplug-owl\":\n 
config_dict = config_dict[\"vision_config\"]\n\n if \"model_type\" in config_dict and hasattr(cls, \"model_type\") and config_dict[\"model_type\"] != cls.model_type:\n logger.warning(\n f\"You are using a model of type {config_dict['model_type']} to instantiate a model of type \"\n f\"{cls.model_type}. This is not supported for all configurations of models and can yield errors.\"\n )\n\n return cls.from_dict(config_dict, **kwargs)"
},
{
"identifier": "MplugOwlVisualAbstractorConfig",
"path": "q_align/model/configuration_mplug_owl2.py",
"snippet": "class MplugOwlVisualAbstractorConfig(PretrainedConfig):\n model_type = \"mplug_owl_visual_abstract\"\n\n def __init__(\n self,\n num_learnable_queries=64,\n hidden_size=1024,\n num_hidden_layers=6,\n num_attention_heads=16,\n intermediate_size=2816,\n attention_probs_dropout_prob=0.,\n initializer_range=0.02,\n layer_norm_eps=1e-6,\n encoder_hidden_size=1024,\n grid_size=None,\n **kwargs,\n ):\n super().__init__(**kwargs)\n self.hidden_size = hidden_size\n self.num_learnable_queries = num_learnable_queries\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.intermediate_size = intermediate_size\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.initializer_range = initializer_range\n self.layer_norm_eps = layer_norm_eps\n self.encoder_hidden_size = encoder_hidden_size\n self.grid_size = grid_size if grid_size else 32\n\n @classmethod\n def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> \"PretrainedConfig\":\n config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)\n\n # get the visual_abstractor config dict if we are loading from MplugOwlConfig\n if config_dict.get(\"model_type\") == \"mplug-owl\":\n config_dict = config_dict[\"abstractor_config\"]\n\n if \"model_type\" in config_dict and hasattr(cls, \"model_type\") and config_dict[\"model_type\"] != cls.model_type:\n logger.warning(\n f\"You are using a model of type {config_dict['model_type']} to instantiate a model of type \"\n f\"{cls.model_type}. This is not supported for all configurations of models and can yield errors.\"\n )\n\n return cls.from_dict(config_dict, **kwargs)"
},
{
"identifier": "MPLUGOwl2LlamaForCausalLM",
"path": "q_align/model/modeling_mplug_owl2.py",
"snippet": "class MPLUGOwl2LlamaForCausalLM(LlamaForCausalLM, MPLUGOwl2MetaForCausalLM):\n config_class = MPLUGOwl2Config\n\n def __init__(self, config):\n super(LlamaForCausalLM, self).__init__(config)\n self.model = MPLUGOwl2LlamaModel(config)\n \n self.tokenizer = AutoTokenizer.from_pretrained(\"q-future/one-align\")\n self.image_processor = CLIPImageProcessor.from_pretrained(\"q-future/one-align\")\n\n self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n self.preferential_ids_ = [id_[1] for id_ in self.tokenizer([\"excellent\",\"good\",\"fair\",\"poor\",\"bad\"])[\"input_ids\"]]\n\n # Initialize weights and apply final processing\n self.post_init()\n \n\n def get_model(self):\n return self.model\n \n def score(self, images, \n task_: str = \"quality\",\n input_: str = \"image\",\n ):\n if not hasattr(self, \"weight_tensor\"):\n self.weight_tensor = torch.Tensor([5.,4.,3.,2.,1.]).half().to(self.device)\n prompt = \"USER: How would you rate the {} of this {}?\\n<|image|>\\nASSISTANT: The {} of the {} is\".format(task_, input_, input_, task_)\n if input_ == \"image\":\n images = [expand2square(img, tuple(int(x*255) for x in self.image_processor.image_mean)) for img in images]\n input_ids = tokenizer_image_token(prompt, self.tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).to(self.device)\n with torch.inference_mode():\n image_tensor = self.image_processor.preprocess(images, return_tensors=\"pt\")[\"pixel_values\"].half().to(self.device)\n output_logits = self(input_ids.repeat(image_tensor.shape[0], 1),\n images=image_tensor)[\"logits\"][:,-1, self.preferential_ids_]\n return torch.softmax(output_logits, -1) @ self.weight_tensor\n else:\n video = [[expand2square(frame, tuple(int(x*255) for x in self.image_processor.image_mean)) for frame in vid] for vid in images]\n input_ids = tokenizer_image_token(prompt, self.tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).to(self.device)\n with torch.inference_mode():\n video_tensors = [self.image_processor.preprocess(vid, return_tensors=\"pt\")[\"pixel_values\"].half().to(self.model.device) for vid in video]\n output_logits = self(input_ids.repeat(len(video_tensors), 1),\n images=video_tensors)[\"logits\"][:,-1, self.preferential_ids_]\n return torch.softmax(output_logits, -1) @ self.weight_tensor\n \n def forward(\n self,\n input_ids: torch.LongTensor = None,\n # modality_indicators: torch.LongTensor = None,\n attention_mask: Optional[torch.Tensor] = None,\n past_key_values: Optional[List[torch.FloatTensor]] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n images: Optional[torch.FloatTensor] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, CausalLMOutputWithPast]:\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n input_ids, modality_indicators, attention_mask, past_key_values, inputs_embeds, labels = \\\n self.prepare_inputs_labels_for_multimodal(input_ids, attention_mask, past_key_values, labels, images)\n\n # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)\n outputs = self.model(\n 
input_ids=input_ids,\n modality_indicators=modality_indicators,\n attention_mask=attention_mask,\n past_key_values=past_key_values,\n inputs_embeds=inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict\n )\n\n hidden_states = outputs[0]\n logits = self.lm_head(hidden_states)\n\n loss = None\n if labels is not None:\n # Shift so that tokens < n predict n\n shift_logits = logits[..., :-1, :].contiguous()\n shift_labels = labels[..., 1:].contiguous()\n # Flatten the tokens\n loss_fct = CrossEntropyLoss()\n shift_logits = shift_logits.view(-1, self.config.vocab_size)\n shift_labels = shift_labels.view(-1)\n # Enable model/pipeline parallelism\n shift_labels = shift_labels.to(shift_logits.device)\n loss = loss_fct(shift_logits, shift_labels)\n\n if not return_dict:\n output = (logits,) + outputs[1:]\n return (loss,) + output if loss is not None else output\n\n return CausalLMOutputWithPast(\n loss=loss,\n logits=logits,\n past_key_values=outputs.past_key_values,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n def prepare_inputs_for_generation(\n self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs\n ):\n if past_key_values:\n input_ids = input_ids[:, -1:]\n\n # if `inputs_embeds` are passed, we only want to use them in the 1st generation step\n if inputs_embeds is not None and past_key_values is None:\n model_inputs = {\"inputs_embeds\": inputs_embeds}\n else:\n model_inputs = {\"input_ids\": input_ids}\n\n model_inputs.update(\n {\n \"past_key_values\": past_key_values,\n \"use_cache\": kwargs.get(\"use_cache\"),\n \"attention_mask\": attention_mask,\n \"images\": kwargs.get(\"images\", None),\n }\n )\n return model_inputs"
}
] | import argparse
import gc
import json
import math
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
from .configuration_mplug_owl2 import MPLUGOwl2Config, MplugOwlVisionConfig, MplugOwlVisualAbstractorConfig
from .modeling_mplug_owl2 import MPLUGOwl2LlamaForCausalLM
from transformers import LlamaTokenizerFast | 8,310 | # dim=0,
# ).reshape(n_hidden, n_hidden)
# )
# state_dict[f"model.layers.{layer_i}.self_attn.v_proj.weight"] = torch.cat(
# [
# wv.view(n_heads_per_shard, hidden_per_head, n_hidden)
# for wv in range(wvs)
# ],
# dim=0,
# ).reshape(n_hidden, n_hidden)
# state_dict[f"model.layers.{layer_i}.self_attn.o_proj.weight"] = torch.cat(
# [loaded[i]['encoder'][f"layers.{layer_i}.self_attention.o_proj.weight"] for i in range(num_shards)], dim=1
# )
# state_dict[f"model.layers.{layer_i}.mlp.gate_proj.weight"] = torch.cat(
# [loaded[i]['encoder'][f"layers.{layer_i}.mlp.gate_proj.weight"] for i in range(num_shards)], dim=0
# )
# state_dict[f"model.layers.{layer_i}.mlp.down_proj.weight"] = torch.cat(
# [loaded[i]['encoder'][f"layers.{layer_i}.mlp.down_proj.weight"] for i in range(num_shards)], dim=1
# )
# state_dict[f"model.layers.{layer_i}.mlp.up_proj.weight"] = torch.cat(
# [loaded[i]['encoder'][f"layers.{layer_i}.mlp.up_proj.weight"] for i in range(num_shards)], dim=0
# )
state_dict[f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq
for k, v in state_dict.items():
index_dict["weight_map"][k] = filename
param_count += v.numel()
torch.save(state_dict, os.path.join(tmp_model_path, filename))
print(f'Sharded file saved to {filename}')
filename = f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
if num_shards==1:
# Unsharded
state_dict = {
"model.embed_tokens.weight": loaded['embedding']['word_embeddings']['weight'],
"model.norm.weight": loaded['encoder']['norm.weight'],
"lm_head.weight": loaded['encoder']['lm_head.weight'],
}
else:
state_dict = {
"model.embed_tokens.weight": loaded[0]['embedding']['word_embeddings']['weight'],
"model.norm.weight": loaded[0]['encoder']['norm.weight'],
"lm_head.weight": loaded[0]['encoder']['lm_head.weight'],
}
loaded_all = torch.load(original_filename, map_location="cpu")['model']
# Vision Part
state_dict.update({
"model.vision_model.embeddings.cls_token": loaded_all['vision_model']['cls_token'],
"model.vision_model.embeddings.patch_embed.weight": loaded_all['vision_model']['patch_embed']['weight'],
"model.vision_model.embeddings.position_embedding": loaded_all['vision_model']['position_embeddings'],
"model.vision_model.embeddings.pre_layernorm.bias": loaded_all['vision_model']['pre_layernorm']['bias'],
"model.vision_model.embeddings.pre_layernorm.weight": loaded_all['vision_model']['pre_layernorm']['weight'],
"model.vision_model.post_layernorm.bias": loaded_all['vision_model']['transformer']['final_layernorm.bias'],
"model.vision_model.post_layernorm.weight": loaded_all['vision_model']['transformer']['final_layernorm.weight'],
})
for v_layer_idx in range(24):
state_dict.update({
f"model.vision_model.encoder.layers.{v_layer_idx}.input_layernorm.bias": loaded_all['vision_model']['transformer'][f'layers.{v_layer_idx}.input_layernorm.bias'],
f"model.vision_model.encoder.layers.{v_layer_idx}.input_layernorm.weight": loaded_all['vision_model']['transformer'][f'layers.{v_layer_idx}.input_layernorm.weight'],
f"model.vision_model.encoder.layers.{v_layer_idx}.mlp.fc1.bias": loaded_all['vision_model']['transformer'][f'layers.{v_layer_idx}.mlp.dense_h_to_4h.bias'],
f"model.vision_model.encoder.layers.{v_layer_idx}.mlp.fc1.weight": loaded_all['vision_model']['transformer'][f'layers.{v_layer_idx}.mlp.dense_h_to_4h.weight'],
f"model.vision_model.encoder.layers.{v_layer_idx}.mlp.fc2.bias": loaded_all['vision_model']['transformer'][f'layers.{v_layer_idx}.mlp.dense_4h_to_h.bias'],
f"model.vision_model.encoder.layers.{v_layer_idx}.mlp.fc2.weight": loaded_all['vision_model']['transformer'][f'layers.{v_layer_idx}.mlp.dense_4h_to_h.weight'],
f"model.vision_model.encoder.layers.{v_layer_idx}.post_attention_layernorm.bias": loaded_all['vision_model']['transformer'][f'layers.{v_layer_idx}.post_attention_layernorm.bias'],
f"model.vision_model.encoder.layers.{v_layer_idx}.post_attention_layernorm.weight": loaded_all['vision_model']['transformer'][f'layers.{v_layer_idx}.post_attention_layernorm.weight'],
f"model.vision_model.encoder.layers.{v_layer_idx}.self_attn.dense.bias": loaded_all['vision_model']['transformer'][f'layers.{v_layer_idx}.self_attention.dense.bias'],
f"model.vision_model.encoder.layers.{v_layer_idx}.self_attn.dense.weight": loaded_all['vision_model']['transformer'][f'layers.{v_layer_idx}.self_attention.dense.weight'],
f"model.vision_model.encoder.layers.{v_layer_idx}.self_attn.query_key_value.bias": loaded_all['vision_model']['transformer'][f'layers.{v_layer_idx}.self_attention.query_key_value.bias'],
f"model.vision_model.encoder.layers.{v_layer_idx}.self_attn.query_key_value.weight": loaded_all['vision_model']['transformer'][f'layers.{v_layer_idx}.self_attention.query_key_value.weight'],
})
# Abstractor Part
state_dict.update({
"model.visual_abstractor.query_embeds": loaded_all['vision_abstractor']['learnable_queries'],
"model.visual_abstractor.visual_fc.bias": loaded_all['vision_abstractor']['visual_fc']['bias'],
"model.visual_abstractor.visual_fc.weight": loaded_all['vision_abstractor']['visual_fc']['weight'],
"model.visual_abstractor.vit_eos": loaded_all['vision_abstractor']['vit_eos'],
})
for v_layer_idx in range(6):
state_dict.update({
# f"model.visual_abstractor.encoder.layers.{v_layer_idx}.crossattention.attention.k_pos_embed":
f"model.visual_abstractor.encoder.layers.{v_layer_idx}.crossattention.attention.key.bias": loaded_all['vision_abstractor']['transformer'][f"layers.{v_layer_idx}.self_attention.k_proj.bias"],
f"model.visual_abstractor.encoder.layers.{v_layer_idx}.crossattention.attention.key.weight": loaded_all['vision_abstractor']['transformer'][f"layers.{v_layer_idx}.self_attention.k_proj.weight"],
# f"model.visual_abstractor.encoder.layers.{v_layer_idx}.crossattention.attention.q_pos_embed": "pytorch_model-00004-of-00004.bin",
f"model.visual_abstractor.encoder.layers.{v_layer_idx}.crossattention.attention.query.bias": loaded_all['vision_abstractor']['transformer'][f"layers.{v_layer_idx}.self_attention.q_proj.bias"],
f"model.visual_abstractor.encoder.layers.{v_layer_idx}.crossattention.attention.query.weight": loaded_all['vision_abstractor']['transformer'][f"layers.{v_layer_idx}.self_attention.q_proj.weight"],
f"model.visual_abstractor.encoder.layers.{v_layer_idx}.crossattention.attention.value.bias": loaded_all['vision_abstractor']['transformer'][f"layers.{v_layer_idx}.self_attention.v_proj.bias"],
f"model.visual_abstractor.encoder.layers.{v_layer_idx}.crossattention.attention.value.weight": loaded_all['vision_abstractor']['transformer'][f"layers.{v_layer_idx}.self_attention.v_proj.weight"],
f"model.visual_abstractor.encoder.layers.{v_layer_idx}.crossattention.norm1.bias": loaded_all['vision_abstractor']['transformer'][f"layers.{v_layer_idx}.norm1.bias"],
f"model.visual_abstractor.encoder.layers.{v_layer_idx}.crossattention.norm1.weight": loaded_all['vision_abstractor']['transformer'][f"layers.{v_layer_idx}.norm1.weight"],
f"model.visual_abstractor.encoder.layers.{v_layer_idx}.crossattention.normk.bias": loaded_all['vision_abstractor']['transformer'][f"layers.{v_layer_idx}.normk.bias"],
f"model.visual_abstractor.encoder.layers.{v_layer_idx}.crossattention.normk.weight": loaded_all['vision_abstractor']['transformer'][f"layers.{v_layer_idx}.normk.weight"],
f"model.visual_abstractor.encoder.layers.{v_layer_idx}.crossattention.output.mlp.ffn_ln.bias": loaded_all['vision_abstractor']['transformer'][f"layers.{v_layer_idx}.mlp.ffn_ln.bias"],
f"model.visual_abstractor.encoder.layers.{v_layer_idx}.crossattention.output.mlp.ffn_ln.weight": loaded_all['vision_abstractor']['transformer'][f"layers.{v_layer_idx}.mlp.ffn_ln.weight"],
f"model.visual_abstractor.encoder.layers.{v_layer_idx}.crossattention.output.mlp.w1.bias": loaded_all['vision_abstractor']['transformer'][f"layers.{v_layer_idx}.mlp.w1.bias"],
f"model.visual_abstractor.encoder.layers.{v_layer_idx}.crossattention.output.mlp.w1.weight": loaded_all['vision_abstractor']['transformer'][f"layers.{v_layer_idx}.mlp.w1.weight"],
f"model.visual_abstractor.encoder.layers.{v_layer_idx}.crossattention.output.mlp.w2.bias": loaded_all['vision_abstractor']['transformer'][f"layers.{v_layer_idx}.mlp.w2.bias"],
f"model.visual_abstractor.encoder.layers.{v_layer_idx}.crossattention.output.mlp.w2.weight": loaded_all['vision_abstractor']['transformer'][f"layers.{v_layer_idx}.mlp.w2.weight"],
f"model.visual_abstractor.encoder.layers.{v_layer_idx}.crossattention.output.mlp.w3.bias": loaded_all['vision_abstractor']['transformer'][f"layers.{v_layer_idx}.mlp.w3.bias"],
f"model.visual_abstractor.encoder.layers.{v_layer_idx}.crossattention.output.mlp.w3.weight": loaded_all['vision_abstractor']['transformer'][f"layers.{v_layer_idx}.mlp.w3.weight"],
f"model.visual_abstractor.encoder.layers.{v_layer_idx}.crossattention.output.norm2.bias": loaded_all['vision_abstractor']['transformer'][f"layers.{v_layer_idx}.norm2.bias"],
f"model.visual_abstractor.encoder.layers.{v_layer_idx}.crossattention.output.norm2.weight": loaded_all['vision_abstractor']['transformer'][f"layers.{v_layer_idx}.norm2.weight"],
f"model.visual_abstractor.encoder.layers.{v_layer_idx}.crossattention.output.out_proj.bias": loaded_all['vision_abstractor']['transformer'][f"layers.{v_layer_idx}.self_attention.o_proj.bias"],
f"model.visual_abstractor.encoder.layers.{v_layer_idx}.crossattention.output.out_proj.weight": loaded_all['vision_abstractor']['transformer'][f"layers.{v_layer_idx}.self_attention.o_proj.weight"],
})
for k, v in state_dict.items():
index_dict["weight_map"][k] = filename
param_count += v.numel()
torch.save(state_dict, os.path.join(tmp_model_path, filename))
# Write configs
index_dict["metadata"] = {"total_size": param_count * 2}
write_json(index_dict, os.path.join(tmp_model_path, "pytorch_model.bin.index.json"))
| # Copyright 2023 DAMO Academy and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
except ImportError as e:
warnings.warn(e)
warnings.warn(
"The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"
)
LlamaTokenizerFast = None
"""
Sample usage:
```
python3 /pure-mlo-scratch/sfan/model-parallel-trainer/llama2megatron/convert_llama2hf.py \
--input_dir /pure-mlo-scratch/llama/ --model_size 7 --output_dir /pure-mlo-scratch/llama/converted_HF_7B
```
Thereafter, models can be loaded via:
```py
from transformers import LlamaForCausalLM, LlamaTokenizer
model = LlamaForCausalLM.from_pretrained("/output/path")
tokenizer = LlamaTokenizer.from_pretrained("/output/path")
```
Important note: you need to be able to host the whole model in RAM to execute this script (even if the biggest versions
come in several checkpoints they each contain a part of each weight of the model, so we need to load them all in RAM).
"""
llama_s2layer = {7: 32, 13: 40, 30: 60, 65: 80, 70: 80}
llama_s2heads = {7: 32, 13: 40, 30: 52, 65: 64, 70: 64}
llama_s2dense = {7: 11008, 13: 13824, 30: 17920, 65: 22016,
70: 28672} # should be (2/3)*4*d, but it isn't exactly that
llama_s2hidden = {7: 4096, 13: 5120, 30: 6656, 65: 8192, 70: 8192}
def compute_intermediate_size(n):
return int(math.ceil(n * 8 / 3) + 255) // 256 * 256
def read_json(path):
with open(path, "r") as f:
return json.load(f)
def write_json(text, path):
with open(path, "w") as f:
json.dump(text, f)
def write_model(model_path,
input_base_path,
model_size,
num_input_shards=1,
num_output_shards=2,
skip_permute=True,
norm_eps=1e-05):
# if os.path.exists(model_path):
# shutil.rmtree(model_path)
os.makedirs(model_path, exist_ok=True)
# tmp_model_path = os.path.join(model_path, "tmp")
tmp_model_path = model_path
os.makedirs(tmp_model_path, exist_ok=True)
num_shards = num_input_shards
n_layers = llama_s2layer[model_size]
n_heads = llama_s2heads[model_size]
n_heads_per_shard = n_heads // num_shards
n_dense = llama_s2dense[model_size]
n_hidden = llama_s2hidden[model_size]
hidden_per_head = n_hidden // n_heads
base = 10000.0
inv_freq = 1.0 / (base ** (torch.arange(0, hidden_per_head, 2).float() / hidden_per_head))
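# inv_freq holds the RoPE rotation frequencies, theta_i = base ** (-2i / hidden_per_head),
# one value per pair of head dimensions; it is stored per layer further below as
# model.layers.{layer_i}.self_attn.rotary_emb.inv_freq.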
# permute for sliced rotary
def permute(w, skip_permute=skip_permute):
if skip_permute:
return w
return w.view(n_heads, n_hidden // n_heads // 2, 2, n_hidden).transpose(1, 2).reshape(n_hidden, n_hidden)
print(f"Fetching all parameters from the checkpoint at {input_base_path}.")
# Load weights
if num_shards==1:
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
# /pure-mlo-scratch/alhernan/megatron-data/checkpoints/llama2-7b-tp4-pp1-optim/release/mp_rank_00/model_optim_rng.pt
if os.path.exists(os.path.join(input_base_path, 'release')):
filename = os.path.join(input_base_path, 'release', 'mp_rank_00', 'model_optim_rng.pt')
elif input_base_path.split('/')[-1].startswith('iter_'):
iteration = eval(input_base_path.split('/')[-1].replace('iter_', '').lstrip('0'))
load_dir = '/'.join(input_base_path.split('/')[:-1])
filename = os.path.join(input_base_path, 'mp_rank_00', 'model_optim_rng.pt')
if not os.path.exists(filename):
filename = filename.replace('model_optim_rng.pt', 'model_rng.pt')
else:
tracker_filename = os.path.join(input_base_path, 'latest_checkpointed_iteration.txt')
with open(tracker_filename, 'r') as f:
metastring = f.read().strip()
iteration = 'iter_{:07d}'.format(int(metastring))
filename = os.path.join(input_base_path, iteration, 'mp_rank_00', 'model_optim_rng.pt')
if not os.path.exists(filename):
filename = filename.replace('model_optim_rng.pt', 'model_rng.pt')
original_filename = filename
loaded = torch.load(filename, map_location="cpu")['model']['language_model']
else:
# Sharded
filenames = []
for i in range(num_shards):
if os.path.exists(os.path.join(input_base_path, 'release')):
filename = os.path.join(input_base_path, 'release', f'mp_rank_{i:02d}', 'model_optim_rng.pt')
else:
tracker_filename = os.path.join(input_base_path, 'latest_checkpointed_iteration.txt')
with open(tracker_filename, 'r') as f:
metastring = f.read().strip()
iteration = 'iter_{:07d}'.format(int(metastring))
filename = os.path.join(input_base_path, iteration, f'mp_rank_{i:02d}', 'model_optim_rng.pt')
if not os.path.exists(filename):
filename = filename.replace('model_optim_rng.pt', 'model_rng.pt')
filenames.append(filename)
loaded = [
torch.load(filenames[i], map_location="cpu")['model']['language_model']
for i in range(num_shards)
]
print('Llama-Megatron Loaded!')
param_count = 0
index_dict = {"weight_map": {}}
print(f'Weighted Converting for {n_layers} layers...')
for layer_i in range(n_layers):
print(layer_i)
filename = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
if num_shards == 1:
# Unsharded
state_dict = {
f"model.layers.{layer_i}.self_attn.q_proj.weight": loaded['encoder'][f"layers.{layer_i}.self_attention.q_proj.weight"],
f"model.layers.{layer_i}.self_attn.k_proj.multiway.0.weight": loaded['encoder'][f"layers.{layer_i}.self_attention.k_proj.multiway.0.weight"],
f"model.layers.{layer_i}.self_attn.v_proj.multiway.0.weight": loaded['encoder'][f"layers.{layer_i}.self_attention.v_proj.multiway.0.weight"],
f"model.layers.{layer_i}.self_attn.k_proj.multiway.1.weight": loaded['encoder'][f"layers.{layer_i}.self_attention.k_proj.multiway.1.weight"],
f"model.layers.{layer_i}.self_attn.v_proj.multiway.1.weight": loaded['encoder'][f"layers.{layer_i}.self_attention.v_proj.multiway.1.weight"],
f"model.layers.{layer_i}.self_attn.o_proj.weight": loaded['encoder'][f"layers.{layer_i}.self_attention.o_proj.weight"],
f"model.layers.{layer_i}.mlp.gate_proj.weight": loaded['encoder'][f"layers.{layer_i}.mlp.gate_proj.weight"],
f"model.layers.{layer_i}.mlp.down_proj.weight": loaded['encoder'][f"layers.{layer_i}.mlp.down_proj.weight"],
f"model.layers.{layer_i}.mlp.up_proj.weight": loaded['encoder'][f"layers.{layer_i}.mlp.up_proj.weight"],
f"model.layers.{layer_i}.input_layernorm.multiway.0.weight": loaded['encoder'][f"layers.{layer_i}.input_layernorm.multiway.0.weight"],
f"model.layers.{layer_i}.post_attention_layernorm.multiway.0.weight": loaded['encoder'][f"layers.{layer_i}.post_attention_layernorm.multiway.0.weight"],
f"model.layers.{layer_i}.input_layernorm.multiway.1.weight": loaded['encoder'][f"layers.{layer_i}.input_layernorm.multiway.1.weight"],
f"model.layers.{layer_i}.post_attention_layernorm.multiway.1.weight": loaded['encoder'][f"layers.{layer_i}.post_attention_layernorm.multiway.1.weight"],
}
else:
raise NotImplementedError
# else:
# # Sharded
# # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
# state_dict = {
# f"model.layers.{layer_i}.input_layernorm.weight": loaded[0]['encoder'][
# f"layers.{layer_i}.input_layernorm.multiway.0.weight"
# ].clone(),
# f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0]['encoder'][
# f"layers.{layer_i}.post_attention_layernorm.multiway.0.weight"
# ].clone(),
# }
# wqs, wks, wvs, ffn_w1s, ffn_w3s = [], [], [], [], []
# for shard_idx in range(num_shards):
# wqs.append(loaded[shard_idx]['encoder'][f"layers.{layer_i}.self_attention.q_proj.weight"])
# wks.append(loaded[shard_idx]['encoder'][f"layers.{layer_i}.self_attention.k_proj.multiway.0.weight"])
# wvs.append(loaded[shard_idx]['encoder'][f"layers.{layer_i}.self_attention.v_proj.multiway.0.weight"])
# state_dict[f"model.layers.{layer_i}.self_attn.q_proj.weight"] = permute(
# torch.cat(
# [
# wq.view(n_heads_per_shard, hidden_per_head, n_hidden)
# for wq in range(wqs)
# ],
# dim=0,
# ).reshape(n_hidden, n_hidden)
# )
# state_dict[f"model.layers.{layer_i}.self_attn.k_proj.weight"] = permute(
# torch.cat(
# [
# wk.view(n_heads_per_shard, hidden_per_head, n_hidden)
# for wk in range(wks)
# ],
# dim=0,
# ).reshape(n_hidden, n_hidden)
# )
# state_dict[f"model.layers.{layer_i}.self_attn.v_proj.weight"] = torch.cat(
# [
# wv.view(n_heads_per_shard, hidden_per_head, n_hidden)
# for wv in range(wvs)
# ],
# dim=0,
# ).reshape(n_hidden, n_hidden)
# state_dict[f"model.layers.{layer_i}.self_attn.o_proj.weight"] = torch.cat(
# [loaded[i]['encoder'][f"layers.{layer_i}.self_attention.o_proj.weight"] for i in range(num_shards)], dim=1
# )
# state_dict[f"model.layers.{layer_i}.mlp.gate_proj.weight"] = torch.cat(
# [loaded[i]['encoder'][f"layers.{layer_i}.mlp.gate_proj.weight"] for i in range(num_shards)], dim=0
# )
# state_dict[f"model.layers.{layer_i}.mlp.down_proj.weight"] = torch.cat(
# [loaded[i]['encoder'][f"layers.{layer_i}.mlp.down_proj.weight"] for i in range(num_shards)], dim=1
# )
# state_dict[f"model.layers.{layer_i}.mlp.up_proj.weight"] = torch.cat(
# [loaded[i]['encoder'][f"layers.{layer_i}.mlp.up_proj.weight"] for i in range(num_shards)], dim=0
# )
state_dict[f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq
for k, v in state_dict.items():
index_dict["weight_map"][k] = filename
param_count += v.numel()
torch.save(state_dict, os.path.join(tmp_model_path, filename))
print(f'Sharded file saved to {filename}')
filename = f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
if num_shards==1:
# Unsharded
state_dict = {
"model.embed_tokens.weight": loaded['embedding']['word_embeddings']['weight'],
"model.norm.weight": loaded['encoder']['norm.weight'],
"lm_head.weight": loaded['encoder']['lm_head.weight'],
}
else:
state_dict = {
"model.embed_tokens.weight": loaded[0]['embedding']['word_embeddings']['weight'],
"model.norm.weight": loaded[0]['encoder']['norm.weight'],
"lm_head.weight": loaded[0]['encoder']['lm_head.weight'],
}
loaded_all = torch.load(original_filename, map_location="cpu")['model']
# Vision Part
state_dict.update({
"model.vision_model.embeddings.cls_token": loaded_all['vision_model']['cls_token'],
"model.vision_model.embeddings.patch_embed.weight": loaded_all['vision_model']['patch_embed']['weight'],
"model.vision_model.embeddings.position_embedding": loaded_all['vision_model']['position_embeddings'],
"model.vision_model.embeddings.pre_layernorm.bias": loaded_all['vision_model']['pre_layernorm']['bias'],
"model.vision_model.embeddings.pre_layernorm.weight": loaded_all['vision_model']['pre_layernorm']['weight'],
"model.vision_model.post_layernorm.bias": loaded_all['vision_model']['transformer']['final_layernorm.bias'],
"model.vision_model.post_layernorm.weight": loaded_all['vision_model']['transformer']['final_layernorm.weight'],
})
for v_layer_idx in range(24):
state_dict.update({
f"model.vision_model.encoder.layers.{v_layer_idx}.input_layernorm.bias": loaded_all['vision_model']['transformer'][f'layers.{v_layer_idx}.input_layernorm.bias'],
f"model.vision_model.encoder.layers.{v_layer_idx}.input_layernorm.weight": loaded_all['vision_model']['transformer'][f'layers.{v_layer_idx}.input_layernorm.weight'],
f"model.vision_model.encoder.layers.{v_layer_idx}.mlp.fc1.bias": loaded_all['vision_model']['transformer'][f'layers.{v_layer_idx}.mlp.dense_h_to_4h.bias'],
f"model.vision_model.encoder.layers.{v_layer_idx}.mlp.fc1.weight": loaded_all['vision_model']['transformer'][f'layers.{v_layer_idx}.mlp.dense_h_to_4h.weight'],
f"model.vision_model.encoder.layers.{v_layer_idx}.mlp.fc2.bias": loaded_all['vision_model']['transformer'][f'layers.{v_layer_idx}.mlp.dense_4h_to_h.bias'],
f"model.vision_model.encoder.layers.{v_layer_idx}.mlp.fc2.weight": loaded_all['vision_model']['transformer'][f'layers.{v_layer_idx}.mlp.dense_4h_to_h.weight'],
f"model.vision_model.encoder.layers.{v_layer_idx}.post_attention_layernorm.bias": loaded_all['vision_model']['transformer'][f'layers.{v_layer_idx}.post_attention_layernorm.bias'],
f"model.vision_model.encoder.layers.{v_layer_idx}.post_attention_layernorm.weight": loaded_all['vision_model']['transformer'][f'layers.{v_layer_idx}.post_attention_layernorm.weight'],
f"model.vision_model.encoder.layers.{v_layer_idx}.self_attn.dense.bias": loaded_all['vision_model']['transformer'][f'layers.{v_layer_idx}.self_attention.dense.bias'],
f"model.vision_model.encoder.layers.{v_layer_idx}.self_attn.dense.weight": loaded_all['vision_model']['transformer'][f'layers.{v_layer_idx}.self_attention.dense.weight'],
f"model.vision_model.encoder.layers.{v_layer_idx}.self_attn.query_key_value.bias": loaded_all['vision_model']['transformer'][f'layers.{v_layer_idx}.self_attention.query_key_value.bias'],
f"model.vision_model.encoder.layers.{v_layer_idx}.self_attn.query_key_value.weight": loaded_all['vision_model']['transformer'][f'layers.{v_layer_idx}.self_attention.query_key_value.weight'],
})
# Abstractor Part
state_dict.update({
"model.visual_abstractor.query_embeds": loaded_all['vision_abstractor']['learnable_queries'],
"model.visual_abstractor.visual_fc.bias": loaded_all['vision_abstractor']['visual_fc']['bias'],
"model.visual_abstractor.visual_fc.weight": loaded_all['vision_abstractor']['visual_fc']['weight'],
"model.visual_abstractor.vit_eos": loaded_all['vision_abstractor']['vit_eos'],
})
for v_layer_idx in range(6):
state_dict.update({
# f"model.visual_abstractor.encoder.layers.{v_layer_idx}.crossattention.attention.k_pos_embed":
f"model.visual_abstractor.encoder.layers.{v_layer_idx}.crossattention.attention.key.bias": loaded_all['vision_abstractor']['transformer'][f"layers.{v_layer_idx}.self_attention.k_proj.bias"],
f"model.visual_abstractor.encoder.layers.{v_layer_idx}.crossattention.attention.key.weight": loaded_all['vision_abstractor']['transformer'][f"layers.{v_layer_idx}.self_attention.k_proj.weight"],
# f"model.visual_abstractor.encoder.layers.{v_layer_idx}.crossattention.attention.q_pos_embed": "pytorch_model-00004-of-00004.bin",
f"model.visual_abstractor.encoder.layers.{v_layer_idx}.crossattention.attention.query.bias": loaded_all['vision_abstractor']['transformer'][f"layers.{v_layer_idx}.self_attention.q_proj.bias"],
f"model.visual_abstractor.encoder.layers.{v_layer_idx}.crossattention.attention.query.weight": loaded_all['vision_abstractor']['transformer'][f"layers.{v_layer_idx}.self_attention.q_proj.weight"],
f"model.visual_abstractor.encoder.layers.{v_layer_idx}.crossattention.attention.value.bias": loaded_all['vision_abstractor']['transformer'][f"layers.{v_layer_idx}.self_attention.v_proj.bias"],
f"model.visual_abstractor.encoder.layers.{v_layer_idx}.crossattention.attention.value.weight": loaded_all['vision_abstractor']['transformer'][f"layers.{v_layer_idx}.self_attention.v_proj.weight"],
f"model.visual_abstractor.encoder.layers.{v_layer_idx}.crossattention.norm1.bias": loaded_all['vision_abstractor']['transformer'][f"layers.{v_layer_idx}.norm1.bias"],
f"model.visual_abstractor.encoder.layers.{v_layer_idx}.crossattention.norm1.weight": loaded_all['vision_abstractor']['transformer'][f"layers.{v_layer_idx}.norm1.weight"],
f"model.visual_abstractor.encoder.layers.{v_layer_idx}.crossattention.normk.bias": loaded_all['vision_abstractor']['transformer'][f"layers.{v_layer_idx}.normk.bias"],
f"model.visual_abstractor.encoder.layers.{v_layer_idx}.crossattention.normk.weight": loaded_all['vision_abstractor']['transformer'][f"layers.{v_layer_idx}.normk.weight"],
f"model.visual_abstractor.encoder.layers.{v_layer_idx}.crossattention.output.mlp.ffn_ln.bias": loaded_all['vision_abstractor']['transformer'][f"layers.{v_layer_idx}.mlp.ffn_ln.bias"],
f"model.visual_abstractor.encoder.layers.{v_layer_idx}.crossattention.output.mlp.ffn_ln.weight": loaded_all['vision_abstractor']['transformer'][f"layers.{v_layer_idx}.mlp.ffn_ln.weight"],
f"model.visual_abstractor.encoder.layers.{v_layer_idx}.crossattention.output.mlp.w1.bias": loaded_all['vision_abstractor']['transformer'][f"layers.{v_layer_idx}.mlp.w1.bias"],
f"model.visual_abstractor.encoder.layers.{v_layer_idx}.crossattention.output.mlp.w1.weight": loaded_all['vision_abstractor']['transformer'][f"layers.{v_layer_idx}.mlp.w1.weight"],
f"model.visual_abstractor.encoder.layers.{v_layer_idx}.crossattention.output.mlp.w2.bias": loaded_all['vision_abstractor']['transformer'][f"layers.{v_layer_idx}.mlp.w2.bias"],
f"model.visual_abstractor.encoder.layers.{v_layer_idx}.crossattention.output.mlp.w2.weight": loaded_all['vision_abstractor']['transformer'][f"layers.{v_layer_idx}.mlp.w2.weight"],
f"model.visual_abstractor.encoder.layers.{v_layer_idx}.crossattention.output.mlp.w3.bias": loaded_all['vision_abstractor']['transformer'][f"layers.{v_layer_idx}.mlp.w3.bias"],
f"model.visual_abstractor.encoder.layers.{v_layer_idx}.crossattention.output.mlp.w3.weight": loaded_all['vision_abstractor']['transformer'][f"layers.{v_layer_idx}.mlp.w3.weight"],
f"model.visual_abstractor.encoder.layers.{v_layer_idx}.crossattention.output.norm2.bias": loaded_all['vision_abstractor']['transformer'][f"layers.{v_layer_idx}.norm2.bias"],
f"model.visual_abstractor.encoder.layers.{v_layer_idx}.crossattention.output.norm2.weight": loaded_all['vision_abstractor']['transformer'][f"layers.{v_layer_idx}.norm2.weight"],
f"model.visual_abstractor.encoder.layers.{v_layer_idx}.crossattention.output.out_proj.bias": loaded_all['vision_abstractor']['transformer'][f"layers.{v_layer_idx}.self_attention.o_proj.bias"],
f"model.visual_abstractor.encoder.layers.{v_layer_idx}.crossattention.output.out_proj.weight": loaded_all['vision_abstractor']['transformer'][f"layers.{v_layer_idx}.self_attention.o_proj.weight"],
})
for k, v in state_dict.items():
index_dict["weight_map"][k] = filename
param_count += v.numel()
torch.save(state_dict, os.path.join(tmp_model_path, filename))
# Write configs
index_dict["metadata"] = {"total_size": param_count * 2}
write_json(index_dict, os.path.join(tmp_model_path, "pytorch_model.bin.index.json"))
| config = MPLUGOwl2Config() | 0 | 2023-12-14 03:36:30+00:00 | 12k |
nox-410/tvm.tl | python/tvm/target/detect_target.py | [
{
"identifier": "Target",
"path": "python/tvm/target/target.py",
"snippet": "class Target(Object):\n \"\"\"Target device information, use through TVM API.\n\n Note\n ----\n You can create target using the constructor or the following functions\n\n - :py:func:`tvm.target.arm_cpu` create arm_cpu target\n - :py:func:`tvm.target.cuda` create CUDA target\n - :py:func:`tvm.target.rocm` create ROCM target\n - :py:func:`tvm.target.mali` create Mali target\n - :py:func:`tvm.target.intel_graphics` create Intel Graphics target\n \"\"\"\n\n def __init__(self, target, host=None):\n \"\"\"Construct a TVM target object from\n 1) Raw target string\n 2) Target config dict\n 3) Target tag\n\n Parameters\n ----------\n target : Union[str, Dict[str, Any]]\n Can be one of a literal target string, a json string describing\n a configuration, or a dictionary of configuration options.\n When using a dictionary or json string to configure target, the\n possible values are:\n\n kind : str (required)\n Which codegen path to use, for example 'llvm' or 'cuda'.\n keys : List of str (optional)\n A set of strategies that can be dispatched to. When using\n \"kind=opencl\" for example, one could set keys to [\"mali\", \"opencl\", \"gpu\"].\n device : str (optional)\n A single key that corresponds to the actual device being run on.\n This will be effectively appended to the keys.\n libs : List of str (optional)\n The set of external libraries to use. For example ['cblas', 'mkl'].\n system-lib : bool (optional)\n If True, build a module that contains self registered functions.\n Useful for environments where dynamic loading like dlopen is banned.\n mcpu : str (optional)\n The specific cpu being run on. Serves only as an annotation.\n model : str (optional)\n An annotation indicating what model a workload came from.\n runtime : str (optional)\n An annotation indicating which runtime to use with a workload.\n mtriple : str (optional)\n The llvm triplet describing the target, for example \"arm64-linux-android\".\n mattr : List of str (optional)\n The llvm features to compile with, for example [\"+avx512f\", \"+mmx\"].\n mfloat-abi : str (optional)\n An llvm setting that is one of 'hard' or 'soft' indicating whether to use\n hardware or software floating-point operations.\n mabi : str (optional)\n An llvm setting. Generate code for the specified ABI, for example \"lp64d\".\n host : Union[str, Dict[str, Any]] (optional)\n Description for target host. Can be recursive. Similar to target.\n host : Optional[Union[str, Dict[str, Any]]]\n Similar to target but for target host. Can be one of a literal target host string,\n a json string describing a configuration, or a dictionary of configuration options.\n When using a dictionary or json string to configure target, the possible values are\n same as target.\n \"\"\"\n if isinstance(target, str) and \"-libs=mkldnn\" in target:\n target = target.replace(\"mkldnn\", \"dnnl\")\n warnings.warn(\n \"Legacy support of mkldnn is going to be deprecated. 
\"\n \"Please use -libs=dnnl instead.\",\n )\n if isinstance(target, (dict, str)):\n target = convert(target)\n if isinstance(host, (dict, str)):\n host = convert(host)\n if target is None or not isinstance(target, (Map, String, Target)):\n raise ValueError(\"target has to be a string or dictionary.\")\n if host is not None:\n if not isinstance(host, (Map, String, Target)):\n raise ValueError(\"target host has to be a string or dictionary.\")\n self.__init_handle_by_constructor__(_ffi_api.Target, Target(target), Target(host))\n else:\n self.__init_handle_by_constructor__(_ffi_api.Target, target)\n\n def __enter__(self):\n _ffi_api.TargetEnterScope(self)\n return self\n\n def __exit__(self, ptype, value, trace):\n _ffi_api.TargetExitScope(self)\n\n def export(self):\n return _ffi_api.TargetExport(self)\n\n def with_host(self, host=None):\n return _ffi_api.WithHost(self, Target(host))\n\n @staticmethod\n def from_device(device: Union[str, Device]) -> \"Target\":\n \"\"\"Detects Target associated with the given device. If the device does not exist,\n there will be an Error.\n\n Parameters\n ----------\n dev : Union[str, Device]\n The device to detect the target for.\n Supported device types: [\"cuda\", \"metal\", \"rocm\", \"vulkan\", \"opencl\", \"cpu\"]\n\n Returns\n -------\n target : Target\n The detected target.\n \"\"\"\n from .detect_target import ( # pylint: disable=import-outside-toplevel\n detect_target_from_device,\n )\n\n return detect_target_from_device(device)\n\n @staticmethod\n def current(allow_none=True):\n \"\"\"Returns the current target.\n\n Parameters\n ----------\n allow_none : bool\n Whether allow the current target to be none\n\n Raises\n ------\n ValueError if current target is not set.\n \"\"\"\n return _ffi_api.TargetCurrent(allow_none)\n\n @property\n def arch(self):\n \"\"\"Returns the cuda arch from the target if it exists.\"\"\"\n return str(self.attrs.get(\"arch\", \"\"))\n\n @property\n def max_num_threads(self):\n \"\"\"Returns the max_num_threads from the target if it exists.\"\"\"\n return int(self.attrs[\"max_num_threads\"])\n\n @property\n def max_block_size_x(self):\n \"\"\"Returns the max block size in x-dimension from the target if it exists.\"\"\"\n return int(self.attrs[\"max_block_size_x\"])\n\n @property\n def max_block_size_y(self):\n \"\"\"Returns the max block size in y-dimension from the target if it exists.\"\"\"\n return int(self.attrs[\"max_block_size_y\"])\n\n @property\n def thread_warp_size(self):\n \"\"\"Returns the thread_warp_size from the target if it exists.\"\"\"\n return int(self.attrs[\"thread_warp_size\"])\n\n @property\n def max_shared_memory_per_block(self):\n return int(self.attrs[\"max_shared_memory_per_block\"])\n\n @property\n def max_function_args(self):\n return int(self.attrs.get(\"max_function_args\", 0))\n\n @property\n def vtcm_capacity(self):\n return int(self.attrs.get(\"vtcm-capacity\", 0))\n\n @property\n def device_name(self):\n return str(self.attrs.get(\"device\", \"\"))\n\n @property\n def model(self):\n \"\"\"Returns model from the target if it exists.\"\"\"\n return str(self.attrs.get(\"model\", \"unknown\"))\n\n @property\n def mcpu(self):\n \"\"\"Returns the mcpu from the target if it exists.\"\"\"\n return str(self.attrs.get(\"mcpu\", \"\"))\n\n @property\n def mattr(self):\n \"\"\"Returns the mattr from the target if it exists.\"\"\"\n return list(self.attrs.get(\"mattr\", []))\n\n @property\n def supports_integer_dot_product(self):\n if self.attrs.get(\"supports_integer_dot_product\", []):\n return 
bool(self.attrs[\"supports_integer_dot_product\"])\n if self.kind.name == \"cuda\":\n sm_version = int(self.arch.split(\"_\")[1])\n if sm_version >= 61:\n return True\n return False\n\n @property\n def libs(self):\n return list(self.attrs.get(\"libs\", []))\n\n @property\n def supports_cooperative_matrix(self):\n if self.attrs.get(\"supports_cooperative_matrix\", []):\n return bool(self.attrs[\"supports_cooperative_matrix\"])\n else:\n return False\n\n @property\n def features(self):\n return TargetFeatures(self)\n\n @property\n def l2_cache_size_bytes(self):\n return int(self.attrs.get(\"l2_cache_size_bytes\", 0))\n\n def get_kind_attr(self, attr_name):\n \"\"\"Get additional attribute about the target kind.\n\n Parameters\n ----------\n attr_name : str\n The attribute name.\n\n Returns\n -------\n value : object\n The attribute value\n \"\"\"\n return _ffi_api.TargetKindGetAttr(self.kind, attr_name)\n\n def get_target_device_type(self):\n \"\"\"Returns the device_type for this target.\"\"\"\n return _ffi_api.TargetGetDeviceType(self)\n\n @staticmethod\n def list_kinds():\n \"\"\"Returns the list of available target names.\"\"\"\n return list(_ffi_api.ListTargetKinds())\n\n @staticmethod\n def canon_target(target):\n \"\"\"Given a single target-like object, returns the TVM Target object representing it.\n Can convert from:\n - None (to None).\n - An existing TVM Target object.\n - A string, eg \"cuda\" or \"cuda -arch=sm_80\"\n - A Python dictionary, eg {\"kind\": \"cuda\", \"arch\": \"sm_80\" }\n \"\"\"\n if target is None:\n return None\n if isinstance(target, Target):\n return target\n return Target(target)\n\n @staticmethod\n def canon_target_and_host(target, target_host=None):\n \"\"\"Returns a TVM Target capturing target and target_host. Also returns the host in\n canonical form. The given target can be in any form recognized by\n Target.canon_target. If given, target_host can be in any form recognized by\n Target.canon_target. If target_host is given it will be set as the 'host' in the\n result Target object (and a warning given).\n\n Note that this method does not support heterogeneous compilation targets.\n \"\"\"\n target = Target.canon_target(target)\n if target is None:\n assert target_host is None, \"Target host is not empty when target is empty.\"\n return target, target_host\n if target.host is None and target_host is not None:\n warnings.warn(\n \"target_host parameter is going to be deprecated. \"\n \"Please pass in tvm.target.Target(target, host=target_host) instead.\"\n )\n target_host = Target.canon_target(target_host)\n target = target.with_host(target_host)\n if target is not None:\n # In case the target already had a host, extract it here.\n target_host = target.host\n return target, target_host\n\n @staticmethod\n def canon_multi_target(multi_targets):\n \"\"\"Given a single target-like object, or a collection-like object of target-like objects,\n returns a TVM Array of TVM Target objects representing then. Can convert from:\n - None (to None).\n - A single target-like object in a form recognized by canon_target.\n - A Python list or TVM Array of target-like objects in a form recognized by\n canon_target.\n - A Python dict or TVM Map from TVM IntImm objects representing device types to\n a target-like object in a form recognized by canon_target. (This is a legacy\n method to represent heterogeneous targets. 
The keys are ignored.)\n \"\"\"\n if multi_targets is None:\n return None\n if isinstance(multi_targets, (dict, Map)) and \"kind\" not in multi_targets:\n # Convert legacy heterogeneous map representation to ordinary list of targets.\n return Target.canon_multi_target(list(multi_targets.values()))\n if isinstance(multi_targets, (list, Array)):\n # Multiple Target results.\n return convert([Target.canon_target(tgt) for tgt in multi_targets])\n # Single Target result.\n return convert([Target.canon_target(multi_targets)])\n\n @staticmethod\n def canon_multi_target_and_host(target, target_host=None):\n \"\"\"Returns a TVM Array<Target> capturing target and target_host. The given target can be in\n any form recognized by Target.canon_multi_target. If given, target_host can be in\n any form recognized by Target.canon_target. If target_host is given it will be set\n as the 'host' in each result Target object (and a warning given).\n \"\"\"\n # Convert target to Array<Target>, but not yet accounting for any host.\n raw_targets = Target.canon_multi_target(target)\n assert raw_targets is not None and len(raw_targets) > 0\n # Convert host to Target, if given.\n if raw_targets[0].host is None and target_host is not None:\n warnings.warn(\n \"target_host parameter is going to be deprecated. \"\n \"Please pass in tvm.target.Target(target, host=target_host) instead.\"\n )\n # Make sure the (canonical) host is captured in all the (canonical) targets.\n target_host = Target.canon_target(target_host)\n raw_targets = convert([tgt.with_host(target_host) for tgt in raw_targets])\n return raw_targets\n\n @staticmethod\n def canon_target_map_and_host(target_map, target_host=None):\n \"\"\"Returns target_map as a map from TVM Target's in canonical form to IRModules. The keys\n of the input target_map can be in any form recognized by Target.canon_target.\n Similarly, if given, target_host can be in any form recognized by\n Target.canon_target. The final target_map keys will capture the target_host in\n canonical form. Also returns the target_host in canonical form.\"\"\"\n new_target_map = {}\n canonical_target_host = None\n for tgt, mod in target_map.items():\n tgt = Target.canon_target(tgt)\n assert tgt is not None\n if canonical_target_host is None:\n if tgt.host is not None:\n canonical_target_host = tgt.host\n elif target_host is not None:\n # No deprecation warning in this case since host may have been manufactured\n # behind the scenes in build_module.py build.\n canonical_target_host = Target.canon_target(target_host)\n if tgt.host is None and canonical_target_host is not None:\n tgt = tgt.with_host(canonical_target_host)\n new_target_map[tgt] = mod\n return new_target_map, canonical_target_host\n\n @staticmethod\n def target_or_current(target):\n \"\"\"Returns target, or the current target in the environment if target is None\"\"\"\n if target is None:\n target = Target.current()\n if target is None:\n raise ValueError(\"Target is not set in env or passed as argument.\")\n return target"
},
{
"identifier": "get_global_func",
"path": "python/tvm/_ffi/registry.py",
"snippet": "def get_global_func(name, allow_missing=False):\n \"\"\"Get a global function by name\n\n Parameters\n ----------\n name : str\n The name of the global function\n\n allow_missing : bool\n Whether allow missing function or raise an error.\n\n Returns\n -------\n func : PackedFunc\n The function to be returned, None if function is missing.\n \"\"\"\n return _get_global_func(name, allow_missing)"
},
{
"identifier": "Device",
"path": "python/tvm/_ffi/runtime_ctypes.py",
"snippet": "class Device(ctypes.Structure):\n \"\"\"TVM device strucure.\n\n Typically constructed using convenience function\n :meth:`tvm.runtime.device`.\n\n Exposes uniform interface to device-specific APIs such as CUDA or\n OpenCL. Some properties may return None depending on whether an\n API exposes that particular property.\n\n NOTE! The integer values in MASK2STR and STR2MASK *must* correspond\n to the values provided by the DLDeviceType and TVMDeviceExtType enums.\n \"\"\"\n\n kDLCPU = 1\n kDLCUDA = 2\n kDLCUDAHost = 3\n kDLOpenCL = 4\n kDLVulkan = 7\n kDLMetal = 8\n kDLVPI = 9\n kDLROCM = 10\n kDLROCMHost = 11\n kDLExtDev = 12\n kDLCUDAManaged = 13\n kDLOneAPI = 14\n kDLWebGPU = 15\n kDLHexagon = 16\n kDLAOCL = 32\n kDLSDAccel = 33\n kOpenGL = 34\n kDLMicroDev = 35\n\n _fields_ = [(\"device_type\", ctypes.c_int), (\"device_id\", ctypes.c_int)]\n MASK2STR = {\n kDLCPU: \"cpu\",\n kDLCUDA: \"cuda\",\n kDLCUDAHost: \"cuda_host\",\n kDLCUDAManaged: \"cuda_managed\",\n kDLOpenCL: \"opencl\",\n kDLVulkan: \"vulkan\",\n kDLMetal: \"metal\",\n kDLVPI: \"vpi\",\n kDLROCM: \"rocm\",\n kDLROCMHost: \"rocm_host\",\n kDLExtDev: \"ext_dev\",\n kDLOneAPI: \"oneapi\",\n kDLWebGPU: \"webgpu\",\n kDLHexagon: \"hexagon\",\n kDLAOCL: \"aocl\",\n kDLSDAccel: \"sdaccel\",\n kOpenGL: \"opengl\",\n kDLMicroDev: \"microdev\",\n }\n\n STR2MASK = {\n \"llvm\": kDLCPU,\n \"stackvm\": kDLCPU,\n \"cpu\": kDLCPU,\n \"c\": kDLCPU,\n \"test\": kDLCPU,\n \"hybrid\": kDLCPU,\n \"composite\": kDLCPU,\n \"cuda\": kDLCUDA,\n \"nvptx\": kDLCUDA,\n \"cl\": kDLOpenCL,\n \"opencl\": kDLOpenCL,\n \"sdaccel\": kDLOpenCL,\n \"aocl\": kDLAOCL,\n \"aocl_sw_emu\": kDLAOCL,\n \"vulkan\": kDLVulkan,\n \"metal\": kDLMetal,\n \"vpi\": kDLVPI,\n \"rocm\": kDLROCM,\n \"ext_dev\": kDLExtDev,\n \"hexagon\": kDLHexagon,\n \"webgpu\": kDLWebGPU,\n }\n\n def __init__(self, device_type, device_id):\n super(Device, self).__init__()\n self.device_type = int(device_type)\n self.device_id = device_id\n\n def _GetDeviceAttr(self, device_type, device_id, attr_id):\n \"\"\"Internal helper function to invoke runtime.GetDeviceAttr\"\"\"\n # pylint: disable=import-outside-toplevel\n import tvm.runtime._ffi_api\n\n return tvm.runtime._ffi_api.GetDeviceAttr(device_type, device_id, attr_id)\n\n @property\n def exist(self):\n \"\"\"Whether this device exists.\n\n Returns True if TVM has support for the device, if the\n physical device is present, and the device is accessible\n through appropriate drivers (e.g. cuda/vulkan).\n\n Returns\n -------\n exist : bool\n True if the device exists\n\n \"\"\"\n return self._GetDeviceAttr(self.device_type, self.device_id, 0) != 0\n\n @property\n def max_threads_per_block(self):\n \"\"\"Maximum number of threads on each block.\n\n Returns device value for cuda, metal, rocm, opencl, and vulkan\n devices. Returns remote device value for RPC devices.\n Returns None for all other devices.\n\n Returns\n -------\n max_threads_per_block : int or None\n The number of threads on each block\n\n \"\"\"\n return self._GetDeviceAttr(self.device_type, self.device_id, 1)\n\n @property\n def warp_size(self):\n \"\"\"Number of threads that execute concurrently.\n\n Returns device value for cuda, rocm, and vulkan. Returns\n 1 for metal and opencl devices, regardless of the physical\n device. Returns remote device value for RPC devices. 
Returns\n None for all other devices.\n\n Returns\n -------\n warp_size : int or None\n Number of threads that execute concurrently\n\n \"\"\"\n return self._GetDeviceAttr(self.device_type, self.device_id, 2)\n\n @property\n def max_shared_memory_per_block(self):\n \"\"\"Total amount of shared memory per block in bytes.\n\n Returns device value for cuda, rocm, opencl, and vulkan.\n Returns remote device value for RPC devices. Returns None for\n all other devices.\n\n Returns\n -------\n max_shared_memory_per_block : int or None\n Total amount of shared memory per block in bytes\n\n \"\"\"\n return self._GetDeviceAttr(self.device_type, self.device_id, 3)\n\n @property\n def compute_version(self):\n \"\"\"Get compute version number as string.\n\n Returns maximum API version (e.g. CUDA/OpenCL/Vulkan)\n supported by the device.\n\n Returns device value for cuda, rocm, opencl, and\n vulkan. Returns remote device value for RPC devices. Returns\n None for all other devices.\n\n Returns\n -------\n version : str or None\n The version string in `major.minor` format.\n\n \"\"\"\n return self._GetDeviceAttr(self.device_type, self.device_id, 4)\n\n @property\n def device_name(self):\n \"\"\"Return the vendor-specific name of device.\n\n Returns device value for cuda, rocm, opencl, and vulkan.\n Returns remote device value for RPC devices. Returns None for\n all other devices.\n\n Returns\n -------\n device_name : str or None\n The name of the device.\n\n \"\"\"\n return self._GetDeviceAttr(self.device_type, self.device_id, 5)\n\n @property\n def max_clock_rate(self):\n \"\"\"Return the max clock frequency of device (kHz).\n\n Returns device value for cuda, rocm, and opencl. Returns\n remote device value for RPC devices. Returns None for all\n other devices.\n\n Returns\n -------\n max_clock_rate : int or None\n The maximum clock frequency of the device (kHz)\n\n \"\"\"\n return self._GetDeviceAttr(self.device_type, self.device_id, 6)\n\n @property\n def multi_processor_count(self):\n \"\"\"Return the number of compute units in the device.\n\n Returns device value for cuda, rocm, and opencl. Returns\n remote device value for RPC devices. Returns None for all\n other devices.\n\n Returns\n -------\n multi_processor_count : int or None\n Thee number of compute units in the device\n\n \"\"\"\n return self._GetDeviceAttr(self.device_type, self.device_id, 7)\n\n @property\n def max_thread_dimensions(self):\n \"\"\"Return the maximum size of each thread axis\n\n Returns device value for cuda, rocm, opencl, and vulkan.\n Returns remote device value for RPC devices. Returns None for\n all other devices.\n\n Returns\n -------\n dims: List of int, or None\n The maximum length of threadIdx.x, threadIdx.y, threadIdx.z\n\n \"\"\"\n return json.loads(self._GetDeviceAttr(self.device_type, self.device_id, 8))\n\n @property\n def api_version(self):\n \"\"\"Returns version number of the SDK used to compile TVM.\n\n For example, CUDA_VERSION for cuda or VK_HEADER_VERSION for\n Vulkan.\n\n Returns device value for cuda, rocm, opencl, and vulkan.\n Returns remote device value for RPC devices. Returns None for\n all other devices.\n\n Returns\n -------\n version : int or None\n The version of the SDK\n\n \"\"\"\n return self._GetDeviceAttr(self.device_type, self.device_id, 11)\n\n @property\n def driver_version(self):\n \"\"\"Returns version number of the driver\n\n Returns driver vendor's internal version number.\n (e.g. \"450.408.256\" for nvidia-driver-450)\n\n Returns device value for opencl and vulkan. 
Returns remote\n device value for RPC devices. Returns None for all other\n devices.\n\n Returns\n -------\n version : str or None\n The version string in `major.minor.patch` format.\n\n \"\"\"\n return self._GetDeviceAttr(self.device_type, self.device_id, 12)\n\n @property\n def l2_cache_size_bytes(self):\n \"\"\"Return the size of the device L2 cache in bytes\n\n Supported devices include CUDA/ROCM/OpenCL.\n\n Returns\n -------\n l2_cache_size_bytes : int or None\n The size of the device L2 cache in bytes returned by device runtime API.\n Return None if the device does not support this feature.\n\n Note\n ----\n The value returned by opencl's API is smaller than actual device L2 cache size.\n \"\"\"\n return self._GetDeviceAttr(self.device_type, self.device_id, 13)\n\n def texture_spatial_limit(self):\n \"\"\"Returns limits for textures by spatial dimensions\n\n Returns\n -------\n limit : int or None\n Maximum size of the texture by spatial dimensions\n\n \"\"\"\n return self._GetDeviceAttr(self.device_type, self.device_id, 12)\n\n def create_raw_stream(self):\n \"\"\"Create a new runtime stream at the context.\n\n User should free the stream after use.\n\n Returns\n -------\n stream : TVMStreamHandle\n The created runtime stream.\n \"\"\"\n stream = ctypes.c_void_p()\n check_call(_LIB.TVMStreamCreate(self.device_type, self.device_id, ctypes.byref(stream)))\n return stream\n\n def free_raw_stream(self, stream):\n \"\"\"Free a created stream handle.\n\n Parameters\n ----------\n stream : TVMStreamHandle\n The stream which should to be released.\n \"\"\"\n check_call(_LIB.TVMStreamFree(self.device_type, self.device_id, stream))\n\n def set_raw_stream(self, stream):\n \"\"\"Set a created stream handle.\n\n Parameters\n ----------\n stream : TVMStreamHandle\n The stream which should to be set to the device.\n \"\"\"\n check_call(_LIB.TVMSetStream(self.device_type, self.device_id, stream))\n\n def sync(self, stream=None):\n \"\"\"Synchronize until jobs finished at the context.\n\n Parameters\n ----------\n stream : TVMStreamHandle\n Jobs in this stream should be finished.\n \"\"\"\n check_call(_LIB.TVMSynchronize(self.device_type, self.device_id, stream))\n\n def __eq__(self, other):\n return (\n isinstance(other, Device)\n and self.device_id == other.device_id\n and self.device_type == other.device_type\n )\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __hash__(self):\n return hash(str(self))\n\n def __repr__(self):\n if self.device_type >= RPC_SESS_MASK:\n tbl_id = self.device_type / RPC_SESS_MASK - 1\n dev_type = self.device_type % RPC_SESS_MASK\n return \"remote[%d]:%s(%d)\" % (tbl_id, Device.MASK2STR[dev_type], self.device_id)\n return \"%s(%d)\" % (Device.MASK2STR[self.device_type], self.device_id)"
},
{
"identifier": "device",
"path": "python/tvm/runtime/ndarray.py",
"snippet": "@property\ndef device(self):\n \"\"\"Device of this array\"\"\"\n return self.handle.contents.device"
}
] | from typing import Union
from . import Target
from .._ffi import get_global_func
from .._ffi.runtime_ctypes import Device
from ..runtime.ndarray import device | 7,225 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Detect target."""
def _detect_metal(dev: Device) -> Target:
return Target(
{
"kind": "metal",
"max_shared_memory_per_block": 32768,
"max_threads_per_block": dev.max_threads_per_block,
"thread_warp_size": dev.warp_size,
}
)
def _detect_cuda(dev: Device) -> Target:
return Target(
{
"kind": "cuda",
"max_shared_memory_per_block": dev.max_shared_memory_per_block,
"max_threads_per_block": dev.max_threads_per_block,
"thread_warp_size": dev.warp_size,
"arch": "sm_" + dev.compute_version.replace(".", ""),
}
)
def _detect_rocm(dev: Device) -> Target:
return Target(
{
"kind": "rocm",
"mtriple": "amdgcn-and-amdhsa-hcc",
"max_shared_memory_per_block": dev.max_shared_memory_per_block,
"max_threads_per_block": dev.max_threads_per_block,
"thread_warp_size": dev.warp_size,
}
)
def _detect_vulkan(dev: Device) -> Target:
| # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Detect target."""
def _detect_metal(dev: Device) -> Target:
return Target(
{
"kind": "metal",
"max_shared_memory_per_block": 32768,
"max_threads_per_block": dev.max_threads_per_block,
"thread_warp_size": dev.warp_size,
}
)
def _detect_cuda(dev: Device) -> Target:
return Target(
{
"kind": "cuda",
"max_shared_memory_per_block": dev.max_shared_memory_per_block,
"max_threads_per_block": dev.max_threads_per_block,
"thread_warp_size": dev.warp_size,
"arch": "sm_" + dev.compute_version.replace(".", ""),
}
)
def _detect_rocm(dev: Device) -> Target:
return Target(
{
"kind": "rocm",
"mtriple": "amdgcn-and-amdhsa-hcc",
"max_shared_memory_per_block": dev.max_shared_memory_per_block,
"max_threads_per_block": dev.max_threads_per_block,
"thread_warp_size": dev.warp_size,
}
)
def _detect_vulkan(dev: Device) -> Target: | f_get_target_property = get_global_func("device_api.vulkan.get_target_property") | 1 | 2023-12-14 02:37:47+00:00 | 12k |
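The row above stops exactly where `_detect_vulkan` begins; its recorded next_line fetches the packed function `device_api.vulkan.get_target_property` via `get_global_func`. A hedged sketch of how the rest of such a helper might look is below — the specific property names queried and the `(device, property_name)` call signature are assumptions, not taken from the row:
```python
def _detect_vulkan(dev: Device) -> Target:
    # Query Vulkan-specific capabilities through the runtime's packed function.
    f_get_target_property = get_global_func("device_api.vulkan.get_target_property")
    return Target(
        {
            "kind": "vulkan",
            "max_threads_per_block": dev.max_threads_per_block,
            "max_shared_memory_per_block": dev.max_shared_memory_per_block,
            "thread_warp_size": dev.warp_size,
            # Assumed signature: f_get_target_property(device, property_name) -> value.
            "supports_float16": f_get_target_property(dev, "supports_float16"),
            "supports_int8": f_get_target_property(dev, "supports_int8"),
        }
    )
```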